Python prometheus_client.Gauge() Examples
The following are 30 code examples of prometheus_client.Gauge().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module prometheus_client, or try the search function.
Example #1
Source File: test_testutils.py From django-prometheus with Apache License 2.0 | 12 votes |
def setUp(self):
    """Build an isolated registry with one plain and one labelled gauge.

    A dedicated CollectorRegistry is used so the test neither pollutes
    nor depends on the global default registry.
    """
    self.registry = prometheus_client.CollectorRegistry()
    self.some_gauge = prometheus_client.Gauge(
        "some_gauge", "Some gauge.", registry=self.registry
    )
    self.some_gauge.set(42)
    self.some_labelled_gauge = prometheus_client.Gauge(
        "some_labelled_gauge",
        "Some labelled gauge.",
        ["labelred", "labelblue"],
        registry=self.registry,
    )
    # Populate every combination of the two label values.
    self.some_labelled_gauge.labels("pink", "indigo").set(1)
    self.some_labelled_gauge.labels("pink", "royal").set(2)
    self.some_labelled_gauge.labels("carmin", "indigo").set(3)
    self.some_labelled_gauge.labels("carmin", "royal").set(4)
    self.test_case = SomeTestCase()
Example #2
Source File: monitor.py From CrawlerMonitor with MIT License | 7 votes |
def create_metric(self):
    """Create all Celery exporter metrics.

    NOTE(review): each metric gets a paired ``*_c`` CollectorRegistry,
    but the metrics themselves are constructed on the default registry —
    presumably the ``*_c`` registries are used elsewhere (e.g. for
    pushing); confirm against the rest of the exporter.
    """
    # record app conf
    self.conf_info = Info('celery_conf_info', 'APP_CONF')
    self.conf_info_c = CollectorRegistry()
    # monitor worker info
    self.workers_info = Info('celery_workers_info', 'WORKER_INFO')
    self.workers_info_c = CollectorRegistry()
    # monitor worker info real-time
    self.workers_state = Gauge('celery_workers_state', 'WORKER_STATE', ['worker'])
    self.workers_state_c = CollectorRegistry()
    self.workers_processed = Gauge('celery_processed_tasks_total', 'WORKER_TASKS_PROCESSED', ['worker'])
    self.workers_processed_c = CollectorRegistry()
    self.workers_active = Gauge('celery_active_tasks_total', 'WORKER_TASKS_ACTIVE', ['worker'])
    self.workers_active_c = CollectorRegistry()
    # monitor tasks info
    self.tasks_counter = Counter('celery_tasks_total', 'TASK_COUNT_INFO', ['worker', 'task', 'result'])
    self.tasks_counter_c = CollectorRegistry()
    self.tasks_runtime = Summary('celery_tasks_seconds', 'TASK_RUNTIME', ['worker', 'task'])
    self.tasks_runtime_c = CollectorRegistry()
    self.tasks_info = Info('celery_tasks_info', 'TASK_INFO')
    self.tasks_info_c = CollectorRegistry()
Example #3
Source File: utils.py From zentral with Apache License 2.0 | 7 votes |
def get_prometheus_incidents_metrics():
    """Return a CollectorRegistry with per-incident machine counts.

    Emits one ``zentral_incidents_count`` sample per distinct
    (name, id, severity, status, opened) combination found in the DB.
    """
    registry = CollectorRegistry()
    g = Gauge('zentral_incidents_count', 'Zentral incidents',
              ['name', 'id', 'severity', 'status', 'opened'],
              registry=registry)
    query = (
        "select count(*), "
        "i.id, i.name, i.severity, "
        "mi.status, (CASE WHEN mi.status in ('CLOSED', 'RESOLVED') THEN FALSE ELSE TRUE END) as opened "
        "from incidents_incident as i "
        "join incidents_machineincident as mi on (mi.incident_id = i.id) "
        "group by i.name, i.id, i.severity, mi.status, opened "
        "order by i.id, mi.status;"
    )
    # Use the cursor as a context manager so it is always closed
    # (the original leaked the cursor, especially on errors).
    with connection.cursor() as cursor:
        cursor.execute(query)
        columns = [col[0] for col in cursor.description]
        rows = cursor.fetchall()
    for row in rows:
        d = dict(zip(columns, row))
        # Map DB codes to human-readable label values, defaulting to "Unknown".
        d["severity"] = str(SEVERITY_CHOICES_DICT.get(d.pop("severity"), "Unknown"))
        d["status"] = str(STATUS_CHOICES_DICT.get(d.pop("status"), "Unknown"))
        d["opened"] = 'Y' if d["opened"] else 'N'
        count = d.pop('count')
        g.labels(**d).set(count)
    return registry
Example #4
Source File: cinder_services.py From prometheus-openstack-exporter with Apache License 2.0 | 6 votes |
def get_stats(self):
    """Render the cached Cinder service statistics as Prometheus text."""
    registry = CollectorRegistry()
    label_names = ['region', 'host', 'service', 'state']
    for stat in self.get_cache_data():
        gauge = Gauge(
            self.gauge_name_sanitize(stat['stat_name']),
            'Openstack Cinder Service statistic',
            label_names,
            registry=registry)
        values = [
            self.osclient.region,
            stat.get('host', ''),
            stat.get('service', ''),
            stat.get('state', ''),
        ]
        gauge.labels(*values).set(stat['stat_value'])
    return generate_latest(registry)
Example #5
Source File: prometheus_openstack_exporter.py From prometheus-openstack-exporter with GNU General Public License v3.0 | 6 votes |
def gen_volume_quota_stats(self):
    """Export Cinder volume quota usage (GB and volume count) per tenant.

    NOTE(review): the gauges are registered *before* the
    ``use_nova_volumes`` guard, so they exist (with no samples) even
    when volume collection is disabled — presumably intentional, confirm.
    """
    gbs = Gauge('cinder_quota_volume_disk_gigabytes', 'Cinder volume metric (GB)',
                ['cloud', 'tenant', 'type'], registry=self.registry)
    vol = Gauge('cinder_quota_volume_disks', 'Cinder volume metric (number of volumes)',
                ['cloud', 'tenant', 'type'], registry=self.registry)
    if not self.use_nova_volumes:
        return
    for t, q in self.prodstack['volume_quotas'].items():
        if t in self.tenant_map:
            tenant = self.tenant_map[t]
        else:
            # Quota rows whose tenant no longer exists in keystone.
            tenant = 'orphaned'
        for tt in ['limit', 'in_use', 'reserved']:
            # inc() rather than set(): several rows collapsing to the
            # same tenant name (e.g. orphans) accumulate.
            gbs.labels(config['cloud'], tenant, tt).inc(q['gigabytes'][tt])
            vol.labels(config['cloud'], tenant, tt).inc(q['volumes'][tt])
Example #6
Source File: prometheus_openstack_exporter.py From prometheus-openstack-exporter with GNU General Public License v3.0 | 6 votes |
def gen_subnet_size(self):
    """Export the total allocatable IPv4 pool size of each Neutron network."""
    labels = ['cloud', 'network_name']
    net_size = Gauge('neutron_net_size', 'Neutron networks size',
                     labels, registry=self.registry)
    for n in self.prodstack['networks']:
        size = 0
        # Sum the sizes of all allocation pools across the network's subnets.
        for subnet in n['subnets']:
            for pool in self.subnet_map[subnet]['pool']:
                if ':' in pool['start']:
                    # Skip IPv6 address pools; they are big enough to
                    # drown the IPv4 numbers we might care about.
                    continue
                size += IPRange(pool['start'], pool['end']).size
        label_values = [config['cloud'], self.network_map[n['id']]]
        net_size.labels(*label_values).set(size)
Example #7
Source File: metrics.py From client_python with Apache License 2.0 | 6 votes |
def __init__(self,
             name,
             documentation,
             labelnames=(),
             namespace='',
             subsystem='',
             unit='',
             registry=REGISTRY,
             labelvalues=None,
             multiprocess_mode='all',
             ):
    """Create a Gauge, validating the multiprocess aggregation mode.

    multiprocess_mode must be one of ``self._MULTIPROC_MODES``; it is
    kept so labelled children can be built with the same mode.
    """
    self._multiprocess_mode = multiprocess_mode
    # Fail fast on an unknown mode instead of at collection time.
    if multiprocess_mode not in self._MULTIPROC_MODES:
        raise ValueError('Invalid multiprocess mode: ' + multiprocess_mode)
    super(Gauge, self).__init__(
        name=name,
        documentation=documentation,
        labelnames=labelnames,
        namespace=namespace,
        subsystem=subsystem,
        unit=unit,
        registry=registry,
        labelvalues=labelvalues,
    )
    # Presumably consumed by the base class when creating labelled
    # children via labels() — confirm in MetricWrapperBase.
    self._kwargs['multiprocess_mode'] = self._multiprocess_mode
Example #8
Source File: gauge_prom.py From faucet with Apache License 2.0 | 6 votes |
def __init__(self, reg=None):
    """Create gauges for datapath status plus all port and meter variables.

    Per-variable gauges are stored in ``self.metrics`` keyed by their
    exported (prefixed) name.
    """
    super(GaugePrometheusClient, self).__init__(reg=reg)
    # table_tags[table_name] -> set of label names seen for that table.
    self.table_tags = collections.defaultdict(set)
    self.metrics = {}
    self.dp_status = Gauge(  # pylint: disable=unexpected-keyword-arg
        'dp_status',
        'status of datapaths',
        self.REQUIRED_LABELS,
        registry=self._reg)
    # Port-level variables carry port identity labels in addition to
    # the required datapath labels.
    for prom_var in PROM_PORT_VARS + PROM_PORT_STATE_VARS:
        exported_prom_var = PROM_PREFIX_DELIM.join(
            (PROM_PORT_PREFIX, prom_var))
        self.metrics[exported_prom_var] = Gauge(  # pylint: disable=unexpected-keyword-arg
            exported_prom_var, '',
            self.REQUIRED_LABELS + ['port', 'port_description'],
            registry=self._reg)
    # Meter-level variables are labelled by meter_id instead.
    for prom_var in PROM_METER_VARS:
        exported_prom_var = PROM_PREFIX_DELIM.join(
            (PROM_METER_PREFIX, prom_var))
        self.metrics[exported_prom_var] = Gauge(  # pylint: disable=unexpected-keyword-arg
            exported_prom_var, '',
            self.REQUIRED_LABELS + ['meter_id'],
            registry=self._reg)
Example #9
Source File: nova_services.py From prometheus-openstack-exporter with Apache License 2.0 | 6 votes |
def get_stats(self):
    """Serialize the cached Nova service statistics to Prometheus text format."""
    registry = CollectorRegistry()
    label_names = ['region', 'host', 'service', 'state']
    for entry in self.get_cache_data():
        metric = Gauge(
            self.gauge_name_sanitize(entry['stat_name']),
            'Openstack Nova Service statistic',
            label_names,
            registry=registry)
        metric.labels(
            self.osclient.region,
            entry.get('host', ''),
            entry.get('service', ''),
            entry.get('state', '')).set(entry['stat_value'])
    return generate_latest(registry)
Example #10
Source File: check_os_api.py From prometheus-openstack-exporter with Apache License 2.0 | 6 votes |
def get_stats(self):
    """Export the cached API-check results (fail = 0, ok = 1, unknown = 2)."""
    registry = CollectorRegistry()
    labels = ['region', 'url', 'service']
    check_api_data_cache = self.get_cache_data()
    for check_api_data in check_api_data_cache:
        label_values = [
            check_api_data['region'],
            check_api_data['url'],
            check_api_data['service']]
        # Fixed misspelled local variable ("gague_name" -> "gauge_name").
        gauge_name = self.gauge_name_sanitize(
            "check_{}_api".format(check_api_data['service']))
        check_gauge = Gauge(
            gauge_name,
            'Openstack API check. fail = 0, ok = 1 and unknown = 2',
            labels,
            registry=registry)
        check_gauge.labels(*label_values).set(check_api_data['status'])
    return generate_latest(registry)
Example #11
Source File: neutron_agents.py From prometheus-openstack-exporter with Apache License 2.0 | 6 votes |
def get_stats(self):
    """Emit the cached Neutron agent statistics in Prometheus exposition format."""
    registry = CollectorRegistry()
    for agent_stat in self.get_cache_data():
        gauge = Gauge(
            self.gauge_name_sanitize(agent_stat['stat_name']),
            'Openstack Neutron agent statistic',
            ['region', 'host', 'service', 'state'],
            registry=registry)
        child = gauge.labels(
            self.osclient.region,
            agent_stat.get('host', ''),
            agent_stat.get('service', ''),
            agent_stat.get('state', ''))
        child.set(agent_stat['stat_value'])
    return generate_latest(registry)
Example #12
Source File: metrics.py From Carnets with BSD 3-Clause "New" or "Revised" License | 6 votes |
def __init__(self,
             name,
             documentation,
             labelnames=(),
             namespace='',
             subsystem='',
             unit='',
             registry=REGISTRY,
             labelvalues=None,
             multiprocess_mode='all',
             ):
    """Create a Gauge, validating the multiprocess aggregation mode.

    multiprocess_mode must be one of ``self._MULTIPROC_MODES``; it is
    remembered so labelled children share the same mode.
    """
    self._multiprocess_mode = multiprocess_mode
    # Reject unknown modes at construction time rather than at scrape time.
    if multiprocess_mode not in self._MULTIPROC_MODES:
        raise ValueError('Invalid multiprocess mode: ' + multiprocess_mode)
    super(Gauge, self).__init__(
        name=name,
        documentation=documentation,
        labelnames=labelnames,
        namespace=namespace,
        subsystem=subsystem,
        unit=unit,
        registry=registry,
        labelvalues=labelvalues,
    )
    # Presumably forwarded by the base class when building labelled
    # children — confirm in the metrics base class.
    self._kwargs['multiprocess_mode'] = self._multiprocess_mode
Example #13
Source File: conftest.py From integrations-core with BSD 3-Clause "New" or "Revised" License | 6 votes |
def poll_mock():
    """Yield with ``requests.get`` patched to serve a small fixed metrics payload.

    Builds a registry with two labelled gauges, a counter, and a gauge
    holding an infinite value, then patches requests.get so scrapes see
    the registry's text exposition as streamed lines.
    """
    registry = CollectorRegistry()
    g1 = Gauge('metric1', 'processor usage', ['matched_label', 'node', 'flavor'], registry=registry)
    g1.labels(matched_label="foobar", node="host1", flavor="test").set(99.9)
    g2 = Gauge('metric2', 'memory usage', ['matched_label', 'node', 'timestamp'], registry=registry)
    g2.labels(matched_label="foobar", node="host2", timestamp="123").set(12.2)
    c1 = Counter('counter1', 'hits', ['node'], registry=registry)
    c1.labels(node="host2").inc(42)
    # Non-finite sample to exercise the scraper's inf handling.
    g3 = Gauge('metric3', 'memory usage', ['matched_label', 'node', 'timestamp'], registry=registry)
    g3.labels(matched_label="foobar", node="host2", timestamp="456").set(float('inf'))
    poll_mock_patch = mock.patch(
        'requests.get',
        return_value=mock.MagicMock(
            status_code=200,
            iter_lines=lambda **kwargs: ensure_unicode(generate_latest(registry)).split("\n"),
            headers={'Content-Type': "text/plain"},
        ),
    )
    with poll_mock_patch:
        yield
Example #14
Source File: metrics.py From anchore-engine with Apache License 2.0 | 6 votes |
def gauge_set(name, observation, description="", **kwargs):
    """Set gauge ``name`` to ``observation``, creating it on first use.

    kwargs become label name/value pairs; the label *names* are fixed by
    the first call for a given metric name. Always returns True.
    """
    global metrics
    if not enabled:
        return True
    try:
        if name not in metrics:
            # Label names come from this first call's kwargs; later calls
            # must pass the same keys.
            metrics[name] = Gauge(name, description, list(kwargs.keys()))
        if kwargs:
            metrics[name].labels(**kwargs).set(observation)
        else:
            metrics[name].set(observation)
    except Exception as err:
        # Deliberate best-effort: metric failures must never break the caller.
        logger.warn("adding metric failed - exception: " + str(err))
    return True
Example #15
Source File: __init__.py From prometheus_flask_exporter with MIT License | 6 votes |
def gauge(self, name, description, labels=None, **kwargs):
    """
    Use a Gauge to track the number of invocations in progress
    for the method.

    The gauge is incremented before each tracked call and decremented
    when the call finishes; the revert callback undoes the increment
    when the call ends up not being tracked.

    :param name: the name of the metric
    :param description: the description of the metric
    :param labels: a dictionary of `{labelname: callable_or_value}` for labels
    :param kwargs: additional keyword arguments for creating the Gauge
    """
    return self._track(
        Gauge,
        lambda metric, time: metric.dec(),
        kwargs,
        name, description, labels,
        registry=self.registry,
        before=lambda metric: metric.inc(),
        revert_when_not_tracked=lambda metric: metric.dec()
    )
Example #16
Source File: dataexporters.py From ouroboros with MIT License | 6 votes |
def __init__(self, data_manager, config):
    """Start the Prometheus HTTP endpoint and create the exported metrics."""
    self.config = config
    self.data_manager = data_manager
    # Exposition endpoint; serves the default global registry.
    self.http_server = prometheus_client.start_http_server(
        self.config.prometheus_port,
        addr=self.config.prometheus_addr
    )
    self.updated_containers_counter = prometheus_client.Counter(
        'containers_updated',
        'Count of containers updated',
        ['socket', 'container']
    )
    self.monitored_containers_gauge = prometheus_client.Gauge(
        'containers_being_monitored',
        'Gauge of containers being monitored',
        ['socket']
    )
    # NOTE(review): described as a count but modelled as a Gauge —
    # presumably so it can be reset/overwritten; confirm intent.
    self.updated_all_containers_gauge = prometheus_client.Gauge(
        'all_containers_updated',
        'Count of total updated',
        ['socket']
    )
    self.logger = getLogger()
Example #17
Source File: prometheus_openstack_exporter.py From prometheus-openstack-exporter with GNU General Public License v3.0 | 5 votes |
def gen_overcommit_stats(self):
    """Expose the configured allocation (overcommit) ratios per resource."""
    openstack_overcommit = Gauge('openstack_allocation_ratio', 'Openstack overcommit ratios',
                                 ['cloud', 'resource'], registry=self.registry)
    # One sample per resource, read straight from the static config.
    for resource in ('vcpu', 'ram', 'disk'):
        ratio = config['openstack_allocation_ratio_' + resource]
        openstack_overcommit.labels(config['cloud'], resource).set(ratio)
Example #18
Source File: prometheus.py From jaeger-client-python with Apache License 2.0 | 5 votes |
def create_gauge(self, name, tags=None):
    """Return a callable that writes a value into the named gauge.

    The bound ``set`` method of the gauge is returned directly: calling
    it with a value is equivalent to the original update(value) closure.
    """
    return self._get_metric(Gauge, name, tags).set
Example #19
Source File: metrics.py From sanic-prometheus with MIT License | 5 votes |
def init(app, latency_buckets=None, multiprocess_mode='all',
         memcollect_enabled=True, metrics_list=None):
    """Populate ``app.metrics`` with the standard Sanic exporter metrics.

    :param latency_buckets: optional custom buckets for the latency histogram
    :param multiprocess_mode: Gauge aggregation mode across worker processes
    :param memcollect_enabled: when True, also create the RSS memory gauges
    :param metrics_list: optional iterable of (name, metric) pairs to add
    """
    app.metrics['RQS_COUNT'] = Counter(
        'sanic_request_count',
        'Sanic Request Count',
        ['method', 'endpoint', 'http_status']
    )
    hist_kwargs = {}
    if latency_buckets is not None:
        hist_kwargs = {'buckets': latency_buckets}
    app.metrics['RQS_LATENCY'] = Histogram(
        'sanic_request_latency_sec',
        'Sanic Request Latency Histogram',
        ['method', 'endpoint', 'http_status'],
        **hist_kwargs
    )
    if memcollect_enabled:
        app.metrics['PROC_RSS_MEM_BYTES'] = Gauge(
            'sanic_mem_rss_bytes',
            'Resident memory used by process running Sanic',
            multiprocess_mode=multiprocess_mode
        )
        app.metrics['PROC_RSS_MEM_PERC'] = Gauge(
            'sanic_mem_rss_perc',
            'A per cent of total physical memory used by ' +
            'the process running Sanic',
            multiprocess_mode=multiprocess_mode
        )
    if metrics_list:
        for name, pm_metric in metrics_list:
            app.metrics[name] = pm_metric
Example #20
Source File: prometheus_openstack_exporter.py From prometheus-openstack-exporter with GNU General Public License v3.0 | 5 votes |
def gen_quota_stats(self):
    """Export Nova quotas (cores, floating IPs, instances, RAM) per tenant."""
    cores = Gauge('nova_quota_cores', 'Nova cores metric',
                  ['cloud', 'tenant', 'type'], registry=self.registry)
    fips = Gauge('nova_quota_floating_ips', 'Nova floating IP addresses (number)',
                 ['cloud', 'tenant', 'type'], registry=self.registry)
    inst = Gauge('nova_quota_instances', 'Nova instances (number)',
                 ['cloud', 'tenant', 'type'], registry=self.registry)
    ram = Gauge('nova_quota_ram_mbs', 'Nova RAM (MB)',
                ['cloud', 'tenant', 'type'], registry=self.registry)
    for t, q in self.prodstack['nova_quotas'].items():
        if t in self.tenant_map:
            tenant = self.tenant_map[t]
        else:
            # Quota rows for tenants no longer present in keystone.
            tenant = 'orphaned'
        # we get detailed quota information only on recent OS versions
        if isinstance(q['cores'], int):
            # Old format: plain ints, limits only.
            cores.labels(config['cloud'], tenant, 'limit').set(q['cores'])
            fips.labels(config['cloud'], tenant, 'limit').set(q['floating_ips'])
            inst.labels(config['cloud'], tenant, 'limit').set(q['instances'])
            ram.labels(config['cloud'], tenant, 'limit').set(q['ram'])
        else:
            # New format: dicts with limit/in_use/reserved. inc() so rows
            # collapsing to the same tenant name accumulate.
            for tt in ['limit', 'in_use', 'reserved']:
                cores.labels(config['cloud'], tenant, tt).inc(q['cores'][tt])
                fips.labels(config['cloud'], tenant, tt).inc(q['floating_ips'][tt])
                inst.labels(config['cloud'], tenant, tt).inc(q['instances'][tt])
                ram.labels(config['cloud'], tenant, tt).inc(q['ram'][tt])
Example #21
Source File: prometheus_openstack_exporter.py From prometheus-openstack-exporter with GNU General Public License v3.0 | 5 votes |
def gen_up_stats(self):
    """Probe each swift host and record 1 (reachable) or 0 (unreachable)."""
    swift_up = Gauge('swift_host_up', 'Swift host reachability',
                     ['cloud', 'hostname'], registry=self.registry)
    for host in self.swift_hosts:
        try:
            requests.get(self.baseurl.format(host, 'diskusage'))
        except requests.exceptions.RequestException:
            swift_up.labels(config['cloud'], host).set(0)
        else:
            swift_up.labels(config['cloud'], host).set(1)
Example #22
Source File: prometheus_openstack_exporter.py From prometheus-openstack-exporter with GNU General Public License v3.0 | 5 votes |
def gen_disk_usage_stats(self):
    """Record per-device swift disk size and usage in bytes."""
    labels = ['cloud', 'hostname', 'device', 'type']
    swift_disk = Gauge('swift_disk_usage_bytes', 'Swift disk usage in bytes',
                       labels, registry=self.registry)
    for h in self.swift_hosts:
        try:
            r = requests.get(self.baseurl.format(h, 'diskusage'))
        except requests.exceptions.RequestException:
            # Host unreachable: emit nothing for it.
            continue
        for disk in r.json():
            # Skip entries missing any field. NOTE(review): a size/used
            # value of 0 is also falsy and gets skipped — confirm intended.
            if not all([disk.get(i, False) for i in ['size', 'used', 'device']]):
                continue
            swift_disk.labels(config['cloud'], h, disk['device'], 'size').set(int(disk['size']))
            swift_disk.labels(config['cloud'], h, disk['device'], 'used').set(int(disk['used']))
Example #23
Source File: prometheus_openstack_exporter.py From prometheus-openstack-exporter with GNU General Public License v3.0 | 5 votes |
def gen_quarantine_stats(self):
    """Record per-ring quarantined object counts for each swift host."""
    labels = ['cloud', 'hostname', 'ring']
    swift_quarantine = Gauge('swift_quarantined_objects', 'Number of quarantined objects',
                             labels, registry=self.registry)
    for h in self.swift_hosts:
        try:
            r = requests.get(self.baseurl.format(h, 'quarantined'))
        except requests.exceptions.RequestException:
            # Host unreachable: emit nothing for it.
            continue
        # Parse the response body once instead of once per ring.
        payload = r.json()
        for ring in ['accounts', 'objects', 'containers']:
            swift_quarantine.labels(config['cloud'], h, ring).set(payload.get(ring))
Example #24
Source File: prometheus_openstack_exporter.py From prometheus-openstack-exporter with GNU General Public License v3.0 | 5 votes |
def gen_account_stats(self):
    """Export per-tenant swift account usage in bytes.

    NOTE(review): uses dict.iteritems(), so this file targets Python 2.
    """
    self.keystone_tenants_map = self._read_keystone_tenants_map(
        config.get('keystone_tenants_map', None))
    labels = ['cloud', 'swift_account', 'tenant']
    swift_account = Gauge(
        'swift_account_bytes_used',
        'Swift account usage in bytes',
        labels,
        registry=self.registry)
    for tenant_name, tenant_id in self.keystone_tenants_map.iteritems():
        # Swift account id = reseller prefix + keystone tenant id.
        account = self.reseller_prefix + tenant_id
        bytes_used = self._get_account_usage(account)
        swift_account.labels(config['cloud'], account, tenant_name).set(bytes_used)
Example #25
Source File: prometheus.py From nbresuse with BSD 2-Clause "Simplified" License | 5 votes |
def __init__(self, metricsloader: PSUtilMetricsLoader):
    """Create one ``<name>_usage`` Gauge attribute per tracked resource."""
    super().__init__()
    self.metricsloader = metricsloader
    self.config = metricsloader.config
    self.session_manager = metricsloader.nbapp.session_manager

    # e.g. TOTAL_MEMORY_USAGE = Gauge('total_memory_usage', ...)
    for base in ("total_memory", "max_memory", "total_cpu", "max_cpu"):
        metric_name = base + "_usage"
        description = "counter for " + metric_name.replace("_", " ")
        setattr(self, metric_name.upper(), Gauge(metric_name, description, []))
Example #26
Source File: faucet_metrics.py From faucet with Apache License 2.0 | 5 votes |
def _gauge(self, var, var_help, labels):
    """Create a Gauge named ``var`` registered on this client's registry."""
    return PromGauge(var, var_help, labels, registry=self._reg)  # pylint: disable=unexpected-keyword-arg
Example #27
Source File: gauge_prom.py From faucet with Apache License 2.0 | 5 votes |
def reregister_flow_vars(self, table_name, table_tags):
    """Register the flow variables needed for this client.

    Any existing gauge for the table is unregistered first so it can be
    recreated with the (possibly changed) label set.
    """
    for prom_var in PROM_FLOW_VARS:
        table_prom_var = PROM_PREFIX_DELIM.join((prom_var, table_name))
        try:
            self._reg.unregister(self.metrics[table_prom_var])
        except KeyError:
            # First registration for this table: nothing to remove.
            pass
        self.metrics[table_prom_var] = Gauge(  # pylint: disable=unexpected-keyword-arg
            table_prom_var, '', list(table_tags), registry=self._reg)
Example #28
Source File: prom_client.py From faucet with Apache License 2.0 | 5 votes |
def __init__(self, reg=None):
    """Expose the running faucet version as a constant labelled gauge."""
    if reg is not None:
        self._reg = reg
    self.version = VersionInfo('faucet').semantic_version().release_string()
    self.faucet_version = PromGauge(  # pylint: disable=unexpected-keyword-arg
        'faucet_pbr_version',
        'Faucet PBR version',
        ['version'],
        registry=self._reg)
    # Value is always 1; the version string is carried in the label.
    self.faucet_version.labels(version=self.version).set(1)  # pylint: disable=no-member
    self.server = None
    self.thread = None
Example #29
Source File: middleware.py From python-prometheus-demo with MIT License | 5 votes |
def setup_metrics(app, app_name):
    """Attach request count/latency/in-progress metrics and a /metrics route.

    The prometheus middleware is inserted at position 0 so it wraps every
    other middleware and handler.
    """
    app['REQUEST_COUNT'] = Counter(
        'requests_total', 'Total Request Count',
        ['app_name', 'method', 'endpoint', 'http_status']
    )
    app['REQUEST_LATENCY'] = Histogram(
        'request_latency_seconds', 'Request latency',
        ['app_name', 'endpoint']
    )
    app['REQUEST_IN_PROGRESS'] = Gauge(
        'requests_in_progress_total', 'Requests in progress',
        ['app_name', 'endpoint', 'method']
    )
    app.middlewares.insert(0, prom_middleware(app_name))
    app.router.add_get("/metrics", metrics)
Example #30
Source File: oscache.py From prometheus-openstack-exporter with Apache License 2.0 | 5 votes |
def get_stats(self):
    """Report how long the last cache refresh took, labelled by region."""
    registry = CollectorRegistry()
    refresh_gauge = Gauge(
        'openstack_exporter_cache_refresh_duration_seconds',
        'Cache refresh duration in seconds.',
        ['region'],
        registry=registry)
    refresh_gauge.labels(self.region).set(self.duration)
    return generate_latest(registry)