com.yammer.metrics.Metrics Java Examples

The following examples show how to use com.yammer.metrics.Metrics. Each example is taken from an open-source project; the originating project, source file, and license are noted above it.
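All of the snippets below rely on the static factory methods of com.yammer.metrics.Metrics (the Metrics 2.x API): each newCounter/newMeter/newTimer/newGauge/newHistogram call creates a metric, registers it in the shared default registry under a MetricName, and returns it. As orientation, here is a minimal sketch that is not taken from any of the projects below; the group, type, and metric names are illustrative:

import java.util.concurrent.TimeUnit;

import com.yammer.metrics.Metrics;
import com.yammer.metrics.core.Counter;
import com.yammer.metrics.core.Gauge;
import com.yammer.metrics.core.Meter;
import com.yammer.metrics.core.MetricName;
import com.yammer.metrics.core.Timer;

public class MetricsOverview {
    public static void main(String[] args) {
        MetricName requests = new MetricName("example.group", "example-type", "requests");

        // Each newXxx call registers the metric in Metrics.defaultRegistry() and returns it.
        Counter pending = Metrics.newCounter(new MetricName("example.group", "example-type", "pending"));
        Meter meter = Metrics.newMeter(requests, "requests", TimeUnit.SECONDS);
        Timer latency = Metrics.newTimer(new MetricName("example.group", "example-type", "latency"),
                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
        Metrics.newGauge(new MetricName("example.group", "example-type", "queue-size"), new Gauge<Integer>() {
            @Override
            public Integer value() {
                return 0; // illustrative: report a current reading here
            }
        });

        pending.inc();
        meter.mark();
        latency.time().stop();

        // Metrics can later be removed from the shared registry by name.
        Metrics.defaultRegistry().removeMetric(requests);
    }
}
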
Example #1
Source File: ColumnFamilyMetrics.java    From stratio-cassandra with Apache License 2.0
/**
 * Creates a counter that will also have a global counter that is the sum of all counters
 * across different column families.
 */
protected Counter createColumnFamilyCounter(final String name)
{
    Counter cfCounter = Metrics.newCounter(factory.createMetricName(name));
    if (register(name, cfCounter))
    {
        Metrics.newGauge(globalNameFactory.createMetricName(name), new Gauge<Long>()
        {
            public Long value()
            {
                long total = 0;
                for (Metric cfGauge : allColumnFamilyMetrics.get(name))
                {
                    total += ((Counter) cfGauge).count();
                }
                return total;
            }
        });
    }
    return cfCounter;
}
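As a hedged aside (not part of ColumnFamilyMetrics), the returned Counter is driven with the usual yammer Counter calls, while the global gauge registered above recomputes the sum of count() across column families each time it is read:

// Illustrative only; "PendingFlushes" is a hypothetical metric name.
Counter pendingFlushes = createColumnFamilyCounter("PendingFlushes");
pendingFlushes.inc();                       // bump this column family's counter
pendingFlushes.dec();                       // counters can also be decremented
long perColumnFamily = pendingFlushes.count();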
 
Example #2
Source File: DeepPagingCache.java    From incubator-retired-blur with Apache License 2.0
public DeepPagingCache(long maxEntriesForDeepPaging) {
  _hits = Metrics.newMeter(new MetricName(ORG_APACHE_BLUR, DEEP_PAGING_CACHE, HIT), HIT, TimeUnit.SECONDS);
  _misses = Metrics.newMeter(new MetricName(ORG_APACHE_BLUR, DEEP_PAGING_CACHE, MISS), MISS, TimeUnit.SECONDS);
  _evictions = Metrics.newMeter(new MetricName(ORG_APACHE_BLUR, DEEP_PAGING_CACHE, EVICTION), EVICTION,
      TimeUnit.SECONDS);
  _lruCache = new ConcurrentLinkedHashMap.Builder<DeepPageKeyPlusPosition, DeepPageContainer>()
      .maximumWeightedCapacity(maxEntriesForDeepPaging)
      .listener(new EvictionListener<DeepPageKeyPlusPosition, DeepPageContainer>() {
        @Override
        public void onEviction(DeepPageKeyPlusPosition key, DeepPageContainer value) {
          _positionCache.remove(key);
          _evictions.mark();
        }
      }).build();
  Metrics.newGauge(new MetricName(ORG_APACHE_BLUR, DEEP_PAGING_CACHE, SIZE), new Gauge<Long>() {
    @Override
    public Long value() {
      return _lruCache.weightedSize();
    }
  });
  _positionCache = new ConcurrentSkipListMap<DeepPageKeyPlusPosition, DeepPageContainer>();
}
 
Example #3
Source File: HBaseIndexerMapper.java    From hbase-indexer with Apache License 2.0
private void copyIndexingMetricsToCounters(Context context) {
    final String COUNTER_GROUP = "HBase Indexer Metrics";
    SortedMap<String, SortedMap<MetricName, Metric>> groupedMetrics = Metrics.defaultRegistry().groupedMetrics(
            new IndexerMetricsUtil.IndexerMetricPredicate());
    for (Entry<String, SortedMap<MetricName, Metric>> metricsGroupEntry : groupedMetrics.entrySet()) {
        SortedMap<MetricName, Metric> metricsGroupMap = metricsGroupEntry.getValue();
        for (Entry<MetricName, Metric> metricEntry : metricsGroupMap.entrySet()) {
            MetricName metricName = metricEntry.getKey();
            Metric metric = metricEntry.getValue();
            String counterName = metricName.getType() + ": " + metricName.getName();
            if (metric instanceof Counter) {
                Counter counter = (Counter) metric;
                context.getCounter(COUNTER_GROUP, counterName).increment(counter.count());
            } else if (metric instanceof Meter) {
                Meter meter = (Meter) metric;
                context.getCounter(COUNTER_GROUP, counterName).increment(meter.count());
            } else if (metric instanceof Timer) {
                Timer timer = (Timer) metric;
                context.getCounter(COUNTER_GROUP, counterName).increment((long) timer.sum());
            }
        }
    }
}
 
Example #4
Source File: FileCacheMetrics.java    From stratio-cassandra with Apache License 2.0
public FileCacheMetrics()
{
    hits = Metrics.newMeter(factory.createMetricName("Hits"), "hits", TimeUnit.SECONDS);
    requests = Metrics.newMeter(factory.createMetricName("Requests"), "requests", TimeUnit.SECONDS);
    hitRate = Metrics.newGauge(factory.createMetricName("HitRate"), new RatioGauge()
    {
        protected double getNumerator()
        {
            return hits.count();
        }

        protected double getDenominator()
        {
            return requests.count();
        }
    });
    size = Metrics.newGauge(factory.createMetricName("Size"), new Gauge<Long>()
    {
        public Long value()
        {
            return FileCacheService.instance.sizeInBytes();
        }
    });
}
 
Example #5
Source File: KeyspaceMetrics.java    From stratio-cassandra with Apache License 2.0
/**
 * Creates a gauge that will sum the current value of a metric for all column families in this keyspace
 * @param name the name of the metric to create
 * @param extractor the MetricValue used to read the value from each column family's metrics
 * @return Gauge<Long> that computes the sum of MetricValue.getValue() over all column families
 */
private <T extends Number> Gauge<Long> createKeyspaceGauge(String name, final MetricValue extractor)
{
    allMetrics.add(name);
    return Metrics.newGauge(factory.createMetricName(name), new Gauge<Long>()
    {
        public Long value()
        {
            long sum = 0;
            for (ColumnFamilyStore cf : keyspace.getColumnFamilyStores())
            {
                sum += extractor.getValue(cf.metric);
            }
            return sum;
        }
    });
}
 
Example #6
Source File: DirectSolrInputDocumentWriter.java    From hbase-indexer with Apache License 2.0
public DirectSolrInputDocumentWriter(String indexName, SolrClient solrServer) {
    this.solrServer = solrServer;
    
    indexAddMeter = Metrics.newMeter(metricName(getClass(), "Index adds", indexName), "Documents added to Solr index",
            TimeUnit.SECONDS);
    indexDeleteMeter = Metrics.newMeter(metricName(getClass(), "Index deletes", indexName),
            "Documents deleted from Solr index", TimeUnit.SECONDS);
    solrAddErrorMeter = Metrics.newMeter(metricName(getClass(), "Solr add errors", indexName),
            "Documents not added to Solr due to Solr errors", TimeUnit.SECONDS);
    solrDeleteErrorMeter = Metrics.newMeter(metricName(getClass(), "Solr delete errors", indexName),
            "Documents not deleted from Solr due to Solr errors", TimeUnit.SECONDS);
    documentAddErrorMeter = Metrics.newMeter(metricName(getClass(), "Document add errors", indexName),
            "Documents not added to Solr due to document errors", TimeUnit.SECONDS);
    documentDeleteErrorMeter = Metrics.newMeter(metricName(getClass(), "Document delete errors", indexName),
            "Documents not deleted from Solr due to document errors", TimeUnit.SECONDS);

}
 
Example #7
Source File: DirectSolrClassicInputDocumentWriter.java    From hbase-indexer with Apache License 2.0
public DirectSolrClassicInputDocumentWriter(String indexName, List<SolrClient> solrServers) {
    this.solrServers = solrServers;

    indexAddMeter = Metrics.newMeter(metricName(getClass(), "Index adds", indexName), "Documents added to Solr index",
            TimeUnit.SECONDS);
    indexDeleteMeter = Metrics.newMeter(metricName(getClass(), "Index deletes", indexName),
            "Documents deleted from Solr index", TimeUnit.SECONDS);
    solrAddErrorMeter = Metrics.newMeter(metricName(getClass(), "Solr add errors", indexName),
            "Documents not added to Solr due to Solr errors", TimeUnit.SECONDS);
    solrDeleteErrorMeter = Metrics.newMeter(metricName(getClass(), "Solr delete errors", indexName),
            "Documents not deleted from Solr due to Solr errors", TimeUnit.SECONDS);
    documentAddErrorMeter = Metrics.newMeter(metricName(getClass(), "Document add errors", indexName),
            "Documents not added to Solr due to document errors", TimeUnit.SECONDS);
    documentDeleteErrorMeter = Metrics.newMeter(metricName(getClass(), "Document delete errors", indexName),
            "Documents not deleted from Solr due to document errors", TimeUnit.SECONDS);

}
 
Example #8
Source File: CommitLogMetrics.java    From stratio-cassandra with Apache License 2.0
public CommitLogMetrics(final AbstractCommitLogService service, final CommitLogSegmentManager allocator)
{
    completedTasks = Metrics.newGauge(factory.createMetricName("CompletedTasks"), new Gauge<Long>()
    {
        public Long value()
        {
            return service.getCompletedTasks();
        }
    });
    pendingTasks = Metrics.newGauge(factory.createMetricName("PendingTasks"), new Gauge<Long>()
    {
        public Long value()
        {
            return service.getPendingTasks();
        }
    });
    totalCommitLogSize = Metrics.newGauge(factory.createMetricName("TotalCommitLogSize"), new Gauge<Long>()
    {
        public Long value()
        {
            return allocator.bytesUsed();
        }
    });
    waitingOnSegmentAllocation = Metrics.newTimer(factory.createMetricName("WaitingOnSegmentAllocation"), TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
    waitingOnCommit = Metrics.newTimer(factory.createMetricName("WaitingOnCommit"), TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
}
 
Example #9
Source File: StatsdMetricsReporter.java    From kafka-statsd-metrics2 with Apache License 2.0
@Override
public void startReporter(long pollingPeriodInSeconds) {
  if (pollingPeriodInSeconds <= 0) {
    throw new IllegalArgumentException("Polling period must be greater than zero");
  }

  synchronized (running) {
    if (running.get()) {
      log.warn("Reporter is already running");
    } else {
      statsd = createStatsd();
      underlying = new StatsDReporter(
          Metrics.defaultRegistry(),
          statsd,
          metricPredicate,
          metricDimensions,
          isTagEnabled);
      underlying.start(pollingPeriodInSeconds, TimeUnit.SECONDS);
      log.info("Started Reporter with host={}, port={}, polling_period_secs={}, prefix={}",
          host, port, pollingPeriodInSeconds, prefix);
      running.set(true);
    }
  }
}
 
Example #10
Source File: KafkaStatsdMetricsReporter.java    From kafka-statsd-reporter with MIT License
@Override
public synchronized void stopReporter() {
    if (initialized && running) {
        reporter.shutdown();
        running = false;
        LOG.info("Stopped Kafka Statsd metrics reporter");
        try {
            reporter = new StatsdReporter(
                    Metrics.defaultRegistry(),
                    statsdGroupPrefix,
                    predicate,
                    statsdHost,
                    statsdPort,
                    Clock.defaultClock());
        } catch (IOException e) {
            LOG.error("Unable to initialize StatsdReporter", e);
        }
    }
}
 
Example #11
Source File: CQLExecutor.java    From Rhombus with MIT License
public ResultSetFuture executeAsync(CQLStatement cql){
	if(logCql) {
		logger.debug("Executing CQL: {}", cql.getQuery());
		if(cql.getValues() != null) {
			logger.debug("With values: {}", Arrays.asList(cql.getValues()));
		}
	}
	if(cql.isPreparable()){
		BoundStatement bs = getBoundStatement(session, cql);
		ResultSetFuture result = session.executeAsync(bs);
		com.yammer.metrics.Metrics.defaultRegistry().newMeter(CQLExecutor.class, "statement.executed", "executed", TimeUnit.SECONDS).mark();
		return result;
	}
	else{
		//just run a normal execute without a prepared statement
		com.yammer.metrics.Metrics.defaultRegistry().newMeter(CQLExecutor.class, "statement.executed", "executed", TimeUnit.SECONDS).mark();
		return session.executeAsync(cql.getQuery());
	}
}
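A side note on the registry behaviour relied on here (an observation about Metrics 2.x, not project code): newMeter returns the already-registered meter when a metric with the same name exists, so the two calls above keep marking a single "statement.executed" meter instead of registering duplicates. A field could cache it to avoid the repeated lookup, for example:

// Illustrative alternative, not from Rhombus: register once and reuse the instance.
private static final Meter EXECUTED_STATEMENTS = com.yammer.metrics.Metrics.defaultRegistry()
        .newMeter(CQLExecutor.class, "statement.executed", "executed", TimeUnit.SECONDS);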
 
Example #12
Source File: ClientMetrics.java    From stratio-cassandra with Apache License 2.0
public void addCounter(String name, final Callable<Integer> provider)
{
    Metrics.newGauge(factory.createMetricName(name), new Gauge<Integer>()
    {
        public Integer value()
        {
            try
            {
                return provider.call();
            }
            catch (Exception e)
            {
                throw new RuntimeException(e);
            }
        }
    });
}
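A hedged usage sketch (the metric name and the backing value are illustrative, and clientMetrics stands for an instance of the class above; uses java.util.concurrent.Callable and AtomicInteger):

// Illustrative only: expose a live value through the Callable-backed gauge.
final AtomicInteger connectedClients = new AtomicInteger();
clientMetrics.addCounter("connectedClients", new Callable<Integer>()
{
    public Integer call()
    {
        return connectedClients.get();
    }
});
connectedClients.incrementAndGet(); // the gauge reflects this the next time it is read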
 
Example #13
Source File: HdfsDirectory.java    From incubator-retired-blur with Apache License 2.0
protected MetricsGroup createNewMetricsGroup(String scope) {
  MetricName readRandomAccessName = new MetricName(ORG_APACHE_BLUR, HDFS, "Read Random Latency in \u00B5s", scope);
  MetricName readStreamAccessName = new MetricName(ORG_APACHE_BLUR, HDFS, "Read Stream Latency in \u00B5s", scope);
  MetricName writeAccessName = new MetricName(ORG_APACHE_BLUR, HDFS, "Write Latency in \u00B5s", scope);
  MetricName readRandomThroughputName = new MetricName(ORG_APACHE_BLUR, HDFS, "Read Random Throughput", scope);
  MetricName readStreamThroughputName = new MetricName(ORG_APACHE_BLUR, HDFS, "Read Stream Throughput", scope);
  MetricName readSeekName = new MetricName(ORG_APACHE_BLUR, HDFS, "Read Stream Seeks", scope);
  MetricName writeThroughputName = new MetricName(ORG_APACHE_BLUR, HDFS, "Write Throughput", scope);
  MetricName totalHdfsBlocks = new MetricName(ORG_APACHE_BLUR, HDFS, "Hdfs Blocks Total", scope);
  MetricName localHdfsBlocks = new MetricName(ORG_APACHE_BLUR, HDFS, "Hdfs Blocks Local", scope);

  Histogram readRandomAccess = Metrics.newHistogram(readRandomAccessName);
  Histogram readStreamAccess = Metrics.newHistogram(readStreamAccessName);
  Histogram writeAccess = Metrics.newHistogram(writeAccessName);
  Meter readRandomThroughput = Metrics.newMeter(readRandomThroughputName, "Read Random Bytes", TimeUnit.SECONDS);
  Meter readStreamThroughput = Metrics.newMeter(readStreamThroughputName, "Read Stream Bytes", TimeUnit.SECONDS);
  Meter readStreamSeek = Metrics.newMeter(readSeekName, "Read Stream Seeks", TimeUnit.SECONDS);
  Meter writeThroughput = Metrics.newMeter(writeThroughputName, "Write Bytes", TimeUnit.SECONDS);
  Counter totalHdfsBlock = Metrics.newCounter(totalHdfsBlocks);
  Counter localHdfsBlock = Metrics.newCounter(localHdfsBlocks);

  return new MetricsGroup(readRandomAccess, readStreamAccess, writeAccess, readRandomThroughput,
      readStreamThroughput, readStreamSeek, writeThroughput, totalHdfsBlock, localHdfsBlock);
}
 
Example #14
Source File: FilterMetricPredicateTest.java    From kafka-graphite with Apache License 2.0
@Test
public void keepGaugesIfTheyThrowRuntimeExceptions() throws Exception {
    MetricPredicate predicate = new FilterMetricPredicate();

    MetricName metricName = new MetricName("test", "test", "delete", "scope", "mBeanName");

    Metric gauge = Metrics.newGauge(metricName, new Gauge<Long>() {
        @Override
        public Long value() {
            throw new RuntimeException("catch me if you can");
        }
    });

    assertTrue(predicate.matches(metricName, gauge));

    assertTrue("The gauge should be there", Metrics.defaultRegistry().allMetrics().containsKey(metricName));
    assertEquals(Metrics.defaultRegistry().allMetrics().get(metricName), gauge);
}
 
Example #15
Source File: ColumnFamilyMetrics.java    From stratio-cassandra with Apache License 2.0
/**
 * Release all associated metrics.
 */
public void release()
{
    for(String name : all)
    {
        allColumnFamilyMetrics.get(name).remove(Metrics.defaultRegistry().allMetrics().get(factory.createMetricName(name)));
        Metrics.defaultRegistry().removeMetric(factory.createMetricName(name));
    }
    readLatency.release();
    writeLatency.release();
    rangeLatency.release();
    Metrics.defaultRegistry().removeMetric(factory.createMetricName("EstimatedRowSizeHistogram"));
    Metrics.defaultRegistry().removeMetric(factory.createMetricName("EstimatedColumnCountHistogram"));
    Metrics.defaultRegistry().removeMetric(factory.createMetricName("KeyCacheHitRate"));
    Metrics.defaultRegistry().removeMetric(factory.createMetricName("CoordinatorReadLatency"));
    Metrics.defaultRegistry().removeMetric(factory.createMetricName("CoordinatorScanLatency"));
    Metrics.defaultRegistry().removeMetric(factory.createMetricName("WaitingOnFreeMemtableSpace"));
}
 
Example #16
Source File: IncomingSmtpSummary.java    From mireka with Apache License 2.0
@PostConstruct
public void register() {
    mailTransactions =
            Metrics.newMeter(metricName("mailTransactions"),
                    "transactions", TimeUnit.MINUTES);
    rcptCommands =
            Metrics.newMeter(metricName("rcptCommands"), "commands",
                    TimeUnit.MINUTES);
    dataCommands =
            Metrics.newMeter(metricName("dataCommands"), "commands",
                    TimeUnit.MINUTES);
    acceptedMessages =
            Metrics.newMeter(metricName("acceptedMessages"), "messages",
                    TimeUnit.MINUTES);
    messageRecipients =
            Metrics.newMeter(metricName("messageRecipients"), "recipients",
                    TimeUnit.MINUTES);
}
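At the call sites these meters are typically marked once per event; a minimal hedged sketch (the handler method names are illustrative, not taken from Mireka):

// Illustrative only: mark the meters registered above as SMTP events occur.
void onMailTransaction() {
    mailTransactions.mark();
}

void onRecipients(int count) {
    rcptCommands.mark();
    messageRecipients.mark(count); // mark(n) records n occurrences at once
}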
 
Example #17
Source File: AbstractDistributedIndexServer.java    From incubator-retired-blur with Apache License 2.0
public AbstractDistributedIndexServer(ClusterStatus clusterStatus, Configuration configuration, String nodeName,
    String cluster) {
  _clusterStatus = clusterStatus;
  _configuration = configuration;
  _nodeName = nodeName;
  _cluster = cluster;
  MetricName tableCount = new MetricName(ORG_APACHE_BLUR, BLUR, TABLE_COUNT, _cluster);
  MetricName indexCount = new MetricName(ORG_APACHE_BLUR, BLUR, INDEX_COUNT, _cluster);
  MetricName segmentCount = new MetricName(ORG_APACHE_BLUR, BLUR, SEGMENT_COUNT, _cluster);
  MetricName indexMemoryUsage = new MetricName(ORG_APACHE_BLUR, BLUR, INDEX_MEMORY_USAGE, _cluster);
  MetricName recordCount = new MetricName(ORG_APACHE_BLUR, BLUR, RECORD_COUNT, _cluster);

  Metrics.newGauge(tableCount, new AtomicLongGauge(_tableCount));
  Metrics.newGauge(indexCount, new AtomicLongGauge(_indexCount));
  Metrics.newGauge(segmentCount, new AtomicLongGauge(_segmentCount));
  Metrics.newGauge(indexMemoryUsage, new AtomicLongGauge(_indexMemoryUsage));
  Metrics.newGauge(recordCount, new AtomicLongGauge(_recordCount));
}
 
Example #18
Source File: ThreadPoolMetrics.java    From stratio-cassandra with Apache License 2.0
/**
 * Create metrics for given ThreadPoolExecutor.
 *
 * @param executor Thread pool
 * @param path Type of thread pool
 * @param poolName Name of thread pool to identify metrics
 */
public ThreadPoolMetrics(final ThreadPoolExecutor executor, String path, String poolName)
{
    this.factory = new ThreadPoolMetricNameFactory("ThreadPools", path, poolName);

    activeTasks = Metrics.newGauge(factory.createMetricName("ActiveTasks"), new Gauge<Integer>()
    {
        public Integer value()
        {
            return executor.getActiveCount();
        }
    });
    totalBlocked = Metrics.newCounter(factory.createMetricName("TotalBlockedTasks"));
    currentBlocked = Metrics.newCounter(factory.createMetricName("CurrentlyBlockedTasks"));
    completedTasks = Metrics.newGauge(factory.createMetricName("CompletedTasks"), new Gauge<Long>()
    {
        public Long value()
        {
            return executor.getCompletedTaskCount();
        }
    });
    pendingTasks = Metrics.newGauge(factory.createMetricName("PendingTasks"), new Gauge<Long>()
    {
        public Long value()
        {
            return executor.getTaskCount() - executor.getCompletedTaskCount();
        }
    });
    maxPoolSize =  Metrics.newGauge(factory.createMetricName("MaxPoolSize"), new Gauge<Integer>()
    {
        public Integer value()
        {
            return executor.getMaximumPoolSize();
        }
    });
}
 
Example #19
Source File: KafkaBrokerReporter.java    From metrics-kafka with Apache License 2.0
synchronized public void init(VerifiableProperties props) {
    if (!initialized) {
        this.props = props;
        props.props().put("metadata.broker.list", String.format("%s:%d", "localhost", props.getInt("port")));

        final KafkaMetricsConfig metricsConfig = new KafkaMetricsConfig(props);

        this.underlying = new TopicReporter(Metrics.defaultRegistry(),
                new ProducerConfig(props.props()),
                String.format("broker%s", props.getString("broker.id")));
        initialized = true;
        startReporter(metricsConfig.pollingIntervalSecs());
    }
}
 
Example #20
Source File: FilterMetricPredicateTest.java    From kafka-graphite with Apache License 2.0
@Before
public void setup() {
    metricMock = mock(Metric.class);

    // clean all metrics
    List<MetricName> metricNames = new ArrayList<MetricName>(Metrics.defaultRegistry().allMetrics().keySet());
    for (MetricName metricName : metricNames) {
        Metrics.defaultRegistry().removeMetric(metricName);
    }
}
 
Example #21
Source File: TransmitterSummary.java    From mireka with Apache License 2.0
@PostConstruct
public void register() {
    try {
        ObjectName objectName =
                new ObjectName("mireka:type=TransmitterTraffic,name="
                        + name);
        ManagementFactory.getPlatformMBeanServer().registerMBean(this,
                objectName);
    } catch (JMException e) {
        throw new RuntimeException(e);
    }

    mailTransactions =
            Metrics.newMeter(metricName("mailTransactions"),
                    "transactions", TimeUnit.MINUTES);
    successfulMailTransactions =
            Metrics.newMeter(metricName("successfulMailTransactions"),
                    "transactions", TimeUnit.MINUTES);
    failures =
            Metrics.newMeter(metricName("failures"), "transactions",
                    TimeUnit.MINUTES);
    permanentFailures =
            Metrics.newMeter(metricName("permanentFailures"),
                    "transactions", TimeUnit.MINUTES);
    transientFailures =
            Metrics.newMeter(metricName("transientFailures"),
                    "transactions", TimeUnit.MINUTES);
    partialFailures =
            Metrics.newMeter(metricName("partialFailures"), "transactions",
                    TimeUnit.MINUTES);
    errors =
            Metrics.newMeter(metricName("errors"), "transactions",
                    TimeUnit.MINUTES);
}
 
Example #22
Source File: ClientRequestMetrics.java    From stratio-cassandra with Apache License 2.0
public ClientRequestMetrics(String scope)
{
    super("ClientRequest", scope);

    timeouts = Metrics.newMeter(factory.createMetricName("Timeouts"), "timeouts", TimeUnit.SECONDS);
    unavailables = Metrics.newMeter(factory.createMetricName("Unavailables"), "unavailables", TimeUnit.SECONDS);
}
 
Example #23
Source File: KafkaBrokerReporter.java    From metrics-kafka with Apache License 2.0
public synchronized void stopReporter() {
    if (initialized && running) {
        underlying.shutdown();
        running = false;
        log.info("Stopped Kafka Topic metrics reporter");
        underlying = new TopicReporter(Metrics.defaultRegistry(), new ProducerConfig(props.props()), String.format("broker%s", props.getString("broker.id")));
    }
}
 
Example #24
Source File: LatencyMetrics.java    From stratio-cassandra with Apache License 2.0
/**
 * Create LatencyMetrics with given MetricNameFactory and prefix to append to each metric name.
 *
 * @param factory MetricName factory to use
 * @param namePrefix Prefix to append to each metric name
 */
public LatencyMetrics(MetricNameFactory factory, String namePrefix)
{
    this.factory = factory;
    this.namePrefix = namePrefix;

    latency = Metrics.newTimer(factory.createMetricName(namePrefix + "Latency"), TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
    totalLatency = Metrics.newCounter(factory.createMetricName(namePrefix + "TotalLatency"));
}
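The created Timer is usually driven through a TimerContext around the measured operation, the same pattern Example #30 below uses when preparing statements; a minimal hedged sketch:

// Illustrative only: time a block of work with the Timer created above.
final TimerContext context = latency.time();
try
{
    // ... the operation being measured ...
}
finally
{
    context.stop(); // records the elapsed duration in the timer
}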
 
Example #25
Source File: GraphiteReporter.java    From mireka with Apache License 2.0
@PostConstruct
public void start() {
    try {
        reporter =
                new com.yammer.metrics.reporting.GraphiteReporter(
                        Metrics.defaultRegistry(), host, port, prefix);
        reporter.start(period, periodUnit);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
 
Example #26
Source File: IndexManager.java    From incubator-retired-blur with Apache License 2.0
public IndexManager(IndexServer indexServer, ClusterStatus clusterStatus, BlurFilterCache filterCache,
    int maxHeapPerRowFetch, int fetchCount, int threadCount, int mutateThreadCount, int facetThreadCount,
    DeepPagingCache deepPagingCache, MemoryAllocationWatcher memoryAllocationWatcher, QueryStatusManager statusManager) {
  _statusManager = statusManager;
  _memoryAllocationWatcher = memoryAllocationWatcher;
  _deepPagingCache = deepPagingCache;
  _indexServer = indexServer;
  _clusterStatus = clusterStatus;
  _filterCache = filterCache;

  MetricName metricName1 = new MetricName(ORG_APACHE_BLUR, BLUR, "External Queries/s");
  MetricName metricName2 = new MetricName(ORG_APACHE_BLUR, BLUR, "Internal Queries/s");
  MetricName metricName3 = new MetricName(ORG_APACHE_BLUR, BLUR, "Fetch Timer");

  _queriesExternalMeter = Metrics.newMeter(metricName1, "External Queries/s", TimeUnit.SECONDS);
  _queriesInternalMeter = Metrics.newMeter(metricName2, "Internal Queries/s", TimeUnit.SECONDS);
  _fetchTimer = Metrics.newTimer(metricName3, TimeUnit.MICROSECONDS, TimeUnit.SECONDS);

  if (threadCount == 0) {
    throw new RuntimeException("Thread Count cannot be 0.");
  }
  _threadCount = threadCount;
  if (mutateThreadCount == 0) {
    throw new RuntimeException("Mutate Thread Count cannot be 0.");
  }
  _mutateThreadCount = mutateThreadCount;
  _fetchCount = fetchCount;
  _maxHeapPerRowFetch = maxHeapPerRowFetch;

  _executor = Executors.newThreadPool("index-manager", _threadCount);
  _mutateExecutor = Executors.newThreadPool("index-manager-mutate", _mutateThreadCount);
  if (facetThreadCount < 1) {
    _facetExecutor = null;
  } else {
    _facetExecutor = Executors.newThreadPool(new SynchronousQueue<Runnable>(), "facet-execution", facetThreadCount);
  }

  LOG.info("Init Complete");

}
 
Example #27
Source File: SEPMetrics.java    From stratio-cassandra with Apache License 2.0
public void release()
{
    Metrics.defaultRegistry().removeMetric(factory.createMetricName("ActiveTasks"));
    Metrics.defaultRegistry().removeMetric(factory.createMetricName("PendingTasks"));
    Metrics.defaultRegistry().removeMetric(factory.createMetricName("CompletedTasks"));
    Metrics.defaultRegistry().removeMetric(factory.createMetricName("TotalBlockedTasks"));
    Metrics.defaultRegistry().removeMetric(factory.createMetricName("CurrentlyBlockedTasks"));
    Metrics.defaultRegistry().removeMetric(factory.createMetricName("MaxPoolSize"));
}
 
Example #28
Source File: KeyspaceMetrics.java    From stratio-cassandra with Apache License 2.0
/**
 * Release all associated metrics.
 */
public void release()
{
    for(String name : allMetrics) 
    {
        Metrics.defaultRegistry().removeMetric(factory.createMetricName(name));
    }
    // latency metrics contain multiple metrics internally and need to be released manually
    readLatency.release();
    writeLatency.release();
    rangeLatency.release();
}
 
Example #29
Source File: FilteringSubscriber.java    From chancery with Apache License 2.0
protected FilteringSubscriber(String filter) {
    this.filter = new RefFilter(filter);
    exceptionMeter = Metrics.newMeter(getClass(),
            "triggered-exception", "callbacks",
            TimeUnit.HOURS);
    filteredOutMeter = Metrics.newMeter(getClass(),
            "filtered-out", "callbacks",
            TimeUnit.HOURS);
    handledTimer = Metrics.newTimer(getClass(),
            "handled-callbacks",
            TimeUnit.SECONDS, TimeUnit.SECONDS);
}
 
Example #30
Source File: CQLExecutor.java    From Rhombus with MIT License
public PreparedStatement prepareStatement(Session session, CQLStatement cql) {
    if (preparedStatementCache.containsKey(cql.getQuery())) {
        // When pre-preparing statements, we can send the same one multiple times.
        // In this case, we should just return the one from the cache and not prepare again.
        return preparedStatementCache.get(cql.getQuery());
    } else {
        Long currentTime = System.currentTimeMillis();
        TimerContext prepareTimer = Metrics.defaultRegistry().newTimer(CQLExecutor.class, "statement.prepared").time();
        PreparedStatement ret = session.prepare(cql.getQuery());
        prepareTimer.stop();
        ret.setConsistencyLevel(consistencyLevel);
        preparedStatementCache.put(cql.getQuery(), ret);
        return ret;
    }
}