Java Code Examples for org.apache.hadoop.metrics2.MetricsRecordBuilder

The following examples show how to use org.apache.hadoop.metrics2.MetricsRecordBuilder. They are extracted from open source projects; the originating project, source file, and license are noted above each example.
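Before the examples, here is a minimal sketch of how a MetricsSource typically drives a MetricsRecordBuilder when the metrics system collects a record. The class, record, and metric names below are illustrative and not taken from any of the projects listed.

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.Interns;

// Hypothetical source that publishes one record with a tag, a counter, and a gauge.
public class ExampleMetricsSource implements MetricsSource {
  private volatile long requestCount; // value maintained elsewhere in the application

  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    // addRecord() hands back a MetricsRecordBuilder; tags and metrics are chained onto it.
    MetricsRecordBuilder rb = collector.addRecord("ExampleRecord")
        .setContext("example")
        .tag(Interns.info("Hostname", "Host that produced the record"), "localhost");
    rb.addCounter(Interns.info("RequestCount", "Number of requests seen"), requestCount)
      .addGauge(Interns.info("QueueLength", "Current queue length"), 0);
  }
}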
Example 1
Source Project: big-c   Source File: TestStartupProgressMetrics.java    License: Apache License 2.0
@Test
public void testRunningState() {
  setStartupProgressForRunningState(startupProgress);

  MetricsRecordBuilder builder = getMetrics(metrics, true);
  assertTrue(getLongCounter("ElapsedTime", builder) >= 0L);
  assertGauge("PercentComplete", 0.375f, builder);
  assertCounter("LoadingFsImageCount", 100L, builder);
  assertTrue(getLongCounter("LoadingFsImageElapsedTime", builder) >= 0L);
  assertCounter("LoadingFsImageTotal", 100L, builder);
  assertGauge("LoadingFsImagePercentComplete", 1.0f, builder);
  assertCounter("LoadingEditsCount", 100L, builder);
  assertTrue(getLongCounter("LoadingEditsElapsedTime", builder) >= 0L);
  assertCounter("LoadingEditsTotal", 200L, builder);
  assertGauge("LoadingEditsPercentComplete", 0.5f, builder);
  assertCounter("SavingCheckpointCount", 0L, builder);
  assertCounter("SavingCheckpointElapsedTime", 0L, builder);
  assertCounter("SavingCheckpointTotal", 0L, builder);
  assertGauge("SavingCheckpointPercentComplete", 0.0f, builder);
  assertCounter("SafeModeCount", 0L, builder);
  assertCounter("SafeModeElapsedTime", 0L, builder);
  assertCounter("SafeModeTotal", 0L, builder);
  assertGauge("SafeModePercentComplete", 0.0f, builder);
}
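The getMetrics, getLongCounter, assertCounter, and assertGauge helpers used in this and the other test examples are static methods, typically pulled in from Hadoop's MetricsAsserts test utility, for example:

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;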
 
Example 2
Source Project: hbase   Source File: HBaseMetrics2HadoopMetricsAdapter.java    License: Apache License 2.0
private void addGauge(String name, Gauge<?> gauge, MetricsRecordBuilder builder) {
  final MetricsInfo info = Interns.info(name, EMPTY_STRING);
  final Object o = gauge.getValue();

  // Figure out which gauge types metrics2 supports and call the right method
  if (o instanceof Integer) {
    builder.addGauge(info, (int) o);
  } else if (o instanceof Long) {
    builder.addGauge(info, (long) o);
  } else if (o instanceof Float) {
    builder.addGauge(info, (float) o);
  } else if (o instanceof Double) {
    builder.addGauge(info, (double) o);
  } else {
    LOG.warn("Ignoring Gauge (" + name + ") with unhandled type: " + o.getClass());
  }
}
 
Example 3
Source Project: hbase   Source File: MetricsMasterQuotaSourceImpl.java    License: Apache License 2.0
@Override
public void getMetrics(MetricsCollector metricsCollector, boolean all) {
  MetricsRecordBuilder record = metricsCollector.addRecord(metricsRegistry.info());
  if (wrapper != null) {
    // Summarize the tables
    Map<String,Entry<Long,Long>> tableUsages = wrapper.getTableSpaceUtilization();
    String tableSummary = "[]";
    if (tableUsages != null && !tableUsages.isEmpty()) {
      tableSummary = generateJsonQuotaSummary(tableUsages.entrySet(), "table");
    }
    record.tag(Interns.info(TABLE_QUOTA_USAGE_NAME, TABLE_QUOTA_USAGE_DESC), tableSummary);

    // Summarize the namespaces
    String nsSummary = "[]";
    Map<String,Entry<Long,Long>> namespaceUsages = wrapper.getNamespaceSpaceUtilization();
    if (namespaceUsages != null && !namespaceUsages.isEmpty()) {
      nsSummary = generateJsonQuotaSummary(namespaceUsages.entrySet(), "namespace");
    }
    record.tag(Interns.info(NS_QUOTA_USAGE_NAME, NS_QUOTA_USAGE_DESC), nsSummary);
  }
  metricsRegistry.snapshot(record, all);
}
 
Example 4
private void checkBytesWritten(long expectedBytesWritten) throws Exception {
  // With only 3 datanodes and ozone.scm.pipeline.creation.auto.factor.one set
  // to false, there is only one pipeline in the system.
  List<Pipeline> pipelines = cluster.getStorageContainerManager()
      .getPipelineManager().getPipelines();

  Assert.assertEquals(1, pipelines.size());
  Pipeline pipeline = pipelines.get(0);

  final String metricName =
      SCMPipelineMetrics.getBytesWrittenMetricName(pipeline);
  GenericTestUtils.waitFor(() -> {
    MetricsRecordBuilder metrics = getMetrics(
        SCMPipelineMetrics.class.getSimpleName());
    return expectedBytesWritten == getLongCounter(metricName, metrics);
  }, 500, 300000);
}
 
Example 5
Source Project: hadoop-ozone   Source File: TestSCMPipelineMetrics.java    License: Apache License 2.0
/**
 * Verifies pipeline destroy metric.
 */
@Test
public void testPipelineDestroy() {
  PipelineManager pipelineManager = cluster
      .getStorageContainerManager().getPipelineManager();
  Optional<Pipeline> pipeline = pipelineManager
      .getPipelines().stream().findFirst();
  Assert.assertTrue(pipeline.isPresent());
  try {
    cluster.getStorageContainerManager()
        .getPipelineManager()
        .finalizeAndDestroyPipeline(
            pipeline.get(), false);
  } catch (IOException e) {
    e.printStackTrace();
    Assert.fail();
  }
  MetricsRecordBuilder metrics = getMetrics(
      SCMPipelineMetrics.class.getSimpleName());
  assertCounter("NumPipelineDestroyed", 1L, metrics);
}
 
Example 6
Source Project: hbase   Source File: MetricsUserSourceImpl.java    License: Apache License 2.0
void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
  // If a close has already started, make extra sure that we are not taking
  // any locks and not putting data into metrics that should be removed, so
  // bail out before even acquiring the lock.
  if (closed.get()) {
    return;
  }

  // Grab the lock. This ensures that removal of the metrics
  // can't happen while we are putting them back in.
  synchronized (this) {

    // It's possible that a close happened between checking
    // the closed variable and getting the lock.
    if (closed.get()) {
      return;
    }
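    // The remaining snapshot logic (adding this user's metrics to mrb) is
    // omitted from this excerpt.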
  }
}
 
Example 7
Source Project: hbase   Source File: MutableHistogram.java    License: Apache License 2.0
protected static void updateSnapshotMetrics(String name, String desc, Histogram histogram,
    Snapshot snapshot, MetricsRecordBuilder metricsRecordBuilder) {
  metricsRecordBuilder.addCounter(Interns.info(name + NUM_OPS_METRIC_NAME, desc),
      histogram.getCount());
  metricsRecordBuilder.addGauge(Interns.info(name + MIN_METRIC_NAME, desc), snapshot.getMin());
  metricsRecordBuilder.addGauge(Interns.info(name + MAX_METRIC_NAME, desc), snapshot.getMax());
  metricsRecordBuilder.addGauge(Interns.info(name + MEAN_METRIC_NAME, desc), snapshot.getMean());

  metricsRecordBuilder.addGauge(Interns.info(name + TWENTY_FIFTH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get25thPercentile());
  metricsRecordBuilder.addGauge(Interns.info(name + MEDIAN_METRIC_NAME, desc),
      snapshot.getMedian());
  metricsRecordBuilder.addGauge(Interns.info(name + SEVENTY_FIFTH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get75thPercentile());
  metricsRecordBuilder.addGauge(Interns.info(name + NINETIETH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get90thPercentile());
  metricsRecordBuilder.addGauge(Interns.info(name + NINETY_FIFTH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get95thPercentile());
  metricsRecordBuilder.addGauge(Interns.info(name + NINETY_EIGHTH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get98thPercentile());
  metricsRecordBuilder.addGauge(Interns.info(name + NINETY_NINETH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get99thPercentile());
  metricsRecordBuilder.addGauge(
      Interns.info(name + NINETY_NINE_POINT_NINETH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get999thPercentile());
}
 
Example 8
Source Project: hadoop   Source File: TestProtoBufRpc.java    License: Apache License 2.0
@Test (timeout=5000)
public void testProtoBufRpc2() throws Exception {
  TestRpcService2 client = getClient2();
  
  // Test ping method
  EmptyRequestProto emptyRequest = EmptyRequestProto.newBuilder().build();
  client.ping2(null, emptyRequest);
  
  // Test echo method
  EchoRequestProto echoRequest = EchoRequestProto.newBuilder()
      .setMessage("hello").build();
  EchoResponseProto echoResponse = client.echo2(null, echoRequest);
  Assert.assertEquals("hello", echoResponse.getMessage());
  
  // Ensure RPC metrics are updated
  MetricsRecordBuilder rpcMetrics = getMetrics(server.getRpcMetrics().name());
  assertCounterGt("RpcQueueTimeNumOps", 0L, rpcMetrics);
  assertCounterGt("RpcProcessingTimeNumOps", 0L, rpcMetrics);
  
  MetricsRecordBuilder rpcDetailedMetrics = 
      getMetrics(server.getRpcDetailedMetrics().name());
  assertCounterGt("Echo2NumOps", 0L, rpcDetailedMetrics);
}
 
Example 9
Source Project: big-c   Source File: JvmMetrics.java    License: Apache License 2.0
private void getThreadUsage(MetricsRecordBuilder rb) {
  int threadsNew = 0;
  int threadsRunnable = 0;
  int threadsBlocked = 0;
  int threadsWaiting = 0;
  int threadsTimedWaiting = 0;
  int threadsTerminated = 0;
  long threadIds[] = threadMXBean.getAllThreadIds();
  for (ThreadInfo threadInfo : threadMXBean.getThreadInfo(threadIds, 0)) {
    if (threadInfo == null) continue; // race protection
    switch (threadInfo.getThreadState()) {
      case NEW:           threadsNew++;           break;
      case RUNNABLE:      threadsRunnable++;      break;
      case BLOCKED:       threadsBlocked++;       break;
      case WAITING:       threadsWaiting++;       break;
      case TIMED_WAITING: threadsTimedWaiting++;  break;
      case TERMINATED:    threadsTerminated++;    break;
    }
  }
  rb.addGauge(ThreadsNew, threadsNew)
    .addGauge(ThreadsRunnable, threadsRunnable)
    .addGauge(ThreadsBlocked, threadsBlocked)
    .addGauge(ThreadsWaiting, threadsWaiting)
    .addGauge(ThreadsTimedWaiting, threadsTimedWaiting)
    .addGauge(ThreadsTerminated, threadsTerminated);
}
 
Example 10
Source Project: big-c   Source File: MethodMetric.java    License: Apache License 2.0
MutableMetric newTag(Class<?> resType) {
  if (resType == String.class) {
    return new MutableMetric() {
      @Override public void snapshot(MetricsRecordBuilder rb, boolean all) {
        try {
          Object ret = method.invoke(obj, (Object[]) null);
          rb.tag(info, (String) ret);
        }
        catch (Exception ex) {
          LOG.error("Error invoking method "+ method.getName(), ex);
        }
      }
    };
  }
  throw new MetricsException("Unsupported tag type: "+ resType.getName());
}
 
Example 11
Source Project: hadoop   Source File: TestNameNodeMetrics.java    License: Apache License 2.0
/** Test to ensure metrics reflect missing blocks. */
@Test
public void testMissingBlock() throws Exception {
  // Create a file with single block with two replicas
  Path file = getTestPath("testMissingBlocks");
  createFile(file, 100, (short)1);
  
  // Corrupt the only replica of the block to result in a missing block
  LocatedBlock block = NameNodeAdapter.getBlockLocations(
      cluster.getNameNode(), file.toString(), 0, 1).get(0);
  cluster.getNamesystem().writeLock();
  try {
    bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
        "STORAGE_ID", "TEST");
  } finally {
    cluster.getNamesystem().writeUnlock();
  }
  updateMetrics();
  MetricsRecordBuilder rb = getMetrics(NS_METRICS);
  assertGauge("UnderReplicatedBlocks", 1L, rb);
  assertGauge("MissingBlocks", 1L, rb);
  assertGauge("MissingReplOneBlocks", 1L, rb);
  fs.delete(file, true);
  waitForDnMetricValue(NS_METRICS, "UnderReplicatedBlocks", 0L);
}
 
Example 12
Source Project: hadoop   Source File: TestJvmMetrics.java    License: Apache License 2.0
@Test public void testPresence() {
  JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(new Configuration());
  JvmMetrics jvmMetrics = new JvmMetrics("test", "test");
  jvmMetrics.setPauseMonitor(pauseMonitor);
  MetricsRecordBuilder rb = getMetrics(jvmMetrics);
  MetricsCollector mc = rb.parent();

  verify(mc).addRecord(JvmMetrics);
  verify(rb).tag(ProcessName, "test");
  verify(rb).tag(SessionId, "test");
  for (JvmMetricsInfo info : JvmMetricsInfo.values()) {
    if (info.name().startsWith("Mem"))
      verify(rb).addGauge(eq(info), anyFloat());
    else if (info.name().startsWith("Gc"))
      verify(rb).addCounter(eq(info), anyLong());
    else if (info.name().startsWith("Threads"))
      verify(rb).addGauge(eq(info), anyInt());
    else if (info.name().startsWith("Log"))
      verify(rb).addCounter(eq(info), anyLong());
  }
}
 
Example 13
Source Project: hadoop   Source File: TestNameNodeMetrics.java    License: Apache License 2.0
/**
 * Test NN ReadOps Count and WriteOps Count
 */
@Test
public void testReadWriteOps() throws Exception {
  MetricsRecordBuilder rb = getMetrics(NN_METRICS);
  long startWriteCounter = MetricsAsserts.getLongCounter("TransactionsNumOps",
      rb);
  Path file1_Path = new Path(TEST_ROOT_DIR_PATH, "ReadData.dat");

  //Perform create file operation
  createFile(file1_Path, 1024 * 1024,(short)2);

  // Perform read file operation on earlier created file
  readFile(fs, file1_Path);
  MetricsRecordBuilder rbNew = getMetrics(NN_METRICS);
  assertTrue(MetricsAsserts.getLongCounter("TransactionsNumOps", rbNew) >
      startWriteCounter);
}
 
Example 14
Source Project: hadoop   Source File: TestStartupProgressMetrics.java    License: Apache License 2.0
@Test
public void testInitialState() {
  MetricsRecordBuilder builder = getMetrics(metrics, true);
  assertCounter("ElapsedTime", 0L, builder);
  assertGauge("PercentComplete", 0.0f, builder);
  assertCounter("LoadingFsImageCount", 0L, builder);
  assertCounter("LoadingFsImageElapsedTime", 0L, builder);
  assertCounter("LoadingFsImageTotal", 0L, builder);
  assertGauge("LoadingFsImagePercentComplete", 0.0f, builder);
  assertCounter("LoadingEditsCount", 0L, builder);
  assertCounter("LoadingEditsElapsedTime", 0L, builder);
  assertCounter("LoadingEditsTotal", 0L, builder);
  assertGauge("LoadingEditsPercentComplete", 0.0f, builder);
  assertCounter("SavingCheckpointCount", 0L, builder);
  assertCounter("SavingCheckpointElapsedTime", 0L, builder);
  assertCounter("SavingCheckpointTotal", 0L, builder);
  assertGauge("SavingCheckpointPercentComplete", 0.0f, builder);
  assertCounter("SafeModeCount", 0L, builder);
  assertCounter("SafeModeElapsedTime", 0L, builder);
  assertCounter("SafeModeTotal", 0L, builder);
  assertGauge("SafeModePercentComplete", 0.0f, builder);
}
 
Example 15
Source Project: hadoop   Source File: TestStartupProgressMetrics.java    License: Apache License 2.0
@Test
public void testRunningState() {
  setStartupProgressForRunningState(startupProgress);

  MetricsRecordBuilder builder = getMetrics(metrics, true);
  assertTrue(getLongCounter("ElapsedTime", builder) >= 0L);
  assertGauge("PercentComplete", 0.375f, builder);
  assertCounter("LoadingFsImageCount", 100L, builder);
  assertTrue(getLongCounter("LoadingFsImageElapsedTime", builder) >= 0L);
  assertCounter("LoadingFsImageTotal", 100L, builder);
  assertGauge("LoadingFsImagePercentComplete", 1.0f, builder);
  assertCounter("LoadingEditsCount", 100L, builder);
  assertTrue(getLongCounter("LoadingEditsElapsedTime", builder) >= 0L);
  assertCounter("LoadingEditsTotal", 200L, builder);
  assertGauge("LoadingEditsPercentComplete", 0.5f, builder);
  assertCounter("SavingCheckpointCount", 0L, builder);
  assertCounter("SavingCheckpointElapsedTime", 0L, builder);
  assertCounter("SavingCheckpointTotal", 0L, builder);
  assertGauge("SavingCheckpointPercentComplete", 0.0f, builder);
  assertCounter("SafeModeCount", 0L, builder);
  assertCounter("SafeModeElapsedTime", 0L, builder);
  assertCounter("SafeModeTotal", 0L, builder);
  assertGauge("SafeModePercentComplete", 0.0f, builder);
}
 
Example 16
Source Project: hadoop   Source File: TestDataNodeMetrics.java    License: Apache License 2.0
@Test
public void testDataNodeMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  SimulatedFSDataset.setFactory(conf);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    final long LONG_FILE_LEN = Integer.MAX_VALUE+1L; 
    DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
        LONG_FILE_LEN, (short)1, 1L);
    List<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(1, datanodes.size());
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    assertCounter("BytesWritten", LONG_FILE_LEN, rb);
    assertTrue("Expected non-zero number of incremental block reports",
        getLongCounter("IncrementalBlockReportsNumOps", rb) > 0);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Example 17
Source Project: phoenix   Source File: GlobalMetricRegistriesAdapter.java    License: Apache License 2.0
private void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsRecordBuilder builder) {
    Map<String, Metric> metrics = metricRegistry.getMetrics();
    for (Entry<String, Metric> e : metrics.entrySet()) {
        String name = StringUtils.capitalize(e.getKey());
        Metric metric = e.getValue();
        if (metric instanceof Gauge) {
            this.addGauge(name, (Gauge)metric, builder);
        } else if (metric instanceof Counter) {
            this.addCounter(name, (Counter)metric, builder);
        } else if (metric instanceof Histogram) {
            this.addHistogram(name, (Histogram)metric, builder);
        } else if (metric instanceof Meter) {
            this.addMeter(name, (Meter)metric, builder);
        } else if (metric instanceof Timer) {
            this.addTimer(name, (Timer)metric, builder);
        } else {
            LOGGER.info("Ignoring unknown Metric class " + metric.getClass().getName());
        }
    }
}
 
Example 18
Source Project: big-c   Source File: TestDataNodeMetrics.java    License: Apache License 2.0
@Test
public void testDataNodeMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  SimulatedFSDataset.setFactory(conf);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    final long LONG_FILE_LEN = Integer.MAX_VALUE+1L; 
    DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
        LONG_FILE_LEN, (short)1, 1L);
    List<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(1, datanodes.size());
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    assertCounter("BytesWritten", LONG_FILE_LEN, rb);
    assertTrue("Expected non-zero number of incremental block reports",
        getLongCounter("IncrementalBlockReportsNumOps", rb) > 0);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Example 19
Source Project: hadoop   Source File: TestAzureFileSystemInstrumentation.java    License: Apache License 2.0
@Test
public void testMetricTags() throws Exception {
  String accountName =
      testAccount.getRealAccount().getBlobEndpoint()
      .getAuthority();
  String containerName =
      testAccount.getRealContainer().getName();
  MetricsRecordBuilder myMetrics = getMyMetrics();
  verify(myMetrics).add(argThat(
      new TagMatcher("accountName", accountName)
      ));
  verify(myMetrics).add(argThat(
      new TagMatcher("containerName", containerName)
      ));
  verify(myMetrics).add(argThat(
      new TagMatcher("Context", "azureFileSystem")
      ));
  verify(myMetrics).add(argThat(
      new TagExistsMatcher("wasbFileSystemId")
      ));
}
 
Example 20
Source Project: hadoop   Source File: MethodMetric.java    License: Apache License 2.0
MutableMetric newCounter(final Class<?> type) {
  if (isInt(type) || isLong(type)) {
    return new MutableMetric() {
      @Override public void snapshot(MetricsRecordBuilder rb, boolean all) {
        try {
          Object ret = method.invoke(obj, (Object[])null);
          if (isInt(type)) rb.addCounter(info, ((Integer) ret).intValue());
          else rb.addCounter(info, ((Long) ret).longValue());
        }
        catch (Exception ex) {
          LOG.error("Error invoking method "+ method.getName(), ex);
        }
      }
    };
  }
  throw new MetricsException("Unsupported counter type: "+ type.getName());
}
 
Example 21
Source Project: hadoop   Source File: JvmMetrics.java    License: Apache License 2.0
private void getGcUsage(MetricsRecordBuilder rb) {
  long count = 0;
  long timeMillis = 0;
  for (GarbageCollectorMXBean gcBean : gcBeans) {
    long c = gcBean.getCollectionCount();
    long t = gcBean.getCollectionTime();
    MetricsInfo[] gcInfo = getGcInfo(gcBean.getName());
    rb.addCounter(gcInfo[0], c).addCounter(gcInfo[1], t);
    count += c;
    timeMillis += t;
  }
  rb.addCounter(GcCount, count)
    .addCounter(GcTimeMillis, timeMillis);
  
  if (pauseMonitor != null) {
    rb.addCounter(GcNumWarnThresholdExceeded,
        pauseMonitor.getNumGcWarnThreadholdExceeded());
    rb.addCounter(GcNumInfoThresholdExceeded,
        pauseMonitor.getNumGcInfoThresholdExceeded());
    rb.addCounter(GcTotalExtraSleepTime,
        pauseMonitor.getTotalGcExtraSleepTime());
  }
}
 
Example 22
Source Project: hadoop   Source File: JvmMetrics.java    License: Apache License 2.0
private void getThreadUsage(MetricsRecordBuilder rb) {
  int threadsNew = 0;
  int threadsRunnable = 0;
  int threadsBlocked = 0;
  int threadsWaiting = 0;
  int threadsTimedWaiting = 0;
  int threadsTerminated = 0;
  long threadIds[] = threadMXBean.getAllThreadIds();
  for (ThreadInfo threadInfo : threadMXBean.getThreadInfo(threadIds, 0)) {
    if (threadInfo == null) continue; // race protection
    switch (threadInfo.getThreadState()) {
      case NEW:           threadsNew++;           break;
      case RUNNABLE:      threadsRunnable++;      break;
      case BLOCKED:       threadsBlocked++;       break;
      case WAITING:       threadsWaiting++;       break;
      case TIMED_WAITING: threadsTimedWaiting++;  break;
      case TERMINATED:    threadsTerminated++;    break;
    }
  }
  rb.addGauge(ThreadsNew, threadsNew)
    .addGauge(ThreadsRunnable, threadsRunnable)
    .addGauge(ThreadsBlocked, threadsBlocked)
    .addGauge(ThreadsWaiting, threadsWaiting)
    .addGauge(ThreadsTimedWaiting, threadsTimedWaiting)
    .addGauge(ThreadsTerminated, threadsTerminated);
}
 
Example 23
Source Project: hbase   Source File: MetricsIOSourceImpl.java    License: Apache License 2.0
@Override
public void getMetrics(MetricsCollector metricsCollector, boolean all) {
  MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName);

  // wrapper can be null because this function is called inside of init.
  if (wrapper != null) {
    mrb.addCounter(Interns.info(CHECKSUM_FAILURES_KEY, CHECKSUM_FAILURES_DESC),
      wrapper.getChecksumFailures());
  }

  metricsRegistry.snapshot(mrb, all);
}
 
Example 24
Source Project: hadoop-ozone   Source File: TestSCMContainerMetrics.java    License: Apache License 2.0
@Test
public void testSCMContainerMetrics() {
  SCMMXBean scmmxBean = mock(SCMMXBean.class);

  Map<String, Integer> stateInfo = new HashMap<String, Integer>() {{
      put(HddsProtos.LifeCycleState.OPEN.toString(), 2);
      put(HddsProtos.LifeCycleState.CLOSING.toString(), 3);
      put(HddsProtos.LifeCycleState.QUASI_CLOSED.toString(), 4);
      put(HddsProtos.LifeCycleState.CLOSED.toString(), 5);
      put(HddsProtos.LifeCycleState.DELETING.toString(), 6);
      put(HddsProtos.LifeCycleState.DELETED.toString(), 7);
    }};


  when(scmmxBean.getContainerStateCount()).thenReturn(stateInfo);

  MetricsRecordBuilder mb = mock(MetricsRecordBuilder.class);
  when(mb.addGauge(any(MetricsInfo.class), anyInt())).thenReturn(mb);

  MetricsCollector metricsCollector = mock(MetricsCollector.class);
  when(metricsCollector.addRecord(anyString())).thenReturn(mb);

  SCMContainerMetrics containerMetrics = new SCMContainerMetrics(scmmxBean);

  containerMetrics.getMetrics(metricsCollector, true);

  verify(mb, times(1)).addGauge(Interns.info("OpenContainers",
      "Number of open containers"), 2);
  verify(mb, times(1)).addGauge(Interns.info("ClosingContainers",
      "Number of containers in closing state"), 3);
  verify(mb, times(1)).addGauge(Interns.info("QuasiClosedContainers",
      "Number of containers in quasi closed state"), 4);
  verify(mb, times(1)).addGauge(Interns.info("ClosedContainers",
      "Number of containers in closed state"), 5);
  verify(mb, times(1)).addGauge(Interns.info("DeletingContainers",
      "Number of containers in deleting state"), 6);
  verify(mb, times(1)).addGauge(Interns.info("DeletedContainers",
      "Number of containers in deleted state"), 7);
}
 
Example 25
Source Project: big-c   Source File: MutableCounterInt.java    License: Apache License 2.0
@Override
public void snapshot(MetricsRecordBuilder builder, boolean all) {
  if (all || changed()) {
    builder.addCounter(info(), value());
    clearChanged();
  }
}
 
Example 26
Source Project: big-c   Source File: TestRPC.java    License: Apache License 2.0
@Test
public void testRpcMetrics() throws Exception {
  Configuration configuration = new Configuration();
  final int interval = 1;
  configuration.setBoolean(CommonConfigurationKeys.
      RPC_METRICS_QUANTILE_ENABLE, true);
  configuration.set(CommonConfigurationKeys.
      RPC_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  final Server server = new RPC.Builder(configuration)
      .setProtocol(TestProtocol.class).setInstance(new TestImpl())
      .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
      .build();
  server.start();
  final TestProtocol proxy = RPC.getProxy(TestProtocol.class,
      TestProtocol.versionID, server.getListenerAddress(), configuration);
  try {
    for (int i=0; i<1000; i++) {
      proxy.ping();
      proxy.echo("" + i);
    }
    MetricsRecordBuilder rpcMetrics =
        getMetrics(server.getRpcMetrics().name());
    assertTrue("Expected non-zero rpc queue time",
        getLongCounter("RpcQueueTimeNumOps", rpcMetrics) > 0);
    assertTrue("Expected non-zero rpc processing time",
        getLongCounter("RpcProcessingTimeNumOps", rpcMetrics) > 0);
    MetricsAsserts.assertQuantileGauges("RpcQueueTime" + interval + "s",
        rpcMetrics);
    MetricsAsserts.assertQuantileGauges("RpcProcessingTime" + interval + "s",
        rpcMetrics);
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
 
Example 27
Source Project: kylin   Source File: HadoopMetrics2ReporterTest.java    License: Apache License 2.0
@Test
public void testGaugeReporting() {
    final AtomicLong gaugeValue = new AtomicLong(0L);
    @SuppressWarnings("rawtypes")
    final Gauge gauge = new Gauge<Long>() {
        @Override
        public Long getValue() {
            return gaugeValue.get();
        }
    };

    @SuppressWarnings("rawtypes")
    TreeMap<String, Gauge> gauges = new TreeMap<>();
    gauges.put("my_gauge", gauge);
    // Add the metrics objects to the internal "queues" by hand
    metrics2Reporter.setDropwizardGauges(gauges);

    // Set some values
    gaugeValue.set(5L);

    MetricsCollector collector = mock(MetricsCollector.class);
    MetricsRecordBuilder recordBuilder = mock(MetricsRecordBuilder.class);

    Mockito.when(collector.addRecord(recordName)).thenReturn(recordBuilder);

    // Make sure a value of 5 gets reported
    metrics2Reporter.getMetrics(collector, true);

    verify(recordBuilder).addGauge(Interns.info("my_gauge", ""), gaugeValue.get());
    verifyRecordBuilderUnits(recordBuilder);

    // Should not be the same instance we gave before. Our map should have gotten swapped out.
    assertTrue("Should not be the same map instance after collection",
            gauges != metrics2Reporter.getDropwizardGauges());
}
 
Example 28
Source Project: phoenix   Source File: GlobalMetricRegistriesAdapter.java    License: Apache License 2.0
private void addMeter(String name, Meter meter, MetricsRecordBuilder builder) {
    builder.addGauge(Interns.info(name + "_count", ""), meter.getCount());
    builder.addGauge(Interns.info(name + "_mean_rate", ""), meter.getMeanRate());
    builder.addGauge(Interns.info(name + "_1min_rate", ""), meter.getOneMinuteRate());
    builder.addGauge(Interns.info(name + "_5min_rate", ""), meter.getFiveMinuteRate());
    builder.addGauge(Interns.info(name + "_15min_rate", ""), meter.getFifteenMinuteRate());
}
 
Example 29
Source Project: hadoop   Source File: TestUserGroupInformation.java    License: Apache License 2.0
public static void verifyLoginMetrics(long success, int failure)
    throws IOException {
  // Ensure metrics related to kerberos login is updated.
  MetricsRecordBuilder rb = getMetrics("UgiMetrics");
  if (success > 0) {
    assertCounter("LoginSuccessNumOps", success, rb);
    assertGaugeGt("LoginSuccessAvgTime", 0, rb);
  }
  if (failure > 0) {
    assertCounter("LoginFailureNumPos", failure, rb);
    assertGaugeGt("LoginFailureAvgTime", 0, rb);
  }
}
 
Example 30
Source Project: hadoop   Source File: TestMetricsAnnotations.java    License: Apache License 2.0
@Test public void testFields() {
  MyMetrics metrics = new MyMetrics();
  MetricsSource source = MetricsAnnotations.makeSource(metrics);

  metrics.c1.incr();
  metrics.c2.incr();
  metrics.g1.incr();
  metrics.g2.incr();
  metrics.g3.incr();
  metrics.r1.add(1);
  metrics.s1.add(1);
  metrics.rs1.add("rs1", 1);

  MetricsRecordBuilder rb = getMetrics(source);

  verify(rb).addCounter(info("C1", "C1"), 1);
  verify(rb).addCounter(info("Counter2", "Counter2 desc"), 1L);
  verify(rb).addGauge(info("G1", "G1"), 1);
  verify(rb).addGauge(info("G2", "G2"), 1);
  verify(rb).addGauge(info("G3", "g3 desc"), 1L);
  verify(rb).addCounter(info("R1NumOps", "Number of ops for r1"), 1L);
  verify(rb).addGauge(info("R1AvgTime", "Average time for r1"), 1.0);
  verify(rb).addCounter(info("S1NumOps", "Number of ops for s1"), 1L);
  verify(rb).addGauge(info("S1AvgTime", "Average time for s1"), 1.0);
  verify(rb).addCounter(info("Rs1NumOps", "Number of ops for rs1"), 1L);
  verify(rb).addGauge(info("Rs1AvgTime", "Average time for rs1"), 1.0);
}