org.apache.hadoop.metrics2.MetricsRecordBuilder Java Examples

The following examples show how to use org.apache.hadoop.metrics2.MetricsRecordBuilder. You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example #1
Source File: JvmMetrics.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Adds per-state thread-count gauges (NEW, RUNNABLE, BLOCKED, WAITING,
 * TIMED_WAITING, TERMINATED) to the supplied record builder.
 *
 * @param rb builder for the metrics record being assembled
 */
private void getThreadUsage(MetricsRecordBuilder rb) {
  int threadsNew = 0;
  int threadsRunnable = 0;
  int threadsBlocked = 0;
  int threadsWaiting = 0;
  int threadsTimedWaiting = 0;
  int threadsTerminated = 0;
  // Java-style array declaration (type[] name) instead of C-style (name[]).
  long[] threadIds = threadMXBean.getAllThreadIds();
  // Depth 0: we only need each thread's state, not its stack trace.
  for (ThreadInfo threadInfo : threadMXBean.getThreadInfo(threadIds, 0)) {
    if (threadInfo == null) continue; // thread exited between the two calls
    switch (threadInfo.getThreadState()) {
      case NEW:           threadsNew++;           break;
      case RUNNABLE:      threadsRunnable++;      break;
      case BLOCKED:       threadsBlocked++;       break;
      case WAITING:       threadsWaiting++;       break;
      case TIMED_WAITING: threadsTimedWaiting++;  break;
      case TERMINATED:    threadsTerminated++;    break;
    }
  }
  rb.addGauge(ThreadsNew, threadsNew)
    .addGauge(ThreadsRunnable, threadsRunnable)
    .addGauge(ThreadsBlocked, threadsBlocked)
    .addGauge(ThreadsWaiting, threadsWaiting)
    .addGauge(ThreadsTimedWaiting, threadsTimedWaiting)
    .addGauge(ThreadsTerminated, threadsTerminated);
}
 
Example #2
Source File: MetricsMasterQuotaSourceImpl.java    From hbase with Apache License 2.0 6 votes vote down vote up
/**
 * Snapshots the master quota metrics: when a wrapper is available, tags the
 * record with JSON summaries of table and namespace space utilization, then
 * lets the registry emit its own metrics into the same record.
 */
@Override
public void getMetrics(MetricsCollector metricsCollector, boolean all) {
  MetricsRecordBuilder record = metricsCollector.addRecord(metricsRegistry.info());
  if (wrapper != null) {
    // Table-level usage summary; "[]" when there is nothing to report.
    Map<String,Entry<Long,Long>> tableUsages = wrapper.getTableSpaceUtilization();
    String tableSummary = (tableUsages == null || tableUsages.isEmpty())
        ? "[]"
        : generateJsonQuotaSummary(tableUsages.entrySet(), "table");
    record.tag(Interns.info(TABLE_QUOTA_USAGE_NAME, TABLE_QUOTA_USAGE_DESC), tableSummary);

    // Namespace-level usage summary, same convention.
    Map<String,Entry<Long,Long>> namespaceUsages = wrapper.getNamespaceSpaceUtilization();
    String nsSummary = (namespaceUsages == null || namespaceUsages.isEmpty())
        ? "[]"
        : generateJsonQuotaSummary(namespaceUsages.entrySet(), "namespace");
    record.tag(Interns.info(NS_QUOTA_USAGE_NAME, NS_QUOTA_USAGE_DESC), nsSummary);
  }
  metricsRegistry.snapshot(record, all);
}
 
Example #3
Source File: HBaseMetrics2HadoopMetricsAdapter.java    From hbase with Apache License 2.0 6 votes vote down vote up
/**
 * Bridges a Dropwizard gauge into a metrics2 gauge, choosing the builder
 * overload that matches the gauge's boxed numeric type. Unsupported value
 * types are logged and skipped.
 */
private void addGauge(String name, Gauge<?> gauge, MetricsRecordBuilder builder) {
  MetricsInfo info = Interns.info(name, EMPTY_STRING);
  Object value = gauge.getValue();

  if (value instanceof Integer) {
    builder.addGauge(info, ((Integer) value).intValue());
  } else if (value instanceof Long) {
    builder.addGauge(info, ((Long) value).longValue());
  } else if (value instanceof Float) {
    builder.addGauge(info, ((Float) value).floatValue());
  } else if (value instanceof Double) {
    builder.addGauge(info, ((Double) value).doubleValue());
  } else {
    LOG.warn("Ignoring Gauge (" + name + ") with unhandled type: " + value.getClass());
  }
}
 
Example #4
Source File: TestSCMPipelineBytesWrittenMetrics.java    From hadoop-ozone with Apache License 2.0 6 votes vote down vote up
/**
 * Waits until the SCM pipeline metrics report the expected number of bytes
 * written for the single pipeline in the cluster.
 */
private void checkBytesWritten(long expectedBytesWritten) throws Exception {
  // With only 3 datanodes and ozone.scm.pipeline.creation.auto.factor.one
  // set to false, exactly one pipeline exists in the system.
  List<Pipeline> pipelines = cluster.getStorageContainerManager()
      .getPipelineManager().getPipelines();
  Assert.assertEquals(1, pipelines.size());

  final String metricName =
      SCMPipelineMetrics.getBytesWrittenMetricName(pipelines.get(0));
  // Poll every 500 ms for up to 5 minutes until the counter matches.
  GenericTestUtils.waitFor(() -> {
    MetricsRecordBuilder metrics = getMetrics(
        SCMPipelineMetrics.class.getSimpleName());
    return getLongCounter(metricName, metrics) == expectedBytesWritten;
  }, 500, 300000);
}
 
Example #5
Source File: TestSCMPipelineMetrics.java    From hadoop-ozone with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that destroying a pipeline increments the
 * {@code NumPipelineDestroyed} counter.
 */
@Test
public void testPipelineDestroy() {
  PipelineManager pipelineManager = cluster
      .getStorageContainerManager().getPipelineManager();
  Optional<Pipeline> pipeline = pipelineManager
      .getPipelines().stream().findFirst();
  Assert.assertTrue(pipeline.isPresent());
  try {
    cluster.getStorageContainerManager()
        .getPipelineManager()
        .finalizeAndDestroyPipeline(
            pipeline.get(), false);
  } catch (IOException e) {
    // Fail with the cause in the message instead of printing the stack
    // trace to stderr and failing with no context.
    Assert.fail("finalizeAndDestroyPipeline failed: " + e);
  }
  MetricsRecordBuilder metrics = getMetrics(
      SCMPipelineMetrics.class.getSimpleName());
  assertCounter("NumPipelineDestroyed", 1L, metrics);
}
 
Example #6
Source File: MetricsUserSourceImpl.java    From hbase with Apache License 2.0 6 votes vote down vote up
/**
 * Snapshots this user's metrics into the given record builder, bailing out
 * early (twice — once lock-free, once under the lock) if the source has
 * been closed.
 *
 * @param mrb     builder that receives the snapshot
 * @param ignored the metrics framework's "all" flag; unused here
 */
void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
  // If there is a close that started be double extra sure
  // that we're not getting any locks and not putting data
  // into the metrics that should be removed. So early out
  // before even getting the lock.
  if (closed.get()) {
    return;
  }

  // Grab the read
  // This ensures that removes of the metrics
  // can't happen while we are putting them back in.
  synchronized (this) {

    // It's possible that a close happened between checking
    // the closed variable and getting the lock.
    if (closed.get()) {
      return;
    }
    // NOTE(review): no metrics are actually emitted in the code shown here;
    // presumably the snapshot work follows inside this synchronized block in
    // the full source — confirm against upstream before relying on this
    // excerpt.
  }
}
 
Example #7
Source File: MutableHistogram.java    From hbase with Apache License 2.0 6 votes vote down vote up
/**
 * Publishes one histogram's snapshot into the record builder: an op-count
 * counter plus min/max/mean gauges and a fixed set of percentile gauges,
 * each metric named {@code name + <suffix constant>} with the shared
 * description {@code desc}.
 *
 * NOTE(review): the addGauge overload selected depends on the return types
 * of the Snapshot getters (long vs double), which are not visible here —
 * the direct per-getter calls are kept deliberately rather than funnelling
 * values through a common helper.
 *
 * @param name                 metric name prefix
 * @param desc                 description applied to every emitted metric
 * @param histogram            source of the total operation count
 * @param snapshot             distribution snapshot supplying the statistics
 * @param metricsRecordBuilder destination record builder
 */
protected static void updateSnapshotMetrics(String name, String desc, Histogram histogram,
    Snapshot snapshot, MetricsRecordBuilder metricsRecordBuilder) {
  metricsRecordBuilder.addCounter(Interns.info(name + NUM_OPS_METRIC_NAME, desc),
      histogram.getCount());
  metricsRecordBuilder.addGauge(Interns.info(name + MIN_METRIC_NAME, desc), snapshot.getMin());
  metricsRecordBuilder.addGauge(Interns.info(name + MAX_METRIC_NAME, desc), snapshot.getMax());
  metricsRecordBuilder.addGauge(Interns.info(name + MEAN_METRIC_NAME, desc), snapshot.getMean());

  // Percentile gauges: 25th, 50th, 75th, 90th, 95th, 98th, 99th, 99.9th.
  metricsRecordBuilder.addGauge(Interns.info(name + TWENTY_FIFTH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get25thPercentile());
  metricsRecordBuilder.addGauge(Interns.info(name + MEDIAN_METRIC_NAME, desc),
      snapshot.getMedian());
  metricsRecordBuilder.addGauge(Interns.info(name + SEVENTY_FIFTH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get75thPercentile());
  metricsRecordBuilder.addGauge(Interns.info(name + NINETIETH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get90thPercentile());
  metricsRecordBuilder.addGauge(Interns.info(name + NINETY_FIFTH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get95thPercentile());
  metricsRecordBuilder.addGauge(Interns.info(name + NINETY_EIGHTH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get98thPercentile());
  metricsRecordBuilder.addGauge(Interns.info(name + NINETY_NINETH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get99thPercentile());
  metricsRecordBuilder.addGauge(
      Interns.info(name + NINETY_NINE_POINT_NINETH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get999thPercentile());
}
 
Example #8
Source File: TestProtoBufRpc.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * End-to-end check of protobuf RPC service 2: ping, echo, and the RPC
 * metrics both calls should have updated.
 */
@Test (timeout=5000)
public void testProtoBufRpc2() throws Exception {
  TestRpcService2 client = getClient2();

  // Test ping method
  EmptyRequestProto emptyRequest = EmptyRequestProto.newBuilder().build();
  client.ping2(null, emptyRequest);

  // Test echo method
  EchoRequestProto echoRequest = EchoRequestProto.newBuilder()
      .setMessage("hello").build();
  EchoResponseProto echoResponse = client.echo2(null, echoRequest);
  // JUnit convention is (expected, actual); the original had them reversed,
  // which produces a misleading failure message.
  Assert.assertEquals("hello", echoResponse.getMessage());

  // Ensure RPC metrics are updated
  MetricsRecordBuilder rpcMetrics = getMetrics(server.getRpcMetrics().name());
  assertCounterGt("RpcQueueTimeNumOps", 0L, rpcMetrics);
  assertCounterGt("RpcProcessingTimeNumOps", 0L, rpcMetrics);

  MetricsRecordBuilder rpcDetailedMetrics =
      getMetrics(server.getRpcDetailedMetrics().name());
  assertCounterGt("Echo2NumOps", 0L, rpcDetailedMetrics);
}
 
Example #9
Source File: JvmMetrics.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Counts live threads by state and publishes one gauge per
 * {@code Thread.State} into the record builder.
 *
 * @param rb builder for the metrics record being assembled
 */
private void getThreadUsage(MetricsRecordBuilder rb) {
  int threadsNew = 0;
  int threadsRunnable = 0;
  int threadsBlocked = 0;
  int threadsWaiting = 0;
  int threadsTimedWaiting = 0;
  int threadsTerminated = 0;
  // Idiomatic Java array declaration: long[] name, not long name[].
  long[] threadIds = threadMXBean.getAllThreadIds();
  // Stack depth 0: only the state is needed, not the trace.
  for (ThreadInfo threadInfo : threadMXBean.getThreadInfo(threadIds, 0)) {
    if (threadInfo == null) continue; // thread exited between the two calls
    switch (threadInfo.getThreadState()) {
      case NEW:           threadsNew++;           break;
      case RUNNABLE:      threadsRunnable++;      break;
      case BLOCKED:       threadsBlocked++;       break;
      case WAITING:       threadsWaiting++;       break;
      case TIMED_WAITING: threadsTimedWaiting++;  break;
      case TERMINATED:    threadsTerminated++;    break;
    }
  }
  rb.addGauge(ThreadsNew, threadsNew)
    .addGauge(ThreadsRunnable, threadsRunnable)
    .addGauge(ThreadsBlocked, threadsBlocked)
    .addGauge(ThreadsWaiting, threadsWaiting)
    .addGauge(ThreadsTimedWaiting, threadsTimedWaiting)
    .addGauge(ThreadsTerminated, threadsTerminated);
}
 
Example #10
Source File: MethodMetric.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Builds a tag-producing metric for a String-returning annotated method.
 * Only {@code String} is supported as a tag type; anything else is rejected
 * up front.
 */
MutableMetric newTag(Class<?> resType) {
  if (resType != String.class) {
    throw new MetricsException("Unsupported tag type: "+ resType.getName());
  }
  return new MutableMetric() {
    @Override public void snapshot(MetricsRecordBuilder rb, boolean all) {
      try {
        // Invoke the no-arg getter and record its result as the tag value.
        Object ret = method.invoke(obj, (Object[]) null);
        rb.tag(info, (String) ret);
      } catch (Exception ex) {
        LOG.error("Error invoking method "+ method.getName(), ex);
      }
    }
  };
}
 
Example #11
Source File: TestNameNodeMetrics.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/** Test to ensure metrics reflects missing blocks */
@Test
public void testMissingBlock() throws Exception {
  // Single-block file with exactly one replica.
  Path file = getTestPath("testMissingBlocks");
  createFile(file, 100, (short)1);

  // Corrupt that only replica so the block becomes missing.
  LocatedBlock located = NameNodeAdapter.getBlockLocations(
      cluster.getNameNode(), file.toString(), 0, 1).get(0);
  cluster.getNamesystem().writeLock();
  try {
    bm.findAndMarkBlockAsCorrupt(located.getBlock(), located.getLocations()[0],
        "STORAGE_ID", "TEST");
  } finally {
    cluster.getNamesystem().writeUnlock();
  }
  updateMetrics();

  // All three missing-block gauges should now read 1.
  MetricsRecordBuilder rb = getMetrics(NS_METRICS);
  assertGauge("UnderReplicatedBlocks", 1L, rb);
  assertGauge("MissingBlocks", 1L, rb);
  assertGauge("MissingReplOneBlocks", 1L, rb);

  // Deleting the file should bring the metric back to 0.
  fs.delete(file, true);
  waitForDnMetricValue(NS_METRICS, "UnderReplicatedBlocks", 0L);
}
 
Example #12
Source File: TestNameNodeMetrics.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Test NN ReadOps Count and WriteOps Count
 */
@Test
public void testReadWriteOps() throws Exception {
  MetricsRecordBuilder before = getMetrics(NN_METRICS);
  long transactionsBefore =
      MetricsAsserts.getLongCounter("TransactionsNumOps", before);
  Path file1_Path = new Path(TEST_ROOT_DIR_PATH, "ReadData.dat");

  // Create a file, then read it back; the create must bump the
  // transaction counter.
  createFile(file1_Path, 1024 * 1024,(short)2);
  readFile(fs, file1_Path);

  MetricsRecordBuilder after = getMetrics(NN_METRICS);
  assertTrue(MetricsAsserts.getLongCounter("TransactionsNumOps", after)
      > transactionsBefore);
}
 
Example #13
Source File: TestStartupProgressMetrics.java    From hadoop with Apache License 2.0 6 votes vote down vote up
@Test
public void testInitialState() {
  MetricsRecordBuilder builder = getMetrics(metrics, true);
  // Overall progress starts at zero.
  assertCounter("ElapsedTime", 0L, builder);
  assertGauge("PercentComplete", 0.0f, builder);
  // Every startup phase begins with zeroed counters and 0% progress.
  for (String phase : new String[] {
      "LoadingFsImage", "LoadingEdits", "SavingCheckpoint", "SafeMode"}) {
    assertCounter(phase + "Count", 0L, builder);
    assertCounter(phase + "ElapsedTime", 0L, builder);
    assertCounter(phase + "Total", 0L, builder);
    assertGauge(phase + "PercentComplete", 0.0f, builder);
  }
}
 
Example #14
Source File: TestStartupProgressMetrics.java    From hadoop with Apache License 2.0 6 votes vote down vote up
@Test
public void testRunningState() {
  setStartupProgressForRunningState(startupProgress);

  MetricsRecordBuilder rb = getMetrics(metrics, true);
  assertTrue(getLongCounter("ElapsedTime", rb) >= 0L);
  assertGauge("PercentComplete", 0.375f, rb);

  // Fsimage loading has finished: 100 of 100, 100%.
  assertCounter("LoadingFsImageCount", 100L, rb);
  assertTrue(getLongCounter("LoadingFsImageElapsedTime", rb) >= 0L);
  assertCounter("LoadingFsImageTotal", 100L, rb);
  assertGauge("LoadingFsImagePercentComplete", 1.0f, rb);

  // Edits loading is halfway: 100 of 200, 50%.
  assertCounter("LoadingEditsCount", 100L, rb);
  assertTrue(getLongCounter("LoadingEditsElapsedTime", rb) >= 0L);
  assertCounter("LoadingEditsTotal", 200L, rb);
  assertGauge("LoadingEditsPercentComplete", 0.5f, rb);

  // The later phases have not started yet.
  for (String phase : new String[] {"SavingCheckpoint", "SafeMode"}) {
    assertCounter(phase + "Count", 0L, rb);
    assertCounter(phase + "ElapsedTime", 0L, rb);
    assertCounter(phase + "Total", 0L, rb);
    assertGauge(phase + "PercentComplete", 0.0f, rb);
  }
}
 
Example #15
Source File: TestDataNodeMetrics.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Writes a file larger than 2 GB into a single-datanode cluster and
 * verifies the datanode's BytesWritten counter and that incremental block
 * reports were sent.
 */
@Test
public void testDataNodeMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  SimulatedFSDataset.setFactory(conf);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    // Integer.MAX_VALUE + 1 as a long, to exercise 64-bit byte counters.
    final long LONG_FILE_LEN = Integer.MAX_VALUE+1L;
    DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
        LONG_FILE_LEN, (short)1, 1L);
    List<DataNode> datanodes = cluster.getDataNodes();
    // JUnit convention is (expected, actual); the original had them
    // reversed, which garbles the failure message.
    assertEquals(1, datanodes.size());
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    assertCounter("BytesWritten", LONG_FILE_LEN, rb);
    assertTrue("Expected non-zero number of incremental block reports",
        getLongCounter("IncrementalBlockReportsNumOps", rb) > 0);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Example #16
Source File: GlobalMetricRegistriesAdapter.java    From phoenix with Apache License 2.0 6 votes vote down vote up
/**
 * Pushes every metric in the Dropwizard registry into the metrics2 record
 * builder, dispatching on the concrete metric type. Unknown metric classes
 * are logged and skipped.
 *
 * @param metricRegistry source Dropwizard registry
 * @param builder        destination metrics2 record builder
 */
private void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsRecordBuilder builder) {
    // Typed enhanced-for instead of a raw Iterator with unchecked casts.
    for (Entry<String, Metric> e : metricRegistry.getMetrics().entrySet()) {
        String name = StringUtils.capitalize(e.getKey());
        Metric metric = e.getValue();
        if (metric instanceof Gauge) {
            this.addGauge(name, (Gauge) metric, builder);
        } else if (metric instanceof Counter) {
            this.addCounter(name, (Counter) metric, builder);
        } else if (metric instanceof Histogram) {
            this.addHistogram(name, (Histogram) metric, builder);
        } else if (metric instanceof Meter) {
            this.addMeter(name, (Meter) metric, builder);
        } else if (metric instanceof Timer) {
            this.addTimer(name, (Timer) metric, builder);
        } else {
            LOGGER.info("Ignoring unknown Metric class " + metric.getClass().getName());
        }
    }
}
 
Example #17
Source File: TestDataNodeMetrics.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies datanode byte-count metrics after writing a >2 GB file to a
 * one-datanode simulated cluster.
 */
@Test
public void testDataNodeMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  SimulatedFSDataset.setFactory(conf);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    // Integer.MAX_VALUE + 1 as a long, to exercise 64-bit byte counters.
    final long LONG_FILE_LEN = Integer.MAX_VALUE+1L;
    DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
        LONG_FILE_LEN, (short)1, 1L);
    List<DataNode> datanodes = cluster.getDataNodes();
    // Fixed argument order: JUnit expects (expected, actual).
    assertEquals(1, datanodes.size());
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    assertCounter("BytesWritten", LONG_FILE_LEN, rb);
    assertTrue("Expected non-zero number of incremental block reports",
        getLongCounter("IncrementalBlockReportsNumOps", rb) > 0);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Example #18
Source File: TestAzureFileSystemInstrumentation.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Checks that the WASB metrics record is tagged with the storage account
 * name, container name, azureFileSystem context, and a filesystem id.
 */
@Test
public void testMetricTags() throws Exception {
  String account = testAccount.getRealAccount().getBlobEndpoint()
      .getAuthority();
  String container = testAccount.getRealContainer().getName();
  MetricsRecordBuilder rb = getMyMetrics();
  verify(rb).add(argThat(new TagMatcher("accountName", account)));
  verify(rb).add(argThat(new TagMatcher("containerName", container)));
  verify(rb).add(argThat(new TagMatcher("Context", "azureFileSystem")));
  verify(rb).add(argThat(new TagExistsMatcher("wasbFileSystemId")));
}
 
Example #19
Source File: MethodMetric.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Builds a counter metric backed by an int- or long-returning annotated
 * method; any other return type is rejected up front.
 */
MutableMetric newCounter(final Class<?> type) {
  if (!isInt(type) && !isLong(type)) {
    throw new MetricsException("Unsupported counter type: "+ type.getName());
  }
  return new MutableMetric() {
    @Override public void snapshot(MetricsRecordBuilder rb, boolean all) {
      try {
        Object ret = method.invoke(obj, (Object[])null);
        if (isInt(type)) {
          rb.addCounter(info, ((Integer) ret).intValue());
        } else {
          rb.addCounter(info, ((Long) ret).longValue());
        }
      } catch (Exception ex) {
        LOG.error("Error invoking method "+ method.getName(), ex);
      }
    }
  };
}
 
Example #20
Source File: TestJvmMetrics.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that snapshotting JvmMetrics produces the expected record,
 * tags, and one metric per JvmMetricsInfo entry of the right kind.
 */
@Test public void testPresence() {
  JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(new Configuration());
  JvmMetrics jvmMetrics = new JvmMetrics("test", "test");
  jvmMetrics.setPauseMonitor(pauseMonitor);
  MetricsRecordBuilder rb = getMetrics(jvmMetrics);
  MetricsCollector mc = rb.parent();

  verify(mc).addRecord(JvmMetrics);
  verify(rb).tag(ProcessName, "test");
  verify(rb).tag(SessionId, "test");
  for (JvmMetricsInfo info : JvmMetricsInfo.values()) {
    String name = info.name();
    if (name.startsWith("Mem")) {
      // Memory metrics are float gauges.
      verify(rb).addGauge(eq(info), anyFloat());
    } else if (name.startsWith("Gc") || name.startsWith("Log")) {
      // GC and logging metrics are long counters (prefixes are disjoint
      // from "Threads", so merging the branches preserves behavior).
      verify(rb).addCounter(eq(info), anyLong());
    } else if (name.startsWith("Threads")) {
      // Thread-state metrics are int gauges.
      verify(rb).addGauge(eq(info), anyInt());
    }
  }
}
 
Example #21
Source File: JvmMetrics.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Emits per-collector GC count/time counters plus aggregate totals, and the
 * pause-monitor counters when a pause monitor is attached.
 *
 * @param rb builder for the metrics record being assembled
 */
private void getGcUsage(MetricsRecordBuilder rb) {
  long totalCount = 0;
  long totalTimeMillis = 0;
  for (GarbageCollectorMXBean gcBean : gcBeans) {
    long collections = gcBean.getCollectionCount();
    long millis = gcBean.getCollectionTime();
    MetricsInfo[] gcInfo = getGcInfo(gcBean.getName());
    rb.addCounter(gcInfo[0], collections).addCounter(gcInfo[1], millis);
    totalCount += collections;
    totalTimeMillis += millis;
  }
  rb.addCounter(GcCount, totalCount)
    .addCounter(GcTimeMillis, totalTimeMillis);

  if (pauseMonitor != null) {
    // Note: "Threadhold" is the actual (misspelled) upstream accessor name.
    rb.addCounter(GcNumWarnThresholdExceeded,
        pauseMonitor.getNumGcWarnThreadholdExceeded());
    rb.addCounter(GcNumInfoThresholdExceeded,
        pauseMonitor.getNumGcInfoThresholdExceeded());
    rb.addCounter(GcTotalExtraSleepTime,
        pauseMonitor.getTotalGcExtraSleepTime());
  }
}
 
Example #22
Source File: TestStartupProgressMetrics.java    From big-c with Apache License 2.0 6 votes vote down vote up
@Test
public void testRunningState() {
  setStartupProgressForRunningState(startupProgress);

  MetricsRecordBuilder record = getMetrics(metrics, true);
  assertTrue(getLongCounter("ElapsedTime", record) >= 0L);
  assertGauge("PercentComplete", 0.375f, record);

  // Fsimage loading is complete: 100 of 100.
  assertCounter("LoadingFsImageCount", 100L, record);
  assertTrue(getLongCounter("LoadingFsImageElapsedTime", record) >= 0L);
  assertCounter("LoadingFsImageTotal", 100L, record);
  assertGauge("LoadingFsImagePercentComplete", 1.0f, record);

  // Edits loading is half done: 100 of 200.
  assertCounter("LoadingEditsCount", 100L, record);
  assertTrue(getLongCounter("LoadingEditsElapsedTime", record) >= 0L);
  assertCounter("LoadingEditsTotal", 200L, record);
  assertGauge("LoadingEditsPercentComplete", 0.5f, record);

  // Checkpoint saving and safe mode have not started.
  for (String phase : new String[] {"SavingCheckpoint", "SafeMode"}) {
    assertCounter(phase + "Count", 0L, record);
    assertCounter(phase + "ElapsedTime", 0L, record);
    assertCounter(phase + "Total", 0L, record);
    assertGauge(phase + "PercentComplete", 0.0f, record);
  }
}
 
Example #23
Source File: TestUserGroupInformation.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Asserts that the UGI kerberos-login metrics reflect the given numbers of
 * successful and failed logins; a value of 0 skips that side's checks.
 *
 * @param success expected LoginSuccessNumOps count (checked when > 0)
 * @param failure expected LoginFailureNumOps count (checked when > 0)
 */
public static void verifyLoginMetrics(long success, int failure)
    throws IOException {
  // Ensure metrics related to kerberos login is updated.
  MetricsRecordBuilder rb = getMetrics("UgiMetrics");
  if (success > 0) {
    assertCounter("LoginSuccessNumOps", success, rb);
    assertGaugeGt("LoginSuccessAvgTime", 0, rb);
  }
  if (failure > 0) {
    // "LoginFailureNumPos" was a typo: the MutableRate is "LoginFailure",
    // so its ops counter is "LoginFailureNumOps" (mirrors the success case).
    assertCounter("LoginFailureNumOps", failure, rb);
    assertGaugeGt("LoginFailureAvgTime", 0, rb);
  }
}
 
Example #24
Source File: TestUserGroupInformation.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Verifies the UGI kerberos-login success/failure metrics; passing 0 for
 * either argument skips the corresponding checks.
 *
 * @param success expected LoginSuccessNumOps count (checked when > 0)
 * @param failure expected LoginFailureNumOps count (checked when > 0)
 */
public static void verifyLoginMetrics(long success, int failure)
    throws IOException {
  // Ensure metrics related to kerberos login is updated.
  MetricsRecordBuilder rb = getMetrics("UgiMetrics");
  if (success > 0) {
    assertCounter("LoginSuccessNumOps", success, rb);
    assertGaugeGt("LoginSuccessAvgTime", 0, rb);
  }
  if (failure > 0) {
    // Fixed counter name: the rate metric is "LoginFailure", so its ops
    // counter is "LoginFailureNumOps" ("NumPos" was a typo).
    assertCounter("LoginFailureNumOps", failure, rb);
    assertGaugeGt("LoginFailureAvgTime", 0, rb);
  }
}
 
Example #25
Source File: TestMetricsAnnotations.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Bumps every annotated field of MyMetrics once and verifies each produces
 * the expected counter/gauge calls on the record builder.
 */
@Test public void testFields() {
  MyMetrics metrics = new MyMetrics();
  MetricsSource source = MetricsAnnotations.makeSource(metrics);

  // One increment/sample per field.
  metrics.c1.incr();
  metrics.c2.incr();
  metrics.g1.incr();
  metrics.g2.incr();
  metrics.g3.incr();
  metrics.r1.add(1);
  metrics.s1.add(1);
  metrics.rs1.add("rs1", 1);

  MetricsRecordBuilder builder = getMetrics(source);

  // Plain counters and gauges.
  verify(builder).addCounter(info("C1", "C1"), 1);
  verify(builder).addCounter(info("Counter2", "Counter2 desc"), 1L);
  verify(builder).addGauge(info("G1", "G1"), 1);
  verify(builder).addGauge(info("G2", "G2"), 1);
  verify(builder).addGauge(info("G3", "g3 desc"), 1L);
  // Rate-style fields expand to a NumOps counter plus an AvgTime gauge.
  verify(builder).addCounter(info("R1NumOps", "Number of ops for r1"), 1L);
  verify(builder).addGauge(info("R1AvgTime", "Average time for r1"), 1.0);
  verify(builder).addCounter(info("S1NumOps", "Number of ops for s1"), 1L);
  verify(builder).addGauge(info("S1AvgTime", "Average time for s1"), 1.0);
  verify(builder).addCounter(info("Rs1NumOps", "Number of ops for rs1"), 1L);
  verify(builder).addGauge(info("Rs1AvgTime", "Average time for rs1"), 1.0);
}
 
Example #26
Source File: HadoopMetrics2ReporterTest.java    From kylin-on-parquet-v2 with Apache License 2.0 5 votes vote down vote up
@SuppressWarnings("rawtypes")
@Test
public void cachedMetricsAreClearedAfterCycle() {
    // A metrics2 reporting cycle must swap out the cached metric maps so
    // nothing is reported twice on the next cycle.
    TreeMap<String, Counter> oldCounters = new TreeMap<>();
    TreeMap<String, Gauge> oldGauges = new TreeMap<>();
    TreeMap<String, Histogram> oldHistograms = new TreeMap<>();
    TreeMap<String, Meter> oldMeters = new TreeMap<>();
    TreeMap<String, Timer> oldTimers = new TreeMap<>();

    metrics2Reporter.setDropwizardCounters(oldCounters);
    metrics2Reporter.setDropwizardGauges(oldGauges);
    metrics2Reporter.setDropwizardHistograms(oldHistograms);
    metrics2Reporter.setDropwizardMeters(oldMeters);
    metrics2Reporter.setDropwizardTimers(oldTimers);

    MetricsCollector collector = mock(MetricsCollector.class);
    MetricsRecordBuilder recordBuilder = mock(MetricsRecordBuilder.class);
    Mockito.when(collector.addRecord(recordName)).thenReturn(recordBuilder);

    metrics2Reporter.getMetrics(collector, true);

    // Each map must now be a fresh, empty instance — not the one we set.
    assertTrue(oldCounters != metrics2Reporter.getDropwizardCounters());
    assertEquals(0, metrics2Reporter.getDropwizardCounters().size());
    assertTrue(oldGauges != metrics2Reporter.getDropwizardGauges());
    assertEquals(0, metrics2Reporter.getDropwizardGauges().size());
    assertTrue(oldHistograms != metrics2Reporter.getDropwizardHistograms());
    assertEquals(0, metrics2Reporter.getDropwizardHistograms().size());
    assertTrue(oldMeters != metrics2Reporter.getDropwizardMeters());
    assertEquals(0, metrics2Reporter.getDropwizardMeters().size());
    assertTrue(oldTimers != metrics2Reporter.getDropwizardTimers());
    assertEquals(0, metrics2Reporter.getDropwizardTimers().size());
}
 
Example #27
Source File: TestShuffleHandler.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/** Asserts the ShuffleMetrics source reports the given counters and gauge. */
static void checkShuffleMetrics(MetricsSystem ms, long bytes, int failed,
                                int succeeded, int connections) {
  MetricsRecordBuilder builder = getMetrics(ms.getSource("ShuffleMetrics"));
  assertCounter("ShuffleOutputBytes", bytes, builder);
  assertCounter("ShuffleOutputsFailed", failed, builder);
  assertCounter("ShuffleOutputsOK", succeeded, builder);
  assertGauge("ShuffleConnections", connections, builder);
}
 
Example #28
Source File: MutableFastCounter.java    From hbase with Apache License 2.0 5 votes vote down vote up
/** Emits the counter into the builder when it changed, or always if forced. */
@Override
public void snapshot(MetricsRecordBuilder builder, boolean all) {
  if (!all && !changed()) {
    return; // nothing new to report and not forced
  }
  builder.addCounter(info(), value());
  clearChanged();
}
 
Example #29
Source File: MutableCounterInt.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Writes this counter into the record builder if its value changed since
 * the last snapshot, or unconditionally when {@code all} is set.
 */
@Override
public void snapshot(MetricsRecordBuilder builder, boolean all) {
  boolean shouldEmit = all || changed();
  if (shouldEmit) {
    builder.addCounter(info(), value());
    clearChanged();
  }
}
 
Example #30
Source File: TestNameNodeMetrics.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Test that capacity metrics are exported and pass
 * basic sanity tests.
 */
@Test (timeout = 1800)
public void testCapacityMetrics() throws Exception {
  MetricsRecordBuilder rb = getMetrics(NS_METRICS);
  long capacityTotal = MetricsAsserts.getLongGauge("CapacityTotal", rb);
  // Use JUnit assertions: the Java `assert` keyword is a no-op unless the
  // JVM runs with -ea, so the original checks could silently never fire.
  assertTrue(capacityTotal != 0);
  long capacityUsed = MetricsAsserts.getLongGauge("CapacityUsed", rb);
  long capacityRemaining =
      MetricsAsserts.getLongGauge("CapacityRemaining", rb);
  long capacityUsedNonDFS =
      MetricsAsserts.getLongGauge("CapacityUsedNonDFS", rb);
  // Used + remaining + non-DFS should account for the entire capacity.
  assertTrue(capacityUsed + capacityRemaining + capacityUsedNonDFS ==
      capacityTotal);
}