Java Code Examples for org.apache.hadoop.metrics2.MetricsRecord#metrics()

The following examples show how to use org.apache.hadoop.metrics2.MetricsRecord#metrics(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: AzureBlobStorageTestAccount.java    From hadoop with Apache License 2.0 6 votes vote down vote up
public Number getLatestMetricValue(String metricName, Number defaultValue)
    throws IndexOutOfBoundsException{
  // Scan every record captured so far; a later record overwrites an
  // earlier one, so the value returned is the most recently reported.
  Number latest = null;
  boolean matched = false;
  for (MetricsRecord record : allMetrics) {
    // Skip records that were emitted by other file system instances.
    if (!wasGeneratedByMe(record)) {
      continue;
    }
    for (AbstractMetric metric : record.metrics()) {
      if (metric.name().equalsIgnoreCase(metricName)) {
        matched = true;
        latest = metric.value();
        break; // take at most one value per record
      }
    }
  }
  if (matched) {
    return latest;
  }
  if (defaultValue != null) {
    return defaultValue;
  }
  // No match and no fallback: signal the missing metric name.
  throw new IndexOutOfBoundsException(metricName);
}
 
Example 2
Source File: FileSink.java    From hadoop with Apache License 2.0 6 votes vote down vote up
@Override
public void putMetrics(MetricsRecord record) {
  // Header: "<timestamp> <context>.<name>".
  writer.print(record.timestamp());
  writer.print(" ");
  writer.print(record.context());
  writer.print(".");
  writer.print(record.name());
  // Tags and metrics share one running separator: ": " before the first
  // item printed, ", " before every subsequent one.
  String sep = ": ";
  for (MetricsTag tag : record.tags()) {
    writer.print(sep);
    writer.print(tag.name());
    writer.print("=");
    writer.print(tag.value());
    sep = ", ";
  }
  for (AbstractMetric metric : record.metrics()) {
    writer.print(sep);
    writer.print(metric.name());
    writer.print("=");
    writer.print(metric.value());
    sep = ", ";
  }
  writer.println();
}
 
Example 3
Source File: MetricsCache.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Merge an incoming metrics record into the cache and return the cached
 * record that now holds the merged state.
 * @param mr the update record
 * @param includingTags also cache tag values (for later lookup by name) if true
 * @return the updated cache record
 */
public Record update(MetricsRecord mr, boolean includingTags) {
  // Cached records are keyed first by record name, then by tag set.
  RecordCache byTags = map.get(mr.name());
  if (byTags == null) {
    byTags = new RecordCache();
    map.put(mr.name(), byTags);
  }
  Collection<MetricsTag> tagSet = mr.tags();
  Record cached = byTags.get(tagSet);
  if (cached == null) {
    cached = new Record();
    byTags.put(tagSet, cached);
  }
  // A newer metric value replaces an older one with the same name.
  for (AbstractMetric metric : mr.metrics()) {
    cached.metrics.put(metric.name(), metric);
  }
  if (includingTags) {
    // mostly for some sinks that include tags as part of a dense schema
    for (MetricsTag tag : mr.tags()) {
      cached.tags.put(tag.name(), tag.value());
    }
  }
  return cached;
}
 
Example 4
Source File: TestMetricsSystemImpl.java    From hadoop with Apache License 2.0 6 votes vote down vote up
@Override
public void putMetrics(MetricsRecord record) {
  // Only records named "threadSourceRec<N>" are of interest; N selects
  // the slot in `collected` that receives the g1 gauge value.
  final String prefix = "threadSourceRec";
  if (record.name().startsWith(prefix)) {
    final int recordNumber = Integer.parseInt(
        record.name().substring(prefix.length()));
    for (AbstractMetric m : record.metrics()) {
      if (m.name().equalsIgnoreCase("g1")) {
        // Store the first g1 value seen in this record and stop.
        collected[recordNumber].set(m.value().longValue());
        return;
      }
    }
  }
}
 
Example 5
Source File: AzureBlobStorageTestAccount.java    From big-c with Apache License 2.0 6 votes vote down vote up
public Number getLatestMetricValue(String metricName, Number defaultValue)
    throws IndexOutOfBoundsException{
  // Scan every record captured so far; a later record overwrites an
  // earlier one, so the value returned is the most recently reported.
  Number latest = null;
  boolean matched = false;
  for (MetricsRecord record : allMetrics) {
    // Skip records that were emitted by other file system instances.
    if (!wasGeneratedByMe(record)) {
      continue;
    }
    for (AbstractMetric metric : record.metrics()) {
      if (metric.name().equalsIgnoreCase(metricName)) {
        matched = true;
        latest = metric.value();
        break; // take at most one value per record
      }
    }
  }
  if (matched) {
    return latest;
  }
  if (defaultValue != null) {
    return defaultValue;
  }
  // No match and no fallback: signal the missing metric name.
  throw new IndexOutOfBoundsException(metricName);
}
 
Example 6
Source File: FileSink.java    From big-c with Apache License 2.0 6 votes vote down vote up
@Override
public void putMetrics(MetricsRecord record) {
  // Header: "<timestamp> <context>.<name>".
  writer.print(record.timestamp());
  writer.print(" ");
  writer.print(record.context());
  writer.print(".");
  writer.print(record.name());
  // Tags and metrics share one running separator: ": " before the first
  // item printed, ", " before every subsequent one.
  String sep = ": ";
  for (MetricsTag tag : record.tags()) {
    writer.print(sep);
    writer.print(tag.name());
    writer.print("=");
    writer.print(tag.value());
    sep = ", ";
  }
  for (AbstractMetric metric : record.metrics()) {
    writer.print(sep);
    writer.print(metric.name());
    writer.print("=");
    writer.print(metric.value());
    sep = ", ";
  }
  writer.println();
}
 
Example 7
Source File: MetricsCache.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Merge an incoming metrics record into the cache and return the cached
 * record that now holds the merged state.
 * @param mr the update record
 * @param includingTags also cache tag values (for later lookup by name) if true
 * @return the updated cache record
 */
public Record update(MetricsRecord mr, boolean includingTags) {
  // Cached records are keyed first by record name, then by tag set.
  RecordCache byTags = map.get(mr.name());
  if (byTags == null) {
    byTags = new RecordCache();
    map.put(mr.name(), byTags);
  }
  Collection<MetricsTag> tagSet = mr.tags();
  Record cached = byTags.get(tagSet);
  if (cached == null) {
    cached = new Record();
    byTags.put(tagSet, cached);
  }
  // A newer metric value replaces an older one with the same name.
  for (AbstractMetric metric : mr.metrics()) {
    cached.metrics.put(metric.name(), metric);
  }
  if (includingTags) {
    // mostly for some sinks that include tags as part of a dense schema
    for (MetricsTag tag : mr.tags()) {
      cached.tags.put(tag.name(), tag.value());
    }
  }
  return cached;
}
 
Example 8
Source File: TestMetricsSystemImpl.java    From big-c with Apache License 2.0 6 votes vote down vote up
@Override
public void putMetrics(MetricsRecord record) {
  // Only records named "threadSourceRec<N>" are of interest; N selects
  // the slot in `collected` that receives the g1 gauge value.
  final String prefix = "threadSourceRec";
  if (record.name().startsWith(prefix)) {
    final int recordNumber = Integer.parseInt(
        record.name().substring(prefix.length()));
    for (AbstractMetric m : record.metrics()) {
      if (m.name().equalsIgnoreCase("g1")) {
        // Store the first g1 value seen in this record and stop.
        collected[recordNumber].set(m.value().longValue());
        return;
      }
    }
  }
}
 
Example 9
Source File: LoggingSink.java    From phoenix with Apache License 2.0 6 votes vote down vote up
@Override
public void putMetrics(MetricsRecord record) {
    // This sink is lightweight, so records are logged as soon as they
    // arrive instead of being buffered until flush().
    if (!LOG.isDebugEnabled()) {
        return;
    }
    LOG.debug("Found record:" + record.name());
    for (AbstractMetric m : record.metrics()) {
        boolean isTracingMetric =
                m.name().startsWith(TracingUtils.METRIC_SOURCE_KEY);
        // Only the tracing metrics are of interest; skip everything else.
        if (isTracingMetric) {
            LOG.debug("\t metric:" + m);
        }
    }
}
 
Example 10
Source File: LoggingSink.java    From phoenix with Apache License 2.0 6 votes vote down vote up
@Override
public void putMetrics(MetricsRecord record) {
    // This sink is lightweight, so records are logged as soon as they
    // arrive instead of being buffered until flush().
    if (!LOGGER.isDebugEnabled()) {
        return;
    }
    LOGGER.debug("Found record:" + record.name());
    for (AbstractMetric m : record.metrics()) {
        boolean isTracingMetric =
                m.name().startsWith(TracingUtils.METRIC_SOURCE_KEY);
        // Only the tracing metrics are of interest; skip everything else.
        if (isTracingMetric) {
            LOGGER.debug("\t metric:" + m);
        }
    }
}
 
Example 11
Source File: GlobalPhoenixMetricsTestSink.java    From phoenix with Apache License 2.0 5 votes vote down vote up
@Override
public void putMetrics(MetricsRecord metricsRecord) {
    // Only the Phoenix global-metrics record is captured; everything
    // else is ignored.
    if (!metricsRecord.name().equals(PHOENIX_METRICS_RECORD_NAME)) {
        return;
    }
    // The lock guards the test's reads against this write.
    synchronized (GlobalPhoenixMetricsTestSink.lock) {
        GlobalPhoenixMetricsTestSink.metrics = metricsRecord.metrics();
    }
}
 
Example 12
Source File: PrometheusMetricsSink.java    From hadoop-ozone with Apache License 2.0 4 votes vote down vote up
@Override
public void putMetrics(MetricsRecord metricsRecord) {
  // Render each COUNTER/GAUGE metric as a Prometheus exposition-format
  // entry ("# TYPE ..." comment plus "name{tags} value") and cache it
  // under its key so the latest value of each series is retained.
  for (AbstractMetric metric : metricsRecord.metrics()) {
    if (metric.type() == MetricType.COUNTER
        || metric.type() == MetricType.GAUGE) {

      String key = prometheusName(
          metricsRecord.name(), metric.name());

      StringBuilder builder = new StringBuilder();
      builder.append("# TYPE ")
          .append(key)
          .append(" ")
          // Locale.ROOT: the type name must lower-case identically in
          // every locale (avoids e.g. the Turkish dotless-i mapping
          // producing an invalid Prometheus type).
          .append(metric.type().toString().toLowerCase(java.util.Locale.ROOT))
          .append("\n");

      StringBuilder prometheusMetricKey = new StringBuilder();
      prometheusMetricKey.append(key)
          .append("{");
      String sep = "";

      //add tags
      for (MetricsTag tag : metricsRecord.tags()) {
        // Locale.ROOT for a locale-independent, stable tag name.
        String tagName = tag.name().toLowerCase(java.util.Locale.ROOT);

        //ignore specific tag which includes sub-hierarchy
        if (!tagName.equals("numopenconnectionsperuser")) {
          prometheusMetricKey.append(sep)
              .append(tagName)
              .append("=\"")
              .append(tag.value())
              .append("\"");
          sep = ",";
        }
      }
      prometheusMetricKey.append("}");

      String prometheusMetricKeyAsString = prometheusMetricKey.toString();
      builder.append(prometheusMetricKeyAsString);
      builder.append(" ");
      builder.append(metric.value());
      builder.append("\n");
      metricLines.put(prometheusMetricKeyAsString, builder.toString());

    }
  }
}
 
Example 13
Source File: HadoopTimelineMetricsSink.java    From ambari-metrics with Apache License 2.0 4 votes vote down vote up
private void parseContainerMetrics(MetricsRecord record,
    ContainerMetric containerMetric) {
  // Copies the well-known container metric names from a Hadoop metrics2
  // record into the corresponding ContainerMetric setters. Metric names
  // not listed here are silently ignored (default branch).
  for (AbstractMetric metric : record.metrics() ) {
    switch (metric.name()) {
    // Physical-memory usage aggregates (MB): average / min / max.
    case "PMemUsageMBsAvgMBs":
      containerMetric.setPmemUsedAvg(metric.value().intValue());
      break;
    case "PMemUsageMBsMinMBs":
      containerMetric.setPmemUsedMin(metric.value().intValue());
      break;
    case "PMemUsageMBsMaxMBs":
      containerMetric.setPmemUsedMax(metric.value().intValue());
      break;
    // Physical-memory usage histogram percentiles (MB).
    case "PMemUsageMBHistogram50thPercentileMBs":
      containerMetric.setPmem50Pct(metric.value().intValue());
      break;
    case "PMemUsageMBHistogram75thPercentileMBs":
      containerMetric.setPmem75Pct(metric.value().intValue());
      break;
    case "PMemUsageMBHistogram90thPercentileMBs":
      containerMetric.setPmem90Pct(metric.value().intValue());
      break;
    case "PMemUsageMBHistogram95thPercentileMBs":
      containerMetric.setPmem95Pct(metric.value().intValue());
      break;
    case "PMemUsageMBHistogram99thPercentileMBs":
      containerMetric.setPmem99Pct(metric.value().intValue());
      break;
    // Configured physical/virtual memory limits (MB).
    case "pMemLimitMBs":
      containerMetric.setPmemLimit(metric.value().intValue());
      break;
    case "vMemLimitMBs":
      containerMetric.setVmemLimit(metric.value().intValue());
      break;
    // Container lifecycle timings and outcome.
    case "launchDurationMs":
      containerMetric.setLaunchDuration(metric.value().longValue());
      break;
    case "localizationDurationMs":
      containerMetric.setLocalizationDuration(metric.value().longValue());
      break;
    case "StartTime":
      containerMetric.setStartTime(metric.value().longValue());
      break;
    case "FinishTime":
      containerMetric.setFinishTime(metric.value().longValue());
      break;
    case "ExitCode":
      containerMetric.setExitCode((metric.value().intValue()));
      break;
    default:
      // Unknown metric name: ignore.
      break;
    }
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug(containerMetric);
  }
}
 
Example 14
Source File: GraphiteSink.java    From hadoop with Apache License 2.0 4 votes vote down vote up
@Override
public void putMetrics(MetricsRecord record) {
    // Graphite path prefix: <metricsPrefix>.<context>.<name>[.tag=value]...
    StringBuilder pathPrefix = new StringBuilder();
    pathPrefix.append(metricsPrefix).append(".")
            .append(record.context()).append(".").append(record.name());

    for (MetricsTag tag : record.tags()) {
        // Tags without a value are left out of the path.
        if (tag.value() != null) {
            pathPrefix.append(".")
                    .append(tag.name())
                    .append("=")
                    .append(tag.value());
        }
    }

    // The record timestamp is in milliseconds while Graphite expects an
    // epoch time in seconds.
    long timestamp = record.timestamp() / 1000L;

    // One plaintext-protocol line per metric: "<path> <value> <seconds>".
    StringBuilder lines = new StringBuilder();
    for (AbstractMetric metric : record.metrics()) {
        lines.append(pathPrefix)
                .append(".")
                .append(metric.name().replace(' ', '.'))
                .append(" ")
                .append(metric.value())
                .append(" ")
                .append(timestamp)
                .append("\n");
    }

    try {
      graphite.write(lines.toString());
    } catch (Exception e) {
      // A failed write leaves the connection in an unknown state: warn
      // and drop the connection so the next put can reconnect.
      LOG.warn("Error sending metrics to Graphite", e);
      try {
        graphite.close();
      } catch (Exception e1) {
        throw new MetricsException("Error closing connection to Graphite", e1);
      }
    }
}
 
Example 15
Source File: GraphiteSink.java    From big-c with Apache License 2.0 4 votes vote down vote up
@Override
public void putMetrics(MetricsRecord record) {
    // Graphite path prefix: <metricsPrefix>.<context>.<name>[.tag=value]...
    StringBuilder pathPrefix = new StringBuilder();
    pathPrefix.append(metricsPrefix).append(".")
            .append(record.context()).append(".").append(record.name());

    for (MetricsTag tag : record.tags()) {
        // Tags without a value are left out of the path.
        if (tag.value() != null) {
            pathPrefix.append(".")
                    .append(tag.name())
                    .append("=")
                    .append(tag.value());
        }
    }

    // The record timestamp is in milliseconds while Graphite expects an
    // epoch time in seconds.
    long timestamp = record.timestamp() / 1000L;

    // One plaintext-protocol line per metric: "<path> <value> <seconds>".
    StringBuilder lines = new StringBuilder();
    for (AbstractMetric metric : record.metrics()) {
        lines.append(pathPrefix)
                .append(".")
                .append(metric.name().replace(' ', '.'))
                .append(" ")
                .append(metric.value())
                .append(" ")
                .append(timestamp)
                .append("\n");
    }

    try {
      graphite.write(lines.toString());
    } catch (Exception e) {
      // A failed write leaves the connection in an unknown state: warn
      // and drop the connection so the next put can reconnect.
      LOG.warn("Error sending metrics to Graphite", e);
      try {
        graphite.close();
      } catch (Exception e1) {
        throw new MetricsException("Error closing connection to Graphite", e1);
      }
    }
}