Java Code Examples for org.apache.hadoop.metrics2.MetricsRecord#tags()

The following examples show how to use org.apache.hadoop.metrics2.MetricsRecord#tags(). Each example is drawn from an open-source project; the source file, project, and license are noted above the snippet.
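For orientation before the project snippets, here is a minimal sketch of a custom sink that iterates record.tags() and record.metrics(). The class name TagPrintingSink is made up for illustration, and the init signature assumes a Hadoop 3.x classpath (org.apache.commons.configuration2); older releases import org.apache.commons.configuration.SubsetConfiguration instead.

import org.apache.commons.configuration2.SubsetConfiguration;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.metrics2.MetricsTag;

// Hypothetical example class, not taken from any of the projects below.
public class TagPrintingSink implements MetricsSink {

  @Override
  public void init(SubsetConfiguration conf) {
    // nothing to configure in this sketch
  }

  @Override
  public void putMetrics(MetricsRecord record) {
    StringBuilder sb = new StringBuilder(record.context())
        .append('.').append(record.name());
    // record.tags() returns the record's MetricsTag entries
    // (e.g. Context, Hostname, plus any custom tags)
    for (MetricsTag tag : record.tags()) {
      sb.append(' ').append(tag.name()).append('=').append(tag.value());
    }
    for (AbstractMetric metric : record.metrics()) {
      sb.append(' ').append(metric.name()).append('=').append(metric.value());
    }
    System.out.println(sb);
  }

  @Override
  public void flush() {
    // nothing buffered
  }
}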
Example 1
Source File: HadoopTimelineMetricsSink.java    From ambari-metrics with Apache License 2.0
private void emitContainerMetrics(MetricsRecord record) {

    ContainerMetric containerMetric = new ContainerMetric();
    containerMetric.setHostName(hostName);

    for (MetricsTag tag : record.tags()) {
      if (tag.name().equals("ContainerResource")) {
        containerMetric.setContainerId(tag.value());
      }
    }

    parseContainerMetrics(record, containerMetric);
    List<ContainerMetric> list = new ArrayList<>();
    list.add(containerMetric);
    String jsonData = null;
    try {
      jsonData = mapper.writeValueAsString(list);
    } catch (IOException e) {
      LOG.error("Unable to parse container metrics ", e);
    }
    if (jsonData != null) {
      String collectorHost = getCurrentCollectorHost();
      containerMetricsUri = constructContainerMetricUri(protocol, collectorHost, port);
      emitMetricsJson(containerMetricsUri, jsonData);
    }
  }
 
Example 2
Source File: HadoopTimelineMetricsSink.java    From ambari-metrics with Apache License 2.0
@InterfaceAudience.Private
public void appendPrefix(MetricsRecord record, StringBuilder sb) {
  String contextName = record.context();
  Collection<MetricsTag> tags = record.tags();
  if (useTagsMap.containsKey(contextName)) {
    Set<String> useTags = useTagsMap.get(contextName);
    for (MetricsTag t : tags) {
      if (useTags == null || useTags.contains(t.name())) {

        // the context is always skipped here because it is always added

        // the hostname is always skipped to avoid case-mismatches
        // from different DNSes.

        if (t.info() != MsInfo.Context && t.info() != MsInfo.Hostname && t.value() != null) {
          sb.append('.').append(t.name()).append('=').append(t.value());
        }
      }
    }
  }
}
 
Example 3
Source File: FileSink.java    From hadoop with Apache License 2.0
@Override
public void putMetrics(MetricsRecord record) {
  writer.print(record.timestamp());
  writer.print(" ");
  writer.print(record.context());
  writer.print(".");
  writer.print(record.name());
  String separator = ": ";
  for (MetricsTag tag : record.tags()) {
    writer.print(separator);
    separator = ", ";
    writer.print(tag.name());
    writer.print("=");
    writer.print(tag.value());
  }
  for (AbstractMetric metric : record.metrics()) {
    writer.print(separator);
    separator = ", ";
    writer.print(metric.name());
    writer.print("=");
    writer.print(metric.value());
  }
  writer.println();
}
 
Example 4
Source File: GangliaSink30.java    From hadoop with Apache License 2.0
@InterfaceAudience.Private
public void appendPrefix(MetricsRecord record, StringBuilder sb) {
  String contextName = record.context();
  Collection<MetricsTag> tags = record.tags();
  if (useTagsMap.containsKey(contextName)) {
    Set<String> useTags = useTagsMap.get(contextName);
    for (MetricsTag t : tags) {
      if (useTags == null || useTags.contains(t.name())) {

        // the context is always skipped here because it is always added
        
        // the hostname is always skipped to avoid case-mismatches 
        // from different DNSes.

        if (t.info() != MsInfo.Context && t.info() != MsInfo.Hostname && t.value() != null) {
          sb.append('.').append(t.name()).append('=').append(t.value());
        }
      }
    }
  }          
}
 
Example 5
Source File: MetricsCache.java    From hadoop with Apache License 2.0
/**
 * Update the cache and return the current cached record
 * @param mr the update record
 * @param includingTags cache tag values (for later lookup by name) if true
 * @return the updated cache record
 */
public Record update(MetricsRecord mr, boolean includingTags) {
  String name = mr.name();
  RecordCache recordCache = map.get(name);
  if (recordCache == null) {
    recordCache = new RecordCache();
    map.put(name, recordCache);
  }
  Collection<MetricsTag> tags = mr.tags();
  Record record = recordCache.get(tags);
  if (record == null) {
    record = new Record();
    recordCache.put(tags, record);
  }
  for (AbstractMetric m : mr.metrics()) {
    record.metrics.put(m.name(), m);
  }
  if (includingTags) {
    // mostly for some sinks that include tags as part of a dense schema
    for (MetricsTag t : mr.tags()) {
      record.tags.put(t.name(), t.value());
    }
  }
  return record;
}
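The update method above stores tag values in the cached record when includingTags is true, so a sink can later look them up by name. A hedged usage sketch follows; the tag and metric names are only illustrative and not taken from the file above.

// Inside a sink's putMetrics(MetricsRecord record), for example:
MetricsCache cache = new MetricsCache();
MetricsCache.Record cached = cache.update(record, true); // includingTags = true
String host = cached.getTag("Hostname");                 // null if the record has no such tag
Number queueOps = cached.getMetric("RpcQueueTimeNumOps"); // illustrative metric name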
 
Example 6
Source File: FileSink.java    From big-c with Apache License 2.0
@Override
public void putMetrics(MetricsRecord record) {
  writer.print(record.timestamp());
  writer.print(" ");
  writer.print(record.context());
  writer.print(".");
  writer.print(record.name());
  String separator = ": ";
  for (MetricsTag tag : record.tags()) {
    writer.print(separator);
    separator = ", ";
    writer.print(tag.name());
    writer.print("=");
    writer.print(tag.value());
  }
  for (AbstractMetric metric : record.metrics()) {
    writer.print(separator);
    separator = ", ";
    writer.print(metric.name());
    writer.print("=");
    writer.print(metric.value());
  }
  writer.println();
}
 
Example 7
Source File: GangliaSink30.java    From big-c with Apache License 2.0
@InterfaceAudience.Private
public void appendPrefix(MetricsRecord record, StringBuilder sb) {
  String contextName = record.context();
  Collection<MetricsTag> tags = record.tags();
  if (useTagsMap.containsKey(contextName)) {
    Set<String> useTags = useTagsMap.get(contextName);
    for (MetricsTag t : tags) {
      if (useTags == null || useTags.contains(t.name())) {

        // the context is always skipped here because it is always added
        
        // the hostname is always skipped to avoid case-mismatches 
        // from different DNSes.

        if (t.info() != MsInfo.Context && t.info() != MsInfo.Hostname && t.value() != null) {
          sb.append('.').append(t.name()).append('=').append(t.value());
        }
      }
    }
  }          
}
 
Example 8
Source File: MetricsCache.java    From big-c with Apache License 2.0
/**
 * Update the cache and return the current cached record
 * @param mr the update record
 * @param includingTags cache tag values (for later lookup by name) if true
 * @return the updated cache record
 */
public Record update(MetricsRecord mr, boolean includingTags) {
  String name = mr.name();
  RecordCache recordCache = map.get(name);
  if (recordCache == null) {
    recordCache = new RecordCache();
    map.put(name, recordCache);
  }
  Collection<MetricsTag> tags = mr.tags();
  Record record = recordCache.get(tags);
  if (record == null) {
    record = new Record();
    recordCache.put(tags, record);
  }
  for (AbstractMetric m : mr.metrics()) {
    record.metrics.put(m.name(), m);
  }
  if (includingTags) {
    // mostly for some sinks that include tags as part of a dense schema
    for (MetricsTag t : mr.tags()) {
      record.tags.put(t.name(), t.value());
    }
  }
  return record;
}
 
Example 9
Source File: AzureBlobStorageTestAccount.java    From hadoop with Apache License 2.0
/**
 * Checks if the given record was generated by my WASB file system instance.
 * @param currentRecord The metrics record to check.
 * @return true if the record is tagged with this file system instance's wasbFileSystemId.
 */
private boolean wasGeneratedByMe(MetricsRecord currentRecord) {
  String myFsId = fs.getInstrumentation().getFileSystemInstanceId().toString();
  for (MetricsTag currentTag : currentRecord.tags()) {
    if (currentTag.name().equalsIgnoreCase("wasbFileSystemId")) {
      return currentTag.value().equals(myFsId);
    }
  }
  return false;
}
 
Example 10
Source File: AzureBlobStorageTestAccount.java    From big-c with Apache License 2.0
/**
 * Checks if the given record was generated by my WASB file system instance.
 * @param currentRecord The metrics record to check.
 * @return true if the record is tagged with this file system instance's wasbFileSystemId.
 */
private boolean wasGeneratedByMe(MetricsRecord currentRecord) {
  String myFsId = fs.getInstrumentation().getFileSystemInstanceId().toString();
  for (MetricsTag currentTag : currentRecord.tags()) {
    if (currentTag.name().equalsIgnoreCase("wasbFileSystemId")) {
      return currentTag.value().equals(myFsId);
    }
  }
  return false;
}
 
Example 11
Source File: PrometheusMetricsSink.java    From hadoop-ozone with Apache License 2.0
@Override
public void putMetrics(MetricsRecord metricsRecord) {
  for (AbstractMetric metrics : metricsRecord.metrics()) {
    if (metrics.type() == MetricType.COUNTER
        || metrics.type() == MetricType.GAUGE) {

      String key = prometheusName(
          metricsRecord.name(), metrics.name());

      StringBuilder builder = new StringBuilder();
      builder.append("# TYPE ")
          .append(key)
          .append(" ")
          .append(metrics.type().toString().toLowerCase())
          .append("\n");

      StringBuilder prometheusMetricKey = new StringBuilder();
      prometheusMetricKey.append(key)
          .append("{");
      String sep = "";

      // add each record tag as a Prometheus label
      for (MetricsTag tag : metricsRecord.tags()) {
        String tagName = tag.name().toLowerCase();

        // ignore this specific tag, which includes a sub-hierarchy
        if (!tagName.equals("numopenconnectionsperuser")) {
          prometheusMetricKey.append(sep)
              .append(tagName)
              .append("=\"")
              .append(tag.value())
              .append("\"");
          sep = ",";
        }
      }
      prometheusMetricKey.append("}");

      String prometheusMetricKeyAsString = prometheusMetricKey.toString();
      builder.append(prometheusMetricKeyAsString);
      builder.append(" ");
      builder.append(metrics.value());
      builder.append("\n");
      metricLines.put(prometheusMetricKeyAsString, builder.toString());

    }
  }
}
 
Example 12
Source File: GraphiteSink.java    From hadoop with Apache License 2.0
@Override
public void putMetrics(MetricsRecord record) {
    StringBuilder lines = new StringBuilder();
    StringBuilder metricsPathPrefix = new StringBuilder();

    // Configure the hierarchical place to display the graph.
    metricsPathPrefix.append(metricsPrefix).append(".")
            .append(record.context()).append(".").append(record.name());

    for (MetricsTag tag : record.tags()) {
        if (tag.value() != null) {
            metricsPathPrefix.append(".");
            metricsPathPrefix.append(tag.name());
            metricsPathPrefix.append("=");
            metricsPathPrefix.append(tag.value());
        }
    }

    // The record timestamp is in milliseconds while Graphite expects an epoch time in seconds.
    long timestamp = record.timestamp() / 1000L;

    // Collect datapoints.
    for (AbstractMetric metric : record.metrics()) {
        lines.append(
                metricsPathPrefix.toString() + "."
                        + metric.name().replace(' ', '.')).append(" ")
                .append(metric.value()).append(" ").append(timestamp)
                .append("\n");
    }

    try {
      graphite.write(lines.toString());
    } catch (Exception e) {
      LOG.warn("Error sending metrics to Graphite", e);
      try {
        graphite.close();
      } catch (Exception e1) {
        throw new MetricsException("Error closing connection to Graphite", e1);
      }
    }
}
 
Example 13
Source File: GraphiteSink.java    From big-c with Apache License 2.0
@Override
public void putMetrics(MetricsRecord record) {
    StringBuilder lines = new StringBuilder();
    StringBuilder metricsPathPrefix = new StringBuilder();

    // Configure the hierarchical place to display the graph.
    metricsPathPrefix.append(metricsPrefix).append(".")
            .append(record.context()).append(".").append(record.name());

    for (MetricsTag tag : record.tags()) {
        if (tag.value() != null) {
            metricsPathPrefix.append(".");
            metricsPathPrefix.append(tag.name());
            metricsPathPrefix.append("=");
            metricsPathPrefix.append(tag.value());
        }
    }

    // The record timestamp is in milliseconds while Graphite expects an epoch time in seconds.
    long timestamp = record.timestamp() / 1000L;

    // Collect datapoints.
    for (AbstractMetric metric : record.metrics()) {
        lines.append(
                metricsPathPrefix.toString() + "."
                        + metric.name().replace(' ', '.')).append(" ")
                .append(metric.value()).append(" ").append(timestamp)
                .append("\n");
    }

    try {
      graphite.write(lines.toString());
    } catch (Exception e) {
      LOG.warn("Error sending metrics to Graphite", e);
      try {
        graphite.close();
      } catch (Exception e1) {
        throw new MetricsException("Error closing connection to Graphite", e1);
      }
    }
}