Java Code Examples for org.apache.hadoop.metrics.MetricsRecord

The following examples show how to use org.apache.hadoop.metrics.MetricsRecord. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: RDFS   Source File: ProxyJobTracker.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public void doUpdates(MetricsContext unused) {
  synchronized (aggregateJobStats) {
    // Fold the cluster-wide aggregates into the top-level record; the
    // helpers reset their accumulators after pushing.
    aggregateJobStats.incrementMetricsAndReset(metricsRecord);
    incrementMetricsAndReset(metricsRecord, aggregateCounters);

    // Repeat for each pool's dedicated record.
    for (Map.Entry<String, MetricsRecord> poolEntry :
      poolToMetricsRecord.entrySet()) {
      String poolName = poolEntry.getKey();
      MetricsRecord poolRecord = poolEntry.getValue();

      poolToJobStats.get(poolName).incrementMetricsAndReset(poolRecord);
      incrementMetricsAndReset(poolRecord, poolToJobCounters.get(poolName));
    }
  }
}
 
Example 2
Source Project: RDFS   Source File: ProxyJobTracker.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Copies every counter in {@code counters} into {@code record} as an
 * increment keyed by a sanitized {@code <group>_<counter>} name, then
 * zeroes all counters and flushes the record.
 */
private static void incrementMetricsAndReset(
  MetricsRecord record, Counters counters) {
  // Pass 1: push current counter values into the metrics record.
  for (Counters.Group group : counters) {
    for (Counter counter : group) {
      String metricName = group.getName() + "_" + counter.getName();
      metricName = metricName.replaceAll("[^a-zA-Z_]", "_").toLowerCase();
      record.incrMetric(metricName, counter.getValue());
    }
  }
  // Pass 2: zero the counters so the next interval starts fresh.
  for (Counters.Group group : counters) {
    for (Counter counter : group) {
      counter.setValue(0);
    }
  }
  record.update();
}
 
Example 3
Source Project: RDFS   Source File: ProxyJobTracker.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public void reportJobStats(
  String jobId, String pool, JobStats stats, Counters counters) {
  synchronized (aggregateJobStats) {
    // Accumulate into the cluster-wide aggregate and into the per-pool
    // aggregate, lazily creating per-pool state on first sight of a pool.
    aggregateJobStats.accumulate(stats);
    JobStats statsForPool = poolToJobStats.get(pool);
    if (statsForPool == null) {
      statsForPool = new JobStats();
      poolToJobStats.put(pool, statsForPool);
    }
    statsForPool.accumulate(stats);

    accumulateCounters(aggregateCounters, counters);
    Counters countersForPool = poolToJobCounters.get(pool);
    if (countersForPool == null) {
      countersForPool = new Counters();
      poolToJobCounters.put(pool, countersForPool);
    }
    accumulateCounters(countersForPool, counters);

    // Also make sure a metrics record exists for this pool.
    if (!poolToMetricsRecord.containsKey(pool)) {
      poolToMetricsRecord.put(pool, context.createRecord("pool-" + pool));
    }
  }
}
 
Example 4
Source Project: RDFS   Source File: FairSchedulerMetricsInst.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Publishes the scheduling gauges for a single pool, creating the pool's
 * metrics record on first use.
 *
 * @param info snapshot of the pool's current scheduling state
 */
private void submitPoolMetrics(PoolInfo info) {
  MetricsRecord record = poolToMetricsRecord.get(info.poolName);
  if (record == null) {
    record = MetricsUtil.createRecord(context, "pool-" + info.poolName);
    FairScheduler.LOG.info("Create metrics record for pool:" + info.poolName);
    poolToMetricsRecord.put(info.poolName, record);
  }
  record.setMetric("min_map", info.minMaps);
  record.setMetric("min_reduce", info.minReduces);
  record.setMetric("max_map", info.maxMaps);
  record.setMetric("max_reduce", info.maxReduces);
  record.setMetric("running_map", info.runningMaps);
  record.setMetric("running_reduce", info.runningReduces);
  record.setMetric("runnable_map", info.runnableMaps);
  record.setMetric("runnable_reduce", info.runnableReduces);
  record.setMetric("inited_tasks", info.initedTasks);
  record.setMetric("max_inited_tasks", info.maxInitedTasks);
  // Per-job averages; guard the division when the pool has no running jobs.
  int jobs = info.runningJobs;
  record.setMetric("avg_first_map_wait_ms",
      (jobs == 0) ? 0 : info.totalFirstMapWaitTime / jobs);
  record.setMetric("avg_first_reduce_wait_ms",
      (jobs == 0) ? 0 : info.totalFirstReduceWaitTime / jobs);
}
 
Example 5
Source Project: RDFS   Source File: MetricsTimeVaryingInt.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Pushes the delta accumulated since the previous interval into the
 * given metrics record and rolls over to a new interval.
 *
 * <p>Note this does NOT push to JMX (JMX reads the value via
 * {@link #getPreviousIntervalValue()}).
 *
 * @param mr record receiving the increment
 */
public void pushMetric(final MetricsRecord mr) {
  lock.lock();
  try {
    intervalHeartBeat();
    try {
      final String metricName = getName();
      mr.incrMetric(metricName, getPreviousIntervalValue());
    } catch (Exception e) {
      // Metrics failures must never break the caller; log and move on.
      LOG.info("pushMetric failed for " + getName() + "\n" +
          StringUtils.stringifyException(e));
    }
  } finally {
    lock.unlock();
  }
}
 
Example 6
Source Project: RDFS   Source File: MetricsTimeVaryingRate.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Pushes the per-interval deltas (op count and average time, optionally
 * min/max) into the given record and rolls over to a new interval.
 *
 * <p>Note this does NOT push to JMX (JMX reads
 * {@link #getPreviousIntervalAverageTime()} and
 * {@link #getPreviousIntervalNumOps()}).
 *
 * @param mr metrics record; if null, only the interval heart beat runs.
 */
public void pushMetric(final MetricsRecord mr) {
  lock.lock();
  try {
    intervalHeartBeat();
    if (mr == null) {
      return;  // heart beat only; the finally below still unlocks
    }
    try {
      mr.incrMetric(getName() + "_num_ops", getPreviousIntervalNumOps());
      mr.setMetric(getName() + "_avg_time", getPreviousIntervalAverageTime());
      if (printMinMax) {
        // Min/max are published once per interval and then cleared.
        mr.setMetric(getName() + "_min", getMinTime());
        mr.setMetric(getName() + "_max", getMaxTime());
        resetMinMax();
      }
    } catch (Exception e) {
      // Metrics failures are logged, never propagated.
      LOG.info("pushMetric failed for " + getName() + "\n" +
          StringUtils.stringifyException(e));
    }
  } finally {
    lock.unlock();
  }
}
 
Example 7
Source Project: hadoop   Source File: CompositeContext.java    License: Apache License 2.0 5 votes vote down vote up
@InterfaceAudience.Private
@Override
public MetricsRecord newRecord(String recordName) {
  // Hand out a dynamic proxy whose handler replays each MetricsRecord
  // call against one concrete record per wrapped sub-context.
  MetricsRecordDelegator handler =
      new MetricsRecordDelegator(recordName, subctxt);
  Class[] recordInterface = new Class[] { MetricsRecord.class };
  return (MetricsRecord) Proxy.newProxyInstance(
      MetricsRecord.class.getClassLoader(), recordInterface, handler);
}
 
Example 8
Source Project: hadoop   Source File: CompositeContext.java    License: Apache License 2.0 5 votes vote down vote up
private static Method initMethod() {
  // Resolve MetricsRecord#getRecordName once, reflectively; failure here
  // is a programming error, so surface it as an unchecked exception.
  try {
    return MetricsRecord.class.getMethod("getRecordName");
  } catch (Exception e) {
    throw new RuntimeException("Internal error", e);
  }
}
 
Example 9
Source Project: hadoop   Source File: CompositeContext.java    License: Apache License 2.0 5 votes vote down vote up
MetricsRecordDelegator(String recordName, ArrayList<MetricsContext> ctxts) {
  // Eagerly create one concrete record per sub-context; proxied calls
  // are later forwarded to each of these.
  this.recordName = recordName;
  this.subrecs = new ArrayList<MetricsRecord>(ctxts.size());
  for (int i = 0; i < ctxts.size(); ++i) {
    this.subrecs.add(ctxts.get(i).createRecord(recordName));
  }
}
 
Example 10
Source Project: hadoop   Source File: MetricsIntValue.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Pushes the current value into the given record, but only when it has
 * changed since the last push; the dirty flag is cleared either way.
 *
 * <p>Note this does NOT push to JMX (JMX reads the value via
 * {@link #get()}).
 *
 * @param mr record receiving the value
 */
public synchronized void pushMetric(final MetricsRecord mr) {
  final boolean dirty = changed;
  changed = false;  // cleared even when the push below fails
  if (dirty) {
    try {
      mr.setMetric(getName(), value);
    } catch (Exception e) {
      LOG.info("pushMetric failed for " + getName() + "\n", e);
    }
  }
}
 
Example 11
Source Project: big-c   Source File: CompositeContext.java    License: Apache License 2.0 5 votes vote down vote up
@InterfaceAudience.Private
@Override
public MetricsRecord newRecord(String recordName) {
  // Hand out a dynamic proxy whose handler replays each MetricsRecord
  // call against one concrete record per wrapped sub-context.
  MetricsRecordDelegator handler =
      new MetricsRecordDelegator(recordName, subctxt);
  Class[] recordInterface = new Class[] { MetricsRecord.class };
  return (MetricsRecord) Proxy.newProxyInstance(
      MetricsRecord.class.getClassLoader(), recordInterface, handler);
}
 
Example 12
Source Project: big-c   Source File: CompositeContext.java    License: Apache License 2.0 5 votes vote down vote up
private static Method initMethod() {
  // Resolve MetricsRecord#getRecordName once, reflectively; failure here
  // is a programming error, so surface it as an unchecked exception.
  try {
    return MetricsRecord.class.getMethod("getRecordName");
  } catch (Exception e) {
    throw new RuntimeException("Internal error", e);
  }
}
 
Example 13
Source Project: big-c   Source File: CompositeContext.java    License: Apache License 2.0 5 votes vote down vote up
MetricsRecordDelegator(String recordName, ArrayList<MetricsContext> ctxts) {
  // Eagerly create one concrete record per sub-context; proxied calls
  // are later forwarded to each of these.
  this.recordName = recordName;
  this.subrecs = new ArrayList<MetricsRecord>(ctxts.size());
  for (int i = 0; i < ctxts.size(); ++i) {
    this.subrecs.add(ctxts.get(i).createRecord(recordName));
  }
}
 
Example 14
Source Project: big-c   Source File: MetricsIntValue.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Pushes the current value into the given record, but only when it has
 * changed since the last push; the dirty flag is cleared either way.
 *
 * <p>Note this does NOT push to JMX (JMX reads the value via
 * {@link #get()}).
 *
 * @param mr record receiving the value
 */
public synchronized void pushMetric(final MetricsRecord mr) {
  final boolean dirty = changed;
  changed = false;  // cleared even when the push below fails
  if (dirty) {
    try {
      mr.setMetric(getName(), value);
    } catch (Exception e) {
      LOG.info("pushMetric failed for " + getName() + "\n", e);
    }
  }
}
 
Example 15
Source Project: RDFS   Source File: JobStats.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Pushes every accumulated task/job statistic into the given metrics
 * record as an increment, then resets all accumulators so the next
 * reporting interval starts from zero.
 *
 * @param metricsRecord record that receives the deltas
 */
public synchronized void incrementMetricsAndReset(
  MetricsRecord  metricsRecord) {
  metricsRecord.incrMetric("maps_launched", getNumMapTasksLaunched());
  metricsRecord.incrMetric("maps_completed", getNumMapTasksCompleted());
  metricsRecord.incrMetric("maps_failed", getNumMapTasksFailed());
  metricsRecord.incrMetric("reduces_launched", getNumReduceTasksLaunched());
  metricsRecord.incrMetric("reduces_completed", getNumReduceTasksCompleted());
  metricsRecord.incrMetric("reduces_failed", getNumReduceTasksFailed());
  metricsRecord.incrMetric("num_speculative_maps", getNumSpeculativeMaps());
  metricsRecord.incrMetric("num_speculative_reduces", getNumSpeculativeReduces());
  metricsRecord.incrMetric("num_speculative_succeeded_maps",
    getNumSpeculativeSucceededMaps());
  metricsRecord.incrMetric("num_speculative_succeeded_reduces",
    getNumSpeculativeSucceededReduces());
  metricsRecord.incrMetric("num_speculative_wasted_maps",
    getNumSpeculativeWasteMaps());
  metricsRecord.incrMetric("num_speculative_wasted_reduces",
    getNumSpeculativeWasteReduces());
  metricsRecord.incrMetric("speculative_map_time_waste",
    getSpeculativeMapTimeWaste());
  // FIX: this metric previously reported getSpeculativeMapTimeWaste(),
  // a copy/paste error that duplicated the map-side waste under the
  // reduce-side metric name.
  metricsRecord.incrMetric("speculative_reduce_time_waste",
    getSpeculativeReduceTimeWaste());
  metricsRecord.incrMetric("killed_tasks_map_time", getKilledMapTime());
  metricsRecord.incrMetric("killed_tasks_reduce_time", getKilledReduceTime());
  metricsRecord.incrMetric("failed_tasks_map_time", getFailedMapTime());
  metricsRecord.incrMetric("failed_tasks_reduce_time", getFailedReduceTime());
  metricsRecord.incrMetric("num_dataLocal_maps", getNumDataLocalMaps());
  metricsRecord.incrMetric("num_rackLocal_maps", getNumRackLocalMaps());
  metricsRecord.incrMetric("maps_killed", getNumMapTasksKilled());
  metricsRecord.incrMetric("reduces_killed", getNumReduceTasksKilled());
  metricsRecord.incrMetric("total_map_input_bytes", getTotalMapInputBytes());
  metricsRecord.incrMetric("local_map_input_bytes", getLocalMapInputBytes());
  metricsRecord.incrMetric("rack_map_input_bytes", getRackMapInputBytes());
  metricsRecord.incrMetric("maps_failed_by_fetch_failures",
    getNumMapTasksFailedByFetchFailures());
  metricsRecord.incrMetric("map_fetches_failed", getNumMapFetchFailures());

  // Zero every accumulator now that the values have been published.
  reset();
}
 
Example 16
Source Project: RDFS   Source File: Scheduler.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Submits the fairness metrics computed over all pools, then asks each
 * per-type scheduler to submit its own metrics.
 *
 * @param metricsRecord Where the metrics will be submitted
 */
public void submitMetrics(MetricsRecord metricsRecord) {
  // Fairness is derived from a snapshot of all pool metadata.
  PoolFairnessCalculator.calculateFairness(
      getPoolMetadataList(), metricsRecord);
  for (SchedulerForType typeScheduler : schedulersForTypes.values()) {
    typeScheduler.submitMetrics();
  }
}
 
Example 17
Source Project: RDFS   Source File: PoolInfoMetrics.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Constructor for the metrics of one pool and one resource type.
 *
 * @param poolInfo Pool info
 * @param type Resource type
 * @param record The metrics record for this object
 */
public PoolInfoMetrics(PoolInfo poolInfo, ResourceType type,
    MetricsRecord record) {
  this.poolInfo = poolInfo;
  this.type = type;
  this.record = record;
  // Wrapped in synchronizedMap — presumably updated from multiple
  // threads; confirm against the callers.
  this.counters =
      Collections.synchronizedMap(new HashMap<MetricName, Long>());
}
 
Example 18
Source Project: RDFS   Source File: FairSchedulerMetricsInst.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Sets up fair-scheduler metrics: one top-level record plus a map of
 * lazily created per-pool records, and registers for periodic updates.
 *
 * @param scheduler scheduler whose job initializer is reused here
 * @param conf configuration supplying the metric update period
 */
public FairSchedulerMetricsInst(FairScheduler scheduler, Configuration conf) {
  metricsRecord = MetricsUtil.createRecord(context, "fairscheduler");
  poolToMetricsRecord = new HashMap<String, MetricsRecord>();
  context.registerUpdater(this);

  // default period is 5 seconds.
  updatePeriod = conf.getLong("mapred.fairscheduler.metric.update.period",
                              5000L);
  jobInitializer = scheduler.getJobInitializer();
}
 
Example 19
Source Project: RDFS   Source File: FairSchedulerMetricsInst.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public void doUpdates(MetricsContext context) {
  // Recompute the expensive aggregate metrics at most once per
  // updatePeriod; counters and record flushes run on every call.
  long currentTime = JobTracker.getClock().getTime();
  if (currentTime - lastUpdateTime > updatePeriod) {
    updateMetrics();
    lastUpdateTime = currentTime;
  }
  updateCounters();
  metricsRecord.update();
  for (MetricsRecord poolRecord : poolToMetricsRecord.values()) {
    poolRecord.update();
  }
}
 
Example 20
Source Project: RDFS   Source File: JMXContext.java    License: Apache License 2.0 5 votes vote down vote up
@Override
protected MetricsRecord newRecord(String recordName) {
  MetricsRecord created = super.newRecord(recordName);
  // Expose the record through JMX when no record filter is configured
  // (empty set) or when this record name is explicitly listed.
  boolean exposeAll = records.isEmpty();
  if (exposeAll || records.contains(recordName)) {
    getOrCreateMBean(recordName);
  }
  return created;
}
 
Example 21
Source Project: RDFS   Source File: CompositeContext.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public MetricsRecord newRecord(String recordName) {
  // Hand out a dynamic proxy whose handler replays each MetricsRecord
  // call against one concrete record per wrapped sub-context.
  MetricsRecordDelegator handler =
      new MetricsRecordDelegator(recordName, subctxt);
  Class[] recordInterface = new Class[] { MetricsRecord.class };
  return (MetricsRecord) Proxy.newProxyInstance(
      MetricsRecord.class.getClassLoader(), recordInterface, handler);
}
 
Example 22
Source Project: RDFS   Source File: CompositeContext.java    License: Apache License 2.0 5 votes vote down vote up
private static Method initMethod() {
  // Resolve MetricsRecord#getRecordName once, reflectively; failure here
  // is a programming error, so surface it as an unchecked exception.
  try {
    return MetricsRecord.class.getMethod("getRecordName");
  } catch (Exception e) {
    throw new RuntimeException("Internal error", e);
  }
}
 
Example 23
Source Project: RDFS   Source File: CompositeContext.java    License: Apache License 2.0 5 votes vote down vote up
MetricsRecordDelegator(String recordName, ArrayList<MetricsContext> ctxts) {
  // Eagerly create one concrete record per sub-context; proxied calls
  // are later forwarded to each of these.
  this.recordName = recordName;
  this.subrecs = new ArrayList<MetricsRecord>(ctxts.size());
  for (int i = 0; i < ctxts.size(); ++i) {
    this.subrecs.add(ctxts.get(i).createRecord(recordName));
  }
}
 
Example 24
Source Project: RDFS   Source File: MetricsIntValue.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Pushes the current value into the given record, but only when it has
 * changed since the last push; the dirty flag is cleared either way.
 *
 * <p>Note this does NOT push to JMX (JMX reads the value via
 * {@link #get()}).
 *
 * @param mr record receiving the value
 */
public synchronized void pushMetric(final MetricsRecord mr) {
  final boolean dirty = changed;
  changed = false;  // cleared even when the push below fails
  if (dirty) {
    try {
      mr.setMetric(getName(), value);
    } catch (Exception e) {
      LOG.info("pushMetric failed for " + getName() + "\n" +
          StringUtils.stringifyException(e));
    }
  }
}
 
Example 25
Source Project: RDFS   Source File: MetricsTimeVaryingLong.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Pushes the delta accumulated since the previous interval into the
 * given metrics record and rolls over to a new interval.
 *
 * <p>Note this does NOT push to JMX (JMX reads the value via
 * {@link #getPreviousIntervalValue()}).
 *
 * @param mr record receiving the increment
 */
public void pushMetric(final MetricsRecord mr) {
  lock.lock();
  try {
    intervalHeartBeat();
    try {
      final String metricName = getName();
      mr.incrMetric(metricName, getPreviousIntervalValue());
    } catch (Exception e) {
      // Metrics failures must never break the caller; log and move on.
      LOG.info("pushMetric failed for " + getName() + "\n" +
          StringUtils.stringifyException(e));
    }
  } finally {
    lock.unlock();
  }
}
 
Example 26
Source Project: hadoop-gpu   Source File: CompositeContext.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public MetricsRecord newRecord(String recordName) {
  // Hand out a dynamic proxy whose handler replays each MetricsRecord
  // call against one concrete record per wrapped sub-context.
  MetricsRecordDelegator handler =
      new MetricsRecordDelegator(recordName, subctxt);
  Class[] recordInterface = new Class[] { MetricsRecord.class };
  return (MetricsRecord) Proxy.newProxyInstance(
      MetricsRecord.class.getClassLoader(), recordInterface, handler);
}
 
Example 27
Source Project: hadoop-gpu   Source File: CompositeContext.java    License: Apache License 2.0 5 votes vote down vote up
private static Method initMethod() {
  // Resolve MetricsRecord#getRecordName once, reflectively; failure here
  // is a programming error, so surface it as an unchecked exception.
  try {
    return MetricsRecord.class.getMethod("getRecordName");
  } catch (Exception e) {
    throw new RuntimeException("Internal error", e);
  }
}
 
Example 28
Source Project: hadoop-gpu   Source File: CompositeContext.java    License: Apache License 2.0 5 votes vote down vote up
MetricsRecordDelegator(String recordName, ArrayList<MetricsContext> ctxts) {
  // Eagerly create one concrete record per sub-context; proxied calls
  // are later forwarded to each of these.
  this.recordName = recordName;
  this.subrecs = new ArrayList<MetricsRecord>(ctxts.size());
  for (int i = 0; i < ctxts.size(); ++i) {
    this.subrecs.add(ctxts.get(i).createRecord(recordName));
  }
}
 
Example 29
Source Project: hadoop-gpu   Source File: MetricsIntValue.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Pushes the current value into the given record, but only when it has
 * changed since the last push; the dirty flag is cleared either way.
 *
 * <p>Note this does NOT push to JMX (JMX reads the value via
 * {@link #get()}).
 *
 * @param mr record receiving the value
 */
public synchronized void pushMetric(final MetricsRecord mr) {
  final boolean dirty = changed;
  changed = false;  // cleared even when the push below fails
  if (dirty) {
    try {
      mr.setMetric(getName(), value);
    } catch (Exception e) {
      LOG.info("pushMetric failed for " + getName() + "\n" +
          StringUtils.stringifyException(e));
    }
  }
}
 
Example 30
Source Project: hadoop-gpu   Source File: MetricsTimeVaryingInt.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Pushes the delta accumulated since the previous interval into the
 * given metrics record and rolls over to a new interval.
 *
 * <p>Note this does NOT push to JMX (JMX reads the value via
 * {@link #getPreviousIntervalValue()}).
 *
 * @param mr record receiving the increment
 */
public synchronized void pushMetric(final MetricsRecord mr) {
  intervalHeartBeat();
  try {
    final String metricName = getName();
    mr.incrMetric(metricName, getPreviousIntervalValue());
  } catch (Exception e) {
    // Metrics failures are logged, never propagated to the caller.
    LOG.info("pushMetric failed for " + getName() + "\n" +
        StringUtils.stringifyException(e));
  }
}