org.apache.hadoop.metrics.MetricsContext Java Examples

The following examples show how to use org.apache.hadoop.metrics.MetricsContext. Each example is drawn from an open-source project; the source file and license are noted above it.
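Nearly all of the examples share one life cycle: obtain a MetricsContext through MetricsUtil.getContext(), create and tag a MetricsRecord, register the owning object as an Updater, and then, each time the context calls back into doUpdates(), push the values accumulated since the last period into the record and call update(). The minimal sketch below condenses that pattern; the context name "example", the record and metric names, and the ExampleMetrics class are illustrative placeholders, not names taken from the projects that follow.

import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;
import org.apache.hadoop.metrics.Updater;

public class ExampleMetrics implements Updater {
  private final MetricsRecord metricsRecord;
  private int requestsSinceLastUpdate = 0;

  public ExampleMetrics(String sessionId) {
    // Look up the context named "example"; its implementation class,
    // reporting period and destination come from hadoop-metrics.properties.
    MetricsContext context = MetricsUtil.getContext("example");
    // Create a record inside that context and tag it.
    metricsRecord = MetricsUtil.createRecord(context, "exampleRecord");
    metricsRecord.setTag("sessionId", sessionId);
    // Have the context call doUpdates() once per monitoring period.
    context.registerUpdater(this);
  }

  public synchronized void incrRequests() {
    ++requestsSinceLastUpdate;
  }

  /** Called periodically by the metrics system. */
  public void doUpdates(MetricsContext unused) {
    synchronized (this) {
      // Fold the delta accumulated since the last period into the
      // record, then reset it.
      metricsRecord.incrMetric("requests", requestsSinceLastUpdate);
      requestsSinceLastUpdate = 0;
    }
    metricsRecord.update();
  }
}

Between periods the object only accumulates deltas; doUpdates() flushes and resets them, which is exactly the setMetric/incrMetric-then-zero shape visible in the TaskTracker and ReduceTask examples below.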
Example #1
Source File: ShuffleClientMetrics.java    From incubator-tez with Apache License 2.0
ShuffleClientMetrics(String dagName, String vertexName, int taskIndex, Configuration conf, 
    String user) {
  this.numCopiers = 
      conf.getInt(
          TezJobConfig.TEZ_RUNTIME_SHUFFLE_PARALLEL_COPIES, 
          TezJobConfig.TEZ_RUNTIME_SHUFFLE_PARALLEL_COPIES_DEFAULT);

  MetricsContext metricsContext = MetricsUtil.getContext(Constants.TEZ);
  this.shuffleMetrics = 
    MetricsUtil.createRecord(metricsContext, "shuffleInput");
  this.shuffleMetrics.setTag("user", user);
  this.shuffleMetrics.setTag("dagName", dagName);
  this.shuffleMetrics.setTag("taskId", TezRuntimeUtils.getTaskIdentifier(vertexName, taskIndex));
  this.shuffleMetrics.setTag("sessionId", 
      conf.get(
          TezRuntimeFrameworkConfigs.TEZ_RUNTIME_METRICS_SESSION_ID,
          TezRuntimeFrameworkConfigs.TEZ_RUNTIME_METRICS_SESSION_ID_DEFAULT));
  metricsContext.registerUpdater(this);
}
 
Example #2
Source File: TaskTracker.java    From hadoop-gpu with Apache License 2.0
public void doUpdates(MetricsContext unused) {
  synchronized (this) {
    // Gauge: percentage of shuffle handler threads currently busy,
    // guarding against division by zero when no workers are configured.
    if (workerThreads != 0) {
      shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent",
          100*((float)serverHandlerBusy/workerThreads));
    } else {
      shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent", 0);
    }
    // Counters: fold in the deltas accumulated since the last update,
    // then reset them for the next period.
    shuffleMetricsRecord.incrMetric("shuffle_output_bytes", outputBytes);
    shuffleMetricsRecord.incrMetric("shuffle_failed_outputs", failedOutputs);
    shuffleMetricsRecord.incrMetric("shuffle_success_outputs", successOutputs);
    outputBytes = 0;
    failedOutputs = 0;
    successOutputs = 0;
  }
  shuffleMetricsRecord.update();
}
 
Example #3
Source File: CompositeContext.java    From big-c with Apache License 2.0
@InterfaceAudience.Private
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);
  int nKids;
  try {
    String sKids = getAttribute(ARITY_LABEL);
    nKids = Integer.parseInt(sKids);
  } catch (Exception e) {
    LOG.error("Unable to initialize composite metric " + contextName +
              ": could not init arity", e);
    return;
  }
  for (int i = 0; i < nKids; ++i) {
    MetricsContext ctxt = MetricsUtil.getContext(
        String.format(SUB_FMT, contextName, i), contextName);
    if (null != ctxt) {
      subctxt.add(ctxt);
    }
  }
}
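In Example #3, init() reads the composite context's arity attribute to learn how many children to build, then resolves each child through MetricsUtil.getContext() under a derived reference name (SUB_FMT is "%s.sub%d" in the Apache Hadoop source, so a context named dfs resolves children dfs.sub0 and dfs.sub1). A hedged hadoop-metrics.properties sketch of that wiring, with the two child classes chosen purely for illustration and the children's own attributes (reporting period, file name, Ganglia servers) omitted:

# Make the dfs context a composite that fans out to two children.
dfs.class=org.apache.hadoop.metrics.spi.CompositeContext
dfs.arity=2
dfs.sub0.class=org.apache.hadoop.metrics.file.FileContext
dfs.sub1.class=org.apache.hadoop.metrics.ganglia.GangliaContext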
 
Example #4
Source File: CompositeContext.java    From big-c with Apache License 2.0
@InterfaceAudience.Private
@Override
protected void emitRecord(String contextName, String recordName,
    OutputRecord outRec) throws IOException {
  // Fan the record out to every sub-context; an IOException from one
  // sub-context is logged and does not block delivery to the rest.
  for (MetricsContext ctxt : subctxt) {
    try {
      ((AbstractMetricsContext)ctxt).emitRecord(
        contextName, recordName, outRec);
      // Null arguments are surfaced as an IOException, which the catch
      // below downgrades to a warning.
      if (contextName == null || recordName == null || outRec == null) {
        throw new IOException(contextName + ":" + recordName + ":" + outRec);
      }
    } catch (IOException e) {
      LOG.warn("emitRecord failed: " + ctxt.getContextName(), e);
    }
  }
}
 
Example #5
Source File: ProxyJobTracker.java    From RDFS with Apache License 2.0
@Override
public void doUpdates(MetricsContext unused) {
  synchronized (aggregateJobStats) {
    // Update metrics with aggregate job stats and reset the aggregate.
    aggregateJobStats.incrementMetricsAndReset(metricsRecord);

    incrementMetricsAndReset(metricsRecord, aggregateCounters);

    for (Map.Entry<String, MetricsRecord> entry :
      poolToMetricsRecord.entrySet()) {
      String pool = entry.getKey();

      JobStats poolJobStats = poolToJobStats.get(pool);
      poolJobStats.incrementMetricsAndReset(entry.getValue());

      Counters poolCounters = poolToJobCounters.get(pool);
      incrementMetricsAndReset(entry.getValue(), poolCounters);
    }
  }
}
 
Example #6
Source File: CompositeContext.java    From RDFS with Apache License 2.0
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);
  int nKids;
  try {
    String sKids = getAttribute(ARITY_LABEL);
    nKids = Integer.valueOf(sKids);
  } catch (Exception e) {
    LOG.error("Unable to initialize composite metric " + contextName +
              ": could not init arity", e);
    return;
  }
  for (int i = 0; i < nKids; ++i) {
    MetricsContext ctxt = MetricsUtil.getContext(
        String.format(SUB_FMT, contextName, i), contextName);
    if (null != ctxt) {
      subctxt.add(ctxt);
    }
  }
}
 
Example #7
Source File: ReduceTask.java    From hadoop-gpu with Apache License 2.0
public void doUpdates(MetricsContext unused) {
  synchronized (this) {
    shuffleMetrics.incrMetric("shuffle_input_bytes", numBytes);
    shuffleMetrics.incrMetric("shuffle_failed_fetches", 
                              numFailedFetches);
    shuffleMetrics.incrMetric("shuffle_success_fetches", 
                              numSuccessFetches);
    if (numCopiers != 0) {
      shuffleMetrics.setMetric("shuffle_fetchers_busy_percent",
          100*((float)numThreadsBusy/numCopiers));
    } else {
      shuffleMetrics.setMetric("shuffle_fetchers_busy_percent", 0);
    }
    numBytes = 0;
    numSuccessFetches = 0;
    numFailedFetches = 0;
  }
  shuffleMetrics.update();
}
 
Example #8
Source File: HighTideNodeMetrics.java    From RDFS with Apache License 2.0
public HighTideNodeMetrics(Configuration conf, HighTideNode hightideNode) {
  String sessionId = conf.get("session.id");
  // Initiate Java VM metrics
  JvmMetrics.init("HighTideNode", sessionId);


  // Now the Mbean for the name node - this also registers the MBean
  hightidenodeActivityMBean = new HighTideNodeActivityMBean(registry);

  // Create a record for HighTideNode metrics
  MetricsContext metricsContext = MetricsUtil.getContext("dfs");
  metricsRecord = MetricsUtil.createRecord(metricsContext, "hightidenode");
  metricsRecord.setTag("sessionId", sessionId);
  metricsContext.registerUpdater(this);
  LOG.info("Initializing HighTideNodeMetrics using context object:" +
            metricsContext.getClass().getName());
}
 
Example #9
Source File: CompositeContext.java    From hadoop-gpu with Apache License 2.0
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);
  int nKids;
  try {
    String sKids = getAttribute(ARITY_LABEL);
    nKids = Integer.valueOf(sKids);
  } catch (Exception e) {
    LOG.error("Unable to initialize composite metric " + contextName +
              ": could not init arity", e);
    return;
  }
  for (int i = 0; i < nKids; ++i) {
    MetricsContext ctxt = MetricsUtil.getContext(
        String.format(SUB_FMT, contextName, i), contextName);
    if (null != ctxt) {
      subctxt.add(ctxt);
    }
  }
}
 
Example #10
Source File: CompositeContext.java    From hadoop with Apache License 2.0
@InterfaceAudience.Private
@Override
protected void emitRecord(String contextName, String recordName,
    OutputRecord outRec) throws IOException {
  for (MetricsContext ctxt : subctxt) {
    try {
      ((AbstractMetricsContext)ctxt).emitRecord(
        contextName, recordName, outRec);
      if (contextName == null || recordName == null || outRec == null) {
        throw new IOException(contextName + ":" + recordName + ":" + outRec);
      }
    } catch (IOException e) {
      LOG.warn("emitRecord failed: " + ctxt.getContextName(), e);
    }
  }
}
 
Example #11
Source File: CompositeContext.java    From hadoop with Apache License 2.0
@InterfaceAudience.Private
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);
  int nKids;
  try {
    String sKids = getAttribute(ARITY_LABEL);
    nKids = Integer.parseInt(sKids);
  } catch (Exception e) {
    LOG.error("Unable to initialize composite metric " + contextName +
              ": could not init arity", e);
    return;
  }
  for (int i = 0; i < nKids; ++i) {
    MetricsContext ctxt = MetricsUtil.getContext(
        String.format(SUB_FMT, contextName, i), contextName);
    if (null != ctxt) {
      subctxt.add(ctxt);
    }
  }
}
 
Example #12
Source File: ReduceTask.java    From RDFS with Apache License 2.0
public void doUpdates(MetricsContext unused) {
  synchronized (this) {
    shuffleMetrics.incrMetric("shuffle_input_bytes", numBytes);
    shuffleMetrics.incrMetric("shuffle_failed_fetches",
                              numFailedFetches);
    shuffleMetrics.incrMetric("shuffle_success_fetches",
                              numSuccessFetches);
    if (numCopiers != 0) {
      shuffleMetrics.setMetric("shuffle_fetchers_busy_percent",
          100*((float)numThreadsBusy/numCopiers));
    } else {
      shuffleMetrics.setMetric("shuffle_fetchers_busy_percent", 0);
    }
    numBytes = 0;
    numSuccessFetches = 0;
    numFailedFetches = 0;
  }
  shuffleMetrics.update();
}
 
Example #13
Source File: TaskTrackerMetricsInst.java    From hadoop-gpu with Apache License 2.0
/**
 * Since this object is a registered updater, this method will be called
 * periodically, e.g. every 5 seconds.
 */
@Override
public void doUpdates(MetricsContext unused) {
  synchronized (this) {
    metricsRecord.setMetric("maps_running", tt.mapTotal);
    metricsRecord.setMetric("reduces_running", tt.reduceTotal);
    metricsRecord.setMetric("mapTaskSlots", (short)tt.getMaxCurrentMapTasks());
    metricsRecord.setMetric("reduceTaskSlots", 
                                 (short)tt.getMaxCurrentReduceTasks());
    metricsRecord.incrMetric("tasks_completed", numCompletedTasks);
    metricsRecord.incrMetric("tasks_failed_timeout", timedoutTasks);
    metricsRecord.incrMetric("tasks_failed_ping", tasksFailedPing);
    
    numCompletedTasks = 0;
    timedoutTasks = 0;
    tasksFailedPing = 0;
  }
  metricsRecord.update();
}
 
Example #14
Source File: LocalJobRunnerMetrics.java    From hadoop with Apache License 2.0
/**
 * Since this object is a registered updater, this method will be called
 * periodically, e.g. every 5 seconds.
 */
public void doUpdates(MetricsContext unused) {
  synchronized (this) {
    metricsRecord.incrMetric("maps_launched", numMapTasksLaunched);
    metricsRecord.incrMetric("maps_completed", numMapTasksCompleted);
    metricsRecord.incrMetric("reduces_launched", numReduceTasksLaunched);
    metricsRecord.incrMetric("reduces_completed", numReduceTasksCompleted);
    metricsRecord.incrMetric("waiting_maps", numWaitingMaps);
    metricsRecord.incrMetric("waiting_reduces", numWaitingReduces);

    numMapTasksLaunched = 0;
    numMapTasksCompleted = 0;
    numReduceTasksLaunched = 0;
    numReduceTasksCompleted = 0;
    numWaitingMaps = 0;
    numWaitingReduces = 0;
  }
  metricsRecord.update();
}
 
Example #15
Source File: DataNodeMetrics.java    From RDFS with Apache License 2.0
public DataNodeMetrics(Configuration conf, String storageId) {
  String sessionId = conf.get("session.id"); 
  // Initiate reporting of Java VM metrics
  JvmMetrics.init("DataNode", sessionId);
  

  // Now the MBean for the data node
  datanodeActivityMBean = new DataNodeActivityMBean(registry, storageId);
  
  // Create record for DataNode metrics
  MetricsContext context = MetricsUtil.getContext("dfs");
  metricsRecord = MetricsUtil.createRecord(context, "datanode");
  metricsRecord.setTag("sessionId", sessionId);
  context.registerUpdater(this);
}
 
Example #16
Source File: LookasideMetrics.java    From RDFS with Apache License 2.0
public LookasideMetrics() {
  // Create a record for LookasideCache metrics
  MetricsContext metricsContext = MetricsUtil.getContext("lookasideCache");
  metricsRecord = MetricsUtil.createRecord(metricsContext,
                                           "LookasideFileSystem");
  metricsContext.registerUpdater(this);
}
 
Example #17
Source File: HighTideNodeMetrics.java    From RDFS with Apache License 2.0
/**
 * Since this object is a registered updater, this method will be called
 * periodically, e.g. every 5 seconds.
 */
public void doUpdates(MetricsContext unused) {
  synchronized (this) {
    for (MetricsBase m : registry.getMetricsList()) {
      m.pushMetric(metricsRecord);
    }
  }
  metricsRecord.update();
}
 
Example #18
Source File: JobTrackerMetricsInst.java    From hadoop-gpu with Apache License 2.0
public JobTrackerMetricsInst(JobTracker tracker, JobConf conf) {
  super(tracker, conf);
  String sessionId = conf.getSessionId();
  // Initiate JVM Metrics
  JvmMetrics.init("JobTracker", sessionId);
  // Create a record for map-reduce metrics
  MetricsContext context = MetricsUtil.getContext("mapred");
  metricsRecord = MetricsUtil.createRecord(context, "jobtracker");
  metricsRecord.setTag("sessionId", sessionId);
  context.registerUpdater(this);
}
 
Example #19
Source File: SepMetrics.java    From hbase-indexer with Apache License 2.0
@Override
public void doUpdates(MetricsContext unused) {
    synchronized (this) {
        for (MetricsBase m : metricsRegistry.getMetricsList()) {
            m.pushMetric(metricsRecord);
        }
    }
    metricsRecord.update();
}
 
Example #20
Source File: ReduceTask.java    From RDFS with Apache License 2.0
ShuffleClientMetrics(JobConf conf) {
  MetricsContext metricsContext = MetricsUtil.getContext("mapred");
  this.shuffleMetrics =
    MetricsUtil.createRecord(metricsContext, "shuffleInput");
  this.shuffleMetrics.setTag("user", conf.getUser());
  this.shuffleMetrics.setTag("jobName", conf.getJobName());
  this.shuffleMetrics.setTag("jobId", ReduceTask.this.getJobID().toString());
  this.shuffleMetrics.setTag("taskId", getTaskID().toString());
  this.shuffleMetrics.setTag("sessionId", conf.getSessionId());
  metricsContext.registerUpdater(this);
}
 
Example #21
Source File: JvmMetrics.java    From hadoop-gpu with Apache License 2.0
/**
 * This will be called periodically (with the period being configuration
 * dependent).
 */
public void doUpdates(MetricsContext context) {
    doMemoryUpdates();
    doGarbageCollectionUpdates();
    doThreadUpdates();
    doEventCountUpdates();
    metrics.update();
}
 
Example #22
Source File: JvmMetrics.java    From RDFS with Apache License 2.0
/** Creates a new instance of JvmMetrics */
private JvmMetrics(String processName, String sessionId,
  String recordName) {
    MetricsContext context = MetricsUtil.getContext("jvm");
    metrics = MetricsUtil.createRecord(context, recordName);
    metrics.setTag("processName", processName);
    metrics.setTag("sessionId", sessionId);
    context.registerUpdater(this);
}
 
Example #23
Source File: DataNodeMetrics.java    From hadoop-gpu with Apache License 2.0
/**
 * Since this object is a registered updater, this method will be called
 * periodically, e.g. every 5 seconds.
 */
public void doUpdates(MetricsContext unused) {
  synchronized (this) {
    for (MetricsBase m : registry.getMetricsList()) {
      m.pushMetric(metricsRecord);
    }
  }
  metricsRecord.update();
}
 
Example #24
Source File: CompositeContext.java    From hadoop-gpu with Apache License 2.0
MetricsRecordDelegator(String recordName, ArrayList<MetricsContext> ctxts) {
  this.recordName = recordName;
  this.subrecs = new ArrayList<MetricsRecord>(ctxts.size());
  for (MetricsContext ctxt : ctxts) {
    subrecs.add(ctxt.createRecord(recordName));
  }
}
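Although only the constructor is shown, the rest of MetricsRecordDelegator presumably forwards each MetricsRecord call (setTag, setMetric, incrMetric, update, remove) to every element of subrecs, so a record created through the composite context is mirrored into all of its child contexts.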
 
Example #25
Source File: TaskTracker.java    From hadoop-gpu with Apache License 2.0
ShuffleServerMetrics(JobConf conf) {
  MetricsContext context = MetricsUtil.getContext("mapred");
  shuffleMetricsRecord = 
                       MetricsUtil.createRecord(context, "shuffleOutput");
  this.shuffleMetricsRecord.setTag("sessionId", conf.getSessionId());
  context.registerUpdater(this);
}
 
Example #26
Source File: RaidNodeMetrics.java    From RDFS with Apache License 2.0
@Override
public void doUpdates(MetricsContext context) {
  synchronized (this) {
    for (MetricsBase m : registry.getMetricsList()) {
      m.pushMetric(metricsRecord);
    }
  }
  metricsRecord.update();
}
 
Example #27
Source File: RpcMetrics.java    From hadoop-gpu with Apache License 2.0
public RpcMetrics(String hostName, String port, Server server) {
  myServer = server;
  MetricsContext context = MetricsUtil.getContext("rpc");
  metricsRecord = MetricsUtil.createRecord(context, "metrics");

  metricsRecord.setTag("port", port);

  LOG.info("Initializing RPC Metrics with hostName=" 
      + hostName + ", port=" + port);

  context.registerUpdater(this);
  
  // Need to clean up the interface to RpcMgt - don't need both metrics and server params
  rpcMBean = new RpcActivityMBean(registry, hostName, port);
}
 
Example #28
Source File: TaskTracker.java    From RDFS with Apache License 2.0
public void doUpdates(MetricsContext unused) {
  synchronized (this) {
    if (workerThreads != 0) {
      shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent",
          100*((float)serverHandlerBusy/workerThreads));
    } else {
      shuffleMetricsRecord.setMetric("shuffle_handler_busy_percent", 0);
    }
    shuffleMetricsRecord.setMetric("shuffle_queue_len", httpQueueLen);
    shuffleMetricsRecord.incrMetric("shuffle_output_bytes",
                                    outputBytes);
    shuffleMetricsRecord.incrMetric("shuffle_failed_outputs",
                                    failedOutputs);
    shuffleMetricsRecord.incrMetric("shuffle_success_outputs",
                                    successOutputs);
    // Netty map output metrics
    if (nettyWorkerThreadPool != null) {
      shuffleMetricsRecord.setMetric("netty_mapoutput_activecount",
          nettyWorkerThreadPool.getActiveCount());
      shuffleMetricsRecord.setMetric("netty_mapoutput_poolsize",
          nettyWorkerThreadPool.getPoolSize());
      shuffleMetricsRecord.setMetric("netty_mapoutput_maximumpoolsize",
          nettyWorkerThreadPool.getMaximumPoolSize());
      shuffleMetricsRecord.setMetric("netty_mapoutput_largestpoolsize",
          nettyWorkerThreadPool.getLargestPoolSize());
      shuffleMetricsRecord.setMetric("netty_mapoutput_taskcount",
          nettyWorkerThreadPool.getTaskCount());
    }
    outputBytes = 0;
    failedOutputs = 0;
    successOutputs = 0;
  }
  shuffleMetricsRecord.update();
}
 
Example #29
Source File: LocalJobRunnerMetrics.java    From hadoop with Apache License 2.0
public LocalJobRunnerMetrics(JobConf conf) {
  String sessionId = conf.getSessionId();
  // Initiate JVM Metrics
  JvmMetrics.init("JobTracker", sessionId);
  // Create a record for map-reduce metrics
  MetricsContext context = MetricsUtil.getContext("mapred");
  // record name is jobtracker for compatibility 
  metricsRecord = MetricsUtil.createRecord(context, "jobtracker");
  metricsRecord.setTag("sessionId", sessionId);
  context.registerUpdater(this);
}
 
Example #30
Source File: TaskTracker.java    From RDFS with Apache License 2.0
ShuffleServerMetrics(JobConf conf) {
  MetricsContext context = MetricsUtil.getContext("mapred");
  shuffleMetricsRecord =
                       MetricsUtil.createRecord(context, "shuffleOutput");
  this.shuffleMetricsRecord.setTag("sessionId", conf.getSessionId());
  context.registerUpdater(this);
}