org.apache.hadoop.metrics.MetricsUtil Java Examples
The following examples show how to use
org.apache.hadoop.metrics.MetricsUtil.
You can vote up the examples you like or vote down the ones you don't like,
and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example #1
Source File: ClusterManagerMetrics.java From RDFS with Apache License 2.0 | 6 votes |
/**
 * Creates the cluster-manager metrics record and registers every
 * per-resource-type counter plus the node/session gauges.
 *
 * @param types the resource types to build per-type metrics for
 */
public ClusterManagerMetrics(Collection<ResourceType> types) {
  context = MetricsUtil.getContext(CONTEXT_NAME);
  metricsRecord = MetricsUtil.createRecord(context, CONTEXT_NAME);
  // One counter map for each stage of a resource request's lifecycle.
  typeToResourceRequested = createTypeToResourceCountMap(types, "requested");
  typeToResourceGranted = createTypeToResourceCountMap(types, "granted");
  typeToResourceRevoked = createTypeToResourceCountMap(types, "revoked");
  typeToResourceReleased = createTypeToResourceCountMap(types, "released");
  // Per-type scheduling state gauges.
  typeToPendingCount = createTypeToCountMap(types, "pending");
  typeToRunningCount = createTypeToCountMap(types, "running");
  typeToTotalSlots = createTypeToCountMap(types, "total");
  typeToFreeSlots = createTypeToCountMap(types, "free");
  typeToSchedulerRunTime = createTypeToCountMap(types, "scheduler_runtime");
  sessionStatusToMetrics = createSessionStatusToMetricsMap();
  // Node and session level values, all registered on the shared registry.
  aliveNodes = new MetricsIntValue("alive_nodes", registry);
  deadNodes = new MetricsIntValue("dead_nodes", registry);
  blacklistedNodes = new MetricsIntValue("blacklisted_nodes", registry);
  numRunningSessions = new MetricsIntValue("num_running_sessions", registry);
  totalSessionCount = new MetricsTimeVaryingInt("total_sessions", registry);
  pendingCallsCount = new MetricsIntValue("num_pending_calls", registry);
  numCJTFailures = new MetricsTimeVaryingInt("num_cjt_failures", registry);
}
Example #2
Source File: CompositeContext.java From hadoop with Apache License 2.0 | 6 votes |
/**
 * Initializes this composite context and its child contexts.
 * The child count comes from the arity attribute; when it is missing or
 * malformed the failure is logged and no children are created.
 */
@InterfaceAudience.Private
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);
  int childCount;
  try {
    childCount = Integer.parseInt(getAttribute(ARITY_LABEL));
  } catch (Exception e) {
    LOG.error("Unable to initialize composite metric " + contextName
        + ": could not init arity", e);
    return;
  }
  for (int idx = 0; idx < childCount; ++idx) {
    MetricsContext child = MetricsUtil.getContext(
        String.format(SUB_FMT, contextName, idx), contextName);
    if (child != null) {
      subctxt.add(child);
    }
  }
}
Example #3
Source File: CompositeContext.java From hadoop-gpu with Apache License 2.0 | 6 votes |
/**
 * Initializes this composite context and each of its child sub-contexts.
 *
 * The number of children is read from the {@code ARITY_LABEL} attribute; if
 * that attribute is absent or not a valid integer, the error is logged and
 * the context is left with no children.
 *
 * @param contextName name of this metrics context
 * @param factory     factory passed through to the superclass initialization
 */
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);
  int nKids;
  try {
    String sKids = getAttribute(ARITY_LABEL);
    // parseInt avoids the needless Integer boxing of Integer.valueOf and
    // matches the other CompositeContext variants.
    nKids = Integer.parseInt(sKids);
  } catch (Exception e) {
    LOG.error("Unable to initialize composite metric " + contextName
        + ": could not init arity", e);
    return;
  }
  for (int i = 0; i < nKids; ++i) {
    MetricsContext ctxt = MetricsUtil.getContext(
        String.format(SUB_FMT, contextName, i), contextName);
    if (null != ctxt) {
      subctxt.add(ctxt);
    }
  }
}
Example #4
Source File: FairSchedulerMetricsInst.java From RDFS with Apache License 2.0 | 6 votes |
/**
 * Publishes the scheduling metrics for a single pool, lazily creating and
 * caching the pool's metrics record on its first report.
 *
 * @param info snapshot of the pool counters to publish
 */
private void submitPoolMetrics(PoolInfo info) {
  MetricsRecord poolRecord = poolToMetricsRecord.get(info.poolName);
  if (poolRecord == null) {
    // First time we see this pool: create its record and remember it.
    poolRecord = MetricsUtil.createRecord(context, "pool-" + info.poolName);
    FairScheduler.LOG.info("Create metrics record for pool:" + info.poolName);
    poolToMetricsRecord.put(info.poolName, poolRecord);
  }
  poolRecord.setMetric("min_map", info.minMaps);
  poolRecord.setMetric("min_reduce", info.minReduces);
  poolRecord.setMetric("max_map", info.maxMaps);
  poolRecord.setMetric("max_reduce", info.maxReduces);
  poolRecord.setMetric("running_map", info.runningMaps);
  poolRecord.setMetric("running_reduce", info.runningReduces);
  poolRecord.setMetric("runnable_map", info.runnableMaps);
  poolRecord.setMetric("runnable_reduce", info.runnableReduces);
  poolRecord.setMetric("inited_tasks", info.initedTasks);
  poolRecord.setMetric("max_inited_tasks", info.maxInitedTasks);
  int jobs = info.runningJobs;
  // Guard against divide-by-zero when the pool has no running jobs.
  poolRecord.setMetric("avg_first_map_wait_ms",
      (jobs == 0) ? 0 : info.totalFirstMapWaitTime / jobs);
  poolRecord.setMetric("avg_first_reduce_wait_ms",
      (jobs == 0) ? 0 : info.totalFirstReduceWaitTime / jobs);
}
Example #5
Source File: CompositeContext.java From RDFS with Apache License 2.0 | 6 votes |
/**
 * Initializes this composite context and each of its child sub-contexts.
 *
 * The number of children is read from the {@code ARITY_LABEL} attribute; if
 * that attribute is absent or not a valid integer, the error is logged and
 * the context is left with no children.
 *
 * @param contextName name of this metrics context
 * @param factory     factory passed through to the superclass initialization
 */
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);
  int nKids;
  try {
    String sKids = getAttribute(ARITY_LABEL);
    // parseInt avoids the needless Integer boxing of Integer.valueOf and
    // matches the other CompositeContext variants.
    nKids = Integer.parseInt(sKids);
  } catch (Exception e) {
    LOG.error("Unable to initialize composite metric " + contextName
        + ": could not init arity", e);
    return;
  }
  for (int i = 0; i < nKids; ++i) {
    MetricsContext ctxt = MetricsUtil.getContext(
        String.format(SUB_FMT, contextName, i), contextName);
    if (null != ctxt) {
      subctxt.add(ctxt);
    }
  }
}
Example #6
Source File: CompositeContext.java From big-c with Apache License 2.0 | 6 votes |
/**
 * Initializes this composite context, then builds each child context named
 * by the arity attribute. A missing or unparsable arity is logged and the
 * method returns with zero children registered.
 */
@InterfaceAudience.Private
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);
  final int arity;
  try {
    String rawArity = getAttribute(ARITY_LABEL);
    arity = Integer.parseInt(rawArity);
  } catch (Exception e) {
    LOG.error("Unable to initialize composite metric " + contextName
        + ": could not init arity", e);
    return;
  }
  for (int i = 0; i < arity; ++i) {
    String subName = String.format(SUB_FMT, contextName, i);
    MetricsContext sub = MetricsUtil.getContext(subName, contextName);
    if (sub != null) {
      subctxt.add(sub);
    }
  }
}
Example #7
Source File: ShuffleClientMetrics.java From incubator-tez with Apache License 2.0 | 6 votes |
/**
 * Sets up the shuffle-input metrics record for one task attempt and
 * registers this instance as a periodic updater.
 *
 * @param dagName    DAG the task belongs to
 * @param vertexName vertex the task belongs to
 * @param taskIndex  index of the task within the vertex
 * @param conf       configuration supplying copier count and session id
 * @param user       user the record is tagged with
 */
ShuffleClientMetrics(String dagName, String vertexName, int taskIndex,
    Configuration conf, String user) {
  this.numCopiers = conf.getInt(
      TezJobConfig.TEZ_RUNTIME_SHUFFLE_PARALLEL_COPIES,
      TezJobConfig.TEZ_RUNTIME_SHUFFLE_PARALLEL_COPIES_DEFAULT);
  MetricsContext ctx = MetricsUtil.getContext(Constants.TEZ);
  this.shuffleMetrics = MetricsUtil.createRecord(ctx, "shuffleInput");
  this.shuffleMetrics.setTag("user", user);
  this.shuffleMetrics.setTag("dagName", dagName);
  this.shuffleMetrics.setTag("taskId",
      TezRuntimeUtils.getTaskIdentifier(vertexName, taskIndex));
  this.shuffleMetrics.setTag("sessionId", conf.get(
      TezRuntimeFrameworkConfigs.TEZ_RUNTIME_METRICS_SESSION_ID,
      TezRuntimeFrameworkConfigs.TEZ_RUNTIME_METRICS_SESSION_ID_DEFAULT));
  ctx.registerUpdater(this);
}
Example #8
Source File: HighTideNodeMetrics.java From RDFS with Apache License 2.0 | 6 votes |
public HighTideNodeMetrics(Configuration conf, HighTideNode hightideNode) { String sessionId = conf.get("session.id"); // Initiate Java VM metrics JvmMetrics.init("HighTideNode", sessionId); // Now the Mbean for the name node - this also registers the MBean hightidenodeActivityMBean = new HighTideNodeActivityMBean(registry); // Create a record for HighTideNode metrics MetricsContext metricsContext = MetricsUtil.getContext("dfs"); metricsRecord = MetricsUtil.createRecord(metricsContext, "hightidenode"); metricsRecord.setTag("sessionId", sessionId); metricsContext.registerUpdater(this); LOG.info("Initializing HighTideNodeMetrics using context object:" + metricsContext.getClass().getName()); }
Example #9
Source File: SepMetrics.java From hbase-indexer with Apache License 2.0 | 5 votes |
/**
 * Creates SEP processing metrics under the "repository" context and exposes
 * them through an MXBean.
 *
 * @param recordName name of the metrics record to create
 */
public SepMetrics(String recordName) {
  this.recordName = recordName;
  metricsRegistry = new MetricsRegistry();
  sepProcessingRate = new MetricsTimeVaryingRate("sepProcessed", metricsRegistry);
  lastTimestampInputProcessed = new MetricsLongValue("lastSepTimestamp", metricsRegistry);
  context = MetricsUtil.getContext("repository");
  metricsRecord = MetricsUtil.createRecord(context, recordName);
  context.registerUpdater(this);
  // MXBean mirrors the registry for JMX consumers.
  mbean = new SepMetricsMXBean(this.metricsRegistry);
}
Example #10
Source File: LocalJobRunnerMetrics.java From hadoop with Apache License 2.0 | 5 votes |
public LocalJobRunnerMetrics(JobConf conf) { String sessionId = conf.getSessionId(); // Initiate JVM Metrics JvmMetrics.init("JobTracker", sessionId); // Create a record for map-reduce metrics MetricsContext context = MetricsUtil.getContext("mapred"); // record name is jobtracker for compatibility metricsRecord = MetricsUtil.createRecord(context, "jobtracker"); metricsRecord.setTag("sessionId", sessionId); context.registerUpdater(this); }
Example #11
Source File: JvmMetrics.java From RDFS with Apache License 2.0 | 5 votes |
/**
 * Creates a new instance of JvmMetrics, building a record in the "jvm"
 * context tagged with the process name and session id, and registering
 * this object for periodic updates.
 *
 * @param processName name of the owning process, used as a tag
 * @param sessionId   session identifier, used as a tag
 * @param recordName  name of the metrics record to create
 */
private JvmMetrics(String processName, String sessionId, String recordName) {
  MetricsContext jvmContext = MetricsUtil.getContext("jvm");
  metrics = MetricsUtil.createRecord(jvmContext, recordName);
  metrics.setTag("processName", processName);
  metrics.setTag("sessionId", sessionId);
  jvmContext.registerUpdater(this);
}
Example #12
Source File: RpcMetrics.java From RDFS with Apache License 2.0 | 5 votes |
/**
 * Initializes RPC metrics for one server: the "metrics" record in the "rpc"
 * context (tagged with the port) plus the RPC activity MBean.
 *
 * @param hostName host the server runs on (used for the MBean and logging)
 * @param port     listening port, used as a record tag
 * @param server   the RPC server being instrumented
 */
public RpcMetrics(String hostName, String port, Server server) {
  myServer = server;
  context = MetricsUtil.getContext("rpc");
  metricsRecord = MetricsUtil.createRecord(context, "metrics");
  metricsRecord.setTag("port", port);
  LOG.info("Initializing RPC Metrics with hostName=" + hostName
      + ", port=" + port);
  context.registerUpdater(this);
  // Need to clean up the interface to RpcMgt - don't need both metrics and
  // server params.
  rpcMBean = new RpcActivityMBean(registry, hostName, port);
}
Example #13
Source File: DFSClientMetrics.java From RDFS with Apache License 2.0 | 5 votes |
/**
 * Creates the DFS client metrics record under the "hdfsclient" context and
 * registers this instance as a periodic updater.
 */
public DFSClientMetrics() {
  MetricsContext ctx = MetricsUtil.getContext("hdfsclient");
  metricsRecord = MetricsUtil.createRecord(ctx, "DFSClient");
  ctx.registerUpdater(this);
}
Example #14
Source File: LookasideMetrics.java From RDFS with Apache License 2.0 | 5 votes |
/**
 * Creates the LookasideCache metrics record under the "lookasideCache"
 * context and registers this instance as a periodic updater.
 */
public LookasideMetrics() {
  MetricsContext cacheContext = MetricsUtil.getContext("lookasideCache");
  metricsRecord = MetricsUtil.createRecord(cacheContext, "LookasideFileSystem");
  cacheContext.registerUpdater(this);
}
Example #15
Source File: DataNodeMetrics.java From RDFS with Apache License 2.0 | 5 votes |
public DataNodeMetrics(Configuration conf, String storageId) { String sessionId = conf.get("session.id"); // Initiate reporting of Java VM metrics JvmMetrics.init("DataNode", sessionId); // Now the MBean for the data node datanodeActivityMBean = new DataNodeActivityMBean(registry, storageId); // Create record for DataNode metrics MetricsContext context = MetricsUtil.getContext("dfs"); metricsRecord = MetricsUtil.createRecord(context, "datanode"); metricsRecord.setTag("sessionId", sessionId); context.registerUpdater(this); }
Example #16
Source File: FairSchedulerMetricsInst.java From RDFS with Apache License 2.0 | 5 votes |
/**
 * Initializes fair-scheduler metrics: the "fairscheduler" record, the
 * per-pool record cache, and the configurable update period.
 *
 * @param scheduler scheduler whose job initializer is captured
 * @param conf      configuration supplying the metric update period
 */
public FairSchedulerMetricsInst(FairScheduler scheduler, Configuration conf) {
  metricsRecord = MetricsUtil.createRecord(context, "fairscheduler");
  poolToMetricsRecord = new HashMap<String, MetricsRecord>();
  context.registerUpdater(this);
  // Update period defaults to 5 seconds.
  updatePeriod = conf.getLong("mapred.fairscheduler.metric.update.period",
      5 * 1000);
  jobInitializer = scheduler.getJobInitializer();
}
Example #17
Source File: JobTrackerMetricsInst.java From hadoop-gpu with Apache License 2.0 | 5 votes |
/**
 * Initializes JobTracker metrics: JVM metric reporting plus the
 * "jobtracker" record in the "mapred" context, tagged with the session id.
 *
 * @param tracker the JobTracker being instrumented
 * @param conf    job configuration supplying the session id
 */
public JobTrackerMetricsInst(JobTracker tracker, JobConf conf) {
  super(tracker, conf);
  String sessionId = conf.getSessionId();
  // Start JVM metric reporting for this process.
  JvmMetrics.init("JobTracker", sessionId);
  MetricsContext mapredContext = MetricsUtil.getContext("mapred");
  metricsRecord = MetricsUtil.createRecord(mapredContext, "jobtracker");
  metricsRecord.setTag("sessionId", sessionId);
  mapredContext.registerUpdater(this);
}
Example #18
Source File: TaskTrackerMetricsInst.java From hadoop-gpu with Apache License 2.0 | 5 votes |
/**
 * Initializes TaskTracker metrics: JVM metric reporting plus the
 * "tasktracker" record in the "mapred" context, tagged with the session id.
 *
 * @param t the TaskTracker being instrumented
 */
public TaskTrackerMetricsInst(TaskTracker t) {
  super(t);
  JobConf jobConf = tt.getJobConf();
  String sessionId = jobConf.getSessionId();
  // Start Java VM metric reporting for this process.
  JvmMetrics.init("TaskTracker", sessionId);
  MetricsContext mapredContext = MetricsUtil.getContext("mapred");
  // createRecord is guaranteed never to return null.
  metricsRecord = MetricsUtil.createRecord(mapredContext, "tasktracker");
  metricsRecord.setTag("sessionId", sessionId);
  mapredContext.registerUpdater(this);
}
Example #19
Source File: ReduceTask.java From hadoop-gpu with Apache License 2.0 | 5 votes |
/**
 * Creates the shuffle-input metrics record for this reduce task, tagging it
 * with user, job, and task identifiers, and registers for updates.
 *
 * @param conf job configuration supplying the tag values
 */
ShuffleClientMetrics(JobConf conf) {
  MetricsContext ctx = MetricsUtil.getContext("mapred");
  this.shuffleMetrics = MetricsUtil.createRecord(ctx, "shuffleInput");
  this.shuffleMetrics.setTag("user", conf.getUser());
  this.shuffleMetrics.setTag("jobName", conf.getJobName());
  this.shuffleMetrics.setTag("jobId", ReduceTask.this.getJobID().toString());
  this.shuffleMetrics.setTag("taskId", getTaskID().toString());
  this.shuffleMetrics.setTag("sessionId", conf.getSessionId());
  ctx.registerUpdater(this);
}
Example #20
Source File: TaskTracker.java From hadoop-gpu with Apache License 2.0 | 5 votes |
/**
 * Creates the shuffle-output metrics record under the "mapred" context,
 * tagged with the session id, and registers for periodic updates.
 *
 * @param conf job configuration supplying the session id
 */
ShuffleServerMetrics(JobConf conf) {
  MetricsContext mapredContext = MetricsUtil.getContext("mapred");
  shuffleMetricsRecord = MetricsUtil.createRecord(mapredContext, "shuffleOutput");
  this.shuffleMetricsRecord.setTag("sessionId", conf.getSessionId());
  mapredContext.registerUpdater(this);
}
Example #21
Source File: JvmMetrics.java From hadoop-gpu with Apache License 2.0 | 5 votes |
/**
 * Creates a new instance of JvmMetrics: a record in the "jvm" context,
 * tagged with process name and session id, registered for updates.
 *
 * @param processName name of the owning process, used as a tag
 * @param sessionId   session identifier, used as a tag
 * @param recordName  name of the metrics record to create
 */
private JvmMetrics(String processName, String sessionId, String recordName) {
  MetricsContext jvmContext = MetricsUtil.getContext("jvm");
  metrics = MetricsUtil.createRecord(jvmContext, recordName);
  metrics.setTag("processName", processName);
  metrics.setTag("sessionId", sessionId);
  jvmContext.registerUpdater(this);
}
Example #22
Source File: RpcMetrics.java From hadoop-gpu with Apache License 2.0 | 5 votes |
/**
 * Initializes RPC metrics for one server: the "metrics" record in the "rpc"
 * context (tagged with the port) plus the RPC activity MBean.
 *
 * @param hostName host the server runs on (used for the MBean and logging)
 * @param port     listening port, used as a record tag
 * @param server   the RPC server being instrumented
 */
public RpcMetrics(String hostName, String port, Server server) {
  myServer = server;
  MetricsContext rpcContext = MetricsUtil.getContext("rpc");
  metricsRecord = MetricsUtil.createRecord(rpcContext, "metrics");
  metricsRecord.setTag("port", port);
  LOG.info("Initializing RPC Metrics with hostName=" + hostName
      + ", port=" + port);
  rpcContext.registerUpdater(this);
  // Need to clean up the interface to RpcMgt - don't need both metrics and
  // server params.
  rpcMBean = new RpcActivityMBean(registry, hostName, port);
}
Example #23
Source File: DataNodeMetrics.java From hadoop-gpu with Apache License 2.0 | 5 votes |
public DataNodeMetrics(Configuration conf, String storageId) { String sessionId = conf.get("session.id"); // Initiate reporting of Java VM metrics JvmMetrics.init("DataNode", sessionId); // Now the MBean for the data node datanodeActivityMBean = new DataNodeActivityMBean(registry, storageId); // Create record for DataNode metrics MetricsContext context = MetricsUtil.getContext("dfs"); metricsRecord = MetricsUtil.createRecord(context, "datanode"); metricsRecord.setTag("sessionId", sessionId); context.registerUpdater(this); }
Example #24
Source File: ShuffleClientMetrics.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Creates the shuffle-input metrics record for one reduce attempt, tagging
 * it with user, job, and task identifiers, and registers for updates.
 *
 * @param reduceId the reduce task attempt being instrumented
 * @param jobConf  configuration supplying the copier count and tag values
 */
ShuffleClientMetrics(TaskAttemptID reduceId, JobConf jobConf) {
  this.numCopiers = jobConf.getInt(MRJobConfig.SHUFFLE_PARALLEL_COPIES, 5);
  MetricsContext ctx = MetricsUtil.getContext("mapred");
  this.shuffleMetrics = MetricsUtil.createRecord(ctx, "shuffleInput");
  this.shuffleMetrics.setTag("user", jobConf.getUser());
  this.shuffleMetrics.setTag("jobName", jobConf.getJobName());
  this.shuffleMetrics.setTag("jobId", reduceId.getJobID().toString());
  this.shuffleMetrics.setTag("taskId", reduceId.toString());
  this.shuffleMetrics.setTag("sessionId", jobConf.getSessionId());
  ctx.registerUpdater(this);
}
Example #25
Source File: ShuffleClientMetrics.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Creates the shuffle-input metrics record for one reduce attempt, tagging
 * it with user, job, and task identifiers, and registers for updates.
 *
 * @param reduceId the reduce task attempt being instrumented
 * @param jobConf  configuration supplying the copier count and tag values
 */
ShuffleClientMetrics(TaskAttemptID reduceId, JobConf jobConf) {
  // Parallel copier count; default of 5 matches the historical behavior.
  this.numCopiers = jobConf.getInt(MRJobConfig.SHUFFLE_PARALLEL_COPIES, 5);
  MetricsContext shuffleContext = MetricsUtil.getContext("mapred");
  this.shuffleMetrics = MetricsUtil.createRecord(shuffleContext, "shuffleInput");
  this.shuffleMetrics.setTag("user", jobConf.getUser());
  this.shuffleMetrics.setTag("jobName", jobConf.getJobName());
  this.shuffleMetrics.setTag("jobId", reduceId.getJobID().toString());
  this.shuffleMetrics.setTag("taskId", reduceId.toString());
  this.shuffleMetrics.setTag("sessionId", jobConf.getSessionId());
  shuffleContext.registerUpdater(this);
}
Example #26
Source File: LocalJobRunnerMetrics.java From big-c with Apache License 2.0 | 5 votes |
public LocalJobRunnerMetrics(JobConf conf) { String sessionId = conf.getSessionId(); // Initiate JVM Metrics JvmMetrics.init("JobTracker", sessionId); // Create a record for map-reduce metrics MetricsContext context = MetricsUtil.getContext("mapred"); // record name is jobtracker for compatibility metricsRecord = MetricsUtil.createRecord(context, "jobtracker"); metricsRecord.setTag("sessionId", sessionId); context.registerUpdater(this); }
Example #27
Source File: JvmMetrics.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Creates a new instance of JvmMetrics: a record in the "jvm" context,
 * tagged with process name and session id, registered for updates.
 *
 * @param processName name of the owning process, used as a tag
 * @param sessionId   session identifier, used as a tag
 * @param recordName  name of the metrics record to create
 */
private JvmMetrics(String processName, String sessionId, String recordName) {
  MetricsContext jvmContext = MetricsUtil.getContext("jvm");
  metrics = MetricsUtil.createRecord(jvmContext, recordName);
  metrics.setTag("processName", processName);
  metrics.setTag("sessionId", sessionId);
  jvmContext.registerUpdater(this);
}
Example #28
Source File: ClusterManagerMetricsVerifier.java From RDFS with Apache License 2.0 | 5 votes |
/**
 * Forces a metrics update and asserts that the named metric in the cluster
 * manager's record equals the expected value.
 *
 * @param name        metric name to check
 * @param expectValue expected integer value
 * @throws Exception if the metrics lookup fails
 */
private void verifyMetrics(String name, int expectValue) throws Exception {
  MetricsContext ctx =
      MetricsUtil.getContext(ClusterManagerMetrics.CONTEXT_NAME);
  cm.metrics.doUpdates(ctx);
  OutputRecord rec = ctx.getAllRecords()
      .get(ClusterManagerMetrics.CONTEXT_NAME).iterator().next();
  Assert.assertEquals(expectValue, rec.getMetric(name).intValue());
}
Example #29
Source File: JobTrackerMetricsInst.java From RDFS with Apache License 2.0 | 5 votes |
/**
 * Initializes JobTracker metrics: JVM metric reporting plus the
 * "jobtracker" record in the "mapred" context, tagged with the session id.
 *
 * @param tracker the JobTracker being instrumented
 * @param conf    job configuration supplying the session id
 */
public JobTrackerMetricsInst(JobTracker tracker, JobConf conf) {
  super(tracker, conf);
  String sessionId = conf.getSessionId();
  // Start JVM metric reporting for this process.
  JvmMetrics.init("JobTracker", sessionId);
  MetricsContext mapredContext = MetricsUtil.getContext("mapred");
  metricsRecord = MetricsUtil.createRecord(mapredContext, "jobtracker");
  metricsRecord.setTag("sessionId", sessionId);
  mapredContext.registerUpdater(this);
}
Example #30
Source File: TaskTrackerMetricsInst.java From RDFS with Apache License 2.0 | 5 votes |
/**
 * Initializes TaskTracker metrics: JVM metric reporting plus the
 * "tasktracker" record in the "mapred" context, tagged with the session id.
 *
 * @param t the TaskTracker being instrumented
 */
public TaskTrackerMetricsInst(TaskTracker t) {
  super(t);
  JobConf jobConf = tt.getJobConf();
  String sessionId = jobConf.getSessionId();
  // Start Java VM metric reporting for this process.
  JvmMetrics.init("TaskTracker", sessionId);
  MetricsContext mapredContext = MetricsUtil.getContext("mapred");
  // createRecord is guaranteed never to return null.
  metricsRecord = MetricsUtil.createRecord(mapredContext, "tasktracker");
  metricsRecord.setTag("sessionId", sessionId);
  mapredContext.registerUpdater(this);
}