org.apache.hadoop.mapreduce.TaskReport Java Examples
The following examples show how to use
org.apache.hadoop.mapreduce.TaskReport.
Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
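
Before the project examples, here is a minimal standalone sketch of the typical way TaskReport objects are obtained and inspected through the mapreduce client API. The class name and the placeholder job ID below are illustrative assumptions, not taken from any of the projects listed on this page.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskReport;
import org.apache.hadoop.mapreduce.TaskType;

public class TaskReportDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Cluster cluster = new Cluster(conf);
    // "job_1234567890123_0001" is a placeholder job ID; substitute a real one.
    Job job = cluster.getJob(JobID.forName("job_1234567890123_0001"));
    if (job == null) {
      System.err.println("Job not found");
      return;
    }
    // One TaskReport per map task: status, progress, and per-task details.
    for (TaskReport report : job.getTaskReports(TaskType.MAP)) {
      System.out.println(report.getTaskID() + " " + report.getCurrentStatus()
          + " " + report.getProgress());
    }
  }
}

Each TaskReport also exposes getTaskCounters() and getDiagnostics(), which Examples #1 and #6 below use for per-task counters and failure diagnostics.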
Example #1
Source File: IndexerJobDriver.java From incubator-retired-blur with Apache License 2.0 | 6 votes |
PartitionedInputResult(Path partitionedInputData, Counters counters, int shards, TaskReport[] taskReports) {
  _partitionedInputData = partitionedInputData;
  _counters = counters;
  _rowIdsFromNewData = new long[shards];
  _rowIdsToUpdateFromNewData = new long[shards];
  _rowIdsFromIndex = new long[shards];
  for (TaskReport tr : taskReports) {
    int id = tr.getTaskID().getId();
    Counters taskCounters = tr.getTaskCounters();
    Counter total = taskCounters.findCounter(BlurIndexCounter.ROW_IDS_FROM_NEW_DATA);
    _rowIdsFromNewData[id] = total.getValue();
    Counter update = taskCounters.findCounter(BlurIndexCounter.ROW_IDS_TO_UPDATE_FROM_NEW_DATA);
    _rowIdsToUpdateFromNewData[id] = update.getValue();
    Counter index = taskCounters.findCounter(BlurIndexCounter.ROW_IDS_FROM_INDEX);
    _rowIdsFromIndex[id] = index.getValue();
  }
}
Example #2
Source File: MRJobStatus.java From sequenceiq-samples with Apache License 2.0 | 6 votes |
public JobStatus printJobStatus(YARNRunner yarnRunner, JobID jobID) throws IOException, InterruptedException {
  JobStatus jobStatus;
  jobStatus = yarnRunner.getJobStatus(jobID);
  // print overall job M/R progress
  LOGGER.info("\nJob " + jobStatus.getJobName() + " in queue (" + jobStatus.getQueue() + ")"
      + " progress M/R: " + jobStatus.getMapProgress() + "/" + jobStatus.getReduceProgress());
  LOGGER.info("Tracking URL : " + jobStatus.getTrackingUrl());
  LOGGER.info("Reserved memory : " + jobStatus.getReservedMem() + ", used memory : " + jobStatus.getUsedMem()
      + " and used slots : " + jobStatus.getNumUsedSlots());
  // list map & reduce task statuses and progress
  TaskReport[] reports = yarnRunner.getTaskReports(jobID, TaskType.MAP);
  for (int i = 0; i < reports.length; i++) {
    LOGGER.info("MAP: Status " + reports[i].getCurrentStatus() + " with task ID " + reports[i].getTaskID()
        + ", and progress " + reports[i].getProgress());
  }
  reports = yarnRunner.getTaskReports(jobID, TaskType.REDUCE);
  for (int i = 0; i < reports.length; i++) {
    LOGGER.info("REDUCE: " + reports[i].getCurrentStatus() + " with task ID " + reports[i].getTaskID()
        + ", and progress " + reports[i].getProgress());
  }
  return jobStatus;
}
Example #3
Source File: CLI.java From big-c with Apache License 2.0 | 6 votes |
/**
 * Display the information about a job's tasks, of a particular type and
 * in a particular state
 *
 * @param job the job
 * @param type the type of the task (map/reduce/setup/cleanup)
 * @param state the state of the task
 *              (pending/running/completed/failed/killed)
 */
protected void displayTasks(Job job, String type, String state)
    throws IOException, InterruptedException {
  TaskReport[] reports = job.getTaskReports(TaskType.valueOf(
      org.apache.hadoop.util.StringUtils.toUpperCase(type)));
  for (TaskReport report : reports) {
    TIPStatus status = report.getCurrentStatus();
    if ((state.equalsIgnoreCase("pending") && status == TIPStatus.PENDING) ||
        (state.equalsIgnoreCase("running") && status == TIPStatus.RUNNING) ||
        (state.equalsIgnoreCase("completed") && status == TIPStatus.COMPLETE) ||
        (state.equalsIgnoreCase("failed") && status == TIPStatus.FAILED) ||
        (state.equalsIgnoreCase("killed") && status == TIPStatus.KILLED)) {
      printTaskAttempts(report);
    }
  }
}
Example #4
Source File: CLI.java From hadoop with Apache License 2.0 | 6 votes |
/**
 * Display the information about a job's tasks, of a particular type and
 * in a particular state
 *
 * @param job the job
 * @param type the type of the task (map/reduce/setup/cleanup)
 * @param state the state of the task
 *              (pending/running/completed/failed/killed)
 */
protected void displayTasks(Job job, String type, String state)
    throws IOException, InterruptedException {
  TaskReport[] reports = job.getTaskReports(TaskType.valueOf(
      org.apache.hadoop.util.StringUtils.toUpperCase(type)));
  for (TaskReport report : reports) {
    TIPStatus status = report.getCurrentStatus();
    if ((state.equalsIgnoreCase("pending") && status == TIPStatus.PENDING) ||
        (state.equalsIgnoreCase("running") && status == TIPStatus.RUNNING) ||
        (state.equalsIgnoreCase("completed") && status == TIPStatus.COMPLETE) ||
        (state.equalsIgnoreCase("failed") && status == TIPStatus.FAILED) ||
        (state.equalsIgnoreCase("killed") && status == TIPStatus.KILLED)) {
      printTaskAttempts(report);
    }
  }
}
Example #5
Source File: JobClientUnitTest.java From hadoop with Apache License 2.0 | 5 votes |
@Test
public void testMapTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test", 0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getMapTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Example #6
Source File: UpdateColumnJob.java From indexr with Apache License 2.0 | 5 votes |
public boolean doRun(Config upcolConfig) throws Exception {
  JobConf jobConf = new JobConf(getConf(), UpdateColumnJob.class);
  jobConf.setKeepFailedTaskFiles(false);
  jobConf.setNumReduceTasks(0);
  String jobName = String.format("indexr-upcol-%s-%s-%s",
      upcolConfig.table,
      LocalDateTime.now().format(timeFormatter),
      RandomStringUtils.randomAlphabetic(5));
  jobConf.setJobName(jobName);
  jobConf.set(CONFKEY, JsonUtil.toJson(upcolConfig));
  Path workDir = new Path(jobConf.getWorkingDirectory(), jobName);
  jobConf.setWorkingDirectory(workDir);

  Job job = Job.getInstance(jobConf);
  job.setInputFormatClass(SegmentInputFormat.class);
  job.setMapperClass(UpColSegmentMapper.class);
  job.setJarByClass(UpdateColumnJob.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setMapSpeculativeExecution(false);
  job.setOutputFormatClass(UpColSegmentOutputFormat.class);

  job.submit();
  boolean ok = job.waitForCompletion(true);
  if (!ok) {
    TaskReport[] reports = job.getTaskReports(TaskType.MAP);
    if (reports != null) {
      for (TaskReport report : reports) {
        log.error("Error in task [%s] : %s", report.getTaskId(), Arrays.toString(report.getDiagnostics()));
      }
    }
  }
  return ok;
}
Example #7
Source File: CLI.java From big-c with Apache License 2.0 | 5 votes |
private void printTaskAttempts(TaskReport report) {
  if (report.getCurrentStatus() == TIPStatus.COMPLETE) {
    System.out.println(report.getSuccessfulTaskAttemptId());
  } else if (report.getCurrentStatus() == TIPStatus.RUNNING) {
    for (TaskAttemptID t : report.getRunningTaskAttemptIds()) {
      System.out.println(t);
    }
  }
}
Example #8
Source File: JobClientUnitTest.java From big-c with Apache License 2.0 | 5 votes |
@Test
public void testCleanupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test", 0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getCleanupTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Example #9
Source File: JobClientUnitTest.java From big-c with Apache License 2.0 | 5 votes |
@Test
public void testSetupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test", 0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getSetupTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Example #10
Source File: JobClientUnitTest.java From big-c with Apache License 2.0 | 5 votes |
@Test
public void testReduceTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test", 0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getReduceTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Example #11
Source File: JobClientUnitTest.java From big-c with Apache License 2.0 | 5 votes |
@Test
public void testMapTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test", 0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getMapTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Example #12
Source File: CLI.java From hadoop with Apache License 2.0 | 5 votes |
private void printTaskAttempts(TaskReport report) {
  if (report.getCurrentStatus() == TIPStatus.COMPLETE) {
    System.out.println(report.getSuccessfulTaskAttemptId());
  } else if (report.getCurrentStatus() == TIPStatus.RUNNING) {
    for (TaskAttemptID t : report.getRunningTaskAttemptIds()) {
      System.out.println(t);
    }
  }
}
Example #13
Source File: JobClientUnitTest.java From hadoop with Apache License 2.0 | 5 votes |
@Test
public void testCleanupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test", 0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getCleanupTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Example #14
Source File: JobClientUnitTest.java From hadoop with Apache License 2.0 | 5 votes |
@Test
public void testSetupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test", 0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getSetupTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Example #15
Source File: JobClientUnitTest.java From hadoop with Apache License 2.0 | 5 votes |
@Test
public void testReduceTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test", 0);

  when(mockCluster.getJob(id)).thenReturn(null);

  TaskReport[] result = client.getReduceTaskReports(id);
  assertEquals(0, result.length);

  verify(mockCluster).getJob(id);
}
Example #16
Source File: YARNRunner.java From tez with Apache License 2.0 | 4 votes |
@Override
public TaskReport[] getTaskReports(JobID jobID, TaskType taskType)
    throws IOException, InterruptedException {
  return clientCache.getClient(jobID)
      .getTaskReports(jobID, taskType);
}
Example #17
Source File: YARNRunner.java From big-c with Apache License 2.0 | 4 votes |
@Override
public TaskReport[] getTaskReports(JobID jobID, TaskType taskType)
    throws IOException, InterruptedException {
  return clientCache.getClient(jobID)
      .getTaskReports(jobID, taskType);
}
Example #18
Source File: ClientServiceDelegate.java From tez with Apache License 2.0 | 4 votes |
public TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
    throws IOException {
  // TEZ-146: need to return real task reports
  return new TaskReport[0];
}
Example #19
Source File: YARNRunner.java From incubator-tez with Apache License 2.0 | 4 votes |
@Override
public TaskReport[] getTaskReports(JobID jobID, TaskType taskType)
    throws IOException, InterruptedException {
  return clientCache.getClient(jobID)
      .getTaskReports(jobID, taskType);
}
Example #20
Source File: ClientServiceDelegate.java From incubator-tez with Apache License 2.0 | 4 votes |
public TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
    throws IOException {
  // TEZ-146: need to return real task reports
  return new TaskReport[0];
}
Example #21
Source File: MockSimulatorJobTracker.java From RDFS with Apache License 2.0 | 4 votes |
@Override
public TaskReport[] getTaskReports(JobID jobid, TaskType type)
    throws IOException, InterruptedException {
  throw new UnsupportedOperationException();
}
Example #22
Source File: YARNRunner.java From hadoop with Apache License 2.0 | 4 votes |
@Override
public TaskReport[] getTaskReports(JobID jobID, TaskType taskType)
    throws IOException, InterruptedException {
  return clientCache.getClient(jobID)
      .getTaskReports(jobID, taskType);
}
Example #23
Source File: HadoopClientProtocol.java From ignite with Apache License 2.0 | 4 votes |
/** {@inheritDoc} */
@Override
public TaskReport[] getTaskReports(JobID jobid, TaskType type)
    throws IOException, InterruptedException {
  return new TaskReport[0];
}
Example #24
Source File: TestCLI.java From big-c with Apache License 2.0 | 4 votes |
private TaskReport[] getTaskReports(JobID jobId, TaskType type) {
  return new TaskReport[] { new TaskReport(), new TaskReport() };
}
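
In a CLI test, a stub like the one above is typically handed to a mocked Job so the command-line code has reports to iterate over. The wiring below is an illustrative Mockito sketch under that assumption; the test class name is hypothetical and the setup is not copied from TestCLI.

import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskReport;
import org.apache.hadoop.mapreduce.TaskType;
import org.junit.Test;

public class TaskReportStubTest {
  @Test
  public void mockedJobReturnsFabricatedReports() throws Exception {
    Job mockJob = mock(Job.class);
    // Hypothetical wiring: any request for MAP task reports gets two empty reports.
    when(mockJob.getTaskReports(TaskType.MAP))
        .thenReturn(new TaskReport[] { new TaskReport(), new TaskReport() });

    // Code under test that asks mockJob for MAP reports now receives the stubs.
    assertEquals(2, mockJob.getTaskReports(TaskType.MAP).length);
  }
}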
Example #25
Source File: JobClientUnitTest.java From hadoop with Apache License 2.0 | 4 votes |
@Test
public void testShowJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());

  long startTime = System.currentTimeMillis();

  JobID jobID = new JobID(String.valueOf(startTime), 12345);

  JobStatus mockJobStatus = mock(JobStatus.class);
  when(mockJobStatus.getJobID()).thenReturn(jobID);
  when(mockJobStatus.getJobName()).thenReturn(jobID.toString());
  when(mockJobStatus.getState()).thenReturn(JobStatus.State.RUNNING);
  when(mockJobStatus.getStartTime()).thenReturn(startTime);
  when(mockJobStatus.getUsername()).thenReturn("mockuser");
  when(mockJobStatus.getQueue()).thenReturn("mockqueue");
  when(mockJobStatus.getPriority()).thenReturn(JobPriority.NORMAL);
  when(mockJobStatus.getNumUsedSlots()).thenReturn(1);
  when(mockJobStatus.getNumReservedSlots()).thenReturn(1);
  when(mockJobStatus.getUsedMem()).thenReturn(1024);
  when(mockJobStatus.getReservedMem()).thenReturn(512);
  when(mockJobStatus.getNeededMem()).thenReturn(2048);
  when(mockJobStatus.getSchedulingInfo()).thenReturn("NA");

  Job mockJob = mock(Job.class);
  when(mockJob.getTaskReports(isA(TaskType.class))).thenReturn(new TaskReport[5]);

  Cluster mockCluster = mock(Cluster.class);
  when(mockCluster.getJob(jobID)).thenReturn(mockJob);

  client.setCluster(mockCluster);

  ByteArrayOutputStream out = new ByteArrayOutputStream();
  client.displayJobList(new JobStatus[] {mockJobStatus}, new PrintWriter(out));
  String commandLineOutput = out.toString();
  System.out.println(commandLineOutput);
  Assert.assertTrue(commandLineOutput.contains("Total jobs:1"));

  verify(mockJobStatus, atLeastOnce()).getJobID();
  verify(mockJobStatus).getState();
  verify(mockJobStatus).getStartTime();
  verify(mockJobStatus).getUsername();
  verify(mockJobStatus).getQueue();
  verify(mockJobStatus).getPriority();
  verify(mockJobStatus).getNumUsedSlots();
  verify(mockJobStatus).getNumReservedSlots();
  verify(mockJobStatus).getUsedMem();
  verify(mockJobStatus).getReservedMem();
  verify(mockJobStatus).getNeededMem();
  verify(mockJobStatus).getSchedulingInfo();

  // This call should not go to each AM.
  verify(mockCluster, never()).getJob(jobID);
  verify(mockJob, never()).getTaskReports(isA(TaskType.class));
}
Example #26
Source File: JobClientUnitTest.java From big-c with Apache License 2.0 | 4 votes |
@Test
public void testShowJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());

  long startTime = System.currentTimeMillis();

  JobID jobID = new JobID(String.valueOf(startTime), 12345);

  JobStatus mockJobStatus = mock(JobStatus.class);
  when(mockJobStatus.getJobID()).thenReturn(jobID);
  when(mockJobStatus.getJobName()).thenReturn(jobID.toString());
  when(mockJobStatus.getState()).thenReturn(JobStatus.State.RUNNING);
  when(mockJobStatus.getStartTime()).thenReturn(startTime);
  when(mockJobStatus.getUsername()).thenReturn("mockuser");
  when(mockJobStatus.getQueue()).thenReturn("mockqueue");
  when(mockJobStatus.getPriority()).thenReturn(JobPriority.NORMAL);
  when(mockJobStatus.getNumUsedSlots()).thenReturn(1);
  when(mockJobStatus.getNumReservedSlots()).thenReturn(1);
  when(mockJobStatus.getUsedMem()).thenReturn(1024);
  when(mockJobStatus.getReservedMem()).thenReturn(512);
  when(mockJobStatus.getNeededMem()).thenReturn(2048);
  when(mockJobStatus.getSchedulingInfo()).thenReturn("NA");

  Job mockJob = mock(Job.class);
  when(mockJob.getTaskReports(isA(TaskType.class))).thenReturn(new TaskReport[5]);

  Cluster mockCluster = mock(Cluster.class);
  when(mockCluster.getJob(jobID)).thenReturn(mockJob);

  client.setCluster(mockCluster);

  ByteArrayOutputStream out = new ByteArrayOutputStream();
  client.displayJobList(new JobStatus[] {mockJobStatus}, new PrintWriter(out));
  String commandLineOutput = out.toString();
  System.out.println(commandLineOutput);
  Assert.assertTrue(commandLineOutput.contains("Total jobs:1"));

  verify(mockJobStatus, atLeastOnce()).getJobID();
  verify(mockJobStatus).getState();
  verify(mockJobStatus).getStartTime();
  verify(mockJobStatus).getUsername();
  verify(mockJobStatus).getQueue();
  verify(mockJobStatus).getPriority();
  verify(mockJobStatus).getNumUsedSlots();
  verify(mockJobStatus).getNumReservedSlots();
  verify(mockJobStatus).getUsedMem();
  verify(mockJobStatus).getReservedMem();
  verify(mockJobStatus).getNeededMem();
  verify(mockJobStatus).getSchedulingInfo();

  // This call should not go to each AM.
  verify(mockCluster, never()).getJob(jobID);
  verify(mockJob, never()).getTaskReports(isA(TaskType.class));
}
Example #27
Source File: TestCLI.java From hadoop with Apache License 2.0 | 4 votes |
private TaskReport[] getTaskReports(JobID jobId, TaskType type) {
  return new TaskReport[] { new TaskReport(), new TaskReport() };
}
Example #28
Source File: ClientProtocol.java From big-c with Apache License 2.0 | 2 votes |
/**
 * Grab a bunch of info on the tasks that make up the job
 */
public TaskReport[] getTaskReports(JobID jobid, TaskType type)
    throws IOException, InterruptedException;
Example #29
Source File: ClientProtocol.java From hadoop with Apache License 2.0 | 2 votes |
/**
 * Grab a bunch of info on the tasks that make up the job
 */
public TaskReport[] getTaskReports(JobID jobid, TaskType type)
    throws IOException, InterruptedException;