org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo Java Examples
The following examples show how to use
org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo.
Each example is taken from an open-source project; the source file and its license are noted above each snippet.
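Before the examples, here is a minimal, self-contained sketch of the usage pattern they all share: open a .jhist job history file, parse it with JobHistoryParser, and read job-level fields off the resulting JobInfo. The class name JobHistoryExample and the command-line path argument are invented for illustration; the API calls themselves are the ones used in the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;

public class JobHistoryExample {
  public static void main(String[] args) throws Exception {
    // Placeholder: pass the location of a real .jhist file as the first argument.
    Path histPath = new Path(args[0]);

    // Parse the history file from the local file system.
    JobHistoryParser parser = new JobHistoryParser(
        FileSystem.getLocal(new Configuration()), histPath);
    JobInfo jobInfo = parser.parse();

    // parse() may return partial results; check for a parse error explicitly.
    if (parser.getParseException() != null) {
      System.err.println("History file parsed with errors: "
          + parser.getParseException());
    }

    // Job-level fields used throughout the examples below.
    System.out.println(jobInfo.getJobId() + " \"" + jobInfo.getJobname()
        + "\": " + jobInfo.getFinishedMaps() + "/" + jobInfo.getTotalMaps()
        + " maps finished");
  }
}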
Example #1
Source File: TestJobHistoryParsing.java From big-c with Apache License 2.0
private long computeFinishedMaps(JobInfo jobInfo, int numMaps,
    int numSuccessfulMaps) {
  if (numMaps == numSuccessfulMaps) {
    return jobInfo.getFinishedMaps();
  }

  long numFinishedMaps = 0;
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
      .getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      ++numFinishedMaps;
    }
  }
  return numFinishedMaps;
}
Example #2
Source File: YarnJobStatsUtility.java From jumbune with GNU Lesser General Public License v3.0
/**
 * This method is responsible for populating the clean up phase details.
 * @return TaskOutputDetails contains the details of the clean up phase.
 */
private PhaseDetails prepareCleanupDetails(JobInfo jobInfo,
    Map<TaskAttemptID, TaskAttemptInfo> tasks) {
  PhaseDetails phaseDetails = new PhaseDetails();
  List<TaskOutputDetails> cleanupTaskOuptputDetails = new ArrayList<TaskOutputDetails>();
  TaskOutputDetails taskOutputDetails = new TaskOutputDetails();
  taskOutputDetails.setTaskType("CLEANUP");
  taskOutputDetails.setTaskID("Cleanup");
  for (Map.Entry<TaskAttemptID, TaskAttemptInfo> task : tasks.entrySet()) {
    TaskAttemptInfo taskAttemptInfo = (TaskAttemptInfo) (task.getValue());
    taskOutputDetails.setLocation(taskAttemptInfo.getHostname());
  }
  long startPoint = getMaxReduceTime(tasks, jobInfo.getSubmitTime());
  taskOutputDetails.setStartPoint(startPoint);
  LOGGER.debug("Clean up start time" + taskOutputDetails.getStartPoint());
  long endPoint = (jobInfo.getFinishTime() - jobInfo.getSubmitTime())
      / CONVERSION_FACTOR_MILLISECS_TO_SECS;
  taskOutputDetails.setEndPoint(endPoint);
  LOGGER.debug("Clean up end time" + taskOutputDetails.getEndPoint());
  taskOutputDetails.setDataFlowRate(0);
  cleanupTaskOuptputDetails.add(taskOutputDetails);
  phaseDetails.setTaskOutputDetails(cleanupTaskOuptputDetails);
  phaseDetails.setAvgDataFlowRate(0);
  return phaseDetails;
}
Example #3
Source File: TestJobHistoryParsing.java From big-c with Apache License 2.0
@Test
public void testFailedJobHistoryWithoutDiagnostics() throws Exception {
  final Path histPath = new Path(getClass().getClassLoader().getResource(
      "job_1393307629410_0001-1393307687476-user-Sleep+job-1393307723835-0-0-FAILED-default-1393307693920.jhist")
      .getFile());
  final FileSystem lfs = FileSystem.getLocal(new Configuration());
  final FSDataInputStream fsdis = lfs.open(histPath);
  try {
    JobHistoryParser parser = new JobHistoryParser(fsdis);
    JobInfo info = parser.parse();
    assertEquals("History parsed jobId incorrectly",
        info.getJobId(), JobID.forName("job_1393307629410_0001"));
    assertEquals("Default diagnostics incorrect ", "", info.getErrorInfo());
  } finally {
    fsdis.close();
  }
}
Example #4
Source File: YarnJobStatsUtility.java From jumbune with GNU Lesser General Public License v3.0
/**
 * This method is responsible for populating the setup phase details.
 * @return TaskOutputDetails contains the details of the set up phase.
 */
private PhaseDetails prepareSetupDetails(JobInfo jobInfo,
    Map<TaskAttemptID, TaskAttemptInfo> tasks) {
  PhaseDetails phaseDetails = new PhaseDetails();
  List<TaskOutputDetails> taskOutputDetails = new ArrayList<TaskOutputDetails>();
  TaskOutputDetails tod;
  tod = new TaskOutputDetails();
  tod.setTaskType("SETUP");
  tod.setTaskID("Setup");
  for (Map.Entry<TaskAttemptID, TaskAttemptInfo> task : tasks.entrySet()) {
    TaskAttemptInfo taskAttemptInfo = (TaskAttemptInfo) (task.getValue());
    tod.setLocation(taskAttemptInfo.getHostname());
  }
  long startPoint = jobInfo.getSubmitTime();
  tod.setStartPoint(0);
  long endPoint = (jobInfo.getLaunchTime() - startPoint)
      / CONVERSION_FACTOR_MILLISECS_TO_SECS;
  tod.setEndPoint(endPoint);
  tod.setDataFlowRate(0);
  taskOutputDetails.add(tod);
  phaseDetails.setTaskOutputDetails(taskOutputDetails);
  phaseDetails.setAvgDataFlowRate(0);
  return phaseDetails;
}
Example #5
Source File: TestJobHistoryParsing.java From hadoop with Apache License 2.0
private long computeFinishedMaps(JobInfo jobInfo, int numMaps,
    int numSuccessfulMaps) {
  if (numMaps == numSuccessfulMaps) {
    return jobInfo.getFinishedMaps();
  }

  long numFinishedMaps = 0;
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
      .getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      ++numFinishedMaps;
    }
  }
  return numFinishedMaps;
}
Example #6
Source File: TestJobHistoryParsing.java From hadoop with Apache License 2.0
@Test
public void testFailedJobHistoryWithoutDiagnostics() throws Exception {
  final Path histPath = new Path(getClass().getClassLoader().getResource(
      "job_1393307629410_0001-1393307687476-user-Sleep+job-1393307723835-0-0-FAILED-default-1393307693920.jhist")
      .getFile());
  final FileSystem lfs = FileSystem.getLocal(new Configuration());
  final FSDataInputStream fsdis = lfs.open(histPath);
  try {
    JobHistoryParser parser = new JobHistoryParser(fsdis);
    JobInfo info = parser.parse();
    assertEquals("History parsed jobId incorrectly",
        info.getJobId(), JobID.forName("job_1393307629410_0001"));
    assertEquals("Default diagnostics incorrect ", "", info.getErrorInfo());
  } finally {
    fsdis.close();
  }
}
Example #7
Source File: TestJobHistoryParsing.java From hadoop with Apache License 2.0
/**
 * Test compatibility of JobHistoryParser with 2.0.3-alpha history files
 * @throws IOException
 */
@Test
public void testTaskAttemptUnsuccessfulCompletionWithoutCounters203()
    throws IOException {
  Path histPath = new Path(getClass().getClassLoader().getResource(
      "job_2.0.3-alpha-FAILED.jhist").getFile());
  JobHistoryParser parser = new JobHistoryParser(
      FileSystem.getLocal(new Configuration()), histPath);
  JobInfo jobInfo = parser.parse();
  LOG.info(" job info: " + jobInfo.getJobname() + " "
      + jobInfo.getFinishedMaps() + " " + jobInfo.getTotalMaps() + " "
      + jobInfo.getJobId());
}
Example #8
Source File: HistoryViewer.java From big-c with Apache License 2.0
/** Apply the filter (status) on the parsed job and generate summary */
public FilteredJob(JobInfo job, String status) {
  filter = status;
  Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
  for (JobHistoryParser.TaskInfo task : tasks.values()) {
    Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
        task.getAllTaskAttempts();
    for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
      if (attempt.getTaskStatus().equals(status)) {
        String hostname = attempt.getHostname();
        TaskID id = attempt.getAttemptId().getTaskID();
        Set<TaskID> set = badNodesToFilteredTasks.get(hostname);
        if (set == null) {
          set = new TreeSet<TaskID>();
          set.add(id);
          badNodesToFilteredTasks.put(hostname, set);
        } else {
          set.add(id);
        }
      }
    }
  }
}
Example #9
Source File: TestJobHistoryParsing.java From big-c with Apache License 2.0
/**
 * Test compatibility of JobHistoryParser with 2.0.3-alpha history files
 * @throws IOException
 */
@Test
public void testTaskAttemptUnsuccessfulCompletionWithoutCounters203()
    throws IOException {
  Path histPath = new Path(getClass().getClassLoader().getResource(
      "job_2.0.3-alpha-FAILED.jhist").getFile());
  JobHistoryParser parser = new JobHistoryParser(
      FileSystem.getLocal(new Configuration()), histPath);
  JobInfo jobInfo = parser.parse();
  LOG.info(" job info: " + jobInfo.getJobname() + " "
      + jobInfo.getFinishedMaps() + " " + jobInfo.getTotalMaps() + " "
      + jobInfo.getJobId());
}
Example #10
Source File: TestJobHistoryParsing.java From big-c with Apache License 2.0
/**
 * Test compatibility of JobHistoryParser with 2.4.0 history files
 * @throws IOException
 */
@Test
public void testTaskAttemptUnsuccessfulCompletionWithoutCounters240()
    throws IOException {
  Path histPath = new Path(getClass().getClassLoader().getResource(
      "job_2.4.0-FAILED.jhist").getFile());
  JobHistoryParser parser = new JobHistoryParser(
      FileSystem.getLocal(new Configuration()), histPath);
  JobInfo jobInfo = parser.parse();
  LOG.info(" job info: " + jobInfo.getJobname() + " "
      + jobInfo.getFinishedMaps() + " " + jobInfo.getTotalMaps() + " "
      + jobInfo.getJobId());
}
Example #11
Source File: TestJobHistoryParsing.java From big-c with Apache License 2.0
/**
 * Test compatibility of JobHistoryParser with 0.23.9 history files
 * @throws IOException
 */
@Test
public void testTaskAttemptUnsuccessfulCompletionWithoutCounters0239()
    throws IOException {
  Path histPath = new Path(getClass().getClassLoader().getResource(
      "job_0.23.9-FAILED.jhist").getFile());
  JobHistoryParser parser = new JobHistoryParser(
      FileSystem.getLocal(new Configuration()), histPath);
  JobInfo jobInfo = parser.parse();
  LOG.info(" job info: " + jobInfo.getJobname() + " "
      + jobInfo.getFinishedMaps() + " " + jobInfo.getTotalMaps() + " "
      + jobInfo.getJobId());
}
Example #12
Source File: TestJobHistoryParsing.java From hadoop with Apache License 2.0
/**
 * Test compatibility of JobHistoryParser with 0.23.9 history files
 * @throws IOException
 */
@Test
public void testTaskAttemptUnsuccessfulCompletionWithoutCounters0239()
    throws IOException {
  Path histPath = new Path(getClass().getClassLoader().getResource(
      "job_0.23.9-FAILED.jhist").getFile());
  JobHistoryParser parser = new JobHistoryParser(
      FileSystem.getLocal(new Configuration()), histPath);
  JobInfo jobInfo = parser.parse();
  LOG.info(" job info: " + jobInfo.getJobname() + " "
      + jobInfo.getFinishedMaps() + " " + jobInfo.getTotalMaps() + " "
      + jobInfo.getJobId());
}
Example #13
Source File: TestJobHistoryParsing.java From hadoop with Apache License 2.0
/**
 * Test compatibility of JobHistoryParser with 2.4.0 history files
 * @throws IOException
 */
@Test
public void testTaskAttemptUnsuccessfulCompletionWithoutCounters240()
    throws IOException {
  Path histPath = new Path(getClass().getClassLoader().getResource(
      "job_2.4.0-FAILED.jhist").getFile());
  JobHistoryParser parser = new JobHistoryParser(
      FileSystem.getLocal(new Configuration()), histPath);
  JobInfo jobInfo = parser.parse();
  LOG.info(" job info: " + jobInfo.getJobname() + " "
      + jobInfo.getFinishedMaps() + " " + jobInfo.getTotalMaps() + " "
      + jobInfo.getJobId());
}
Example #14
Source File: HistoryViewer.java From hadoop with Apache License 2.0
/** Apply the filter (status) on the parsed job and generate summary */
public FilteredJob(JobInfo job, String status) {
  filter = status;
  Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
  for (JobHistoryParser.TaskInfo task : tasks.values()) {
    Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
        task.getAllTaskAttempts();
    for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
      if (attempt.getTaskStatus().equals(status)) {
        String hostname = attempt.getHostname();
        TaskID id = attempt.getAttemptId().getTaskID();
        Set<TaskID> set = badNodesToFilteredTasks.get(hostname);
        if (set == null) {
          set = new TreeSet<TaskID>();
          set.add(id);
          badNodesToFilteredTasks.put(hostname, set);
        } else {
          set.add(id);
        }
      }
    }
  }
}
Example #15
Source File: HistoryViewer.java From big-c with Apache License 2.0
/** Generate analysis information for the parsed job */
public AnalyzedJob(JobInfo job) {
  Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
  int finishedMaps = (int) job.getFinishedMaps();
  int finishedReduces = (int) job.getFinishedReduces();
  mapTasks = new JobHistoryParser.TaskAttemptInfo[finishedMaps];
  reduceTasks = new JobHistoryParser.TaskAttemptInfo[finishedReduces];
  int mapIndex = 0, reduceIndex = 0;
  avgMapTime = 0;
  avgReduceTime = 0;
  avgShuffleTime = 0;

  for (JobHistoryParser.TaskInfo task : tasks.values()) {
    Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
        task.getAllTaskAttempts();
    for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
      if (attempt.getTaskStatus().equals(
          TaskStatus.State.SUCCEEDED.toString())) {
        long avgFinishTime = (attempt.getFinishTime() -
            attempt.getStartTime());
        if (attempt.getTaskType().equals(TaskType.MAP)) {
          mapTasks[mapIndex++] = attempt;
          avgMapTime += avgFinishTime;
        } else if (attempt.getTaskType().equals(TaskType.REDUCE)) {
          reduceTasks[reduceIndex++] = attempt;
          avgShuffleTime += (attempt.getShuffleFinishTime() -
              attempt.getStartTime());
          avgReduceTime += (attempt.getFinishTime() -
              attempt.getShuffleFinishTime());
        }
        break;
      }
    }
  }
  if (finishedMaps > 0) {
    avgMapTime /= finishedMaps;
  }
  if (finishedReduces > 0) {
    avgReduceTime /= finishedReduces;
    avgShuffleTime /= finishedReduces;
  }
}
Example #16
Source File: YarnJobStatsUtility.java From jumbune with GNU Lesser General Public License v3.0
/**
 * This method parses the .hist files and returns the job stats.
 *
 * @param reLocalPath the re local path
 * @return the job output contains the details of the job.
 * @throws IOException Signals that an I/O exception has occurred.
 */
public JobOutput parseAndGetJobStats(String reLocalPath) throws IOException {
  Path histFilePath = new Path(reLocalPath);
  DecoratedJobHistoryParser decoratedJobHistoryParser =
      new DecoratedJobHistoryParser(FileSystem.getLocal(new Configuration()),
          histFilePath);
  JobOutput jobOutput = null;
  try {
    JobInfo jobInfo = decoratedJobHistoryParser.parse();
    AdditionalJobInfo additionalJobInfo =
        decoratedJobHistoryParser.getAdditionalJobInfo();
    jobOutput = getJobOutput(jobInfo, additionalJobInfo);
  } catch (IOException e) {
    LOGGER.error(JumbuneRuntimeException.throwUnresponsiveIOException(
        e.getStackTrace()));
  }
  return jobOutput;
}
Example #17
Source File: ClusterProfilingHelper.java From jumbune with GNU Lesser General Public License v3.0
/**
 * Gets the rack aware stats.
 *
 * @param cluster the cluster
 * @param rmCommunicator
 * @return the rack aware stats
 * @throws IOException Signals that an I/O exception has occurred.
 * @throws InterruptedException the interrupted exception
 */
public RackAwareStats getRackAwareStats(Cluster cluster,
    RMCommunicator rmCommunicator) throws Exception {
  RackAwareStats rackAwareStats = new RackAwareStats();
  List<ApplicationReport> applicationReport = rmCommunicator.getApplications();
  Long dataLocalJob = (long) 0;
  Long rackLocalJob = (long) 0;
  Long otherLocalJob = (long) 0;
  Long totalCounter = (long) 0;
  StringBuffer jsonFile = new StringBuffer().append(JumbuneInfo.getHome())
      .append(File.separator).append(ExtendedConstants.JOB_JARS_LOC)
      .append(cluster.getClusterName()).append(RACK).append(JSON_EXTENSION);
  File rackAwareFile = new File(jsonFile.toString());
  if (rackAwareFile.exists()) {
    String rackAwareJsonReader = ConfigurationUtil.readFileData(jsonFile.toString());
    rackAwareStats = Constants.gson.fromJson(rackAwareJsonReader, RackAwareStats.class);
    dataLocalJob = rackAwareStats.getDataLocalJob();
    rackLocalJob = rackAwareStats.getRackLocalJob();
    otherLocalJob = rackAwareStats.getOtherLocalJob();
  }
  for (ApplicationReport appReport : applicationReport) {
    String applicationId = appReport.getApplicationId().toString();
    String jobId = applicationId.replace(Constants.APPLICATION, Constants.JOB);
    if (appReport.getFinalApplicationStatus().equals(FinalApplicationStatus.SUCCEEDED)) {
      StringBuffer histFilePath = new StringBuffer().append(JumbuneInfo.getHome())
          .append(ExtendedConstants.JOB_JARS_LOC).append(cluster.getClusterName())
          .append(CLUSTER_PROFILING).append(jobId).append(RACKWARE)
          .append(jobId).append(JHIST_EXTENSION);
      StringBuffer newHistFilePath = new StringBuffer().append(JumbuneInfo.getHome())
          .append(ExtendedConstants.JOB_JARS_LOC).append(cluster.getClusterName())
          .append(CLUSTER_PROFILING).append(jobId).append(RACKWARE)
          .append(jobId).append(COPY).append(JHIST_EXTENSION);
      File histFile = new File(histFilePath.toString());
      File copiedHistFile = new File(newHistFilePath.toString());
      if (histFile.exists()) {
        if (!copiedHistFile.exists()) {
          Files.copy(histFile.toPath(), copiedHistFile.toPath());
          if (Constants.HORTONWORKS.equals(
              FileUtil.getClusterInfoDetail(Constants.HADOOP_DISTRIBUTION))) {
            preProcessContainerId(newHistFilePath.toString());
          }
          Path path = new Path(newHistFilePath.toString());
          // begin mapr code changes
          if (Constants.MAPR.equals(
              FileUtil.getClusterInfoDetail(Constants.HADOOP_DISTRIBUTION))) {
            UserGroupInformation realUser = UserGroupInformation.createRemoteUser("mapr");
            UserGroupInformation.setLoginUser(realUser);
          }
          // end mapr code changes
          DecoratedJobHistoryParser decoratedJobHistoryParser =
              new DecoratedJobHistoryParser(
                  FileSystem.getLocal(new Configuration()), path);
          JobInfo jobInfo = decoratedJobHistoryParser.parse();
          if (jobInfo.getTotalCounters().findCounter(JOB_COUNTER,
              OTHER_LOCAL_MAPS).getValue() > 0) {
            otherLocalJob = otherLocalJob + jobInfo.getTotalCounters()
                .findCounter(JOB_COUNTER, OTHER_LOCAL_MAPS).getValue();
          }
          if (jobInfo.getTotalCounters().findCounter(JOB_COUNTER,
              RACK_LOCAL_MAPS).getValue() > 0) {
            rackLocalJob = rackLocalJob + jobInfo.getTotalCounters()
                .findCounter(JOB_COUNTER, RACK_LOCAL_MAPS).getValue();
          }
          if (jobInfo.getTotalCounters().findCounter(JOB_COUNTER,
              DATA_LOCAL_MAPS).getValue() > 0) {
            dataLocalJob = dataLocalJob + jobInfo.getTotalCounters()
                .findCounter(JOB_COUNTER, DATA_LOCAL_MAPS).getValue();
          }
        }
      }
    }
  }
  rackAwareStats.setDataLocalJob(dataLocalJob);
  rackAwareStats.setRackLocalJob(rackLocalJob);
  rackAwareStats.setOtherLocalJob(otherLocalJob);
  File parentFile = new File(rackAwareFile.getParent());
  if (!parentFile.exists()) {
    parentFile.mkdirs();
  }
  String json = Constants.gson.toJson(rackAwareStats);
  ConfigurationUtil.writeToFile(jsonFile.toString(), json);
  totalCounter = (dataLocalJob + rackLocalJob + otherLocalJob);
  if (totalCounter > 0) {
    rackAwareStats.setDataLocalJob((dataLocalJob * 100) / totalCounter);
    rackAwareStats.setOtherLocalJob((otherLocalJob * 100) / totalCounter);
    rackAwareStats.setRackLocalJob(100 - rackAwareStats.getOtherLocalJob()
        - rackAwareStats.getDataLocalJob());
  } else {
    rackAwareStats.setDataLocalJob(0);
    rackAwareStats.setOtherLocalJob(0);
    rackAwareStats.setRackLocalJob(0);
  }
  return rackAwareStats;
}
Example #18
Source File: TestJobHistoryParsing.java From big-c with Apache License 2.0
@Test
public void testMultipleFailedTasks() throws Exception {
  JobHistoryParser parser =
      new JobHistoryParser(Mockito.mock(FSDataInputStream.class));
  EventReader reader = Mockito.mock(EventReader.class);
  final AtomicInteger numEventsRead = new AtomicInteger(0); // Hack!
  final org.apache.hadoop.mapreduce.TaskType taskType =
      org.apache.hadoop.mapreduce.TaskType.MAP;
  final TaskID[] tids = new TaskID[2];
  final JobID jid = new JobID("1", 1);
  tids[0] = new TaskID(jid, taskType, 0);
  tids[1] = new TaskID(jid, taskType, 1);
  Mockito.when(reader.getNextEvent()).thenAnswer(
      new Answer<HistoryEvent>() {
        public HistoryEvent answer(InvocationOnMock invocation)
            throws IOException {
          // send two task start and two task fail events for tasks 0 and 1
          int eventId = numEventsRead.getAndIncrement();
          TaskID tid = tids[eventId & 0x1];
          if (eventId < 2) {
            return new TaskStartedEvent(tid, 0, taskType, "");
          }
          if (eventId < 4) {
            TaskFailedEvent tfe = new TaskFailedEvent(tid, 0, taskType,
                "failed", "FAILED", null, new Counters());
            tfe.setDatum(tfe.getDatum());
            return tfe;
          }
          if (eventId < 5) {
            JobUnsuccessfulCompletionEvent juce =
                new JobUnsuccessfulCompletionEvent(jid, 100L, 2, 0,
                    "JOB_FAILED", Collections.singletonList(
                        "Task failed: " + tids[0].toString()));
            return juce;
          }
          return null;
        }
      });
  JobInfo info = parser.parse(reader);
  assertTrue("Task 0 not implicated",
      info.getErrorInfo().contains(tids[0].toString()));
}
Example #19
Source File: TestJobHistoryParsing.java From big-c with Apache License 2.0
@Test(timeout = 50000)
public void testJobInfo() throws Exception {
  JobInfo info = new JobInfo();
  Assert.assertEquals("NORMAL", info.getPriority());
  info.printAll();
}
Example #20
Source File: MRAppMaster.java From hadoop with Apache License 2.0
private void parsePreviousJobHistory() throws IOException {
  FSDataInputStream in = getPreviousJobHistoryStream(getConfig(),
      appAttemptID);
  JobHistoryParser parser = new JobHistoryParser(in);
  JobInfo jobInfo = parser.parse();
  Exception parseException = parser.getParseException();
  if (parseException != null) {
    LOG.info("Got an error parsing job-history file" +
        ", ignoring incomplete events.", parseException);
  }
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
      .getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      Iterator<Entry<TaskAttemptID, TaskAttemptInfo>> taskAttemptIterator =
          taskInfo.getAllTaskAttempts().entrySet().iterator();
      while (taskAttemptIterator.hasNext()) {
        Map.Entry<TaskAttemptID, TaskAttemptInfo> currentEntry =
            taskAttemptIterator.next();
        if (!jobInfo.getAllCompletedTaskAttempts().containsKey(
            currentEntry.getKey())) {
          taskAttemptIterator.remove();
        }
      }
      completedTasksFromPreviousRun
          .put(TypeConverter.toYarn(taskInfo.getTaskId()), taskInfo);
      LOG.info("Read from history task "
          + TypeConverter.toYarn(taskInfo.getTaskId()));
    }
  }
  LOG.info("Read completed tasks from history "
      + completedTasksFromPreviousRun.size());
  recoveredJobStartTime = jobInfo.getLaunchTime();

  // recover AMInfos
  List<JobHistoryParser.AMInfo> jhAmInfoList = jobInfo.getAMInfos();
  if (jhAmInfoList != null) {
    for (JobHistoryParser.AMInfo jhAmInfo : jhAmInfoList) {
      AMInfo amInfo = MRBuilderUtils.newAMInfo(jhAmInfo.getAppAttemptId(),
          jhAmInfo.getStartTime(), jhAmInfo.getContainerId(),
          jhAmInfo.getNodeManagerHost(), jhAmInfo.getNodeManagerPort(),
          jhAmInfo.getNodeManagerHttpPort());
      amInfos.add(amInfo);
    }
  }
}
Example #21
Source File: HistoryViewer.java From big-c with Apache License 2.0
/** Create summary information for the parsed job */
public SummarizedJob(JobInfo job) {
  tasks = job.getAllTasks();

  for (JobHistoryParser.TaskInfo task : tasks.values()) {
    Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
        task.getAllTaskAttempts();
    //allHosts.put(task.getHo(Keys.HOSTNAME), "");
    for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
      long startTime = attempt.getStartTime();
      long finishTime = attempt.getFinishTime();
      if (attempt.getTaskType().equals(TaskType.MAP)) {
        if (mapStarted == 0 || mapStarted > startTime) {
          mapStarted = startTime;
        }
        if (mapFinished < finishTime) {
          mapFinished = finishTime;
        }
        totalMaps++;
        if (attempt.getTaskStatus().equals(
            TaskStatus.State.FAILED.toString())) {
          numFailedMaps++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.KILLED.toString())) {
          numKilledMaps++;
        }
      } else if (attempt.getTaskType().equals(TaskType.REDUCE)) {
        if (reduceStarted == 0 || reduceStarted > startTime) {
          reduceStarted = startTime;
        }
        if (reduceFinished < finishTime) {
          reduceFinished = finishTime;
        }
        totalReduces++;
        if (attempt.getTaskStatus().equals(
            TaskStatus.State.FAILED.toString())) {
          numFailedReduces++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.KILLED.toString())) {
          numKilledReduces++;
        }
      } else if (attempt.getTaskType().equals(TaskType.JOB_CLEANUP)) {
        if (cleanupStarted == 0 || cleanupStarted > startTime) {
          cleanupStarted = startTime;
        }
        if (cleanupFinished < finishTime) {
          cleanupFinished = finishTime;
        }
        totalCleanups++;
        if (attempt.getTaskStatus().equals(
            TaskStatus.State.SUCCEEDED.toString())) {
          numFinishedCleanups++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.FAILED.toString())) {
          numFailedCleanups++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.KILLED.toString())) {
          numKilledCleanups++;
        }
      } else if (attempt.getTaskType().equals(TaskType.JOB_SETUP)) {
        if (setupStarted == 0 || setupStarted > startTime) {
          setupStarted = startTime;
        }
        if (setupFinished < finishTime) {
          setupFinished = finishTime;
        }
        totalSetups++;
        if (attempt.getTaskStatus().equals(
            TaskStatus.State.SUCCEEDED.toString())) {
          numFinishedSetups++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.FAILED.toString())) {
          numFailedSetups++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.KILLED.toString())) {
          numKilledSetups++;
        }
      }
    }
  }
}
Example #22
Source File: MRAppMaster.java From big-c with Apache License 2.0
private void parsePreviousJobHistory() throws IOException {
  FSDataInputStream in = getPreviousJobHistoryStream(getConfig(),
      appAttemptID);
  JobHistoryParser parser = new JobHistoryParser(in);
  JobInfo jobInfo = parser.parse();
  Exception parseException = parser.getParseException();
  if (parseException != null) {
    LOG.info("Got an error parsing job-history file" +
        ", ignoring incomplete events.", parseException);
  }
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
      .getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      Iterator<Entry<TaskAttemptID, TaskAttemptInfo>> taskAttemptIterator =
          taskInfo.getAllTaskAttempts().entrySet().iterator();
      while (taskAttemptIterator.hasNext()) {
        Map.Entry<TaskAttemptID, TaskAttemptInfo> currentEntry =
            taskAttemptIterator.next();
        if (!jobInfo.getAllCompletedTaskAttempts().containsKey(
            currentEntry.getKey())) {
          taskAttemptIterator.remove();
        }
      }
      completedTasksFromPreviousRun
          .put(TypeConverter.toYarn(taskInfo.getTaskId()), taskInfo);
      LOG.info("Read from history task "
          + TypeConverter.toYarn(taskInfo.getTaskId()));
    }
  }
  LOG.info("Read completed tasks from history "
      + completedTasksFromPreviousRun.size());
  recoveredJobStartTime = jobInfo.getLaunchTime();

  // recover AMInfos
  List<JobHistoryParser.AMInfo> jhAmInfoList = jobInfo.getAMInfos();
  if (jhAmInfoList != null) {
    for (JobHistoryParser.AMInfo jhAmInfo : jhAmInfoList) {
      AMInfo amInfo = MRBuilderUtils.newAMInfo(jhAmInfo.getAppAttemptId(),
          jhAmInfo.getStartTime(), jhAmInfo.getContainerId(),
          jhAmInfo.getNodeManagerHost(), jhAmInfo.getNodeManagerPort(),
          jhAmInfo.getNodeManagerHttpPort());
      amInfos.add(amInfo);
    }
  }
}
Example #23
Source File: TestJobHistoryParsing.java From hadoop with Apache License 2.0
@Test
public void testMultipleFailedTasks() throws Exception {
  JobHistoryParser parser =
      new JobHistoryParser(Mockito.mock(FSDataInputStream.class));
  EventReader reader = Mockito.mock(EventReader.class);
  final AtomicInteger numEventsRead = new AtomicInteger(0); // Hack!
  final org.apache.hadoop.mapreduce.TaskType taskType =
      org.apache.hadoop.mapreduce.TaskType.MAP;
  final TaskID[] tids = new TaskID[2];
  final JobID jid = new JobID("1", 1);
  tids[0] = new TaskID(jid, taskType, 0);
  tids[1] = new TaskID(jid, taskType, 1);
  Mockito.when(reader.getNextEvent()).thenAnswer(
      new Answer<HistoryEvent>() {
        public HistoryEvent answer(InvocationOnMock invocation)
            throws IOException {
          // send two task start and two task fail events for tasks 0 and 1
          int eventId = numEventsRead.getAndIncrement();
          TaskID tid = tids[eventId & 0x1];
          if (eventId < 2) {
            return new TaskStartedEvent(tid, 0, taskType, "");
          }
          if (eventId < 4) {
            TaskFailedEvent tfe = new TaskFailedEvent(tid, 0, taskType,
                "failed", "FAILED", null, new Counters());
            tfe.setDatum(tfe.getDatum());
            return tfe;
          }
          if (eventId < 5) {
            JobUnsuccessfulCompletionEvent juce =
                new JobUnsuccessfulCompletionEvent(jid, 100L, 2, 0,
                    "JOB_FAILED", Collections.singletonList(
                        "Task failed: " + tids[0].toString()));
            return juce;
          }
          return null;
        }
      });
  JobInfo info = parser.parse(reader);
  assertTrue("Task 0 not implicated",
      info.getErrorInfo().contains(tids[0].toString()));
}
Example #24
Source File: TestJobHistoryParsing.java From hadoop with Apache License 2.0
@Test(timeout = 50000)
public void testJobInfo() throws Exception {
  JobInfo info = new JobInfo();
  Assert.assertEquals("NORMAL", info.getPriority());
  info.printAll();
}
Example #25
Source File: HistoryViewer.java From hadoop with Apache License 2.0
/** Generate analysis information for the parsed job */
public AnalyzedJob(JobInfo job) {
  Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
  int finishedMaps = (int) job.getFinishedMaps();
  int finishedReduces = (int) job.getFinishedReduces();
  mapTasks = new JobHistoryParser.TaskAttemptInfo[finishedMaps];
  reduceTasks = new JobHistoryParser.TaskAttemptInfo[finishedReduces];
  int mapIndex = 0, reduceIndex = 0;
  avgMapTime = 0;
  avgReduceTime = 0;
  avgShuffleTime = 0;

  for (JobHistoryParser.TaskInfo task : tasks.values()) {
    Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
        task.getAllTaskAttempts();
    for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
      if (attempt.getTaskStatus().equals(
          TaskStatus.State.SUCCEEDED.toString())) {
        long avgFinishTime = (attempt.getFinishTime() -
            attempt.getStartTime());
        if (attempt.getTaskType().equals(TaskType.MAP)) {
          mapTasks[mapIndex++] = attempt;
          avgMapTime += avgFinishTime;
        } else if (attempt.getTaskType().equals(TaskType.REDUCE)) {
          reduceTasks[reduceIndex++] = attempt;
          avgShuffleTime += (attempt.getShuffleFinishTime() -
              attempt.getStartTime());
          avgReduceTime += (attempt.getFinishTime() -
              attempt.getShuffleFinishTime());
        }
        break;
      }
    }
  }
  if (finishedMaps > 0) {
    avgMapTime /= finishedMaps;
  }
  if (finishedReduces > 0) {
    avgReduceTime /= finishedReduces;
    avgShuffleTime /= finishedReduces;
  }
}
Example #26
Source File: HistoryViewer.java From hadoop with Apache License 2.0
/** Create summary information for the parsed job */
public SummarizedJob(JobInfo job) {
  tasks = job.getAllTasks();

  for (JobHistoryParser.TaskInfo task : tasks.values()) {
    Map<TaskAttemptID, JobHistoryParser.TaskAttemptInfo> attempts =
        task.getAllTaskAttempts();
    //allHosts.put(task.getHo(Keys.HOSTNAME), "");
    for (JobHistoryParser.TaskAttemptInfo attempt : attempts.values()) {
      long startTime = attempt.getStartTime();
      long finishTime = attempt.getFinishTime();
      if (attempt.getTaskType().equals(TaskType.MAP)) {
        if (mapStarted == 0 || mapStarted > startTime) {
          mapStarted = startTime;
        }
        if (mapFinished < finishTime) {
          mapFinished = finishTime;
        }
        totalMaps++;
        if (attempt.getTaskStatus().equals(
            TaskStatus.State.FAILED.toString())) {
          numFailedMaps++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.KILLED.toString())) {
          numKilledMaps++;
        }
      } else if (attempt.getTaskType().equals(TaskType.REDUCE)) {
        if (reduceStarted == 0 || reduceStarted > startTime) {
          reduceStarted = startTime;
        }
        if (reduceFinished < finishTime) {
          reduceFinished = finishTime;
        }
        totalReduces++;
        if (attempt.getTaskStatus().equals(
            TaskStatus.State.FAILED.toString())) {
          numFailedReduces++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.KILLED.toString())) {
          numKilledReduces++;
        }
      } else if (attempt.getTaskType().equals(TaskType.JOB_CLEANUP)) {
        if (cleanupStarted == 0 || cleanupStarted > startTime) {
          cleanupStarted = startTime;
        }
        if (cleanupFinished < finishTime) {
          cleanupFinished = finishTime;
        }
        totalCleanups++;
        if (attempt.getTaskStatus().equals(
            TaskStatus.State.SUCCEEDED.toString())) {
          numFinishedCleanups++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.FAILED.toString())) {
          numFailedCleanups++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.KILLED.toString())) {
          numKilledCleanups++;
        }
      } else if (attempt.getTaskType().equals(TaskType.JOB_SETUP)) {
        if (setupStarted == 0 || setupStarted > startTime) {
          setupStarted = startTime;
        }
        if (setupFinished < finishTime) {
          setupFinished = finishTime;
        }
        totalSetups++;
        if (attempt.getTaskStatus().equals(
            TaskStatus.State.SUCCEEDED.toString())) {
          numFinishedSetups++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.FAILED.toString())) {
          numFailedSetups++;
        } else if (attempt.getTaskStatus().equals(
            TaskStatus.State.KILLED.toString())) {
          numKilledSetups++;
        }
      }
    }
  }
}