Java Code Examples for org.apache.hadoop.mapreduce.v2.api.records.JobReport#getUser()
The following examples show how to use
org.apache.hadoop.mapreduce.v2.api.records.JobReport#getUser().
Each example is taken from an open-source project; the project and source file are noted above each snippet.
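Before the project examples, here is a minimal, hypothetical sketch (not taken from any project below) of creating a JobReport record through the YARN record factory and reading it back with getUser(); the user name "alice" is an assumed placeholder.

import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

public class JobReportUserSketch {
  public static void main(String[] args) {
    // Obtain the default YARN record factory (null falls back to the default configuration).
    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);

    // Create an empty JobReport record and set a placeholder submitting user.
    JobReport report = recordFactory.newRecordInstance(JobReport.class);
    report.setUser("alice"); // hypothetical user name

    // getUser() returns the name of the user that submitted the job.
    System.out.println("Submitting user: " + report.getUser());
  }
}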
Example 1
Source File: TypeConverter.java From hadoop with Apache License 2.0
public static JobStatus fromYarn(JobReport jobreport, String trackingUrl) {
  JobPriority jobPriority = JobPriority.NORMAL;
  JobStatus jobStatus = new org.apache.hadoop.mapred.JobStatus(
      fromYarn(jobreport.getJobId()), jobreport.getSetupProgress(),
      jobreport.getMapProgress(), jobreport.getReduceProgress(),
      jobreport.getCleanupProgress(), fromYarn(jobreport.getJobState()),
      jobPriority, jobreport.getUser(), jobreport.getJobName(),
      jobreport.getJobFile(), trackingUrl, jobreport.isUber());
  jobStatus.setStartTime(jobreport.getStartTime());
  jobStatus.setFinishTime(jobreport.getFinishTime());
  jobStatus.setFailureInfo(jobreport.getDiagnostics());
  return jobStatus;
}
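A hedged usage sketch for the converter above (the jobReport variable and the tracking URL are assumptions, e.g. a report fetched from the MR application master or the history server); the user supplied by JobReport#getUser() is carried over to the converted JobStatus:

// Hypothetical caller of TypeConverter.fromYarn(); jobReport is assumed
// to have been obtained elsewhere.
org.apache.hadoop.mapreduce.JobStatus status =
    TypeConverter.fromYarn(jobReport, "http://tracking.url"); // placeholder URL
String user = status.getUsername(); // same value as jobReport.getUser()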
Example 2
Source File: TypeConverter.java From big-c with Apache License 2.0
public static JobStatus fromYarn(JobReport jobreport, String trackingUrl) {
  JobPriority jobPriority = JobPriority.NORMAL;
  JobStatus jobStatus = new org.apache.hadoop.mapred.JobStatus(
      fromYarn(jobreport.getJobId()), jobreport.getSetupProgress(),
      jobreport.getMapProgress(), jobreport.getReduceProgress(),
      jobreport.getCleanupProgress(), fromYarn(jobreport.getJobState()),
      jobPriority, jobreport.getUser(), jobreport.getJobName(),
      jobreport.getJobFile(), trackingUrl, jobreport.isUber());
  jobStatus.setStartTime(jobreport.getStartTime());
  jobStatus.setFinishTime(jobreport.getFinishTime());
  jobStatus.setFailureInfo(jobreport.getDiagnostics());
  return jobStatus;
}
Example 3
Source File: ClientServiceDelegate.java From hadoop with Apache License 2.0
public LogParams getLogFilePath(JobID oldJobID, TaskAttemptID oldTaskAttemptID)
    throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
      TypeConverter.toYarn(oldJobID);
  GetJobReportRequest request =
      recordFactory.newRecordInstance(GetJobReportRequest.class);
  request.setJobId(jobId);
  JobReport report =
      ((GetJobReportResponse) invoke("getJobReport",
          GetJobReportRequest.class, request)).getJobReport();
  if (EnumSet.of(JobState.SUCCEEDED, JobState.FAILED, JobState.KILLED,
      JobState.ERROR).contains(report.getJobState())) {
    if (oldTaskAttemptID != null) {
      GetTaskAttemptReportRequest taRequest =
          recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
      taRequest.setTaskAttemptId(TypeConverter.toYarn(oldTaskAttemptID));
      TaskAttemptReport taReport =
          ((GetTaskAttemptReportResponse) invoke("getTaskAttemptReport",
              GetTaskAttemptReportRequest.class, taRequest))
              .getTaskAttemptReport();
      if (taReport.getContainerId() == null
          || taReport.getNodeManagerHost() == null) {
        throw new IOException("Unable to get log information for task: "
            + oldTaskAttemptID);
      }
      return new LogParams(
          taReport.getContainerId().toString(),
          taReport.getContainerId().getApplicationAttemptId()
              .getApplicationId().toString(),
          NodeId.newInstance(taReport.getNodeManagerHost(),
              taReport.getNodeManagerPort()).toString(),
          report.getUser());
    } else {
      if (report.getAMInfos() == null || report.getAMInfos().size() == 0) {
        throw new IOException("Unable to get log information for job: "
            + oldJobID);
      }
      AMInfo amInfo = report.getAMInfos().get(report.getAMInfos().size() - 1);
      return new LogParams(
          amInfo.getContainerId().toString(),
          amInfo.getAppAttemptId().getApplicationId().toString(),
          NodeId.newInstance(amInfo.getNodeManagerHost(),
              amInfo.getNodeManagerPort()).toString(),
          report.getUser());
    }
  } else {
    throw new IOException("Cannot get log path for a in-progress job");
  }
}
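A hedged sketch of a possible caller (the clientServiceDelegate instance and oldJobID are assumptions); the owner of the returned LogParams is the value of report.getUser() from the method above:

// Hypothetical caller: fetch log parameters for a finished job.
// A null task attempt ID selects the application-master logs.
LogParams params = clientServiceDelegate.getLogFilePath(oldJobID, null);
String owner = params.getOwner();         // populated from report.getUser()
String appId = params.getApplicationId(); // application whose logs to fetch
System.out.println("Logs for application " + appId + " are owned by " + owner);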
Example 4
Source File: ClientServiceDelegate.java From big-c with Apache License 2.0
public LogParams getLogFilePath(JobID oldJobID, TaskAttemptID oldTaskAttemptID)
    throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
      TypeConverter.toYarn(oldJobID);
  GetJobReportRequest request =
      recordFactory.newRecordInstance(GetJobReportRequest.class);
  request.setJobId(jobId);
  JobReport report =
      ((GetJobReportResponse) invoke("getJobReport",
          GetJobReportRequest.class, request)).getJobReport();
  if (EnumSet.of(JobState.SUCCEEDED, JobState.FAILED, JobState.KILLED,
      JobState.ERROR).contains(report.getJobState())) {
    if (oldTaskAttemptID != null) {
      GetTaskAttemptReportRequest taRequest =
          recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
      taRequest.setTaskAttemptId(TypeConverter.toYarn(oldTaskAttemptID));
      TaskAttemptReport taReport =
          ((GetTaskAttemptReportResponse) invoke("getTaskAttemptReport",
              GetTaskAttemptReportRequest.class, taRequest))
              .getTaskAttemptReport();
      if (taReport.getContainerId() == null
          || taReport.getNodeManagerHost() == null) {
        throw new IOException("Unable to get log information for task: "
            + oldTaskAttemptID);
      }
      return new LogParams(
          taReport.getContainerId().toString(),
          taReport.getContainerId().getApplicationAttemptId()
              .getApplicationId().toString(),
          NodeId.newInstance(taReport.getNodeManagerHost(),
              taReport.getNodeManagerPort()).toString(),
          report.getUser());
    } else {
      if (report.getAMInfos() == null || report.getAMInfos().size() == 0) {
        throw new IOException("Unable to get log information for job: "
            + oldJobID);
      }
      AMInfo amInfo = report.getAMInfos().get(report.getAMInfos().size() - 1);
      return new LogParams(
          amInfo.getContainerId().toString(),
          amInfo.getAppAttemptId().getApplicationId().toString(),
          NodeId.newInstance(amInfo.getNodeManagerHost(),
              amInfo.getNodeManagerPort()).toString(),
          report.getUser());
    }
  } else {
    throw new IOException("Cannot get log path for a in-progress job");
  }
}