Java Code Examples for org.apache.hadoop.mapreduce.TypeConverter#toYarn()

The following examples show how to use org.apache.hadoop.mapreduce.TypeConverter#toYarn(). Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
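Before the project examples, here is a minimal, self-contained sketch of the conversion in both directions. The class name TypeConverterDemo and the job ID string are illustrative only; the calls themselves mirror the usage in the examples below.

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;

public class TypeConverterDemo {
  public static void main(String[] args) {
    // Parse a classic MapReduce JobID from its string form.
    JobID oldJobId = JobID.forName("job_1234567890000_0001");

    // Convert it to the YARN/MRv2 record type used inside the MR app master.
    JobId jobId = TypeConverter.toYarn(oldJobId);

    // The conversion is symmetric: fromYarn() recovers the original identifier.
    JobID roundTripped = TypeConverter.fromYarn(jobId);
    System.out.println(jobId + " <-> " + roundTripped);
  }
}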
Example 1
Source File: ClientServiceDelegate.java    From big-c with Apache License 2.0
public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID,
    TaskType taskType) throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
    TypeConverter.toYarn(oldJobID);
  GetTaskReportsRequest request =
      recordFactory.newRecordInstance(GetTaskReportsRequest.class);
  request.setJobId(jobId);
  request.setTaskType(TypeConverter.toYarn(taskType));

  List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports =
    ((GetTaskReportsResponse) invoke("getTaskReports", GetTaskReportsRequest.class,
        request)).getTaskReportList();

  return TypeConverter.fromYarn(taskReports)
      .toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
}
 
Example 2
Source File: TaskAttemptListenerImpl.java    From big-c with Apache License 2.0
@Override
public void reportDiagnosticInfo(TaskAttemptID taskAttemptID,
    String diagnosticInfo) throws IOException {
  diagnosticInfo = StringInterner.weakIntern(diagnosticInfo);
  LOG.info("Diagnostics report from " + taskAttemptID.toString() + ": "
      + diagnosticInfo);

  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);
  taskHeartbeatHandler.progressing(attemptID);

  // This is mainly used for cases where we want to propagate exception traces
  // of tasks that fail.

  // This call exists as a hadoop mapreduce legacy wherein all changes in
  // counters/progress/phase/output-size are reported through statusUpdate()
  // call but not diagnosticInformation.
  context.getEventHandler().handle(
      new TaskAttemptDiagnosticsUpdateEvent(attemptID, diagnosticInfo));
}
 
Example 3
Source File: TestFileNameIndexUtils.java    From hadoop with Apache License 2.0
@Test
public void testUserNamePercentEncoding() throws IOException {
  JobIndexInfo info = new JobIndexInfo();
  JobID oldJobId = JobID.forName(JOB_ID);
  JobId jobId = TypeConverter.toYarn(oldJobId);
  info.setJobId(jobId);
  info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
  info.setUser(USER_NAME_WITH_DELIMITER);
  info.setJobName(JOB_NAME);
  info.setFinishTime(Long.parseLong(FINISH_TIME));
  info.setNumMaps(Integer.parseInt(NUM_MAPS));
  info.setNumReduces(Integer.parseInt(NUM_REDUCES));
  info.setJobStatus(JOB_STATUS);
  info.setQueueName(QUEUE_NAME);
  info.setJobStartTime(Long.parseLong(JOB_START_TIME));

  String jobHistoryFile = FileNameIndexUtils.getDoneFileName(info);
  Assert.assertTrue("User name not encoded correctly into job history file",
      jobHistoryFile.contains(USER_NAME_WITH_DELIMITER_ESCAPE));
}
 
Example 4
Source File: TaskAttemptListenerImpl.java    From hadoop with Apache License 2.0
@Override
public MapTaskCompletionEventsUpdate getMapCompletionEvents(
    JobID jobIdentifier, int startIndex, int maxEvents,
    TaskAttemptID taskAttemptID) throws IOException {
  LOG.info("MapCompletionEvents request from " + taskAttemptID.toString()
      + ". startIndex " + startIndex + " maxEvents " + maxEvents);

  // TODO: shouldReset is never used. See TT. Ask for Removal.
  boolean shouldReset = false;
  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
    TypeConverter.toYarn(taskAttemptID);
  TaskCompletionEvent[] events =
      context.getJob(attemptID.getTaskId().getJobId()).getMapAttemptCompletionEvents(
          startIndex, maxEvents);

  taskHeartbeatHandler.progressing(attemptID);
  
  return new MapTaskCompletionEventsUpdate(events, shouldReset);
}
 
Example 5
Source File: TestJobImpl.java    From big-c with Apache License 2.0
private static StubbedJob createStubbedJob(Configuration conf,
    Dispatcher dispatcher, int numSplits, AppContext appContext) {
  JobID jobID = JobID.forName("job_1234567890000_0001");
  JobId jobId = TypeConverter.toYarn(jobID);
  if (appContext == null) {
    appContext = mock(AppContext.class);
    when(appContext.hasSuccessfullyUnregistered()).thenReturn(true);
  }
  StubbedJob job = new StubbedJob(jobId,
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 0), 0),
      conf, dispatcher.getEventHandler(), true, "somebody", numSplits, appContext);
  dispatcher.register(JobEventType.class, job);
  EventHandler mockHandler = mock(EventHandler.class);
  dispatcher.register(TaskEventType.class, mockHandler);
  dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class,
      mockHandler);
  dispatcher.register(JobFinishEvent.Type.class, mockHandler);
  return job;
}
 
Example 6
Source File: ClientServiceDelegate.java    From big-c with Apache License 2.0
public JobStatus getJobStatus(JobID oldJobID) throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
    TypeConverter.toYarn(oldJobID);
  GetJobReportRequest request =
      recordFactory.newRecordInstance(GetJobReportRequest.class);
  request.setJobId(jobId);
  JobReport report = ((GetJobReportResponse) invoke("getJobReport",
      GetJobReportRequest.class, request)).getJobReport();
  JobStatus jobStatus = null;
  if (report != null) {
    if (StringUtils.isEmpty(report.getJobFile())) {
      String jobFile = MRApps.getJobFile(conf, report.getUser(), oldJobID);
      report.setJobFile(jobFile);
    }
    String historyTrackingUrl = report.getTrackingUrl();
    String url = StringUtils.isNotEmpty(historyTrackingUrl)
        ? historyTrackingUrl : trackingUrl;
    jobStatus = TypeConverter.fromYarn(report, url);
  }
  return jobStatus;
}
 
Example 7
Source File: JobHistory.java    From hadoop with Apache License 2.0
@Override
public Map<JobId, Job> getAllJobs(ApplicationId appID) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Called getAllJobs(AppId): " + appID);
  }
  // Currently there is a 1-to-1 mapping between app and job IDs.
  org.apache.hadoop.mapreduce.JobID oldJobID = TypeConverter.fromYarn(appID);
  Map<JobId, Job> jobs = new HashMap<JobId, Job>();
  JobId jobID = TypeConverter.toYarn(oldJobID);
  jobs.put(jobID, getJob(jobID));
  return jobs;
}
 
Example 8
Source File: TaskAttemptListenerImpl.java    From big-c with Apache License 2.0
@Override
public void fatalError(TaskAttemptID taskAttemptID, String msg)
    throws IOException {
  // This happens only in Child and in the Task.
  LOG.fatal("Task: " + taskAttemptID + " - exited : " + msg);
  reportDiagnosticInfo(taskAttemptID, "Error: " + msg);

  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);
  context.getEventHandler().handle(
      new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
}
 
Example 9
Source File: RMCommunicator.java    From big-c with Apache License 2.0
@Override
protected void serviceStart() throws Exception {
  scheduler = createSchedulerProxy();
  JobID id = TypeConverter.fromYarn(this.applicationId);
  JobId jobId = TypeConverter.toYarn(id);
  job = context.getJob(jobId);
  register();
  startAllocatorThread();
  super.serviceStart();
}
 
Example 10
Source File: CompletedTaskAttempt.java    From big-c with Apache License 2.0
CompletedTaskAttempt(TaskId taskId, TaskAttemptInfo attemptInfo) {
  this.attemptInfo = attemptInfo;
  this.attemptId = TypeConverter.toYarn(attemptInfo.getAttemptId());
  if (attemptInfo.getTaskStatus() != null) {
    this.state = TaskAttemptState.valueOf(attemptInfo.getTaskStatus());
  } else {
    this.state = TaskAttemptState.KILLED;
    localDiagMessage = "Attmpt state missing from History : marked as KILLED";
    diagnostics.add(localDiagMessage);
  }
  if (attemptInfo.getError() != null) {
    diagnostics.add(attemptInfo.getError());
  }
}
 
Example 11
Source File: ClientServiceDelegate.java    From hadoop with Apache License 2.0
public org.apache.hadoop.mapreduce.Counters getJobCounters(JobID arg0)
    throws IOException, InterruptedException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobID = TypeConverter.toYarn(arg0);
  GetCountersRequest request = recordFactory.newRecordInstance(GetCountersRequest.class);
  request.setJobId(jobID);
  Counters cnt = ((GetCountersResponse)
      invoke("getCounters", GetCountersRequest.class, request)).getCounters();
  return TypeConverter.fromYarn(cnt);
}
 
Example 12
Source File: ClientServiceDelegate.java    From hadoop with Apache License 2.0
public boolean killJob(JobID oldJobID) throws IOException {
  org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
      TypeConverter.toYarn(oldJobID);
  KillJobRequest killRequest = recordFactory.newRecordInstance(KillJobRequest.class);
  killRequest.setJobId(jobId);
  invoke("killJob", KillJobRequest.class, killRequest);
  return true;
}
 
Example 13
Source File: TestCommitterEventHandler.java    From hadoop with Apache License 2.0
@Test
public void testBasic() throws Exception {
  AppContext mockContext = mock(AppContext.class);
  OutputCommitter mockCommitter = mock(OutputCommitter.class);
  Clock mockClock = mock(Clock.class);
  
  CommitterEventHandler handler = new CommitterEventHandler(mockContext, 
      mockCommitter, new TestingRMHeartbeatHandler());
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  JobContext mockJobContext = mock(JobContext.class);
  ApplicationAttemptId attemptid = 
    ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
  JobId jobId = TypeConverter.toYarn(
      TypeConverter.fromYarn(attemptid.getApplicationId()));
  
  WaitForItHandler waitForItHandler = new WaitForItHandler();
  
  when(mockContext.getApplicationID()).thenReturn(attemptid.getApplicationId());
  when(mockContext.getApplicationAttemptId()).thenReturn(attemptid);
  when(mockContext.getEventHandler()).thenReturn(waitForItHandler);
  when(mockContext.getClock()).thenReturn(mockClock);
  
  handler.init(conf);
  handler.start();
  try {
    handler.handle(new CommitterJobCommitEvent(jobId, mockJobContext));

    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    Path startCommitFile = MRApps.getStartJobCommitFile(conf, user, jobId);
    Path endCommitSuccessFile = MRApps.getEndJobCommitSuccessFile(conf, user, 
        jobId);
    Path endCommitFailureFile = MRApps.getEndJobCommitFailureFile(conf, user, 
        jobId);

    Event e = waitForItHandler.getAndClearEvent();
    assertNotNull(e);
    assertTrue(e instanceof JobCommitCompletedEvent);
    FileSystem fs = FileSystem.get(conf);
    assertTrue(startCommitFile.toString(), fs.exists(startCommitFile));
    assertTrue(endCommitSuccessFile.toString(), fs.exists(endCommitSuccessFile));
    assertFalse(endCommitFailureFile.toString(), fs.exists(endCommitFailureFile));
    verify(mockCommitter).commitJob(any(JobContext.class));
  } finally {
    handler.stop();
  }
}
 
Example 14
Source File: TaskAttemptListenerImpl.java    From hadoop with Apache License 2.0
@Override
public boolean statusUpdate(TaskAttemptID taskAttemptID,
    TaskStatus taskStatus) throws IOException, InterruptedException {
  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId yarnAttemptID =
      TypeConverter.toYarn(taskAttemptID);
  taskHeartbeatHandler.progressing(yarnAttemptID);
  TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus();
  taskAttemptStatus.id = yarnAttemptID;
  // Task sends the updated progress to the TT.
  taskAttemptStatus.progress = taskStatus.getProgress();
  LOG.info("Progress of TaskAttempt " + taskAttemptID + " is : "
      + taskStatus.getProgress());
  // Task sends the updated state-string to the TT.
  taskAttemptStatus.stateString = taskStatus.getStateString();
  // Task sends the updated phase to the TT.
  taskAttemptStatus.phase = TypeConverter.toYarn(taskStatus.getPhase());
  // Counters are updated by the task. Convert counters into the new format,
  // as that is the primary storage format inside the AM, to avoid multiple
  // conversions and unnecessary heap usage.
  taskAttemptStatus.counters = new org.apache.hadoop.mapreduce.Counters(
      taskStatus.getCounters());

  // Map finish time, set by the task (map only).
  if (taskStatus.getIsMap() && taskStatus.getMapFinishTime() != 0) {
    taskAttemptStatus.mapFinishTime = taskStatus.getMapFinishTime();
  }

  // Shuffle finish time, set by the task (reduce only).
  if (!taskStatus.getIsMap() && taskStatus.getShuffleFinishTime() != 0) {
    taskAttemptStatus.shuffleFinishTime = taskStatus.getShuffleFinishTime();
  }

  // Sort finish time, set by the task (reduce only).
  if (!taskStatus.getIsMap() && taskStatus.getSortFinishTime() != 0) {
    taskAttemptStatus.sortFinishTime = taskStatus.getSortFinishTime();
  }

  // Not setting the task state here; it is used by speculation and will be
  // set in TaskAttemptImpl.
  //taskAttemptStatus.taskState = TypeConverter.toYarn(taskStatus.getRunState());

  // Set the fetch failures.
  if (taskStatus.getFetchFailedMaps() != null
      && taskStatus.getFetchFailedMaps().size() > 0) {
    taskAttemptStatus.fetchFailedMaps =
        new ArrayList<org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId>();
    for (TaskAttemptID failedMapId : taskStatus.getFetchFailedMaps()) {
      taskAttemptStatus.fetchFailedMaps.add(TypeConverter.toYarn(failedMapId));
    }
  }

  // Task sends the information about the nextRecordRange to the TT.

  // TODO: The following are not needed here, but need to be set somewhere
  // inside the AppMaster.
  //   taskStatus.getRunState(); // Set by the TT/JT. Transform into a state. TODO
  //   taskStatus.getStartTime(); // Used to be set by the TaskTracker. Should be set by getTask().
  //   taskStatus.getFinishTime(); // Used to be set by TT/JT. Should be set when the task finishes.
  //   // This was used by TT to do counter updates only once every minute, so it
  //   // isn't ever changed by the Task itself.
  //   taskStatus.getIncludeCounters();

  context.getEventHandler().handle(
      new TaskAttemptStatusUpdateEvent(taskAttemptStatus.id,
          taskAttemptStatus));
  return true;
}
 
Example 15
Source File: TezTypeConverters.java    From incubator-tez with Apache License 2.0
public static TaskAttemptId toYarn(TezTaskAttemptID taskAttemptId) {
  TaskAttemptID mrTaskAttemptId = IDConverter
      .toMRTaskAttemptId(taskAttemptId);
  TaskAttemptId mrv2TaskAttemptId = TypeConverter.toYarn(mrTaskAttemptId);
  return mrv2TaskAttemptId;
}