Java Code Examples for org.apache.hadoop.mapreduce.v2.api.records.TaskState

The following examples show how to use org.apache.hadoop.mapreduce.v2.api.records.TaskState. They are extracted from open source projects; the source project, source file, and license are noted above each example.
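For orientation, here is a minimal, self-contained sketch that iterates over the TaskState constants (NEW, SCHEDULED, RUNNING, SUCCEEDED, FAILED, and KILLED, the values handled by the TypeConverter examples further down) and flags the terminal ones. The TaskStateDemo class and its isTerminal helper are illustrative names invented for this page, not part of the Hadoop API.

import org.apache.hadoop.mapreduce.v2.api.records.TaskState;

public class TaskStateDemo {

  // Hypothetical helper: a task in one of these states will not change state again.
  static boolean isTerminal(TaskState state) {
    switch (state) {
    case SUCCEEDED:
    case FAILED:
    case KILLED:
      return true;
    default:
      return false;
    }
  }

  public static void main(String[] args) {
    for (TaskState state : TaskState.values()) {
      System.out.println(state + " -> terminal=" + isTerminal(state));
    }
  }
}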
Example 1
Source Project: hadoop   Source File: TaskInfo.java    License: Apache License 2.0
public TaskInfo(Task task) {
  TaskType ttype = task.getType();
  this.type = ttype.toString();
  TaskReport report = task.getReport();
  this.startTime = report.getStartTime();
  this.finishTime = report.getFinishTime();
  this.state = report.getTaskState();
  this.elapsedTime = Times.elapsed(this.startTime, this.finishTime,
    this.state == TaskState.RUNNING);
  if (this.elapsedTime == -1) {
    this.elapsedTime = 0;
  }
  this.progress = report.getProgress() * 100;
  this.status =  report.getStatus();
  this.id = MRApps.toString(task.getID());
  this.taskNum = task.getID().getId();
  this.successful = getSuccessfulAttempt(task);
  if (successful != null) {
    this.successfulAttempt = MRApps.toString(successful.getID());
  } else {
    this.successfulAttempt = "";
  }
}
 
Example 2
Source Project: big-c   Source File: TestMRApp.java    License: Apache License 2.0
@Test
public void testJobError() throws Exception {
  MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);

  //send an invalid event to the task in its current state
  app.getContext().getEventHandler().handle(
      new TaskEvent(
          task.getID(), TaskEventType.T_SCHEDULE));

  //this must lead to job error
  app.waitForState(job, JobState.ERROR);
}
 
Example 3
Source Project: big-c   Source File: TestTypeConverter.java    License: Apache License 2.0
@Test
public void testEnums() throws Exception {
  for (YarnApplicationState applicationState : YarnApplicationState.values()) {
    TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
  }
  // ad hoc test of NEW_SAVING, which is newly added
  Assert.assertEquals(State.PREP, TypeConverter.fromYarn(
      YarnApplicationState.NEW_SAVING, FinalApplicationStatus.FAILED));
  
  for (TaskType taskType : TaskType.values()) {
    TypeConverter.fromYarn(taskType);
  }
  
  for (JobState jobState : JobState.values()) {
    TypeConverter.fromYarn(jobState);
  }
  
  for (QueueState queueState : QueueState.values()) {
    TypeConverter.fromYarn(queueState);
  }
  
  for (TaskState taskState : TaskState.values()) {
    TypeConverter.fromYarn(taskState);
  }
}
 
Example 4
Source Project: big-c   Source File: TestMRApp.java    License: Apache License 2.0
@Test
public void testJobRebootNotLastRetryOnUnregistrationFailure()
    throws Exception {
  MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);

  //send a reboot event
  app.getContext().getEventHandler().handle(new JobEvent(job.getID(),
    JobEventType.JOB_AM_REBOOT));

  // return external state as RUNNING since otherwise the JobClient will
  // prematurely exit.
  app.waitForState(job, JobState.RUNNING);
}
 
Example 5
Source Project: hadoop   Source File: TestMRApp.java    License: Apache License 2.0
@Test
public void testJobError() throws Exception {
  MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);

  //send an invalid event to the task in its current state
  app.getContext().getEventHandler().handle(
      new TaskEvent(
          task.getID(), TaskEventType.T_SCHEDULE));

  //this must lead to job error
  app.waitForState(job, JobState.ERROR);
}
 
Example 6
Source Project: hadoop   Source File: TestMRApp.java    License: Apache License 2.0
@Test
public void testJobRebootNotLastRetryOnUnregistrationFailure()
    throws Exception {
  MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);

  //send a reboot event
  app.getContext().getEventHandler().handle(new JobEvent(job.getID(),
    JobEventType.JOB_AM_REBOOT));

  // return external state as RUNNING since otherwise the JobClient will
  // prematurely exit.
  app.waitForState(job, JobState.RUNNING);
}
 
Example 7
Source Project: big-c   Source File: TypeConverter.java    License: Apache License 2.0
public static org.apache.hadoop.mapred.TIPStatus fromYarn(
    TaskState state) {
  switch (state) {
  case NEW:
  case SCHEDULED:
    return org.apache.hadoop.mapred.TIPStatus.PENDING;
  case RUNNING:
    return org.apache.hadoop.mapred.TIPStatus.RUNNING;
  case KILLED:
    return org.apache.hadoop.mapred.TIPStatus.KILLED;
  case SUCCEEDED:
    return org.apache.hadoop.mapred.TIPStatus.COMPLETE;
  case FAILED:
    return org.apache.hadoop.mapred.TIPStatus.FAILED;
  }
  throw new YarnRuntimeException("Unrecognized task state: " + state);
}
 
Example 8
Source Project: big-c   Source File: TestTaskAttempt.java    License: Apache License 2.0
private void testMRAppHistory(MRApp app) throws Exception {
  Configuration conf = new Configuration();
  Job job = app.submit(conf);
  app.waitForState(job, JobState.FAILED);
  Map<TaskId, Task> tasks = job.getTasks();

  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task task = tasks.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.FAILED, task
      .getReport().getTaskState());
  Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator().next()
      .getAttempts();
  Assert.assertEquals("Num attempts is not correct", 4, attempts.size());

  Iterator<TaskAttempt> it = attempts.values().iterator();
  TaskAttemptReport report = it.next().getReport();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
      report.getTaskAttemptState());
  Assert.assertEquals("Diagnostic Information is not Correct",
      "Test Diagnostic Event", report.getDiagnosticInfo());
  report = it.next().getReport();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
      report.getTaskAttemptState());
}
 
Example 9
Source Project: big-c   Source File: TestJobImpl.java    License: Apache License 2.0
private static void completeJobTasks(JobImpl job) {
  // complete the map tasks and the reduce tasks so we start committing
  int numMaps = job.getTotalMaps();
  for (int i = 0; i < numMaps; ++i) {
    job.handle(new JobTaskEvent(
        MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP),
        TaskState.SUCCEEDED));
    Assert.assertEquals(JobState.RUNNING, job.getState());
  }
  int numReduces = job.getTotalReduces();
  for (int i = 0; i < numReduces; ++i) {
    job.handle(new JobTaskEvent(
        MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP),
        TaskState.SUCCEEDED));
    Assert.assertEquals(JobState.RUNNING, job.getState());
  }
}
 
Example 10
Source Project: hadoop   Source File: TypeConverter.java    License: Apache License 2.0
public static org.apache.hadoop.mapred.TIPStatus fromYarn(
    TaskState state) {
  switch (state) {
  case NEW:
  case SCHEDULED:
    return org.apache.hadoop.mapred.TIPStatus.PENDING;
  case RUNNING:
    return org.apache.hadoop.mapred.TIPStatus.RUNNING;
  case KILLED:
    return org.apache.hadoop.mapred.TIPStatus.KILLED;
  case SUCCEEDED:
    return org.apache.hadoop.mapred.TIPStatus.COMPLETE;
  case FAILED:
    return org.apache.hadoop.mapred.TIPStatus.FAILED;
  }
  throw new YarnRuntimeException("Unrecognized task state: " + state);
}
 
Example 11
Source Project: hadoop   Source File: TestTypeConverter.java    License: Apache License 2.0
@Test
public void testEnums() throws Exception {
  for (YarnApplicationState applicationState : YarnApplicationState.values()) {
    TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
  }
  // ad hoc test of NEW_SAVING, which is newly added
  Assert.assertEquals(State.PREP, TypeConverter.fromYarn(
      YarnApplicationState.NEW_SAVING, FinalApplicationStatus.FAILED));
  
  for (TaskType taskType : TaskType.values()) {
    TypeConverter.fromYarn(taskType);
  }
  
  for (JobState jobState : JobState.values()) {
    TypeConverter.fromYarn(jobState);
  }
  
  for (QueueState queueState : QueueState.values()) {
    TypeConverter.fromYarn(queueState);
  }
  
  for (TaskState taskState : TaskState.values()) {
    TypeConverter.fromYarn(taskState);
  }
}
 
Example 12
Source Project: hadoop   Source File: TestJobHistoryParsing.java    License: Apache License 2.0
private long computeFinishedMaps(JobInfo jobInfo, int numMaps,
    int numSuccessfulMaps) {
  if (numMaps == numSuccessfulMaps) {
    return jobInfo.getFinishedMaps();
  }

  long numFinishedMaps = 0;
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
      .getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      ++numFinishedMaps;
    }
  }
  return numFinishedMaps;
}
 
Example 13
Source Project: hadoop   Source File: TestBlocks.java    License: Apache License 2.0
private Task getTask(long timestamp) {
  
  JobId jobId = new JobIdPBImpl();
  jobId.setId(0);
  jobId.setAppId(ApplicationIdPBImpl.newInstance(timestamp,1));

  TaskId taskId = new TaskIdPBImpl();
  taskId.setId(0);
  taskId.setTaskType(TaskType.REDUCE);
  taskId.setJobId(jobId);
  Task task = mock(Task.class);
  when(task.getID()).thenReturn(taskId);
  TaskReport report = mock(TaskReport.class);
  when(report.getProgress()).thenReturn(0.7f);
  when(report.getTaskState()).thenReturn(TaskState.SUCCEEDED);
  when(report.getStartTime()).thenReturn(100001L);
  when(report.getFinishTime()).thenReturn(100011L);

  when(task.getReport()).thenReturn(report);
  when(task.getType()).thenReturn(TaskType.REDUCE);
  return task;
}
 
Example 14
Source Project: big-c   Source File: JobImpl.java    License: Apache License 2.0
@Override
public JobStateInternal transition(JobImpl job, JobEvent event) {
  job.completedTaskCount++;
  LOG.info("Num completed Tasks: " + job.completedTaskCount);
  JobTaskEvent taskEvent = (JobTaskEvent) event;
  Task task = job.tasks.get(taskEvent.getTaskID());
  if (taskEvent.getState() == TaskState.SUCCEEDED) {
    taskSucceeded(job, task);
  } else if (taskEvent.getState() == TaskState.FAILED) {
    taskFailed(job, task);
  } else if (taskEvent.getState() == TaskState.KILLED) {
    taskKilled(job, task);
  }

  return checkJobAfterTaskCompletion(job);
}
 
Example 15
Source Project: hadoop   Source File: JobImpl.java    License: Apache License 2.0
@Override
public void transition(JobImpl job, JobEvent event) {
  //get number of shuffling reduces
  int shufflingReduceTasks = 0;
  for (TaskId taskId : job.reduceTasks) {
    Task task = job.tasks.get(taskId);
    if (TaskState.RUNNING.equals(task.getState())) {
      for(TaskAttempt attempt : task.getAttempts().values()) {
        if(attempt.getPhase() == Phase.SHUFFLE) {
          shufflingReduceTasks++;
          break;
        }
      }
    }
  }

  JobTaskAttemptFetchFailureEvent fetchfailureEvent = 
    (JobTaskAttemptFetchFailureEvent) event;
  for (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId mapId : 
        fetchfailureEvent.getMaps()) {
    Integer fetchFailures = job.fetchFailuresMapping.get(mapId);
    fetchFailures = (fetchFailures == null) ? 1 : (fetchFailures+1);
    job.fetchFailuresMapping.put(mapId, fetchFailures);
    
    float failureRate = shufflingReduceTasks == 0 ? 1.0f : 
      (float) fetchFailures / shufflingReduceTasks;
    // declare faulty if fetch-failures >= max-allowed-failures
    if (fetchFailures >= job.getMaxFetchFailuresNotifications()
        && failureRate >= job.getMaxAllowedFetchFailuresFraction()) {
      LOG.info("Too many fetch-failures for output of task attempt: " + 
          mapId + " ... raising fetch failure to map");
      job.eventHandler.handle(new TaskAttemptEvent(mapId, 
          TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
      job.fetchFailuresMapping.remove(mapId);
    }
  }
}
 
Example 16
Source Project: hadoop   Source File: TaskImpl.java    License: Apache License 2.0
private static TaskState getExternalState(TaskStateInternal smState) {
  if (smState == TaskStateInternal.KILL_WAIT) {
    return TaskState.KILLED;
  } else {
    return TaskState.valueOf(smState.name());
  }
}
 
Example 17
Source Project: hadoop   Source File: TaskImpl.java    License: Apache License 2.0
private void sendTaskSucceededEvents() {
  eventHandler.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));
  LOG.info("Task succeeded with attempt " + successfulAttempt);
  if (historyTaskStartGenerated) {
    TaskFinishedEvent tfe = createTaskFinishedEvent(this,
        TaskStateInternal.SUCCEEDED);
    eventHandler.handle(new JobHistoryEvent(taskId.getJobId(), tfe));
  }
}
 
Example 18
Source Project: big-c   Source File: TaskReportPBImpl.java    License: Apache License 2.0
@Override
public TaskState getTaskState() {
  TaskReportProtoOrBuilder p = viaProto ? proto : builder;
  if (!p.hasTaskState()) {
    return null;
  }
  return convertFromProtoFormat(p.getTaskState());
}
 
Example 19
Source Project: big-c   Source File: TestRMContainerAllocator.java    License: Apache License 2.0
private void finishTask(DrainDispatcher rmDispatcher, MockNM node,
    MRApp mrApp, Task task) throws Exception {
  TaskAttempt attempt = task.getAttempts().values().iterator().next();
  List<ContainerStatus> contStatus = new ArrayList<ContainerStatus>(1);
  contStatus.add(ContainerStatus.newInstance(attempt.getAssignedContainerID(),
      ContainerState.COMPLETE, "", 0));
  Map<ApplicationId,List<ContainerStatus>> statusUpdate =
      new HashMap<ApplicationId,List<ContainerStatus>>(1);
  statusUpdate.put(mrApp.getAppID(), contStatus);
  node.nodeHeartbeat(statusUpdate, true);
  rmDispatcher.await();
  mrApp.getContext().getEventHandler().handle(
        new TaskAttemptEvent(attempt.getID(), TaskAttemptEventType.TA_DONE));
  mrApp.waitForState(task, TaskState.SUCCEEDED);
}
 
Example 20
Source Project: hadoop   Source File: TestMRApp.java    License: Apache License 2.0
@Test
public void testCommitPending() throws Exception {
  MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);
  TaskAttempt attempt = task.getAttempts().values().iterator().next();
  app.waitForState(attempt, TaskAttemptState.RUNNING);

  //send the commit pending signal to the task
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(
          attempt.getID(),
          TaskAttemptEventType.TA_COMMIT_PENDING));

  //wait for first attempt to commit pending
  app.waitForState(attempt, TaskAttemptState.COMMIT_PENDING);

  //re-send the commit pending signal to the task
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(
          attempt.getID(),
          TaskAttemptEventType.TA_COMMIT_PENDING));

  //the task attempt should still be at COMMIT_PENDING
  app.waitForState(attempt, TaskAttemptState.COMMIT_PENDING);

  //send the done signal to the task
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(
          task.getAttempts().values().iterator().next().getID(),
          TaskAttemptEventType.TA_DONE));

  app.waitForState(job, JobState.SUCCEEDED);
}
 
Example 21
Source Project: big-c   Source File: TestJobImpl.java    License: Apache License 2.0
@Test
public void testAbortJobCalledAfterKillingTasks() throws IOException {
  Configuration conf = new Configuration();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  conf.set(MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS, "1000");
  InlineDispatcher dispatcher = new InlineDispatcher();
  dispatcher.init(conf);
  dispatcher.start();
  OutputCommitter committer = Mockito.mock(OutputCommitter.class);
  CommitterEventHandler commitHandler =
      createCommitterEventHandler(dispatcher, committer);
  commitHandler.init(conf);
  commitHandler.start();
  JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);

  //Fail one task. This should land the JobImpl in the FAIL_WAIT state
  job.handle(new JobTaskEvent(
    MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP),
    TaskState.FAILED));
  //Verify abort job hasn't been called
  Mockito.verify(committer, Mockito.never())
    .abortJob((JobContext) Mockito.any(), (State) Mockito.any());
  assertJobState(job, JobStateInternal.FAIL_WAIT);

  //Verify abortJob is called once and the job failed
  Mockito.verify(committer, Mockito.timeout(2000).times(1))
    .abortJob((JobContext) Mockito.any(), (State) Mockito.any());
  assertJobState(job, JobStateInternal.FAILED);

  dispatcher.stop();
}
 
Example 22
Source Project: big-c   Source File: NotRunningJob.java    License: Apache License 2.0
@Override
public GetTaskReportResponse getTaskReport(GetTaskReportRequest request)
    throws IOException {
  GetTaskReportResponse resp =
    recordFactory.newRecordInstance(GetTaskReportResponse.class);
  TaskReport report = recordFactory.newRecordInstance(TaskReport.class);
  report.setTaskId(request.getTaskId());
  report.setTaskState(TaskState.NEW);
  Counters counters = recordFactory.newRecordInstance(Counters.class);
  counters.addAllCounterGroups(new HashMap<String, CounterGroup>());
  report.setCounters(counters);
  report.addAllRunningAttempts(new ArrayList<TaskAttemptId>());
  return resp;
}
 
Example 23
Source Project: hadoop   Source File: TestJobImpl.java    License: Apache License 2.0
@Test
public void testAbortJobCalledAfterKillingTasks() throws IOException {
  Configuration conf = new Configuration();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  conf.set(MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS, "1000");
  InlineDispatcher dispatcher = new InlineDispatcher();
  dispatcher.init(conf);
  dispatcher.start();
  OutputCommitter committer = Mockito.mock(OutputCommitter.class);
  CommitterEventHandler commitHandler =
      createCommitterEventHandler(dispatcher, committer);
  commitHandler.init(conf);
  commitHandler.start();
  JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);

  //Fail one task. This should land the JobImpl in the FAIL_WAIT state
  job.handle(new JobTaskEvent(
    MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP),
    TaskState.FAILED));
  //Verify abort job hasn't been called
  Mockito.verify(committer, Mockito.never())
    .abortJob((JobContext) Mockito.any(), (State) Mockito.any());
  assertJobState(job, JobStateInternal.FAIL_WAIT);

  //Verify abortJob is called once and the job failed
  Mockito.verify(committer, Mockito.timeout(2000).times(1))
    .abortJob((JobContext) Mockito.any(), (State) Mockito.any());
  assertJobState(job, JobStateInternal.FAILED);

  dispatcher.stop();
}
 
Example 24
Source Project: big-c   Source File: MRApp.java    License: Apache License 2.0
public void waitForState(Task task, TaskState finalState) throws Exception {
  int timeoutSecs = 0;
  TaskReport report = task.getReport();
  while (!finalState.equals(report.getTaskState()) &&
      timeoutSecs++ < 20) {
    System.out.println("Task State for " + task.getID() + " is : "
        + report.getTaskState() + " Waiting for state : " + finalState
        + "   progress : " + report.getProgress());
    report = task.getReport();
    Thread.sleep(500);
  }
  System.out.println("Task State is : " + report.getTaskState());
  Assert.assertEquals("Task state is not correct (timedout)", finalState, 
      report.getTaskState());
}
 
Example 25
Source Project: hadoop   Source File: MRApp.java    License: Apache License 2.0
public void waitForState(Task task, TaskState finalState) throws Exception {
  int timeoutSecs = 0;
  TaskReport report = task.getReport();
  while (!finalState.equals(report.getTaskState()) &&
      timeoutSecs++ < 20) {
    System.out.println("Task State for " + task.getID() + " is : "
        + report.getTaskState() + " Waiting for state : " + finalState
        + "   progress : " + report.getProgress());
    report = task.getReport();
    Thread.sleep(500);
  }
  System.out.println("Task State is : " + report.getTaskState());
  Assert.assertEquals("Task state is not correct (timedout)", finalState, 
      report.getTaskState());
}
 
Example 26
Source Project: big-c   Source File: TestKill.java    License: Apache License 2.0
@Test
public void testKillJob() throws Exception {
  final CountDownLatch latch = new CountDownLatch(1);
  
  MRApp app = new BlockingMRApp(1, 0, latch);
  //this will start the job, but the job won't complete because the task is
  //blocked
  Job job = app.submit(new Configuration());
  
  //wait and validate for Job to become RUNNING
  app.waitForState(job, JobState.RUNNING);
  
  //send the kill signal to Job
  app.getContext().getEventHandler().handle(
      new JobEvent(job.getID(), JobEventType.JOB_KILL));
  
  //unblock Task
  latch.countDown();

  //wait and validate for Job to be KILLED
  app.waitForState(job, JobState.KILLED);
  Map<TaskId,Task> tasks = job.getTasks();
  Assert.assertEquals("No of tasks is not correct", 1, 
      tasks.size());
  Task task = tasks.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.KILLED, 
      task.getReport().getTaskState());
  Map<TaskAttemptId, TaskAttempt> attempts = 
    tasks.values().iterator().next().getAttempts();
  Assert.assertEquals("No of attempts is not correct", 1, 
      attempts.size());
  Iterator<TaskAttempt> it = attempts.values().iterator();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.KILLED, 
        it.next().getReport().getTaskAttemptState());
}
 
Example 27
Source Project: hadoop   Source File: TestRMContainerAllocator.java    License: Apache License 2.0
private void finishTask(DrainDispatcher rmDispatcher, MockNM node,
    MRApp mrApp, Task task) throws Exception {
  TaskAttempt attempt = task.getAttempts().values().iterator().next();
  List<ContainerStatus> contStatus = new ArrayList<ContainerStatus>(1);
  contStatus.add(ContainerStatus.newInstance(attempt.getAssignedContainerID(),
      ContainerState.COMPLETE, "", 0));
  Map<ApplicationId,List<ContainerStatus>> statusUpdate =
      new HashMap<ApplicationId,List<ContainerStatus>>(1);
  statusUpdate.put(mrApp.getAppID(), contStatus);
  node.nodeHeartbeat(statusUpdate, true);
  rmDispatcher.await();
  mrApp.getContext().getEventHandler().handle(
        new TaskAttemptEvent(attempt.getID(), TaskAttemptEventType.TA_DONE));
  mrApp.waitForState(task, TaskState.SUCCEEDED);
}
 
Example 28
Source Project: hadoop   Source File: NotRunningJob.java    License: Apache License 2.0
@Override
public GetTaskReportResponse getTaskReport(GetTaskReportRequest request)
    throws IOException {
  GetTaskReportResponse resp =
    recordFactory.newRecordInstance(GetTaskReportResponse.class);
  TaskReport report = recordFactory.newRecordInstance(TaskReport.class);
  report.setTaskId(request.getTaskId());
  report.setTaskState(TaskState.NEW);
  Counters counters = recordFactory.newRecordInstance(Counters.class);
  counters.addAllCounterGroups(new HashMap<String, CounterGroup>());
  report.setCounters(counters);
  report.addAllRunningAttempts(new ArrayList<TaskAttemptId>());
  return resp;
}
 
Example 29
Source Project: hadoop   Source File: TaskReportPBImpl.java    License: Apache License 2.0
@Override
public TaskState getTaskState() {
  TaskReportProtoOrBuilder p = viaProto ? proto : builder;
  if (!p.hasTaskState()) {
    return null;
  }
  return convertFromProtoFormat(p.getTaskState());
}
 
Example 30
Source Project: hadoop   Source File: TaskReportPBImpl.java    License: Apache License 2.0
@Override
public void setTaskState(TaskState taskState) {
  maybeInitBuilder();
  if (taskState == null) {
    builder.clearTaskState();
    return;
  }
  builder.setTaskState(convertToProtoFormat(taskState));
}