Java Code Examples for org.apache.hadoop.mapreduce.v2.app.job.Task#getAttempt()

The following examples show how to use org.apache.hadoop.mapreduce.v2.app.job.Task#getAttempt(). Each example lists its source file and originating project above the code.
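All of the examples below share the same lookup chain: a JobId is resolved to a Job through the application context, the TaskId to a Task with Job#getTask(), and finally the TaskAttemptId to a TaskAttempt with Task#getAttempt(). The sketch below condenses that pattern into one helper; it assumes an org.apache.hadoop.mapreduce.v2.app.AppContext is available and is only an illustration, not code taken from the projects listed below.

// Illustrative sketch (assumed AppContext; not taken from the examples below):
// walk from a TaskAttemptId down to its TaskAttempt, handling missing pieces.
private TaskAttempt lookupAttempt(AppContext context, TaskAttemptId attemptID) {
  Job job = context.getJob(attemptID.getTaskId().getJobId());
  if (job == null) {
    return null;                       // job is no longer known to the app context
  }
  Task task = job.getTask(attemptID.getTaskId());
  if (task == null) {
    return null;                       // no such task in this job
  }
  return task.getAttempt(attemptID);   // may still be null if the attempt does not exist
}
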
Example 1
Source File: LegacyTaskRuntimeEstimator.java    From hadoop with Apache License 2.0
private long storedPerAttemptValue(
    Map<TaskAttempt, AtomicLong> data, TaskAttemptId attemptID) {
  TaskId taskID = attemptID.getTaskId();
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);

  Task task = job.getTask(taskID);

  if (task == null) {
    return -1L;
  }

  TaskAttempt taskAttempt = task.getAttempt(attemptID);

  if (taskAttempt == null) {
    return -1L;
  }

  AtomicLong estimate = data.get(taskAttempt);

  return estimate == null ? -1L : estimate.get();

}
 
Example 2
Source File: LegacyTaskRuntimeEstimator.java    From big-c with Apache License 2.0
private long storedPerAttemptValue(
    Map<TaskAttempt, AtomicLong> data, TaskAttemptId attemptID) {
  TaskId taskID = attemptID.getTaskId();
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);

  Task task = job.getTask(taskID);

  if (task == null) {
    return -1L;
  }

  TaskAttempt taskAttempt = task.getAttempt(attemptID);

  if (taskAttempt == null) {
    return -1L;
  }

  AtomicLong estimate = data.get(taskAttempt);

  return estimate == null ? -1L : estimate.get();

}
 
Example 3
Source File: MRAppMaster.java    From hadoop with Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public void handle(TaskAttemptEvent event) {
  Job job = context.getJob(event.getTaskAttemptID().getTaskId().getJobId());
  Task task = job.getTask(event.getTaskAttemptID().getTaskId());
  TaskAttempt attempt = task.getAttempt(event.getTaskAttemptID());
  ((EventHandler<TaskAttemptEvent>) attempt).handle(event);
}
 
Example 4
Source File: TestJobHistoryEntities.java    From hadoop with Apache License 2.0
@Test (timeout=10000)
public void testCompletedTaskAttempt() throws Exception {
  HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  completedJob =
    new CompletedJob(conf, jobId, fullHistoryPath, loadTasks, "user",
        info, jobAclsManager);
  TaskId mt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  TaskId rt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  TaskAttemptId mta1Id = MRBuilderUtils.newTaskAttemptId(mt1Id, 0);
  TaskAttemptId rta1Id = MRBuilderUtils.newTaskAttemptId(rt1Id, 0);
  
  Task mt1 = completedJob.getTask(mt1Id);
  Task rt1 = completedJob.getTask(rt1Id);
  
  TaskAttempt mta1 = mt1.getAttempt(mta1Id);
  assertEquals(TaskAttemptState.SUCCEEDED, mta1.getState());
  assertEquals("localhost:45454", mta1.getAssignedContainerMgrAddress());
  assertEquals("localhost:9999", mta1.getNodeHttpAddress());
  TaskAttemptReport mta1Report = mta1.getReport();
  assertEquals(TaskAttemptState.SUCCEEDED, mta1Report.getTaskAttemptState());
  assertEquals("localhost", mta1Report.getNodeManagerHost());
  assertEquals(45454, mta1Report.getNodeManagerPort());
  assertEquals(9999, mta1Report.getNodeManagerHttpPort());
  
  TaskAttempt rta1 = rt1.getAttempt(rta1Id);
  assertEquals(TaskAttemptState.SUCCEEDED, rta1.getState());
  assertEquals("localhost:45454", rta1.getAssignedContainerMgrAddress());
  assertEquals("localhost:9999", rta1.getNodeHttpAddress());
  TaskAttemptReport rta1Report = rta1.getReport();
  assertEquals(TaskAttemptState.SUCCEEDED, rta1Report.getTaskAttemptState());
  assertEquals("localhost", rta1Report.getNodeManagerHost());
  assertEquals(45454, rta1Report.getNodeManagerPort());
  assertEquals(9999, rta1Report.getNodeManagerHttpPort());
}
 
Example 5
Source File: MRAppMaster.java    From big-c with Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public void handle(TaskAttemptEvent event) {
  Job job = context.getJob(event.getTaskAttemptID().getTaskId().getJobId());
  Task task = job.getTask(event.getTaskAttemptID().getTaskId());
  TaskAttempt attempt = task.getAttempt(event.getTaskAttemptID());
  ((EventHandler<TaskAttemptEvent>) attempt).handle(event);
}
 
Example 6
Source File: TestJobHistoryEntities.java    From big-c with Apache License 2.0
@Test (timeout=10000)
public void testCompletedTaskAttempt() throws Exception {
  HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  completedJob =
    new CompletedJob(conf, jobId, fullHistoryPath, loadTasks, "user",
        info, jobAclsManager);
  TaskId mt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  TaskId rt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  TaskAttemptId mta1Id = MRBuilderUtils.newTaskAttemptId(mt1Id, 0);
  TaskAttemptId rta1Id = MRBuilderUtils.newTaskAttemptId(rt1Id, 0);
  
  Task mt1 = completedJob.getTask(mt1Id);
  Task rt1 = completedJob.getTask(rt1Id);
  
  TaskAttempt mta1 = mt1.getAttempt(mta1Id);
  assertEquals(TaskAttemptState.SUCCEEDED, mta1.getState());
  assertEquals("localhost:45454", mta1.getAssignedContainerMgrAddress());
  assertEquals("localhost:9999", mta1.getNodeHttpAddress());
  TaskAttemptReport mta1Report = mta1.getReport();
  assertEquals(TaskAttemptState.SUCCEEDED, mta1Report.getTaskAttemptState());
  assertEquals("localhost", mta1Report.getNodeManagerHost());
  assertEquals(45454, mta1Report.getNodeManagerPort());
  assertEquals(9999, mta1Report.getNodeManagerHttpPort());
  
  TaskAttempt rta1 = rt1.getAttempt(rta1Id);
  assertEquals(TaskAttemptState.SUCCEEDED, rta1.getState());
  assertEquals("localhost:45454", rta1.getAssignedContainerMgrAddress());
  assertEquals("localhost:9999", rta1.getNodeHttpAddress());
  TaskAttemptReport rta1Report = rta1.getReport();
  assertEquals(TaskAttemptState.SUCCEEDED, rta1Report.getTaskAttemptState());
  assertEquals("localhost", rta1Report.getNodeManagerHost());
  assertEquals(45454, rta1Report.getNodeManagerPort());
  assertEquals(9999, rta1Report.getNodeManagerHttpPort());
}
 
Example 7
Source File: StartEndTimesBase.java    From hadoop with Apache License 2.0
@Override
public void updateAttempt(TaskAttemptStatus status, long timestamp) {

  TaskAttemptId attemptID = status.id;
  TaskId taskID = attemptID.getTaskId();
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);

  if (job == null) {
    return;
  }

  Task task = job.getTask(taskID);

  if (task == null) {
    return;
  }

  Long boxedStart = startTimes.get(attemptID);
  long start = boxedStart == null ? Long.MIN_VALUE : boxedStart;
  
  TaskAttempt taskAttempt = task.getAttempt(attemptID);

  if (taskAttempt.getState() == TaskAttemptState.SUCCEEDED) {
    boolean isNew = false;
    // is this  a new success?
    synchronized (doneTasks) {
      if (!doneTasks.contains(task)) {
        doneTasks.add(task);
        isNew = true;
      }
    }

    // It's a new completion
    // Note that if a task completes twice [because of a previous speculation
    //  and a race, or a success followed by loss of the machine with the
    //  local data] we only count the first one.
    if (isNew) {
      long finish = timestamp;
      if (start > 1L && finish > 1L && start <= finish) {
        long duration = finish - start;

        DataStatistics statistics = dataStatisticsForTask(taskID);

        if (statistics != null) {
          statistics.add(duration);
        }
      }
    }
  }
}
 
Example 8
Source File: LegacyTaskRuntimeEstimator.java    From hadoop with Apache License 2.0
@Override
public void updateAttempt(TaskAttemptStatus status, long timestamp) {
  super.updateAttempt(status, timestamp);

  TaskAttemptId attemptID = status.id;
  TaskId taskID = attemptID.getTaskId();
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);

  if (job == null) {
    return;
  }

  Task task = job.getTask(taskID);

  if (task == null) {
    return;
  }

  TaskAttempt taskAttempt = task.getAttempt(attemptID);

  if (taskAttempt == null) {
    return;
  }

  Long boxedStart = startTimes.get(attemptID);
  long start = boxedStart == null ? Long.MIN_VALUE : boxedStart;

  // We need to do two things.
  //  1: If this is a completion, we accumulate statistics in the superclass
  //  2: If this is not a completion, we learn more about it.

  // This is not a completion, but we're cooking.
  //
  if (taskAttempt.getState() == TaskAttemptState.RUNNING) {
    // See if this task is already in the registry
    AtomicLong estimateContainer = attemptRuntimeEstimates.get(taskAttempt);
    AtomicLong estimateVarianceContainer
        = attemptRuntimeEstimateVariances.get(taskAttempt);

    if (estimateContainer == null) {
      if (attemptRuntimeEstimates.get(taskAttempt) == null) {
        attemptRuntimeEstimates.put(taskAttempt, new AtomicLong());

        estimateContainer = attemptRuntimeEstimates.get(taskAttempt);
      }
    }

    if (estimateVarianceContainer == null) {
      attemptRuntimeEstimateVariances.putIfAbsent(taskAttempt, new AtomicLong());
      estimateVarianceContainer = attemptRuntimeEstimateVariances.get(taskAttempt);
    }


    long estimate = -1;
    long varianceEstimate = -1;

    // This code assumes that we'll never consider starting a third
    //  speculative task attempt if two are already running for this task
    if (start > 0 && timestamp > start) {
      estimate = (long) ((timestamp - start) / Math.max(0.0001, status.progress));
      varianceEstimate = (long) (estimate * status.progress / 10);
    }
    if (estimateContainer != null) {
      estimateContainer.set(estimate);
    }
    if (estimateVarianceContainer != null) {
      estimateVarianceContainer.set(varianceEstimate);
    }
  }
}
 
Example 9
Source File: JobImpl.java    From hadoop with Apache License 2.0
@Override
public void transition(JobImpl job, JobEvent event) {
  TaskAttemptCompletionEvent tce = 
    ((JobTaskAttemptCompletedEvent) event).getCompletionEvent();
  // Add the TaskAttemptCompletionEvent
  //eventId is equal to index in the arraylist
  tce.setEventId(job.taskAttemptCompletionEvents.size());
  job.taskAttemptCompletionEvents.add(tce);
  int mapEventIdx = -1;
  if (TaskType.MAP.equals(tce.getAttemptId().getTaskId().getTaskType())) {
    // we track map completions separately from task completions because
    // - getMapAttemptCompletionEvents uses index ranges specific to maps
    // - type converting the same events over and over is expensive
    mapEventIdx = job.mapAttemptCompletionEvents.size();
    job.mapAttemptCompletionEvents.add(TypeConverter.fromYarn(tce));
  }
  job.taskCompletionIdxToMapCompletionIdx.add(mapEventIdx);
  
  TaskAttemptId attemptId = tce.getAttemptId();
  TaskId taskId = attemptId.getTaskId();
  //make the previous completion event as obsolete if it exists
  Integer successEventNo =
      job.successAttemptCompletionEventNoMap.remove(taskId);
  if (successEventNo != null) {
    TaskAttemptCompletionEvent successEvent = 
      job.taskAttemptCompletionEvents.get(successEventNo);
    successEvent.setStatus(TaskAttemptCompletionEventStatus.OBSOLETE);
    int mapCompletionIdx =
        job.taskCompletionIdxToMapCompletionIdx.get(successEventNo);
    if (mapCompletionIdx >= 0) {
      // update the corresponding TaskCompletionEvent for the map
      TaskCompletionEvent mapEvent =
          job.mapAttemptCompletionEvents.get(mapCompletionIdx);
      job.mapAttemptCompletionEvents.set(mapCompletionIdx,
          new TaskCompletionEvent(mapEvent.getEventId(),
              mapEvent.getTaskAttemptId(), mapEvent.idWithinJob(),
              mapEvent.isMapTask(), TaskCompletionEvent.Status.OBSOLETE,
              mapEvent.getTaskTrackerHttp()));
    }
  }
  
  // if this attempt is not successful then why is the previous successful 
  // attempt being removed above - MAPREDUCE-4330
  if (TaskAttemptCompletionEventStatus.SUCCEEDED.equals(tce.getStatus())) {
    job.successAttemptCompletionEventNoMap.put(taskId, tce.getEventId());
    
    // here we could have simply called Task.getSuccessfulAttempt() but
    // the event that triggers this code is sent before
    // Task.successfulAttempt is set and so there is no guarantee that it
    // will be available now
    Task task = job.tasks.get(taskId);
    TaskAttempt attempt = task.getAttempt(attemptId);
    NodeId nodeId = attempt.getNodeId();
    assert (nodeId != null); // node must exist for a successful event
    List<TaskAttemptId> taskAttemptIdList = job.nodesToSucceededTaskAttempts
        .get(nodeId);
    if (taskAttemptIdList == null) {
      taskAttemptIdList = new ArrayList<TaskAttemptId>();
      job.nodesToSucceededTaskAttempts.put(nodeId, taskAttemptIdList);
    }
    taskAttemptIdList.add(attempt.getID());
  }
}
 
Example 10
Source File: StartEndTimesBase.java    From big-c with Apache License 2.0
@Override
public void updateAttempt(TaskAttemptStatus status, long timestamp) {

  TaskAttemptId attemptID = status.id;
  TaskId taskID = attemptID.getTaskId();
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);

  if (job == null) {
    return;
  }

  Task task = job.getTask(taskID);

  if (task == null) {
    return;
  }

  Long boxedStart = startTimes.get(attemptID);
  long start = boxedStart == null ? Long.MIN_VALUE : boxedStart;
  
  TaskAttempt taskAttempt = task.getAttempt(attemptID);

  if (taskAttempt.getState() == TaskAttemptState.SUCCEEDED) {
    boolean isNew = false;
    // is this  a new success?
    synchronized (doneTasks) {
      if (!doneTasks.contains(task)) {
        doneTasks.add(task);
        isNew = true;
      }
    }

    // It's a new completion
    // Note that if a task completes twice [because of a previous speculation
    //  and a race, or a success followed by loss of the machine with the
    //  local data] we only count the first one.
    if (isNew) {
      long finish = timestamp;
      if (start > 1L && finish > 1L && start <= finish) {
        long duration = finish - start;

        DataStatistics statistics = dataStatisticsForTask(taskID);

        if (statistics != null) {
          statistics.add(duration);
        }
      }
    }
  }
}
 
Example 11
Source File: LegacyTaskRuntimeEstimator.java    From big-c with Apache License 2.0
@Override
public void updateAttempt(TaskAttemptStatus status, long timestamp) {
  super.updateAttempt(status, timestamp);

  TaskAttemptId attemptID = status.id;
  TaskId taskID = attemptID.getTaskId();
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);

  if (job == null) {
    return;
  }

  Task task = job.getTask(taskID);

  if (task == null) {
    return;
  }

  TaskAttempt taskAttempt = task.getAttempt(attemptID);

  if (taskAttempt == null) {
    return;
  }

  Long boxedStart = startTimes.get(attemptID);
  long start = boxedStart == null ? Long.MIN_VALUE : boxedStart;

  // We need to do two things.
  //  1: If this is a completion, we accumulate statistics in the superclass
  //  2: If this is not a completion, we learn more about it.

  // This is not a completion, but we're cooking.
  //
  if (taskAttempt.getState() == TaskAttemptState.RUNNING) {
    // See if this task is already in the registry
    AtomicLong estimateContainer = attemptRuntimeEstimates.get(taskAttempt);
    AtomicLong estimateVarianceContainer
        = attemptRuntimeEstimateVariances.get(taskAttempt);

    if (estimateContainer == null) {
      if (attemptRuntimeEstimates.get(taskAttempt) == null) {
        attemptRuntimeEstimates.put(taskAttempt, new AtomicLong());

        estimateContainer = attemptRuntimeEstimates.get(taskAttempt);
      }
    }

    if (estimateVarianceContainer == null) {
      attemptRuntimeEstimateVariances.putIfAbsent(taskAttempt, new AtomicLong());
      estimateVarianceContainer = attemptRuntimeEstimateVariances.get(taskAttempt);
    }


    long estimate = -1;
    long varianceEstimate = -1;

    // This code assumes that we'll never consider starting a third
    //  speculative task attempt if two are already running for this task
    if (start > 0 && timestamp > start) {
      estimate = (long) ((timestamp - start) / Math.max(0.0001, status.progress));
      varianceEstimate = (long) (estimate * status.progress / 10);
    }
    if (estimateContainer != null) {
      estimateContainer.set(estimate);
    }
    if (estimateVarianceContainer != null) {
      estimateVarianceContainer.set(varianceEstimate);
    }
  }
}
 
Example 12
Source File: JobImpl.java    From big-c with Apache License 2.0
@Override
public void transition(JobImpl job, JobEvent event) {
  TaskAttemptCompletionEvent tce = 
    ((JobTaskAttemptCompletedEvent) event).getCompletionEvent();
  // Add the TaskAttemptCompletionEvent
  //eventId is equal to index in the arraylist
  tce.setEventId(job.taskAttemptCompletionEvents.size());
  job.taskAttemptCompletionEvents.add(tce);
  int mapEventIdx = -1;
  if (TaskType.MAP.equals(tce.getAttemptId().getTaskId().getTaskType())) {
    // we track map completions separately from task completions because
    // - getMapAttemptCompletionEvents uses index ranges specific to maps
    // - type converting the same events over and over is expensive
    mapEventIdx = job.mapAttemptCompletionEvents.size();
    job.mapAttemptCompletionEvents.add(TypeConverter.fromYarn(tce));
  }
  job.taskCompletionIdxToMapCompletionIdx.add(mapEventIdx);
  
  TaskAttemptId attemptId = tce.getAttemptId();
  TaskId taskId = attemptId.getTaskId();
  //make the previous completion event as obsolete if it exists
  Integer successEventNo =
      job.successAttemptCompletionEventNoMap.remove(taskId);
  if (successEventNo != null) {
    TaskAttemptCompletionEvent successEvent = 
      job.taskAttemptCompletionEvents.get(successEventNo);
    successEvent.setStatus(TaskAttemptCompletionEventStatus.OBSOLETE);
    int mapCompletionIdx =
        job.taskCompletionIdxToMapCompletionIdx.get(successEventNo);
    if (mapCompletionIdx >= 0) {
      // update the corresponding TaskCompletionEvent for the map
      TaskCompletionEvent mapEvent =
          job.mapAttemptCompletionEvents.get(mapCompletionIdx);
      job.mapAttemptCompletionEvents.set(mapCompletionIdx,
          new TaskCompletionEvent(mapEvent.getEventId(),
              mapEvent.getTaskAttemptId(), mapEvent.idWithinJob(),
              mapEvent.isMapTask(), TaskCompletionEvent.Status.OBSOLETE,
              mapEvent.getTaskTrackerHttp()));
    }
  }
  
  // if this attempt is not successful then why is the previous successful 
  // attempt being removed above - MAPREDUCE-4330
  if (TaskAttemptCompletionEventStatus.SUCCEEDED.equals(tce.getStatus())) {
    job.successAttemptCompletionEventNoMap.put(taskId, tce.getEventId());
    
    // here we could have simply called Task.getSuccessfulAttempt() but
    // the event that triggers this code is sent before
    // Task.successfulAttempt is set and so there is no guarantee that it
    // will be available now
    Task task = job.tasks.get(taskId);
    TaskAttempt attempt = task.getAttempt(attemptId);
    NodeId nodeId = attempt.getNodeId();
    assert (nodeId != null); // node must exist for a successful event
    List<TaskAttemptId> taskAttemptIdList = job.nodesToSucceededTaskAttempts
        .get(nodeId);
    if (taskAttemptIdList == null) {
      taskAttemptIdList = new ArrayList<TaskAttemptId>();
      job.nodesToSucceededTaskAttempts.put(nodeId, taskAttemptIdList);
    }
    taskAttemptIdList.add(attempt.getID());
  }
}