org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo Java Examples

The following examples show how to use org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo. Each example is taken from the project and source file named above it.
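
ReduceTaskAttemptInfo is Rumen's description of a single reduce task attempt: its completion state, its TaskInfo counters, and the time spent in the shuffle, merge (sort), and reduce phases. The sketch below is not taken from any of the listed projects; it is a minimal, self-contained illustration that assumes the five-argument TaskInfo constructor and the phase-runtime getters of the Rumen API, with made-up counter values.

import org.apache.hadoop.mapred.TaskStatus.State;
import org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo;
import org.apache.hadoop.tools.rumen.TaskInfo;

public class ReduceTaskAttemptInfoSketch {
  public static void main(String[] args) {
    // Byte/record counters for the attempt; -1 leaves the max memory unspecified,
    // mirroring the TaskInfo(bytesIn, recsIn, bytesOut, recsOut, maxMemory) calls
    // in the examples below.
    TaskInfo taskInfo = new TaskInfo(1 << 20, 1000, 1 << 19, 500, -1);

    // Shuffle, merge (sort) and reduce phase runtimes, in milliseconds.
    ReduceTaskAttemptInfo attempt =
        new ReduceTaskAttemptInfo(State.SUCCEEDED, taskInfo, 200, 100, 700);

    System.out.println("state   = " + attempt.getRunState());       // SUCCEEDED
    System.out.println("shuffle = " + attempt.getShuffleRuntime()); // 200
    System.out.println("merge   = " + attempt.getMergeRuntime());   // 100
    System.out.println("reduce  = " + attempt.getReduceRuntime());  // 700
    System.out.println("total   = " + attempt.getRuntime());        // 1000, i.e. shuffle + merge + reduce
  }
}
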
Example #1
Source File: DebugJobProducer.java    From hadoop with Apache License 2.0
@SuppressWarnings({ "deprecation", "incomplete-switch" })
@Override
public TaskAttemptInfo getTaskAttemptInfo(
  TaskType taskType, int taskNumber, int taskAttemptNumber) {
  switch (taskType) {
    case MAP:
      return new MapTaskAttemptInfo(
        State.SUCCEEDED, 
        new TaskInfo(
          m_bytesIn[taskNumber], m_recsIn[taskNumber],
          m_bytesOut[taskNumber], m_recsOut[taskNumber], -1),
        100);

    case REDUCE:
      return new ReduceTaskAttemptInfo(
        State.SUCCEEDED, 
        new TaskInfo(
          r_bytesIn[taskNumber], r_recsIn[taskNumber],
          r_bytesOut[taskNumber], r_recsOut[taskNumber], -1),
        100, 100, 100);
  }
  throw new UnsupportedOperationException();
}
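In this synthetic job story both attempt types report SUCCEEDED, and the trailing arguments are runtimes in milliseconds: a single 100 for the map attempt, and 100 each for the shuffle, merge, and reduce phases of the reduce attempt, matching the three time arguments the ReduceTaskAttemptInfo constructor takes.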
 
Example #2
Source File: SimulatorJobInProgress.java    From RDFS with Apache License 2.0
/**
 * Given a reduce task attempt ID, returns its TaskAttemptInfo. Deconstructs
 * the attempt ID into task type, task id, and attempt id, and looks the
 * attempt up in the jobStory.
 * 
 * @param taskTracker
 *          tasktracker
 * @param taskAttemptID
 *          task-attempt
 * @return TaskAttemptInfo for the reduce task-attempt
 */
private TaskAttemptInfo getReduceTaskAttemptInfo(TaskTracker taskTracker,
    TaskAttemptID taskAttemptID) {
  assert (!taskAttemptID.isMap());
  TaskID taskId = taskAttemptID.getTaskID();
  TaskType taskType;
  if (taskAttemptID.isMap()) {
    taskType = TaskType.MAP;
  } else {
    taskType = TaskType.REDUCE;
  }

  TaskAttemptInfo taskAttemptInfo = jobStory.getTaskAttemptInfo(taskType,
      taskId.getId(), taskAttemptID.getId());
  if (LOG.isDebugEnabled()) {
    LOG.debug("get an attempt: "
        + taskAttemptID.toString()
        + ", state="
        + taskAttemptInfo.getRunState()
        + ", runtime="
        + ((taskAttemptID.isMap()) ? taskAttemptInfo.getRuntime()
            : ((ReduceTaskAttemptInfo) taskAttemptInfo).getReduceRuntime()));
  }
  return taskAttemptInfo;
}
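Note the cast in the debug log: only ReduceTaskAttemptInfo exposes getReduceRuntime(), so that value is reported for reduce attempts, while map attempts fall back to the generic getRuntime() on TaskAttemptInfo.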
 
Example #3
Source File: SimulatorTaskTracker.java    From RDFS with Apache License 2.0
/** 
 * Constructs a SimulatorTaskInProgress by copying most of its fields from
 * the given SimulatorLaunchTaskAction.
 */
public SimulatorTaskInProgress(SimulatorLaunchTaskAction action,
                               TaskStatus taskStatus, long now) {
  this.taskStatus = taskStatus;
  this.taskAttempInfo = action.getTaskAttemptInfo();
  if (taskStatus.getIsMap()) {
    this.userSpaceRunTime = taskAttempInfo.getRuntime();
  } else {
    this.userSpaceRunTime = 
      ((ReduceTaskAttemptInfo)taskAttempInfo).getReduceRuntime();
  }
}
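Here the simulator again distinguishes the two attempt types: for a reduce attempt the user-space run time is taken from getReduceRuntime() rather than getRuntime(), which for a ReduceTaskAttemptInfo appears to also include the shuffle and merge phases.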
 
Example #4
Source File: MockSimulatorJobTracker.java    From RDFS with Apache License 2.0
public void runReduceTask(String taskTrackerName, TaskAttemptID taskId,
                          long reduceStart, long mapDoneDelay, 
                          long reduceRuntime, long killHeartbeat) {
  long mapDone = nextHeartbeat(reduceStart + mapDoneDelay);
  long reduceDone = mapDone + reduceRuntime;
  long reduceEndHeartbeat = nextHeartbeat(reduceDone);
  final boolean isKilled = (killHeartbeat>=0);
  if (isKilled) {
    reduceEndHeartbeat = nextHeartbeat(killHeartbeat + 1);
  }

  LOG.debug("reduceStart=" + reduceStart + ", mapDone=" + mapDone + 
            ", reduceDone=" + reduceDone + 
            ", reduceEndHeartbeat=" + reduceEndHeartbeat +
            ", killHeartbeat=" + killHeartbeat);

  final int numSlotsRequired = 1;
  org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi = 
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);        
  Task task = new ReduceTask("dummyjobfile", taskIdOldApi, 0, 0,
                             numSlotsRequired);
  // all byte counters are 0
  TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0); 
  ReduceTaskAttemptInfo taskAttemptInfo = 
      new ReduceTaskAttemptInfo(State.SUCCEEDED, taskInfo, 0, 0, 
                                reduceRuntime);
  TaskTrackerAction action = 
      new SimulatorLaunchTaskAction(task, taskAttemptInfo);    
  heartbeats.get(reduceStart).get(taskTrackerName).addTaskTrackerAction(
      action);
  if (!isKilled || mapDone < killHeartbeat) {
    action = new AllMapsCompletedTaskAction(task.getTaskID());
    heartbeats.get(mapDone).get(taskTrackerName).addTaskTrackerAction(
        action);
  }
  if (isKilled) {
    action = new KillTaskAction(taskIdOldApi);
    heartbeats.get(killHeartbeat).get(taskTrackerName).addTaskTrackerAction(
       action);
  }

  for(long simulationTime = reduceStart + heartbeatInterval; 
      simulationTime <= reduceEndHeartbeat;
      simulationTime += heartbeatInterval) {
    State state = simulationTime < reduceEndHeartbeat ? 
        State.RUNNING : State.SUCCEEDED;
    if (simulationTime == reduceEndHeartbeat && isKilled) {
      state = State.KILLED;
    }
    // mapDone is when the all-maps-completed event is delivered
    Phase phase = simulationTime <= mapDone ? Phase.SHUFFLE : Phase.REDUCE; 
    ReduceTaskStatus reduceStatus = new ReduceTaskStatus(
        task.getTaskID(), 0.0f, 0, state, "", "", null, phase, null);
    heartbeats.get(simulationTime).get(taskTrackerName).addTaskReport(
        reduceStatus);
  }
}
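This test helper builds a ReduceTaskAttemptInfo with zero shuffle and merge time and the supplied reduceRuntime as the reduce-phase time, so the attempt's total runtime is just reduceRuntime. It wraps the attempt in a SimulatorLaunchTaskAction and then schedules heartbeat reports whose phase flips from SHUFFLE to REDUCE once the all-maps-completed action has been delivered.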