Java Code Examples for org.apache.hadoop.mapred.TaskStatus.State#KILLED

The following examples show how to use org.apache.hadoop.mapred.TaskStatus.State#KILLED, drawn from several open-source projects. The source file and project are noted above each example.
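Before the project-specific snippets, here is a minimal sketch of the most common pattern around this constant: comparing a task attempt's run state against TaskStatus.State.KILLED. The KilledStateCheck class and its wasKilled helper are hypothetical names introduced for illustration; only TaskStatus, its nested State enum, and getRunState() are part of the Hadoop MapReduce internals used by the examples below.

import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapred.TaskStatus.State;

public class KilledStateCheck {
  // Returns true if the attempt's current run state reports it was killed.
  public static boolean wasKilled(TaskStatus status) {
    return status.getRunState() == State.KILLED;
  }
}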
Example 1
Source File: ZombieJob.java    From hadoop with Apache License 2.0
private static State convertState(Values status) {
  if (status == Values.SUCCESS) {
    return State.SUCCEEDED;
  } else if (status == Values.FAILED) {
    return State.FAILED;
  } else if (status == Values.KILLED) {
    return State.KILLED;
  } else {
    throw new IllegalArgumentException("unknown status " + status);
  }
}
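This helper translates a recorded job-history status value into the framework's TaskStatus.State: a KILLED trace value maps one-to-one onto State.KILLED, and any unrecognized value is rejected with an IllegalArgumentException. The same helper appears unchanged in the big-c and RDFS forks shown in the next two examples.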
 
Example 2
Source File: ZombieJob.java    From big-c with Apache License 2.0
private static State convertState(Values status) {
  if (status == Values.SUCCESS) {
    return State.SUCCEEDED;
  } else if (status == Values.FAILED) {
    return State.FAILED;
  } else if (status == Values.KILLED) {
    return State.KILLED;
  } else {
    throw new IllegalArgumentException("unknown status " + status);
  }
}
 
Example 3
Source File: ZombieJob.java    From RDFS with Apache License 2.0
private static State convertState(Values status) {
  if (status == Values.SUCCESS) {
    return State.SUCCEEDED;
  } else if (status == Values.FAILED) {
    return State.FAILED;
  } else if (status == Values.KILLED) {
    return State.KILLED;
  } else {
    throw new IllegalArgumentException("unknown status " + status);
  }
}
 
Example 4
Source File: SimulatorTaskTracker.java    From RDFS with Apache License 2.0
/**
 * Stops running a task attempt on the task tracker. It also updates the 
 * number of available slots accordingly.
 * 
 * @param finalStatus the TaskStatus containing the task id and final 
 *        status of the task attempt. This routine asserts a lot of the
 *        finalStatus params, in case it is coming from a task attempt
 *        completion event sent to ourselves. Only the run state, finish time,
 *        and progress fields of the task attempt are updated.
 * @param now Current simulation time, used for assert only
 */
private void finishRunningTask(TaskStatus finalStatus, long now) {
  TaskAttemptID taskId = finalStatus.getTaskID();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Finishing running task id=" + taskId + ", now=" + now);
  }

  SimulatorTaskInProgress tip = tasks.get(taskId);
  if (tip == null) {
    throw new IllegalArgumentException("Unknown task attempt " + taskId
        + " completed");
  }
  TaskStatus currentStatus = tip.getTaskStatus();
  if (currentStatus.getRunState() != State.RUNNING) {
    throw new IllegalArgumentException(
        "Task attempt to finish is not running: " + tip);
  }

  // Check that finalStatus describes a task attempt that has just been
  // completed
  State finalRunState = finalStatus.getRunState();
  if (finalRunState != State.SUCCEEDED && finalRunState != State.FAILED
      && finalRunState != State.KILLED) {
    throw new IllegalArgumentException(
        "Final run state for completed task can't be : " + finalRunState
            + " " + tip);
  }

  if (now != finalStatus.getFinishTime()) {
    throw new IllegalArgumentException(
        "Current time does not match task finish time: now=" + now
            + ", finish=" + finalStatus.getFinishTime());
  }

  if (currentStatus.getIsMap() != finalStatus.getIsMap()
      || currentStatus.getNumSlots() != finalStatus.getNumSlots()
      || currentStatus.getPhase() != finalStatus.getPhase()
      || currentStatus.getStartTime() != finalStatus.getStartTime()) {
    throw new IllegalArgumentException(
        "Current status does not match final status");
  }

  // We can't assert getShuffleFinishTime() and getSortFinishTime() for
  // reduces as those were unknown when the task attempt completion event
  // was created. We have not called setMapFinishTime() for maps either.
  // If we were really thorough we could update the progress of the task
  // and check if it is consistent with finalStatus.

  // If we've got this far it is safe to update the task status
  currentStatus.setRunState(finalStatus.getRunState());
  currentStatus.setFinishTime(finalStatus.getFinishTime());
  currentStatus.setProgress(finalStatus.getProgress());

  // and update the free slots
  int numSlots = currentStatus.getNumSlots();
  if (tip.isMapTask()) {
    usedMapSlots -= numSlots;
    if (usedMapSlots < 0) {
      throw new IllegalStateException(
          "TaskTracker reaches negative map slots: " + usedMapSlots);
    }
  } else {
    usedReduceSlots -= numSlots;
    if (usedReduceSlots < 0) {
      throw new IllegalStateException(
          "TaskTracker reaches negative reduce slots: " + usedReduceSlots);
    }
  }
}
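The check on finalRunState is where State.KILLED matters here: a completed attempt must report exactly one of SUCCEEDED, FAILED, or KILLED. As a minimal sketch, that validation could be factored into a small predicate like the hypothetical isTerminal below; the class and method names are mine, only the State constants come from the Hadoop API.

import org.apache.hadoop.mapred.TaskStatus.State;

// Hypothetical helper, for illustration only; not part of SimulatorTaskTracker.
final class TaskStateUtil {
  private TaskStateUtil() {}

  // True if the run state is terminal for a completed task attempt.
  static boolean isTerminal(State state) {
    return state == State.SUCCEEDED
        || state == State.FAILED
        || state == State.KILLED;
  }
}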
 
Example 5
Source File: MockSimulatorJobTracker.java    From RDFS with Apache License 2.0
public void runMapTask(String taskTrackerName, TaskAttemptID taskId,
                       long mapStart, long mapRuntime, long killHeartbeat) {
  long mapDone = mapStart + mapRuntime;
  long mapEndHeartbeat = nextHeartbeat(mapDone);
  final boolean isKilled = (killHeartbeat>=0);
  if (isKilled) {
    mapEndHeartbeat = nextHeartbeat(killHeartbeat + 1);
  }

  LOG.debug("mapStart=" + mapStart + ", mapDone=" + mapDone + 
            ", mapEndHeartbeat=" + mapEndHeartbeat + 
            ", killHeartbeat=" + killHeartbeat);
  
  final int numSlotsRequired = 1;
  org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi = 
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);        
  Task task = new MapTask("dummyjobfile", taskIdOldApi, 0, "dummysplitclass",
                          null, numSlotsRequired);
  // all byte counters are 0
  TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0); 
  MapTaskAttemptInfo taskAttemptInfo = 
      new MapTaskAttemptInfo(State.SUCCEEDED, taskInfo, mapRuntime);
  TaskTrackerAction action = 
      new SimulatorLaunchTaskAction(task, taskAttemptInfo);
  heartbeats.get(mapStart).get(taskTrackerName).addTaskTrackerAction(action);
  if (isKilled) {
    action = new KillTaskAction(taskIdOldApi);
    heartbeats.get(killHeartbeat).get(taskTrackerName).addTaskTrackerAction(
       action);
  }

  for(long simulationTime = mapStart + heartbeatInterval; 
      simulationTime <= mapEndHeartbeat;
      simulationTime += heartbeatInterval) {
    State state = simulationTime < mapEndHeartbeat ? 
        State.RUNNING : State.SUCCEEDED;
    if (simulationTime == mapEndHeartbeat && isKilled) {
      state = State.KILLED;
    }
    MapTaskStatus mapStatus = new MapTaskStatus(
        task.getTaskID(), 0.0f, 0, state, "", "", null, Phase.MAP, null);
    heartbeats.get(simulationTime).get(taskTrackerName).addTaskReport(
       mapStatus);
  }
}
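Note how the heartbeat loop reports the attempt: every intermediate heartbeat carries a RUNNING status, and only the final heartbeat at mapEndHeartbeat carries the terminal state, which becomes State.KILLED instead of State.SUCCEEDED when a KillTaskAction was scheduled at killHeartbeat.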
 
Example 6
Source File: MockSimulatorJobTracker.java    From RDFS with Apache License 2.0
public void runReduceTask(String taskTrackerName, TaskAttemptID taskId,
                          long reduceStart, long mapDoneDelay, 
                          long reduceRuntime, long killHeartbeat) {
  long mapDone = nextHeartbeat(reduceStart + mapDoneDelay);
  long reduceDone = mapDone + reduceRuntime;
  long reduceEndHeartbeat = nextHeartbeat(reduceDone);
  final boolean isKilled = (killHeartbeat>=0);
  if (isKilled) {
    reduceEndHeartbeat = nextHeartbeat(killHeartbeat + 1);
  }

  LOG.debug("reduceStart=" + reduceStart + ", mapDone=" + mapDone + 
            ", reduceDone=" + reduceDone + 
            ", reduceEndHeartbeat=" + reduceEndHeartbeat +
            ", killHeartbeat=" + killHeartbeat);

  final int numSlotsRequired = 1;
  org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi = 
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);        
  Task task = new ReduceTask("dummyjobfile", taskIdOldApi, 0, 0,
                             numSlotsRequired);
  // all byte counters are 0
  TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0); 
  ReduceTaskAttemptInfo taskAttemptInfo = 
      new ReduceTaskAttemptInfo(State.SUCCEEDED, taskInfo, 0, 0, 
                                reduceRuntime);
  TaskTrackerAction action = 
      new SimulatorLaunchTaskAction(task, taskAttemptInfo);    
  heartbeats.get(reduceStart).get(taskTrackerName).addTaskTrackerAction(
      action);
  if (!isKilled || mapDone < killHeartbeat) {
    action = new AllMapsCompletedTaskAction(task.getTaskID());
    heartbeats.get(mapDone).get(taskTrackerName).addTaskTrackerAction(
        action);
  }
  if (isKilled) {
    action = new KillTaskAction(taskIdOldApi);
    heartbeats.get(killHeartbeat).get(taskTrackerName).addTaskTrackerAction(
       action);
  }

  for(long simulationTime = reduceStart + heartbeatInterval; 
      simulationTime <= reduceEndHeartbeat;
      simulationTime += heartbeatInterval) {
    State state = simulationTime < reduceEndHeartbeat ? 
        State.RUNNING : State.SUCCEEDED;
    if (simulationTime == reduceEndHeartbeat && isKilled) {
      state = State.KILLED;
    }
    // mapDone is the heartbeat at which the all-maps-done event is delivered
    Phase phase = simulationTime <= mapDone ? Phase.SHUFFLE : Phase.REDUCE; 
    ReduceTaskStatus reduceStatus = new ReduceTaskStatus(
        task.getTaskID(), 0.0f, 0, state, "", "", null, phase, null);
    heartbeats.get(simulationTime).get(taskTrackerName).addTaskReport(
        reduceStatus);
  }
}
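The reduce variant follows the same pattern as runMapTask, with two additions: an AllMapsCompletedTaskAction is delivered at mapDone (skipped when a kill is scheduled at or before that heartbeat), and the reported phase switches from SHUFFLE to REDUCE after the heartbeat at which that event is delivered. As before, the final heartbeat reports State.KILLED rather than State.SUCCEEDED when the task was killed.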