org.apache.hadoop.mapred.TaskStatus.State Java Examples

The following examples show how to use org.apache.hadoop.mapred.TaskStatus.State. They are drawn from several open-source projects; the source file and license are noted above each example.
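Most of the snippets below branch on a small set of these states (RUNNING, SUCCEEDED, FAILED, KILLED). As a quick orientation, here is a minimal self-contained sketch of the kind of terminal-state check the examples perform; the StateCheckSketch class and its isTerminal helper are illustrative only, not part of Hadoop:

import org.apache.hadoop.mapred.TaskStatus.State;

public class StateCheckSketch {
  // Illustrative helper: true for the end states the examples test against.
  static boolean isTerminal(State state) {
    switch (state) {
      case SUCCEEDED:
      case FAILED:
      case KILLED:
        return true;
      default:
        return false;
    }
  }

  public static void main(String[] args) {
    // State is an ordinary enum, so name()/valueOf() round-trip as usual
    // (Example #25 below relies on valueOf to parse serialized states).
    System.out.println(isTerminal(State.valueOf("SUCCEEDED"))); // true
    System.out.println(isTerminal(State.RUNNING));              // false
  }
}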
Example #1
Source File: ZombieJob.java    From hadoop with Apache License 2.0
private State makeUpState(int taskAttemptNumber, double[] numAttempts) {
  // If numAttempts == null, we report the attempt as FAILED.
  if (numAttempts == null) {
    return State.FAILED;
  }
  if (taskAttemptNumber >= numAttempts.length - 1) {
    // always succeed
    return State.SUCCEEDED;
  } else {
    double pSucceed = numAttempts[taskAttemptNumber];
    double pFail = 0;
    for (int i = taskAttemptNumber + 1; i < numAttempts.length; i++) {
      pFail += numAttempts[i];
    }
    return (random.nextDouble() < pSucceed / (pSucceed + pFail)) ? State.SUCCEEDED
        : State.FAILED;
  }
}
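The probabilistic branch above can be read as follows: numAttempts[i] is the probability mass of the task finishing at attempt i, so given that execution has reached attempt k, the conditional probability of succeeding right there is p[k] / (p[k] + sum of p[i] for i > k). Here is a standalone sketch of that arithmetic; the class name and array values are made up, the latter echoing the "map attempts to success" figures quoted in the TestZombieJob examples further down:

public class MakeUpStateDemo {
  public static void main(String[] args) {
    // Per-attempt success mass (illustrative values).
    double[] p = { 0.8114, 0.1871, 0.0013, 2.6e-4 };
    for (int k = 0; k < p.length - 1; k++) {
      double pFail = 0;
      for (int i = k + 1; i < p.length; i++) {
        pFail += p[i];
      }
      System.out.printf("attempt %d: P(SUCCEEDED) = %.4f%n",
          k, p[k] / (p[k] + pFail));
    }
    // For the last attempt, makeUpState always returns SUCCEEDED.
  }
}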
 
Example #2
Source File: ZombieJob.java    From big-c with Apache License 2.0
private long doMakeUpReduceRuntime(State state) {
  long reduceTime;
  try {
    if (state == State.SUCCEEDED) {
      reduceTime = makeUpRuntime(job.getSuccessfulReduceAttemptCDF());
    } else if (state == State.FAILED) {
      reduceTime = makeUpRuntime(job.getFailedReduceAttemptCDF());
    } else {
      throw new IllegalArgumentException(
          "state is neither SUCCEEDED nor FAILED: " + state);
    }
    return reduceTime;
  } catch (NoValueToMakeUpRuntime e) {
    return 0;
  }
}
 
Example #3
Source File: ZombieJob.java    From big-c with Apache License 2.0
@SuppressWarnings("hiding") 
private TaskAttemptInfo scaleInfo(LoggedTask loggedTask,
    LoggedTaskAttempt loggedAttempt, int locality, int loggedLocality,
    double rackLocalOverNodeLocal, double rackRemoteOverNodeLocal) {
  TaskInfo taskInfo = getTaskInfo(loggedTask);
  double[] factors = new double[] { 1.0, rackLocalOverNodeLocal,
      rackRemoteOverNodeLocal };
  double scaleFactor = factors[locality] / factors[loggedLocality];
  State state = convertState(loggedAttempt.getResult());
  if (loggedTask.getTaskType() == Values.MAP) {
    long taskTime = 0;
    if (loggedAttempt.getStartTime() == 0) {
      taskTime = makeUpMapRuntime(state, locality);
    } else {
      taskTime = loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
    }
    taskTime = sanitizeTaskRuntime(taskTime, loggedAttempt.getAttemptID());
    taskTime *= scaleFactor;
    return new MapTaskAttemptInfo(
        state, taskInfo, taskTime, loggedAttempt.allSplitVectors());
  } else {
    throw new IllegalArgumentException("taskType can only be MAP: "
        + loggedTask.getTaskType());
  }
}
 
Example #4
Source File: ZombieJob.java    From big-c with Apache License 2.0
private State makeUpState(int taskAttemptNumber, double[] numAttempts) {
  // If numAttempts == null, we report the attempt as FAILED.
  if (numAttempts == null) {
    return State.FAILED;
  }
  if (taskAttemptNumber >= numAttempts.length - 1) {
    // always succeed
    return State.SUCCEEDED;
  } else {
    double pSucceed = numAttempts[taskAttemptNumber];
    double pFail = 0;
    for (int i = taskAttemptNumber + 1; i < numAttempts.length; i++) {
      pFail += numAttempts[i];
    }
    return (random.nextDouble() < pSucceed / (pSucceed + pFail)) ? State.SUCCEEDED
        : State.FAILED;
  }
}
 
Example #5
Source File: DebugJobProducer.java    From hadoop with Apache License 2.0
@SuppressWarnings({ "deprecation", "incomplete-switch" })
@Override
public TaskAttemptInfo getTaskAttemptInfo(
  TaskType taskType, int taskNumber, int taskAttemptNumber) {
  switch (taskType) {
    case MAP:
      return new MapTaskAttemptInfo(
        State.SUCCEEDED, 
        new TaskInfo(
          m_bytesIn[taskNumber], m_recsIn[taskNumber],
          m_bytesOut[taskNumber], m_recsOut[taskNumber], -1),
        100);

    case REDUCE:
      return new ReduceTaskAttemptInfo(
        State.SUCCEEDED, 
        new TaskInfo(
          r_bytesIn[taskNumber], r_recsIn[taskNumber],
          r_bytesOut[taskNumber], r_recsOut[taskNumber], -1),
        100, 100, 100);
  }
  throw new UnsupportedOperationException();
}
 
Example #6
Source File: ZombieJob.java    From RDFS with Apache License 2.0
private long makeUpMapRuntime(State state, int locality) {
  long runtime;
  // make up runtime
  if (state == State.SUCCEEDED || state == State.FAILED) {
    List<LoggedDiscreteCDF> cdfList =
        state == State.SUCCEEDED ? job.getSuccessfulMapAttemptCDFs() : job
            .getFailedMapAttemptCDFs();
    // XXX MapCDFs is an ArrayList of 4 possible groups: distance=0, 1, 2, and
    // a last group for "distance cannot be determined". All pig jobs
    // would have only the 4th group, and pig tasks usually do not have
    // any locality, so this group should count as "distance=2".
    // However, setup/cleanup tasks are also counted in the 4th group.
    // These tasks are not meaningful for locality purposes.
    try {
      runtime = makeUpRuntime(cdfList.get(locality));
    } catch (NoValueToMakeUpRuntime e) {
      runtime = makeUpRuntime(cdfList);
    }
  } else {
    throw new IllegalArgumentException(
        "state is neither SUCCEEDED nor FAILED: " + state);
  }
  return runtime;
}
 
Example #7
Source File: ZombieJob.java    From RDFS with Apache License 2.0
private long doMakeUpReduceRuntime(State state) {
  long reduceTime;
  try {
    if (state == State.SUCCEEDED) {
      reduceTime = makeUpRuntime(job.getSuccessfulReduceAttemptCDF());
    } else if (state == State.FAILED) {
      reduceTime = makeUpRuntime(job.getFailedReduceAttemptCDF());
    } else {
      throw new IllegalArgumentException(
          "state is neither SUCCEEDED nor FAILED: " + state);
    }
    return reduceTime;
  } catch (NoValueToMakeUpRuntime e) {
    return 0;
  }
}
 
Example #8
Source File: SimulatorTaskTracker.java    From RDFS with Apache License 2.0
/**
 * Frees up bookkeeping memory used by completed tasks.
 * Has no effect on the events or logs produced by the SimulatorTaskTracker.
 * We need this in order not to report completed tasks multiple times and
 * to ensure that we do not run out of Java heap memory in larger
 * simulations.
 */
private void garbageCollectCompletedTasks() {
  for (Iterator<TaskAttemptID> iter = tasks.keySet().iterator();
       iter.hasNext();) {
    TaskAttemptID taskId = iter.next();
    SimulatorTaskInProgress tip = tasks.get(taskId);
    if (tip.getTaskStatus().getRunState() != State.RUNNING) {
      iter.remove();
      if (LOG.isDebugEnabled()) {
        LOG.debug("Garbage collected SimulatorTIP, taskId=" + taskId);
      }
      // We don't have to / must not touch usedMapSlots and usedReduceSlots
      // as those were already updated by processTaskAttemptCompletionEvent() 
      // when the task switched its state from running
    }
  }
}
 
Example #9
Source File: ZombieJob.java    From hadoop with Apache License 2.0
private long doMakeUpReduceRuntime(State state) {
  long reduceTime;
  try {
    if (state == State.SUCCEEDED) {
      reduceTime = makeUpRuntime(job.getSuccessfulReduceAttemptCDF());
    } else if (state == State.FAILED) {
      reduceTime = makeUpRuntime(job.getFailedReduceAttemptCDF());
    } else {
      throw new IllegalArgumentException(
          "state is neither SUCCEEDED nor FAILED: " + state);
    }
    return reduceTime;
  } catch (NoValueToMakeUpRuntime e) {
    return 0;
  }
}
 
Example #10
Source File: DebugJobProducer.java    From big-c with Apache License 2.0
@SuppressWarnings({ "deprecation", "incomplete-switch" })
@Override
public TaskAttemptInfo getTaskAttemptInfo(
  TaskType taskType, int taskNumber, int taskAttemptNumber) {
  switch (taskType) {
    case MAP:
      return new MapTaskAttemptInfo(
        State.SUCCEEDED, 
        new TaskInfo(
          m_bytesIn[taskNumber], m_recsIn[taskNumber],
          m_bytesOut[taskNumber], m_recsOut[taskNumber], -1),
        100);

    case REDUCE:
      return new ReduceTaskAttemptInfo(
        State.SUCCEEDED, 
        new TaskInfo(
          r_bytesIn[taskNumber], r_recsIn[taskNumber],
          r_bytesOut[taskNumber], r_recsOut[taskNumber], -1),
        100, 100, 100);
  }
  throw new UnsupportedOperationException();
}
 
Example #11
Source File: ZombieJob.java    From hadoop with Apache License 2.0
@SuppressWarnings("hiding") 
private TaskAttemptInfo scaleInfo(LoggedTask loggedTask,
    LoggedTaskAttempt loggedAttempt, int locality, int loggedLocality,
    double rackLocalOverNodeLocal, double rackRemoteOverNodeLocal) {
  TaskInfo taskInfo = getTaskInfo(loggedTask);
  double[] factors = new double[] { 1.0, rackLocalOverNodeLocal,
      rackRemoteOverNodeLocal };
  double scaleFactor = factors[locality] / factors[loggedLocality];
  State state = convertState(loggedAttempt.getResult());
  if (loggedTask.getTaskType() == Values.MAP) {
    long taskTime = 0;
    if (loggedAttempt.getStartTime() == 0) {
      taskTime = makeUpMapRuntime(state, locality);
    } else {
      taskTime = loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
    }
    taskTime = sanitizeTaskRuntime(taskTime, loggedAttempt.getAttemptID());
    taskTime *= scaleFactor;
    return new MapTaskAttemptInfo(
        state, taskInfo, taskTime, loggedAttempt.allSplitVectors());
  } else {
    throw new IllegalArgumentException("taskType can only be MAP: "
        + loggedTask.getTaskType());
  }
}
 
Example #12
Source File: ZombieJob.java    From RDFS with Apache License 2.0
@SuppressWarnings("hiding") 
private TaskAttemptInfo scaleInfo(LoggedTask loggedTask,
    LoggedTaskAttempt loggedAttempt, int locality, int loggedLocality,
    double rackLocalOverNodeLocal, double rackRemoteOverNodeLocal) {
  TaskInfo taskInfo = getTaskInfo(loggedTask);
  double[] factors = new double[] { 1.0, rackLocalOverNodeLocal,
      rackRemoteOverNodeLocal };
  double scaleFactor = factors[locality] / factors[loggedLocality];
  State state = convertState(loggedAttempt.getResult());
  if (loggedTask.getTaskType() == Values.MAP) {
    long taskTime = 0;
    if (loggedAttempt.getStartTime() == 0) {
      taskTime = makeUpMapRuntime(state, locality);
    } else {
      taskTime = loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
    }
    taskTime = sanitizeTaskRuntime(taskTime, loggedAttempt.getAttemptID());
    taskTime *= scaleFactor;
    return new MapTaskAttemptInfo(state, taskInfo, taskTime);
  } else {
    throw new IllegalArgumentException("taskType can only be MAP: "
        + loggedTask.getTaskType());
  }
}
 
Example #13
Source File: TestSimulatorJobTracker.java    From RDFS with Apache License 2.0
private List<TaskStatus> collectAndCloneTaskStatuses() {
  ArrayList<TaskStatus> statuses = new ArrayList<TaskStatus>();
  Set<TaskAttemptID> mark = new HashSet<TaskAttemptID>();
  for (SimulatorTaskInProgress tip : tasks.values()) {
    statuses.add((TaskStatus) tip.getTaskStatus().clone());
    if (tip.getFinalRunState() == State.SUCCEEDED) {
      mark.add(tip.getTaskStatus().getTaskID());
    }
  }

  for (TaskAttemptID taskId : mark) {
    tasks.remove(taskId);
  }

  return statuses;
}
 
Example #14
Source File: CheckedEventQueue.java    From RDFS with Apache License 2.0
public void expectMapTask(SimulatorTaskTracker taskTracker,
                          TaskAttemptID taskId,
                          long mapStart, long mapRuntime) {
  long mapDone = mapStart + mapRuntime;
  org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
  MapTaskStatus status = new MapTaskStatus(taskIdOldApi, 1.0f, 1,
      State.SUCCEEDED, null, null, null, Phase.MAP, null);
  status.setFinishTime(mapDone);
  TaskAttemptCompletionEvent completionEvent = 
      new TaskAttemptCompletionEvent(taskTracker, status);
  addExpected(mapStart, completionEvent);
}
 
Example #15
Source File: SimulatorTaskTracker.java    From RDFS with Apache License 2.0
/** 
 * Kills a task attempt.
 *
 * @param action contains the task attempt to kill
 * @param now current simulation time
 * @return new events generated in response, empty
 */
private List<SimulatorEvent> handleKillTaskAction(KillTaskAction action, long now) {
  TaskAttemptID taskId = action.getTaskID();
  // we don't have a nice(r) toString() in Hadoop's TaskActions 
  if (LOG.isDebugEnabled()) {
    LOG.debug("Handling kill task action, taskId=" + taskId + ", now=" + now);
  }
  
  SimulatorTaskInProgress tip = tasks.get(taskId);
  
  // Safety check: We might get a KillTaskAction even for completed reduces
  if (tip == null) {
    return SimulatorEngine.EMPTY_EVENTS;
  }
  
  progressTaskStatus(tip, now); // make progress up to date
  TaskStatus finalStatus = (TaskStatus)tip.getTaskStatus().clone();
  finalStatus.setFinishTime(now);
  finalStatus.setRunState(State.KILLED);
  finishRunningTask(finalStatus, now);
 
  if (finalStatus.getIsMap() || finalStatus.getPhase() == Phase.REDUCE) {
    // If we have already created a task attempt completion event, remember
    // the task id so that we can safely ignore the event when it is delivered.
    orphanTaskCompletions.add(taskId);
  }
  return SimulatorEngine.EMPTY_EVENTS;
}
 
Example #16
Source File: TaskTrackerLoadInfo.java    From RDFS with Apache License 2.0
public TaskInfo(int jobId, int taskId, int attempt, boolean map,
        long startTime, long runningTime, double taskProgress,
        Phase taskPhase, State taskState) {

  this.jobId = jobId;
  this.taskId = taskId;
  this.attempt = attempt;
  this.map = map;
  this.startTime = startTime;
  this.taskProgress = taskProgress;
  this.taskPhase = taskPhase;
  this.taskState = taskState;
  this.runningTime = runningTime;
}
 
Example #17
Source File: ReduceTaskAttemptInfo.java    From RDFS with Apache License 2.0
public ReduceTaskAttemptInfo(State state, TaskInfo taskInfo, long shuffleTime,
    long mergeTime, long reduceTime) {
  super(state, taskInfo);
  this.shuffleTime = shuffleTime;
  this.mergeTime = mergeTime;
  this.reduceTime = reduceTime;
}
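The DebugJobProducer snippets above (Examples #5 and #10) pass the constant 100 for all three phase durations. A minimal hedged instantiation along the same lines; the byte and record counts here are invented, and -1 mirrors the placeholder those examples use for the final TaskInfo argument:

TaskInfo info = new TaskInfo(1 << 20, 1000, 1 << 19, 500, -1);
ReduceTaskAttemptInfo attempt =
    new ReduceTaskAttemptInfo(State.SUCCEEDED, info, 100, 100, 100);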
 
Example #18
Source File: TaskTrackerLoadInfo.java    From RDFS with Apache License 2.0
public long getTotalWastedTime() {
  long total = 0;
  for (TaskInfo task : localTasksInfo) {
    if (task.isMap() ||
            (task.getTaskState() == State.RUNNING &&
            task.getTaskProgress() > 0)) {
      // The reduces that did not yet progress can be considered not started
      total += task.getRunningTime();
    }
  }
  return total;
}
 
Example #19
Source File: TestZombieJob.java    From RDFS with Apache License 2.0
@Test
public void testFourthJob() {
  // This job has many failed tasks.
  // 3204 m, 0 r
  // successful maps 497-586-23-1, failed maps 0-0-0-2714
  // map attempts to success -- 0.8113600833767587, 0.18707660239708182,
  // 0.0013027618551328818, 2.605523710265763E-4,
  JobStory job = jobStories.get(3);
  assertEquals(131, job.getNumberMaps());
  assertEquals(47, job.getNumberReduces());

  TaskAttemptInfo taInfo = null;
  // get a succeeded map task attempt
  long runtime = 5268;
  taInfo = job.getMapTaskAttemptInfoAdjusted(113, 1, 1);
  assertEquals(State.SUCCEEDED, taInfo.getRunState());
  assertEquals(runtime, taInfo.getRuntime());

  // get a succeeded map task attempt, with different locality
  taInfo = job.getMapTaskAttemptInfoAdjusted(113, 1, 2);
  assertEquals(State.SUCCEEDED, taInfo.getRunState());
  assertEquals(runtime, taInfo.getRuntime() / 2);
  taInfo = job.getMapTaskAttemptInfoAdjusted(113, 1, 0);
  assertEquals(State.SUCCEEDED, taInfo.getRunState());
  assertEquals((long) (runtime / 1.5), taInfo.getRuntime());

  // get a failed map task attempt
  taInfo = job.getMapTaskAttemptInfoAdjusted(113, 0, 1);
  assertEquals(18592, taInfo.getRuntime());
  assertEquals(State.FAILED, taInfo.getRunState());
}
 
Example #20
Source File: TestSimulatorJobTracker.java    From RDFS with Apache License 2.0
int findLaunchTaskActions(HeartbeatResponse response) {
  TaskTrackerAction[] actions = response.getActions();
  int numLaunchTaskActions = 0;
  for (TaskTrackerAction action : actions) {
    if (action instanceof SimulatorLaunchTaskAction) {
      Task task = ((SimulatorLaunchTaskAction) action).getTask();

      numLaunchTaskActions++;
      TaskAttemptID taskId = task.getTaskID();
      if (tasks.containsKey(taskId)) {
        // we already track this task; no need to generate a new status
        continue;
      }
      TaskStatus status;
      if (task.isMapTask()) {
        status = new MapTaskStatus(taskId, 0f, 1, State.RUNNING, "", "",
            taskTrackerName, Phase.MAP, new Counters());
      } else {
        status = new ReduceTaskStatus(taskId, 0f, 1, State.RUNNING, "", "",
            taskTrackerName, Phase.SHUFFLE, new Counters());
      }
      status.setRunState(State.SUCCEEDED);
      status.setStartTime(this.now);
      SimulatorTaskInProgress tip = new SimulatorTaskInProgress(
          (SimulatorLaunchTaskAction) action, status, this.now);
      tasks.put(taskId, tip);
    }
  }
  return numLaunchTaskActions;
}
 
Example #21
Source File: TestZombieJob.java    From RDFS with Apache License 2.0
@Test
public void testSecondJob() {
  // jobStories.get(1) is a small job: 20 maps, 1 reduce.
  JobStory job = jobStories.get(1);
  assertEquals(20, job.getNumberMaps());
  assertEquals(1, job.getNumberReduces());

  TaskAttemptInfo taInfo = null;
  // get a succeeded map task attempt
  taInfo = job.getMapTaskAttemptInfoAdjusted(17, 1, 1);
  assertEquals(State.SUCCEEDED, taInfo.getRunState());

  // get a succeeded map task attempt, with different locality
  taInfo = job.getMapTaskAttemptInfoAdjusted(17, 1, 2);
  assertEquals(State.SUCCEEDED, taInfo.getRunState());
  taInfo = job.getMapTaskAttemptInfoAdjusted(17, 1, 0);
  assertEquals(State.SUCCEEDED, taInfo.getRunState());

  // get map 14's attempt 0, which succeeded with a 1927 ms runtime
  taInfo = job.getMapTaskAttemptInfoAdjusted(14, 0, 1);
  assertEquals(1927, taInfo.getRuntime());
  assertEquals(State.SUCCEEDED, taInfo.getRunState());

  // get a failed map task attempt with different locality
  // TODO: this case is not exercised here because no suitable data set is
  // available.
}
 
Example #22
Source File: TaskTrackerLoadInfo.java    From RDFS with Apache License 2.0
public long getRunningTimeWasted() {
  long runningTimeWasted = 0;
  for (TaskInfo task : localTasksInfo) {
    if (task.getTaskState() == State.RUNNING &&
            (task.isMap() || task.getTaskProgress() > 0)) {
      runningTimeWasted += task.getRunningTime();
    }
  }

  // Another level of complexity here would be to count the time it would
  // take to rerun all the map tasks that were not fetched yet.
  return runningTimeWasted;
}
 
Example #23
Source File: CheckedEventQueue.java    From RDFS with Apache License 2.0
public void expectReduceTask(SimulatorTaskTracker taskTracker,
                             TaskAttemptID taskId, long mapDone, 
                             long reduceRuntime) {
  long reduceDone = mapDone + reduceRuntime;
  org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
  ReduceTaskStatus status = new ReduceTaskStatus(taskIdOldApi, 1.0f, 1,
      State.SUCCEEDED, null, null, null, Phase.REDUCE, null);
  status.setFinishTime(reduceDone);
  TaskAttemptCompletionEvent completionEvent = 
      new TaskAttemptCompletionEvent(taskTracker, status);
  addExpected(mapDone, completionEvent);
}
 
Example #24
Source File: TaskAttemptInfo.java    From hadoop with Apache License 2.0
protected TaskAttemptInfo(State state, TaskInfo taskInfo,
    List<List<Integer>> allSplits) {
  if (state == State.SUCCEEDED || state == State.FAILED) {
    this.state = state;
  } else {
    throw new IllegalArgumentException("status cannot be " + state);
  }
  this.taskInfo = taskInfo;
  this.allSplits = allSplits;
}
 
Example #25
Source File: TaskTrackerLoadInfo.java    From RDFS with Apache License 2.0
public void parseMap(Map<String, Object> trackerInfo) {
  active = (Boolean) trackerInfo.get("active");
  lastSeen = (Long) trackerInfo.get("last_seen");
  maxMapTasks = ((Long) trackerInfo.get("map_tasks_max")).intValue();

  maxReduceTasks = ((Long) trackerInfo.get("reduce_tasks_max")).intValue();

  Object[] tasks = (Object[]) trackerInfo.get("tasks");
  for (Object task : tasks) {
    Map<String, Object> taskMap = (Map<String, Object>) task;
    int jobId = ((Long) taskMap.get("job_id")).intValue();
    int taskId = ((Long) taskMap.get("task_id")).intValue();
    int attempt = ((Long) taskMap.get("attempt")).intValue();
    boolean map = taskMap.get("type").equals("map");

    double taskProgress = (Double) taskMap.get("progress");
    long startTime = (Long) taskMap.get("start_time");
    long runningTime = (Long) taskMap.get("running_time");

    TaskStatus.State taskState =
            TaskStatus.State.valueOf(taskMap.get("state").toString());
    TaskStatus.Phase taskPhase =
            TaskStatus.Phase.valueOf(taskMap.get("phase").toString());
    TaskInfo taskInfo = new TaskInfo(jobId, taskId, attempt, map,
            startTime, runningTime, taskProgress, taskPhase, taskState);
    // count map tasks that have succeeded or are still running
    if (map &&
        (taskState == TaskStatus.State.SUCCEEDED ||
         taskState == TaskStatus.State.RUNNING)) {
      totalMapTasks++;
    }

    localTasksInfo.add(taskInfo);
  }
}
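The casts above pin down the shape of the map this method expects: JSON-style numbers arrive as Long or Double, and the "state" and "phase" strings must be valid TaskStatus.State and TaskStatus.Phase names. A hedged sketch (all field values invented, using java.util.Map and java.util.HashMap) of a minimal trackerInfo that parseMap would accept:

Map<String, Object> taskMap = new HashMap<String, Object>();
taskMap.put("job_id", 42L);      // Longs are narrowed via intValue()
taskMap.put("task_id", 7L);
taskMap.put("attempt", 0L);
taskMap.put("type", "map");      // anything other than "map" means reduce
taskMap.put("progress", 0.5d);   // Double
taskMap.put("start_time", 1000L);
taskMap.put("running_time", 250L);
taskMap.put("state", "RUNNING"); // parsed with TaskStatus.State.valueOf
taskMap.put("phase", "MAP");     // parsed with TaskStatus.Phase.valueOf

Map<String, Object> trackerInfo = new HashMap<String, Object>();
trackerInfo.put("active", Boolean.TRUE);
trackerInfo.put("last_seen", 123456789L);
trackerInfo.put("map_tasks_max", 4L);
trackerInfo.put("reduce_tasks_max", 2L);
trackerInfo.put("tasks", new Object[] { taskMap });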
 
Example #26
Source File: TestZombieJob.java    From RDFS with Apache License 2.0
@Test
public void testFirstJob() {
  // jobStories.get(0) is a small job: 1 map, 1 reduce.
  JobStory job = jobStories.get(0);
  assertEquals(1, job.getNumberMaps());
  assertEquals(1, job.getNumberReduces());

  TaskAttemptInfo taInfo = null;
  long expectedRuntime = 2423;
  // get a succeeded map task attempt, expect the exact same task attempt
  taInfo = job.getMapTaskAttemptInfoAdjusted(14, 0, 1);
  assertEquals(expectedRuntime, taInfo.getRuntime());
  assertEquals(State.SUCCEEDED, taInfo.getRunState());

  // get a succeeded map attempt, but reschedule with different locality.
  taInfo = job.getMapTaskAttemptInfoAdjusted(14, 0, 2);
  assertEquals(State.SUCCEEDED, taInfo.getRunState());
  taInfo = job.getMapTaskAttemptInfoAdjusted(14, 0, 0);
  assertEquals(State.SUCCEEDED, taInfo.getRunState());

  expectedRuntime = 97502;
  // get a succeeded reduce task attempt, expect the exact same task attempt
  taInfo = job.getTaskAttemptInfo(TaskType.REDUCE, 14, 0);
  assertEquals(State.SUCCEEDED, taInfo.getRunState());

  // requesting the same reduce attempt again returns the same succeeded
  // attempt
  taInfo = job.getTaskAttemptInfo(TaskType.REDUCE, 14, 0);
  assertEquals(State.SUCCEEDED, taInfo.getRunState());

  // get a non-existent reduce task attempt, expect a made-up task attempt
  // TODO fill in test case
}
 
Example #27
Source File: TaskTrackerLoadInfo.java    From RDFS with Apache License 2.0 5 votes vote down vote up
public int getRunningReduceTasks() {
  int running = 0;
  for (TaskInfo task : localTasksInfo) {
    if (!task.isMap() &&
            task.getTaskState() == State.RUNNING) {
      running++;
    }
  }
  return running;
}
 
Example #28
Source File: ZombieJob.java    From RDFS with Apache License 2.0
private State makeUpState(int taskAttemptNumber, double[] numAttempts) {
  if (taskAttemptNumber >= numAttempts.length - 1) {
    // always succeed
    return State.SUCCEEDED;
  } else {
    double pSucceed = numAttempts[taskAttemptNumber];
    double pFail = 0;
    for (int i = taskAttemptNumber + 1; i < numAttempts.length; i++) {
      pFail += numAttempts[i];
    }
    return (random.nextDouble() < pSucceed / (pSucceed + pFail)) ? State.SUCCEEDED
        : State.FAILED;
  }
}
 
Example #29
Source File: ZombieJob.java    From RDFS with Apache License 2.0
private long makeUpReduceRuntime(State state) {
  long reduceTime = 0;
  for (int i = 0; i < 5; i++) {
    reduceTime = doMakeUpReduceRuntime(state);
    if (reduceTime >= 0) {
      return reduceTime;
    }
  }
  return 0;
}
 
Example #30
Source File: ZombieJob.java    From RDFS with Apache License 2.0
private static State convertState(Values status) {
  if (status == Values.SUCCESS) {
    return State.SUCCEEDED;
  } else if (status == Values.FAILED) {
    return State.FAILED;
  } else if (status == Values.KILLED) {
    return State.KILLED;
  } else {
    throw new IllegalArgumentException("unknown status " + status);
  }
}