org.apache.hadoop.tools.rumen.TaskInfo Java Examples

The following examples show how to use org.apache.hadoop.tools.rumen.TaskInfo. All of them are drawn from open source projects; the source file and originating project are noted above each example.
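
Before the examples, here is a minimal orientation sketch (not taken from any of the projects below). It constructs a TaskInfo and reads it back, assuming the five-argument constructor ordering (input bytes, input records, output bytes, output records, task memory) and the accessors that the examples on this page themselves use.

import org.apache.hadoop.tools.rumen.TaskInfo;

public class TaskInfoSketch {
  public static void main(String[] args) {
    // A task that read 1 MiB in 1000 records and wrote 512 KiB in 500
    // records; -1 marks the task memory as unknown, mirroring the
    // DebugJobProducer examples below.
    TaskInfo info = new TaskInfo(1 << 20, 1000, 512 << 10, 500, -1);

    System.out.println("input bytes:    " + info.getInputBytes());
    System.out.println("input records:  " + info.getInputRecords());
    System.out.println("output bytes:   " + info.getOutputBytes());
    System.out.println("output records: " + info.getOutputRecords());
    System.out.println("task memory:    " + info.getTaskMemory());
  }
}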
Example #1
Source File: DebugJobProducer.java    From big-c with Apache License 2.0
@SuppressWarnings({ "deprecation", "incomplete-switch" })
@Override
public TaskAttemptInfo getTaskAttemptInfo(
  TaskType taskType, int taskNumber, int taskAttemptNumber) {
  switch (taskType) {
    case MAP:
      return new MapTaskAttemptInfo(
        State.SUCCEEDED, 
        new TaskInfo(
          m_bytesIn[taskNumber], m_recsIn[taskNumber],
          m_bytesOut[taskNumber], m_recsOut[taskNumber], -1),
        100);

    case REDUCE:
      return new ReduceTaskAttemptInfo(
        State.SUCCEEDED, 
        new TaskInfo(
          r_bytesIn[taskNumber], r_recsIn[taskNumber],
          r_bytesOut[taskNumber], r_recsOut[taskNumber], -1),
        100, 100, 100);
  }
  throw new UnsupportedOperationException();
}
 
Example #2
Source File: DebugJobProducer.java    From hadoop with Apache License 2.0
@SuppressWarnings({ "deprecation", "incomplete-switch" })
@Override
public TaskAttemptInfo getTaskAttemptInfo(
  TaskType taskType, int taskNumber, int taskAttemptNumber) {
  switch (taskType) {
    case MAP:
      return new MapTaskAttemptInfo(
        State.SUCCEEDED, 
        new TaskInfo(
          m_bytesIn[taskNumber], m_recsIn[taskNumber],
          m_bytesOut[taskNumber], m_recsOut[taskNumber], -1),
        100);

    case REDUCE:
      return new ReduceTaskAttemptInfo(
        State.SUCCEEDED, 
        new TaskInfo(
          r_bytesIn[taskNumber], r_recsIn[taskNumber],
          r_bytesOut[taskNumber], r_recsOut[taskNumber], -1),
        100, 100, 100);
  }
  throw new UnsupportedOperationException();
}
 
Example #3
Source File: DebugJobFactory.java    From RDFS with Apache License 2.0
@Override
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
  switch (taskType) {
    case MAP:
      return new TaskInfo(m_bytesIn[taskNumber], m_recsIn[taskNumber],
          m_bytesOut[taskNumber], m_recsOut[taskNumber], -1);
    case REDUCE:
      return new TaskInfo(r_bytesIn[taskNumber], r_recsIn[taskNumber],
          r_bytesOut[taskNumber], r_recsOut[taskNumber], -1);
    default:
      throw new IllegalArgumentException("Not interested");
  }
}
 
Example #4
Source File: JobFactory.java    From RDFS with Apache License 2.0
private JobStory getNextJobFiltered() throws IOException {
  JobStory job;
  do {
    job = jobProducer.getNextJob();
  } while (job != null
      && (job.getOutcome() != Pre21JobHistoryConstants.Values.SUCCESS ||
          job.getSubmissionTime() < 0));
  return null == job ? null : new FilterJobStory(job) {
      @Override
      public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
        return new MinTaskInfo(this.job.getTaskInfo(taskType, taskNumber));
      }
    };
}
 
Example #5
Source File: DebugJobProducer.java    From big-c with Apache License 2.0
@Override
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
  switch (taskType) {
    case MAP:
      return new TaskInfo(m_bytesIn[taskNumber], m_recsIn[taskNumber],
          m_bytesOut[taskNumber], m_recsOut[taskNumber], -1);
    case REDUCE:
      return new TaskInfo(r_bytesIn[taskNumber], r_recsIn[taskNumber],
          r_bytesOut[taskNumber], r_recsOut[taskNumber], -1);
    default:
      throw new IllegalArgumentException("Not interested");
  }
}
 
Example #6
Source File: DebugJobProducer.java    From hadoop with Apache License 2.0
@Override
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
  switch (taskType) {
    case MAP:
      return new TaskInfo(m_bytesIn[taskNumber], m_recsIn[taskNumber],
          m_bytesOut[taskNumber], m_recsOut[taskNumber], -1);
    case REDUCE:
      return new TaskInfo(r_bytesIn[taskNumber], r_recsIn[taskNumber],
          r_bytesOut[taskNumber], r_recsOut[taskNumber], -1);
    default:
      throw new IllegalArgumentException("Not interested");
  }
}
 
Example #7
Source File: JobFactory.java    From big-c with Apache License 2.0
public MinTaskInfo(TaskInfo info) {
  super(info.getInputBytes(), info.getInputRecords(),
        info.getOutputBytes(), info.getOutputRecords(),
        info.getTaskMemory(), info.getResourceUsageMetrics());
}
 
Example #8
Source File: JobFactory.java    From big-c with Apache License 2.0
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
  return job.getTaskInfo(taskType, taskNumber);
}
 
Example #9
Source File: JobFactory.java    From big-c with Apache License 2.0
protected JobStory getNextJobFiltered() throws IOException {
  JobStory job = getNextJobFromTrace();
  // filter out the following jobs
  //    - unsuccessful jobs
  //    - jobs with missing submit-time
  //    - reduce-only jobs
  // These jobs are not yet supported in Gridmix
  while (job != null &&
    (job.getOutcome() != Pre21JobHistoryConstants.Values.SUCCESS ||
      job.getSubmissionTime() < 0 || job.getNumberMaps() == 0)) {
    if (LOG.isDebugEnabled()) {
      List<String> reason = new ArrayList<String>();
      if (job.getOutcome() != Pre21JobHistoryConstants.Values.SUCCESS) {
        reason.add("STATE (" + job.getOutcome().name() + ")");
      }
      if (job.getSubmissionTime() < 0) {
        reason.add("SUBMISSION-TIME (" + job.getSubmissionTime() + ")");
      }
      if (job.getNumberMaps() == 0) {
        reason.add("ZERO-MAPS-JOB");
      }
      
      // TODO This should never happen. Probably we missed something!
      if (reason.size() == 0) {
        reason.add("N/A");
      }
      
      LOG.debug("Ignoring job " + job.getJobID() + " from the input trace."
                + " Reason: " + StringUtils.join(reason, ","));
    }
    job = getNextJobFromTrace();
  }
  return null == job ? null : new FilterJobStory(job) {
    @Override
    public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
      TaskInfo info = this.job.getTaskInfo(taskType, taskNumber);
      if (info != null) {
        info = new MinTaskInfo(info);
      } else {
        info = new MinTaskInfo(new TaskInfo(0, 0, 0, 0, 0));
      }
      return info;
    }
  };
}
 
Example #10
Source File: JobFactory.java    From hadoop with Apache License 2.0
protected JobStory getNextJobFiltered() throws IOException {
  JobStory job = getNextJobFromTrace();
  // filter out the following jobs
  //    - unsuccessful jobs
  //    - jobs with missing submit-time
  //    - reduce-only jobs
  // These jobs are not yet supported in Gridmix
  while (job != null &&
    (job.getOutcome() != Pre21JobHistoryConstants.Values.SUCCESS ||
      job.getSubmissionTime() < 0 || job.getNumberMaps() == 0)) {
    if (LOG.isDebugEnabled()) {
      List<String> reason = new ArrayList<String>();
      if (job.getOutcome() != Pre21JobHistoryConstants.Values.SUCCESS) {
        reason.add("STATE (" + job.getOutcome().name() + ")");
      }
      if (job.getSubmissionTime() < 0) {
        reason.add("SUBMISSION-TIME (" + job.getSubmissionTime() + ")");
      }
      if (job.getNumberMaps() == 0) {
        reason.add("ZERO-MAPS-JOB");
      }
      
      // TODO This should never happen. Probably we missed something!
      if (reason.size() == 0) {
        reason.add("N/A");
      }
      
      LOG.debug("Ignoring job " + job.getJobID() + " from the input trace."
                + " Reason: " + StringUtils.join(reason, ","));
    }
    job = getNextJobFromTrace();
  }
  return null == job ? null : new FilterJobStory(job) {
    @Override
    public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
      TaskInfo info = this.job.getTaskInfo(taskType, taskNumber);
      if (info != null) {
        info = new MinTaskInfo(info);
      } else {
        info = new MinTaskInfo(new TaskInfo(0, 0, 0, 0, 0));
      }
      return info;
    }
  };
}
 
Example #11
Source File: SimulatorJobStory.java    From RDFS with Apache License 2.0
@Override
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
  return job.getTaskInfo(taskType, taskNumber);
}
 
Example #12
Source File: TestSimulatorJobClient.java    From RDFS with Apache License 2.0
@Override
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
  throw new UnsupportedOperationException();
}
 
Example #13
Source File: MockSimulatorJobTracker.java    From RDFS with Apache License 2.0
public void runMapTask(String taskTrackerName, TaskAttemptID taskId,
                       long mapStart, long mapRuntime, long killHeartbeat) {
  long mapDone = mapStart + mapRuntime;
  long mapEndHeartbeat = nextHeartbeat(mapDone);
  final boolean isKilled = (killHeartbeat>=0);
  if (isKilled) {
    mapEndHeartbeat = nextHeartbeat(killHeartbeat + 1);
  }

  LOG.debug("mapStart=" + mapStart + ", mapDone=" + mapDone + 
            ", mapEndHeartbeat=" + mapEndHeartbeat + 
            ", killHeartbeat=" + killHeartbeat);
  
  final int numSlotsRequired = 1;
  org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi = 
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);        
  Task task = new MapTask("dummyjobfile", taskIdOldApi, 0, "dummysplitclass",
                          null, numSlotsRequired);
  // all byte counters are 0
  TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0); 
  MapTaskAttemptInfo taskAttemptInfo = 
      new MapTaskAttemptInfo(State.SUCCEEDED, taskInfo, mapRuntime);
  TaskTrackerAction action = 
      new SimulatorLaunchTaskAction(task, taskAttemptInfo);
  heartbeats.get(mapStart).get(taskTrackerName).addTaskTrackerAction(action);
  if (isKilled) {
    action = new KillTaskAction(taskIdOldApi);
    heartbeats.get(killHeartbeat).get(taskTrackerName).addTaskTrackerAction(
       action);
  }

  for(long simulationTime = mapStart + heartbeatInterval; 
      simulationTime <= mapEndHeartbeat;
      simulationTime += heartbeatInterval) {
    State state = simulationTime < mapEndHeartbeat ? 
        State.RUNNING : State.SUCCEEDED;
    if (simulationTime == mapEndHeartbeat && isKilled) {
      state = State.KILLED;
    }
    MapTaskStatus mapStatus = new MapTaskStatus(
        task.getTaskID(), 0.0f, 0, state, "", "", null, Phase.MAP, null);
    heartbeats.get(simulationTime).get(taskTrackerName).addTaskReport(
       mapStatus);
  }
}
 
Example #14
Source File: MockSimulatorJobTracker.java    From RDFS with Apache License 2.0
public void runReduceTask(String taskTrackerName, TaskAttemptID taskId,
                          long reduceStart, long mapDoneDelay, 
                          long reduceRuntime, long killHeartbeat) {
  long mapDone = nextHeartbeat(reduceStart + mapDoneDelay);
  long reduceDone = mapDone + reduceRuntime;
  long reduceEndHeartbeat = nextHeartbeat(reduceDone);
  final boolean isKilled = (killHeartbeat>=0);
  if (isKilled) {
    reduceEndHeartbeat = nextHeartbeat(killHeartbeat + 1);
  }

  LOG.debug("reduceStart=" + reduceStart + ", mapDone=" + mapDone + 
            ", reduceDone=" + reduceDone + 
            ", reduceEndHeartbeat=" + reduceEndHeartbeat +
            ", killHeartbeat=" + killHeartbeat);

  final int numSlotsRequired = 1;
  org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi = 
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);        
  Task task = new ReduceTask("dummyjobfile", taskIdOldApi, 0, 0,
                             numSlotsRequired);
  // all byte counters are 0
  TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0); 
  ReduceTaskAttemptInfo taskAttemptInfo = 
      new ReduceTaskAttemptInfo(State.SUCCEEDED, taskInfo, 0, 0, 
                                reduceRuntime);
  TaskTrackerAction action = 
      new SimulatorLaunchTaskAction(task, taskAttemptInfo);    
  heartbeats.get(reduceStart).get(taskTrackerName).addTaskTrackerAction(
      action);
  if (!isKilled || mapDone < killHeartbeat) {
    action = new AllMapsCompletedTaskAction(task.getTaskID());
    heartbeats.get(mapDone).get(taskTrackerName).addTaskTrackerAction(
        action);
  }
  if (isKilled) {
    action = new KillTaskAction(taskIdOldApi);
    heartbeats.get(killHeartbeat).get(taskTrackerName).addTaskTrackerAction(
       action);
  }

  for(long simulationTime = reduceStart + heartbeatInterval; 
      simulationTime <= reduceEndHeartbeat;
      simulationTime += heartbeatInterval) {
    State state = simulationTime < reduceEndHeartbeat ? 
        State.RUNNING : State.SUCCEEDED;
    if (simulationTime == reduceEndHeartbeat && isKilled) {
      state = State.KILLED;
    }
    // mapDone is when the all-maps-completed event is delivered
    Phase phase = simulationTime <= mapDone ? Phase.SHUFFLE : Phase.REDUCE; 
    ReduceTaskStatus reduceStatus = new ReduceTaskStatus(
        task.getTaskID(), 0.0f, 0, state, "", "", null, phase, null);
    heartbeats.get(simulationTime).get(taskTrackerName).addTaskReport(
        reduceStatus);
  }
}
 
Example #15
Source File: JobFactory.java    From RDFS with Apache License 2.0
public MinTaskInfo(TaskInfo info) {
  super(info.getInputBytes(), info.getInputRecords(),
        info.getOutputBytes(), info.getOutputRecords(),
        info.getTaskMemory());
}
 
Example #16
Source File: JobFactory.java    From RDFS with Apache License 2.0
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
  return job.getTaskInfo(taskType, taskNumber);
}
 
Example #17
Source File: JobFactory.java    From hadoop with Apache License 2.0
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
  return job.getTaskInfo(taskType, taskNumber);
}
 
Example #18
Source File: JobFactory.java    From hadoop with Apache License 2.0
public MinTaskInfo(TaskInfo info) {
  super(info.getInputBytes(), info.getInputRecords(),
        info.getOutputBytes(), info.getOutputRecords(),
        info.getTaskMemory(), info.getResourceUsageMetrics());
}
 
Example #19
Source File: FakeJobs.java    From RDFS with Apache License 2.0
/**
 * Get {@link TaskInfo} for a given task.
 * @param taskType {@link TaskType} of the task
 * @param taskNumber Partition number of the task
 * @return the <code>TaskInfo</code> for the given task, or
 *         <code>null</code>, since this fake implementation tracks no
 *         per-task information
 */
public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
  return null;
}
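
Note that this fake returns null rather than a populated TaskInfo, so callers must tolerate missing task data (Examples #9 and #10 guard against exactly this case). To close, a minimal consumer-side sketch, again not taken from any of the projects above, assuming only the JobStory methods already seen in these examples (getNumberMaps() and getTaskInfo()):

import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.hadoop.tools.rumen.TaskInfo;

public class TraceStats {
  /** Sums input bytes over all map tasks of one traced job. */
  static long totalMapInputBytes(JobStory job) {
    long total = 0;
    for (int i = 0; i < job.getNumberMaps(); i++) {
      TaskInfo info = job.getTaskInfo(TaskType.MAP, i);
      if (info != null) {  // stubs such as FakeJobs may return null
        total += info.getInputBytes();
      }
    }
    return total;
  }
}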