Java Code Examples for org.apache.hadoop.mapred.JobHistory#Keys

The following examples show how to use org.apache.hadoop.mapred.JobHistory#Keys, the enum used to key the fields of parsed job-history records in the old org.apache.hadoop.mapred API. All examples are taken from open-source projects; the link above each example points to the original project and source file.
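Before the examples, here is a minimal sketch of where a Map keyed by JobHistory.Keys usually comes from. It assumes the Hadoop 1.x API, in which JobHistory.parseHistoryFromFS replays each record of a history file to a JobHistory.Listener; the class name and the history-file argument are illustrative, not from any of the projects below.

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobHistory;

// Sketch (Hadoop 1.x API assumed): each parsed record arrives as a
// Map keyed by JobHistory.Keys, tagged with its record type.
public class JobStatusPrinter implements JobHistory.Listener {

    @Override
    public void handle(JobHistory.RecordTypes recType,
                       Map<JobHistory.Keys, String> values) throws IOException {
        if (recType == JobHistory.RecordTypes.Job) {
            // Individual fields are looked up by their Keys constant.
            System.out.println(values.get(JobHistory.Keys.JOBID)
                    + " -> " + values.get(JobHistory.Keys.JOB_STATUS));
        }
    }

    public static void main(String[] args) throws IOException {
        // args[0] is a placeholder for a real job-history file path.
        FileSystem fs = FileSystem.get(new Configuration());
        JobHistory.parseHistoryFromFS(args[0], new JobStatusPrinter(), fs);
    }
}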
Example 1
Source File: HadoopJobHistoryLoader.java    From spork with Apache License 2.0
private static Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(
        JobHistory.Task task) {

    Map<String, JobHistory.TaskAttempt> taskAttempts = task
            .getTaskAttempts();
    int size = taskAttempts.size();
    Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts
            .entrySet().iterator();
    for (int i = 0; i < size; i++) {
        // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
        Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
        JobHistory.TaskAttempt attempt = tae.getValue();
        // Null-safe: compare the literal first in case TASK_STATUS maps to null.
        if (attempt != null
                && attempt.getValues() != null
                && attempt.getValues().containsKey(JobHistory.Keys.TASK_STATUS)
                && "SUCCESS".equals(attempt.getValues().get(JobHistory.Keys.TASK_STATUS))) {
            return attempt.getValues();
        }
    }

    return null;
}
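For context, the JobHistory.Task handed to this helper typically comes from a parsed JobHistory.JobInfo. A hedged sketch of a call site, assuming the Hadoop 1.x DefaultJobHistoryParser (jobId, jobHistoryFile, and fs are placeholders):

// Parse the history file into a JobInfo, then scan each task.
JobHistory.JobInfo jobInfo = new JobHistory.JobInfo(jobId);
DefaultJobHistoryParser.parseJobTasks(jobHistoryFile, jobInfo, fs);

for (JobHistory.Task task : jobInfo.getAllTasks().values()) {
    Map<JobHistory.Keys, String> attempt = getLastSuccessfulTaskAttempt(task);
    if (attempt != null) {
        System.out.println(task.get(JobHistory.Keys.TASKID)
                + " finished at " + attempt.get(JobHistory.Keys.FINISH_TIME));
    }
}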
 
Example 2
Source File: JobStatistics.java    From RDFS with Apache License 2.0
private java.util.Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(JobHistory.Task task) {
  
  Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
  int size = taskAttempts.size();
  java.util.Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts.entrySet().iterator();
  for (int i=0; i<size; i++) {
    // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
    Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
    JobHistory.TaskAttempt attempt = tae.getValue();
    // Null-safe: compare the literal first in case TASK_STATUS is absent.
    if ("SUCCESS".equals(attempt.getValues().get(JobHistory.Keys.TASK_STATUS))) {
      return attempt.getValues();
    }
  }
  
  return null;
}
 
Example 3
Source File: JobStatistics.java    From hadoop-gpu with Apache License 2.0
private java.util.Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(JobHistory.Task task) {
  
  Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
  int size = taskAttempts.size();
  java.util.Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts.entrySet().iterator();
  for (int i=0; i<size; i++) {
    // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
    Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
    JobHistory.TaskAttempt attempt = tae.getValue();
    // Null-safe: compare the literal first in case TASK_STATUS is absent.
    if ("SUCCESS".equals(attempt.getValues().get(JobHistory.Keys.TASK_STATUS))) {
      return attempt.getValues();
    }
  }
  
  return null;
}
 
Example 4
Source File: HadoopJobHistoryLoader.java    From spork with Apache License 2.0
private static void populateJob(Map<JobHistory.Keys, String> jobC, Map<String, String> job) {
    int size = jobC.size();
    Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator();
    for (int i = 0; i < size; i++) {
        Map.Entry<JobHistory.Keys, String> entry = kv.next();
        JobHistory.Keys key = entry.getKey();
        String value = entry.getValue();
        switch (key) {
        case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID.toString(), value); break;           
        case FINISH_TIME: job.put(JobKeys.FINISH_TIME.toString(), value); break;
        case JOBID: job.put(JobKeys.JOBID.toString(), value); break;
        case JOBNAME: job.put(JobKeys.JOBNAME.toString(), value); break;
        case USER: job.put(JobKeys.USER.toString(), value); break;
        case JOBCONF: job.put(JobKeys.JOBCONF.toString(), value); break;
        case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME.toString(), value); break;
        case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME.toString(), value); break;
        case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS.toString(), value); break;
        case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES.toString(), value); break;
        case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS.toString(), value); break;
        case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES.toString(), value); break;
        case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS.toString(), value); break;
        case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES.toString(), value); break;
        case JOB_STATUS: job.put(JobKeys.STATUS.toString(), value); break;
        case COUNTERS:
            // String.concat() returns a new string; assign it, or the "," is lost.
            value = value.concat(",");
            parseAndAddJobCounters(job, value);
            break;
        default: 
            LOG.debug("JobHistory.Keys."+ key + " : NOT INCLUDED IN LOADER RETURN VALUE");
            break;
        }
    }
}
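The jobC argument is the key/value view of a parsed job record; the switch simply re-keys each history field under the loader's own JobKeys enum so that downstream consumers need not depend on the mapred history classes. A minimal hedged sketch of wiring it up, assuming JobHistory.JobInfo#getValues() from the same Hadoop 1.x API (jobInfo is a placeholder for a parsed instance):

// Re-key the parsed history record into a plain String map.
Map<String, String> job = new HashMap<String, String>();
populateJob(jobInfo.getValues(), job);
System.out.println("status: " + job.get(JobKeys.STATUS.toString()));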
 
Example 5
Source File: JobStatistics.java    From RDFS with Apache License 2.0
private void populate_Job(Hashtable<Enum, String> job, java.util.Map<JobHistory.Keys, String> jobC) throws ParseException {
  int size = jobC.size(); 
  java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator();
  for (int i = 0; i < size; i++)
  {
    Map.Entry<JobHistory.Keys, String> entry = kv.next();
    JobHistory.Keys key = entry.getKey();
    String value = entry.getValue();
    //System.out.println("JobHistory.JobKeys."+key+": "+value);
    switch (key) {
    case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID, value); break;
    case FINISH_TIME: job.put(JobKeys.FINISH_TIME, value); break;
    case JOBID: job.put(JobKeys.JOBID, value); break;
    case JOBNAME: job.put(JobKeys.JOBNAME, value); break;
    case USER: job.put(JobKeys.USER, value); break;
    case JOBCONF: job.put(JobKeys.JOBCONF, value); break;
    case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME, value); break;
    case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME, value); break;
    case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS, value); break;
    case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES, value); break;
    case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS, value); break;
    case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES, value); break;
    case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS, value); break;
    case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES, value); break;
    case JOB_STATUS: job.put(JobKeys.STATUS, value); break;
    case JOB_PRIORITY: job.put(JobKeys.JOB_PRIORITY, value); break;
    case COUNTERS:
      // String.concat() returns a new string; assign it, or the "," is lost.
      value = value.concat(",");
      parseAndAddJobCounters(job, value);
      break;
    default:
      System.err.println("JobHistory.Keys." + key + " : NOT INCLUDED IN PERFORMANCE ADVISOR COUNTERS");
      break;
    }
  }
}
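parseAndAddJobCounters is project code, but the string stored under JobHistory.Keys.COUNTERS is the escaped compact form written by org.apache.hadoop.mapred.Counters, which is also why populate_Job declares ParseException. A minimal hedged sketch of decoding it directly, assuming Counters.fromEscapedCompactString from the same-era API:

import java.text.ParseException;
import org.apache.hadoop.mapred.Counters;

// Decode the compact counter string and dump every counter.
static void printJobCounters(String value) throws ParseException {
    Counters counters = Counters.fromEscapedCompactString(value);
    for (Counters.Group group : counters) {
        for (Counters.Counter counter : group) {
            System.out.println(group.getName() + "." + counter.getName()
                    + " = " + counter.getValue());
        }
    }
}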
 
Example 6
Source File: JobStatistics.java    From hadoop-gpu with Apache License 2.0
private void populate_Job(Hashtable<Enum, String> job, java.util.Map<JobHistory.Keys, String> jobC) throws ParseException {
  int size = jobC.size(); 
  java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator();
  for (int i = 0; i < size; i++)
  {
    Map.Entry<JobHistory.Keys, String> entry = kv.next();
    JobHistory.Keys key = entry.getKey();
    String value = entry.getValue();
    switch (key) {
    case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID, value); break;
    //case START_TIME: job.put(JobKeys., value); break;
    case FINISH_TIME: job.put(JobKeys.FINISH_TIME, value); break;
    case JOBID: job.put(JobKeys.JOBID, value); break;
    case JOBNAME: job.put(JobKeys.JOBNAME, value); break;
    case USER: job.put(JobKeys.USER, value); break;
    case JOBCONF: job.put(JobKeys.JOBCONF, value); break;
    case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME, value); break;
    case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME, value); break;
    case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS, value); break;
    case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES, value); break;
    case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS, value); break;
    case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES, value); break;
    case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS, value); break;
    case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES, value); break;
    case JOB_STATUS: job.put(JobKeys.STATUS, value); break;
    case COUNTERS:
      // String.concat() returns a new string; assign it, or the "," is lost.
      value = value.concat(",");
      parseAndAddJobCounters(job, value);
      break;
    default:
      System.out.println("JobHistory.Keys." + key + " : NOT INCLUDED IN PERFORMANCE ADVISOR COUNTERS");
      break;
    }
  }
}