org.apache.hadoop.mapreduce.JobStatus Java Examples

The following examples show how to use org.apache.hadoop.mapreduce.JobStatus. Each example is taken from an open source project; the originating project and source file are noted above it.
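Before the examples, a quick orientation: a JobStatus is a client-side snapshot of a submitted job's state and progress, usually obtained from Job.getStatus() or Cluster.getAllJobStatuses(). The sketch below is an editorial illustration, not taken from any of the projects listed (the class name JobStatusPoller is invented, and the actual mapper/reducer configuration is elided); it polls a job's status until completion:

// Minimal sketch: submit a job and poll its JobStatus until completion.
// Assumes the default Configuration points at a working cluster.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobStatus;

public class JobStatusPoller {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "jobstatus-demo");
    // ... set mapper/reducer classes and input/output paths here ...
    job.submit();

    while (!job.isComplete()) {
      JobStatus status = job.getStatus();  // fresh snapshot from the cluster
      System.out.printf("state=%s map=%.0f%% reduce=%.0f%%%n",
          status.getState(),               // PREP, RUNNING, SUCCEEDED, FAILED, KILLED
          status.getMapProgress() * 100,
          status.getReduceProgress() * 100);
      Thread.sleep(2000);
    }
    System.out.println("Final state: " + job.getStatus().getState());
  }
}
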
Example #1
Source File: TestClientServiceDelegate.java    From big-c with Apache License 2.0
@Test
public void testJobReportFromHistoryServer() throws Exception {                                 
  MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);                           
  when(historyServerProxy.getJobReport(getJobReportRequest())).thenReturn(                      
      getJobReportResponseFromHistoryServer());                                                 
  ResourceMgrDelegate rm = mock(ResourceMgrDelegate.class);                                     
  when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId()))
      .thenReturn(null);
  ClientServiceDelegate clientServiceDelegate = getClientServiceDelegate(                       
      historyServerProxy, rm);

  JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
  Assert.assertNotNull(jobStatus);
  Assert.assertEquals("TestJobFilePath", jobStatus.getJobFile());                               
  Assert.assertEquals("http://TestTrackingUrl", jobStatus.getTrackingUrl());                    
  Assert.assertEquals(1.0f, jobStatus.getMapProgress(), 0.0f);
  Assert.assertEquals(1.0f, jobStatus.getReduceProgress(), 0.0f);
}
 
Example #2
Source File: MRJobStatus.java    From sequenceiq-samples with Apache License 2.0
public JobStatus printJobStatus(YARNRunner yarnRunner, JobID jobID) throws IOException, InterruptedException {
	JobStatus jobStatus = yarnRunner.getJobStatus(jobID);
	
	// print overall job M/R progress
	LOGGER.info("\nJob " + jobStatus.getJobName() + " in queue (" + jobStatus.getQueue() + ")"
			+ " progress M/R: " + jobStatus.getMapProgress() + "/" + jobStatus.getReduceProgress());
	LOGGER.info("Tracking URL : " + jobStatus.getTrackingUrl());
	LOGGER.info("Reserved memory : " + jobStatus.getReservedMem() + ", used memory : "
			+ jobStatus.getUsedMem() + " and used slots : " + jobStatus.getNumUsedSlots());
	
	// list map & reduce task statuses and progress
	for (TaskReport report : yarnRunner.getTaskReports(jobID, TaskType.MAP)) {
		LOGGER.info("MAP: Status " + report.getCurrentStatus() + " with task ID "
				+ report.getTaskID() + ", and progress " + report.getProgress());
	}
	for (TaskReport report : yarnRunner.getTaskReports(jobID, TaskType.REDUCE)) {
		LOGGER.info("REDUCE: Status " + report.getCurrentStatus() + " with task ID "
				+ report.getTaskID() + ", and progress " + report.getProgress());
	}
	return jobStatus;
}
 
Example #3
Source File: TestClientServiceDelegate.java    From hadoop with Apache License 2.0
@Test
public void testHistoryServerNotConfigured() throws Exception {
  //RM doesn't have app report and job History Server is not configured
  ClientServiceDelegate clientServiceDelegate = getClientServiceDelegate(
      null, getRMDelegate());
  JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
  Assert.assertEquals("N/A", jobStatus.getUsername());
  Assert.assertEquals(JobStatus.State.PREP, jobStatus.getState());

  //RM has app report and job History Server is not configured
  ResourceMgrDelegate rm = mock(ResourceMgrDelegate.class);
  ApplicationReport applicationReport = getFinishedApplicationReport();
  when(rm.getApplicationReport(jobId.getAppId())).thenReturn(
      applicationReport);

  clientServiceDelegate = getClientServiceDelegate(null, rm);
  jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
  Assert.assertEquals(applicationReport.getUser(), jobStatus.getUsername());
  Assert.assertEquals(JobStatus.State.SUCCEEDED, jobStatus.getState());
}
 
Example #4
Source File: TestClientServiceDelegate.java    From hadoop with Apache License 2.0
@Test
public void testRetriesOnConnectionFailure() throws Exception {

  MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
  when(historyServerProxy.getJobReport(getJobReportRequest())).thenThrow(
      new RuntimeException("1")).thenThrow(new RuntimeException("2"))       
      .thenReturn(getJobReportResponse());

  ResourceMgrDelegate rm = mock(ResourceMgrDelegate.class);
  when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId()))
      .thenReturn(null);

  ClientServiceDelegate clientServiceDelegate = getClientServiceDelegate(
      historyServerProxy, rm);

  JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
  Assert.assertNotNull(jobStatus);
  verify(historyServerProxy, times(3)).getJobReport(
      any(GetJobReportRequest.class));
}
 
Example #5
Source File: Task.java    From RDFS with Apache License 2.0
public void readFields(DataInput in) throws IOException {
  jobFile = Text.readString(in);
  taskId = TaskAttemptID.read(in);
  partition = in.readInt();
  numSlotsRequired = in.readInt();
  taskStatus.readFields(in);
  this.mapOutputFile.setJobId(taskId.getJobID()); 
  skipRanges.readFields(in);
  currentRecIndexIterator = skipRanges.skipRangeIterator();
  currentRecStartIndex = currentRecIndexIterator.next();
  skipping = in.readBoolean();
  jobCleanup = in.readBoolean();
  if (jobCleanup) {
    jobRunStateForCleanup = 
      WritableUtils.readEnum(in, JobStatus.State.class);
  }
  jobSetup = in.readBoolean();
  username = Text.readString(in);
  writeSkipRecs = in.readBoolean();
  taskCleanup = in.readBoolean();
  if (taskCleanup) {
    setPhase(TaskStatus.Phase.CLEANUP);
  }
  extraData.readFields(in);
}
 
Example #6
Source File: TestMRAMWithNonNormalizedCapabilities.java    From hadoop with Apache License 2.0
/**
 * Ensure that nothing is broken after normalization was
 * removed from the MRAM side.
 * @throws Exception
 */
@Test
public void testJobWithNonNormalizedCapabilities() throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
              + " not found. Not running test.");
    return;
  }

  JobConf jobConf = new JobConf(mrCluster.getConfig());
  jobConf.setInt("mapreduce.map.memory.mb", 700);
  jobConf.setInt("mapred.reduce.memory.mb", 1500);

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(jobConf);
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.submit();
  boolean completed = job.waitForCompletion(true);
  Assert.assertTrue("Job should be completed", completed);
  Assert.assertEquals("Job should be finished successfully", 
                  JobStatus.State.SUCCEEDED, job.getJobState());
}
 
Example #7
Source File: YARNRunner.java    From tez with Apache License 2.0
@Override
public JobStatus getJobStatus(JobID jobID) throws IOException,
    InterruptedException {
  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  String jobFile = MRApps.getJobFile(conf, user, jobID);
  DAGStatus dagStatus;
  try {
    if (dagClient == null) {
      dagClient = MRTezClient.getDAGClient(TypeConverter.toYarn(jobID).getAppId(), tezConf, null);
    }
    dagStatus = dagClient.getDAGStatus(null);
    return new DAGJobStatus(dagClient.getApplicationReport(), dagStatus, jobFile);
  } catch (TezException e) {
    throw new IOException(e);
  }
}
 
Example #8
Source File: CLI.java    From big-c with Apache License 2.0
@Private
public void displayJobList(JobStatus[] jobs, PrintWriter writer) {
  writer.println("Total jobs:" + jobs.length);
  writer.printf(headerPattern, "JobId", "State", "StartTime", "UserName",
    "Queue", "Priority", "UsedContainers",
    "RsvdContainers", "UsedMem", "RsvdMem", "NeededMem", "AM info");
  for (JobStatus job : jobs) {
    int numUsedSlots = job.getNumUsedSlots();
    int numReservedSlots = job.getNumReservedSlots();
    int usedMem = job.getUsedMem();
    int rsvdMem = job.getReservedMem();
    int neededMem = job.getNeededMem();
    writer.printf(dataPattern,
        job.getJobID().toString(), job.getState(), job.getStartTime(),
        job.getUsername(), job.getQueue(), 
        job.getPriority().name(),
        numUsedSlots < 0 ? UNAVAILABLE : numUsedSlots,
        numReservedSlots < 0 ? UNAVAILABLE : numReservedSlots,
        usedMem < 0 ? UNAVAILABLE : String.format(memPattern, usedMem),
        rsvdMem < 0 ? UNAVAILABLE : String.format(memPattern, rsvdMem),
        neededMem < 0 ? UNAVAILABLE : String.format(memPattern, neededMem),
        job.getSchedulingInfo());
  }
  writer.flush();
}
 
Example #9
Source File: CLI.java    From hadoop with Apache License 2.0
@Private
public void displayJobList(JobStatus[] jobs, PrintWriter writer) {
  writer.println("Total jobs:" + jobs.length);
  writer.printf(headerPattern, "JobId", "State", "StartTime", "UserName",
    "Queue", "Priority", "UsedContainers",
    "RsvdContainers", "UsedMem", "RsvdMem", "NeededMem", "AM info");
  for (JobStatus job : jobs) {
    int numUsedSlots = job.getNumUsedSlots();
    int numReservedSlots = job.getNumReservedSlots();
    int usedMem = job.getUsedMem();
    int rsvdMem = job.getReservedMem();
    int neededMem = job.getNeededMem();
    writer.printf(dataPattern,
        job.getJobID().toString(), job.getState(), job.getStartTime(),
        job.getUsername(), job.getQueue(), 
        job.getPriority().name(),
        numUsedSlots < 0 ? UNAVAILABLE : numUsedSlots,
        numReservedSlots < 0 ? UNAVAILABLE : numReservedSlots,
        usedMem < 0 ? UNAVAILABLE : String.format(memPattern, usedMem),
        rsvdMem < 0 ? UNAVAILABLE : String.format(memPattern, rsvdMem),
        neededMem < 0 ? UNAVAILABLE : String.format(memPattern, neededMem),
        job.getSchedulingInfo());
  }
  writer.flush();
}
 
Example #10
Source File: TestGridmixStatistics.java    From hadoop with Apache License 2.0
/**
 * Test {@link Statistics.JobStats}.
 */
@Test
@SuppressWarnings("deprecation")
public void testJobStats() throws Exception {
  Job job = new Job() {};
  JobStats stats = new JobStats(1, 2, job);
  assertEquals("Incorrect num-maps", 1, stats.getNoOfMaps());
  assertEquals("Incorrect num-reds", 2, stats.getNoOfReds());
  assertTrue("Incorrect job", job == stats.getJob());
  assertNull("Unexpected job status", stats.getJobStatus());
  
  // add a new status
  JobStatus status = new JobStatus();
  stats.updateJobStatus(status);
  assertNotNull("Missing job status", stats.getJobStatus());
  assertTrue("Incorrect job status", status == stats.getJobStatus());
}
 
Example #11
Source File: TestMRAMWithNonNormalizedCapabilities.java    From big-c with Apache License 2.0
/**
 * Ensure that nothing is broken after normalization was
 * removed from the MRAM side.
 * @throws Exception
 */
@Test
public void testJobWithNonNormalizedCapabilities() throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
              + " not found. Not running test.");
    return;
  }

  JobConf jobConf = new JobConf(mrCluster.getConfig());
  jobConf.setInt("mapreduce.map.memory.mb", 700);
  jobConf.setInt("mapred.reduce.memory.mb", 1500);

  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(jobConf);
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
  job.submit();
  boolean completed = job.waitForCompletion(true);
  Assert.assertTrue("Job should be completed", completed);
  Assert.assertEquals("Job should be finished successfully", 
                  JobStatus.State.SUCCEEDED, job.getJobState());
}
 
Example #12
Source File: TestGridmixStatistics.java    From big-c with Apache License 2.0
/**
 * Test {@link Statistics.JobStats}.
 */
@Test
@SuppressWarnings("deprecation")
public void testJobStats() throws Exception {
  Job job = new Job() {};
  JobStats stats = new JobStats(1, 2, job);
  assertEquals("Incorrect num-maps", 1, stats.getNoOfMaps());
  assertEquals("Incorrect num-reds", 2, stats.getNoOfReds());
  assertTrue("Incorrect job", job == stats.getJob());
  assertNull("Unexpected job status", stats.getJobStatus());
  
  // add a new status
  JobStatus status = new JobStatus();
  stats.updateJobStatus(status);
  assertNotNull("Missing job status", stats.getJobStatus());
  assertTrue("Incorrect job status", status == stats.getJobStatus());
}
 
Example #13
Source File: TestS3MultipartOutputCommitter.java    From s3committer with Apache License 2.0
@Test
public void testJobAbort() throws Exception {
  Path jobAttemptPath = jobCommitter.getJobAttemptPath(job);
  FileSystem fs = jobAttemptPath.getFileSystem(conf);

  Set<String> uploads = runTasks(job, 4, 3);

  Assert.assertTrue(fs.exists(jobAttemptPath));

  jobCommitter.abortJob(job, JobStatus.State.KILLED);
  Assert.assertEquals("Should have committed no uploads",
      0, jobCommitter.results.getCommits().size());

  Assert.assertEquals("Should have deleted no uploads",
      0, jobCommitter.results.getDeletes().size());

  Assert.assertEquals("Should have aborted all uploads",
      uploads, getAbortedIds(jobCommitter.results.getAborts()));

  Assert.assertFalse(fs.exists(jobAttemptPath));
}
 
Example #14
Source File: MROutputCommitter.java    From incubator-tez with Apache License 2.0
private JobStatus.State getJobStateFromVertexStatusState(VertexStatus.State state) {
  switch(state) {
    case INITED:
      return JobStatus.State.PREP;
    case RUNNING:
      return JobStatus.State.RUNNING;
    case SUCCEEDED:
      return JobStatus.State.SUCCEEDED;
    case KILLED:
      return JobStatus.State.KILLED;
    case FAILED:
    case ERROR:
      return JobStatus.State.FAILED;
    default:
      throw new TezUncheckedException("Unknown VertexStatus.State: " + state);
  }
}
 
Example #15
Source File: TestMRCJCFileOutputCommitter.java    From big-c with Apache License 2.0
@SuppressWarnings("unchecked")
public void testAbort() throws IOException, InterruptedException {
  Job job = Job.getInstance();
  FileOutputFormat.setOutputPath(job, outDir);
  Configuration conf = job.getConfiguration();
  conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
  JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
  FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);

  // do setup
  committer.setupJob(jContext);
  committer.setupTask(tContext);

  // write output
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(tContext);
  writeOutput(theRecordWriter, tContext);

  // do abort
  committer.abortTask(tContext);
  File expectedFile = new File(new Path(committer.getWorkPath(), partFile)
      .toString());
  assertFalse("task temp dir still exists", expectedFile.exists());

  committer.abortJob(jContext, JobStatus.State.FAILED);
  expectedFile = new File(new Path(outDir, FileOutputCommitter.PENDING_DIR_NAME)
      .toString());
  assertFalse("job temp dir still exists", expectedFile.exists());
  assertEquals("Output directory not empty", 0, new File(outDir.toString())
      .listFiles().length);
  FileUtil.fullyDelete(new File(outDir.toString()));
}
 
Example #16
Source File: YARNRunner.java    From hadoop with Apache License 2.0
@Override
public JobStatus submitJob(JobID jobId, String jobSubmitDir, Credentials ts)
throws IOException, InterruptedException {
  
  addHistoryToken(ts);
  
  // Construct necessary information to start the MR AM
  ApplicationSubmissionContext appContext =
    createApplicationSubmissionContext(conf, jobSubmitDir, ts);

  // Submit to ResourceManager
  try {
    ApplicationId applicationId =
        resMgrDelegate.submitApplication(appContext);

    ApplicationReport appMaster = resMgrDelegate
        .getApplicationReport(applicationId);
    String diagnostics =
        (appMaster == null ?
            "application report is null" : appMaster.getDiagnostics());
    if (appMaster == null
        || appMaster.getYarnApplicationState() == YarnApplicationState.FAILED
        || appMaster.getYarnApplicationState() == YarnApplicationState.KILLED) {
      throw new IOException("Failed to run job : " +
          diagnostics);
    }
    return clientCache.getClient(jobId).getJobStatus(jobId);
  } catch (YarnException e) {
    throw new IOException(e);
  }
}
 
Example #17
Source File: TestMRRJobs.java    From tez with Apache License 2.0
@Test (timeout = 60000)
public void testFailingAttempt() throws IOException, InterruptedException,
    ClassNotFoundException {

  LOG.info("\n\n\nStarting testFailingAttempt().");

  if (!(new File(MiniTezCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniTezCluster.APPJAR
             + " not found. Not running test.");
    return;
  }

  Configuration sleepConf = new Configuration(mrrTezCluster.getConfig());

  MRRSleepJob sleepJob = new MRRSleepJob();
  sleepJob.setConf(sleepConf);

  Job job = sleepJob.createJob(1, 1, 1, 1, 1,
      1, 1, 1, 1, 1);

  job.setJarByClass(MRRSleepJob.class);
  job.setMaxMapAttempts(3); // speed up failures
  job.getConfiguration().setBoolean(MRRSleepJob.MAP_THROW_ERROR, true);
  job.getConfiguration().set(MRRSleepJob.MAP_ERROR_TASK_IDS, "0");

  job.submit();
  boolean succeeded = job.waitForCompletion(true);
  Assert.assertTrue(succeeded);
  Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());

  // FIXME once counters and task progress can be obtained properly
  // TODO verify failed task diagnostics
}
 
Example #18
Source File: ResourceMgrDelegate.java    From tez with Apache License 2.0
public JobStatus[] getAllJobs() throws IOException, InterruptedException {
  try {
    Set<String> appTypes = new HashSet<String>(1);
    appTypes.add(TezConstants.TEZ_APPLICATION_TYPE);
    return TypeConverter.fromYarnApps(client.getApplications(appTypes),
        this.conf);
  } catch (YarnException e) {
    throw new IOException(e);
  }
}
 
Example #19
Source File: TestJobOutputCommitter.java    From hadoop with Apache License 2.0
@Override
public void abortJob(JobContext context, JobStatus.State state)
    throws IOException {
  Path outputPath = FileOutputFormat.getOutputPath(context);
  FileSystem fs = outputPath.getFileSystem(context.getConfiguration());
  String fileName = 
    (state.equals(JobStatus.State.FAILED)) ? ABORT_FAILED_FILE_NAME
      : ABORT_KILLED_FILE_NAME;
  fs.create(new Path(outputPath, fileName)).close();
}
 
Example #20
Source File: YARNRunner.java    From big-c with Apache License 2.0
@Override
public JobStatus submitJob(JobID jobId, String jobSubmitDir, Credentials ts)
throws IOException, InterruptedException {
  
  addHistoryToken(ts);
  
  // Construct necessary information to start the MR AM
  ApplicationSubmissionContext appContext =
    createApplicationSubmissionContext(conf, jobSubmitDir, ts);

  // Submit to ResourceManager
  try {
    ApplicationId applicationId =
        resMgrDelegate.submitApplication(appContext);

    ApplicationReport appMaster = resMgrDelegate
        .getApplicationReport(applicationId);
    String diagnostics =
        (appMaster == null ?
            "application report is null" : appMaster.getDiagnostics());
    if (appMaster == null
        || appMaster.getYarnApplicationState() == YarnApplicationState.FAILED
        || appMaster.getYarnApplicationState() == YarnApplicationState.KILLED) {
      throw new IOException("Failed to run job : " +
          diagnostics);
    }
    return clientCache.getClient(jobId).getJobStatus(jobId);
  } catch (YarnException e) {
    throw new IOException(e);
  }
}
 
Example #21
Source File: Task.java    From hadoop with Apache License 2.0
boolean isJobAbortTask() {
  // the task is an abort task if it's marked for cleanup and the final
  // expected state is either failed or killed.
  return isJobCleanupTask() 
         && (jobRunStateForCleanup == JobStatus.State.KILLED 
             || jobRunStateForCleanup == JobStatus.State.FAILED);
}
 
Example #22
Source File: Task.java    From hadoop with Apache License 2.0
public void readFields(DataInput in) throws IOException {
  jobFile = StringInterner.weakIntern(Text.readString(in));
  taskId = TaskAttemptID.read(in);
  partition = in.readInt();
  numSlotsRequired = in.readInt();
  taskStatus.readFields(in);
  skipRanges.readFields(in);
  currentRecIndexIterator = skipRanges.skipRangeIterator();
  currentRecStartIndex = currentRecIndexIterator.next();
  skipping = in.readBoolean();
  jobCleanup = in.readBoolean();
  if (jobCleanup) {
    jobRunStateForCleanup = 
      WritableUtils.readEnum(in, JobStatus.State.class);
  }
  jobSetup = in.readBoolean();
  writeSkipRecs = in.readBoolean();
  taskCleanup = in.readBoolean();
  if (taskCleanup) {
    setPhase(TaskStatus.Phase.CLEANUP);
  }
  user = StringInterner.weakIntern(Text.readString(in));
  int len = in.readInt();
  encryptedSpillKey = new byte[len];
  extraData.readFields(in);
  in.readFully(encryptedSpillKey);
}
 
Example #23
Source File: YARNRunner.java    From tez with Apache License 2.0
@Override
public void killJob(JobID arg0) throws IOException, InterruptedException {
  /* if the job has not yet finished (still PREP or RUNNING), ask the RM to kill the application */
  JobStatus status = getJobStatus(arg0);
  if (status.getState() == JobStatus.State.RUNNING ||
      status.getState() == JobStatus.State.PREP) {
    try {
      resMgrDelegate.killApplication(TypeConverter.toYarn(arg0).getAppId());
    } catch (YarnException e) {
      throw new IOException(e);
    }
  }
}
 
Example #24
Source File: Task.java    From big-c with Apache License 2.0
boolean isJobAbortTask() {
  // the task is an abort task if it's marked for cleanup and the final
  // expected state is either failed or killed.
  return isJobCleanupTask() 
         && (jobRunStateForCleanup == JobStatus.State.KILLED 
             || jobRunStateForCleanup == JobStatus.State.FAILED);
}
 
Example #25
Source File: FileOutputCommitter.java    From hadoop with Apache License 2.0
/**
 * Delete the temporary directory, including all of the work directories.
 * @param context the job's context
 */
@Override
public void abortJob(JobContext context, JobStatus.State state) 
throws IOException {
  // delete the _temporary folder
  cleanupJob(context);
}
 
Example #26
Source File: CLI.java    From hadoop with Apache License 2.0
/**
 * Dump a list of currently running jobs.
 * @throws IOException
 * @throws InterruptedException
 */
private void listJobs(Cluster cluster) 
    throws IOException, InterruptedException {
  List<JobStatus> runningJobs = new ArrayList<JobStatus>();
  for (JobStatus job : cluster.getAllJobStatuses()) {
    if (!job.isJobComplete()) {
      runningJobs.add(job);
    }
  }
  displayJobList(runningJobs.toArray(new JobStatus[0]));
}
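The same listing works outside the CLI. A minimal standalone sketch follows (editorial illustration; the class name ListRunningJobs is invented, and the default Configuration is assumed to resolve your cluster):

// Minimal sketch: connect to the cluster and print incomplete jobs,
// mirroring what listJobs(...) above does inside the CLI.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.JobStatus;

public class ListRunningJobs {
  public static void main(String[] args) throws Exception {
    Cluster cluster = new Cluster(new Configuration());
    try {
      for (JobStatus job : cluster.getAllJobStatuses()) {
        if (!job.isJobComplete()) {
          System.out.println(job.getJobID() + "\t" + job.getState()
              + "\t" + job.getUsername() + "\t" + job.getQueue());
        }
      }
    } finally {
      cluster.close();
    }
  }
}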
 
Example #27
Source File: FileOutputCommitter.java    From big-c with Apache License 2.0
/**
 * Delete the temporary directory, including all of the work directories.
 * @param context the job's context
 */
@Override
public void abortJob(JobContext context, JobStatus.State state) 
throws IOException {
  // delete the _temporary folder
  cleanupJob(context);
}
 
Example #28
Source File: MROutputCommitter.java    From incubator-tez with Apache License 2.0
@Override
public void abortOutput(VertexStatus.State finalState) throws IOException {
  if (!initialized) {
    throw new RuntimeException("Committer not initialized");
  }
  JobStatus.State jobState = getJobStateFromVertexStatusState(finalState);
  committer.abortJob(jobContext, jobState);
}
 
Example #29
Source File: CommitterJobAbortEvent.java    From big-c with Apache License 2.0
public CommitterJobAbortEvent(JobId jobID, JobContext jobContext,
    JobStatus.State finalState) {
  super(CommitterEventType.JOB_ABORT);
  this.jobID = jobID;
  this.jobContext = jobContext;
  this.finalState = finalState;
}
 
Example #30
Source File: TestS3MultipartOutputCommitter.java    From s3committer with Apache License 2.0
@Test
public void testJobAbortFailure() throws Exception {
  Path jobAttemptPath = jobCommitter.getJobAttemptPath(job);
  FileSystem fs = jobAttemptPath.getFileSystem(conf);

  Set<String> uploads = runTasks(job, 4, 3);

  Assert.assertTrue(fs.exists(jobAttemptPath));

  jobCommitter.errors.failOnAbort(5);
  jobCommitter.errors.recoverAfterFailure();

  TestUtil.assertThrows("Should propagate the abort failure",
      AmazonClientException.class, "Fail on abort 5", new Callable<Void>() {
        @Override
        public Void call() throws IOException {
          jobCommitter.abortJob(job, JobStatus.State.KILLED);
          return null;
        }
      });

  Assert.assertEquals("Should not have committed any uploads",
      0, jobCommitter.results.getCommits().size());

  Assert.assertEquals("Should have deleted no uploads",
      0, jobCommitter.results.getDeletes().size());

  Assert.assertEquals("Should have aborted all uploads",
      12, jobCommitter.results.getAborts().size());

  Set<String> uploadIds = getCommittedIds(jobCommitter.results.getCommits());
  uploadIds.addAll(getAbortedIds(jobCommitter.results.getAborts()));

  Assert.assertEquals("Should have committed or aborted all uploads",
      uploads, uploadIds);

  Assert.assertFalse(fs.exists(jobAttemptPath));
}