Java Code Examples for org.apache.hadoop.mapred.UtilsForTests#waitFor()

The following examples show how to use org.apache.hadoop.mapred.UtilsForTests#waitFor(). Each example notes the project and source file it was taken from.
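
In these tests, UtilsForTests.waitFor(long duration) effectively pauses the calling thread for roughly the given number of milliseconds, and every example below uses it as the sleep step of a poll-until-ready loop. A minimal sketch of that idiom follows; the pollUntil helper, its isReady predicate, and the timeout are illustrative assumptions, not part of the UtilsForTests API:

import org.apache.hadoop.mapred.UtilsForTests;
import java.util.function.BooleanSupplier;

// Sketch of the polling idiom used throughout the examples: sleep in short
// increments until a condition holds. A timeout is added here so a test
// cannot hang forever; the original tests below poll without one.
public class WaitForSketch {
  static boolean pollUntil(BooleanSupplier isReady, long timeoutMs) {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!isReady.getAsBoolean()) {
      if (System.currentTimeMillis() >= deadline) {
        return false; // condition never held within the timeout
      }
      UtilsForTests.waitFor(100); // pause ~100 ms between checks
    }
    return true;
  }
}

The examples that follow inline this loop directly at each wait point.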
Example 1
Source File: TestJobOutputCommitter.java    From hadoop with Apache License 2.0
private void testKilledJob(String fileName,
    Class<? extends OutputFormat> output, String[] exclude) throws Exception {
  Path outDir = getNewOutputDir();
  Job job = MapReduceTestUtil.createKillJob(conf, outDir, inDir);
  job.setOutputFormatClass(output);

  job.submit();

  // wait for the setup to be completed
  while (job.setupProgress() != 1.0f) {
    UtilsForTests.waitFor(100);
  }

  job.killJob(); // kill the job

  assertFalse("Job did not get kill", job.waitForCompletion(true));

  if (fileName != null) {
    Path testFile = new Path(outDir, fileName);
    assertTrue("File " + testFile + " missing for job " + job.getJobID(), fs
        .exists(testFile));
  }

  // check that files from the exclude set are not present
  for (String ex : exclude) {
    Path file = new Path(outDir, ex);
    assertFalse("File " + file + " should not be present for killed job "
        + job.getJobID(), fs.exists(file));
  }
}
 
Example 2
Source File: TestJobInProgress.java    From RDFS with Apache License 2.0
private void waitTillReady(JobInProgress jip, JobConf job) {
  // wait for all the maps to get scheduled
  while (jip.runningMaps() < job.getNumMapTasks()) {
    UtilsForTests.waitFor(10);
  }
  
  // wait for all the reducers to get scheduled
  while (jip.runningReduces() < job.getNumReduceTasks()) {
    UtilsForTests.waitFor(10);
  }
}
 
Example 3
Source File: TestJobTrackerRestart.java    From hadoop-gpu with Apache License 2.0
@Override
public void setupJob(JobContext context) throws IOException {
  FileSystem fs = FileSystem.get(context.getConfiguration());
  // wait until the shared signal directory appears
  while (!fs.exists(shareDir)) {
    UtilsForTests.waitFor(100);
  }
  super.cleanupJob(context);
}
 
Example 4
Source File: TestJobTrackerRestart.java    From hadoop-gpu with Apache License 2.0
/**
 * Tests the jobtracker with restart-recovery turned off.
 * Submit a job with normal priority, maps = 2, reducers = 0
 * 
 * Wait for the job to complete 50%
 * 
 * Restart the jobtracker with recovery turned off
 * 
 * Check if the job is missing
 */
public void testRestartWithoutRecovery(MiniDFSCluster dfs, 
                                       MiniMRCluster mr) 
throws IOException {
  // III. Test a job with waiting mapper and recovery turned off
  
  FileSystem fileSys = dfs.getFileSystem();
  
  cleanUp(fileSys, shareDir);
  
  JobConf newConf = getJobs(mr.createJobConf(), 
                            new JobPriority[] {JobPriority.NORMAL}, 
                            new int[] {2}, new int[] {0},
                            outputDir, inDir, 
                            getMapSignalFile(shareDir), 
                            getReduceSignalFile(shareDir))[0];
  
  JobClient jobClient = new JobClient(newConf);
  RunningJob job = jobClient.submitJob(newConf);
  JobID id = job.getID();
  
  // make sure the job's map phase is at least 50% complete
  while (UtilsForTests.getJobStatus(jobClient, id).mapProgress() < 0.5f) {
    UtilsForTests.waitFor(100);
  }
  
  mr.stopJobTracker();
  
  // Turn off the recovery
  mr.getJobTrackerConf().setBoolean("mapred.jobtracker.restart.recover", 
                                    false);
  
  // Wait for a minute before submitting a job
  UtilsForTests.waitFor(60 * 1000);
  
  mr.startJobTracker();
  
  // Signal the tasks
  UtilsForTests.signalTasks(dfs, fileSys, true, getMapSignalFile(shareDir), 
                            getReduceSignalFile(shareDir));
  
  // Wait for the JT to be ready
  UtilsForTests.waitForJobTracker(jobClient);
  
  UtilsForTests.waitTillDone(jobClient);
  
  // The submitted job should not exist
  assertTrue("Submitted job was detected with recovery disabled", 
             UtilsForTests.getJobStatus(jobClient, id) == null);
}
 
Example 5
Source File: TestJobTrackerRestart.java    From hadoop-gpu with Apache License 2.0
/** Tests a job on jobtracker with restart-recovery turned on and empty 
 *  jobhistory file.
 * Preparation :
 *    - Configure a job with
 *       - num-maps : 0 (long waiting setup)
 *       - num-reducers : 0
 *    
 * Check if the job succeeds after restart.
 * 
 * Assumption that map slots are given first for setup.
 */
public void testJobRecoveryWithEmptyHistory(MiniDFSCluster dfs, 
                                            MiniMRCluster mr) 
throws IOException {
  mr.startTaskTracker(null, null, 1, 1);
  FileSystem fileSys = dfs.getFileSystem();
  
  cleanUp(fileSys, shareDir);
  cleanUp(fileSys, inDir);
  cleanUp(fileSys, outputDir);
  
  JobConf conf = mr.createJobConf();
  conf.setNumReduceTasks(0);
  conf.setOutputCommitter(TestEmptyJob.CommitterWithDelayCleanup.class);
  fileSys.delete(outputDir, false);
  RunningJob job1 = 
    UtilsForTests.runJob(conf, inDir, outputDir, 30, 0);
  
  conf.setNumReduceTasks(0);
  conf.setOutputCommitter(CommitterWithDelaySetup.class);
  Path inDir2 = new Path(testDir, "input2");
  fileSys.mkdirs(inDir2);
  Path outDir2 = new Path(testDir, "output2");
  fileSys.delete(outDir2, false);
  JobConf newConf = getJobs(mr.createJobConf(),
                            new JobPriority[] {JobPriority.NORMAL},
                            new int[] {10}, new int[] {0},
                            outDir2, inDir2,
                            getMapSignalFile(shareDir),
                            getReduceSignalFile(shareDir))[0];

  JobClient jobClient = new JobClient(newConf);
  RunningJob job2 = jobClient.submitJob(newConf);
  JobID id = job2.getID();

  JobInProgress jip = mr.getJobTrackerRunner().getJobTracker().getJob(id);
  
  mr.getJobTrackerRunner().getJobTracker().initJob(jip);
  
  // find out the history filename
  String history = 
    JobHistory.JobInfo.getJobHistoryFileName(jip.getJobConf(), id);
  Path historyPath = JobHistory.JobInfo.getJobHistoryLogLocation(history);
  
  //  make sure that setup is launched
  while (jip.runningMaps() == 0) {
    UtilsForTests.waitFor(100);
  }
  
  id = job1.getID();
  jip = mr.getJobTrackerRunner().getJobTracker().getJob(id);
  
  mr.getJobTrackerRunner().getJobTracker().initJob(jip);
  
  //  make sure that cleanup is launched and is waiting
  while (!jip.isCleanupLaunched()) {
    UtilsForTests.waitFor(100);
  }
  
  mr.stopJobTracker();
  
  // delete the history file .. just to be safe.
  FileSystem historyFS = historyPath.getFileSystem(conf);
  historyFS.delete(historyPath, false);
  historyFS.create(historyPath).close(); // create an empty file

  UtilsForTests.signalTasks(dfs, fileSys, getMapSignalFile(shareDir),
                            getReduceSignalFile(shareDir), (short)1);

  // Turn on the recovery
  mr.getJobTrackerConf().setBoolean("mapred.jobtracker.restart.recover", 
                                    true);
  
  mr.startJobTracker();
  
  job1.waitForCompletion();
  job2.waitForCompletion();
}