org.apache.hadoop.tools.rumen.ZombieJobProducer Java Examples

The following examples show how to use org.apache.hadoop.tools.rumen.ZombieJobProducer. Each example is taken from an open-source project; the source file and license are noted above each snippet.
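ZombieJobProducer reads a Rumen job trace (a JSON file of job histories) and returns one JobStory per call to getNextJob(), returning null once the trace is exhausted. As a minimal orientation sketch before the examples (the trace path is a placeholder, and the fragment assumes IOException may propagate to the caller):

Configuration conf = new Configuration();
// null second argument means no ZombieCluster, as in the examples below
ZombieJobProducer producer =
    new ZombieJobProducer(new Path("/path/to/job-trace.json"), null, conf);
try {
  JobStory job;
  while ((job = producer.getNextJob()) != null) {
    // inspect or replay each traced job
    System.out.println(job.getName() + " maps=" + job.getNumberMaps());
  }
} finally {
  producer.close();
}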
Example #1
Source File: TestGridMixClasses.java    From hadoop with Apache License 2.0
@Test (timeout=120000)
public void testSerialReaderThread() throws Exception {

  Configuration conf = new Configuration();
  File fin = new File("src" + File.separator + "test" + File.separator
          + "resources" + File.separator + "data" + File.separator
          + "wordcount2.json");
  // read a couple of jobs from wordcount2.json
  JobStoryProducer jobProducer = new ZombieJobProducer(new Path(
          fin.getAbsolutePath()), null, conf);
  CountDownLatch startFlag = new CountDownLatch(1);
  UserResolver resolver = new SubmitterUserResolver();
  FakeJobSubmitter submitter = new FakeJobSubmitter();
  File ws = new File("target" + File.separator + this.getClass().getName());
  if (!ws.exists()) {
    Assert.assertTrue(ws.mkdirs());
  }

  SerialJobFactory jobFactory = new SerialJobFactory(submitter, jobProducer,
          new Path(ws.getAbsolutePath()), conf, startFlag, resolver);

  Path ioPath = new Path(ws.getAbsolutePath());
  jobFactory.setDistCacheEmulator(new DistributedCacheEmulator(conf, ioPath));
  Thread test = jobFactory.createReaderThread();
  test.start();
  Thread.sleep(1000);
  // SerialReaderThread waits for startFlag
  assertEquals(0, submitter.getJobs().size());
  // start!
  startFlag.countDown();
  while (test.isAlive()) {
    Thread.sleep(1000);
    jobFactory.update(null);
  }
  // submitter was called twice
  assertEquals(2, submitter.getJobs().size());
}
 
Example #2
Source File: TestGridMixClasses.java    From big-c with Apache License 2.0
@Test (timeout=120000)
public void testSerialReaderThread() throws Exception {

  Configuration conf = new Configuration();
  File fin = new File("src" + File.separator + "test" + File.separator
          + "resources" + File.separator + "data" + File.separator
          + "wordcount2.json");
  // read a couple of jobs from wordcount2.json
  JobStoryProducer jobProducer = new ZombieJobProducer(new Path(
          fin.getAbsolutePath()), null, conf);
  CountDownLatch startFlag = new CountDownLatch(1);
  UserResolver resolver = new SubmitterUserResolver();
  FakeJobSubmitter submitter = new FakeJobSubmitter();
  File ws = new File("target" + File.separator + this.getClass().getName());
  if (!ws.exists()) {
    Assert.assertTrue(ws.mkdirs());
  }

  SerialJobFactory jobFactory = new SerialJobFactory(submitter, jobProducer,
          new Path(ws.getAbsolutePath()), conf, startFlag, resolver);

  Path ioPath = new Path(ws.getAbsolutePath());
  jobFactory.setDistCacheEmulator(new DistributedCacheEmulator(conf, ioPath));
  Thread test = jobFactory.createReaderThread();
  test.start();
  Thread.sleep(1000);
  // SerialReaderThread waits for startFlag
  assertEquals(0, submitter.getJobs().size());
  // start!
  startFlag.countDown();
  while (test.isAlive()) {
    Thread.sleep(1000);
    jobFactory.update(null);
  }
  // submitter was called twice
  assertEquals(2, submitter.getJobs().size());
}
 
Example #3
Source File: SortedZombieJobProducer.java    From RDFS with Apache License 2.0
public SortedZombieJobProducer(Path path, ZombieCluster cluster,
    Configuration conf, int bufferSize)
    throws IOException {
  producer = new ZombieJobProducer(path, cluster, conf);
  jobBufferSize = bufferSize;
  initBuffer();
}
 
Example #4
Source File: TestSimulatorEndToEnd.java    From RDFS with Apache License 2.0
private int getNumberJobs(Path inputFile, Configuration conf)
    throws IOException {
  ZombieJobProducer jobProducer = new ZombieJobProducer(inputFile, null, conf);
  try {
    int numJobs = 0;
    while (jobProducer.getNextJob() != null) {
      ++numJobs;
    }
    return numJobs;
  } finally {
    jobProducer.close();
  }
}
 
Example #5
Source File: SortedZombieJobProducer.java    From RDFS with Apache License 2.0
public SortedZombieJobProducer(Path path, ZombieCluster cluster,
    Configuration conf, long randomSeed, int bufferSize) throws IOException {
  producer = new ZombieJobProducer(path, cluster, conf);
  jobBufferSize = bufferSize;
  initBuffer();
}
 
Example #6
Source File: Gridmix.java    From hadoop with Apache License 2.0
/**
 * Create an appropriate {@code JobStoryProducer} object for the
 * given trace.
 * 
 * @param traceIn the path to the trace file. The special path
 * "-" denotes the standard input stream.
 *
 * @param conf the configuration to be used.
 *
 * @return a {@code JobStoryProducer} reading the given trace.
 *
 * @throws IOException if there was an error.
 */
protected JobStoryProducer createJobStoryProducer(String traceIn,
    Configuration conf) throws IOException {
  if ("-".equals(traceIn)) {
    return new ZombieJobProducer(System.in, null);
  }
  return new ZombieJobProducer(new Path(traceIn), null, conf);
}
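The two constructor overloads selected above are the same ones used throughout these examples; a short sketch of each construction path (the file path is a placeholder):

Configuration conf = new Configuration();

// Trace stored as a file; the Path is resolved via the given Configuration.
JobStoryProducer fromFile =
    new ZombieJobProducer(new Path("/tmp/job-trace.json"), null, conf);

// Trace piped on standard input, chosen by passing the special "-" argument.
JobStoryProducer fromStdin = new ZombieJobProducer(System.in, null);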
 
Example #7
Source File: JobFactory.java    From hadoop with Apache License 2.0
/**
 * Creating a new instance does not start the thread.
 * @param submitter Component to which deserialized jobs are passed
 * @param jobTrace Stream of job traces with which to construct a
 *                 {@link org.apache.hadoop.tools.rumen.ZombieJobProducer}
 * @param scratch Directory into which to write output from simulated jobs
 * @param conf Config passed to all jobs to be submitted
 * @param startFlag Latch released from main to start pipeline
 * @param userResolver Maps each user in the trace to a user under which
 *                     simulated jobs are submitted
 * @throws java.io.IOException if the job trace cannot be read
 */
public JobFactory(JobSubmitter submitter, InputStream jobTrace,
    Path scratch, Configuration conf, CountDownLatch startFlag,
    UserResolver userResolver) throws IOException {
  this(submitter, new ZombieJobProducer(jobTrace, null), scratch, conf,
      startFlag, userResolver);
}
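Because the constructor only wraps the stream, any InputStream over a Rumen trace can be used; a hedged sketch with a placeholder file name:

// Open a Rumen JSON trace as a plain stream (placeholder file name).
InputStream jobTrace = new FileInputStream("/tmp/wordcount-trace.json");

// Same overload the constructor above delegates to; null means no ZombieCluster.
JobStoryProducer producer = new ZombieJobProducer(jobTrace, null);

As Example #1 shows with SerialJobFactory, the reader thread created by the factory stays blocked until the startFlag latch is counted down.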
 
Example #8
Source File: Gridmix.java    From big-c with Apache License 2.0
/**
 * Create an appropriate {@code JobStoryProducer} object for the
 * given trace.
 * 
 * @param traceIn the path to the trace file. The special path
 * "-" denotes the standard input stream.
 *
 * @param conf the configuration to be used.
 *
 * @return a {@code JobStoryProducer} reading the given trace.
 *
 * @throws IOException if there was an error.
 */
protected JobStoryProducer createJobStoryProducer(String traceIn,
    Configuration conf) throws IOException {
  if ("-".equals(traceIn)) {
    return new ZombieJobProducer(System.in, null);
  }
  return new ZombieJobProducer(new Path(traceIn), null, conf);
}
 
Example #9
Source File: JobFactory.java    From big-c with Apache License 2.0
/**
 * Creating a new instance does not start the thread.
 * @param submitter Component to which deserialized jobs are passed
 * @param jobTrace Stream of job traces with which to construct a
 *                 {@link org.apache.hadoop.tools.rumen.ZombieJobProducer}
 * @param scratch Directory into which to write output from simulated jobs
 * @param conf Config passed to all jobs to be submitted
 * @param startFlag Latch released from main to start pipeline
 * @param userResolver Maps each user in the trace to a user under which
 *                     simulated jobs are submitted
 * @throws java.io.IOException if the job trace cannot be read
 */
public JobFactory(JobSubmitter submitter, InputStream jobTrace,
    Path scratch, Configuration conf, CountDownLatch startFlag,
    UserResolver userResolver) throws IOException {
  this(submitter, new ZombieJobProducer(jobTrace, null), scratch, conf,
      startFlag, userResolver);
}
 
Example #10
Source File: JobFactory.java    From RDFS with Apache License 2.0
/**
 * Creating a new instance does not start the thread.
 * @param submitter Component to which deserialized jobs are passed
 * @param jobTrace Stream of job traces with which to construct a
 *                 {@link org.apache.hadoop.tools.rumen.ZombieJobProducer}
 * @param scratch Directory into which to write output from simulated jobs
 * @param conf Config passed to all jobs to be submitted
 * @param startFlag Latch released from main to start pipeline
 * @throws IOException if the job trace cannot be read
 */
public JobFactory(JobSubmitter submitter, InputStream jobTrace,
    Path scratch, Configuration conf, CountDownLatch startFlag)
    throws IOException {
  this(submitter, new ZombieJobProducer(jobTrace, null), scratch, conf,
      startFlag);
}