org.apache.hadoop.mapred.MRCaching.TestResult Java Examples

The following examples show how to use org.apache.hadoop.mapred.MRCaching.TestResult. You can vote up the examples you find useful or vote down the ones you don't, and go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example #1
Source File: TestMiniMRDFSCaching.java — from the hadoop project (Apache License 2.0)
/**
 * Runs the wordcount example on a mini DFS + MR cluster with the
 * distributed cache enabled — once plainly and once exercising the
 * symlink code path — asserting that the cached archives match the
 * expected output both times.
 *
 * @throws IOException if cluster setup, cache setup, job submission,
 *                     or filesystem teardown fails
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    dfs = new MiniDFSCluster.Builder(conf).build();
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    // Tear down in reverse order of startup: stop the MR cluster first so
    // no tasks are still talking to DFS, then stop DFS, and close the
    // client FileSystem last. fileSys.close() is the only cleanup call
    // that throws a checked IOException; running it first (as before)
    // would skip both cluster shutdowns when it fails and leak the
    // mini clusters.
    if (mr != null) {
      mr.shutdown();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (fileSys != null) {
      fileSys.close();
    }
  }
}
 
Example #2
Source File: TestMiniMRDFSCaching.java — from the big-c project (Apache License 2.0)
/**
 * Runs the wordcount example on a mini DFS + MR cluster with the
 * distributed cache enabled — once plainly and once exercising the
 * symlink code path — asserting that the cached archives match the
 * expected output both times.
 *
 * @throws IOException if cluster setup, cache setup, job submission,
 *                     or filesystem teardown fails
 */
public void testWithDFS() throws IOException {
  MiniMRCluster mr = null;
  MiniDFSCluster dfs = null;
  FileSystem fileSys = null;
  try {
    JobConf conf = new JobConf();
    dfs = new MiniDFSCluster.Builder(conf).build();
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(2, fileSys.getUri().toString(), 4);
    MRCaching.setupCache("/cachedir", fileSys);
    // run the wordcount example with caching
    TestResult ret = MRCaching.launchMRCache("/testing/wc/input",
                                          "/testing/wc/output",
                                          "/cachedir",
                                          mr.createJobConf(),
                                          "The quick brown fox\nhas many silly\n"
                                          + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
    // launch MR cache with symlinks
    ret = MRCaching.launchMRCache("/testing/wc/input",
                                  "/testing/wc/output",
                                  "/cachedir",
                                  mr.createJobConf(),
                                  "The quick brown fox\nhas many silly\n"
                                  + "red fox sox\n");
    assertTrue("Archives not matching", ret.isOutputOk);
  } finally {
    // Tear down in reverse order of startup: stop the MR cluster first so
    // no tasks are still talking to DFS, then stop DFS, and close the
    // client FileSystem last. fileSys.close() is the only cleanup call
    // that throws a checked IOException; running it first (as before)
    // would skip both cluster shutdowns when it fails and leak the
    // mini clusters.
    if (mr != null) {
      mr.shutdown();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
    if (fileSys != null) {
      fileSys.close();
    }
  }
}
 
Example #3
Source File: TestMiniMRLocalFS.java — from the RDFS project (Apache License 2.0)
/**
 * Exercises a MiniMR cluster backed by the local filesystem: runs the PI
 * estimator, runs wordcount with distributed-cache files, verifies the
 * task reports and job counters for that job, then runs the custom-format
 * and secondary-sort jobs.
 *
 * @throws IOException            if a job or cluster operation fails
 * @throws InterruptedException   if a job wait is interrupted
 * @throws ClassNotFoundException if a job class cannot be loaded
 */
public void testWithLocal()
    throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRCluster cluster = null;
  try {
    cluster = new MiniMRCluster(2, "file:///", 3);
    TestMiniMRWithDFS.runPI(cluster, cluster.createJobConf());

    // run the wordcount example with caching
    JobConf jobConf = cluster.createJobConf();
    TestResult result = MRCaching.launchMRCache(
        TEST_ROOT_DIR + "/wc/input",
        TEST_ROOT_DIR + "/wc/output",
        TEST_ROOT_DIR + "/cachedir",
        jobConf,
        "The quick brown fox\n" + "has many silly\n" + "red fox sox\n");
    // assert the number of lines read during caching
    assertTrue("Failed test archives not matching", result.isOutputOk);

    // test the task report fetchers
    JobClient jobClient = new JobClient(jobConf);
    JobID id = result.job.getID();
    assertEquals("number of setups", 2,
                 jobClient.getSetupTaskReports(id).length);
    assertEquals("number of maps", 1,
                 jobClient.getMapTaskReports(id).length);
    assertEquals("number of reduces", 1,
                 jobClient.getReduceTaskReports(id).length);
    assertEquals("number of cleanups", 2,
                 jobClient.getCleanupTaskReports(id).length);

    // verify the record counters reported by the job
    Counters counters = result.job.getCounters();
    assertEquals("number of map inputs", 3,
                 counters.getCounter(Task.Counter.MAP_INPUT_RECORDS));
    assertEquals("number of reduce outputs", 9,
                 counters.getCounter(Task.Counter.REDUCE_OUTPUT_RECORDS));

    runCustomFormats(cluster);
    runSecondarySort(cluster.createJobConf());
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example #4
Source File: TestMiniMRLocalFS.java — from the hadoop-gpu project (Apache License 2.0)
/**
 * Exercises a MiniMR cluster backed by the local filesystem: runs the PI
 * estimator, runs wordcount with distributed-cache files, verifies the
 * task reports and job counters for that job, then runs the custom-format
 * and secondary-sort jobs.
 *
 * @throws IOException            if a job or cluster operation fails
 * @throws InterruptedException   if a job wait is interrupted
 * @throws ClassNotFoundException if a job class cannot be loaded
 */
public void testWithLocal()
    throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRCluster miniCluster = null;
  try {
    miniCluster = new MiniMRCluster(2, "file:///", 3);
    TestMiniMRWithDFS.runPI(miniCluster, miniCluster.createJobConf());

    // run the wordcount example with caching
    JobConf wcConf = miniCluster.createJobConf();
    String wcInput = "The quick brown fox\n" + "has many silly\n"
                     + "red fox sox\n";
    TestResult wcResult =
        MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
                                TEST_ROOT_DIR + "/wc/output",
                                TEST_ROOT_DIR + "/cachedir",
                                wcConf, wcInput);
    // assert the number of lines read during caching
    assertTrue("Failed test archives not matching", wcResult.isOutputOk);

    // test the task report fetchers
    JobClient wcClient = new JobClient(wcConf);
    JobID wcJobId = wcResult.job.getID();
    TaskReport[] taskReports = wcClient.getSetupTaskReports(wcJobId);
    assertEquals("number of setups", 2, taskReports.length);
    taskReports = wcClient.getMapTaskReports(wcJobId);
    assertEquals("number of maps", 1, taskReports.length);
    taskReports = wcClient.getReduceTaskReports(wcJobId);
    assertEquals("number of reduces", 1, taskReports.length);
    taskReports = wcClient.getCleanupTaskReports(wcJobId);
    assertEquals("number of cleanups", 2, taskReports.length);

    // verify the record counters reported by the job
    Counters wcCounters = wcResult.job.getCounters();
    assertEquals("number of map inputs", 3,
                 wcCounters.getCounter(Task.Counter.MAP_INPUT_RECORDS));
    assertEquals("number of reduce outputs", 9,
                 wcCounters.getCounter(Task.Counter.REDUCE_OUTPUT_RECORDS));

    runCustomFormats(miniCluster);
    runSecondarySort(miniCluster.createJobConf());
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}