Java Code Examples for org.apache.hadoop.mapred.MiniMRCluster#shutdown()

The following examples show how to use org.apache.hadoop.mapred.MiniMRCluster#shutdown(). Each example is drawn from an open-source project and is attributed to its original source file.
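All of the examples follow the same lifecycle: start a MiniDFSCluster, start a MiniMRCluster on top of its file system, run the test, and call shutdown() on both clusters during cleanup. The sketch below distills that pattern; it assumes the Hadoop 1.x-era test APIs used in these examples (the MiniDFSCluster package has moved between Hadoop versions), and the class name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;

public class MiniMRClusterLifecycle {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    try {
      Configuration conf = new Configuration();
      // Start an in-process HDFS cluster with two datanodes.
      dfs = new MiniDFSCluster(conf, 2, true, null);
      FileSystem fs = dfs.getFileSystem();
      // Start an in-process MapReduce cluster backed by that HDFS.
      mr = new MiniMRCluster(2, fs.getUri().toString(), 1);
      // Job configurations created from the mini cluster run against it.
      JobConf jobConf = mr.createJobConf();
      // ... submit test jobs here ...
    } finally {
      // Release cluster resources even if the test body throws.
      if (mr != null) {
        mr.shutdown();
      }
      if (dfs != null) {
        dfs.shutdown();
      }
    }
  }
}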
Example 1
Source File: TestUlimit.java    From RDFS with Apache License 2.0
/**
 * Tests the memory limit setting for streaming processes.
 * Launches a streaming app that allocates 10MB of memory.
 * First the program is launched with sufficient memory and is
 * expected to succeed; then it is launched with insufficient
 * memory and is expected to fail.
 */
public void testCommandLine() {
  if (StreamUtil.isCygwin()) {
    return;
  }
  try {
    final int numSlaves = 2;
    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster(conf, numSlaves, true, null);
    fs = dfs.getFileSystem();
    
    mr = new MiniMRCluster(numSlaves, fs.getUri().toString(), 1);
    writeInputFile(fs, inputPath);
    map = StreamUtil.makeJavaCommand(UlimitApp.class, new String[]{});  
    runProgram(SET_MEMORY_LIMIT);
    fs.delete(outputPath, true);
    assertFalse("output not cleaned up", fs.exists(outputPath));
    mr.waitUntilIdle();
  } catch(IOException e) {
    fail(e.toString());
  } finally {
    mr.shutdown();
    dfs.shutdown();
  }
}
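Note that the finally block above calls shutdown() unconditionally: if the MiniDFSCluster constructor throws, mr is still null and the cleanup itself fails with a NullPointerException. A defensive variant (an illustrative sketch, not the project's actual code) guards each call:

  } finally {
    // Guard against partially initialized clusters.
    if (mr != null) {
      mr.shutdown();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
  }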
 
Example 2
Source File: TestUlimit.java    From hadoop-gpu with Apache License 2.0
/**
 * Tests the memory limit setting for streaming processes.
 * Launches a streaming app that allocates 10MB of memory.
 * First the program is launched with sufficient memory and is
 * expected to succeed; then it is launched with insufficient
 * memory and is expected to fail.
 */
public void testCommandLine() {
  if (StreamUtil.isCygwin()) {
    return;
  }
  try {
    final int numSlaves = 2;
    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster(conf, numSlaves, true, null);
    fs = dfs.getFileSystem();
    
    mr = new MiniMRCluster(numSlaves, fs.getUri().toString(), 1);
    writeInputFile(fs, inputPath);
    map = StreamUtil.makeJavaCommand(UlimitApp.class, new String[]{});  
    runProgram(SET_MEMORY_LIMIT);
    fs.delete(outputPath, true);
    assertFalse("output not cleaned up", fs.exists(outputPath));
    mr.waitUntilIdle();
  } catch(IOException e) {
    fail(e.toString());
  } finally {
    mr.shutdown();
    dfs.shutdown();
  }
}
 
Example 3
Source File: TestRaidShell.java    From RDFS with Apache License 2.0
/**
 * Tests the distRaid command.
 * @throws Exception
 */
public void testDistRaid() throws Exception {
  LOG.info("TestDist started.");
  // create a dfs and map-reduce cluster
  mySetup(3, -1);
  MiniMRCluster mr = new MiniMRCluster(4, namenode, 3);
  String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
  conf.set("mapred.job.tracker", jobTrackerName);

  try {
    // Create files to be raided
    TestRaidNode.createTestFiles(fileSys, RAID_SRC_PATH,
        "/raid" + RAID_SRC_PATH, 1, 3, (short)3);
    String subDir = RAID_SRC_PATH + "/subdir";
    TestRaidNode.createTestFiles(
        fileSys, subDir, "/raid" + subDir, 1, 3, (short)3);
    
    // Create RaidShell and raid the files.
    RaidShell shell = new RaidShell(conf);
    String[] args = new String[3];
    args[0] = "-distRaid";
    args[1] = RAID_POLICY_NAME;
    args[2] = RAID_SRC_PATH;
    assertEquals(0, ToolRunner.run(shell, args));

    // Check files are raided
    checkIfFileRaided(new Path(RAID_SRC_PATH, "file0"));
    checkIfFileRaided(new Path(subDir, "file0"));
  } finally {
    mr.shutdown();
    myTearDown();
  }
}
 
Example 4
Source File: TestDistributionPolicy.java    From RDFS with Apache License 2.0
protected void setUp() throws Exception {
  super.setUp();
  try {
    dfsCluster =
        new MiniDFSCluster(conf, numDataNodes, true, (String[]) null);

    fs = dfsCluster.getFileSystem();
    if (fs.exists(inputPath)) {
      fs.delete(inputPath);
    }
    fs.copyFromLocalFile(localInputPath, inputPath);
    if (fs.exists(updatePath)) {
      fs.delete(updatePath);
    }
    fs.copyFromLocalFile(localUpdatePath, updatePath);

    if (fs.exists(outputPath)) {
      // do not create outputPath here; mapred will create it
      fs.delete(outputPath);
    }

    if (fs.exists(indexPath)) {
      fs.delete(indexPath);
    }

    mrCluster =
        new MiniMRCluster(numTaskTrackers, fs.getUri().toString(), 1);

  } catch (IOException e) {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
      dfsCluster = null;
    }

    if (fs != null) {
      fs.close();
      fs = null;
    }

    if (mrCluster != null) {
      mrCluster.shutdown();
      mrCluster = null;
    }

    throw e;
  }

}
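The shutdown() calls in this setUp run only on the failure path; the matching tearDown is not part of this listing, but it would typically perform the same cleanup in reverse order. A minimal sketch of such a companion method, assuming the same field names (hypothetical, not the project's actual code):

@Override
protected void tearDown() throws Exception {
  // Hypothetical companion to the setUp above: release the MapReduce
  // cluster, the file system handle, and the DFS cluster in turn.
  if (mrCluster != null) {
    mrCluster.shutdown();
    mrCluster = null;
  }
  if (fs != null) {
    fs.close();
    fs = null;
  }
  if (dfsCluster != null) {
    dfsCluster.shutdown();
    dfsCluster = null;
  }
  super.tearDown();
}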
 
Example 5
Source File: TestIndexUpdater.java    From RDFS with Apache License 2.0
protected void setUp() throws Exception {
  super.setUp();
  try {
    dfsCluster =
        new MiniDFSCluster(conf, numDataNodes, true, (String[]) null);

    fs = dfsCluster.getFileSystem();
    if (fs.exists(inputPath)) {
      fs.delete(inputPath);
    }
    fs.copyFromLocalFile(localInputPath, inputPath);

    if (fs.exists(outputPath)) {
      // do not create outputPath here; mapred will create it
      fs.delete(outputPath);
    }

    if (fs.exists(indexPath)) {
      fs.delete(indexPath);
    }

    mrCluster =
        new MiniMRCluster(numTaskTrackers, fs.getUri().toString(), 1);

  } catch (IOException e) {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
      dfsCluster = null;
    }

    if (fs != null) {
      fs.close();
      fs = null;
    }

    if (mrCluster != null) {
      mrCluster.shutdown();
      mrCluster = null;
    }

    throw e;
  }

}
 
Example 6
Source File: TestDistributionPolicy.java    From hadoop-gpu with Apache License 2.0
protected void setUp() throws Exception {
  super.setUp();
  try {
    dfsCluster =
        new MiniDFSCluster(conf, numDataNodes, true, (String[]) null);

    fs = dfsCluster.getFileSystem();
    if (fs.exists(inputPath)) {
      fs.delete(inputPath);
    }
    fs.copyFromLocalFile(localInputPath, inputPath);
    if (fs.exists(updatePath)) {
      fs.delete(updatePath);
    }
    fs.copyFromLocalFile(localUpdatePath, updatePath);

    if (fs.exists(outputPath)) {
      // do not create outputPath here; mapred will create it
      fs.delete(outputPath);
    }

    if (fs.exists(indexPath)) {
      fs.delete(indexPath);
    }

    mrCluster =
        new MiniMRCluster(numTaskTrackers, fs.getUri().toString(), 1);

  } catch (IOException e) {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
      dfsCluster = null;
    }

    if (fs != null) {
      fs.close();
      fs = null;
    }

    if (mrCluster != null) {
      mrCluster.shutdown();
      mrCluster = null;
    }

    throw e;
  }

}
 
Example 7
Source File: TestIndexUpdater.java    From hadoop-gpu with Apache License 2.0
protected void setUp() throws Exception {
  super.setUp();
  try {
    dfsCluster =
        new MiniDFSCluster(conf, numDataNodes, true, (String[]) null);

    fs = dfsCluster.getFileSystem();
    if (fs.exists(inputPath)) {
      fs.delete(inputPath);
    }
    fs.copyFromLocalFile(localInputPath, inputPath);

    if (fs.exists(outputPath)) {
      // do not create outputPath here; mapred will create it
      fs.delete(outputPath);
    }

    if (fs.exists(indexPath)) {
      fs.delete(indexPath);
    }

    mrCluster =
        new MiniMRCluster(numTaskTrackers, fs.getUri().toString(), 1);

  } catch (IOException e) {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
      dfsCluster = null;
    }

    if (fs != null) {
      fs.close();
      fs = null;
    }

    if (mrCluster != null) {
      mrCluster.shutdown();
      mrCluster = null;
    }

    throw e;
  }

}