Java Code Examples for org.apache.hadoop.hbase.HBaseTestingUtility#getDFSCluster()

The following examples show how to use org.apache.hadoop.hbase.HBaseTestingUtility#getDFSCluster(). All of them are drawn from the Apache HBase test suite; the source file for each example is noted in its header.
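As a quick orientation before the examples: the method is typically used in one lifecycle, where the test starts a mini DFS cluster through the testing utility, obtains the MiniDFSCluster handle via getDFSCluster(), works against its FileSystem, and shuts everything down afterwards. The snippet below is a minimal sketch of that pattern; the variable names are illustrative, and the usual org.apache.hadoop imports are assumed.

// Minimal lifecycle sketch; not taken from any single example below.
HBaseTestingUtility util = new HBaseTestingUtility();
util.startMiniDFSCluster(1);                      // boot a single-datanode HDFS
try {
  MiniDFSCluster cluster = util.getDFSCluster();  // handle to the running mini cluster
  FileSystem fs = cluster.getFileSystem();        // HDFS-backed FileSystem for the test
  // ... exercise code that needs a real HDFS instance ...
} finally {
  util.shutdownMiniCluster();                     // always tear the cluster down
}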
Example 1
Source File: TestFileLink.java    From hbase with Apache License 2.0
/**
 * Test, on HDFS, that the FileLink is still readable
 * even when the current file gets renamed.
 */
@Test
public void testHDFSLinkReadDuringRename() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    testLinkReadDuringRename(fs, testUtil.getDefaultRootDirPath());
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
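The testLinkReadDuringRename helper invoked above is defined elsewhere in TestFileLink and is not part of this excerpt. Judging from how the test names and uses it, the helper writes a file, opens it through a FileLink that also covers the post-rename location, renames the file mid-read, and checks that the stream stays readable. The version below is a hypothetical reconstruction of that shape, not the actual HBase implementation; it reuses the writeSomeData and dataVerify helpers sketched after Example 6.

// Hypothetical sketch of the helper; the real one in TestFileLink may differ.
private void testLinkReadDuringRename(FileSystem fs, Path rootDir) throws Exception {
  Path originalPath = new Path(rootDir, "test.file");
  Path archivedPath = new Path(rootDir, "archived.file");
  writeSomeData(fs, originalPath, 1 << 20, (byte) 2);   // write 1 MB of test data

  // The link resolves to whichever of its locations currently exists.
  FileLink link = new FileLink(originalPath, archivedPath);
  FSDataInputStream in = link.open(fs);
  try {
    byte[] data = new byte[8192];
    int n = in.read(data);                               // read from the original location
    dataVerify(data, n, (byte) 2);

    assertTrue(fs.rename(originalPath, archivedPath));   // rename while the stream is open

    n = in.read(data);                                   // the link follows the renamed file
    dataVerify(data, n, (byte) 2);
  } finally {
    in.close();
  }
}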
 
Example 2
Source File: TestWALEntryStream.java    From hbase with Apache License 2.0
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  CONF = TEST_UTIL.getConfiguration();
  TEST_UTIL.startMiniDFSCluster(3);

  cluster = TEST_UTIL.getDFSCluster();
  fs = cluster.getFileSystem();
}
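The class-level setup above would normally be paired with an @AfterClass method that stops the cluster. That teardown is not shown in this excerpt, but presumably looks something like this sketch:

// Sketch of the matching teardown (not shown in the excerpt above).
@AfterClass
public static void tearDownAfterClass() throws Exception {
  TEST_UTIL.shutdownMiniCluster();  // stops the mini DFS cluster started in setUpBeforeClass
}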
 
Example 3
Source File: TestBlockReorderBlockLocation.java    From hbase with Apache License 2.0
@Before
public void setUp() throws Exception {
  htu = new HBaseTestingUtility();
  htu.getConfiguration().setInt("dfs.blocksize", 1024);// For the test with multiple blocks
  htu.getConfiguration().setInt("dfs.replication", 3);
  htu.startMiniDFSCluster(3,
      new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3});

  conf = htu.getConfiguration();
  cluster = htu.getDFSCluster();
  dfs = (DistributedFileSystem) FileSystem.get(conf);
}
 
Example 4
Source File: TestBlockReorder.java    From hbase with Apache License 2.0
@Before
public void setUp() throws Exception {
  htu = new HBaseTestingUtility();
  htu.getConfiguration().setInt("dfs.blocksize", 1024);// For the test with multiple blocks
  htu.getConfiguration().setInt("dfs.replication", 3);
  htu.startMiniDFSCluster(3,
      new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3});

  conf = htu.getConfiguration();
  cluster = htu.getDFSCluster();
  dfs = (DistributedFileSystem) FileSystem.get(conf);
}
 
Example 5
Source File: TestBlockReorderMultiBlocks.java    From hbase with Apache License 2.0
@Before
public void setUp() throws Exception {
  htu = new HBaseTestingUtility();
  htu.getConfiguration().setInt("dfs.blocksize", 1024);// For the test with multiple blocks
  htu.getConfiguration().setInt("dfs.replication", 3);
  htu.startMiniDFSCluster(3,
      new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3});

  conf = htu.getConfiguration();
  cluster = htu.getDFSCluster();
  dfs = (DistributedFileSystem) FileSystem.get(conf);
}
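Examples 3 through 5 share this identical setUp; each test class would typically pair it with an @After method that stops the DFS cluster, roughly as in the sketch below (the actual teardown in those classes may differ):

// Sketch of a matching per-test teardown; the real classes may differ.
@After
public void tearDown() throws Exception {
  htu.shutdownMiniDFSCluster();  // stop the three-datanode cluster started in setUp
}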
 
Example 6
Source File: TestFileLink.java    From hbase with Apache License 2.0
/**
 * Test that link is still readable even when the current file gets deleted.
 *
 * NOTE: This test is valid only on HDFS.
 * When a file is deleted from a local file-system, it is simply 'unlinked'.
 * The inode, which contains the file's data, is not deleted until all
 * processes have finished with it.
 * In HDFS, once a read goes beyond the cached block locations,
 * the namenode is queried again using the filename, and the
 * deleted file no longer exists (FileNotFoundException).
 */
@Test
public void testHDFSLinkReadDuringDelete() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    List<Path> files = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
      Path path = new Path(String.format("test-data-%d", i));
      writeSomeData(fs, path, 1 << 20, (byte)i);
      files.add(path);
    }

    FileLink link = new FileLink(files);
    FSDataInputStream in = link.open(fs);
    try {
      byte[] data = new byte[8192];
      int n;

      // Switch to file 1
      n = in.read(data);
      dataVerify(data, n, (byte)0);
      fs.delete(files.get(0), true);
      skipBuffer(in, (byte)0);

      // Switch to file 2
      n = in.read(data);
      dataVerify(data, n, (byte)1);
      fs.delete(files.get(1), true);
      skipBuffer(in, (byte)1);

      // Switch to file 3
      n = in.read(data);
      dataVerify(data, n, (byte)2);
      fs.delete(files.get(2), true);
      skipBuffer(in, (byte)2);

      // No more files available
      try {
        n = in.read(data);
        assert(n <= 0);
      } catch (FileNotFoundException e) {
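        // expected: every linked file has been deleted from HDFS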
        assertTrue(true);
      }
    } finally {
      in.close();
    }
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
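Example 6 leans on three helpers, writeSomeData, dataVerify, and skipBuffer, that are defined elsewhere in TestFileLink and not shown above. The implementations below are plausible reconstructions inferred from how the test calls them, offered as a sketch rather than the actual HBase code; java.util.Arrays and the usual Hadoop and JUnit imports are assumed.

// Plausible reconstructions of the TestFileLink helpers used above; sketches only.

/** Writes 'size' bytes, all set to 'v', to the given path. */
private static void writeSomeData(FileSystem fs, Path path, long size, byte v) throws IOException {
  byte[] chunk = new byte[4096];
  Arrays.fill(chunk, v);
  try (FSDataOutputStream out = fs.create(path)) {
    for (long written = 0; written < size; written += chunk.length) {
      out.write(chunk, 0, (int) Math.min(chunk.length, size - written));
    }
  }
}

/** Asserts that the first 'n' bytes read all carry the expected value. */
private static void dataVerify(byte[] data, int n, byte v) {
  for (int i = 0; i < n; i++) {
    assertEquals(v, data[i]);
  }
}

/** Drains the stream while it keeps returning bytes of value 'v',
 *  stopping once the link has moved on to the next file. */
private static void skipBuffer(FSDataInputStream in, byte v) throws IOException {
  byte[] data = new byte[8192];
  int n;
  while ((n = in.read(data)) > 0) {
    for (int i = 0; i < n; i++) {
      if (data[i] != v) {
        return;  // reached data belonging to the next linked file
      }
    }
  }
}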