Java Code Examples for org.apache.hadoop.hdfs.DFSTestUtil#verifyExpectedCacheUsage()
The following examples show how to use org.apache.hadoop.hdfs.DFSTestUtil#verifyExpectedCacheUsage(). Each example is taken from an open source project; the originating source file and project are listed above each snippet.
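Judging from the calls in the examples below, the helper takes the expected number of cached bytes, the expected number of cached blocks, and the DataNode's FsDatasetSpi, waits for the dataset to report those values, and returns the cached byte count (Example 3 assigns the return value). The sketch below is a minimal, hypothetical illustration of that call pattern; the waitForCacheUsage name and the surrounding setup are assumptions for illustration, not part of DFSTestUtil.

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

// Hypothetical helper (not part of DFSTestUtil) illustrating the call
// pattern; "cluster" is a running MiniDFSCluster with DataNode caching
// enabled, as in the tests below.
static void waitForCacheUsage(MiniDFSCluster cluster, long expectedBytes,
    long expectedBlocks) throws Exception {
  FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
  // Waits until the DataNode's dataset reports the expected cached byte
  // and block counts; the tests below use this call to synchronize with
  // the DataNode's asynchronous caching and uncaching work.
  DFSTestUtil.verifyExpectedCacheUsage(expectedBytes, expectedBlocks, fsd);
}

Passing (0, 0, fsd) is the usual way to assert that everything has been uncached again, as in the tearDown method of Example 2.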
Example 1
Source File: TestFsDatasetCache.java From hadoop with Apache License 2.0 | 6 votes
@Test(timeout=60000)
public void testPageRounder() throws Exception {
  // Write a small file
  Path fileName = new Path("/testPageRounder");
  final int smallBlocks = 512; // This should be smaller than the page size
  assertTrue("Page size should be greater than smallBlocks!",
      PAGE_SIZE > smallBlocks);
  final int numBlocks = 5;
  final int fileLen = smallBlocks * numBlocks;
  FSDataOutputStream out =
      fs.create(fileName, false, 4096, (short)1, smallBlocks);
  out.write(new byte[fileLen]);
  out.close();
  HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations(
      fileName, 0, fileLen);
  // Cache the file and check the sizes match the page size
  setHeartbeatResponse(cacheBlocks(locs));
  DFSTestUtil.verifyExpectedCacheUsage(PAGE_SIZE * numBlocks, numBlocks, fsd);
  // Uncache and check that it decrements by the page size too
  setHeartbeatResponse(uncacheBlocks(locs));
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
}
Example 2
Source File: TestFsDatasetCache.java From hadoop with Apache License 2.0 | 5 votes
@After
public void tearDown() throws Exception {
  // Verify that each test uncached whatever it cached. This cleanup is
  // required so that file descriptors are not leaked across tests.
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
  if (fs != null) {
    fs.close();
  }
  if (cluster != null) {
    cluster.shutdown();
  }
  // Restore the original CacheManipulator
  NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
}
Example 3
Source File: TestFsDatasetCache.java From hadoop with Apache License 2.0 | 4 votes
@Test(timeout=600000)
public void testFilesExceedMaxLockedMemory() throws Exception {
  LOG.info("beginning testFilesExceedMaxLockedMemory");

  // Create some test files that will exceed total cache capacity
  final int numFiles = 5;
  final long fileSize = CACHE_CAPACITY / (numFiles-1);

  final Path[] testFiles = new Path[numFiles];
  final HdfsBlockLocation[][] fileLocs = new HdfsBlockLocation[numFiles][];
  final long[] fileSizes = new long[numFiles];
  for (int i=0; i<numFiles; i++) {
    testFiles[i] = new Path("/testFilesExceedMaxLockedMemory-" + i);
    DFSTestUtil.createFile(fs, testFiles[i], fileSize, (short)1, 0xDFAL);
    fileLocs[i] = (HdfsBlockLocation[])fs.getFileBlockLocations(
        testFiles[i], 0, fileSize);
    // Get the file size (sum of blocks)
    long[] sizes = getBlockSizes(fileLocs[i]);
    for (int j=0; j<sizes.length; j++) {
      fileSizes[i] += sizes[j];
    }
  }

  // Cache the first n-1 files
  long total = 0;
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(cacheBlocks(fileLocs[i]));
    total = DFSTestUtil.verifyExpectedCacheUsage(
        rounder.round(total + fileSizes[i]), 4 * (i + 1), fsd);
  }

  // nth file should hit a capacity exception
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      int lines = appender.countLinesWithMessage(
          "more bytes in the cache: " +
          DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY);
      return lines > 0;
    }
  }, 500, 30000);
  // Also check the metrics for the failure
  assertTrue("Expected more than 0 failed cache attempts",
      fsd.getNumBlocksFailedToCache() > 0);

  // Uncache the n-1 files
  int curCachedBlocks = 16;
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(uncacheBlocks(fileLocs[i]));
    long uncachedBytes = rounder.round(fileSizes[i]);
    total -= uncachedBytes;
    curCachedBlocks -= uncachedBytes / BLOCK_SIZE;
    DFSTestUtil.verifyExpectedCacheUsage(total, curCachedBlocks, fsd);
  }
  LOG.info("finishing testFilesExceedMaxLockedMemory");
}
Example 4
Source File: TestFsDatasetCacheRevocation.java From hadoop with Apache License 2.0 | 4 votes
/**
 * Test that when a client has a replica mmapped, we will not un-mlock that
 * replica for a reasonable amount of time, even if an uncache request
 * occurs.
 */
@Test(timeout=120000)
public void testPinning() throws Exception {
  assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
  Configuration conf = getDefaultConf();
  // Set a really long revocation timeout, so that we won't reach it during
  // this test.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS,
      1800000L);
  // Poll very often
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS, 2L);
  MiniDFSCluster cluster = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem dfs = cluster.getFileSystem();

  // Create and cache a file.
  final String TEST_FILE = "/test_file";
  DFSTestUtil.createFile(dfs, new Path(TEST_FILE),
      BLOCK_SIZE, (short)1, 0xcafe);
  dfs.addCachePool(new CachePoolInfo("pool"));
  long cacheDirectiveId = dfs.addCacheDirective(
      new CacheDirectiveInfo.Builder().
          setPool("pool").setPath(new Path(TEST_FILE)).
          setReplication((short) 1).build());
  FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
  DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);

  // Mmap the file.
  FSDataInputStream in = dfs.open(new Path(TEST_FILE));
  ByteBuffer buf =
      in.read(null, BLOCK_SIZE, EnumSet.noneOf(ReadOption.class));

  // Attempt to uncache file. The file should still be cached.
  dfs.removeCacheDirective(cacheDirectiveId);
  Thread.sleep(500);
  DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);

  // Un-mmap the file. The file should be uncached after this.
  in.releaseBuffer(buf);
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);

  // Cleanup
  in.close();
  cluster.shutdown();
}
Example 5
Source File: TestFsDatasetCacheRevocation.java From hadoop with Apache License 2.0 | 4 votes
/**
 * Test that when we have an uncache request, and the client refuses to
 * release the replica for a long time, we will un-mlock it.
 */
@Test(timeout=120000)
public void testRevocation() throws Exception {
  assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
  BlockReaderTestUtil.enableHdfsCachingTracing();
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  Configuration conf = getDefaultConf();
  // Set a really short revocation timeout.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS, 250L);
  // Poll very often
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS, 2L);
  MiniDFSCluster cluster = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem dfs = cluster.getFileSystem();

  // Create and cache a file.
  final String TEST_FILE = "/test_file2";
  DFSTestUtil.createFile(dfs, new Path(TEST_FILE),
      BLOCK_SIZE, (short)1, 0xcafe);
  dfs.addCachePool(new CachePoolInfo("pool"));
  long cacheDirectiveId = dfs.addCacheDirective(
      new CacheDirectiveInfo.Builder().
          setPool("pool").setPath(new Path(TEST_FILE)).
          setReplication((short) 1).build());
  FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
  DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);

  // Mmap the file.
  FSDataInputStream in = dfs.open(new Path(TEST_FILE));
  ByteBuffer buf =
      in.read(null, BLOCK_SIZE, EnumSet.noneOf(ReadOption.class));

  // Attempt to uncache file. The file should get uncached.
  LOG.info("removing cache directive {}", cacheDirectiveId);
  dfs.removeCacheDirective(cacheDirectiveId);
  LOG.info("finished removing cache directive {}", cacheDirectiveId);
  Thread.sleep(1000);
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);

  // Cleanup
  in.releaseBuffer(buf);
  in.close();
  cluster.shutdown();
}