Java Code Examples for org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter#spyOnFsLock()
The following examples show how to use org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter#spyOnFsLock().
You can follow the links above each example to view the original project or source file.
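All of the examples follow the same pattern: spy on the FSNamesystem lock of a MiniDFSCluster's NameNode, then use Mockito to stub writeLock() on the returned spy. Conceptually, spyOnFsLock() wraps the namesystem's ReentrantReadWriteLock in a Mockito spy and installs the spy back into the namesystem, so every lock acquisition made by the NameNode is observable from the test. The sketch below illustrates that idea; the getFsLockForTests()/setFsLockForTests() accessors are assumed test-only hooks on FSNamesystem and may differ from the real NameNodeAdapter implementation.

import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.mockito.Mockito;

public final class FsLockSpy {
  /**
   * Sketch only: replace the namesystem's coarse-grained lock with a Mockito
   * spy and return it. The accessor names below are assumptions, not
   * necessarily the real FSNamesystem API.
   */
  public static ReentrantReadWriteLock spyOnFsLock(FSNamesystem fsn) {
    // Wrap the existing lock in a spy so calls can be stubbed or verified...
    ReentrantReadWriteLock spy = Mockito.spy(fsn.getFsLockForTests());
    // ...and install the spy so all NameNode lock acquisitions go through it.
    fsn.setFsLockForTests(spy);
    return spy;
  }
}

Once the spy is installed, stubbing such as Mockito.doAnswer(...).when(spyLock).writeLock() applies to every write-lock acquisition the NameNode makes, which is how the examples below inject assertions or delays into the locking path.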
Example 1
Source File: TestSetTimes.java From hadoop with Apache License 2.0
/**
 * Test that when access time updates are not needed, the FSNamesystem
 * write lock is not taken by getBlockLocations.
 * Regression test for HDFS-3981.
 */
@Test(timeout=60000)
public void testGetBlockLocationsOnlyUsesReadLock() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 100*1000);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(0)
      .build();
  ReentrantReadWriteLock spyLock = NameNodeAdapter.spyOnFsLock(cluster.getNamesystem());
  try {
    // Create empty file in the FSN.
    Path p = new Path("/empty-file");
    DFSTestUtil.createFile(cluster.getFileSystem(), p, 0, (short)1, 0L);

    // getBlockLocations() should not need the write lock, since we just created
    // the file (and thus its access time is already within the 100-second
    // accesstime precision configured above).
    MockitoUtil.doThrowWhenCallStackMatches(
        new AssertionError("Should not need write lock"),
        ".*getBlockLocations.*")
        .when(spyLock).writeLock();
    cluster.getFileSystem().getFileBlockLocations(p, 0, 100);
  } finally {
    cluster.shutdown();
  }
}
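The MockitoUtil.doThrowWhenCallStackMatches() helper used above arms the spy so that the supplied error is thrown only when the current call stack matches the given regular expression; otherwise the real lock method runs normally. The following is a minimal sketch of such a helper under that assumption (the actual MockitoUtil implementation in the Hadoop test tree may differ in details):

import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.mockito.stubbing.Stubber;

public final class CallStackMatchers {
  /**
   * Sketch only: return a Stubber that throws {@code t} when some frame of
   * the current call stack matches {@code pattern}; otherwise the real
   * method is invoked.
   */
  public static Stubber doThrowWhenCallStackMatches(
      final Throwable t, final String pattern) {
    return Mockito.doAnswer(new Answer<Object>() {
      @Override
      public Object answer(InvocationOnMock invocation) throws Throwable {
        for (StackTraceElement frame : Thread.currentThread().getStackTrace()) {
          if (frame.toString().matches(pattern)) {
            throw t; // e.g. the AssertionError from the test above
          }
        }
        return invocation.callRealMethod();
      }
    });
  }
}

In the test, the pattern ".*getBlockLocations.*" means the spy's writeLock() only fails when it is reached from getBlockLocations(), so other code paths that legitimately need the write lock keep working.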
Example 2
Source File: TestSetTimes.java From big-c with Apache License 2.0
/**
 * Test that when access time updates are not needed, the FSNamesystem
 * write lock is not taken by getBlockLocations.
 * Regression test for HDFS-3981.
 */
@Test(timeout=60000)
public void testGetBlockLocationsOnlyUsesReadLock() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 100*1000);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(0)
      .build();
  ReentrantReadWriteLock spyLock = NameNodeAdapter.spyOnFsLock(cluster.getNamesystem());
  try {
    // Create empty file in the FSN.
    Path p = new Path("/empty-file");
    DFSTestUtil.createFile(cluster.getFileSystem(), p, 0, (short)1, 0L);

    // getBlockLocations() should not need the write lock, since we just created
    // the file (and thus its access time is already within the 100-second
    // accesstime precision configured above).
    MockitoUtil.doThrowWhenCallStackMatches(
        new AssertionError("Should not need write lock"),
        ".*getBlockLocations.*")
        .when(spyLock).writeLock();
    cluster.getFileSystem().getFileBlockLocations(p, 0, 100);
  } finally {
    cluster.shutdown();
  }
}
Example 3
Source File: TestHAStateTransitions.java From hadoop with Apache License 2.0
/**
 * Regression test for HDFS-2693: when doing state transitions, we need to
 * lock the FSNamesystem so that we don't end up doing any writes while it's
 * "in between" states.
 * This test case starts up several client threads which do mutation operations
 * while flipping a NN back and forth from active to standby.
 */
@Test(timeout=120000)
public void testTransitionSynchronization() throws Exception {
  Configuration conf = new Configuration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0)
      .build();
  try {
    cluster.waitActive();
    ReentrantReadWriteLock spyLock = NameNodeAdapter.spyOnFsLock(
        cluster.getNameNode(0).getNamesystem());
    Mockito.doAnswer(new GenericTestUtils.SleepAnswer(50))
        .when(spyLock).writeLock();

    final FileSystem fs = HATestUtil.configureFailoverFs(
        cluster, conf);

    TestContext ctx = new TestContext();
    for (int i = 0; i < 50; i++) {
      final int finalI = i;
      ctx.addThread(new RepeatingTestThread(ctx) {
        @Override
        public void doAnAction() throws Exception {
          Path p = new Path("/test-" + finalI);
          fs.mkdirs(p);
          fs.delete(p, true);
        }
      });
    }

    ctx.addThread(new RepeatingTestThread(ctx) {
      @Override
      public void doAnAction() throws Exception {
        cluster.transitionToStandby(0);
        Thread.sleep(50);
        cluster.transitionToActive(0);
      }
    });
    ctx.startThreads();
    ctx.waitFor(20000);
    ctx.stop();
  } finally {
    cluster.shutdown();
  }
}
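Here the spy is stubbed with GenericTestUtils.SleepAnswer(50), which (as used above) delays each writeLock() call by a short random time before delegating to the real lock, widening the window in which a badly synchronized state transition would let a mutation slip through. A rough sketch of an Answer with that behavior, under the assumption that this is what SleepAnswer does, is shown below; RandomSleepAnswer is a hypothetical stand-in name.

import java.util.Random;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

/** Sketch only: delay each stubbed call randomly, then call the real method. */
public class RandomSleepAnswer implements Answer<Object> {
  private static final Random RANDOM = new Random();
  private final int maxSleepTimeMs;

  public RandomSleepAnswer(int maxSleepTimeMs) {
    this.maxSleepTimeMs = maxSleepTimeMs;
  }

  @Override
  public Object answer(InvocationOnMock invocation) throws Throwable {
    // Sleep 0..maxSleepTimeMs ms to perturb thread interleavings...
    Thread.sleep(RANDOM.nextInt(maxSleepTimeMs + 1));
    // ...then let the spied lock do its normal work.
    return invocation.callRealMethod();
  }
}

Installed via Mockito.doAnswer(...) on the spy returned by spyOnFsLock(), every writeLock() call in the NameNode becomes a natural point for the competing mutation threads and the active/standby transition thread to interleave.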
Example 4
Source File: TestHAStateTransitions.java From big-c with Apache License 2.0
/**
 * Regression test for HDFS-2693: when doing state transitions, we need to
 * lock the FSNamesystem so that we don't end up doing any writes while it's
 * "in between" states.
 * This test case starts up several client threads which do mutation operations
 * while flipping a NN back and forth from active to standby.
 */
@Test(timeout=120000)
public void testTransitionSynchronization() throws Exception {
  Configuration conf = new Configuration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0)
      .build();
  try {
    cluster.waitActive();
    ReentrantReadWriteLock spyLock = NameNodeAdapter.spyOnFsLock(
        cluster.getNameNode(0).getNamesystem());
    Mockito.doAnswer(new GenericTestUtils.SleepAnswer(50))
        .when(spyLock).writeLock();

    final FileSystem fs = HATestUtil.configureFailoverFs(
        cluster, conf);

    TestContext ctx = new TestContext();
    for (int i = 0; i < 50; i++) {
      final int finalI = i;
      ctx.addThread(new RepeatingTestThread(ctx) {
        @Override
        public void doAnAction() throws Exception {
          Path p = new Path("/test-" + finalI);
          fs.mkdirs(p);
          fs.delete(p, true);
        }
      });
    }

    ctx.addThread(new RepeatingTestThread(ctx) {
      @Override
      public void doAnAction() throws Exception {
        cluster.transitionToStandby(0);
        Thread.sleep(50);
        cluster.transitionToActive(0);
      }
    });
    ctx.startThreads();
    ctx.waitFor(20000);
    ctx.stop();
  } finally {
    cluster.shutdown();
  }
}