org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker.
You can follow the Source File link above each example to view the full source in the original project.
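Outside of tests, the checker is normally built from an HDFS configuration and polled for disk headroom on the NameNode's metadata directories. Below is a minimal sketch of that direct usage, assuming the public Configuration-based constructor and the hasAvailableDiskSpace() method seen in the examples that follow; the class name and storage path are placeholders, not taken from any of the source files on this page.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker;

public class ResourceCheckSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new HdfsConfiguration();
    // Placeholder metadata directory; point this at a real dfs.namenode.name.dir.
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, "/tmp/hadoop/name");

    NameNodeResourceChecker checker = new NameNodeResourceChecker(conf);
    if (checker.hasAvailableDiskSpace()) {
      System.out.println("NameNode storage volumes have sufficient space.");
    } else {
      // When this returns false, the NameNode's resource monitor
      // puts the namesystem into safe mode.
      System.out.println("NameNode storage volumes are low on space.");
    }
  }
}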
Example #1
Source File: TestNNHealthCheck.java From hadoop with Apache License 2.0
private void doNNHealthCheckTest() throws IOException {
  NameNodeResourceChecker mockResourceChecker = Mockito.mock(
      NameNodeResourceChecker.class);
  Mockito.doReturn(true).when(mockResourceChecker).hasAvailableDiskSpace();
  cluster.getNameNode(0).getNamesystem()
      .setNNResourceChecker(mockResourceChecker);

  NNHAServiceTarget haTarget = new NNHAServiceTarget(conf,
      DFSUtil.getNamenodeNameServiceId(conf), "nn1");
  HAServiceProtocol rpc = haTarget.getHealthMonitorProxy(conf, conf.getInt(
      HA_HM_RPC_TIMEOUT_KEY, HA_HM_RPC_TIMEOUT_DEFAULT));

  // Should not throw error, which indicates healthy.
  rpc.monitorHealth();

  Mockito.doReturn(false).when(mockResourceChecker).hasAvailableDiskSpace();
  try {
    // Should throw error - NN is unhealthy.
    rpc.monitorHealth();
    fail("Should not have succeeded in calling monitorHealth");
  } catch (HealthCheckFailedException hcfe) {
    GenericTestUtils.assertExceptionContains(
        "The NameNode has no resources available", hcfe);
  } catch (RemoteException re) {
    GenericTestUtils.assertExceptionContains(
        "The NameNode has no resources available",
        re.unwrapRemoteException(HealthCheckFailedException.class));
  }
}
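The helper above depends on surrounding test state: the cluster and conf fields and the HA_HM_RPC_TIMEOUT_* constants (typically statically imported from Hadoop's CommonConfigurationKeys). The following is a hedged sketch of how a driver might set that state up, modeled on the MiniDFSCluster pattern in Example #2; the method names and annotations here are illustrative assumptions, not copied from the original test file.

// Illustrative driver for doNNHealthCheckTest() (names are assumptions).
private Configuration conf;
private MiniDFSCluster cluster;

@Before
public void setup() throws IOException {
  conf = new Configuration();
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(0)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .build();
}

@After
public void shutdown() {
  if (cluster != null) {
    cluster.shutdown();
  }
}

@Test
public void testNNHealthCheck() throws IOException {
  doNNHealthCheckTest();
}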
Example #2
Source File: TestNNHealthCheck.java From big-c with Apache License 2.0
@Test
public void testNNHealthCheck() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .build();

    NameNodeResourceChecker mockResourceChecker = Mockito.mock(
        NameNodeResourceChecker.class);
    Mockito.doReturn(true).when(mockResourceChecker).hasAvailableDiskSpace();
    cluster.getNameNode(0).getNamesystem()
        .setNNResourceChecker(mockResourceChecker);

    NamenodeProtocols rpc = cluster.getNameNodeRpc(0);

    // Should not throw error, which indicates healthy.
    rpc.monitorHealth();

    Mockito.doReturn(false).when(mockResourceChecker).hasAvailableDiskSpace();
    try {
      // Should throw error - NN is unhealthy.
      rpc.monitorHealth();
      fail("Should not have succeeded in calling monitorHealth");
    } catch (HealthCheckFailedException hcfe) {
      GenericTestUtils.assertExceptionContains(
          "The NameNode has no resources available", hcfe);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #3
Source File: HdfsTestUtil.java From lucene-solr with Apache License 2.0
/**
 * Ensure that the tests are picking up the modified Hadoop classes
 */
private static void checkOverriddenHadoopClasses() {
  List<Class<?>> modifiedHadoopClasses = Arrays.asList(BlockPoolSlice.class,
      DiskChecker.class, FileUtil.class, HardLink.class, HttpServer2.class,
      NameNodeResourceChecker.class, RawLocalFileSystem.class);
  for (Class<?> clazz : modifiedHadoopClasses) {
    try {
      LuceneTestCase.assertNotNull("Field on " + clazz.getCanonicalName() +
          " should not have been null",
          clazz.getField(SOLR_HACK_FOR_CLASS_VERIFICATION_FIELD));
    } catch (NoSuchFieldException e) {
      LuceneTestCase.fail("Expected to load Solr modified Hadoop class " +
          clazz.getCanonicalName() + " , but it was not found.");
    }
  }
}
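Example #3 passes only when each listed class on the test classpath is Solr's patched copy, i.e. when the class declares a public marker field whose name matches the SOLR_HACK_FOR_CLASS_VERIFICATION_FIELD constant. The sketch below demonstrates that reflective marker-field pattern in isolation; the field name used here is an assumption inferred from the constant's name, and only the name and public visibility matter to Class.getField().

import java.lang.reflect.Field;

// Self-contained sketch of the marker-field check used in Example #3.
public class MarkerFieldSketch {
  // Stand-in for a Solr-patched Hadoop class; the field name is an assumption.
  public static class PatchedClass {
    public static final Object SOLR_HACK_FOR_CLASS_VERIFICATION = new Object();
  }

  public static void main(String[] args) throws NoSuchFieldException {
    Field marker = PatchedClass.class.getField("SOLR_HACK_FOR_CLASS_VERIFICATION");
    System.out.println("Found marker field: " + marker.getName());
    // If the stock Hadoop class were on the classpath instead of the patched
    // copy, getField() would throw NoSuchFieldException and the check would fail.
  }
}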