Java Code Examples for org.apache.hadoop.hdfs.server.datanode.DataNode#getDatanodeId()
The following examples show how to use org.apache.hadoop.hdfs.server.datanode.DataNode#getDatanodeId().
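Before the full tests, here is the pattern every example below shares: take the DatanodeID from a live DataNode in a MiniDFSCluster and resolve the NameNode-side DatanodeDescriptor for it. This is a minimal sketch, not taken from any of the source files; the single-node cluster setup and the printed message are illustrative, and it assumes the hadoop-hdfs test artifact is on the classpath (MiniDFSCluster is a test utility). Every call used here appears verbatim in the examples that follow.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class GetDatanodeIdSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DataNode dn = cluster.getDataNodes().get(0);

      // The DatanodeID is how this DataNode is identified to the NameNode.
      DatanodeID dnId = dn.getDatanodeId();

      // Resolve the NameNode-side descriptor for the same node.
      DatanodeManager dm =
          cluster.getNamesystem().getBlockManager().getDatanodeManager();
      DatanodeDescriptor descriptor = dm.getDatanode(dnId);
      System.out.println("Registered storages: "
          + descriptor.getStorageInfos().length);
    } finally {
      cluster.shutdown();
    }
  }
}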
Example 1
Source File: TestNameNodePrunesMissingStorages.java From hadoop with Apache License 2.0
private static void runTest(final String testCaseName, final boolean createFiles,
    final int numInitialStorages, final int expectedStoragesAfterTest)
    throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .storagesPerDatanode(numInitialStorages)
        .build();
    cluster.waitActive();
    final DataNode dn0 = cluster.getDataNodes().get(0);

    // Ensure NN knows about the storage.
    final DatanodeID dnId = dn0.getDatanodeId();
    final DatanodeDescriptor dnDescriptor = cluster.getNamesystem()
        .getBlockManager().getDatanodeManager().getDatanode(dnId);
    assertThat(dnDescriptor.getStorageInfos().length, is(numInitialStorages));
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    final DatanodeRegistration dnReg = dn0.getDNRegistrationForBP(bpid);
    DataNodeTestUtils.triggerBlockReport(dn0);

    if (createFiles) {
      final Path path = new Path("/", testCaseName);
      DFSTestUtil.createFile(
          cluster.getFileSystem(), path, 1024, (short) 1, 0x1BAD5EED);
      DataNodeTestUtils.triggerBlockReport(dn0);
    }

    // Generate a fake StorageReport that is missing one storage.
    final StorageReport reports[] =
        dn0.getFSDataset().getStorageReports(bpid);
    final StorageReport prunedReports[] =
        new StorageReport[numInitialStorages - 1];
    System.arraycopy(reports, 0, prunedReports, 0, prunedReports.length);

    // Stop the DataNode and send fake heartbeat with missing storage.
    cluster.stopDataNode(0);
    cluster.getNameNodeRpc().sendHeartbeat(
        dnReg, prunedReports, 0L, 0L, 0, 0, 0, null);

    // Check that the missing storage was pruned.
    assertThat(dnDescriptor.getStorageInfos().length,
        is(expectedStoragesAfterTest));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
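Note the pattern here: dn0.getDatanodeId() supplies the key for DatanodeManager.getDatanode(dnId), so the same DatanodeDescriptor can be re-checked after the pruned heartbeat to confirm the NameNode dropped the storage that went missing from the report.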
Example 2
Source File: TestBlocksWithNotEnoughRacks.java From hadoop with Apache License 2.0
@Test
public void testReplDueToNodeFailRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 3;
  final Path filePath = new Path("/testFile");
  // The last two datanodes are on a different rack.
  String racks[] = {"/rack1", "/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();

  try {
    // Create a file with one block with a replication factor of 3.
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Make the last datanode look like it failed to heartbeat by
    // stopping it and calling removeDatanode.
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    int idx = datanodes.size() - 1;
    DataNode dataNode = datanodes.get(idx);
    DatanodeID dnId = dataNode.getDatanodeId();
    cluster.stopDataNode(idx);
    dm.removeDatanode(dnId);

    // The block should still have sufficient # replicas, across racks.
    // The last node may not have contained a replica, but if it did
    // it should have been replicated within the same rack.
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Fail the last datanode again; it's also on /rack2, so there is
    // only 1 rack left for all the replicas.
    datanodes = cluster.getDataNodes();
    idx = datanodes.size() - 1;
    dataNode = datanodes.get(idx);
    dnId = dataNode.getDatanodeId();
    cluster.stopDataNode(idx);
    dm.removeDatanode(dnId);

    // Make sure we have enough live replicas even though we are
    // short one rack and therefore need one more replica on a new rack.
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);
  } finally {
    cluster.shutdown();
  }
}
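The DatanodeID is deliberately captured with getDatanodeId() before cluster.stopDataNode(idx), and dm.removeDatanode(dnId) then marks the node dead at the NameNode right away, simulating a heartbeat failure without waiting out the timeout. The test can then assert that re-replication honors the rack placement policy.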
Example 3
Source File: TestBlocksWithNotEnoughRacks.java From hadoop with Apache License 2.0
@Test
public void testReduceReplFactorDueToRejoinRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");
  // Last datanode is on a different rack.
  String racks[] = {"/rack1", "/rack1", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();

  try {
    // Create a file with one block.
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Make the last (cross rack) datanode look like it failed
    // to heartbeat by stopping it and calling removeDatanode.
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(3, datanodes.size());
    DataNode dataNode = datanodes.get(2);
    DatanodeID dnId = dataNode.getDatanodeId();
    cluster.stopDataNode(2);
    dm.removeDatanode(dnId);

    // The block gets re-replicated to another datanode so it has a
    // sufficient # replicas, but not across racks, so there should
    // be 1 rack, and 1 needed replica (even though there are 2 hosts
    // available and only 2 replicas required).
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);

    // Start the "failed" datanode, which has a replica so the block is
    // now over-replicated and therefore a replica should be removed but
    // not on the restarted datanode as that would violate the rack policy.
    String rack2[] = {"/rack2"};
    cluster.startDataNodes(conf, 1, true, null, rack2);
    cluster.waitActive();

    // The block now has sufficient # replicas, across racks.
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
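This example reuses the stop-plus-removeDatanode(dnId) sequence from Example 2, then restarts a node on /rack2 to exercise the opposite direction: deleting an excess replica must also honor the rack policy, so the replica on the only /rack2 host survives.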