Java Code Examples for org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration#getDatanodeUuid()
The following examples show how to use org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration#getDatanodeUuid().
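As a quick orientation before the full examples, here is a minimal sketch of the usual way to reach getDatanodeUuid() in an HDFS test: start a MiniDFSCluster, fetch the DatanodeRegistration for a block pool with DataNodeTestUtils.getDNRegistrationForBP (the same helper Example 3 uses), and read the UUID. The single-datanode setup and the waitActive() call are illustrative assumptions, not taken from the examples below.

// Minimal sketch: obtain a DatanodeRegistration from a running MiniDFSCluster
// and read its datanode UUID. Setup details not shown in the examples below
// (single datanode, waitActive) are assumptions.
@Test
public void readDatanodeUuid() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    String bpid = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(dn, bpid);
    // The UUID identifies this datanode independently of its hostname and ports.
    System.out.println("Datanode UUID: " + dnReg.getDatanodeUuid());
  } finally {
    cluster.shutdown();
  }
}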
Example 1
Source File: DataNode.java From hadoop with Apache License 2.0
/**
 * Check that the registration returned from a NameNode is consistent
 * with the information in the storage. If the storage is fresh/unformatted,
 * sets the storage ID based on this registration.
 * Also updates the block pool's state in the secret manager.
 */
synchronized void bpRegistrationSucceeded(DatanodeRegistration bpRegistration,
    String blockPoolId) throws IOException {
  // Set the ID if we haven't already
  if (null == id) {
    id = bpRegistration;
  }

  if (!storage.getDatanodeUuid().equals(bpRegistration.getDatanodeUuid())) {
    throw new IOException("Inconsistent Datanode IDs. Name-node returned "
        + bpRegistration.getDatanodeUuid() + ". Expecting "
        + storage.getDatanodeUuid());
  }

  registerBlockPoolWithSecretManager(bpRegistration, blockPoolId);
}
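Here the UUID carried by the NameNode-returned registration is compared against the UUID persisted in the datanode's storage directories; a mismatch means the registration does not correspond to the identity recorded on disk, so the method fails with an IOException instead of proceeding.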
Example 2
Source File: DataNode.java From big-c with Apache License 2.0 (same method as Example 1)
Example 3
Source File: TestOverReplicatedBlocks.java From hadoop with Apache License 2.0
/**
 * The test verifies that replica for deletion is chosen on a node,
 * with the oldest heartbeat, when this heartbeat is larger than the
 * tolerable heartbeat interval.
 * It creates a file with several blocks and replication 4.
 * The last DN is configured to send heartbeats rarely.
 *
 * Test waits until the tolerable heartbeat interval expires, and reduces
 * replication of the file. All replica deletions should be scheduled for the
 * last node. No replicas will actually be deleted, since last DN doesn't
 * send heartbeats.
 */
@Test
public void testChooseReplicaToDelete() throws Exception {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK_SIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    fs = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300);
    cluster.startDataNodes(conf, 1, true, null, null, null);
    DataNode lastDN = cluster.getDataNodes().get(3);
    DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
        lastDN, namesystem.getBlockPoolId());
    String lastDNid = dnReg.getDatanodeUuid();

    final Path fileName = new Path("/foo2");
    DFSTestUtil.createFile(fs, fileName, SMALL_FILE_LENGTH, (short)4, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short)4);

    // Wait for tolerable number of heartbeats plus one
    DatanodeDescriptor nodeInfo = null;
    long lastHeartbeat = 0;
    long waitTime = DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 *
        (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
    do {
      nodeInfo = namesystem.getBlockManager().getDatanodeManager()
          .getDatanode(dnReg);
      lastHeartbeat = nodeInfo.getLastUpdateMonotonic();
    } while (monotonicNow() - lastHeartbeat < waitTime);
    fs.setReplication(fileName, (short)3);

    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(fileName), 0, Long.MAX_VALUE);

    // All replicas for deletion should be scheduled on lastDN.
    // And should not actually be deleted, because lastDN does not heartbeat.
    namesystem.readLock();
    Collection<Block> dnBlocks =
        namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
    assertEquals("Replicas on node " + lastDNid + " should have been deleted",
        SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks.size());
    namesystem.readUnlock();
    for (BlockLocation location : locs)
      assertEquals("Block should still have 4 replicas",
          4, location.getNames().length);
  } finally {
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}
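Note that the string returned by getDatanodeUuid() is used directly as the lookup key into the block manager's excessReplicateMap, which is how the test locates the replicas scheduled for deletion on the slow datanode.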
Example 4
Source File: TestOverReplicatedBlocks.java From big-c with Apache License 2.0 (same test as Example 3)