Java Code Examples for org.apache.hadoop.hdfs.DFSClient#datanodeReport()
The following examples show how to use org.apache.hadoop.hdfs.DFSClient#datanodeReport(). The source file, originating project, and license are noted above each example.
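Before the project examples, here is a minimal, self-contained sketch of the call itself. It is an illustration, not taken from any project below: the NameNode address localhost:8020, the class name DatanodeReportDemo, and the printed fields are assumptions, and the imports follow the Hadoop 2.x package layout used in Examples 4 and 5.

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class DatanodeReportDemo {
  public static void main(String[] args) throws Exception {
    // Assumption: the NameNode RPC endpoint; adjust for your cluster.
    InetSocketAddress nameNode = new InetSocketAddress("localhost", 8020);
    DFSClient client = new DFSClient(nameNode, new Configuration());
    try {
      // Ask the NameNode for a report covering all live datanodes.
      DatanodeInfo[] live = client.datanodeReport(DatanodeReportType.LIVE);
      for (DatanodeInfo node : live) {
        System.out.println(node.getHostName() + " capacity=" + node.getCapacity());
      }
    } finally {
      client.close();
    }
  }
}

DatanodeReportType also offers ALL and DEAD; the examples below all request LIVE because they need nodes that can still be decommissioned or contacted.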
Example 1
Source File: TestBlockCopier.java From RDFS with Apache License 2.0
private String decommissionOneNode() throws IOException {
  DFSClient client = ((DistributedFileSystem) fileSys).getClient();
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  int index = 0;
  boolean found = false;
  while (!found) {
    index = rand.nextInt(info.length);
    if (!info[index].isDecommissioned() && !info[index].isDecommissionInProgress()) {
      found = true;
    }
  }
  String nodename = info[index].getName();
  System.out.println("Decommissioning node: " + nodename);
  // write nodename into the exclude file.
  decommissionedNodes.add(nodename);
  writeExcludesFileAndRefresh(decommissionedNodes);
  return nodename;
}
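This RDFS helper picks a random live datanode that is neither decommissioned nor already being decommissioned, records its name in the exclude list, and refreshes the NameNode so decommissioning begins.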
Example 2
Source File: TestDecommissioningStatus.java From hadoop with Apache License 2.0
private String decommissionNode(FSNamesystem namesystem, DFSClient client,
    FileSystem localFileSys, int nodeIndex) throws IOException {
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  String nodename = info[nodeIndex].getXferAddr();
  decommissionNode(namesystem, localFileSys, nodename);
  return nodename;
}
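Here the live-node report serves only to translate a node index into a transfer address, which is handed to an overload of decommissionNode that does the actual work.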
Example 3
Source File: TestDecommissioningStatus.java From big-c with Apache License 2.0
The code is identical to Example 2 above; only the originating project differs.
Example 4
Source File: ClusterStatusServlet.java From terrapin with Apache License 2.0
/**
 * Get all data nodes.
 * @param hdfsClient client instance for HDFS
 * @return live data nodes
 * @throws IOException if the client fails to communicate with the server
 */
public static List<String> getAllNodeNames(DFSClient hdfsClient) throws IOException {
  DatanodeInfo[] allNodes = hdfsClient.datanodeReport(HdfsConstants.DatanodeReportType.LIVE);
  List<String> allNodeNames = new ArrayList<String>(allNodes.length);
  for (DatanodeInfo nodeInfo : allNodes) {
    allNodeNames.add(TerrapinUtil.getHelixInstanceFromHDFSHost(nodeInfo.getHostName()));
  }
  return allNodeNames;
}
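Each live datanode's hostname is translated into a Helix instance name, letting the Terrapin status servlet report HDFS cluster membership in Helix terms.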
Example 5
Source File: RestartRandomDataNodeAction.java From hbase with Apache License 2.0
public ServerName[] getDataNodes() throws IOException {
  DistributedFileSystem fs = (DistributedFileSystem) CommonFSUtils.getRootDir(getConf())
      .getFileSystem(getConf());
  DFSClient dfsClient = fs.getClient();
  List<ServerName> hosts = new LinkedList<>();
  for (DatanodeInfo dataNode : dfsClient.datanodeReport(HdfsConstants.DatanodeReportType.LIVE)) {
    hosts.add(ServerName.valueOf(dataNode.getHostName(), -1, -1));
  }
  return hosts.toArray(new ServerName[0]);
}
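This HBase chaos action only needs hostnames for the datanodes it may restart, so each live node is wrapped in a ServerName with -1 as placeholder port and start code.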
Example 6
Source File: TestDecommissioningStatus.java From RDFS with Apache License 2.0
private String decommissionNode(FSNamesystem namesystem, Configuration conf,
    DFSClient client, FileSystem localFileSys, int nodeIndex) throws IOException {
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  String nodename = info[nodeIndex].getName();
  System.out.println("Decommissioning node: " + nodename);
  // write nodename into the exclude file.
  ArrayList<String> nodes = new ArrayList<String>(decommissionedNodes);
  nodes.add(nodename);
  writeConfigFile(localFileSys, excludeFile, nodes);
  namesystem.refreshNodes(conf);
  return nodename;
}
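Compared with Example 2, this older RDFS variant writes the exclude file and calls refreshNodes inline instead of delegating to a shared helper.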
Example 7
Source File: TestDecommissioningStatus.java From hadoop with Apache License 2.0
/**
 * Tests Decommissioning Status in DFS.
 */
@Test
public void testDecommissionStatus() throws Exception {
  InetSocketAddress addr = new InetSocketAddress("localhost",
      cluster.getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  assertEquals("Number of Datanodes ", 2, info.length);
  DistributedFileSystem fileSys = cluster.getFileSystem();
  DFSAdmin admin = new DFSAdmin(cluster.getConfiguration(0));

  short replicas = numDatanodes;
  //
  // Decommission one node. Verify the decommission status
  //
  Path file1 = new Path("decommission.dat");
  writeFile(fileSys, file1, replicas);

  Path file2 = new Path("decommission1.dat");
  FSDataOutputStream st1 = writeIncompleteFile(fileSys, file2, replicas);
  for (DataNode d : cluster.getDataNodes()) {
    DataNodeTestUtils.triggerBlockReport(d);
  }

  FSNamesystem fsn = cluster.getNamesystem();
  final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
  for (int iteration = 0; iteration < numDatanodes; iteration++) {
    String downnode = decommissionNode(fsn, client, localFileSys, iteration);
    dm.refreshNodes(conf);
    decommissionedNodes.add(downnode);
    BlockManagerTestUtil.recheckDecommissionState(dm);
    final List<DatanodeDescriptor> decommissioningNodes = dm.getDecommissioningNodes();
    if (iteration == 0) {
      assertEquals(decommissioningNodes.size(), 1);
      DatanodeDescriptor decommNode = decommissioningNodes.get(0);
      checkDecommissionStatus(decommNode, 3, 0, 1);
      checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 1),
          fileSys, admin);
    } else {
      assertEquals(decommissioningNodes.size(), 2);
      DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
      DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
      // This one is still 3,3,1 since it passed over the UC block
      // earlier, before node 2 was decommed
      checkDecommissionStatus(decommNode1, 3, 3, 1);
      // This one is 4,4,2 since it has the full state
      checkDecommissionStatus(decommNode2, 4, 4, 2);
      checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 2),
          fileSys, admin);
    }
  }
  // Call refreshNodes on FSNamesystem with empty exclude file.
  // This will remove the datanodes from decommissioning list and
  // make them available again.
  writeConfigFile(localFileSys, excludeFile, null);
  dm.refreshNodes(conf);
  st1.close();
  cleanupFile(fileSys, file1);
  cleanupFile(fileSys, file2);
}
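The test decommissions the two datanodes one at a time, explicitly triggering block reports and rechecking decommission state, and verifies the per-node block counts reported while decommissioning is in progress before clearing the exclude file to restore the nodes.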
Example 8
Source File: TestDecommissioningStatus.java From big-c with Apache License 2.0
The code is identical to Example 7 above; only the originating project differs.
Example 9
Source File: TestDecommissioningStatus.java From RDFS with Apache License 2.0
/**
 * Tests Decommissioning Status in DFS.
 */
@Test
public void testDecommissionStatus() throws IOException, InterruptedException {
  InetSocketAddress addr = new InetSocketAddress("localhost",
      cluster.getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  assertEquals("Number of Datanodes ", 2, info.length);
  FileSystem fileSys = cluster.getFileSystem();

  short replicas = 2;
  //
  // Decommission one node. Verify the decommission status
  //
  Path file1 = new Path("decommission.dat");
  writeFile(fileSys, file1, replicas);

  Path file2 = new Path("decommission1.dat");
  FSDataOutputStream st1 = writeIncompleteFile(fileSys, file2, replicas);
  Thread.sleep(5000);

  FSNamesystem fsn = cluster.getNameNode().getNamesystem();
  for (int iteration = 0; iteration < numDatanodes; iteration++) {
    String downnode = decommissionNode(fsn, conf, client, localFileSys, iteration);
    decommissionedNodes.add(downnode);
    Thread.sleep(5000);
    ArrayList<DatanodeDescriptor> decommissioningNodes = fsn.getDecommissioningNodes();
    if (iteration == 0) {
      assertEquals(decommissioningNodes.size(), 1);
      DatanodeDescriptor decommNode = decommissioningNodes.get(0);
      checkDecommissionStatus(decommNode, 4, 0, 2);
    } else {
      assertEquals(decommissioningNodes.size(), 2);
      DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
      DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
      checkDecommissionStatus(decommNode1, 4, 4, 2);
      checkDecommissionStatus(decommNode2, 4, 4, 2);
    }
  }
  // Call refreshNodes on FSNamesystem with empty exclude file.
  // This will remove the datanodes from decommissioning list and
  // make them available again.
  writeConfigFile(localFileSys, excludeFile, null);
  fsn.refreshNodes(conf);
  st1.close();
  cleanupFile(fileSys, file1);
  cleanupFile(fileSys, file2);
  cleanupFile(localFileSys, dir);
}
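This older RDFS version of the test relies on fixed Thread.sleep(5000) pauses instead of explicitly triggering block reports and rechecking decommission state as Example 7 does, which makes it slower and more timing-sensitive.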