org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates.
Each example notes the open-source project and source file it was drawn from.
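In the Hadoop versions these examples come from, AdminStates has three values: NORMAL, DECOMMISSION_INPROGRESS, and DECOMMISSIONED (the PBHelper switches in Examples #8 and #14 enumerate them). As orientation, here is a minimal sketch of how the enum is typically read off a datanode report; the DFSClient plumbing mirrors Example #12, while the wrapping method and counting logic are illustrative, not taken from any file below:

private static void countByAdminState(DFSClient client) throws IOException {
  // Ask the namenode for all live datanodes, then bucket them by admin state.
  DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.LIVE);
  int inProgress = 0;
  int decommissioned = 0;
  for (DatanodeInfo dn : report) {
    switch (dn.getAdminState()) {
    case DECOMMISSION_INPROGRESS:
      inProgress++;
      break;
    case DECOMMISSIONED:
      decommissioned++;
      break;
    default:
      break; // AdminStates.NORMAL, i.e. still in service
    }
  }
  System.out.println(inProgress + " decommissioning, "
      + decommissioned + " decommissioned");
}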
Example #1
Source File: ClusterJspHelper.java From hadoop with Apache License 2.0
/**
 * Get the number of live datanodes.
 *
 * @param json JSON string that contains live node status.
 * @param nn namenode status to return information in
 */
private static void getLiveNodeCount(String json, NamenodeStatus nn)
    throws IOException {
  // Map of datanode host to (map of attribute name to value)
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }

  nn.liveDatanodeCount = nodeMap.size();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    // Inner map of attribute name to value
    Map<String, Object> innerMap = entry.getValue();
    if (innerMap != null) {
      if (innerMap.get("adminState")
          .equals(AdminStates.DECOMMISSIONED.toString())) {
        nn.liveDecomCount++;
      }
    }
  }
}
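The getNodeMap helper that Example #1 and the other ClusterJspHelper examples rely on turns the NameNode's JSON status output into a map keyed by datanode host. The payload itself never appears in these listings; judging only from the attribute lookups in the code ("adminState" here, "excluded" in Example #7, "decommissioned" in Examples #11 and #20), it presumably has roughly this shape. The values and extra attributes are illustrative assumptions, with the admin-state strings assuming AdminStates.toString() yields the display name:

// Assumed (illustrative) shape of the map after getNodeMap(json) parses it:
// {
//   "dn-host-1": { "adminState": "In Service",     "excluded": false, ... },
//   "dn-host-2": { "adminState": "Decommissioned", "excluded": true,  ... }
// }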
Example #2
Source File: TestPBHelper.java From big-c with Apache License 2.0
private LocatedBlock createLocatedBlockNoStorageMedia() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL)
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(),
      new Text("kind"), new Text("service")));
  return lb;
}
Example #3
Source File: TestPBHelper.java From big-c with Apache License 2.0
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(),
      new Text("kind"), new Text("service")));
  return lb;
}
Example #4
Source File: ClusterJspHelper.java From hadoop with Apache License 2.0
/**
 * Get the decommissioning datanode information.
 *
 * @param dataNodeStatusMap map with key being datanode, value being an
 *          inner map (key:namenode, value:decommissioning state).
 * @param host namenode host
 * @param json JSON string
 */
private static void getDecommissionNodeStatus(
    Map<String, Map<String, String>> dataNodeStatusMap, String host,
    String json) throws IOException {
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  List<String> decomming = new ArrayList<String>();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    String dn = entry.getKey();
    decomming.add(dn);
    // nn-status
    Map<String, String> nnStatus = new HashMap<String, String>();
    if (dataNodeStatusMap.containsKey(dn)) {
      nnStatus = dataNodeStatusMap.get(dn);
    }
    nnStatus.put(host, AdminStates.DECOMMISSION_INPROGRESS.toString());
    // dn-nn-status
    dataNodeStatusMap.put(dn, nnStatus);
  }
}
Example #5
Source File: ClusterJspHelper.java From RDFS with Apache License 2.0
/**
 * We process the JSON string returned from http or local fsnamesystem
 * to get the decommissioning datanode information.
 *
 * @param dataNodeStatusMap map with key being datanode, value being an
 *          inner map (key:namenode, value:decommissioning state).
 * @param address namenode address
 * @param json JSON string returned
 */
private static void getDecommissionNodeStatus(
    Map<String, Map<String, String>> dataNodeStatusMap, String address,
    String json) throws IOException {
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  List<String> decomming = new ArrayList<String>();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    String dn = entry.getKey();
    decomming.add(dn);
    // nn-status
    Map<String, String> nnStatus = new HashMap<String, String>();
    if (dataNodeStatusMap.containsKey(dn)) {
      nnStatus = dataNodeStatusMap.get(dn);
    }
    nnStatus.put(address, AdminStates.DECOMMISSION_INPROGRESS.toString());
    // dn-nn-status
    dataNodeStatusMap.put(dn, nnStatus);
  }
}
Example #6
Source File: TestDecommission.java From RDFS with Apache License 2.0
public void testClusterStats(int numNameNodes, boolean federation)
    throws IOException, InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf, federation);

  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);

    NameNode namenode = cluster.getNameNode(i);
    FSNamesystem fsn = namenode.namesystem;
    DatanodeInfo downnode = decommissionNode(i, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, downnode, true);

    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    fsn.refreshNodes(conf);
    DatanodeInfo ret = fsn.getDatanode(downnode);
    waitNodeState(ret, AdminStates.NORMAL);
    verifyStats(namenode, fsn, ret, false);
  }
}
Example #7
Source File: ClusterJspHelper.java From RDFS with Apache License 2.0
/**
 * Process JSON string returned from connection to get the number of
 * live datanodes.
 *
 * @param json JSON output that contains live node status.
 * @param nn namenode status to return information in
 */
private static void getLiveNodeCount(String json, NamenodeStatus nn)
    throws IOException {
  // Map of datanode host to (map of attribute name to value)
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }

  nn.liveDatanodeCount = nodeMap.size();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    // Inner map of attribute name to value
    Map<String, Object> innerMap = entry.getValue();
    if (innerMap != null) {
      if (((String) innerMap.get("adminState"))
          .equals(AdminStates.DECOMMISSIONED.toString())) {
        nn.liveDecomCount++;
      }
      if (((Boolean) innerMap.get("excluded")).booleanValue() == true) {
        nn.liveExcludeCount++;
      }
    }
  }
}
Example #8
Source File: PBHelper.java From big-c with Apache License 2.0
public static DatanodeInfoProto.AdminState convert(
    final DatanodeInfo.AdminStates inAs) {
  switch (inAs) {
  case NORMAL:
    return DatanodeInfoProto.AdminState.NORMAL;
  case DECOMMISSION_INPROGRESS:
    return DatanodeInfoProto.AdminState.DECOMMISSION_INPROGRESS;
  case DECOMMISSIONED:
    return DatanodeInfoProto.AdminState.DECOMMISSIONED;
  default:
    return DatanodeInfoProto.AdminState.NORMAL;
  }
}
Example #9
Source File: TestDecommission.java From RDFS with Apache License 2.0
private void waitNodeState(DatanodeInfo node, AdminStates state)
    throws IOException {
  boolean done = state == node.getAdminState();
  while (!done) {
    LOG.info("Waiting for node " + node + " to change state to "
        + state + " current state: " + node.getAdminState());
    try {
      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
    } catch (InterruptedException e) {
      // nothing
    }
    done = state == node.getAdminState();
  }
  LOG.info("node " + node + " reached the state " + state);
}
Example #10
Source File: ClusterJspHelper.java From big-c with Apache License 2.0
/**
 * Store the live datanode status information into the datanode status map
 * and DecommissionNode.
 *
 * @param statusMap map of datanode status: key is datanode, value is an
 *          inner map whose key is namenode and value is the datanode
 *          status reported by that namenode.
 * @param namenodeHost host name of the namenode
 * @param json JSON string that contains datanode status
 * @throws IOException
 */
private static void getLiveNodeStatus(
    Map<String, Map<String, String>> statusMap, String namenodeHost,
    String json) throws IOException {
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap != null && !nodeMap.isEmpty()) {
    List<String> liveDecommed = new ArrayList<String>();
    for (Map.Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
      Map<String, Object> innerMap = entry.getValue();
      String dn = entry.getKey();
      if (innerMap != null) {
        if (innerMap.get("adminState").equals(
            AdminStates.DECOMMISSIONED.toString())) {
          liveDecommed.add(dn);
        }
        // the inner map key is namenode, value is datanode status.
        Map<String, String> nnStatus = statusMap.get(dn);
        if (nnStatus == null) {
          nnStatus = new HashMap<String, String>();
        }
        nnStatus.put(namenodeHost, (String) innerMap.get("adminState"));
        // map whose key is datanode, value is the inner map.
        statusMap.put(dn, nnStatus);
      }
    }
  }
}
Example #11
Source File: ClusterJspHelper.java From big-c with Apache License 2.0
/**
 * Store the dead datanode information into the datanode status map and
 * DecommissionNode.
 *
 * @param statusMap map with key being datanode, value being an
 *          inner map (key:namenode, value:decommissioning state).
 * @param host datanode hostname
 * @param json JSON string
 * @throws IOException
 */
private static void getDeadNodeStatus(
    Map<String, Map<String, String>> statusMap, String host,
    String json) throws IOException {
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  List<String> deadDn = new ArrayList<String>();
  List<String> deadDecommed = new ArrayList<String>();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    deadDn.add(entry.getKey());
    Map<String, Object> deadNodeDetailMap = entry.getValue();
    String dn = entry.getKey();
    if (deadNodeDetailMap != null && !deadNodeDetailMap.isEmpty()) {
      // NN - status
      Map<String, String> nnStatus = statusMap.get(dn);
      if (nnStatus == null) {
        nnStatus = new HashMap<String, String>();
      }
      if (((Boolean) deadNodeDetailMap.get("decommissioned"))
          .booleanValue() == true) {
        deadDecommed.add(dn);
        nnStatus.put(host, AdminStates.DECOMMISSIONED.toString());
      } else {
        nnStatus.put(host, DEAD);
      }
      // dn-nn-status
      statusMap.put(dn, nnStatus);
    }
  }
}
Example #12
Source File: TestDecommission.java From RDFS with Apache License 2.0
private DatanodeInfo decommissionNode(int nnIndex,
    ArrayList<DatanodeInfo> decommissionedNodes,
    AdminStates waitForState) throws IOException {
  DFSClient client = getDfsClient(cluster.getNameNode(nnIndex), conf);
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);

  // pick one datanode randomly.
  int index = 0;
  boolean found = false;
  while (!found) {
    index = myrand.nextInt(info.length);
    if (!info[index].isDecommissioned()) {
      found = true;
    }
  }
  String nodename = info[index].getName();
  LOG.info("Decommissioning node: " + nodename);

  // write nodename into the exclude file.
  ArrayList<String> nodes = new ArrayList<String>();
  if (decommissionedNodes != null) {
    for (DatanodeInfo dn : decommissionedNodes) {
      nodes.add(dn.getName());
    }
  }
  nodes.add(nodename);
  writeConfigFile(excludeFile, nodes);
  cluster.getNameNode(nnIndex).namesystem.refreshNodes(conf);
  DatanodeInfo ret =
      cluster.getNameNode(nnIndex).namesystem.getDatanode(info[index]);
  waitNodeState(ret, waitForState);
  return ret;
}
Example #13
Source File: TestDecommission.java From big-c with Apache License 2.0
private void waitNodeState(DatanodeInfo node, AdminStates state) {
  boolean done = state == node.getAdminState();
  while (!done) {
    LOG.info("Waiting for node " + node + " to change state to "
        + state + " current state: " + node.getAdminState());
    try {
      Thread.sleep(HEARTBEAT_INTERVAL * 500);
    } catch (InterruptedException e) {
      // nothing
    }
    done = state == node.getAdminState();
  }
  LOG.info("node " + node + " reached the state " + state);
}
Example #14
Source File: PBHelper.java From big-c with Apache License 2.0
public static AdminStates convert(AdminState adminState) {
  switch (adminState) {
  case DECOMMISSION_INPROGRESS:
    return AdminStates.DECOMMISSION_INPROGRESS;
  case DECOMMISSIONED:
    return AdminStates.DECOMMISSIONED;
  case NORMAL:
  default:
    return AdminStates.NORMAL;
  }
}
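Examples #8 and #14 are the two directions of PBHelper's mapping between the wire enum (DatanodeInfoProto.AdminState) and the in-memory enum (DatanodeInfo.AdminStates). Since each switch covers the same three values, the conversion should round-trip. A sanity-check sketch, assuming a JUnit harness; this method is not taken from TestPBHelper:

@Test
public void testAdminStateRoundTrip() {
  // Every AdminStates value should survive conversion to protobuf and back,
  // given only the three values enumerated in the switches above.
  for (DatanodeInfo.AdminStates state : DatanodeInfo.AdminStates.values()) {
    DatanodeInfoProto.AdminState wire = PBHelper.convert(state);
    assertEquals(state, PBHelper.convert(wire));
  }
}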
Example #15
Source File: TestDecommission.java From big-c with Apache License 2.0
private void recommissionNode(int nnIndex, DatanodeInfo decommissionedNode)
    throws IOException {
  LOG.info("Recommissioning node: " + decommissionedNode);
  writeConfigFile(excludeFile, null);
  refreshNodes(cluster.getNamesystem(nnIndex), conf);
  waitNodeState(decommissionedNode, AdminStates.NORMAL);
}
Example #16
Source File: DFSTestUtil.java From big-c with Apache License 2.0
public static DatanodeInfo getLocalDatanodeInfo(String ipAddr,
    String hostname, AdminStates adminState) {
  return new DatanodeInfo(ipAddr, hostname, "",
      DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
      DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
      DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
      DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT,
      1l, 2l, 3l, 4l, 0l, 0l, 0l, 5, 6, "local", adminState);
}
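Example #16 is the factory that the TestPBHelper listings (#2 and #3) lean on: it fabricates a DatanodeInfo on the default test ports with whatever admin state the caller wants. A typical call, as used in those examples, with an illustrative assertion added:

// Fabricate a local datanode already marked as decommissioning.
DatanodeInfo dn = DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
    AdminStates.DECOMMISSION_INPROGRESS);
// The constructor stores the state, so this should hold (illustrative check):
assert dn.getAdminState() == AdminStates.DECOMMISSION_INPROGRESS;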
Example #17
Source File: TestDecommission.java From big-c with Apache License 2.0
public void testClusterStats(int numNameNodes)
    throws IOException, InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf);

  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);

    FSNamesystem fsn = cluster.getNamesystem(i);
    NameNode namenode = cluster.getNameNode(i);
    DatanodeInfo decomInfo = decommissionNode(i, null, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    DataNode decomNode = getDataNode(decomInfo);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, decomInfo, decomNode, true);

    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    refreshNodes(fsn, conf);
    DatanodeInfo retInfo = NameNodeAdapter.getDatanode(fsn, decomInfo);
    DataNode retNode = getDataNode(decomInfo);
    waitNodeState(retInfo, AdminStates.NORMAL);
    verifyStats(namenode, fsn, retInfo, retNode, false);
  }
}
Example #18
Source File: TestDecommission.java From RDFS with Apache License 2.0
private void testDecommission(int numNamenodes, int numDatanodes,
    boolean federation) throws IOException {
  LOG.info("Starting test testDecommission");
  startCluster(numNamenodes, numDatanodes, conf, federation);

  ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
      new ArrayList<ArrayList<DatanodeInfo>>(numNamenodes);
  for (int i = 0; i < numNamenodes; i++) {
    namenodeDecomList.add(i, new ArrayList<DatanodeInfo>(numDatanodes));
  }
  Path file1 = new Path("testDecommission.dat");
  for (int iteration = 0; iteration < numDatanodes - 1; iteration++) {
    int replicas = numDatanodes - iteration - 1;

    // Start decommissioning one namenode at a time
    for (int i = 0; i < numNamenodes; i++) {
      ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(i);
      FileSystem fileSys = cluster.getFileSystem(i);
      writeFile(fileSys, file1, replicas);

      // Decommission one node. Verify that node is decommissioned.
      DatanodeInfo decomNode = decommissionNode(i, decommissionedNodes,
          AdminStates.DECOMMISSIONED);
      decommissionedNodes.add(decomNode);

      // Ensure decommissioned datanode is not automatically shutdown
      DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
      assertEquals("All datanodes must be alive", numDatanodes,
          client.datanodeReport(DatanodeReportType.LIVE).length);
      checkFile(fileSys, file1, replicas, decomNode.getName(), numDatanodes);
      cleanupFile(fileSys, file1);
    }
  }

  // Restart the cluster and ensure decommissioned datanodes
  // are allowed to register with the namenode
  cluster.shutdown();
  startCluster(numNamenodes, numDatanodes, conf, federation);
}
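Example #18 drives the full lifecycle that AdminStates models: a datanode leaves NORMAL for DECOMMISSION_INPROGRESS once it appears in the exclude file and the namenode re-reads that file, and reaches DECOMMISSIONED after its blocks are re-replicated. Condensed into one hedged helper; the names writeConfigFile, excludeFile, refreshNodes, and waitNodeState are the TestDecommission helpers shown in these examples, while the wrapper itself is illustrative:

private void drainNode(FSNamesystem fsn, DatanodeInfo dn,
    ArrayList<String> excludedHosts) throws IOException {
  excludedHosts.add(dn.getName());
  writeConfigFile(excludeFile, excludedHosts); // mark the node excluded
  refreshNodes(fsn, conf);                     // namenode re-reads the file
  // The node passes through DECOMMISSION_INPROGRESS on its way here.
  waitNodeState(dn, AdminStates.DECOMMISSIONED);
}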
Example #19
Source File: ClusterJspHelper.java From RDFS with Apache License 2.0
/**
 * Process the JSON string returned from http or local fsnamesystem to get
 * live datanode status. Store the information into the datanode status map
 * and DecommissionNode.
 *
 * @param statusMap map of datanode status: key is datanode, value is an
 *          inner map whose key is namenode and value is the datanode
 *          status reported by that namenode.
 * @param namenodeAddr address of the namenode
 * @param json JSON string that contains datanode status
 * @throws IOException
 */
private static void getLiveNodeStatus(
    Map<String, Map<String, String>> statusMap, String namenodeAddr,
    String json) throws IOException {
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap != null && !nodeMap.isEmpty()) {
    List<String> liveDecommed = new ArrayList<String>();
    for (Map.Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
      Map<String, Object> innerMap = entry.getValue();
      String dn = entry.getKey();
      if (innerMap != null) {
        if (innerMap.get("adminState").equals(
            AdminStates.DECOMMISSIONED.toString())) {
          liveDecommed.add(dn);
        }
        // the inner map key is namenode, value is datanode status.
        Map<String, String> nnStatus = statusMap.get(dn);
        if (nnStatus == null) {
          nnStatus = new HashMap<String, String>();
        }
        nnStatus.put(namenodeAddr, (String) innerMap.get("adminState"));
        // map whose key is datanode, value is the inner map.
        statusMap.put(dn, nnStatus);
      }
    }
  }
}
Example #20
Source File: ClusterJspHelper.java From RDFS with Apache License 2.0
/**
 * Process the JSON string returned from http or local fsnamesystem to get
 * the dead datanode information. Store the information into the datanode
 * status map and DecommissionNode.
 *
 * @param statusMap map with key being datanode, value being an
 *          inner map (key:namenode, value:decommissioning state).
 * @param address namenode address
 * @param json JSON string
 * @throws IOException
 */
private static void getDeadNodeStatus(
    Map<String, Map<String, String>> statusMap, String address,
    String json) throws IOException {
  Map<String, Map<String, Object>> nodeMap = getNodeMap(json);
  if (nodeMap == null || nodeMap.isEmpty()) {
    return;
  }
  List<String> deadDn = new ArrayList<String>();
  List<String> deadDecommed = new ArrayList<String>();
  for (Entry<String, Map<String, Object>> entry : nodeMap.entrySet()) {
    deadDn.add(entry.getKey());
    Map<String, Object> deadNodeDetailMap = entry.getValue();
    String dn = entry.getKey();
    if (deadNodeDetailMap != null && !deadNodeDetailMap.isEmpty()) {
      // NN - status
      Map<String, String> nnStatus = statusMap.get(dn);
      if (nnStatus == null) {
        nnStatus = new HashMap<String, String>();
      }
      if (((Boolean) deadNodeDetailMap.get("decommissioned"))
          .booleanValue() == true) {
        deadDecommed.add(dn);
        nnStatus.put(address, AdminStates.DECOMMISSIONED.toString());
      } else {
        nnStatus.put(address, DEAD);
      }
      // dn-nn-status
      statusMap.put(dn, nnStatus);
    }
  }
}