Java Code Examples for org.apache.hadoop.hdfs.HAUtil#getProxiesForAllNameNodesInNameservice()
The following examples show how to use org.apache.hadoop.hdfs.HAUtil#getProxiesForAllNameNodesInNameservice(). All of them come from DFSAdmin.java, the class that implements the hdfs dfsadmin command, in Apache Hadoop.
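Every example follows the same HA-aware shape: check whether the filesystem URI is a logical (nameservice) URI, and if so fan the RPC out to every NameNode in the nameservice instead of sending it to a single address. The sketch below distills that pattern, assuming the Hadoop 2.x client API used throughout these examples. It is a minimal illustration, not part of DFSAdmin; the class and method names (HaFanOutSketch, refreshAllNameNodes) are hypothetical, and refreshNodes() stands in for whichever per-NameNode RPC a real command would issue.

import java.io.IOException;
import java.net.URI;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class HaFanOutSketch {
  // Hypothetical helper illustrating the fan-out pattern shared by all
  // examples below; refreshNodes() stands in for any per-NameNode RPC.
  static void refreshAllNameNodes(DistributedFileSystem dfs)
      throws IOException {
    Configuration conf = dfs.getConf();
    URI dfsUri = dfs.getUri();
    if (HAUtil.isLogicalUri(conf, dfsUri)) {
      // For a logical HA URI, the host component is the nameservice ID.
      String nsId = dfsUri.getHost();
      List<ProxyAndInfo<ClientProtocol>> proxies =
          HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
              ClientProtocol.class);
      // Invoke the same RPC on every NameNode in the nameservice.
      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
        proxy.getProxy().refreshNodes();
        System.out.println("Refreshed " + proxy.getAddress());
      }
    } else {
      // Single NameNode: the filesystem client talks to it directly.
      dfs.refreshNodes();
    }
  }
}

The key detail is that for a logical HA URI the host component of the URI is not a hostname but the nameservice ID, which is exactly what getProxiesForAllNameNodesInNameservice() expects as its second argument.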
Example 1
Source File: DFSAdmin.java From hadoop with Apache License 2.0
/**
 * Dumps DFS data structures into specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
            nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname
          + " in the log directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname
        + " in the log directory of namenode " + dfs.getUri());
  }
  return 0;
}
Example 2
Source File: DFSAdmin.java From hadoop with Apache License 2.0
/**
 * Command to ask the namenode to save the namespace.
 * Usage: hdfs dfsadmin -saveNamespace
 * @exception IOException
 * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
 */
public int saveNamespace() throws IOException {
  int exitCode = -1;
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
            nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().saveNamespace();
      System.out.println("Save namespace successful for "
          + proxy.getAddress());
    }
  } else {
    dfs.saveNamespace();
    System.out.println("Save namespace successful");
  }
  exitCode = 0;
  return exitCode;
}
Example 3
Source File: DFSAdmin.java From hadoop with Apache License 2.0
public int refreshCallQueue() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // For security authorization, the server principal for this call
  // should be the NameNode's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshCallQueue for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshCallQueueProtocol.class);
    for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
      proxy.getProxy().refreshCallQueue();
      System.out.println("Refresh call queue successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshCallQueueProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshCallQueueProtocol.class).getProxy();

    // Refresh the call queue
    refreshProtocol.refreshCallQueue();
    System.out.println("Refresh call queue successful");
  }
  return 0;
}
Example 4
Source File: DFSAdmin.java From hadoop with Apache License 2.0
/**
 * Refresh the super user groups configuration on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshSuperUserGroupsConfiguration() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // For security authorization, the server principal for this call
  // should be the NameNode's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshSuperUserGroupsConfiguration for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshUserMappingsProtocol.class);
    for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
      proxy.getProxy().refreshSuperUserGroupsConfiguration();
      System.out.println("Refresh super user groups configuration "
          + "successful for " + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshUserMappingsProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshUserMappingsProtocol.class).getProxy();

    // Refresh the super user groups configuration
    refreshProtocol.refreshSuperUserGroupsConfiguration();
    System.out.println("Refresh super user groups configuration successful");
  }
  return 0;
}
Example 5
Source File: DFSAdmin.java From hadoop with Apache License 2.0
/**
 * Refresh the user-to-groups mappings on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshUserToGroupsMappings() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // For security authorization, the server principal for this call
  // should be the NameNode's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshUserToGroupsMappings for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshUserMappingsProtocol.class);
    for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
      proxy.getProxy().refreshUserToGroupsMappings();
      System.out.println("Refresh user to groups mapping successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshUserMappingsProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshUserMappingsProtocol.class).getProxy();

    // Refresh the user-to-groups mappings
    refreshProtocol.refreshUserToGroupsMappings();
    System.out.println("Refresh user to groups mapping successful");
  }
  return 0;
}
Example 6
Source File: DFSAdmin.java From hadoop with Apache License 2.0
/**
 * Refresh the authorization policy on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshServiceAcl() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // For security authorization, the server principal for this call
  // should be the NameNode's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshServiceAcl for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshAuthorizationPolicyProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshAuthorizationPolicyProtocol.class);
    for (ProxyAndInfo<RefreshAuthorizationPolicyProtocol> proxy : proxies) {
      proxy.getProxy().refreshServiceAcl();
      System.out.println("Refresh service acl successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshAuthorizationPolicyProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshAuthorizationPolicyProtocol.class).getProxy();

    // Refresh the authorization policy in effect
    refreshProtocol.refreshServiceAcl();
    System.out.println("Refresh service acl successful");
  }
  return 0;
}
Example 7
Source File: DFSAdmin.java From hadoop with Apache License 2.0
/**
 * Command to ask the namenode to finalize a previously performed upgrade.
 * Usage: hdfs dfsadmin -finalizeUpgrade
 * @exception IOException
 */
public int finalizeUpgrade() throws IOException {
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();

  boolean isHaAndLogicalUri = HAUtil.isLogicalUri(dfsConf, dfsUri);
  if (isHaAndLogicalUri) {
    // In the case of HA and logical URI, run finalizeUpgrade for all
    // NNs in this nameservice.
    String nsId = dfsUri.getHost();
    List<ClientProtocol> namenodes =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId);
    if (!HAUtil.isAtLeastOneActive(namenodes)) {
      throw new IOException("Cannot finalize with no NameNode active");
    }

    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
            nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().finalizeUpgrade();
      System.out.println("Finalize upgrade successful for "
          + proxy.getAddress());
    }
  } else {
    dfs.finalizeUpgrade();
    System.out.println("Finalize upgrade successful");
  }
  return 0;
}
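Note that this example uses both overloads of the method: the two-argument form returns a plain List<ClientProtocol> suitable for the HAUtil.isAtLeastOneActive() check, while the three-argument form takes the protocol class as well and returns a List<ProxyAndInfo<T>>, whose entries carry the address of each NameNode for reporting.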
Example 8
Source File: DFSAdmin.java From hadoop with Apache License 2.0
/**
 * Command to ask the namenode to reread the hosts and excluded hosts file.
 * Usage: hdfs dfsadmin -refreshNodes
 * @exception IOException
 */
public int refreshNodes() throws IOException {
  int exitCode = -1;
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
            nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().refreshNodes();
      System.out.println("Refresh nodes successful for "
          + proxy.getAddress());
    }
  } else {
    dfs.refreshNodes();
    System.out.println("Refresh nodes successful");
  }
  exitCode = 0;
  return exitCode;
}
Example 9
Source File: DFSAdmin.java From hadoop with Apache License 2.0
/**
 * Command to ask the namenode to set the balancer bandwidth for all of the
 * datanodes.
 * Usage: hdfs dfsadmin -setBalancerBandwidth bandwidth
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException
 */
public int setBalancerBandwidth(String[] argv, int idx) throws IOException {
  long bandwidth;
  int exitCode = -1;

  try {
    bandwidth = Long.parseLong(argv[idx]);
  } catch (NumberFormatException nfe) {
    System.err.println("NumberFormatException: " + nfe.getMessage());
    System.err.println("Usage: hdfs dfsadmin"
        + " [-setBalancerBandwidth <bandwidth in bytes per second>]");
    return exitCode;
  }

  FileSystem fs = getFS();
  if (!(fs instanceof DistributedFileSystem)) {
    System.err.println("FileSystem is " + fs.getUri());
    return exitCode;
  }

  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
            nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().setBalancerBandwidth(bandwidth);
      System.out.println("Balancer bandwidth is set to " + bandwidth
          + " for " + proxy.getAddress());
    }
  } else {
    dfs.setBalancerBandwidth(bandwidth);
    System.out.println("Balancer bandwidth is set to " + bandwidth);
  }
  exitCode = 0;
  return exitCode;
}
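Since all of these methods implement hdfs dfsadmin subcommands (see the Usage lines in their javadoc), the fan-out happens transparently for administrators: running a command such as hdfs dfsadmin -refreshNodes against a cluster whose default filesystem is a logical HA URI reaches every NameNode in the nameservice, with no extra flag required.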