Java Code Examples for org.apache.hadoop.hdfs.DistributedFileSystem#getUri()

The following examples show how to use org.apache.hadoop.hdfs.DistributedFileSystem#getUri(). Each example is taken from an open-source project; the line above each snippet names the source file and the project it comes from.
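Across these examples, getUri() serves one recurring purpose: the returned URI is passed to HAUtil.isLogicalUri() to decide whether an operation must be fanned out to every NameNode in an HA nameservice (where the host component of the logical URI is the nameservice ID) or sent to a single NameNode directly. Below is a minimal sketch of that pattern, assuming a Hadoop 2.x client classpath; the class name and printed messages are illustrative only, not part of any Hadoop API.

import java.io.IOException;
import java.net.URI;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class GetUriExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      System.err.println("Not an HDFS filesystem: " + fs.getUri());
      return;
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    // getUri() returns the URI this client was created with, e.g.
    // hdfs://nn-host:8020, or a logical URI such as hdfs://mycluster under HA.
    URI dfsUri = dfs.getUri();

    if (HAUtil.isLogicalUri(conf, dfsUri)) {
      // Under HA, the host part of the logical URI is the nameservice ID.
      String nsId = dfsUri.getHost();
      List<ProxyAndInfo<ClientProtocol>> proxies =
          HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
              ClientProtocol.class);
      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
        System.out.println("NameNode at " + proxy.getAddress());
      }
    } else {
      System.out.println("Single NameNode at " + dfsUri);
    }
  }
}

Each proxy.getProxy() handle can then invoke an admin RPC (metaSave, refreshNodes, and so on) against its NameNode, which is exactly what the DFSAdmin examples below do.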
Example 1
Source File: DFSAdmin.java    From hadoop with Apache License 2.0
/**
 * Dumps DFS data structures into specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log " +
        "directory of namenode " + dfs.getUri());
  }
  return 0;
}
 
Example 2
Source File: DFSAdmin.java    From big-c with Apache License 2.0
/**
 * Command to ask the namenode to reread the hosts and excluded hosts
 * files.
 * Usage: hdfs dfsadmin -refreshNodes
 * @exception IOException 
 */
public int refreshNodes() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy: proxies) {
      proxy.getProxy().refreshNodes();
      System.out.println("Refresh nodes successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.refreshNodes();
    System.out.println("Refresh nodes successful");
  }
  exitCode = 0;
 
  return exitCode;
}
 
Example 3
Source File: TestParityMovement.java    From RDFS with Apache License 2.0
private DistributedRaidFileSystem getRaidFS() throws IOException {
  DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
  Configuration clientConf = new Configuration(conf);
  // Route the hdfs:// scheme to the RAID-aware client, keeping the plain
  // DistributedFileSystem as the underlying implementation.
  clientConf.set("fs.hdfs.impl", 
           "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
  clientConf.set("fs.raid.underlyingfs.impl", 
           "org.apache.hadoop.hdfs.DistributedFileSystem");
  // Bypass the FileSystem cache so the modified configuration is honored.
  clientConf.setBoolean("fs.hdfs.impl.disable.cache", true);
  // Reopen the same namespace, identified by getUri(), through the RAID client.
  URI dfsUri = dfs.getUri();
  return (DistributedRaidFileSystem) FileSystem.get(dfsUri, clientConf);
}
 
Example 4
Source File: RaidShell.java    From RDFS with Apache License 2.0
/**
 * Recovers the specified path from the parity file
 */
public Path[] recover(String cmd, String argv[], int startindex)
  throws IOException {
  Path[] paths = new Path[(argv.length - startindex) / 2];
  int j = 0;
  for (int i = startindex; i < argv.length; i = i + 2) {
    String path = argv[i];
    long corruptOffset = Long.parseLong(argv[i+1]);
    LOG.info("RaidShell recoverFile for " + path + " corruptOffset " + corruptOffset);
    Path recovered = new Path("/tmp/recovered." + System.currentTimeMillis());
    FileSystem fs = recovered.getFileSystem(conf);
    DistributedFileSystem dfs = (DistributedFileSystem)fs;
    Configuration raidConf = new Configuration(conf);
    raidConf.set("fs.hdfs.impl",
                   "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
    raidConf.set("fs.raid.underlyingfs.impl",
                   "org.apache.hadoop.hdfs.DistributedFileSystem");
    raidConf.setBoolean("fs.hdfs.impl.disable.cache", true);
    java.net.URI dfsUri = dfs.getUri();
    FileSystem raidFs = FileSystem.get(dfsUri, raidConf);
    FileUtil.copy(raidFs, new Path(path), fs, recovered, false, conf);

    paths[j] = recovered;
    LOG.info("Raidshell created recovery file " + paths[j]);
    j++;
  }
  return paths;
}
 
Example 5
Source File: DFSAdmin.java    From big-c with Apache License 2.0
public int refreshCallQueue() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();
  
  // For security authorization, the server principal for this call
  // should be the NameNode's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshCallQueue for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshCallQueueProtocol.class);
    for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
      proxy.getProxy().refreshCallQueue();
      System.out.println("Refresh call queue successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshCallQueueProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshCallQueueProtocol.class).getProxy();

    // Refresh the call queue
    refreshProtocol.refreshCallQueue();
    System.out.println("Refresh call queue successful");
  }

  return 0;
}
 
Example 6
Source File: DFSAdmin.java    From big-c with Apache License 2.0
/**
 * Refresh the superuser proxy group mappings on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshSuperUserGroupsConfiguration() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // For security authorization, the server principal for this call
  // should be the NameNode's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshSuperUserGroupsConfiguration for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshUserMappingsProtocol.class);
    for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
      proxy.getProxy().refreshSuperUserGroupsConfiguration();
      System.out.println("Refresh super user groups configuration " +
          "successful for " + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshUserMappingsProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshUserMappingsProtocol.class).getProxy();

    // Refresh the user-to-groups mappings
    refreshProtocol.refreshSuperUserGroupsConfiguration();
    System.out.println("Refresh super user groups configuration successful");
  }

  return 0;
}
 
Example 7
Source File: DFSAdmin.java    From big-c with Apache License 2.0
/**
 * Refresh the user-to-groups mappings on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshUserToGroupsMappings() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();
  
  // For security authorization, the server principal for this call
  // should be the NameNode's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshUserToGroupsMappings for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshUserMappingsProtocol.class);
    for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
      proxy.getProxy().refreshUserToGroupsMappings();
      System.out.println("Refresh user to groups mapping successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshUserMappingsProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshUserMappingsProtocol.class).getProxy();

    // Refresh the user-to-groups mappings
    refreshProtocol.refreshUserToGroupsMappings();
    System.out.println("Refresh user to groups mapping successful");
  }
  
  return 0;
}
 
Example 8
Source File: DFSAdmin.java    From big-c with Apache License 2.0
/**
 * Refresh the authorization policy on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshServiceAcl() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // For security authorization, the server principal for this call
  // should be the NameNode's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshServiceAcl for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshAuthorizationPolicyProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshAuthorizationPolicyProtocol.class);
    for (ProxyAndInfo<RefreshAuthorizationPolicyProtocol> proxy : proxies) {
      proxy.getProxy().refreshServiceAcl();
      System.out.println("Refresh service acl successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshAuthorizationPolicyProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshAuthorizationPolicyProtocol.class).getProxy();
    // Refresh the authorization policy in effect
    refreshProtocol.refreshServiceAcl();
    System.out.println("Refresh service acl successful");
  }
  
  return 0;
}
 
Example 9
Source File: TestReadConstruction.java    From RDFS with Apache License 2.0
private DistributedRaidFileSystem getRaidFS() throws IOException {
  DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
  Configuration clientConf = new Configuration(conf);
  clientConf.set("fs.hdfs.impl", 
      "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
  clientConf.set("fs.raid.underlyingfs.impl", 
      "org.apache.hadoop.hdfs.DistributedFileSystem");
  clientConf.setBoolean("fs.hdfs.impl.disable.cache", true);
  URI dfsUri = dfs.getUri();
  return (DistributedRaidFileSystem)FileSystem.get(dfsUri, clientConf);
}
 
Example 10
Source File: DFSAdmin.java    From big-c with Apache License 2.0
/**
 * Command to ask the namenode to finalize a previously performed upgrade.
 * Usage: hdfs dfsadmin -finalizeUpgrade
 * @exception IOException 
 */
public int finalizeUpgrade() throws IOException {
  DistributedFileSystem dfs = getDFS();
  
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaAndLogicalUri = HAUtil.isLogicalUri(dfsConf, dfsUri);
  if (isHaAndLogicalUri) {
    // In the case of HA and logical URI, run finalizeUpgrade for all
    // NNs in this nameservice.
    String nsId = dfsUri.getHost();
    List<ClientProtocol> namenodes =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId);
    if (!HAUtil.isAtLeastOneActive(namenodes)) {
      throw new IOException("Cannot finalize with no NameNode active");
    }

    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().finalizeUpgrade();
      System.out.println("Finalize upgrade successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.finalizeUpgrade();
    System.out.println("Finalize upgrade successful");
  }
  
  return 0;
}
 
Example 11
Source File: DFSAdmin.java    From hadoop with Apache License 2.0
/**
 * Command to ask the namenode to save the namespace.
 * Usage: hdfs dfsadmin -saveNamespace
 * @exception IOException 
 * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
 */
public int saveNamespace() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().saveNamespace();
      System.out.println("Save namespace successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.saveNamespace();
    System.out.println("Save namespace successful");
  }
  exitCode = 0;
 
  return exitCode;
}
 
Example 12
Source File: TestDirectoryRaidDfs.java    From RDFS with Apache License 2.0
private static DistributedRaidFileSystem getRaidFS(FileSystem fileSys,
    Configuration conf)
    throws IOException {
  DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
  Configuration clientConf = new Configuration(conf);
  clientConf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
  clientConf.set("fs.raid.underlyingfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
  clientConf.setBoolean("fs.hdfs.impl.disable.cache", true);
  URI dfsUri = dfs.getUri();
  return (DistributedRaidFileSystem)FileSystem.get(dfsUri, clientConf);
}
 
Example 13
Source File: DFSAdmin.java    From big-c with Apache License 2.0
/**
 * Command to ask the namenode to set the balancer bandwidth for all of the
 * datanodes.
 * Usage: hdfs dfsadmin -setBalancerBandwidth bandwidth
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException 
 */
public int setBalancerBandwidth(String[] argv, int idx) throws IOException {
  long bandwidth;
  int exitCode = -1;

  try {
    bandwidth = Long.parseLong(argv[idx]);
  } catch (NumberFormatException nfe) {
    System.err.println("NumberFormatException: " + nfe.getMessage());
    System.err.println("Usage: hdfs dfsadmin"
                + " [-setBalancerBandwidth <bandwidth in bytes per second>]");
    return exitCode;
  }

  FileSystem fs = getFS();
  if (!(fs instanceof DistributedFileSystem)) {
    System.err.println("FileSystem is " + fs.getUri());
    return exitCode;
  }

  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().setBalancerBandwidth(bandwidth);
      System.out.println("Balancer bandwidth is set to " + bandwidth +
          " for " + proxy.getAddress());
    }
  } else {
    dfs.setBalancerBandwidth(bandwidth);
    System.out.println("Balancer bandwidth is set to " + bandwidth);
  }
  exitCode = 0;

  return exitCode;
}
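The DFSAdmin commands above all repeat the same fan-out boilerplate around getUri(). As a closing illustration, here is a hypothetical generic helper (the class, interface, and method names are invented for this sketch and are not part of Hadoop) that captures the shared shape: detect a logical URI, resolve the nameservice ID, and apply an admin action to each NameNode proxy.

import java.io.IOException;
import java.net.URI;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;

class HaAdminHelper {
  // One admin RPC against a single NameNode proxy.
  interface NameNodeAction<T> {
    void apply(T proxy) throws IOException;
  }

  // Returns true if the URI was logical (HA) and the action ran on every
  // NameNode in the nameservice; returns false so the caller can fall back
  // to the plain single-NameNode path.
  static <T> boolean runOnAllNameNodes(Configuration conf,
      DistributedFileSystem dfs, Class<T> protocol,
      NameNodeAction<T> action) throws IOException {
    URI dfsUri = dfs.getUri();
    if (!HAUtil.isLogicalUri(conf, dfsUri)) {
      return false;
    }
    String nsId = dfsUri.getHost();  // nameservice ID under HA
    List<ProxyAndInfo<T>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId, protocol);
    for (ProxyAndInfo<T> proxy : proxies) {
      action.apply(proxy.getProxy());
      System.out.println("Succeeded for " + proxy.getAddress());
    }
    return true;
  }
}

With Java 8 lambdas, refreshNodes() above would reduce to runOnAllNameNodes(conf, dfs, ClientProtocol.class, p -> p.refreshNodes()), falling back to dfs.refreshNodes() when the helper returns false.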
 