Java Code Examples for org.apache.hadoop.hdfs.HAUtil#getNameNodeId()

The following examples show how to use org.apache.hadoop.hdfs.HAUtil#getNameNodeId(). Each example is taken from an open source project; the source file, originating project, and license are noted above the code.
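All of the examples below follow the same pattern: resolve the nameservice ID from the configuration with DFSUtil.getNamenodeNameServiceId(), resolve this node's NameNode ID within that nameservice with HAUtil.getNameNodeId(), and then use the pair to initialize the NN-specific configuration keys. The sketch below distills that pattern; the class and method names (NameNodeIdExample, printResolvedIds) are illustrative only and not part of Hadoop.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class NameNodeIdExample {
  /** Minimal sketch of the shared lookup pattern; not part of the Hadoop API. */
  public static void printResolvedIds(Configuration conf) {
    // Resolve the nameservice this configuration belongs to (may be null without federation/HA).
    String nsId = DFSUtil.getNamenodeNameServiceId(conf);
    // Resolve this node's NameNode ID within that nameservice (may be null if it cannot be determined).
    String nnId = HAUtil.getNameNodeId(conf, nsId);
    // Copy the nsId/nnId-suffixed configuration keys onto their generic equivalents.
    NameNode.initializeGenericKeys(conf, nsId, nnId);
    System.out.println("nameservice=" + nsId + ", namenode=" + nnId);
  }
}
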
Example 1
Source File: NameNode.java    From hadoop with Apache License 2.0
@VisibleForTesting
public static boolean doRollback(Configuration conf,
    boolean isConfirmationNeeded) throws IOException {
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);

  FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf));
  System.err.print(
      "\"rollBack\" will remove the current state of the file system,\n"
      + "returning you to the state prior to initiating your recent.\n"
      + "upgrade. This action is permanent and cannot be undone. If you\n"
      + "are performing a rollback in an HA environment, you should be\n"
      + "certain that no NameNode process is running on any host.");
  if (isConfirmationNeeded) {
    if (!confirmPrompt("Roll back file system state?")) {
      System.err.println("Rollback aborted.");
      return true;
    }
  }
  nsys.getFSImage().doRollback(nsys);
  return false;
}
 
Example 2
Source File: DFSZKFailoverController.java    From hadoop with Apache License 2.0
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
  
  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
 
Example 3
Source File: NameNode.java    From big-c with Apache License 2.0
@VisibleForTesting
public static boolean doRollback(Configuration conf,
    boolean isConfirmationNeeded) throws IOException {
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);

  FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf));
  System.err.print(
      "\"rollBack\" will remove the current state of the file system,\n"
      + "returning you to the state prior to initiating your recent.\n"
      + "upgrade. This action is permanent and cannot be undone. If you\n"
      + "are performing a rollback in an HA environment, you should be\n"
      + "certain that no NameNode process is running on any host.");
  if (isConfirmationNeeded) {
    if (!confirmPrompt("Roll back file system state?")) {
      System.err.println("Rollback aborted.");
      return true;
    }
  }
  nsys.getFSImage().doRollback(nsys);
  return false;
}
 
Example 4
Source File: DFSZKFailoverController.java    From big-c with Apache License 2.0
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
  
  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
 
Example 5
Source File: BootstrapStandby.java    From hadoop with Apache License 2.0
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(conf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, nnId);

  if (!HAUtil.usesSharedEditsDir(conf)) {
    throw new HadoopIllegalArgumentException(
      "Shared edits storage is not enabled for this namenode.");
  }
  
  Configuration otherNode = HAUtil.getConfForOtherNode(conf);
  otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
  otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
  Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
      !otherIpcAddr.getAddress().isAnyLocalAddress(),
      "Could not determine valid IPC address for other NameNode (%s)" +
      ", got: %s", otherNNId, otherIpcAddr);

  final String scheme = DFSUtil.getHttpClientScheme(conf);
  otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
      otherIpcAddr.getHostName(), otherNode, scheme).toURL();

  dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
      conf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
 
Example 6
Source File: NameNode.java    From hadoop with Apache License 2.0
/**
 * Verify that configured directories exist, then print the metadata versions
 * of the software and the image.
 *
 * @param conf configuration to use
 * @throws IOException
 */
private static boolean printMetadataVersion(Configuration conf)
  throws IOException {
  final String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  final String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, namenodeId);
  final FSImage fsImage = new FSImage(conf);
  final FSNamesystem fs = new FSNamesystem(conf, fsImage, false);
  return fsImage.recoverTransitionRead(
    StartupOption.METADATAVERSION, fs, null);
}
 
Example 7
Source File: BootstrapStandby.java    From big-c with Apache License 2.0
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(conf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, nnId);

  if (!HAUtil.usesSharedEditsDir(conf)) {
    throw new HadoopIllegalArgumentException(
      "Shared edits storage is not enabled for this namenode.");
  }
  
  Configuration otherNode = HAUtil.getConfForOtherNode(conf);
  otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
  otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
  Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
      !otherIpcAddr.getAddress().isAnyLocalAddress(),
      "Could not determine valid IPC address for other NameNode (%s)" +
      ", got: %s", otherNNId, otherIpcAddr);

  final String scheme = DFSUtil.getHttpClientScheme(conf);
  otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
      otherIpcAddr.getHostName(), otherNode, scheme).toURL();

  dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
      conf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
 
Example 8
Source File: NameNode.java    From big-c with Apache License 2.0
/**
 * Verify that configured directories exist, then print the metadata versions
 * of the software and the image.
 *
 * @param conf configuration to use
 * @throws IOException
 */
private static boolean printMetadataVersion(Configuration conf)
  throws IOException {
  final String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  final String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, namenodeId);
  final FSImage fsImage = new FSImage(conf);
  final FSNamesystem fs = new FSNamesystem(conf, fsImage, false);
  return fsImage.recoverTransitionRead(
    StartupOption.METADATAVERSION, fs, null);
}
 
Example 9
Source File: NameNode.java    From hadoop with Apache License 2.0
/**
 * Verify that configured directories exist, then interactively confirm
 * that formatting is desired for each existing directory and format them.
 *
 * @param conf configuration to use
 * @param force if true, format regardless of whether dirs exist
 * @param isInteractive if true, prompt the user to confirm before formatting
 * @return true if formatting was aborted, false otherwise
 * @throws IOException
 */
private static boolean format(Configuration conf, boolean force,
    boolean isInteractive) throws IOException {
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);
  checkAllowFormat(conf);

  if (UserGroupInformation.isSecurityEnabled()) {
    InetSocketAddress socAddr = getAddress(conf);
    SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
        DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
  }
  
  Collection<URI> nameDirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  List<URI> sharedDirs = FSNamesystem.getSharedEditsDirs(conf);
  List<URI> dirsToPrompt = new ArrayList<URI>();
  dirsToPrompt.addAll(nameDirsToFormat);
  dirsToPrompt.addAll(sharedDirs);
  List<URI> editDirsToFormat = 
               FSNamesystem.getNamespaceEditsDirs(conf);

  // if a clusterID was not provided via the startup options, generate a new one
  String clusterId = StartupOption.FORMAT.getClusterId();
  if (clusterId == null || clusterId.equals("")) {
    // generate a new cluster id
    clusterId = NNStorage.newClusterID();
  }
  System.out.println("Formatting using clusterid: " + clusterId);
  
  FSImage fsImage = new FSImage(conf, nameDirsToFormat, editDirsToFormat);
  try {
    FSNamesystem fsn = new FSNamesystem(conf, fsImage);
    fsImage.getEditLog().initJournalsForWrite();

    if (!fsImage.confirmFormat(force, isInteractive)) {
      return true; // aborted
    }

    fsImage.format(fsn, clusterId);
  } catch (IOException ioe) {
    LOG.warn("Encountered exception during format: ", ioe);
    fsImage.close();
    throw ioe;
  }
  return false;
}
 
Example 10
Source File: BlockManager.java    From hadoop with Apache License 2.0
private static BlockTokenSecretManager createBlockTokenSecretManager(
    final Configuration conf) {
  final boolean isEnabled = conf.getBoolean(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + "=" + isEnabled);

  if (!isEnabled) {
    if (UserGroupInformation.isSecurityEnabled()) {
      LOG.error("Security is enabled but block access tokens " +
          "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " +
          "aren't enabled. This may cause issues " +
          "when clients attempt to talk to a DataNode.");
    }
    return null;
  }

  final long updateMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT);
  final long lifetimeMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT);
  final String encryptionAlgorithm = conf.get(
      DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY
      + "=" + updateMin + " min(s), "
      + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY
      + "=" + lifetimeMin + " min(s), "
      + DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY
      + "=" + encryptionAlgorithm);
  
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId);

  if (isHaEnabled) {
    String thisNnId = HAUtil.getNameNodeId(conf, nsId);
    String otherNnId = HAUtil.getNameNodeIdOfOtherNode(conf, nsId);
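    // Derive a distinct index (0 or 1) for this NameNode from the lexical order of the two
    // NN IDs, so the two NameNodes generate block keys with non-overlapping serial numbers.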
    return new BlockTokenSecretManager(updateMin*60*1000L,
        lifetimeMin*60*1000L, thisNnId.compareTo(otherNnId) < 0 ? 0 : 1, null,
        encryptionAlgorithm);
  } else {
    return new BlockTokenSecretManager(updateMin*60*1000L,
        lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
  }
}
 
Example 11
Source File: NameNode.java    From big-c with Apache License 2.0
/**
 * Verify that configured directories exist, then interactively confirm
 * that formatting is desired for each existing directory and format them.
 *
 * @param conf configuration to use
 * @param force if true, format regardless of whether dirs exist
 * @param isInteractive if true, prompt the user to confirm before formatting
 * @return true if formatting was aborted, false otherwise
 * @throws IOException
 */
private static boolean format(Configuration conf, boolean force,
    boolean isInteractive) throws IOException {
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);
  checkAllowFormat(conf);

  if (UserGroupInformation.isSecurityEnabled()) {
    InetSocketAddress socAddr = getAddress(conf);
    SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
        DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
  }
  
  Collection<URI> nameDirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  List<URI> sharedDirs = FSNamesystem.getSharedEditsDirs(conf);
  List<URI> dirsToPrompt = new ArrayList<URI>();
  dirsToPrompt.addAll(nameDirsToFormat);
  dirsToPrompt.addAll(sharedDirs);
  List<URI> editDirsToFormat = 
               FSNamesystem.getNamespaceEditsDirs(conf);

  // if a clusterID was not provided via the startup options, generate a new one
  String clusterId = StartupOption.FORMAT.getClusterId();
  if (clusterId == null || clusterId.equals("")) {
    // generate a new cluster id
    clusterId = NNStorage.newClusterID();
  }
  System.out.println("Formatting using clusterid: " + clusterId);
  
  FSImage fsImage = new FSImage(conf, nameDirsToFormat, editDirsToFormat);
  try {
    FSNamesystem fsn = new FSNamesystem(conf, fsImage);
    fsImage.getEditLog().initJournalsForWrite();

    if (!fsImage.confirmFormat(force, isInteractive)) {
      return true; // aborted
    }

    fsImage.format(fsn, clusterId);
  } catch (IOException ioe) {
    LOG.warn("Encountered exception during format: ", ioe);
    fsImage.close();
    throw ioe;
  }
  return false;
}
 
Example 12
Source File: BlockManager.java    From big-c with Apache License 2.0
private static BlockTokenSecretManager createBlockTokenSecretManager(
    final Configuration conf) {
  final boolean isEnabled = conf.getBoolean(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + "=" + isEnabled);

  if (!isEnabled) {
    if (UserGroupInformation.isSecurityEnabled()) {
      LOG.error("Security is enabled but block access tokens " +
          "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " +
          "aren't enabled. This may cause issues " +
          "when clients attempt to talk to a DataNode.");
    }
    return null;
  }

  final long updateMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT);
  final long lifetimeMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT);
  final String encryptionAlgorithm = conf.get(
      DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY
      + "=" + updateMin + " min(s), "
      + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY
      + "=" + lifetimeMin + " min(s), "
      + DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY
      + "=" + encryptionAlgorithm);
  
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId);

  if (isHaEnabled) {
    String thisNnId = HAUtil.getNameNodeId(conf, nsId);
    String otherNnId = HAUtil.getNameNodeIdOfOtherNode(conf, nsId);
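    // Derive a distinct index (0 or 1) for this NameNode from the lexical order of the two
    // NN IDs, so the two NameNodes generate block keys with non-overlapping serial numbers.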
    return new BlockTokenSecretManager(updateMin*60*1000L,
        lifetimeMin*60*1000L, thisNnId.compareTo(otherNnId) < 0 ? 0 : 1, null,
        encryptionAlgorithm);
  } else {
    return new BlockTokenSecretManager(updateMin*60*1000L,
        lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
  }
}