Java Code Examples for org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption#RECOVER

The following examples show how to use org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption#RECOVER. You can go to the original project or source file by following the links above each example.
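A note on what RECOVER does before the examples: StartupOption.RECOVER corresponds to the hdfs namenode -recover command, and in the Hadoop tree these examples come from, NameNode.createNameNode() performs metadata recovery and then returns null rather than a running NameNode. That is why both examples below return early when they see this option. The standalone sketch below illustrates that path; the class name is a placeholder and the configuration is a stub, so treat it as an outline rather than a ready-to-run recovery (a real run needs dfs.namenode.name.dir pointing at an existing, possibly damaged, namespace).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class RecoverSketch {
  public static void main(String[] argv) throws Exception {
    // Placeholder configuration; point dfs.namenode.name.dir at the
    // namespace to recover before running this for real.
    Configuration conf = new HdfsConfiguration();
    // RECOVER.getName() is "-recover"; "-force" skips the interactive
    // confirmation prompt that recovery otherwise issues.
    String[] args = { StartupOption.RECOVER.getName(), "-force" };
    // For RECOVER, createNameNode() runs recovery and returns null
    // instead of a running NameNode.
    NameNode nn = NameNode.createNameNode(args, conf);
    assert nn == null;
  }
}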
Example 1
Source File: MiniDFSCluster.java    From hadoop with Apache License 2.0
private void initMiniDFSCluster(
    Configuration conf,
    int numDataNodes, StorageType[][] storageTypes, boolean format, boolean manageNameDfsDirs,
    boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
    boolean manageDataDfsDirs, StartupOption startOpt,
    StartupOption dnStartOpt, String[] racks,
    String[] hosts,
    long[][] storageCapacities, long[] simulatedCapacities, String clusterId,
    boolean waitSafeMode, boolean setupHostsFile,
    MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
    boolean checkDataNodeAddrConfig,
    boolean checkDataNodeHostConfig,
    Configuration[] dnConfOverlays,
    boolean skipFsyncForTesting)
throws IOException {
  boolean success = false;
  try {
    ExitUtil.disableSystemExit();

    // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
    FileSystem.enableSymlinks();

    synchronized (MiniDFSCluster.class) {
      instanceId = instanceCount++;
    }

    this.conf = conf;
    base_dir = new File(determineDfsBaseDir());
    data_dir = new File(base_dir, "data");
    this.waitSafeMode = waitSafeMode;
    this.checkExitOnShutdown = checkExitOnShutdown;
  
    int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
    conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
    int safemodeExtension = conf.getInt(
        DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
    conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 seconds
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
                   StaticMapping.class, DNSToSwitchMapping.class);
  
    // In an HA cluster, in order for the StandbyNode to perform checkpoints,
    // it needs to know the HTTP port of the Active. So, if ephemeral ports
    // are chosen, disable checkpoints for the test.
    if (!nnTopology.allHttpPortsSpecified() &&
        nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
          "since no HTTP ports have been specified.");
      conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
    }
    if (!nnTopology.allIpcPortsSpecified() &&
        nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling log-roll triggering in the "
          + "Standby node since no IPC ports have been specified.");
      conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
    }

    EditLogFileOutputStream.setShouldSkipFsyncForTesting(skipFsyncForTesting);
  
    federation = nnTopology.isFederated();
    try {
      createNameNodesAndSetConf(
          nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
          enableManagedDfsDirsRedundancy,
          format, startOpt, clusterId, conf);
    } catch (IOException ioe) {
      LOG.error("IOE creating namenodes. Permissions dump:\n" +
          createPermissionsDiagnosisString(data_dir), ioe);
      throw ioe;
    }
    if (format) {
      if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
        throw new IOException("Cannot remove data directory: " + data_dir +
            createPermissionsDiagnosisString(data_dir));
      }
    }
  
    // In RECOVER mode the NameNode only runs metadata recovery and does not
    // stay up, so skip starting DataNodes and waiting for the cluster.
    if (startOpt == StartupOption.RECOVER) {
      return;
    }

    // Start the DataNodes
    startDataNodes(conf, numDataNodes, storageTypes, manageDataDfsDirs,
        dnStartOpt != null ? dnStartOpt : startOpt,
        racks, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
        checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
    waitClusterUp();
    // Make sure ProxyUsers uses the latest conf
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    success = true;
  } finally {
    if (!success) {
      shutdown();
    }
  }
}
 
Example 2
Source File: MiniDFSCluster.java    From hadoop with Apache License 2.0
private void createNameNode(int nnIndex, Configuration conf,
    int numDataNodes, boolean format, StartupOption operation,
    String clusterId, String nameserviceId,
    String nnId)
    throws IOException {
  // Format and clean out the NameNode's name directories
  if (format) {
    DFSTestUtil.formatNameNode(conf);
  }
  if (operation == StartupOption.UPGRADE) {
    operation.setClusterId(clusterId);
  }
  
  // Start the NameNode after saving the default file system.
  String originalDefaultFs = conf.get(FS_DEFAULT_NAME_KEY);
  String[] args = createArgs(operation);
  NameNode nn = NameNode.createNameNode(args, conf);
  // For RECOVER, createNameNode() runs metadata recovery and returns null,
  // so there are no bound ports to record below; bail out here.
  if (operation == StartupOption.RECOVER) {
    return;
  }
  
  // After the NN has started, set back the bound ports into
  // the conf
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      nameserviceId, nnId), nn.getNameNodeAddressHostPortString());
  if (nn.getHttpAddress() != null) {
    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY,
        nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpAddress()));
  }
  if (nn.getHttpsAddress() != null) {
    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTPS_ADDRESS_KEY,
        nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpsAddress()));
  }

  DFSUtil.setGenericConf(conf, nameserviceId, nnId,
      DFS_NAMENODE_HTTP_ADDRESS_KEY);
  nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId,
      operation, new Configuration(conf));
  // Restore the default fs name
  if (originalDefaultFs == null) {
    conf.set(FS_DEFAULT_NAME_KEY, "");
  } else {
    conf.set(FS_DEFAULT_NAME_KEY, originalDefaultFs);
  }
}
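A usage note on the two examples above: when a test drives MiniDFSCluster with StartupOption.RECOVER, build() returns as soon as the NameNode-side recovery finishes, and no DataNodes are started. The sketch below is modeled on the recovery tests in the same Hadoop tree; the force level and the directory-management flags are the knobs a real test would tune, and the class name is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext;

public class MiniClusterRecoverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // setForce() mutates the shared enum constant; FORCE_ALL keeps the
    // recovery non-interactive, as the Hadoop recovery tests do.
    StartupOption recoverOpt = StartupOption.RECOVER;
    recoverOpt.setForce(MetaRecoveryContext.FORCE_ALL);

    // With RECOVER, initMiniDFSCluster() (Example 1) returns right after
    // the NameNode recovery, so no DataNodes start and there is no live
    // cluster to use afterwards.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0)
        .format(false)             // recover an existing namespace
        .manageNameDfsDirs(false)  // reuse name dirs from a prior run
        .startupOption(recoverOpt)
        .build();
    cluster.shutdown();
  }
}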
 