org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption. They are taken from open-source projects; each example lists the source file and the project it comes from, so you can check out the related API usage in the original code.
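Before the project examples, here is a minimal, self-contained sketch of the StartupOption calls the examples rely on: getEnum() parses an option string (optionally carrying a RollingUpgradeStartupOption), getName() returns the command-line form of an option, and setClusterId() attaches a cluster id to FORMAT. The wrapper class StartupOptionSketch and its main method are made up for illustration and do not appear in any of the projects below.

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;

public class StartupOptionSketch {
  public static void main(String[] args) {
    // Parse a plain startup option from its string form.
    StartupOption regular = StartupOption.getEnum("REGULAR");
    // Prints the command-line form of the option (e.g. "-regular").
    System.out.println(regular.getName());

    // Parse a rolling-upgrade option together with its sub-option.
    StartupOption rolling = StartupOption.getEnum("ROLLINGUPGRADE(ROLLBACK)");
    RollingUpgradeStartupOption sub = rolling.getRollingUpgradeStartupOption();
    System.out.println(sub);  // ROLLBACK

    // FORMAT can carry a cluster id, as in the TestClusterId example below.
    StartupOption.FORMAT.setClusterId("mycluster");
  }
}
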
Example #1
Source File: TestClusterId.java    From big-c with Apache License 2.0
@Test
public void testFormatClusterIdOption() throws IOException {
  
  // 1. should format without cluster id
  //StartupOption.FORMAT.setClusterId("");
  NameNode.format(config);
  // verify that the cluster id is not empty.
  String cid = getClusterId(config);
  assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")) );

  // 2. successful format with given clusterid
  StartupOption.FORMAT.setClusterId("mycluster");
  NameNode.format(config);
  // verify that the cluster id matches the given cluster id.
  cid = getClusterId(config);
  assertTrue("ClusterId didn't match", cid.equals("mycluster"));

  // 3. format without any cluster id again. It should generate a new
  // cluster id.
  StartupOption.FORMAT.setClusterId("");
  NameNode.format(config);
  String newCid = getClusterId(config);
  assertFalse("ClusterId should not be the same", newCid.equals(cid));
}
 
Example #2
Source File: TestHDFSServerPorts.java    From big-c with Apache License 2.0
/**
 * Start the BackupNode
 */
public BackupNode startBackupNode(Configuration conf) throws IOException {
  // Set up testing environment directories
  hdfsDir = new File(TEST_DATA_DIR, "backupNode");
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  File currDir = new File(hdfsDir, "name2");
  File currDir2 = new File(currDir, "current");
  File currDir3 = new File(currDir, "image");
  
  assertTrue(currDir.mkdirs());
  assertTrue(currDir2.mkdirs());
  assertTrue(currDir3.mkdirs());
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name2")).toString());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
  
  // Start BackupNode
  String[] args = new String [] { StartupOption.BACKUP.getName() };
  BackupNode bu = (BackupNode)NameNode.createNameNode(args, conf);

  return bu;
}
 
Example #3
Source File: MiniDFSCluster.java    From big-c with Apache License 2.0
/**
 * Restart the namenode at a given index. Optionally wait for the cluster
 * to become active.
 */
public synchronized void restartNameNode(int nnIndex, boolean waitActive,
    String... args) throws IOException {
  String nameserviceId = nameNodes[nnIndex].nameserviceId;
  String nnId = nameNodes[nnIndex].nnId;
  StartupOption startOpt = nameNodes[nnIndex].startOpt;
  Configuration conf = nameNodes[nnIndex].conf;
  shutdownNameNode(nnIndex);
  if (args.length != 0) {
    startOpt = null;
  } else {
    args = createArgs(startOpt);
  }
  NameNode nn = NameNode.createNameNode(args, conf);
  nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId, startOpt,
      conf);
  if (waitActive) {
    waitClusterUp();
    LOG.info("Restarted the namenode");
    waitActive();
  }
}
 
Example #4
Source File: TestHdfsServerConstants.java    From big-c with Apache License 2.0
/**
 * Test that we can parse a StartupOption string without the optional
 * RollingUpgradeStartupOption.
 */
@Test
public void testStartupOptionParsing() {
  verifyStartupOptionResult("FORMAT", StartupOption.FORMAT, null);
  verifyStartupOptionResult("REGULAR", StartupOption.REGULAR, null);
  verifyStartupOptionResult("CHECKPOINT", StartupOption.CHECKPOINT, null);
  verifyStartupOptionResult("UPGRADE", StartupOption.UPGRADE, null);
  verifyStartupOptionResult("ROLLBACK", StartupOption.ROLLBACK, null);
  verifyStartupOptionResult("FINALIZE", StartupOption.FINALIZE, null);
  verifyStartupOptionResult("ROLLINGUPGRADE", StartupOption.ROLLINGUPGRADE, null);
  verifyStartupOptionResult("IMPORT", StartupOption.IMPORT, null);
  verifyStartupOptionResult("INITIALIZESHAREDEDITS", StartupOption.INITIALIZESHAREDEDITS, null);

  try {
    verifyStartupOptionResult("UNKNOWN(UNKNOWNOPTION)", StartupOption.FORMAT, null);
    fail("Failed to get expected IllegalArgumentException");
  } catch(IllegalArgumentException iae) {
    // Expected!
  }
}
 
Example #5
Source File: FSEditLogLoader.java    From big-c with Apache License 2.0
/**
 * Load an edit log, and apply the changes to the in-memory structure.
 * This is where we apply edits that we've been writing to disk all
 * along.
 */
long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId,
    StartupOption startOpt, MetaRecoveryContext recovery) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = createStartupProgressStep(edits);
  prog.beginStep(Phase.LOADING_EDITS, step);
  fsNamesys.writeLock();
  try {
    long startTime = monotonicNow();
    FSImage.LOG.info("Start loading edits file " + edits.getName());
    long numEdits = loadEditRecords(edits, false, expectedStartingTxId,
        startOpt, recovery);
    FSImage.LOG.info("Edits file " + edits.getName() 
        + " of size " + edits.length() + " edits # " + numEdits 
        + " loaded in " + (monotonicNow()-startTime)/1000 + " seconds");
    return numEdits;
  } finally {
    edits.close();
    fsNamesys.writeUnlock();
    prog.endStep(Phase.LOADING_EDITS, step);
  }
}
 
Example #6
Source File: Journal.java    From big-c with Apache License 2.0
Journal(Configuration conf, File logDir, String journalId,
    StartupOption startOpt, StorageErrorReporter errorReporter)
    throws IOException {
  storage = new JNStorage(conf, logDir, startOpt, errorReporter);
  this.journalId = journalId;

  refreshCachedData();
  
  this.fjm = storage.getJournalManager();
  
  this.metrics = JournalMetrics.create(this);
  
  EditLogFile latest = scanStorageForLatestEdits();
  if (latest != null) {
    highestWrittenTxId = latest.getLastTxId();
  }
}
 
Example #7
Source File: TestHdfsServerConstants.java    From hadoop with Apache License 2.0
/**
 * Test that we can parse a StartupOption string with a
 * RollingUpgradeStartupOption.
 */
@Test
public void testRollingUpgradeStartupOptionParsing() {
  verifyStartupOptionResult("ROLLINGUPGRADE(ROLLBACK)",
                            StartupOption.ROLLINGUPGRADE,
                            RollingUpgradeStartupOption.ROLLBACK);
  verifyStartupOptionResult("ROLLINGUPGRADE(DOWNGRADE)",
                            StartupOption.ROLLINGUPGRADE,
                            RollingUpgradeStartupOption.DOWNGRADE);
  verifyStartupOptionResult("ROLLINGUPGRADE(STARTED)",
      StartupOption.ROLLINGUPGRADE,
      RollingUpgradeStartupOption.STARTED);

  try {
    verifyStartupOptionResult("ROLLINGUPGRADE(UNKNOWNOPTION)", StartupOption.ROLLINGUPGRADE, null);
    fail("Failed to get expected IllegalArgumentException");
  } catch(IllegalArgumentException iae) {
    // Expected!
  }
}
 
Example #8
Source File: MiniDFSCluster.java    From hadoop with Apache License 2.0
private static String[] createArgs(StartupOption operation) {
  if (operation == StartupOption.ROLLINGUPGRADE) {
    return new String[]{operation.getName(),
        operation.getRollingUpgradeStartupOption().name()};
  }
  String[] args = (operation == null ||
      operation == StartupOption.FORMAT ||
      operation == StartupOption.REGULAR) ?
          new String[] {} : new String[] {operation.getName()};
  return args;
}
 
Example #9
Source File: TestEncryptedTransfer.java    From hadoop with Apache License 2.0
@Test
public void testEncryptedReadAfterNameNodeRestart() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).build();
    
    FileSystem fs = getFileSystem(conf);
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
    fs.close();
    cluster.shutdown();
    
    setEncryptionConfigKeys(conf);
    
    cluster = new MiniDFSCluster.Builder(conf)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();
    
    fs = getFileSystem(conf);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
    fs.close();
    
    cluster.restartNameNode();
    fs = getFileSystem(conf);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
    fs.close();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example #10
Source File: TestDFSStorageStateRecovery.java    From hadoop with Apache License 2.0
private MiniDFSCluster createCluster(Configuration c) throws IOException {
  return new MiniDFSCluster.Builder(c)
                           .numDataNodes(0)
                           .startupOption(StartupOption.REGULAR)
                           .format(false)
                           .manageDataDfsDirs(false)
                           .manageNameDfsDirs(false)
                           .build();
}
 
Example #11
Source File: TestJournal.java    From big-c with Apache License 2.0
@Before
public void setup() throws Exception {
  FileUtil.fullyDelete(TEST_LOG_DIR);
  conf = new Configuration();
  journal = new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
    mockErrorReporter);
  journal.format(FAKE_NSINFO);
}
 
Example #12
Source File: TestDatanodeConfig.java    From hadoop with Apache License 2.0
/**
 * Test that a data-node does not start if the configuration specifies an
 * incorrect URI scheme in a data directory.
 * Test that a data-node starts if the data directory is specified as
 * URI = "file:///path" or as a non-URI path.
 */
@Test
public void testDataDirectories() throws IOException {
  File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
  Configuration conf = cluster.getConfiguration(0);
  // 1. Test an unsupported URI scheme. Only "file:" is supported.
  String dnDir = makeURI("shv", null, fileAsURI(dataDir).getPath());
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dnDir);
  DataNode dn = null;
  try {
    dn = DataNode.createDataNode(new String[]{}, conf);
    fail();
  } catch(Exception e) {
    // expecting exception here
  } finally {
    if (dn != null) {
      dn.shutdown();
    }
  }
  assertNull("Data-node startup should have failed.", dn);

  // 2. Test "file:" schema and no schema (path-only). Both should work.
  String dnDir1 = fileAsURI(dataDir).toString() + "1";
  String dnDir2 = makeURI("file", "localhost",
                  fileAsURI(dataDir).getPath() + "2");
  String dnDir3 = dataDir.getAbsolutePath() + "3";
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
              dnDir1 + "," + dnDir2 + "," + dnDir3);
  try {
    cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
    assertTrue("Data-node should startup.", cluster.isDataNodeUp());
  } finally {
    if (cluster != null) {
      cluster.shutdownDataNodes();
    }
  }
}
 
Example #13
Source File: TestJournal.java    From hadoop with Apache License 2.0
@Test (timeout = 10000)
public void testJournalLocking() throws Exception {
  Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
  StorageDirectory sd = journal.getStorage().getStorageDir(0);
  File lockFile = new File(sd.getRoot(), Storage.STORAGE_FILE_LOCK);
  
  // Journal should be locked, since the format() call locks it.
  GenericTestUtils.assertExists(lockFile);

  journal.newEpoch(FAKE_NSINFO,  1);
  try {
    new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
        mockErrorReporter);
    fail("Did not fail to create another journal in same dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Cannot lock storage", ioe);
  }
  
  journal.close();
  
  // Journal should no longer be locked after the close() call.
  // Hence, should be able to create a new Journal in the same dir.
  Journal journal2 = new Journal(conf, TEST_LOG_DIR, JID,
      StartupOption.REGULAR, mockErrorReporter);
  journal2.newEpoch(FAKE_NSINFO, 2);
  journal2.close();
}
 
Example #14
Source File: TestFileTruncate.java    From big-c with Apache License 2.0
/**
 * The last block is truncated at mid (non copy-on-truncate).
 * Shut down the datanodes immediately after truncate.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesShutdownImmediately() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testTruncateWithDataNodesShutdownImmediately");

  writeContents(contents, startingFileSize, p);

  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  boolean isReady = fs.truncate(p, newLength);
  assertFalse(isReady);

  cluster.shutdownDataNodes();
  cluster.setDataNodesDead();
  try {
    for(int i = 0; i < SUCCESS_ATTEMPTS && cluster.isDataNodeUp(); i++) {
      Thread.sleep(SLEEP);
    }
    assertFalse("All DataNodes should be down.", cluster.isDataNodeUp());
    LocatedBlocks blocks = getLocatedBlocks(p);
    assertTrue(blocks.isUnderConstruction());
  } finally {
    cluster.startDataNodes(conf, DATANODE_NUM, true,
        StartupOption.REGULAR, null);
    cluster.waitActive();
  }
  checkBlockRecovery(p);

  fs.delete(parent, true);
}
 
Example #15
Source File: TestDFSStorageStateRecovery.java    From hadoop with Apache License 2.0
/**
 * This test iterates over the testCases table for Datanode storage and
 * attempts to start up the DataNode normally.
 */
@Test
public void testDNStorageStates() throws Exception {
  String[] baseDirs;

  // First setup the datanode storage directory
  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);      
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    for (int i = 0; i < NUM_DN_TEST_CASES; i++) {
      boolean[] testCase = testCases[i];
      boolean shouldRecover = testCase[SHOULD_RECOVER];
      boolean curAfterRecover = testCase[CURRENT_SHOULD_EXIST_AFTER_RECOVER];
      boolean prevAfterRecover = testCase[PREVIOUS_SHOULD_EXIST_AFTER_RECOVER];

      log("DATA_NODE recovery", numDirs, i, testCase);
      createNameNodeStorageState(new boolean[] { true, true, false, false,
          false });
      cluster = createCluster(conf);
      baseDirs = createDataNodeStorageState(testCase);
      if (!testCase[CURRENT_EXISTS] && !testCase[PREVIOUS_EXISTS] && !testCase[PREVIOUS_TMP_EXISTS] && !testCase[REMOVED_TMP_EXISTS]) {
        // DataNode will create and format current if no directories exist
        cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      } else {
        if (shouldRecover) {
          cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
          checkResultDataNode(baseDirs, curAfterRecover, prevAfterRecover);
        } else {
          cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
          assertFalse(cluster.getDataNodes().get(0).isDatanodeUp());
        }
      }
      cluster.shutdown();
    } // end testCases loop
  } // end numDirs loop
}
 
Example #16
Source File: NameNode.java    From hadoop with Apache License 2.0
protected HAState createHAState(StartupOption startOpt) {
  if (!haEnabled || startOpt == StartupOption.UPGRADE 
      || startOpt == StartupOption.UPGRADEONLY) {
    return ACTIVE_STATE;
  } else {
    return STANDBY_STATE;
  }
}
 
Example #17
Source File: FSImage.java    From hadoop with Apache License 2.0
public void initEditLog(StartupOption startOpt) throws IOException {
  Preconditions.checkState(getNamespaceID() != 0,
      "Must know namespace ID before initting edit log");
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (!HAUtil.isHAEnabled(conf, nameserviceId)) {
    // If this NN is not HA
    editLog.initJournalsForWrite();
    editLog.recoverUnclosedStreams();
  } else if (HAUtil.isHAEnabled(conf, nameserviceId)
      && (startOpt == StartupOption.UPGRADE
          || startOpt == StartupOption.UPGRADEONLY
          || RollingUpgradeStartupOption.ROLLBACK.matches(startOpt))) {
    // This NN is HA, but we're doing an upgrade or a rollback of rolling
    // upgrade so init the edit log for write.
    editLog.initJournalsForWrite();
    if (startOpt == StartupOption.UPGRADE
        || startOpt == StartupOption.UPGRADEONLY) {
      long sharedLogCTime = editLog.getSharedLogCTime();
      if (this.storage.getCTime() < sharedLogCTime) {
        throw new IOException("It looks like the shared log is already " +
            "being upgraded but this NN has not been upgraded yet. You " +
            "should restart this NameNode with the '" +
            StartupOption.BOOTSTRAPSTANDBY.getName() + "' option to bring " +
            "this NN in sync with the other.");
      }
    }
    editLog.recoverUnclosedStreams();
  } else {
    // This NN is HA and we're not doing an upgrade.
    editLog.initSharedJournalsForRead();
  }
}
 
Example #18
Source File: DataNode.java    From big-c with Apache License 2.0
/**
 * Initializes the {@link #data}. The initialization is done only once, when
 * handshake with the first namenode is completed.
 */
private void initStorage(final NamespaceInfo nsInfo) throws IOException {
  final FsDatasetSpi.Factory<? extends FsDatasetSpi<?>> factory
      = FsDatasetSpi.Factory.getFactory(conf);
  
  if (!factory.isSimulated()) {
    final StartupOption startOpt = getStartupOption(conf);
    if (startOpt == null) {
      throw new IOException("Startup option not set.");
    }
    final String bpid = nsInfo.getBlockPoolID();
    //read storage info, lock data dirs and transition fs state if necessary
    synchronized (this) {
      storage.recoverTransitionRead(this, nsInfo, dataDirs, startOpt);
    }
    final StorageInfo bpStorage = storage.getBPStorage(bpid);
    LOG.info("Setting up storage: nsid=" + bpStorage.getNamespaceID()
        + ";bpid=" + bpid + ";lv=" + storage.getLayoutVersion()
        + ";nsInfo=" + nsInfo + ";dnuuid=" + storage.getDatanodeUuid());
  }

  // If this is a newly formatted DataNode then assign a new DatanodeUuid.
  checkDatanodeUuid();

  synchronized(this)  {
    if (data == null) {
      data = factory.newInstance(this, storage, conf);
    }
  }
}
 
Example #19
Source File: NameNode.java    From big-c with Apache License 2.0
protected HAState createHAState(StartupOption startOpt) {
  if (!haEnabled || startOpt == StartupOption.UPGRADE 
      || startOpt == StartupOption.UPGRADEONLY) {
    return ACTIVE_STATE;
  } else {
    return STANDBY_STATE;
  }
}
 
Example #20
Source File: TestDFSStorageStateRecovery.java    From big-c with Apache License 2.0
private MiniDFSCluster createCluster(Configuration c) throws IOException {
  return new MiniDFSCluster.Builder(c)
                           .numDataNodes(0)
                           .startupOption(StartupOption.REGULAR)
                           .format(false)
                           .manageDataDfsDirs(false)
                           .manageNameDfsDirs(false)
                           .build();
}
 
Example #21
Source File: MiniDFSClusterWithNodeGroup.java    From hadoop with Apache License 2.0
public void startDataNodes(Configuration conf, int numDataNodes, 
    boolean manageDfsDirs, StartupOption operation, 
    String[] racks, long[] simulatedCapacities,
    String[] nodeGroups) throws IOException {
  startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, nodeGroups,
      null, simulatedCapacities, false);
}
 
Example #22
Source File: DataStorage.java    From hadoop with Apache License 2.0
/**
 * Prepare a storage directory. It creates a builder which can be used to add
 * to the volume. If the volume cannot be added, it is OK to discard the
 * builder later.
 *
 * @param datanode DataNode object.
 * @param volume the root path of a storage directory.
 * @param nsInfos an array of namespace infos.
 * @return a VolumeBuilder that holds the metadata of this storage directory
 * and can be added to DataStorage later.
 * @throws IOException if it encounters I/O errors.
 *
 * Note that if there is an IOException, the state of DataStorage is not modified.
 */
public VolumeBuilder prepareVolume(DataNode datanode, File volume,
    List<NamespaceInfo> nsInfos) throws IOException {
  if (containsStorageDir(volume)) {
    final String errorMessage = "Storage directory is in use";
    LOG.warn(errorMessage + ".");
    throw new IOException(errorMessage);
  }

  StorageDirectory sd = loadStorageDirectory(
      datanode, nsInfos.get(0), volume, StartupOption.HOTSWAP);
  VolumeBuilder builder =
      new VolumeBuilder(this, sd);
  for (NamespaceInfo nsInfo : nsInfos) {
    List<File> bpDataDirs = Lists.newArrayList();
    bpDataDirs.add(BlockPoolSliceStorage.getBpRoot(
        nsInfo.getBlockPoolID(), new File(volume, STORAGE_DIR_CURRENT)));
    makeBlockPoolDataDir(bpDataDirs, null);

    BlockPoolSliceStorage bpStorage;
    final String bpid = nsInfo.getBlockPoolID();
    synchronized (this) {
      bpStorage = this.bpStorageMap.get(bpid);
      if (bpStorage == null) {
        bpStorage = new BlockPoolSliceStorage(
            nsInfo.getNamespaceID(), bpid, nsInfo.getCTime(),
            nsInfo.getClusterID());
        addBlockPoolStorage(bpid, bpStorage);
      }
    }
    builder.addBpStorageDirectories(
        bpid, bpStorage.loadBpStorageDirectories(
            datanode, nsInfo, bpDataDirs, StartupOption.HOTSWAP));
  }
  return builder;
}
 
Example #23
Source File: TestDFSStorageStateRecovery.java    From hadoop with Apache License 2.0
/**
 * This test iterates over the testCases table for block pool storage and
 * attempts to start up the DataNode normally.
 */
@Test
public void testBlockPoolStorageStates() throws Exception {
  String[] baseDirs;

  // First setup the datanode storage directory
  String bpid = UpgradeUtilities.getCurrentBlockPoolID(null);
  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new HdfsConfiguration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);      
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    for (int i = 0; i < NUM_DN_TEST_CASES; i++) {
      boolean[] testCase = testCases[i];
      boolean shouldRecover = testCase[SHOULD_RECOVER];
      boolean curAfterRecover = testCase[CURRENT_SHOULD_EXIST_AFTER_RECOVER];
      boolean prevAfterRecover = testCase[PREVIOUS_SHOULD_EXIST_AFTER_RECOVER];

      log("BLOCK_POOL recovery", numDirs, i, testCase);
      createNameNodeStorageState(new boolean[] { true, true, false, false,
          false });
      cluster = createCluster(conf);
      baseDirs = createBlockPoolStorageState(bpid, testCase);
      if (!testCase[CURRENT_EXISTS] && !testCase[PREVIOUS_EXISTS] && !testCase[PREVIOUS_TMP_EXISTS] && !testCase[REMOVED_TMP_EXISTS]) {
        // DataNode will create and format current if no directories exist
        cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      } else {
        if (shouldRecover) {
          cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
          checkResultBlockPool(baseDirs, curAfterRecover, prevAfterRecover);
        } else {
          cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
          assertFalse(cluster.getDataNodes().get(0).isBPServiceAlive(bpid));
        }
      }
      cluster.shutdown();
    } // end testCases loop
  } // end numDirs loop
}
 
Example #24
Source File: JNStorage.java    From big-c with Apache License 2.0
/**
 * @param conf Configuration object
 * @param logDir the path to the directory in which data will be stored
 * @param startOpt the startup option, used when analyzing and recovering the storage state
 * @param errorReporter a callback to report errors
 * @throws IOException 
 */
protected JNStorage(Configuration conf, File logDir, StartupOption startOpt,
    StorageErrorReporter errorReporter) throws IOException {
  super(NodeType.JOURNAL_NODE);
  
  sd = new StorageDirectory(logDir);
  this.addStorageDir(sd);
  this.fjm = new FileJournalManager(conf, sd, errorReporter);

  analyzeAndRecoverStorage(startOpt);
}
 
Example #25
Source File: TestDFSStorageStateRecovery.java    From big-c with Apache License 2.0
/**
 * This test iterates over the testCases table for block pool storage and
 * attempts to start up the DataNode normally.
 */
@Test
public void testBlockPoolStorageStates() throws Exception {
  String[] baseDirs;

  // First setup the datanode storage directory
  String bpid = UpgradeUtilities.getCurrentBlockPoolID(null);
  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new HdfsConfiguration();
    conf.setInt("dfs.datanode.scan.period.hours", -1);      
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    for (int i = 0; i < NUM_DN_TEST_CASES; i++) {
      boolean[] testCase = testCases[i];
      boolean shouldRecover = testCase[SHOULD_RECOVER];
      boolean curAfterRecover = testCase[CURRENT_SHOULD_EXIST_AFTER_RECOVER];
      boolean prevAfterRecover = testCase[PREVIOUS_SHOULD_EXIST_AFTER_RECOVER];

      log("BLOCK_POOL recovery", numDirs, i, testCase);
      createNameNodeStorageState(new boolean[] { true, true, false, false,
          false });
      cluster = createCluster(conf);
      baseDirs = createBlockPoolStorageState(bpid, testCase);
      if (!testCase[CURRENT_EXISTS] && !testCase[PREVIOUS_EXISTS] && !testCase[PREVIOUS_TMP_EXISTS] && !testCase[REMOVED_TMP_EXISTS]) {
        // DataNode will create and format current if no directories exist
        cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      } else {
        if (shouldRecover) {
          cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
          checkResultBlockPool(baseDirs, curAfterRecover, prevAfterRecover);
        } else {
          cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
          assertFalse(cluster.getDataNodes().get(0).isBPServiceAlive(bpid));
        }
      }
      cluster.shutdown();
    } // end testCases loop
  } // end numDirs loop
}
 
Example #26
Source File: TestHdfsServerConstants.java    From hadoop with Apache License 2.0
/**
 * Verify that parsing a StartupOption string gives the expected results.
 * If a RollingUpgradeStartupOption is specified then it is also checked.
 *
 * @param value the StartupOption string to parse
 * @param expectedOption the StartupOption expected from parsing
 * @param expectedRollupOption optional, may be null.
 */
private static void verifyStartupOptionResult(String value,
    StartupOption expectedOption,
    RollingUpgradeStartupOption expectedRollupOption) {

  StartupOption option = StartupOption.getEnum(value);
  assertEquals(expectedOption, option);

  if (expectedRollupOption != null) {
    assertEquals(expectedRollupOption, option.getRollingUpgradeStartupOption());
  }
}
 
Example #27
Source File: TestDFSUpgradeFromImage.java    From big-c with Apache License 2.0
void upgradeAndVerify(MiniDFSCluster.Builder bld, ClusterVerifier verifier)
    throws IOException {
  MiniDFSCluster cluster = null;
  try {
    bld.format(false).startupOption(StartupOption.UPGRADE)
      .clusterId("testClusterId");
    cluster = bld.build();
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    DFSClient dfsClient = dfs.dfs;
    //Safemode will be off only after upgrade is complete. Wait for it.
    while ( dfsClient.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET) ) {
      LOG.info("Waiting for SafeMode to be OFF.");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {}
    }
    recoverAllLeases(dfsClient, new Path("/"));
    verifyFileSystem(dfs);

    if (verifier != null) {
      verifier.verifyClusterPostUpgrade(cluster);
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  } 
}
 
Example #28
Source File: TestDFSUpgrade.java    From hadoop with Apache License 2.0
/**
 * Attempts to start a NameNode with the given operation.  Starting
 * the NameNode should throw an exception.
 * @param operation - NameNode startup operation
 * @param exceptionClass - if non-null, will check that the caught exception
 *     is assignment-compatible with exceptionClass
 * @param messagePattern - if non-null, will check that a substring of the 
 *     message from the caught exception matches this pattern, via the
 *     {@link Matcher#find()} method.
 */
void startNameNodeShouldFail(StartupOption operation,
    Class<? extends Exception> exceptionClass, Pattern messagePattern) {
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                              .startupOption(operation)
                                              .format(false)
                                              .manageDataDfsDirs(false)
                                              .manageNameDfsDirs(false)
                                              .build(); // should fail
    fail("NameNode should have failed to start");
    
  } catch (Exception e) {
    // expect exception
    if (exceptionClass != null) {
      assertTrue("Caught exception is not of expected class "
          + exceptionClass.getSimpleName() + ": "
          + StringUtils.stringifyException(e), 
          exceptionClass.isInstance(e));
    }
    if (messagePattern != null) {
      assertTrue("Caught exception message string does not match expected pattern \""
          + messagePattern.pattern() + "\" : "
          + StringUtils.stringifyException(e), 
          messagePattern.matcher(e.getMessage()).find());
    }
    LOG.info("Successfully detected expected NameNode startup failure.");
  }
}
 
Example #29
Source File: TestFileAppendRestart.java    From hadoop with Apache License 2.0
/**
 * Earlier versions of HDFS had a bug (HDFS-2991) which caused
 * append(), when called exactly at a block boundary,
 * to not log an OP_ADD. This ensures that we can read from
 * such buggy versions correctly, by loading a namesystem image
 * created with 0.23.1-rc2 that exhibits the issue.
 */
@Test
public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
  final Configuration conf = new HdfsConfiguration();

  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_23_BROKEN_APPEND_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-buggy-append");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .numDataNodes(0)
    .waitSafeMode(false)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/io_data/test_io_0");
    assertEquals(2*1024*1024, fs.getFileStatus(testPath).getLen());
  } finally {
    cluster.shutdown();
  }
}
 
Example #30
Source File: MiniDFSClusterWithNodeGroup.java    From hadoop with Apache License 2.0
@Override
public synchronized void startDataNodes(Configuration conf, int numDataNodes, 
    StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
    String[] racks, String[] hosts,
    long[][] storageCapacities,
    long[] simulatedCapacities,
    boolean setupHostsFile,
    boolean checkDataNodeAddrConfig,
    boolean checkDataNodeHostConfig,
    Configuration[] dnConfOverlays) throws IOException {
  startDataNodes(conf, numDataNodes, storageTypes, manageDfsDirs, operation, racks,
      NODE_GROUPS, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
      checkDataNodeAddrConfig, checkDataNodeHostConfig);
}