Java Code Examples for org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory

The following examples show how to use org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory. These examples are extracted from open source projects; the source project, source file, and license are noted above each example.
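Most of the examples below follow the same basic pattern: obtain StorageDirectory instances from an NNStorage (or another Storage subclass), then work with the directory-layout accessors such as getRoot(), getCurrentDir(), and getPreviousDir(). The sketch below is ours, not an extracted example, and assumes an already-initialized NNStorage; every call it makes appears in the examples that follow.

private static void listStorageDirs(NNStorage storage) {
  // dirIterable(null) visits every storage directory regardless of type;
  // pass NameNodeDirType.IMAGE or NameNodeDirType.EDITS to filter.
  for (StorageDirectory sd : storage.dirIterable(null)) {
    File root = sd.getRoot();            // the configured storage root
    File current = sd.getCurrentDir();   // <root>/current, the live state
    File previous = sd.getPreviousDir(); // <root>/previous, present only mid-upgrade
    System.out.println(root + ": current exists = " + current.exists()
        + ", previous exists = " + previous.exists());
  }
}
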
Example 1
Source Project: hadoop   Source File: FSImageTestUtil.java    License: Apache License 2.0
/**
 * Return a standalone instance of FSEditLog that will log into the given
 * log directory. The returned instance is not yet opened.
 */
public static FSEditLog createStandaloneEditLog(File logDir)
    throws IOException {
  assertTrue(logDir.mkdirs() || logDir.exists());
  if (!FileUtil.fullyDeleteContents(logDir)) {
    throw new IOException("Unable to delete contents of " + logDir);
  }
  NNStorage storage = Mockito.mock(NNStorage.class);
  StorageDirectory sd 
    = FSImageTestUtil.mockStorageDirectory(logDir, NameNodeDirType.EDITS);
  List<StorageDirectory> sds = Lists.newArrayList(sd);
  Mockito.doReturn(sds).when(storage).dirIterable(NameNodeDirType.EDITS);
  Mockito.doReturn(sd).when(storage)
    .getStorageDirectory(Matchers.<URI>anyObject());

  FSEditLog editLog = new FSEditLog(new Configuration(), 
                       storage,
                       ImmutableList.of(logDir.toURI()));
  editLog.initJournalsForWrite();
  return editLog;
}
 
Example 2
Source Project: big-c   Source File: TestCheckpoint.java    License: Apache License 2.0
/**
 * Assert that, if sdToLock is locked, the cluster is not allowed to start up.
 * @param conf cluster conf to use
 * @param sdToLock the storage directory to lock
 */
private static void assertClusterStartFailsWhenDirLocked(
    Configuration conf, StorageDirectory sdToLock) throws IOException {
  // Lock the edits dir, then start the NN, and make sure it fails to start
  sdToLock.lock();
  MiniDFSCluster cluster = null;
  try {      
    cluster = new MiniDFSCluster.Builder(conf).format(false)
        .manageNameDfsDirs(false).numDataNodes(0).build();
    assertFalse("cluster should fail to start after locking " +
        sdToLock, sdToLock.isLockSupported());
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("already locked", ioe);
  } finally {
    cleanup(cluster);
    cluster = null;
    sdToLock.unlock();
  }
}
 
Example 3
Source Project: hadoop-gpu   Source File: FSEditLog.java    License: Apache License 2.0
/**
 * If there is an IO error on any log operation in a storage directory,
 * remove any stream associated with that directory.
 */
synchronized void processIOError(StorageDirectory sd) {
  // Try to remove stream only if one should exist
  if (!sd.getStorageDirType().isOfType(NameNodeDirType.EDITS))
    return;
  if (editStreams == null || editStreams.size() <= 1) {
    FSNamesystem.LOG.fatal(
        "Fatal Error : All storage directories are inaccessible.");
    Runtime.getRuntime().exit(-1);
  }
  for (int idx = 0; idx < editStreams.size(); idx++) {
    File parentStorageDir = ((EditLogFileOutputStream)editStreams
                                     .get(idx)).getFile()
                                     .getParentFile().getParentFile();
    if (parentStorageDir.getName().equals(sd.getRoot().getName()))
      editStreams.remove(idx);
  }
}
 
Example 4
Source Project: hadoop   Source File: FSImage.java    License: Apache License 2.0
/**
 * Rename all the fsimage files with the specific NameNodeFile type. The
 * associated checksum files will also be renamed.
 */
void renameCheckpoint(NameNodeFile fromNnf, NameNodeFile toNnf)
    throws IOException {
  ArrayList<StorageDirectory> al = null;
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector(EnumSet.of(fromNnf));
  storage.inspectStorageDirs(inspector);
  for (FSImageFile image : inspector.getFoundImages()) {
    try {
      renameImageFileInDir(image.sd, fromNnf, toNnf, image.txId, true);
    } catch (IOException ioe) {
      LOG.warn("Unable to rename checkpoint in " + image.sd, ioe);
      if (al == null) {
        al = Lists.newArrayList();
      }
      al.add(image.sd);
    }
  }
  if (al != null) {
    storage.reportErrorsOnDirectories(al);
  }
}
 
Example 5
Source Project: hadoop   Source File: NNUpgradeUtil.java    License: Apache License 2.0
/**
 * Return true if this storage dir can roll back to the previous storage
 * state, false otherwise. The NN will refuse to run the rollback operation
 * unless at least one JM or fsimage storage directory can roll back.
 * 
 * @param sd the storage directory to check
 * @param storage the storage info for the current state
 * @param prevStorage the storage info for the previous (unupgraded) state
 * @param targetLayoutVersion the layout version we intend to roll back to
 * @return true if this storage directory can roll back, false otherwise.
 * @throws IOException in the event of error
 */
static boolean canRollBack(StorageDirectory sd, StorageInfo storage,
    StorageInfo prevStorage, int targetLayoutVersion) throws IOException {
  File prevDir = sd.getPreviousDir();
  if (!prevDir.exists()) {  // use current directory then
    LOG.info("Storage directory " + sd.getRoot()
             + " does not contain previous fs state.");
    // read and verify consistency with other directories
    storage.readProperties(sd);
    return false;
  }

  // read and verify consistency of the prev dir
  prevStorage.readPreviousVersionProperties(sd);

  if (prevStorage.getLayoutVersion() != targetLayoutVersion) {
    throw new IOException(
      "Cannot rollback to storage version " +
      prevStorage.getLayoutVersion() +
      " using this version of the NameNode, which uses storage version " +
      targetLayoutVersion + ". " +
      "Please use the previous version of HDFS to perform the rollback.");
  }
  
  return true;
}
 
Example 6
Source Project: hadoop   Source File: NNUpgradeUtil.java    License: Apache License 2.0
/**
 * Finalize the upgrade. The previous dir, if any, will be renamed and
 * removed. After this is completed, rollback is no longer allowed.
 * 
 * @param sd the storage directory to finalize
 * @throws IOException in the event of error
 */
static void doFinalize(StorageDirectory sd) throws IOException {
  File prevDir = sd.getPreviousDir();
  if (!prevDir.exists()) { // already discarded
    LOG.info("Directory " + prevDir + " does not exist.");
    LOG.info("Finalize upgrade for " + sd.getRoot()+ " is not required.");
    return;
  }
  LOG.info("Finalizing upgrade of storage directory " + sd.getRoot());
  Preconditions.checkState(sd.getCurrentDir().exists(),
      "Current directory must exist.");
  final File tmpDir = sd.getFinalizedTmp();
  // rename previous to tmp and remove
  NNStorage.rename(prevDir, tmpDir);
  NNStorage.deleteDir(tmpDir);
  LOG.info("Finalize upgrade for " + sd.getRoot()+ " is complete.");
}
 
Example 7
Source Project: hadoop   Source File: NNUpgradeUtil.java    License: Apache License 2.0
/**
 * Rename the existing current dir to previous.tmp, and create a new empty
 * current dir.
 */
public static void renameCurToTmp(StorageDirectory sd) throws IOException {
  File curDir = sd.getCurrentDir();
  File prevDir = sd.getPreviousDir();
  final File tmpDir = sd.getPreviousTmp();

  Preconditions.checkState(curDir.exists(),
      "Current directory must exist for preupgrade.");
  Preconditions.checkState(!prevDir.exists(),
      "Previous directory must not exist for preupgrade.");
  Preconditions.checkState(!tmpDir.exists(),
      "Previous.tmp directory must not exist for preupgrade."
          + "Consider restarting for recovery.");

  // rename current to tmp
  NNStorage.rename(curDir, tmpDir);

  if (!curDir.mkdir()) {
    throw new IOException("Cannot create directory " + curDir);
  }
}
 
Example 8
Source Project: RDFS   Source File: FSDataset.java    License: Apache License 2.0
public void addVolumes(Configuration conf, int namespaceId, String nsDir,
    Collection<StorageDirectory> dirs) throws Exception {
  if (dirs == null || dirs.isEmpty()) {
    return;
  }
  FSVolume[] volArray = new FSVolume[dirs.size()];
  File[] dirArray = new File[dirs.size()];
  int idx = 0;
  for (Iterator<StorageDirectory> iter = dirs.iterator(); iter.hasNext(); idx++) {
    dirArray[idx] = iter.next().getCurrentDir();
    volArray[idx] = new FSVolume(this, dirArray[idx], conf);
  }

  lock.writeLock().lock();
  try {
    volumes.addVolumes(volArray);
    for (FSVolume vol : volArray) {
      vol.addNamespace(namespaceId, nsDir, conf, datanode.isSupportAppends());
    }
  } finally {
    lock.writeLock().unlock();
  }

  asyncDiskService.insertDisk(dirArray, conf);
}
 
Example 9
Source Project: hadoop   Source File: NNUpgradeUtil.java    License: Apache License 2.0
/**
 * Perform rollback of the storage dir to the previous state. The existing
 * current dir is removed, and the previous dir is renamed to current.
 * 
 * @param sd the storage directory to roll back.
 * @throws IOException in the event of error
 */
static void doRollBack(StorageDirectory sd)
    throws IOException {
  File prevDir = sd.getPreviousDir();
  if (!prevDir.exists()) {
    return;
  }

  File tmpDir = sd.getRemovedTmp();
  Preconditions.checkState(!tmpDir.exists(),
      "removed.tmp directory must not exist for rollback."
          + "Consider restarting for recovery.");
  // rename current to tmp
  File curDir = sd.getCurrentDir();
  Preconditions.checkState(curDir.exists(),
      "Current directory must exist for rollback.");

  NNStorage.rename(curDir, tmpDir);
  // rename previous to current
  NNStorage.rename(prevDir, curDir);

  // delete tmp dir
  NNStorage.deleteDir(tmpDir);
  LOG.info("Rollback of " + sd.getRoot() + " is complete.");
}
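
Taken together, Examples 5, 6, 7, and 9 cover the NameNode storage upgrade lifecycle: renameCurToTmp() runs during preupgrade, canRollBack() guards the rollback path, doRollBack() restores the previous state, and doFinalize() discards it for good. The driver sketch below is hypothetical (the method name is ours, not a Hadoop API) and only shows how the utilities relate; the real NameNode also rewrites VERSION files and fsimage contents between these steps.

// Hypothetical driver sketch, not Hadoop code.
static void upgradeOrRollBack(StorageDirectory sd, NNStorage storage,
    StorageInfo prevStorage, int targetLayoutVersion, boolean rollBack)
    throws IOException {
  if (rollBack) {
    if (NNUpgradeUtil.canRollBack(sd, storage, prevStorage,
        targetLayoutVersion)) {
      NNUpgradeUtil.doRollBack(sd);       // previous -> current (Example 9)
    }
  } else {
    NNUpgradeUtil.renameCurToTmp(sd);     // current -> previous.tmp, new empty current (Example 7)
    // ... write the upgraded state into sd.getCurrentDir() ...
    NNUpgradeUtil.doUpgrade(sd, storage); // renames previous.tmp into place (see Example 22)
    // later, once the upgrade is accepted:
    NNUpgradeUtil.doFinalize(sd);         // previous -> finalized.tmp, then deleted (Example 6)
  }
}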
 
Example 10
Source Project: hadoop-gpu   Source File: FSEditLog.java    License: Apache License 2.0
/**
 * Create empty edit log files.
 * Initialize the output stream for logging.
 * 
 * @throws IOException
 */
public synchronized void open() throws IOException {
  numTransactions = totalTimeTransactions = numTransactionsBatchedInSync = 0;
  if (editStreams == null)
    editStreams = new ArrayList<EditLogOutputStream>();
  for (Iterator<StorageDirectory> it = 
         fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
    StorageDirectory sd = it.next();
    File eFile = getEditFile(sd);
    try {
      EditLogOutputStream eStream = new EditLogFileOutputStream(eFile);
      editStreams.add(eStream);
    } catch (IOException e) {
      FSNamesystem.LOG.warn("Unable to open edit log file " + eFile);
      // Remove the directory from list of storage directories
      it.remove();
    }
  }
}
 
Example 11
Source Project: hadoop   Source File: TestStartup.java    License: Apache License 2.0
/**
 * Verify that the edits log and fsimage are in different directories
 * and of the correct size.
 */
private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expectedEditsSize) {
  StorageDirectory sd = null;
  for (Iterator<StorageDirectory> it = img.getStorage().dirIterator(); it.hasNext();) {
    sd = it.next();
    if (sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
      File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0);
      LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize);
      assertEquals(expectedImgSize, imf.length());
    } else if (sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
      File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0);
      LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length() + "; expected = " + expectedEditsSize);
      assertEquals(expectedEditsSize, edf.length());
    } else {
      fail("Image/Edits directories are not different");
    }
  }
}
 
Example 12
Source Project: big-c   Source File: FSImage.java    License: Apache License 2.0
private void renameImageFileInDir(StorageDirectory sd, NameNodeFile fromNnf,
    NameNodeFile toNnf, long txid, boolean renameMD5) throws IOException {
  final File fromFile = NNStorage.getStorageFile(sd, fromNnf, txid);
  final File toFile = NNStorage.getStorageFile(sd, toNnf, txid);
  // renameTo fails on Windows if the destination file already exists.
  if (LOG.isDebugEnabled()) {
    LOG.debug("renaming " + fromFile.getAbsolutePath()
              + " to " + toFile.getAbsolutePath());
  }
  if (!fromFile.renameTo(toFile)) {
    if (!toFile.delete() || !fromFile.renameTo(toFile)) {
      throw new IOException("renaming " + fromFile.getAbsolutePath() + " to " +
          toFile.getAbsolutePath() + " FAILED");
    }
  }
  if (renameMD5) {
    MD5FileUtils.renameMD5File(fromFile, toFile);
  }
}
 
Example 13
Source Project: big-c   Source File: TestParallelImageWrite.java    License: Apache License 2.0
/**
 * Confirm that the FSImage files in all StorageDirectory are the same and
 * non-empty, and that there are the expected number of them.
 * @param fsn the FSNamesystem being checked.
 * @param numImageDirs the configured number of StorageDirectory of type IMAGE.
 * @return the md5 hash of the most recent FSImage files, which must all be the same.
 * @throws AssertionError if image files are empty or different,
 *     if fewer than two StorageDirectory are provided, or if the
 *     actual number of StorageDirectory is less than configured.
 */
public static String checkImages(
    FSNamesystem fsn, int numImageDirs) throws Exception {
  NNStorage stg = fsn.getFSImage().getStorage();
  // any failed StorageDirectory is removed from the storageDirs list
  assertEquals("Some StorageDirectories failed Upgrade",
      numImageDirs, stg.getNumStorageDirs(NameNodeDirType.IMAGE));
  assertTrue("Not enough fsimage copies in MiniDFSCluster " + 
      "to test parallel write", numImageDirs > 1);

  // List of "current/" directory from each SD
  List<File> dirs = FSImageTestUtil.getCurrentDirs(stg, NameNodeDirType.IMAGE);

  // across directories, all files with same names should be identical hashes   
  FSImageTestUtil.assertParallelFilesAreIdentical(
      dirs, Collections.<String>emptySet());
  FSImageTestUtil.assertSameNewestImage(dirs);
  
  // Return the hash of the newest image file
  StorageDirectory firstSd = stg.dirIterator(NameNodeDirType.IMAGE).next();
  File latestImage = FSImageTestUtil.findLatestImageFile(firstSd);
  String md5 = FSImageTestUtil.getImageFileMD5IgnoringTxId(latestImage);
  System.err.println("md5 of " + latestImage + ": " + md5);
  return md5;
}
 
Example 14
Source Project: big-c   Source File: TestFileJournalManager.java    License: Apache License 2.0
@Test
public void testGetRemoteEditLog() throws IOException {
  StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(
      NameNodeDirType.EDITS, false,
      NNStorage.getFinalizedEditsFileName(1, 100),
      NNStorage.getFinalizedEditsFileName(101, 200),
      NNStorage.getInProgressEditsFileName(201),
      NNStorage.getFinalizedEditsFileName(1001, 1100));
      
  // passing null for NNStorage because this unit test will not use it
  FileJournalManager fjm = new FileJournalManager(conf, sd, null);
  assertEquals("[1,100],[101,200],[1001,1100]", getLogsAsString(fjm, 1));
  assertEquals("[101,200],[1001,1100]", getLogsAsString(fjm, 101));
  assertEquals("[101,200],[1001,1100]", getLogsAsString(fjm, 150));
  assertEquals("[1001,1100]", getLogsAsString(fjm, 201));
  assertEquals("Asking for a newer log than exists should return empty list",
      "", getLogsAsString(fjm, 9999));
}
 
Example 15
Source Project: hadoop   Source File: UpgradeUtilities.java    License: Apache License 2.0
/**
 * Create a <code>version</code> file for datanode inside the specified parent
 * directory.  If such a file already exists, it will be overwritten.
 * The given version string will be written to the file as the layout
 * version. None of the parameters may be null.
 *
 * @param parent directory where the datanode VERSION file is stored
 * @param version StorageInfo to create VERSION file from
 * @param bpid Block pool Id
 * @param bpidToWrite Block pool Id to write into the version file
 */
public static void createDataNodeVersionFile(File[] parent,
    StorageInfo version, String bpid, String bpidToWrite) throws IOException {
  DataStorage storage = new DataStorage(version);
  storage.setDatanodeUuid("FixedDatanodeUuid");

  File[] versionFiles = new File[parent.length];
  for (int i = 0; i < parent.length; i++) {
    File versionFile = new File(parent[i], "VERSION");
    StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
    storage.createStorageID(sd, false);
    storage.writeProperties(versionFile, sd);
    versionFiles[i] = versionFile;
    File bpDir = BlockPoolSliceStorage.getBpRoot(bpid, parent[i]);
    createBlockPoolVersionFile(bpDir, version, bpidToWrite);
  }
}
 
Example 16
Source Project: hadoop-gpu   Source File: TestStartup.java    License: Apache License 2.0
/**
 * Verify that the edits log and fsimage are in different directories
 * and of the correct size.
 */
private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expectedEditsSize) {
  StorageDirectory sd = null;
  for (Iterator<StorageDirectory> it = img.dirIterator(); it.hasNext();) {
    sd = it.next();
    if (sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
      File imf = FSImage.getImageFile(sd, NameNodeFile.IMAGE);
      LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize);
      assertEquals(expectedImgSize, imf.length());
    } else if (sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
      File edf = FSImage.getImageFile(sd, NameNodeFile.EDITS);
      LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length() + "; expected = " + expectedEditsSize);
      assertEquals(expectedEditsSize, edf.length());
    } else {
      fail("Image/Edits directories are not different");
    }
  }
}
 
Example 17
Source Project: big-c   Source File: TestFileJournalManager.java    License: Apache License 2.0
/** 
 * Test that we can load an edits directory with a corrupt inprogress file.
 * The corrupt inprogress file should be moved to the side.
 */
@Test
public void testManyLogsWithCorruptInprogress() throws IOException {
  File f = new File(TestEditLog.TEST_DIR + "/manylogswithcorruptinprogress");
  NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 10, new AbortSpec(10, 0));
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();

  File[] files = new File(f, "current").listFiles(new FilenameFilter() {
      @Override
      public boolean accept(File dir, String name) {
        return name.startsWith("edits_inprogress");
      }
    });
  assertEquals(1, files.length);
  
  corruptAfterStartSegment(files[0]);

  FileJournalManager jm = new FileJournalManager(conf, sd, storage);
  assertEquals(10*TXNS_PER_ROLL+1, 
               getNumberOfTransactions(jm, 1, true, false));
}
 
Example 18
Source Project: hadoop   Source File: TestFileJournalManager.java    License: Apache License 2.0
/**
 * Test the normal operation of loading transactions from the
 * file journal manager. Three edits directories are set up without any
 * failures. Test that we read in the expected number of transactions.
 */
@Test
public void testNormalOperation() throws IOException {
  File f1 = new File(TestEditLog.TEST_DIR + "/normtest0");
  File f2 = new File(TestEditLog.TEST_DIR + "/normtest1");
  File f3 = new File(TestEditLog.TEST_DIR + "/normtest2");
  
  List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI(), f3.toURI());
  NNStorage storage = setupEdits(editUris, 5);
  
  long numJournals = 0;
  for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
    assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
    numJournals++;
  }
  assertEquals(3, numJournals);
}
 
Example 19
Source Project: hadoop   Source File: TestFileJournalManager.java    License: Apache License 2.0
/**
 * Test a mixture of inprogress and finalised files. Set up 3 edits
 * directories and fail the second on the last roll. Verify that reading
 * the transactions reads from the finalised directories.
 */
@Test
public void testInprogressRecoveryMixed() throws IOException {
  File f1 = new File(TestEditLog.TEST_DIR + "/mixtest0");
  File f2 = new File(TestEditLog.TEST_DIR + "/mixtest1");
  File f3 = new File(TestEditLog.TEST_DIR + "/mixtest2");
  
  List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI(), f3.toURI());

  // abort after the 5th roll 
  NNStorage storage = setupEdits(editUris,
                                 5, new AbortSpec(5, 1));
  Iterator<StorageDirectory> dirs = storage.dirIterator(NameNodeDirType.EDITS);
  StorageDirectory sd = dirs.next();
  FileJournalManager jm = new FileJournalManager(conf, sd, storage);
  assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
  
  sd = dirs.next();
  jm = new FileJournalManager(conf, sd, storage);
  assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1,
      true, false));

  sd = dirs.next();
  jm = new FileJournalManager(conf, sd, storage);
  assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
}
 
Example 20
Source Project: hadoop   Source File: TestFileJournalManager.java    License: Apache License 2.0
@Test(expected=IllegalStateException.class)
public void testFinalizeErrorReportedToNNStorage() throws IOException, InterruptedException {
  File f = new File(TestEditLog.TEST_DIR + "/filejournaltestError");
  // abort after 10th roll
  NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()),
                                 10, new AbortSpec(10, 0));
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();

  FileJournalManager jm = new FileJournalManager(conf, sd, storage);
  String sdRootPath = sd.getRoot().getAbsolutePath();
  FileUtil.chmod(sdRootPath, "-w", true);
  try {
    jm.finalizeLogSegment(0, 1);
  } finally {
    FileUtil.chmod(sdRootPath, "+w", true);
    assertTrue(storage.getRemovedStorageDirs().contains(sd));
  }
}
 
Example 21
Source Project: big-c   Source File: TestJournal.java    License: Apache License 2.0
@Test (timeout = 10000)
public void testJournalLocking() throws Exception {
  Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
  StorageDirectory sd = journal.getStorage().getStorageDir(0);
  File lockFile = new File(sd.getRoot(), Storage.STORAGE_FILE_LOCK);
  
  // Journal should be locked, since the format() call locks it.
  GenericTestUtils.assertExists(lockFile);

  journal.newEpoch(FAKE_NSINFO, 1);
  try {
    new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
        mockErrorReporter);
    fail("Did not fail to create another journal in same dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Cannot lock storage", ioe);
  }
  
  journal.close();
  
  // Journal should no longer be locked after the close() call.
  // Hence, should be able to create a new Journal in the same dir.
  Journal journal2 = new Journal(conf, TEST_LOG_DIR, JID,
      StartupOption.REGULAR, mockErrorReporter);
  journal2.newEpoch(FAKE_NSINFO, 2);
  journal2.close();
}
 
Example 22
Source Project: hadoop   Source File: BootstrapStandby.java    License: Apache License 2.0
private void doUpgrade(NNStorage storage) throws IOException {
  for (Iterator<StorageDirectory> it = storage.dirIterator(false);
       it.hasNext();) {
    StorageDirectory sd = it.next();
    NNUpgradeUtil.doUpgrade(sd, storage);
  }
}
 
Example 23
Source Project: hadoop   Source File: FSImage.java    License: Apache License 2.0
/** Check if upgrade is in progress. */
public static void checkUpgrade(NNStorage storage) throws IOException {
  // Upgrade or rolling upgrade is allowed only if there are 
  // no previous fs states in any of the directories
  for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext();) {
    StorageDirectory sd = it.next();
    if (sd.getPreviousDir().exists())
      throw new InconsistentFSStateException(sd.getRoot(),
          "previous fs state should not exist during upgrade. "
          + "Finalize or rollback first.");
  }
}
 
Example 24
Source Project: big-c   Source File: FSImageTestUtil.java    License: Apache License 2.0
public static void logStorageContents(Log LOG, NNStorage storage) {
  LOG.info("current storages and corresponding sizes:");
  for (StorageDirectory sd : storage.dirIterable(null)) {
    File curDir = sd.getCurrentDir();
    LOG.info("In directory " + curDir);
    File[] files = curDir.listFiles();
    Arrays.sort(files);
    for (File f : files) {
      LOG.info("  file " + f.getAbsolutePath() + "; len = " + f.length());  
    }
  }
}
 
Example 25
Source Project: hadoop   Source File: FSImage.java    License: Apache License 2.0
/**
 * Save the contents of the FS image to the file.
 */
void saveFSImage(SaveNamespaceContext context, StorageDirectory sd,
    NameNodeFile dstType) throws IOException {
  long txid = context.getTxId();
  File newFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid);
  File dstFile = NNStorage.getStorageFile(sd, dstType, txid);
  
  FSImageFormatProtobuf.Saver saver = new FSImageFormatProtobuf.Saver(context);
  FSImageCompression compression = FSImageCompression.createCompression(conf);
  saver.save(newFile, compression);
  
  MD5FileUtils.saveMD5File(dstFile, saver.getSavedDigest());
  storage.setMostRecentCheckpointInfo(txid, Time.now());
}