org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory.
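Throughout these examples, a `StorageDirectory` represents one configured storage root (for the NameNode, an entry in `dfs.namenode.name.dir` or `dfs.namenode.edits.dir`) together with its `current/`, `previous/`, and temporary subdirectories. As a minimal orientation sketch (assuming an already-initialized `NNStorage` instance, which the examples below construct in various ways), instances are normally obtained by iterating the storage object rather than constructed directly:

```java
import java.io.File;
import java.util.Iterator;

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;

// Sketch: walk every EDITS storage directory and print its layout.
// "storage" is an already-initialized NNStorage; the examples below show
// how tests build one (e.g. via setupEdits or a MiniDFSCluster).
class StorageDirectoryTour {
  static void listEditsDirs(NNStorage storage) {
    for (Iterator<StorageDirectory> it =
        storage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
      StorageDirectory sd = it.next();
      File root = sd.getRoot();          // the configured root directory
      File current = sd.getCurrentDir(); // root/current, where files live
      System.out.println(root + " -> " + current);
    }
  }
}
```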
Example #1
Source File: TestParallelImageWrite.java From hadoop with Apache License 2.0
```java
/**
 * Confirm that FSImage files in all StorageDirectory are the same,
 * and non-empty, and there are the expected number of them.
 * @param fsn - the FSNamesystem being checked.
 * @param numImageDirs - the configured number of StorageDirectory of type IMAGE.
 * @return - the md5 hash of the most recent FSImage files, which must all be the same.
 * @throws AssertionError if image files are empty or different,
 *     if less than two StorageDirectory are provided, or if the
 *     actual number of StorageDirectory is less than configured.
 */
public static String checkImages(
    FSNamesystem fsn, int numImageDirs)
    throws Exception {
  NNStorage stg = fsn.getFSImage().getStorage();
  //any failed StorageDirectory is removed from the storageDirs list
  assertEquals("Some StorageDirectories failed Upgrade",
      numImageDirs, stg.getNumStorageDirs(NameNodeDirType.IMAGE));
  assertTrue("Not enough fsimage copies in MiniDFSCluster "
      + "to test parallel write", numImageDirs > 1);

  // List of "current/" directory from each SD
  List<File> dirs = FSImageTestUtil.getCurrentDirs(stg, NameNodeDirType.IMAGE);

  // across directories, all files with same names should be identical hashes
  FSImageTestUtil.assertParallelFilesAreIdentical(
      dirs, Collections.<String>emptySet());
  FSImageTestUtil.assertSameNewestImage(dirs);

  // Return the hash of the newest image file
  StorageDirectory firstSd = stg.dirIterator(NameNodeDirType.IMAGE).next();
  File latestImage = FSImageTestUtil.findLatestImageFile(firstSd);
  String md5 = FSImageTestUtil.getImageFileMD5IgnoringTxId(latestImage);
  System.err.println("md5 of " + latestImage + ": " + md5);
  return md5;
}
```
Example #2
Source File: TestCheckpoint.java From hadoop with Apache License 2.0
```java
/**
 * Assert that, if sdToLock is locked, the cluster is not allowed to start up.
 * @param conf cluster conf to use
 * @param sdToLock the storage directory to lock
 */
private static void assertClusterStartFailsWhenDirLocked(
    Configuration conf, StorageDirectory sdToLock) throws IOException {
  // Lock the edits dir, then start the NN, and make sure it fails to start
  sdToLock.lock();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).format(false)
        .manageNameDfsDirs(false).numDataNodes(0).build();
    assertFalse("cluster should fail to start after locking " +
        sdToLock, sdToLock.isLockSupported());
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("already locked", ioe);
  } finally {
    cleanup(cluster);
    cluster = null;
    sdToLock.unlock();
  }
}
```
Example #3
Source File: TestFileJournalManager.java From hadoop with Apache License 2.0
```java
@Test
public void testGetRemoteEditLog() throws IOException {
  StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(
      NameNodeDirType.EDITS, false,
      NNStorage.getFinalizedEditsFileName(1, 100),
      NNStorage.getFinalizedEditsFileName(101, 200),
      NNStorage.getInProgressEditsFileName(201),
      NNStorage.getFinalizedEditsFileName(1001, 1100));

  // passing null for NNStorage because this unit test will not use it
  FileJournalManager fjm = new FileJournalManager(conf, sd, null);
  assertEquals("[1,100],[101,200],[1001,1100]", getLogsAsString(fjm, 1));
  assertEquals("[101,200],[1001,1100]", getLogsAsString(fjm, 101));
  assertEquals("[101,200],[1001,1100]", getLogsAsString(fjm, 150));
  assertEquals("[1001,1100]", getLogsAsString(fjm, 201));
  assertEquals("Asking for a newer log than exists should return empty list",
      "", getLogsAsString(fjm, 9999));
}
```
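The mocked directory works because FileJournalManager discovers segments purely from file names; the NNStorage helpers encode the transaction-id range into the name. A small sketch of the naming (the zero-padded txid format shown in the comments matches the `%019d` convention this Hadoop line uses; treat the exact strings as illustrative):

```java
import org.apache.hadoop.hdfs.server.namenode.NNStorage;

class EditsFileNames {
  public static void main(String[] args) {
    // Finalized segment covering txids 1..100, e.g.
    // edits_0000000000000000001-0000000000000000100
    System.out.println(NNStorage.getFinalizedEditsFileName(1, 100));
    // Still-open segment starting at txid 201, e.g.
    // edits_inprogress_0000000000000000201
    System.out.println(NNStorage.getInProgressEditsFileName(201));
  }
}
```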
Example #4
Source File: TestFileJournalManager.java From hadoop with Apache License 2.0
```java
/**
 * Test the normal operation of loading transactions from
 * file journal manager. 3 edits directories are setup without any
 * failures. Test that we read in the expected number of transactions.
 */
@Test
public void testNormalOperation() throws IOException {
  File f1 = new File(TestEditLog.TEST_DIR + "/normtest0");
  File f2 = new File(TestEditLog.TEST_DIR + "/normtest1");
  File f3 = new File(TestEditLog.TEST_DIR + "/normtest2");

  List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI(), f3.toURI());
  NNStorage storage = setupEdits(editUris, 5);

  long numJournals = 0;
  for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
    assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
    numJournals++;
  }
  assertEquals(3, numJournals);
}
```
Example #5
Source File: FSImage.java From hadoop with Apache License 2.0
```java
/**
 * Rename all the fsimage files with the specific NameNodeFile type. The
 * associated checksum files will also be renamed.
 */
void renameCheckpoint(NameNodeFile fromNnf, NameNodeFile toNnf)
    throws IOException {
  ArrayList<StorageDirectory> al = null;
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector(EnumSet.of(fromNnf));
  storage.inspectStorageDirs(inspector);
  for (FSImageFile image : inspector.getFoundImages()) {
    try {
      renameImageFileInDir(image.sd, fromNnf, toNnf, image.txId, true);
    } catch (IOException ioe) {
      LOG.warn("Unable to rename checkpoint in " + image.sd, ioe);
      if (al == null) {
        al = Lists.newArrayList();
      }
      al.add(image.sd);
    }
  }
  if(al != null) {
    storage.reportErrorsOnDirectories(al);
  }
}
```
Example #6
Source File: FSEditLog.java From hadoop-gpu with Apache License 2.0
```java
/**
 * If there is an IO Error on any log operations on storage directory,
 * remove any stream associated with that directory
 */
synchronized void processIOError(StorageDirectory sd) {
  // Try to remove stream only if one should exist
  if (!sd.getStorageDirType().isOfType(NameNodeDirType.EDITS))
    return;
  if (editStreams == null || editStreams.size() <= 1) {
    FSNamesystem.LOG.fatal(
        "Fatal Error : All storage directories are inaccessible.");
    Runtime.getRuntime().exit(-1);
  }
  for (int idx = 0; idx < editStreams.size(); idx++) {
    File parentStorageDir = ((EditLogFileOutputStream)editStreams
        .get(idx)).getFile()
        .getParentFile().getParentFile();
    if (parentStorageDir.getName().equals(sd.getRoot().getName()))
      editStreams.remove(idx);
  }
}
```
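One thing to watch in this older hadoop-gpu variant: removing from `editStreams` by ascending index shifts later elements down, so the element that slides into the removed slot is skipped. The project code is shown as-is above; a general Java idiom that removes all matching streams safely would use an explicit iterator (a sketch, not Hadoop source):

```java
for (Iterator<EditLogOutputStream> it = editStreams.iterator(); it.hasNext();) {
  EditLogFileOutputStream stream = (EditLogFileOutputStream) it.next();
  File parentStorageDir = stream.getFile().getParentFile().getParentFile();
  if (parentStorageDir.getName().equals(sd.getRoot().getName())) {
    it.remove();  // safe structural removal during iteration
  }
}
```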
Example #7
Source File: FSImageTestUtil.java From hadoop with Apache License 2.0
```java
/**
 * Return a standalone instance of FSEditLog that will log into the given
 * log directory. The returned instance is not yet opened.
 */
public static FSEditLog createStandaloneEditLog(File logDir)
    throws IOException {
  assertTrue(logDir.mkdirs() || logDir.exists());
  if (!FileUtil.fullyDeleteContents(logDir)) {
    throw new IOException("Unable to delete contents of " + logDir);
  }
  NNStorage storage = Mockito.mock(NNStorage.class);
  StorageDirectory sd
      = FSImageTestUtil.mockStorageDirectory(logDir, NameNodeDirType.EDITS);
  List<StorageDirectory> sds = Lists.newArrayList(sd);
  Mockito.doReturn(sds).when(storage).dirIterable(NameNodeDirType.EDITS);
  Mockito.doReturn(sd).when(storage)
      .getStorageDirectory(Matchers.<URI>anyObject());

  FSEditLog editLog = new FSEditLog(new Configuration(), storage,
      ImmutableList.of(logDir.toURI()));
  editLog.initJournalsForWrite();
  return editLog;
}
```
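A hypothetical call site (the directory and variable names are illustrative; per the javadoc, the returned log still has to be opened before use):

```java
// Hypothetical usage sketch; the temp directory is illustrative only.
File logDir = new File(System.getProperty("java.io.tmpdir"), "standalone-edits");
FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(logDir);
// The instance is created but not yet opened; a caller opens it for
// writing before logging transactions, and closes it when done.
```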
Example #8
Source File: TestFileJournalManager.java From hadoop with Apache License 2.0
```java
/**
 * Test a mixture of inprogress files and finalised. Set up 3 edits
 * directories and fail the second on the last roll. Verify that reading
 * the transactions, reads from the finalised directories.
 */
@Test
public void testInprogressRecoveryMixed() throws IOException {
  File f1 = new File(TestEditLog.TEST_DIR + "/mixtest0");
  File f2 = new File(TestEditLog.TEST_DIR + "/mixtest1");
  File f3 = new File(TestEditLog.TEST_DIR + "/mixtest2");

  List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI(), f3.toURI());

  // abort after the 5th roll
  NNStorage storage = setupEdits(editUris, 5, new AbortSpec(5, 1));
  Iterator<StorageDirectory> dirs = storage.dirIterator(NameNodeDirType.EDITS);

  StorageDirectory sd = dirs.next();
  FileJournalManager jm = new FileJournalManager(conf, sd, storage);
  assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));

  sd = dirs.next();
  jm = new FileJournalManager(conf, sd, storage);
  assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL,
      getNumberOfTransactions(jm, 1, true, false));

  sd = dirs.next();
  jm = new FileJournalManager(conf, sd, storage);
  assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
}
```
Example #9
Source File: TestCheckpoint.java From big-c with Apache License 2.0
```java
/**
 * Assert that, if sdToLock is locked, the cluster is not allowed to start up.
 * @param conf cluster conf to use
 * @param sdToLock the storage directory to lock
 */
private static void assertClusterStartFailsWhenDirLocked(
    Configuration conf, StorageDirectory sdToLock) throws IOException {
  // Lock the edits dir, then start the NN, and make sure it fails to start
  sdToLock.lock();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).format(false)
        .manageNameDfsDirs(false).numDataNodes(0).build();
    assertFalse("cluster should fail to start after locking " +
        sdToLock, sdToLock.isLockSupported());
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("already locked", ioe);
  } finally {
    cleanup(cluster);
    cluster = null;
    sdToLock.unlock();
  }
}
```
Example #10
Source File: TestFileJournalManager.java From hadoop with Apache License 2.0
```java
@Test(expected=IllegalStateException.class)
public void testFinalizeErrorReportedToNNStorage()
    throws IOException, InterruptedException {
  File f = new File(TestEditLog.TEST_DIR + "/filejournaltestError");
  // abort after 10th roll
  NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()),
      10, new AbortSpec(10, 0));
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();

  FileJournalManager jm = new FileJournalManager(conf, sd, storage);
  String sdRootPath = sd.getRoot().getAbsolutePath();
  FileUtil.chmod(sdRootPath, "-w", true);
  try {
    jm.finalizeLogSegment(0, 1);
  } finally {
    FileUtil.chmod(sdRootPath, "+w", true);
    assertTrue(storage.getRemovedStorageDirs().contains(sd));
  }
}
```
Example #11
Source File: TestFileJournalManager.java From big-c with Apache License 2.0
```java
/**
 * Test that we can load an edits directory with a corrupt inprogress file.
 * The corrupt inprogress file should be moved to the side.
 */
@Test
public void testManyLogsWithCorruptInprogress() throws IOException {
  File f = new File(TestEditLog.TEST_DIR + "/manylogswithcorruptinprogress");
  NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()),
      10, new AbortSpec(10, 0));
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();

  File[] files = new File(f, "current").listFiles(new FilenameFilter() {
      @Override
      public boolean accept(File dir, String name) {
        if (name.startsWith("edits_inprogress")) {
          return true;
        }
        return false;
      }
    });
  assertEquals(files.length, 1);

  corruptAfterStartSegment(files[0]);

  FileJournalManager jm = new FileJournalManager(conf, sd, storage);
  assertEquals(10*TXNS_PER_ROLL+1,
      getNumberOfTransactions(jm, 1, true, false));
}
```
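On Java 8 or later, the anonymous `FilenameFilter` above can be written as a lambda; this is a stylistic equivalent, not how the Hadoop source is written:

```java
File[] files = new File(f, "current").listFiles(
    (dir, name) -> name.startsWith("edits_inprogress"));
```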
Example #12
Source File: UpgradeUtilities.java From big-c with Apache License 2.0
```java
/**
 * Create a <code>version</code> file for datanode inside the specified parent
 * directory. If such a file already exists, it will be overwritten.
 * The given version string will be written to the file as the layout
 * version. None of the parameters may be null.
 *
 * @param parent directory where namenode VERSION file is stored
 * @param version StorageInfo to create VERSION file from
 * @param bpid Block pool Id
 * @param bpidToWrite Block pool Id to write into the version file
 */
public static void createDataNodeVersionFile(File[] parent,
    StorageInfo version, String bpid, String bpidToWrite) throws IOException {
  DataStorage storage = new DataStorage(version);
  storage.setDatanodeUuid("FixedDatanodeUuid");

  File[] versionFiles = new File[parent.length];
  for (int i = 0; i < parent.length; i++) {
    File versionFile = new File(parent[i], "VERSION");
    StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
    storage.createStorageID(sd, false);
    storage.writeProperties(versionFile, sd);
    versionFiles[i] = versionFile;
    File bpDir = BlockPoolSliceStorage.getBpRoot(bpid, parent[i]);
    createBlockPoolVersionFile(bpDir, version, bpidToWrite);
  }
}
```
Example #13
Source File: NNUpgradeUtil.java From hadoop with Apache License 2.0
```java
/**
 * Return true if this storage dir can roll back to the previous storage
 * state, false otherwise. The NN will refuse to run the rollback operation
 * unless at least one JM or fsimage storage directory can roll back.
 *
 * @param storage the storage info for the current state
 * @param prevStorage the storage info for the previous (unupgraded) state
 * @param targetLayoutVersion the layout version we intend to roll back to
 * @return true if this JM can roll back, false otherwise.
 * @throws IOException in the event of error
 */
static boolean canRollBack(StorageDirectory sd, StorageInfo storage,
    StorageInfo prevStorage, int targetLayoutVersion) throws IOException {
  File prevDir = sd.getPreviousDir();
  if (!prevDir.exists()) {  // use current directory then
    LOG.info("Storage directory " + sd.getRoot()
        + " does not contain previous fs state.");
    // read and verify consistency with other directories
    storage.readProperties(sd);
    return false;
  }

  // read and verify consistency of the prev dir
  prevStorage.readPreviousVersionProperties(sd);

  if (prevStorage.getLayoutVersion() != targetLayoutVersion) {
    throw new IOException(
        "Cannot rollback to storage version " +
        prevStorage.getLayoutVersion() +
        " using this version of the NameNode, which uses storage version " +
        targetLayoutVersion + ". " +
        "Please use the previous version of HDFS to perform the rollback.");
  }

  return true;
}
```
Example #14
Source File: FSDataset.java From RDFS with Apache License 2.0
```java
public void addVolumes(Configuration conf, int namespaceId, String nsDir,
    Collection<StorageDirectory> dirs) throws Exception {
  if (dirs == null || dirs.isEmpty()) {
    return;
  }
  FSVolume[] volArray = new FSVolume[dirs.size()];
  File[] dirArray = new File[dirs.size()];
  int idx = 0;
  for (Iterator<StorageDirectory> iter = dirs.iterator(); iter.hasNext(); idx++) {
    dirArray[idx] = iter.next().getCurrentDir();
    volArray[idx] = new FSVolume(this, dirArray[idx], conf);
  }

  lock.writeLock().lock();
  try {
    volumes.addVolumes(volArray);
    for (FSVolume vol : volArray) {
      vol.addNamespace(namespaceId, nsDir, conf, datanode.isSupportAppends());
    }
  } finally {
    lock.writeLock().unlock();
  }

  asyncDiskService.insertDisk(dirArray, conf);
}
```
Example #15
Source File: NNUpgradeUtil.java From hadoop with Apache License 2.0
```java
/**
 * Finalize the upgrade. The previous dir, if any, will be renamed and
 * removed. After this is completed, rollback is no longer allowed.
 *
 * @param sd the storage directory to finalize
 * @throws IOException in the event of error
 */
static void doFinalize(StorageDirectory sd) throws IOException {
  File prevDir = sd.getPreviousDir();
  if (!prevDir.exists()) { // already discarded
    LOG.info("Directory " + prevDir + " does not exist.");
    LOG.info("Finalize upgrade for " + sd.getRoot() + " is not required.");
    return;
  }
  LOG.info("Finalizing upgrade of storage directory " + sd.getRoot());
  Preconditions.checkState(sd.getCurrentDir().exists(),
      "Current directory must exist.");
  final File tmpDir = sd.getFinalizedTmp();
  // rename previous to tmp and remove
  NNStorage.rename(prevDir, tmpDir);
  NNStorage.deleteDir(tmpDir);
  LOG.info("Finalize upgrade for " + sd.getRoot() + " is complete.");
}
```
Example #16
Source File: TestFileJournalManager.java From big-c with Apache License 2.0
```java
@Test
public void testGetRemoteEditLog() throws IOException {
  StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(
      NameNodeDirType.EDITS, false,
      NNStorage.getFinalizedEditsFileName(1, 100),
      NNStorage.getFinalizedEditsFileName(101, 200),
      NNStorage.getInProgressEditsFileName(201),
      NNStorage.getFinalizedEditsFileName(1001, 1100));

  // passing null for NNStorage because this unit test will not use it
  FileJournalManager fjm = new FileJournalManager(conf, sd, null);
  assertEquals("[1,100],[101,200],[1001,1100]", getLogsAsString(fjm, 1));
  assertEquals("[101,200],[1001,1100]", getLogsAsString(fjm, 101));
  assertEquals("[101,200],[1001,1100]", getLogsAsString(fjm, 150));
  assertEquals("[1001,1100]", getLogsAsString(fjm, 201));
  assertEquals("Asking for a newer log than exists should return empty list",
      "", getLogsAsString(fjm, 9999));
}
```
Example #17
Source File: UpgradeUtilities.java From hadoop with Apache License 2.0
```java
/**
 * Create a <code>version</code> file for datanode inside the specified parent
 * directory. If such a file already exists, it will be overwritten.
 * The given version string will be written to the file as the layout
 * version. None of the parameters may be null.
 *
 * @param parent directory where namenode VERSION file is stored
 * @param version StorageInfo to create VERSION file from
 * @param bpid Block pool Id
 * @param bpidToWrite Block pool Id to write into the version file
 */
public static void createDataNodeVersionFile(File[] parent,
    StorageInfo version, String bpid, String bpidToWrite) throws IOException {
  DataStorage storage = new DataStorage(version);
  storage.setDatanodeUuid("FixedDatanodeUuid");

  File[] versionFiles = new File[parent.length];
  for (int i = 0; i < parent.length; i++) {
    File versionFile = new File(parent[i], "VERSION");
    StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
    storage.createStorageID(sd, false);
    storage.writeProperties(versionFile, sd);
    versionFiles[i] = versionFile;
    File bpDir = BlockPoolSliceStorage.getBpRoot(bpid, parent[i]);
    createBlockPoolVersionFile(bpDir, version, bpidToWrite);
  }
}
```
Example #18
Source File: TestStartup.java From big-c with Apache License 2.0
```java
/**
 * verify that edits log and fsimage are in different directories and of a correct size
 */
private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expectedEditsSize) {
  StorageDirectory sd = null;
  for (Iterator<StorageDirectory> it = img.getStorage().dirIterator(); it.hasNext();) {
    sd = it.next();

    if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
      img.getStorage();
      File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0);
      LOG.info("--image file " + imf.getAbsolutePath() + "; len = "
          + imf.length() + "; expected = " + expectedImgSize);
      assertEquals(expectedImgSize, imf.length());
    } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
      img.getStorage();
      File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0);
      LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = "
          + edf.length() + "; expected = " + expectedEditsSize);
      assertEquals(expectedEditsSize, edf.length());
    } else {
      fail("Image/Edits directories are not different");
    }
  }
}
```
Example #19
Source File: TestStartup.java From hadoop-gpu with Apache License 2.0
```java
/**
 * verify that edits log and fsimage are in different directories and of a correct size
 */
private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expectedEditsSize) {
  StorageDirectory sd = null;
  for (Iterator<StorageDirectory> it = img.dirIterator(); it.hasNext();) {
    sd = it.next();

    if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
      File imf = FSImage.getImageFile(sd, NameNodeFile.IMAGE);
      LOG.info("--image file " + imf.getAbsolutePath() + "; len = "
          + imf.length() + "; expected = " + expectedImgSize);
      assertEquals(expectedImgSize, imf.length());
    } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
      File edf = FSImage.getImageFile(sd, NameNodeFile.EDITS);
      LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = "
          + edf.length() + "; expected = " + expectedEditsSize);
      assertEquals(expectedEditsSize, edf.length());
    } else {
      fail("Image/Edits directories are not different");
    }
  }
}
```
Example #20
Source File: TestParallelImageWrite.java From big-c with Apache License 2.0
```java
/**
 * Confirm that FSImage files in all StorageDirectory are the same,
 * and non-empty, and there are the expected number of them.
 * @param fsn - the FSNamesystem being checked.
 * @param numImageDirs - the configured number of StorageDirectory of type IMAGE.
 * @return - the md5 hash of the most recent FSImage files, which must all be the same.
 * @throws AssertionError if image files are empty or different,
 *     if less than two StorageDirectory are provided, or if the
 *     actual number of StorageDirectory is less than configured.
 */
public static String checkImages(
    FSNamesystem fsn, int numImageDirs)
    throws Exception {
  NNStorage stg = fsn.getFSImage().getStorage();
  //any failed StorageDirectory is removed from the storageDirs list
  assertEquals("Some StorageDirectories failed Upgrade",
      numImageDirs, stg.getNumStorageDirs(NameNodeDirType.IMAGE));
  assertTrue("Not enough fsimage copies in MiniDFSCluster "
      + "to test parallel write", numImageDirs > 1);

  // List of "current/" directory from each SD
  List<File> dirs = FSImageTestUtil.getCurrentDirs(stg, NameNodeDirType.IMAGE);

  // across directories, all files with same names should be identical hashes
  FSImageTestUtil.assertParallelFilesAreIdentical(
      dirs, Collections.<String>emptySet());
  FSImageTestUtil.assertSameNewestImage(dirs);

  // Return the hash of the newest image file
  StorageDirectory firstSd = stg.dirIterator(NameNodeDirType.IMAGE).next();
  File latestImage = FSImageTestUtil.findLatestImageFile(firstSd);
  String md5 = FSImageTestUtil.getImageFileMD5IgnoringTxId(latestImage);
  System.err.println("md5 of " + latestImage + ": " + md5);
  return md5;
}
```
Example #21
Source File: FSImage.java From big-c with Apache License 2.0
```java
private void renameImageFileInDir(StorageDirectory sd, NameNodeFile fromNnf,
    NameNodeFile toNnf, long txid, boolean renameMD5) throws IOException {
  final File fromFile = NNStorage.getStorageFile(sd, fromNnf, txid);
  final File toFile = NNStorage.getStorageFile(sd, toNnf, txid);
  // renameTo fails on Windows if the destination file already exists.
  if(LOG.isDebugEnabled()) {
    LOG.debug("renaming " + fromFile.getAbsolutePath()
        + " to " + toFile.getAbsolutePath());
  }
  if (!fromFile.renameTo(toFile)) {
    if (!toFile.delete() || !fromFile.renameTo(toFile)) {
      throw new IOException("renaming " + fromFile.getAbsolutePath() + " to "
          + toFile.getAbsolutePath() + " FAILED");
    }
  }
  if (renameMD5) {
    MD5FileUtils.renameMD5File(fromFile, toFile);
  }
}
```
Example #22
Source File: TestStartup.java From hadoop with Apache License 2.0
```java
/**
 * verify that edits log and fsimage are in different directories and of a correct size
 */
private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expectedEditsSize) {
  StorageDirectory sd = null;
  for (Iterator<StorageDirectory> it = img.getStorage().dirIterator(); it.hasNext();) {
    sd = it.next();

    if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
      img.getStorage();
      File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0);
      LOG.info("--image file " + imf.getAbsolutePath() + "; len = "
          + imf.length() + "; expected = " + expectedImgSize);
      assertEquals(expectedImgSize, imf.length());
    } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
      img.getStorage();
      File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0);
      LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = "
          + edf.length() + "; expected = " + expectedEditsSize);
      assertEquals(expectedEditsSize, edf.length());
    } else {
      fail("Image/Edits directories are not different");
    }
  }
}
```
Example #23
Source File: FSEditLog.java From hadoop-gpu with Apache License 2.0
```java
/**
 * Create empty edit log files.
 * Initialize the output stream for logging.
 *
 * @throws IOException
 */
public synchronized void open() throws IOException {
  numTransactions = totalTimeTransactions = numTransactionsBatchedInSync = 0;
  if (editStreams == null)
    editStreams = new ArrayList<EditLogOutputStream>();
  for (Iterator<StorageDirectory> it =
         fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
    StorageDirectory sd = it.next();
    File eFile = getEditFile(sd);
    try {
      EditLogOutputStream eStream = new EditLogFileOutputStream(eFile);
      editStreams.add(eStream);
    } catch (IOException e) {
      FSNamesystem.LOG.warn("Unable to open edit log file " + eFile);
      // Remove the directory from list of storage directories
      it.remove();
    }
  }
}
```
Example #24
Source File: NNUpgradeUtil.java From hadoop with Apache License 2.0
```java
/**
 * Perform rollback of the storage dir to the previous state. The existing
 * current dir is removed, and the previous dir is renamed to current.
 *
 * @param sd the storage directory to roll back.
 * @throws IOException in the event of error
 */
static void doRollBack(StorageDirectory sd) throws IOException {
  File prevDir = sd.getPreviousDir();
  if (!prevDir.exists()) {
    return;
  }

  File tmpDir = sd.getRemovedTmp();
  Preconditions.checkState(!tmpDir.exists(),
      "removed.tmp directory must not exist for rollback."
          + "Consider restarting for recovery.");
  // rename current to tmp
  File curDir = sd.getCurrentDir();
  Preconditions.checkState(curDir.exists(),
      "Current directory must exist for rollback.");

  NNStorage.rename(curDir, tmpDir);
  // rename previous to current
  NNStorage.rename(prevDir, curDir);

  // delete tmp dir
  NNStorage.deleteDir(tmpDir);
  LOG.info("Rollback of " + sd.getRoot() + " is complete.");
}
```
Example #25
Source File: NNUpgradeUtil.java From hadoop with Apache License 2.0
```java
/**
 * Rename the existing current dir to previous.tmp, and create a new empty
 * current dir.
 */
public static void renameCurToTmp(StorageDirectory sd) throws IOException {
  File curDir = sd.getCurrentDir();
  File prevDir = sd.getPreviousDir();
  final File tmpDir = sd.getPreviousTmp();

  Preconditions.checkState(curDir.exists(),
      "Current directory must exist for preupgrade.");
  Preconditions.checkState(!prevDir.exists(),
      "Previous directory must not exist for preupgrade.");
  Preconditions.checkState(!tmpDir.exists(),
      "Previous.tmp directory must not exist for preupgrade."
          + "Consider restarting for recovery.");

  // rename current to tmp
  NNStorage.rename(curDir, tmpDir);

  if (!curDir.mkdir()) {
    throw new IOException("Cannot create directory " + curDir);
  }
}
```
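Examples #13, #15, #24, and #25 together cover the storage-directory upgrade lifecycle. Below is a hedged sketch of how one directory moves through it, assuming the sketch lives in the `org.apache.hadoop.hdfs.server.namenode` package (several of these methods are package-private); error handling and the actual data migration between the steps are elided:

```java
import java.io.IOException;

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.StorageInfo;

// Sketch of the per-directory upgrade/rollback/finalize sequence built
// from the NNUpgradeUtil methods shown above. Real callers (e.g. FSImage)
// interleave these with fsimage saving; this only shows the ordering.
class UpgradeLifecycleSketch {
  static void upgrade(StorageDirectory sd) throws IOException {
    // 1. Preupgrade: current/ -> previous.tmp/, then a fresh empty current/.
    NNUpgradeUtil.renameCurToTmp(sd);
    // 2. ... write the upgraded state into the new current/ here ...
    // 3. Commit: previous.tmp/ -> previous/ (handled by doUpgrade in Hadoop).
  }

  static void rollBackIfPossible(StorageDirectory sd, StorageInfo storage,
      StorageInfo prevStorage, int targetLayoutVersion) throws IOException {
    if (NNUpgradeUtil.canRollBack(sd, storage, prevStorage,
        targetLayoutVersion)) {
      NNUpgradeUtil.doRollBack(sd);  // previous/ becomes current/ again
    }
  }

  static void finalizeUpgrade(StorageDirectory sd) throws IOException {
    NNUpgradeUtil.doFinalize(sd);    // discard previous/; no more rollback
  }
}
```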
Example #26
Source File: FSEditLog.java From hadoop-gpu with Apache License 2.0
```java
/**
 * Returns the timestamp of the edit log
 */
synchronized long getFsEditTime() {
  Iterator<StorageDirectory> it = fsimage.dirIterator(NameNodeDirType.EDITS);
  if(it.hasNext())
    return getEditFile(it.next()).lastModified();
  return 0;
}
```
Example #27
Source File: TestFileJournalManager.java From hadoop with Apache License 2.0
```java
/**
 * Test that FileJournalManager behaves correctly despite inprogress
 * files in all its edit log directories. Set up 3 directories and fail
 * all on the last roll. Verify that the correct number of transaction
 * are then loaded.
 */
@Test
public void testInprogressRecoveryAll() throws IOException {
  File f1 = new File(TestEditLog.TEST_DIR + "/failalltest0");
  File f2 = new File(TestEditLog.TEST_DIR + "/failalltest1");
  File f3 = new File(TestEditLog.TEST_DIR + "/failalltest2");

  List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI(), f3.toURI());

  // abort after the 5th roll
  NNStorage storage = setupEdits(editUris, 5,
      new AbortSpec(5, 0), new AbortSpec(5, 1), new AbortSpec(5, 2));
  Iterator<StorageDirectory> dirs = storage.dirIterator(NameNodeDirType.EDITS);

  StorageDirectory sd = dirs.next();
  FileJournalManager jm = new FileJournalManager(conf, sd, storage);
  assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL,
      getNumberOfTransactions(jm, 1, true, false));

  sd = dirs.next();
  jm = new FileJournalManager(conf, sd, storage);
  assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL,
      getNumberOfTransactions(jm, 1, true, false));

  sd = dirs.next();
  jm = new FileJournalManager(conf, sd, storage);
  assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL,
      getNumberOfTransactions(jm, 1, true, false));
}
```
Example #28
Source File: FSEditLog.java From hadoop-gpu with Apache License 2.0
```java
/**
 * Closes the current edit log and opens edits.new.
 * Returns the lastModified time of the edits log.
 */
synchronized void rollEditLog() throws IOException {
  //
  // If edits.new already exists in some directory, verify it
  // exists in all directories.
  //
  if (existsNew()) {
    for (Iterator<StorageDirectory> it =
           fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
      File editsNew = getEditNewFile(it.next());
      if (!editsNew.exists()) {
        throw new IOException("Inconsistent existance of edits.new " +
                              editsNew);
      }
    }
    return; // nothing to do, edits.new exists!
  }

  close(); // close existing edit log

  //
  // Open edits.new
  //
  for (Iterator<StorageDirectory> it =
         fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
    StorageDirectory sd = it.next();
    try {
      EditLogFileOutputStream eStream =
          new EditLogFileOutputStream(getEditNewFile(sd));
      eStream.create();
      editStreams.add(eStream);
    } catch (IOException e) {
      // remove stream and this storage directory from list
      processIOError(sd);
      it.remove();
    }
  }
}
```
Example #29
Source File: NameSpaceSliceStorage.java From RDFS with Apache License 2.0
```java
@Override
protected void getFields(Properties props, StorageDirectory sd)
    throws IOException {
  setNamespaceID(props, sd);
  setcTime(props, sd);

  String snsid = props.getProperty(NAMESPACE_ID);
  setNameSpaceID(sd.getRoot(), snsid);

  String property = props.getProperty(LAYOUT_VERSION);
  int lv;
  if (property == null) {
    Integer topLayout = getTopLevelLayout(sd);
    if (topLayout == null) {
      throw new InconsistentFSStateException(sd.getRoot(),
          "Top level layout and NS level layout do not exist");
    }
    lv = topLayout;
  } else {
    lv = Integer.parseInt(property);
  }
  if (lv < FSConstants.LAYOUT_VERSION) { // future version
    throw new InconsistentFSStateException(sd.getRoot(),
        "has future layout version : " + lv);
  }
  layoutVersion = lv;
}
```
Example #30
Source File: TestEditLog.java From hadoop with Apache License 2.0
```java
private void assertExistsInStorageDirs(MiniDFSCluster cluster,
    NameNodeDirType dirType,
    String filename) {
  NNStorage storage = cluster.getNamesystem().getFSImage().getStorage();
  for (StorageDirectory sd : storage.dirIterable(dirType)) {
    File f = new File(sd.getCurrentDir(), filename);
    assertTrue("Expect that " + f + " exists", f.exists());
  }
}
```
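A hypothetical call site, combining this helper with the naming methods from Example #3 (the txid is illustrative):

```java
// Hypothetical usage: assert that every EDITS directory in the cluster
// contains the in-progress segment starting at txid 1.
assertExistsInStorageDirs(cluster, NameNodeDirType.EDITS,
    NNStorage.getInProgressEditsFileName(1));
```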