org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.
Each example is annotated with the source file it was taken from, the originating project, and that project's license.
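Many of the examples below use the same checkpointing idiom: put the NameNode into safe mode, save the namespace to write a fresh fsimage, then leave safe mode or restart the NameNode. As orientation, here is a minimal sketch of that idiom, assuming a running MiniDFSCluster inside HDFS test code; the wrapper class and the forceCheckpoint helper name are illustrative assumptions, not taken from any example below.

import java.io.IOException;

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

// Minimal sketch of the checkpoint idiom. NameNodeAdapter is a test-only
// accessor into the NameNode, so this only works from in-tree HDFS tests.
public class CheckpointIdiom {
  static void forceCheckpoint(MiniDFSCluster cluster) throws IOException {
    NameNode nameNode = cluster.getNameNode();
    NameNodeAdapter.enterSafeMode(nameNode, false); // block namespace edits
    NameNodeAdapter.saveNamespace(nameNode);        // write a new fsimage
    NameNodeAdapter.leaveSafeMode(nameNode);        // resume normal operation
  }
}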
Example #1
Source File: TestHASafeMode.java From big-c with Apache License 2.0
/**
 * Regression test for a bug experienced while developing
 * HDFS-2742. The scenario here is:
 * - image contains some blocks
 * - edits log contains at least one block addition, followed
 *   by deletion of more blocks than were added.
 * - When node starts up, some incorrect accounting of block
 *   totals caused an assertion failure.
 */
@Test
public void testBlocksDeletedInEditLog() throws Exception {
  banner("Starting with NN0 active and NN1 standby, creating some blocks");
  // Make 4 blocks persisted in the image.
  DFSTestUtil.createFile(fs, new Path("/test"), 4*BLOCK_SIZE, (short) 3, 1L);
  NameNodeAdapter.enterSafeMode(nn0, false);
  NameNodeAdapter.saveNamespace(nn0);
  NameNodeAdapter.leaveSafeMode(nn0);

  // OP_ADD for 2 blocks
  DFSTestUtil.createFile(fs, new Path("/test2"), 2*BLOCK_SIZE, (short) 3, 1L);

  // OP_DELETE for 4 blocks
  fs.delete(new Path("/test"), true);

  restartActive();
}
Example #2
Source File: TestInitializeSharedEdits.java From hadoop with Apache License 2.0
private void assertCanStartHaNameNodes(String pathSuffix)
    throws ServiceFailedException, IOException, URISyntaxException,
    InterruptedException {
  // Now should be able to start both NNs. Pass "false" here so that we don't
  // try to waitActive on all NNs, since the second NN doesn't exist yet.
  cluster.restartNameNode(0, false);
  cluster.restartNameNode(1, true);

  // Make sure HA is working.
  cluster.getNameNode(0).getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
  FileSystem fs = null;
  try {
    Path newPath = new Path(TEST_PATH, pathSuffix);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(newPath));
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
        cluster.getNameNode(1));
    assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
        newPath.toString(), false).isDir());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
Example #3
Source File: TestHAStateTransitions.java From big-c with Apache License 2.0
private static void createEmptyInProgressEditLog(MiniDFSCluster cluster,
    NameNode nn, boolean writeHeader) throws IOException {
  long txid = nn.getNamesystem().getEditLog().getLastWrittenTxId();
  URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
  File sharedEditsDir = new File(sharedEditsUri.getPath());
  StorageDirectory storageDir = new StorageDirectory(sharedEditsDir);
  File inProgressFile = NameNodeAdapter.getInProgressEditsFile(storageDir,
      txid + 1);
  assertTrue("Failed to create in-progress edits file",
      inProgressFile.createNewFile());

  if (writeHeader) {
    DataOutputStream out = new DataOutputStream(new FileOutputStream(
        inProgressFile));
    EditLogFileOutputStream.writeHeader(
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, out);
    out.close();
  }
}
Example #4
Source File: TestOpenFilesWithSnapshot.java From hadoop with Apache License 2.0
@Test
public void testFilesDeletionWithCheckpoint() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.delete(new Path("/test/test/test2"), true);
  fs.delete(new Path("/test/test/test3"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);

  // read snapshot file after restart
  String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test2");
  DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
  String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test3");
  DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
Example #5
Source File: TestNameNodeMetrics.java From hadoop with Apache License 2.0
/** Test to ensure metrics reflects missing blocks */
@Test
public void testMissingBlock() throws Exception {
  // Create a file with a single block with one replica
  Path file = getTestPath("testMissingBlocks");
  createFile(file, 100, (short)1);

  // Corrupt the only replica of the block to result in a missing block
  LocatedBlock block = NameNodeAdapter.getBlockLocations(
      cluster.getNameNode(), file.toString(), 0, 1).get(0);
  cluster.getNamesystem().writeLock();
  try {
    bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
        "STORAGE_ID", "TEST");
  } finally {
    cluster.getNamesystem().writeUnlock();
  }
  updateMetrics();
  MetricsRecordBuilder rb = getMetrics(NS_METRICS);
  assertGauge("UnderReplicatedBlocks", 1L, rb);
  assertGauge("MissingBlocks", 1L, rb);
  assertGauge("MissingReplOneBlocks", 1L, rb);
  fs.delete(file, true);
  waitForDnMetricValue(NS_METRICS, "UnderReplicatedBlocks", 0L);
}
Example #6
Source File: TestSnapshotBlocksMap.java From big-c with Apache License 2.0
@Test(timeout = 30000)
public void testReadSnapshotFileWithCheckpoint() throws Exception {
  Path foo = new Path("/foo");
  hdfs.mkdirs(foo);
  hdfs.allowSnapshot(foo);
  Path bar = new Path("/foo/bar");
  DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
  hdfs.createSnapshot(foo, "s1");
  assertTrue(hdfs.delete(bar, true));

  // checkpoint
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);

  // restart namenode to load snapshot files from fsimage
  cluster.restartNameNode(true);
  String snapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
  DFSTestUtil.readFile(hdfs, new Path(snapshotPath));
}
Example #7
Source File: TestOpenFilesWithSnapshot.java From hadoop with Apache License 2.0
@Test
public void testOpenFilesWithRename() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);

  // check for zero sized blocks
  Path fileWithEmptyBlock = new Path("/test/test/test4");
  fs.create(fileWithEmptyBlock);
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  String clientName = fs.getClient().getClientName();
  // create one empty block
  nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  fs.createSnapshot(path, "s2");

  fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
  fs.delete(new Path("/test/test-renamed"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
}
Example #8
Source File: TestHASafeMode.java From hadoop with Apache License 2.0
/**
 * Test case for enter safemode in active namenode, when it is already in
 * startup safemode. It is a regression test for HDFS-2747.
 */
@Test
public void testEnterSafeModeInANNShouldNotThrowNPE() throws Exception {
  banner("Restarting active");
  DFSTestUtil
      .createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3, 1L);
  restartActive();
  nn0.getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));

  FSNamesystem namesystem = nn0.getNamesystem();
  String status = namesystem.getSafemode();
  assertTrue("Bad safemode status: '" + status + "'", status
      .startsWith("Safe mode is ON."));
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem
      .isInSafeMode());
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem
      .isInSafeMode());
}
Example #9
Source File: TestBlocksWithNotEnoughRacks.java From hadoop with Apache License 2.0
@Test
public void testSufficientlySingleReplBlockUsesNewRack() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 1;
  final Path filePath = new Path("/testFile");

  String racks[] = {"/rack1", "/rack1", "/rack1", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block with a replication factor of 1
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);

    REPLICATION_FACTOR = 2;
    NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
Example #10
Source File: StressHdfsTest.java From lucene-solr with Apache License 2.0
@Test
public void test() throws Exception {
  randomlyEnableAutoSoftCommit();

  int cnt = random().nextInt(2) + 1;
  for (int i = 0; i < cnt; i++) {
    createAndDeleteCollection();
  }

  if (testRestartIntoSafeMode) {
    Timer timer = new Timer();
    try {
      createCollection(DELETE_DATA_DIR_COLLECTION, "conf1", 1, 1, 1);

      waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false);

      jettys.get(0).stop();

      // enter safe mode and restart a node
      NameNodeAdapter.enterSafeMode(dfsCluster.getNameNode(), false);
      int rnd = random().nextInt(10000);
      timer.schedule(new TimerTask() {
        @Override
        public void run() {
          NameNodeAdapter.leaveSafeMode(dfsCluster.getNameNode());
        }
      }, rnd);

      jettys.get(0).start();

      waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false);
    } finally {
      timer.cancel();
    }
  }
}
Example #11
Source File: TestDecommission.java From hadoop with Apache License 2.0
public void testClusterStats(int numNameNodes) throws IOException,
    InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf);

  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);

    FSNamesystem fsn = cluster.getNamesystem(i);
    NameNode namenode = cluster.getNameNode(i);
    DatanodeInfo decomInfo = decommissionNode(i, null, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    DataNode decomNode = getDataNode(decomInfo);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, decomInfo, decomNode, true);

    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    refreshNodes(fsn, conf);
    DatanodeInfo retInfo = NameNodeAdapter.getDatanode(fsn, decomInfo);
    DataNode retNode = getDataNode(decomInfo);
    waitNodeState(retInfo, AdminStates.NORMAL);
    verifyStats(namenode, fsn, retInfo, retNode, false);
  }
}
Example #12
Source File: TestXAttrWithSnapshot.java From big-c with Apache License 2.0
/**
 * Restart the cluster, optionally saving a new checkpoint.
 *
 * @param checkpoint boolean true to save a new checkpoint
 * @throws Exception if restart fails
 */
private static void restart(boolean checkpoint) throws Exception {
  NameNode nameNode = cluster.getNameNode();
  if (checkpoint) {
    NameNodeAdapter.enterSafeMode(nameNode, false);
    NameNodeAdapter.saveNamespace(nameNode);
  }
  shutdown();
  initCluster(false);
}
Example #13
Source File: TestBlocksWithNotEnoughRacks.java From hadoop with Apache License 2.0
@Test
public void testUnderReplicatedUsesNewRacks() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 3;
  final Path filePath = new Path("/testFile");
  // All datanodes are on the same rack
  String racks[] = {"/rack1", "/rack1", "/rack1", "/rack1", "/rack1"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);

    // Add new datanodes on a different rack and increase the
    // replication factor so the block is underreplicated and make
    // sure at least one of the hosts on the new rack is used.
    String newRacks[] = {"/rack2", "/rack2"};
    cluster.startDataNodes(conf, 2, true, null, newRacks);
    REPLICATION_FACTOR = 5;
    NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR);

    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
Example #14
Source File: TestFileAppend4.java From RDFS with Apache License 2.0
/**
 * Test that the restart of a DN and the subsequent pipeline recovery do not cause
 * a file to become prematurely considered "complete", when it's a fresh file
 * with no .append() called.
 */
public void testNotPrematurelyCompleteWithFailureNotReopened() throws Exception {
  LOG.info("START");
  cluster = new MiniDFSCluster(conf, 3, true, null);
  NameNode nn = cluster.getNameNode();
  FileSystem fs1 = cluster.getFileSystem();
  try {
    short rep = 3; // replication

    file1 = new Path("/delayedReceiveBlock");

    stm = fs1.create(file1, true, (int)BLOCK_SIZE*2, rep, 64*1024*1024);
    LOG.info("======== Writing");
    AppendTestUtil.write(stm, 0, 1024*1024);

    LOG.info("======== Waiting for a block allocation");
    waitForBlockReplication(fs1, "/delayedReceiveBlock", 0, 3000);

    LOG.info("======== Checking not complete");
    assertFalse(NameNodeAdapter.checkFileProgress(nn.namesystem,
        "/delayedReceiveBlock", true));

    // Stop one of the DNs, don't restart
    MiniDFSCluster.DataNodeProperties dnprops = cluster.stopDataNode(0);

    // Write some more data
    AppendTestUtil.write(stm, 0, 1024*1024);

    // Make sure we don't see the file as complete
    LOG.info("======== Checking progress");
    assertFalse(NameNodeAdapter.checkFileProgress(nn.namesystem,
        "/delayedReceiveBlock", true));

    LOG.info("======== Closing");
    stm.close();
  } finally {
    LOG.info("======== Cleaning up");
    fs1.close();
    cluster.shutdown();
  }
}
Example #15
Source File: TestNameNodeMetrics.java From big-c with Apache License 2.0
/** Create excess blocks by reducing the replication factor for
 * a file and ensure metrics reflects it */
@Test
public void testExcessBlocks() throws Exception {
  Path file = getTestPath("testExcessBlocks");
  createFile(file, 100, (short)2);
  long totalBlocks = 1;
  NameNodeAdapter.setReplication(namesystem, file.toString(), (short)1);
  updateMetrics();
  MetricsRecordBuilder rb = getMetrics(NS_METRICS);
  assertGauge("ExcessBlocks", totalBlocks, rb);
  fs.delete(file, true);
}
Example #16
Source File: TestLeaseRecovery2.java From hadoop with Apache License 2.0
static void checkLease(String f, int size) {
  final String holder = NameNodeAdapter.getLeaseHolderForPath(
      cluster.getNameNode(), f);
  if (size == 0) {
    assertEquals("lease holder should be null, file is closed", null, holder);
  } else {
    assertEquals("lease holder should now be the NN",
        HdfsServerConstants.NAMENODE_LEASE_HOLDER, holder);
  }
}
Example #17
Source File: TestHASafeMode.java From big-c with Apache License 2.0
/**
 * Regression test for HDFS-2804: standby should not populate replication
 * queues when exiting safe mode.
 */
@Test
public void testNoPopulatingReplQueuesWhenExitingSafemode() throws Exception {
  DFSTestUtil.createFile(fs, new Path("/test"), 15*BLOCK_SIZE, (short)3, 1L);

  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);

  // get some blocks in the SBN's image
  nn1.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  NameNodeAdapter.saveNamespace(nn1);
  nn1.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);

  // and some blocks in the edit logs
  DFSTestUtil.createFile(fs, new Path("/test2"), 15*BLOCK_SIZE, (short)3, 1L);
  nn0.getRpcServer().rollEditLog();

  cluster.stopDataNode(1);
  cluster.shutdownNameNode(1);

  //Configuration sbConf = cluster.getConfiguration(1);
  //sbConf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 1);
  cluster.restartNameNode(1, false);
  nn1 = cluster.getNameNode(1);

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return !nn1.isInSafeMode();
    }
  }, 100, 10000);

  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  assertEquals(0L, nn1.getNamesystem().getUnderReplicatedBlocks());
  assertEquals(0L, nn1.getNamesystem().getPendingReplicationBlocks());
}
Example #18
Source File: TestStandbyIsHot.java From hadoop with Apache License 2.0
static void waitForBlockLocations(final MiniDFSCluster cluster,
    final NameNode nn, final String path, final int expectedReplicas)
    throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        LocatedBlocks locs = NameNodeAdapter.getBlockLocations(nn, path, 0, 1000);
        DatanodeInfo[] dnis = locs.getLastLocatedBlock().getLocations();
        for (DatanodeInfo dni : dnis) {
          Assert.assertNotNull(dni);
        }
        int numReplicas = dnis.length;
        LOG.info("Got " + numReplicas + " locs: " + locs);
        if (numReplicas > expectedReplicas) {
          cluster.triggerDeletionReports();
        }
        cluster.triggerHeartbeats();
        return numReplicas == expectedReplicas;
      } catch (IOException e) {
        LOG.warn("No block locations yet: " + e.getMessage());
        return false;
      }
    }
  }, 500, 20000);
}
Example #19
Source File: TestSnapshotBlocksMap.java From hadoop with Apache License 2.0
@Test(timeout = 30000)
public void testReadRenamedSnapshotFileWithCheckpoint() throws Exception {
  final Path foo = new Path("/foo");
  final Path foo2 = new Path("/foo2");
  hdfs.mkdirs(foo);
  hdfs.mkdirs(foo2);
  hdfs.allowSnapshot(foo);
  hdfs.allowSnapshot(foo2);
  final Path bar = new Path(foo, "bar");
  final Path bar2 = new Path(foo2, "bar");
  DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
  hdfs.createSnapshot(foo, "s1");

  // rename to another snapshottable directory and take snapshot
  assertTrue(hdfs.rename(bar, bar2));
  hdfs.createSnapshot(foo2, "s2");
  // delete the original renamed file to make sure blocks are not updated by
  // the original file
  assertTrue(hdfs.delete(bar2, true));

  // checkpoint
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  // restart namenode to load snapshot files from fsimage
  cluster.restartNameNode(true);

  // file in first snapshot
  String barSnapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
  DFSTestUtil.readFile(hdfs, new Path(barSnapshotPath));
  // file in second snapshot after rename+delete
  String bar2SnapshotPath = Snapshot.getSnapshotPath(foo2.toString(),
      "s2/bar");
  DFSTestUtil.readFile(hdfs, new Path(bar2SnapshotPath));
}
Example #20
Source File: TestBookKeeperAsHASharedDir.java From big-c with Apache License 2.0
private void assertCanStartHANameNodes(MiniDFSCluster cluster,
    Configuration conf, String path) throws ServiceFailedException,
    IOException, URISyntaxException, InterruptedException {
  // Now should be able to start both NNs. Pass "false" here so that we don't
  // try to waitActive on all NNs, since the second NN doesn't exist yet.
  cluster.restartNameNode(0, false);
  cluster.restartNameNode(1, true);

  // Make sure HA is working.
  cluster
      .getNameNode(0)
      .getRpcServer()
      .transitionToActive(
          new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
  FileSystem fs = null;
  try {
    Path newPath = new Path(path);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(newPath));
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
        cluster.getNameNode(1));
    assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
        newPath.toString(), false).isDir());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
Example #21
Source File: TestSnapshotDeletion.java From hadoop with Apache License 2.0
/**
 * Delete a snapshot that is taken before a directory deletion (recursively),
 * directory diff list should be combined correctly.
 */
@Test (timeout=60000)
public void testDeleteSnapshot2() throws Exception {
  final Path root = new Path("/");

  Path dir = new Path("/dir1");
  Path file1 = new Path(dir, "file1");
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);

  hdfs.allowSnapshot(root);
  hdfs.createSnapshot(root, "s1");

  Path file2 = new Path(dir, "file2");
  DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
  INodeFile file2Node = fsdir.getINode(file2.toString()).asFile();
  long file2NodeId = file2Node.getId();

  hdfs.createSnapshot(root, "s2");

  // delete directory recursively
  assertTrue(hdfs.delete(dir, true));
  assertNotNull(fsdir.getInode(file2NodeId));

  // delete second snapshot
  hdfs.deleteSnapshot(root, "s2");
  assertTrue(fsdir.getInode(file2NodeId) == null);

  NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
  NameNodeAdapter.saveNamespace(cluster.getNameNode());

  // restart NN
  cluster.restartNameNodes();
}
Example #22
Source File: TestAclConfigFlag.java From big-c with Apache License 2.0
/**
 * Restart the cluster, optionally saving a new checkpoint.
 *
 * @param checkpoint boolean true to save a new checkpoint
 * @param aclsEnabled if true, ACL support is enabled
 * @throws Exception if restart fails
 */
private void restart(boolean checkpoint, boolean aclsEnabled)
    throws Exception {
  NameNode nameNode = cluster.getNameNode();
  if (checkpoint) {
    NameNodeAdapter.enterSafeMode(nameNode, false);
    NameNodeAdapter.saveNamespace(nameNode);
  }
  shutdown();
  initCluster(false, aclsEnabled);
}
Example #23
Source File: TestNameNodeMetrics.java From hadoop with Apache License 2.0
/** Corrupt a block and ensure metrics reflects it */
@Test
public void testCorruptBlock() throws Exception {
  // Create a file with single block with two replicas
  final Path file = getTestPath("testCorruptBlock");
  createFile(file, 100, (short)2);

  // Corrupt first replica of the block
  LocatedBlock block = NameNodeAdapter.getBlockLocations(
      cluster.getNameNode(), file.toString(), 0, 1).get(0);
  cluster.getNamesystem().writeLock();
  try {
    bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
        "STORAGE_ID", "TEST");
  } finally {
    cluster.getNamesystem().writeUnlock();
  }
  updateMetrics();
  MetricsRecordBuilder rb = getMetrics(NS_METRICS);
  assertGauge("CorruptBlocks", 1L, rb);
  assertGauge("PendingReplicationBlocks", 1L, rb);
  assertGauge("ScheduledReplicationBlocks", 1L, rb);
  fs.delete(file, true);
  rb = waitForDnMetricValue(NS_METRICS, "CorruptBlocks", 0L);
  assertGauge("PendingReplicationBlocks", 0L, rb);
  assertGauge("ScheduledReplicationBlocks", 0L, rb);
}
Example #24
Source File: TestFileAppend4.java From RDFS with Apache License 2.0
/**
 * Test that a file is not considered complete when it only has in-progress
 * blocks. This ensures that when a block is appended to, it is converted
 * back into the right kind of "in progress" state.
 */
public void testNotPrematurelyComplete() throws Exception {
  LOG.info("START");
  cluster = new MiniDFSCluster(conf, 3, true, null);
  FileSystem fs1 = cluster.getFileSystem();
  try {
    int halfBlock = (int)BLOCK_SIZE/2;
    short rep = 3; // replication
    assertTrue(BLOCK_SIZE%4 == 0);

    file1 = new Path("/delayedReceiveBlock");

    // write 1/2 block & close
    stm = fs1.create(file1, true, (int)BLOCK_SIZE*2, rep, BLOCK_SIZE);
    AppendTestUtil.write(stm, 0, halfBlock);
    stm.close();

    NameNode nn = cluster.getNameNode();
    LOG.info("======== Appending");
    stm = fs1.append(file1);
    LOG.info("======== Writing");
    AppendTestUtil.write(stm, 0, halfBlock/2);

    LOG.info("======== Checking progress");
    assertFalse(NameNodeAdapter.checkFileProgress(nn.namesystem,
        "/delayedReceiveBlock", true));

    LOG.info("======== Closing");
    stm.close();
  } catch (Throwable e) {
    e.printStackTrace();
    throw new IOException(e);
  } finally {
    LOG.info("======== Cleaning up");
    fs1.close();
    cluster.shutdown();
  }
}