org.apache.hadoop.hdfs.tools.DFSck Java Examples
The following examples show how to use org.apache.hadoop.hdfs.tools.DFSck.
Each example is drawn from an open-source project; the source file, project, and license are noted above the code.
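DFSck is the class behind the "hdfs fsck" command. It implements the Tool interface, so it is normally driven through ToolRunner with the usual fsck arguments, and it writes its report to the PrintStream passed to its constructor, which makes the output easy to capture. A minimal sketch of that pattern, assuming the Configuration's default resources point at a running HDFS cluster:

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

public class FsckExample {
  public static void main(String[] args) throws Exception {
    // Cluster settings (fs.defaultFS, etc.) come from the default
    // configuration resources; assumed here to point at a live cluster.
    Configuration conf = new Configuration();

    // DFSck prints its report to the stream given in the constructor,
    // so buffer it instead of letting it go to the console.
    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bStream, true);

    // Run fsck on the root path; the exit code is 0 when the checked
    // paths are healthy.
    int errCode = ToolRunner.run(new DFSck(conf, out), new String[]{"/"});

    System.out.println("fsck exit code: " + errCode);
    System.out.println(bStream.toString());
  }
}

Nearly every example below follows this same capture-and-inspect pattern.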
Example #1
Source File: DataNodeLocatorUtils.java From twister2 with Apache License 2.0
/**
 * This method retrieves the datanode names of the file in the HDFS cluster.
 */
private List<String> getDataNodes(String[] fName) throws IOException {
  Configuration conf = new Configuration(false);
  conf.addResource(new org.apache.hadoop.fs.Path(
      HdfsDataContext.getHdfsConfigDirectory(config)));

  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);

  List<String> datanodesList = new ArrayList<>();
  InetSocketAddress namenodeAddress = new InetSocketAddress(
      HdfsDataContext.getHdfsNamenodeDefault(config),
      HdfsDataContext.getHdfsNamenodePortDefault(config));
  DFSClient dfsClient = new DFSClient(namenodeAddress, conf);
  ClientProtocol nameNode = dfsClient.getNamenode();
  DatanodeInfo[] datanodeReport =
      nameNode.getDatanodeReport(HdfsConstants.DatanodeReportType.ALL);
  for (DatanodeInfo di : datanodeReport) {
    datanodesList.add(di.getHostName());
  }

  // Run fsck to retrieve the datanode names of the respective file.
  try {
    ToolRunner.run(new DFSck(conf, out), fName);
    out.println();
  } catch (Exception e) {
    throw new RuntimeException("Exception occurred: " + e.getMessage(), e);
  }
  bStream.close();
  out.close();
  return datanodesList;
}
Example #2
Source File: TestClientReportBadBlock.java From hadoop with Apache License 2.0
static String runFsck(Configuration conf, int expectedErrCode,
    boolean checkErrorCode, String... path) throws Exception {
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  int errCode = ToolRunner.run(new DFSck(conf, out), path);
  if (checkErrorCode)
    Assert.assertEquals(expectedErrCode, errCode);
  return bStream.toString();
}
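A test would then call this helper with the expected exit code and the paths to check, for example (a hypothetical call, not taken from the test file):

    String output = runFsck(conf, 0, true, "/");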
Example #3
Source File: TestHAFsck.java From hadoop with Apache License 2.0
static void runFsck(Configuration conf) throws Exception {
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  int errCode = ToolRunner.run(new DFSck(conf, out),
      new String[]{"/", "-files"});
  String result = bStream.toString();
  System.out.println("output from fsck:\n" + result);
  assertEquals(0, errCode);
  assertTrue(result.contains("/test1"));
  assertTrue(result.contains("/test2"));
}
Example #4
Source File: TestFsck.java From hadoop with Apache License 2.0
static String runFsck(Configuration conf, int expectedErrCode,
    boolean checkErrorCode, String... path) throws Exception {
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  ((Log4JLogger)FSPermissionChecker.LOG).getLogger().setLevel(Level.ALL);
  int errCode = ToolRunner.run(new DFSck(conf, out), path);
  if (checkErrorCode) {
    assertEquals(expectedErrCode, errCode);
  }
  ((Log4JLogger)FSPermissionChecker.LOG).getLogger().setLevel(Level.INFO);
  FSImage.LOG.error("OUTPUT = " + bStream.toString());
  return bStream.toString();
}
Example #5
Source File: TestEncryptionZones.java From hadoop with Apache License 2.0
/**
 * Test running fsck on a system with encryption zones.
 */
@Test(timeout = 60000)
public void testFsckOnEncryptionZones() throws Exception {
  final int len = 8196;
  final Path zoneParent = new Path("/zones");
  final Path zone1 = new Path(zoneParent, "zone1");
  final Path zone1File = new Path(zone1, "file");
  fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
  dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
  DFSTestUtil.createFile(fs, zone1File, len, (short) 1, 0xFEED);
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  int errCode = ToolRunner.run(new DFSck(conf, out), new String[]{ "/" });
  assertEquals("Fsck ran with non-zero error code", 0, errCode);
  String result = bStream.toString();
  assertTrue("Fsck did not return HEALTHY status",
      result.contains(NamenodeFsck.HEALTHY_STATUS));
  // Run fsck directly on the encryption zone instead of root
  errCode = ToolRunner.run(new DFSck(conf, out),
      new String[]{ zoneParent.toString() });
  assertEquals("Fsck ran with non-zero error code", 0, errCode);
  result = bStream.toString();
  assertTrue("Fsck did not return HEALTHY status",
      result.contains(NamenodeFsck.HEALTHY_STATUS));
}
Example #6
Source File: TestClientReportBadBlock.java From big-c with Apache License 2.0
static String runFsck(Configuration conf, int expectedErrCode,
    boolean checkErrorCode, String... path) throws Exception {
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  int errCode = ToolRunner.run(new DFSck(conf, out), path);
  if (checkErrorCode)
    Assert.assertEquals(expectedErrCode, errCode);
  return bStream.toString();
}
Example #7
Source File: TestHAFsck.java From big-c with Apache License 2.0
static void runFsck(Configuration conf) throws Exception {
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  int errCode = ToolRunner.run(new DFSck(conf, out),
      new String[]{"/", "-files"});
  String result = bStream.toString();
  System.out.println("output from fsck:\n" + result);
  assertEquals(0, errCode);
  assertTrue(result.contains("/test1"));
  assertTrue(result.contains("/test2"));
}
Example #8
Source File: TestFsck.java From big-c with Apache License 2.0
static String runFsck(Configuration conf, int expectedErrCode,
    boolean checkErrorCode, String... path) throws Exception {
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  ((Log4JLogger)FSPermissionChecker.LOG).getLogger().setLevel(Level.ALL);
  int errCode = ToolRunner.run(new DFSck(conf, out), path);
  if (checkErrorCode) {
    assertEquals(expectedErrCode, errCode);
  }
  ((Log4JLogger)FSPermissionChecker.LOG).getLogger().setLevel(Level.INFO);
  FSImage.LOG.error("OUTPUT = " + bStream.toString());
  return bStream.toString();
}
Example #9
Source File: TestEncryptionZones.java From big-c with Apache License 2.0
/**
 * Test running fsck on a system with encryption zones.
 */
@Test(timeout = 60000)
public void testFsckOnEncryptionZones() throws Exception {
  final int len = 8196;
  final Path zoneParent = new Path("/zones");
  final Path zone1 = new Path(zoneParent, "zone1");
  final Path zone1File = new Path(zone1, "file");
  fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
  dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
  DFSTestUtil.createFile(fs, zone1File, len, (short) 1, 0xFEED);
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  int errCode = ToolRunner.run(new DFSck(conf, out), new String[]{ "/" });
  assertEquals("Fsck ran with non-zero error code", 0, errCode);
  String result = bStream.toString();
  assertTrue("Fsck did not return HEALTHY status",
      result.contains(NamenodeFsck.HEALTHY_STATUS));
  // Run fsck directly on the encryption zone instead of root
  errCode = ToolRunner.run(new DFSck(conf, out),
      new String[]{ zoneParent.toString() });
  assertEquals("Fsck ran with non-zero error code", 0, errCode);
  result = bStream.toString();
  assertTrue("Fsck did not return HEALTHY status",
      result.contains(NamenodeFsck.HEALTHY_STATUS));
}
Example #10
Source File: TestFsck.java From RDFS with Apache License 2.0
static String runFsck(Configuration conf, int expectedErrCode,
    boolean checkErrorCode, String... path) throws Exception {
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  ((Log4JLogger) FSPermissionChecker.LOG).getLogger().setLevel(Level.ALL);
  int errCode = ToolRunner.run(new DFSck(conf, out), path);
  if (checkErrorCode) {
    assertEquals(expectedErrCode, errCode);
  }
  ((Log4JLogger) FSPermissionChecker.LOG).getLogger().setLevel(Level.INFO);
  return bStream.toString();
}
Example #11
Source File: DistBlockIntegrityMonitor.java From RDFS with Apache License 2.0
private BufferedReader getLostFileReader(String[] dfsckArgs)
    throws IOException {
  ByteArrayOutputStream bout = new ByteArrayOutputStream();
  PrintStream ps = new PrintStream(bout, true);
  DFSck dfsck = new DFSck(getConf(), ps);
  try {
    dfsck.run(dfsckArgs);
  } catch (Exception e) {
    throw new IOException(e);
  }
  ByteArrayInputStream bin = new ByteArrayInputStream(bout.toByteArray());
  return new BufferedReader(new InputStreamReader(bin));
}
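The returned reader is then scanned for the paths of the lost files. A minimal sketch of such a caller, assuming the fsck flag -list-corruptfileblocks and a line format in which each entry ends with the file path (both assumptions, not taken from DistBlockIntegrityMonitor itself):

// Hypothetical consumer of getLostFileReader(); the fsck arguments and
// the parsing below are assumptions, not part of the original class.
private List<String> parseLostFiles() throws IOException {
  BufferedReader reader =
      getLostFileReader(new String[]{"-list-corruptfileblocks"});
  List<String> lostFiles = new ArrayList<String>();
  String line;
  while ((line = reader.readLine()) != null) {
    int slash = line.indexOf('/');
    // Keep only lines that actually contain an absolute HDFS path.
    if (slash >= 0) {
      lostFiles.add(line.substring(slash).trim());
    }
  }
  reader.close();
  return lostFiles;
}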
Example #12
Source File: TestFsck.java From hadoop-gpu with Apache License 2.0
static String runFsck(Configuration conf, int expectedErrCode,
    boolean checkErrorCode, String... path) throws Exception {
  PrintStream oldOut = System.out;
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream newOut = new PrintStream(bStream, true);
  System.setOut(newOut);
  ((Log4JLogger)PermissionChecker.LOG).getLogger().setLevel(Level.ALL);
  int errCode = ToolRunner.run(new DFSck(conf), path);
  if (checkErrorCode)
    assertEquals(expectedErrCode, errCode);
  ((Log4JLogger)PermissionChecker.LOG).getLogger().setLevel(Level.INFO);
  System.setOut(oldOut);
  return bStream.toString();
}
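Unlike the newer examples above, this older test temporarily swaps System.out for a buffered stream and restores it afterwards, presumably because this version of DFSck only took a Configuration and printed straight to System.out rather than to an injected PrintStream.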
Example #13
Source File: TestHAAppend.java From hadoop with Apache License 2.0
/**
 * Test to verify the processing of PendingDataNodeMessageQueue in case of
 * append. One block will be marked as corrupt if the OP_ADD and
 * OP_UPDATE_BLOCKS edits come in one edit log segment and the OP_CLOSE edit
 * comes in the next log segment, which is loaded during failover.
 * Regression test for HDFS-3605.
 */
@Test
public void testMultipleAppendsDuringCatchupTailing() throws Exception {
  Configuration conf = new Configuration();

  // Set a lengthy edits tailing period, and explicit rolling, so we can
  // control the ingest of edits by the standby for this test.
  conf.set(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, "5000");
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, -1);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3).build();
  FileSystem fs = null;
  try {
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    Path fileToAppend = new Path("/FileToAppend");
    Path fileToTruncate = new Path("/FileToTruncate");

    final byte[] data = new byte[1 << 16];
    DFSUtil.getRandom().nextBytes(data);
    final int[] appendPos = AppendTestUtil.randomFilePartition(
        data.length, COUNT);
    final int[] truncatePos = AppendTestUtil.randomFilePartition(
        data.length, 1);

    // Create file, write some data, and hflush so that the first
    // block is in the edit log prior to roll.
    FSDataOutputStream out = createAndHflush(
        fs, fileToAppend, data, appendPos[0]);
    FSDataOutputStream out4Truncate = createAndHflush(
        fs, fileToTruncate, data, data.length);

    // Let the StandbyNode catch the creation of the file.
    cluster.getNameNode(0).getRpcServer().rollEditLog();
    cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
    out.close();
    out4Truncate.close();

    // Append and re-close a few times, so that many block entries are queued.
    for (int i = 0; i < COUNT; i++) {
      int end = i < COUNT - 1 ? appendPos[i + 1] : data.length;
      out = fs.append(fileToAppend);
      out.write(data, appendPos[i], end - appendPos[i]);
      out.close();
    }
    boolean isTruncateReady = fs.truncate(fileToTruncate, truncatePos[0]);

    // Ensure that blocks have been reported to the SBN ahead of the edits
    // arriving.
    cluster.triggerBlockReports();

    // Failover the current standby to active.
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);

    // Check that fsck doesn't detect any bad blocks on the SBN.
    int rc = ToolRunner.run(new DFSck(cluster.getConfiguration(1)),
        new String[] { "/", "-files", "-blocks" });
    assertEquals(0, rc);

    assertEquals("CorruptBlocks should be empty.", 0,
        cluster.getNameNode(1).getNamesystem().getCorruptReplicaBlocks());

    AppendTestUtil.checkFullFile(fs, fileToAppend, data.length, data,
        fileToAppend.toString());

    if (!isTruncateReady) {
      TestFileTruncate.checkBlockRecovery(fileToTruncate,
          cluster.getFileSystem(1));
    }
    AppendTestUtil.checkFullFile(fs, fileToTruncate, truncatePos[0], data,
        fileToTruncate.toString());
  } finally {
    if (null != cluster) {
      cluster.shutdown();
    }
    if (null != fs) {
      fs.close();
    }
  }
}
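The createAndHflush helper is not shown in this example. Judging from its call sites, it creates the file, writes the first length bytes of data, and hflushes so the block lands in the edit log before the roll. A plausible reconstruction, inferred from the calls rather than copied from TestHAAppend:

// Hypothetical reconstruction of the createAndHflush helper, inferred
// from its call sites above; not the original implementation.
private static FSDataOutputStream createAndHflush(FileSystem fs, Path file,
    byte[] data, int length) throws IOException {
  FSDataOutputStream out = fs.create(file);
  out.write(data, 0, length);
  out.hflush();
  return out;
}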
Example #14
Source File: TestHAAppend.java From big-c with Apache License 2.0
/**
 * Test to verify the processing of PendingDataNodeMessageQueue in case of
 * append. One block will be marked as corrupt if the OP_ADD and
 * OP_UPDATE_BLOCKS edits come in one edit log segment and the OP_CLOSE edit
 * comes in the next log segment, which is loaded during failover.
 * Regression test for HDFS-3605.
 */
@Test
public void testMultipleAppendsDuringCatchupTailing() throws Exception {
  Configuration conf = new Configuration();

  // Set a lengthy edits tailing period, and explicit rolling, so we can
  // control the ingest of edits by the standby for this test.
  conf.set(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, "5000");
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, -1);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3).build();
  FileSystem fs = null;
  try {
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    Path fileToAppend = new Path("/FileToAppend");
    Path fileToTruncate = new Path("/FileToTruncate");

    final byte[] data = new byte[1 << 16];
    DFSUtil.getRandom().nextBytes(data);
    final int[] appendPos = AppendTestUtil.randomFilePartition(
        data.length, COUNT);
    final int[] truncatePos = AppendTestUtil.randomFilePartition(
        data.length, 1);

    // Create file, write some data, and hflush so that the first
    // block is in the edit log prior to roll.
    FSDataOutputStream out = createAndHflush(
        fs, fileToAppend, data, appendPos[0]);
    FSDataOutputStream out4Truncate = createAndHflush(
        fs, fileToTruncate, data, data.length);

    // Let the StandbyNode catch the creation of the file.
    cluster.getNameNode(0).getRpcServer().rollEditLog();
    cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
    out.close();
    out4Truncate.close();

    // Append and re-close a few times, so that many block entries are queued.
    for (int i = 0; i < COUNT; i++) {
      int end = i < COUNT - 1 ? appendPos[i + 1] : data.length;
      out = fs.append(fileToAppend);
      out.write(data, appendPos[i], end - appendPos[i]);
      out.close();
    }
    boolean isTruncateReady = fs.truncate(fileToTruncate, truncatePos[0]);

    // Ensure that blocks have been reported to the SBN ahead of the edits
    // arriving.
    cluster.triggerBlockReports();

    // Failover the current standby to active.
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);

    // Check that fsck doesn't detect any bad blocks on the SBN.
    int rc = ToolRunner.run(new DFSck(cluster.getConfiguration(1)),
        new String[] { "/", "-files", "-blocks" });
    assertEquals(0, rc);

    assertEquals("CorruptBlocks should be empty.", 0,
        cluster.getNameNode(1).getNamesystem().getCorruptReplicaBlocks());

    AppendTestUtil.checkFullFile(fs, fileToAppend, data.length, data,
        fileToAppend.toString());

    if (!isTruncateReady) {
      TestFileTruncate.checkBlockRecovery(fileToTruncate,
          cluster.getFileSystem(1));
    }
    AppendTestUtil.checkFullFile(fs, fileToTruncate, truncatePos[0], data,
        fileToTruncate.toString());
  } finally {
    if (null != cluster) {
      cluster.shutdown();
    }
    if (null != fs) {
      fs.close();
    }
  }
}