Java Code Examples for org.apache.hadoop.hdfs.protocol.LocatedBlocks#get()
The following examples show how to use org.apache.hadoop.hdfs.protocol.LocatedBlocks#get().
Each example is taken from an open-source project; the originating source file and license are noted above each snippet.
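Before the project examples, here is a minimal, self-contained sketch of the call pattern they all share: obtain a DFSClient, ask the NameNode for the LocatedBlocks of a file range, and index into the result with get(). The file path and the assumption that the default filesystem is HDFS are illustrative choices for this sketch, not taken from any example below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class LocatedBlocksGetSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS cluster.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    // Hypothetical file path used only for illustration.
    String src = "/tmp/example.txt";
    // Ask the NameNode for the block locations of the whole file,
    // then pick individual blocks by index with get().
    LocatedBlocks blocks = dfs.getClient()
        .getLocatedBlocks(src, 0, Long.MAX_VALUE);
    for (int i = 0; i < blocks.locatedBlockCount(); i++) {
      LocatedBlock lb = blocks.get(i);
      System.out.println("block " + i + ": " + lb.getBlock()
          + " on " + lb.getLocations().length + " datanode(s)");
    }
  }
}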
Example 1
Source File: TestClientReportBadBlock.java From hadoop with Apache License 2.0
/**
 * Create a file with one block and corrupt some/all of the block replicas.
 */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
    int corruptBlockCount) throws IOException, AccessControlException,
    FileNotFoundException, UnresolvedLinkException, InterruptedException,
    TimeoutException {
  DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
  DFSTestUtil.waitReplication(dfs, filePath, repl);
  // Locate the file blocks by asking name node
  final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
      .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
  Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
  // The file only has one block
  LocatedBlock lblock = locatedblocks.get(0);
  DatanodeInfo[] datanodeinfos = lblock.getLocations();
  ExtendedBlock block = lblock.getBlock();
  // corrupt some/all of the block replicas
  for (int i = 0; i < corruptBlockCount; i++) {
    DatanodeInfo dninfo = datanodeinfos[i];
    final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
    corruptBlock(block, dn);
    LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
        + dninfo);
  }
}
Example 2
Source File: TestDFSClientRetries.java From big-c with Apache License 2.0
private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
  LocatedBlock goodLocatedBlock = goodBlockList.get(0);
  LocatedBlock badLocatedBlock = new LocatedBlock(
      goodLocatedBlock.getBlock(),
      new DatanodeInfo[] {
          DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
      },
      goodLocatedBlock.getStartOffset(),
      false);
  List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
  badBlocks.add(badLocatedBlock);
  return new LocatedBlocks(goodBlockList.getFileLength(), false,
      badBlocks, null, true, null);
}
Example 3
Source File: TestClientReportBadBlock.java From big-c with Apache License 2.0
/**
 * Create a file with one block and corrupt some/all of the block replicas.
 */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
    int corruptBlockCount) throws IOException, AccessControlException,
    FileNotFoundException, UnresolvedLinkException, InterruptedException,
    TimeoutException {
  DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
  DFSTestUtil.waitReplication(dfs, filePath, repl);
  // Locate the file blocks by asking name node
  final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
      .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
  Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
  // The file only has one block
  LocatedBlock lblock = locatedblocks.get(0);
  DatanodeInfo[] datanodeinfos = lblock.getLocations();
  ExtendedBlock block = lblock.getBlock();
  // corrupt some/all of the block replicas
  for (int i = 0; i < corruptBlockCount; i++) {
    DatanodeInfo dninfo = datanodeinfos[i];
    final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
    corruptBlock(block, dn);
    LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
        + dninfo);
  }
}
Example 4
Source File: TestDFSClientRetries.java From hadoop with Apache License 2.0
private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
  LocatedBlock goodLocatedBlock = goodBlockList.get(0);
  LocatedBlock badLocatedBlock = new LocatedBlock(
      goodLocatedBlock.getBlock(),
      new DatanodeInfo[] {
          DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
      },
      goodLocatedBlock.getStartOffset(),
      false);
  List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
  badBlocks.add(badLocatedBlock);
  return new LocatedBlocks(goodBlockList.getFileLength(), false,
      badBlocks, null, true, null);
}
Example 5
Source File: DFSClient.java From hadoop with Apache License 2.0
/**
 * Get block location info about file
 *
 * getBlockLocations() returns a list of hostnames that store
 * data for a specific file region. It returns a set of hostnames
 * for every block within the indicated region.
 *
 * This function is very useful when writing code that considers
 * data-placement when performing operations. For example, the
 * MapReduce system tries to schedule tasks on the same machines
 * as the data-block the task processes.
 */
public BlockLocation[] getBlockLocations(String src, long start,
    long length) throws IOException, UnresolvedLinkException {
  TraceScope scope = getPathTraceScope("getBlockLocations", src);
  try {
    LocatedBlocks blocks = getLocatedBlocks(src, start, length);
    BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
    HdfsBlockLocation[] hdfsLocations = new HdfsBlockLocation[locations.length];
    for (int i = 0; i < locations.length; i++) {
      hdfsLocations[i] = new HdfsBlockLocation(locations[i], blocks.get(i));
    }
    return hdfsLocations;
  } finally {
    scope.close();
  }
}
Example 6
Source File: TestInjectionForSimulatedStorage.java From hadoop with Apache License 2.0
private void waitForBlockReplication(String filename,
    ClientProtocol namenode, int expected, long maxWaitSec)
    throws IOException {
  long start = Time.monotonicNow();

  // wait for all the blocks to be replicated
  LOG.info("Checking for block replication for " + filename);

  LocatedBlocks blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
  assertEquals(numBlocks, blocks.locatedBlockCount());

  for (int i = 0; i < numBlocks; ++i) {
    LOG.info("Checking for block:" + (i+1));
    while (true) { // Loop to check for block i (usually when 0 is done all will be done)
      blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
      assertEquals(numBlocks, blocks.locatedBlockCount());
      LocatedBlock block = blocks.get(i);
      int actual = block.getLocations().length;
      if (actual == expected) {
        LOG.info("Got enough replicas for " + (i+1) + "th block " +
                 block.getBlock() + ", got " + actual + ".");
        break;
      }
      LOG.info("Not enough replicas for " + (i+1) + "th block " +
               block.getBlock() + " yet. Expecting " + expected + ", got " +
               actual + ".");

      if (maxWaitSec > 0 && (Time.monotonicNow() - start) > (maxWaitSec * 1000)) {
        throw new IOException("Timedout while waiting for all blocks to " +
                              " be replicated for " + filename);
      }

      try {
        Thread.sleep(500);
      } catch (InterruptedException ignored) {}
    }
  }
}
Example 7
Source File: TestStorageMover.java From hadoop with Apache License 2.0
private void waitForAllReplicas(int expectedReplicaNum, Path file,
    DistributedFileSystem dfs) throws Exception {
  for (int i = 0; i < 5; i++) {
    LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(file.toString(), 0,
        BLOCK_SIZE);
    LocatedBlock lb = lbs.get(0);
    if (lb.getLocations().length >= expectedReplicaNum) {
      return;
    } else {
      Thread.sleep(1000);
    }
  }
}
Example 8
Source File: TestRaidShellFsck.java From RDFS with Apache License 2.0
/**
 * removes a block from the har part file
 */
private void removeHarParityBlock(int block) throws IOException {
  Path harPath = new Path(RAID_PATH, HAR_NAME);
  FileStatus [] listPaths = dfs.listStatus(harPath);

  boolean deleted = false;

  for (FileStatus f: listPaths) {
    if (f.getPath().getName().startsWith("part-")) {
      final Path partPath = new Path(f.getPath().toUri().getPath());
      final LocatedBlocks partBlocks = dfs.getClient().namenode.
        getBlockLocations(partPath.toString(), 0, f.getLen());

      if (partBlocks.locatedBlockCount() <= block) {
        throw new IOException("invalid har block " + block);
      }

      final LocatedBlock partBlock = partBlocks.get(block);
      removeAndReportBlock(dfs, partPath, partBlock);
      LOG.info("removed block " + block + "/" +
               partBlocks.locatedBlockCount() +
               " of file " + partPath.toString() +
               " block size " + partBlock.getBlockSize());
      deleted = true;
      break;
    }
  }

  if (!deleted) {
    throw new IOException("cannot find part file in " + harPath.toString());
  }
}
Example 9
Source File: TestClientReportBadBlock.java From hadoop with Apache License 2.0
/**
 * Verify the number of corrupted block replicas by fetching the block
 * location from name node.
 */
private void verifyCorruptedBlockCount(Path filePath, int expectedReplicas)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  final LocatedBlocks lBlocks = dfs.dfs.getNamenode().getBlockLocations(
      filePath.toUri().getPath(), 0, Long.MAX_VALUE);
  // we expect only the first block of the file is used for this test
  LocatedBlock firstLocatedBlock = lBlocks.get(0);
  Assert.assertEquals(expectedReplicas,
      firstLocatedBlock.getLocations().length);
}
Example 10
Source File: TestClientReportBadBlock.java From hadoop with Apache License 2.0
/**
 * Verify the first block of the file is corrupted (for all its replica).
 */
private void verifyFirstBlockCorrupted(Path filePath, boolean isCorrupted)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  final LocatedBlocks locatedBlocks = dfs.dfs.getNamenode()
      .getBlockLocations(filePath.toUri().getPath(), 0, Long.MAX_VALUE);
  final LocatedBlock firstLocatedBlock = locatedBlocks.get(0);
  Assert.assertEquals(isCorrupted, firstLocatedBlock.isCorrupt());
}
Example 11
Source File: TestClientReportBadBlock.java From big-c with Apache License 2.0
/**
 * Verify the number of corrupted block replicas by fetching the block
 * location from name node.
 */
private void verifyCorruptedBlockCount(Path filePath, int expectedReplicas)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  final LocatedBlocks lBlocks = dfs.dfs.getNamenode().getBlockLocations(
      filePath.toUri().getPath(), 0, Long.MAX_VALUE);
  // we expect only the first block of the file is used for this test
  LocatedBlock firstLocatedBlock = lBlocks.get(0);
  Assert.assertEquals(expectedReplicas,
      firstLocatedBlock.getLocations().length);
}
Example 12
Source File: TestInjectionForSimulatedStorage.java From big-c with Apache License 2.0
private void waitForBlockReplication(String filename,
    ClientProtocol namenode, int expected, long maxWaitSec)
    throws IOException {
  long start = Time.monotonicNow();

  // wait for all the blocks to be replicated
  LOG.info("Checking for block replication for " + filename);

  LocatedBlocks blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
  assertEquals(numBlocks, blocks.locatedBlockCount());

  for (int i = 0; i < numBlocks; ++i) {
    LOG.info("Checking for block:" + (i+1));
    while (true) { // Loop to check for block i (usually when 0 is done all will be done)
      blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
      assertEquals(numBlocks, blocks.locatedBlockCount());
      LocatedBlock block = blocks.get(i);
      int actual = block.getLocations().length;
      if (actual == expected) {
        LOG.info("Got enough replicas for " + (i+1) + "th block " +
                 block.getBlock() + ", got " + actual + ".");
        break;
      }
      LOG.info("Not enough replicas for " + (i+1) + "th block " +
               block.getBlock() + " yet. Expecting " + expected + ", got " +
               actual + ".");

      if (maxWaitSec > 0 && (Time.monotonicNow() - start) > (maxWaitSec * 1000)) {
        throw new IOException("Timedout while waiting for all blocks to " +
                              " be replicated for " + filename);
      }

      try {
        Thread.sleep(500);
      } catch (InterruptedException ignored) {}
    }
  }
}
Example 13
Source File: TestStorageMover.java From big-c with Apache License 2.0
private void waitForAllReplicas(int expectedReplicaNum, Path file,
    DistributedFileSystem dfs) throws Exception {
  for (int i = 0; i < 5; i++) {
    LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(file.toString(), 0,
        BLOCK_SIZE);
    LocatedBlock lb = lbs.get(0);
    if (lb.getLocations().length >= expectedReplicaNum) {
      return;
    } else {
      Thread.sleep(1000);
    }
  }
}
Example 14
Source File: TestInjectionForSimulatedStorage.java From hadoop-gpu with Apache License 2.0
private void waitForBlockReplication(String filename,
    ClientProtocol namenode, int expected, long maxWaitSec)
    throws IOException {
  long start = System.currentTimeMillis();

  // wait for all the blocks to be replicated
  LOG.info("Checking for block replication for " + filename);

  LocatedBlocks blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
  assertEquals(numBlocks, blocks.locatedBlockCount());

  for (int i = 0; i < numBlocks; ++i) {
    LOG.info("Checking for block:" + (i+1));
    while (true) { // Loop to check for block i (usually when 0 is done all will be done)
      blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
      assertEquals(numBlocks, blocks.locatedBlockCount());
      LocatedBlock block = blocks.get(i);
      int actual = block.getLocations().length;
      if (actual == expected) {
        LOG.info("Got enough replicas for " + (i+1) + "th block " +
                 block.getBlock() + ", got " + actual + ".");
        break;
      }
      LOG.info("Not enough replicas for " + (i+1) + "th block " +
               block.getBlock() + " yet. Expecting " + expected + ", got " +
               actual + ".");

      if (maxWaitSec > 0 &&
          (System.currentTimeMillis() - start) > (maxWaitSec * 1000)) {
        throw new IOException("Timedout while waiting for all blocks to " +
                              " be replicated for " + filename);
      }

      try {
        Thread.sleep(500);
      } catch (InterruptedException ignored) {}
    }
  }
}
Example 15
Source File: TestRaidShellFsck.java From RDFS with Apache License 2.0
/**
 * removes a parity block in the specified stripe
 */
private void removeParityBlock(Path filePath, int stripe) throws IOException {
  // find parity file
  ParityFilePair ppair = ParityFilePair.getParityFile(Codec.getCodec("xor"),
      filePath, conf);
  String parityPathStr = ppair.getPath().toUri().getPath();
  LOG.info("parity path: " + parityPathStr);
  FileSystem parityFS = ppair.getFileSystem();
  if (!(parityFS instanceof DistributedFileSystem)) {
    throw new IOException("parity file is not on distributed file system");
  }
  DistributedFileSystem parityDFS = (DistributedFileSystem) parityFS;

  // now corrupt the block corresponding to the stripe selected
  FileStatus parityFileStatus = parityDFS.getFileStatus(new Path(parityPathStr));
  long parityBlockSize = parityFileStatus.getBlockSize();
  long parityFileLength = parityFileStatus.getLen();
  long parityFileLengthInBlocks = (parityFileLength / parityBlockSize) +
    (((parityFileLength % parityBlockSize) == 0) ? 0L : 1L);
  if (parityFileLengthInBlocks <= stripe) {
    throw new IOException("selected stripe " + stripe +
                          " but parity file only has " +
                          parityFileLengthInBlocks + " blocks");
  }
  if (parityBlockSize != BLOCK_SIZE) {
    throw new IOException("file block size is " + BLOCK_SIZE +
                          " but parity file block size is " +
                          parityBlockSize);
  }
  LocatedBlocks parityFileBlocks = parityDFS.getClient().namenode.
    getBlockLocations(parityPathStr, 0, parityFileLength);
  if (parityFileBlocks.locatedBlockCount() != parityFileLengthInBlocks) {
    throw new IOException("expected " + parityFileLengthInBlocks +
                          " parity file blocks but got " +
                          parityFileBlocks.locatedBlockCount() + " blocks");
  }
  LocatedBlock parityFileBlock = parityFileBlocks.get(stripe);
  removeAndReportBlock(parityDFS, new Path(parityPathStr), parityFileBlock);
  LOG.info("removed parity file block/stripe " + stripe +
           " for " + filePath.toString());
}
Example 16
Source File: DFSInputStream.java From RDFS with Apache License 2.0
/**
 * Grab the open-file info from namenode
 */
synchronized void openInfo() throws IOException {
  if (src == null && blocks == null) {
    throw new IOException("No file provided to open");
  }

  LocatedBlocks newInfo = src != null ?
                          getLocatedBlocks(src, 0, prefetchSize) : blocks;
  if (newInfo == null) {
    throw new IOException("Cannot open filename " + src);
  }

  // I think this check is not correct. A file could have been appended to
  // between two calls to openInfo().
  if (locatedBlocks != null && !locatedBlocks.isUnderConstruction() &&
      !newInfo.isUnderConstruction()) {
    Iterator<LocatedBlock> oldIter = locatedBlocks.getLocatedBlocks().iterator();
    Iterator<LocatedBlock> newIter = newInfo.getLocatedBlocks().iterator();
    while (oldIter.hasNext() && newIter.hasNext()) {
      if (!oldIter.next().getBlock().equals(newIter.next().getBlock())) {
        throw new IOException("Blocklist for " + src + " has changed!");
      }
    }
  }

  // if the file is under construction, then fetch size of last block
  // from datanode.
  if (newInfo.isUnderConstruction() && newInfo.locatedBlockCount() > 0) {
    LocatedBlock last = newInfo.get(newInfo.locatedBlockCount() - 1);
    if (last.getLocations().length > 0) {
      try {
        Block newBlock = getBlockInfo(last);
        // only if the block has data (not null)
        if (newBlock != null) {
          long newBlockSize = newBlock.getNumBytes();
          newInfo.setLastBlockSize(newBlock.getBlockId(), newBlockSize);
        }
      } catch (IOException e) {
        DFSClient.LOG.debug("DFSClient file " + src +
                            " is being concurrently append to" +
                            " but datanodes probably does not have block " +
                            last.getBlock(), e);
      }
    }
  }
  this.locatedBlocks = new DFSLocatedBlocks(newInfo);
  this.currentNode = null;
}
Example 17
Source File: TestRaidShell.java From RDFS with Apache License 2.0
/**
 * Create a file with three stripes, corrupt a block each in two stripes,
 * and wait for the file to be fixed.
 */
public void testBlockFix() throws Exception {
  LOG.info("Test testBlockFix started.");
  long blockSize = 8192L;
  int stripeLength = 3;
  mySetup(stripeLength, -1);
  Path file1 = new Path(RAID_SRC_PATH, "file1");
  Path destPath = new Path("/raid" + RAID_SRC_PATH);
  long crc1 = TestRaidDfs.createTestFilePartialLastBlock(fileSys, file1,
                                                         1, 7, blockSize);
  long file1Len = fileSys.getFileStatus(file1).getLen();
  LOG.info("Test testBlockFix created test files");

  // create an instance of the RaidNode
  Configuration localConf = new Configuration(conf);
  localConf.setInt("raid.blockfix.interval", 1000);
  localConf.set("raid.blockfix.classname",
                "org.apache.hadoop.raid.LocalBlockIntegrityMonitor");
  // the RaidNode does the raiding inline (instead of submitting to map/reduce)
  conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
  // use local block fixer
  conf.set("raid.blockfix.classname",
           "org.apache.hadoop.raid.LocalBlockIntegrityMonitor");

  cnode = RaidNode.createRaidNode(null, localConf);

  try {
    TestRaidDfs.waitForFileRaided(LOG, fileSys, file1, destPath);
    cnode.stop();
    cnode.join();
    cnode = null;

    FileStatus srcStat = fileSys.getFileStatus(file1);
    LocatedBlocks locations = getBlockLocations(file1, srcStat.getLen());

    DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
    ClientProtocol namenode = dfs.getClient().namenode;

    String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals(corruptFiles.length, 0);

    // Corrupt blocks in two different stripes. We can fix them.
    TestRaidDfs.corruptBlock(file1, locations.get(0).getBlock(),
             NUM_DATANODES, true, dfsCluster);  // delete block
    TestRaidDfs.corruptBlock(file1, locations.get(4).getBlock(),
             NUM_DATANODES, false, dfsCluster); // corrupt block
    TestRaidDfs.corruptBlock(file1, locations.get(6).getBlock(),
             NUM_DATANODES, true, dfsCluster);  // delete last (partial) block
    LocatedBlock[] toReport = new LocatedBlock[3];
    toReport[0] = locations.get(0);
    toReport[1] = locations.get(4);
    toReport[2] = locations.get(6);
    namenode.reportBadBlocks(toReport);

    corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals(corruptFiles.length, 1);
    assertEquals(corruptFiles[0], file1.toString());

    // Create RaidShell and fix the file.
    RaidShell shell = new RaidShell(conf);
    String[] args = new String[2];
    args[0] = "-recoverBlocks";
    args[1] = file1.toUri().getPath();
    ToolRunner.run(shell, args);

    long start = System.currentTimeMillis();
    do {
      LOG.info("Test testBlockFix waiting for files to be fixed.");
      Thread.sleep(1000);
      corruptFiles = DFSUtil.getCorruptFiles(dfs);
    } while (corruptFiles.length != 0 &&
             System.currentTimeMillis() - start < 120000);
    assertEquals(0, corruptFiles.length);

    dfs = getDFS(conf, dfs);
    assertTrue(TestRaidDfs.validateFile(dfs, file1, file1Len, crc1));

  } catch (Exception e) {
    LOG.info("Test testBlockFix Exception " + e +
             StringUtils.stringifyException(e));
    throw e;
  } finally {
    myTearDown();
  }
  LOG.info("Test testBlockFix completed.");
}
Example 18
Source File: TestAddBlockRetry.java From big-c with Apache License 2.0
/**
 * Retry addBlock() while another thread is in chooseTarget().
 * See HDFS-4452.
 */
@Test
public void testRetryAddBlockWhileInChooseTarget() throws Exception {
  final String src = "/testRetryAddBlockWhileInChooseTarget";

  final FSNamesystem ns = cluster.getNamesystem();
  final NamenodeProtocols nn = cluster.getNameNodeRpc();
  // create file
  nn.create(src, FsPermission.getFileDefault(),
      "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
      true, (short)3, 1024, null);

  // start first addBlock()
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock[] onRetryBlock = new LocatedBlock[1];
  DatanodeStorageInfo targets[] = ns.getNewBlockTargets(
      src, INodeId.GRANDFATHER_INODE_ID, "clientName", null, null, null,
      onRetryBlock);
  assertNotNull("Targets must be generated", targets);

  // run second addBlock()
  LOG.info("Starting second addBlock for " + src);
  nn.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertTrue("Penultimate block must be complete",
      checkFileProgress(src, false));
  LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
  assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
  LocatedBlock lb2 = lbs.get(0);
  assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);

  // continue first addBlock()
  LocatedBlock newBlock = ns.storeAllocatedBlock(
      src, INodeId.GRANDFATHER_INODE_ID, "clientName", null, targets);
  assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());

  // check locations
  lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
  assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
  LocatedBlock lb1 = lbs.get(0);
  assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
}
Example 19
Source File: TestFileCorruption.java From RDFS with Apache License 2.0
/**
 * check if listCorruptFileBlocks() returns the right number of
 * corrupt files if there are two corrupt files with the same name
 * in different directories
 */
public void test2CorruptFilesWithSameName() throws Exception {
  MiniDFSCluster cluster = null;
  Random random = new Random();

  try {
    Configuration conf = new Configuration();
    // datanode scans directories
    conf.setInt("dfs.datanode.directoryscan.interval", 1);
    // datanode sends block reports
    conf.setInt("dfs.blockreport.intervalMsec", 3 * 1000);
    conf.setBoolean("dfs.permissions", false);
    cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();

    assertTrue("fs is not a DFS", fs instanceof DistributedFileSystem);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    Path file1 = new Path("/srcdat12/test2file.test");
    Path file2 = new Path("/srcdat13/test2file.test");
    // create two files with the same name
    DFSTestUtil.createFile(fs, file1, 1L, (short)1, 1L);
    DFSTestUtil.createFile(fs, file2, 1L, (short)1, 1L);

    // fetch bad file list from namenode. There should be none.
    ClientProtocol namenode = DFSClient.createNamenode(conf);
    String[] badFiles = DFSUtil.getCorruptFiles(dfs);
    assertTrue("Namenode has " + badFiles.length +
               " corrupt files. Expecting None.",
               badFiles.length == 0);

    // Now deliberately corrupt one block in each file
    Path[] files = {file1, file2};
    for (Path file: files) {
      LocatedBlocks fileBlocks =
        namenode.getBlockLocations(file.toString(), 0, 1L);
      LocatedBlock block = fileBlocks.get(0);
      File data_dir = new File(TEST_ROOT_DIR, "dfs/data/");
      File dir1 = cluster.getBlockDirectory("data" + (2 * 0 + 1));
      File dir2 = cluster.getBlockDirectory("data" + (2 * 0 + 2));
      if (!(dir1.isDirectory() && dir2.isDirectory())) {
        throw new IOException("data directories not found for data node 0: " +
                              dir1.toString() + " " + dir2.toString());
      }

      File[] dirs = new File[2];
      dirs[0] = dir1;
      dirs[1] = dir2;
      for (File dir: dirs) {
        File[] blockFiles = dir.listFiles();
        if ((blockFiles == null) || (blockFiles.length == 0)) {
          throw new IOException("no blocks found in data node's data directory");
        }

        for (File blockFile: blockFiles) {
          if ((blockFile.getName().
               startsWith("blk_" + block.getBlock().getBlockId())) &&
              (!blockFile.getName().endsWith(".meta"))) {
            blockFile.delete();
          }
        }
      }
      LocatedBlock[] toReport = { block };
      namenode.reportBadBlocks(toReport);
    }

    // fetch bad file list from namenode. There should be 2.
    badFiles = DFSUtil.getCorruptFiles(dfs);
    assertTrue("Namenode has " + badFiles.length +
               " bad files. Expecting 2.",
               badFiles.length == 2);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 20
Source File: TestDiskError.java From hadoop-gpu with Apache License 2.0
public void testReplicationError() throws Exception {
  // bring up a cluster of 1
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();

  try {
    // create a file of replication factor of 1
    final Path fileName = new Path("/test.txt");
    final int fileLen = 1;
    DFSTestUtil.createFile(fs, fileName, 1, (short)1, 1L);
    DFSTestUtil.waitReplication(fs, fileName, (short)1);

    // get the block belonged to the created file
    LocatedBlocks blocks = cluster.getNameNode().namesystem.getBlockLocations(
        fileName.toString(), 0, (long)fileLen);
    assertEquals(blocks.locatedBlockCount(), 1);
    LocatedBlock block = blocks.get(0);

    // bring up a second datanode
    cluster.startDataNodes(conf, 1, true, null, null);
    cluster.waitActive();
    final int sndNode = 1;
    DataNode datanode = cluster.getDataNodes().get(sndNode);

    // replicate the block to the second datanode
    InetSocketAddress target = datanode.getSelfAddr();
    Socket s = new Socket(target.getAddress(), target.getPort());
    // write the header.
    DataOutputStream out = new DataOutputStream(s.getOutputStream());

    out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
    out.write(DataTransferProtocol.OP_WRITE_BLOCK);
    out.writeLong(block.getBlock().getBlockId());
    out.writeLong(block.getBlock().getGenerationStamp());
    out.writeInt(1);
    out.writeBoolean(false);       // recovery flag
    Text.writeString(out, "");
    out.writeBoolean(false);       // Not sending src node information
    out.writeInt(0);
    // write check header
    out.writeByte(1);
    out.writeInt(512);

    out.flush();

    // close the connection before sending the content of the block
    out.close();

    // the temporary block & meta files should be deleted
    String dataDir = cluster.getDataDirectory();
    File dir1 = new File(new File(dataDir, "data" + (2 * sndNode + 1)), "tmp");
    File dir2 = new File(new File(dataDir, "data" + (2 * sndNode + 2)), "tmp");
    while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
      Thread.sleep(100);
    }

    // then increase the file's replication factor
    fs.setReplication(fileName, (short)2);
    // replication should succeed
    DFSTestUtil.waitReplication(fs, fileName, (short)1);

    // clean up the file
    fs.delete(fileName, false);
  } finally {
    cluster.shutdown();
  }
}