Java Code Examples for org.apache.hadoop.hdfs.protocol.Block#isBlockFilename()
The following examples show how to use org.apache.hadoop.hdfs.protocol.Block#isBlockFilename().
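Taken together, the examples share a single pattern: list the files in a datanode storage directory, filter the listing with Block.isBlockFilename(...) so that metadata files, temporary files, and subdirectories are skipped, and only then build Block objects from what remains. The sketch below distills that pattern. It is a minimal illustration under stated assumptions, not code from any project listed here: the BlockFileScanner class and its command-line usage are invented for the example, and the available overload differs by fork (the Apache Hadoop examples pass a File, while the RDFS examples pass the file name as a String).

import java.io.File;

import org.apache.hadoop.hdfs.protocol.Block;

/** Minimal sketch: filter a directory listing down to HDFS block files. */
public class BlockFileScanner {

  /**
   * Print the block files found directly under the given storage directory,
   * skipping .meta files, temporary files, and subdirectories.
   */
  public static void printBlockFiles(File storageDir) {
    File[] files = storageDir.listFiles();
    if (files == null) {
      return; // not a directory, or the listing failed
    }
    for (File file : files) {
      // Hadoop's Block.isBlockFilename(File) matches names of the form
      // blk_<blockId>; older forks such as RDFS expose a String overload.
      if (Block.isBlockFilename(file)) {
        System.out.println("block file: " + file.getAbsolutePath());
      }
    }
  }

  public static void main(String[] args) {
    if (args.length != 1) {
      System.err.println("usage: BlockFileScanner <datanode-storage-dir>");
      return;
    }
    // Hypothetical invocation; point this at a real datanode data directory.
    printBlockFiles(new File(args[0]));
  }
}

A block file is named blk_<blockId>, and its generation stamp lives in a companion blk_<blockId>_<genStamp>.meta file, which is why most examples below pair isBlockFilename() with a getGenerationStampFromFile() helper.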
Example 1
Source File: FSDataset.java (from RDFS, Apache License 2.0)
/**
 * Populate the given blockSet with any child blocks
 * found at this node.
 */
public void getBlockInfo(LightWeightHashSet<Block> blockSet) {
  FSDir[] children = this.getChildren();
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getBlockInfo(blockSet);
    }
  }

  File blockFiles[] = dir.listFiles();
  String[] blockFilesNames = getFileNames(blockFiles);
  for (int i = 0; i < blockFiles.length; i++) {
    if (Block.isBlockFilename(blockFilesNames[i])) {
      long genStamp = FSDataset.getGenerationStampFromFile(blockFilesNames,
          blockFilesNames[i]);
      blockSet.add(new Block(blockFiles[i], blockFiles[i].length(), genStamp));
    }
  }
}
Example 2
Source File: FSDataset.java (from RDFS, Apache License 2.0)
/**
 * Populate the given blockSet with any child blocks
 * found at this node. With each block, return the full path
 * of the block file.
 */
void getBlockAndFileInfo(LightWeightHashSet<BlockAndFile> blockSet) {
  FSDir[] children = this.getChildren();
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getBlockAndFileInfo(blockSet);
    }
  }

  File blockFiles[] = dir.listFiles();
  String[] blockFilesNames = getFileNames(blockFiles);
  for (int i = 0; i < blockFiles.length; i++) {
    if (Block.isBlockFilename(blockFilesNames[i])) {
      long genStamp = FSDataset.getGenerationStampFromFile(blockFilesNames,
          blockFilesNames[i]);
      Block block = new Block(blockFiles[i], blockFiles[i].length(), genStamp);
      blockSet.add(new BlockAndFile(blockFiles[i].getAbsoluteFile(), block));
    }
  }
}
Example 3
Source File: FSDataset.java (from hadoop-gpu, Apache License 2.0)
/**
 * Populate the given blockSet with any child blocks
 * found at this node.
 */
public void getBlockInfo(TreeSet<Block> blockSet) {
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getBlockInfo(blockSet);
    }
  }

  File blockFiles[] = dir.listFiles();
  for (int i = 0; i < blockFiles.length; i++) {
    if (Block.isBlockFilename(blockFiles[i])) {
      long genStamp = getGenerationStampFromFile(blockFiles, blockFiles[i]);
      blockSet.add(new Block(blockFiles[i], blockFiles[i].length(), genStamp));
    }
  }
}
Example 4
Source File: FSDataset.java (from hadoop-gpu, Apache License 2.0)
/**
 * Populate the given volumeMap with the blocks found at this node,
 * mapping each block to its location on the given volume.
 */
void getVolumeMap(HashMap<Block, DatanodeBlockInfo> volumeMap, FSVolume volume) {
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getVolumeMap(volumeMap, volume);
    }
  }

  File blockFiles[] = dir.listFiles();
  for (int i = 0; i < blockFiles.length; i++) {
    if (Block.isBlockFilename(blockFiles[i])) {
      long genStamp = getGenerationStampFromFile(blockFiles, blockFiles[i]);
      volumeMap.put(new Block(blockFiles[i], blockFiles[i].length(), genStamp),
          new DatanodeBlockInfo(volume, blockFiles[i]));
    }
  }
}
Example 5
Source File: BlockPoolSlice.java (from lucene-solr, Apache License 2.0)
/**
 * Add replicas under the given directory to the volume map.
 * @param volumeMap the replicas map
 * @param dir an input directory
 * @param lazyWriteReplicaMap map of replicas on transient storage
 * @param isFinalized true if the directory has finalized replicas;
 *                    false if the directory has rbw replicas
 * @param exceptions list of exceptions that need to be returned to the
 *                   parent thread
 * @param subTaskQueue queue of sub tasks
 */
void addToReplicasMap(ReplicaMap volumeMap, File dir,
    final RamDiskReplicaTracker lazyWriteReplicaMap, boolean isFinalized,
    List<IOException> exceptions, Queue<RecursiveAction> subTaskQueue)
    throws IOException {
  File[] files = fileIoProvider.listFiles(volume, dir);
  Arrays.sort(files, FILE_COMPARATOR);
  for (int i = 0; i < files.length; i++) {
    File file = files[i];
    if (file.isDirectory()) {
      // Launch new sub task.
      AddReplicaProcessor subTask = new AddReplicaProcessor(volumeMap, file,
          lazyWriteReplicaMap, isFinalized, exceptions, subTaskQueue);
      subTask.fork();
      subTaskQueue.add(subTask);
    }

    if (isFinalized && FsDatasetUtil.isUnlinkTmpFile(file)) {
      file = recoverTempUnlinkedBlock(file);
      if (file == null) {
        // the original block still exists, so we cover it
        // in another iteration and can continue here
        continue;
      }
    }
    if (!Block.isBlockFilename(file)) {
      continue;
    }

    long genStamp = FsDatasetUtil.getGenerationStampFromFile(files, file, i);
    long blockId = Block.filename2id(file.getName());
    Block block = new Block(blockId, file.length(), genStamp);
    addReplicaToReplicasMap(block, volumeMap, lazyWriteReplicaMap, isFinalized);
  }
}
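Unlike the single-threaded recursion in the FSDataset examples above, this version parallelizes the scan: each subdirectory is wrapped in an AddReplicaProcessor (a RecursiveAction) that is forked onto the fork/join pool and queued, while the current thread keeps filtering the files of the present directory with Block.isBlockFilename(file).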
Example 6
Source File: FSDataset.java (from RDFS, Apache License 2.0)
void getBlocksBeingWrittenInfo(LightWeightHashSet<Block> blockSet) {
  if (rbwDir == null) {
    return;
  }

  File[] blockFiles = rbwDir.listFiles();
  if (blockFiles == null) {
    return;
  }

  String[] blockFileNames = getFileNames(blockFiles);
  for (int i = 0; i < blockFiles.length; i++) {
    if (!blockFiles[i].isDirectory()) {
      // get each block in the rbwDir directory
      if (Block.isBlockFilename(blockFileNames[i])) {
        long genStamp = FSDataset.getGenerationStampFromFile(
            blockFileNames, blockFileNames[i]);
        Block block = new Block(blockFiles[i], blockFiles[i].length(), genStamp);
        // add this block to block set
        blockSet.add(block);
        if (DataNode.LOG.isDebugEnabled()) {
          DataNode.LOG.debug("recoverBlocksBeingWritten for block " + block);
        }
      }
    }
  }
}
Example 7
Source File: FSDataset.java (from hadoop-gpu, Apache License 2.0)
/**
 * Load the given directory, creating it if necessary, then scan it to
 * count block files and build the array of child directories.
 */
public FSDir(File dir) throws IOException {
  this.dir = dir;
  this.children = null;
  if (!dir.exists()) {
    if (!dir.mkdirs()) {
      throw new IOException("Mkdirs failed to create " + dir.toString());
    }
  } else {
    File[] files = dir.listFiles();
    int numChildren = 0;
    for (int idx = 0; idx < files.length; idx++) {
      if (files[idx].isDirectory()) {
        numChildren++;
      } else if (Block.isBlockFilename(files[idx])) {
        numBlocks++;
      }
    }
    if (numChildren > 0) {
      children = new FSDir[numChildren];
      int curdir = 0;
      for (int idx = 0; idx < files.length; idx++) {
        if (files[idx].isDirectory()) {
          children[curdir] = new FSDir(files[idx]);
          curdir++;
        }
      }
    }
  }
}
Example 8
Source File: TestDatanodeRestart.java (from hadoop, Apache License 2.0)
private void testRbwReplicas(MiniDFSCluster cluster, boolean isCorrupt)
    throws IOException {
  FSDataOutputStream out = null;
  FileSystem fs = cluster.getFileSystem();
  final Path src = new Path("/test.txt");
  try {
    final int fileLen = 515;
    // create some rbw replicas on disk
    byte[] writeBuf = new byte[fileLen];
    new Random().nextBytes(writeBuf);
    out = fs.create(src);
    out.write(writeBuf);
    out.hflush();
    DataNode dn = cluster.getDataNodes().get(0);
    for (FsVolumeSpi v : dataset(dn).getVolumes()) {
      final FsVolumeImpl volume = (FsVolumeImpl) v;
      File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
      File rbwDir = new File(currentDir, "rbw");
      for (File file : rbwDir.listFiles()) {
        if (isCorrupt && Block.isBlockFilename(file)) {
          new RandomAccessFile(file, "rw").setLength(fileLen - 1); // corrupt
        }
      }
    }
    cluster.restartDataNodes();
    cluster.waitActive();
    dn = cluster.getDataNodes().get(0);

    // check volumeMap: one rwr replica
    String bpid = cluster.getNamesystem().getBlockPoolId();
    ReplicaMap replicas = dataset(dn).volumeMap;
    Assert.assertEquals(1, replicas.size(bpid));
    ReplicaInfo replica = replicas.replicas(bpid).iterator().next();
    Assert.assertEquals(ReplicaState.RWR, replica.getState());
    if (isCorrupt) {
      Assert.assertEquals((fileLen - 1) / 512 * 512, replica.getNumBytes());
    } else {
      Assert.assertEquals(fileLen, replica.getNumBytes());
    }
    dataset(dn).invalidate(bpid, new Block[] { replica });
  } finally {
    IOUtils.closeStream(out);
    if (fs.exists(src)) {
      fs.delete(src, false);
    }
    fs.close();
  }
}
Example 9
Source File: TestDatanodeRestart.java (from RDFS, Apache License 2.0)
private void testRbwReplicas(MiniDFSCluster cluster, boolean isCorrupt)
    throws IOException {
  FSDataOutputStream out = null;
  try {
    FileSystem fs = cluster.getFileSystem();
    NamespaceInfo nsInfo = cluster.getNameNode().versionRequest();
    final int fileLen = 515;
    // create some rbw replicas on disk
    byte[] writeBuf = new byte[fileLen];
    new Random().nextBytes(writeBuf);
    final Path src = new Path("/test.txt");
    out = fs.create(src);
    out.write(writeBuf);
    out.sync();
    DataNode dn = cluster.getDataNodes().get(0);

    // corrupt rbw replicas
    for (FSVolume volume : ((FSDataset) dn.data).volumes.getVolumes()) {
      File rbwDir = volume.getRbwDir(nsInfo.getNamespaceID());
      for (File file : rbwDir.listFiles()) {
        if (isCorrupt && Block.isBlockFilename(file.getName())) {
          new RandomAccessFile(file, "rw").setLength(fileLen - 1); // corrupt
        }
      }
    }
    cluster.restartDataNodes();
    cluster.waitActive();
    dn = cluster.getDataNodes().get(0);

    // check volumeMap: one rbw replica
    Map<Block, DatanodeBlockInfo> volumeMap =
        ((FSDataset) (dn.data)).volumeMap.getNamespaceMap(nsInfo.getNamespaceID());
    assertEquals(1, volumeMap.size());
    Block replica = volumeMap.keySet().iterator().next();
    if (isCorrupt) {
      assertEquals((fileLen - 1), replica.getNumBytes());
    } else {
      assertEquals(fileLen, replica.getNumBytes());
    }
    dn.data.invalidate(nsInfo.getNamespaceID(), new Block[] { replica });
    fs.delete(src, false);
  } finally {
    IOUtils.closeStream(out);
  }
}