org.apache.hadoop.hdfs.protocol.Block Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.protocol.Block.
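Before the project examples, here is a minimal, self-contained sketch of constructing a Block and reading its fields. The three-argument constructor (block id, length in bytes, generation stamp) and the accessors shown are the ones the examples below rely on; the concrete values are made up for illustration.

import org.apache.hadoop.hdfs.protocol.Block;

public class BlockIntro {
  public static void main(String[] args) {
    // Hypothetical values: block id 42, length 0 bytes, generation stamp 1001.
    Block b = new Block(42L, 0L, 1001L);

    System.out.println(b.getBlockId());          // 42
    System.out.println(b.getNumBytes());         // 0
    System.out.println(b.getGenerationStamp());  // 1001
    // The block name is the on-disk file prefix plus the id, typically "blk_42".
    System.out.println(b.getBlockName());
  }
}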
Example #1
Source File: TestReplicationPolicy.java From big-c with Apache License 2.0
/** asserts the chosen blocks with expected priority blocks */
private void assertTheChosenBlocks(
    List<List<Block>> chosenBlocks, int firstPrioritySize,
    int secondPrioritySize, int thirdPrioritySize, int fourthPrioritySize,
    int fifthPrioritySize) {
  assertEquals(
      "Not returned the expected number of QUEUE_HIGHEST_PRIORITY blocks",
      firstPrioritySize, chosenBlocks.get(
          UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY).size());
  assertEquals(
      "Not returned the expected number of QUEUE_VERY_UNDER_REPLICATED blocks",
      secondPrioritySize, chosenBlocks.get(
          UnderReplicatedBlocks.QUEUE_VERY_UNDER_REPLICATED).size());
  assertEquals(
      "Not returned the expected number of QUEUE_UNDER_REPLICATED blocks",
      thirdPrioritySize, chosenBlocks.get(
          UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED).size());
  assertEquals(
      "Not returned the expected number of QUEUE_REPLICAS_BADLY_DISTRIBUTED blocks",
      fourthPrioritySize, chosenBlocks.get(
          UnderReplicatedBlocks.QUEUE_REPLICAS_BADLY_DISTRIBUTED).size());
  assertEquals(
      "Not returned the expected number of QUEUE_WITH_CORRUPT_BLOCKS blocks",
      fifthPrioritySize, chosenBlocks.get(
          UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS).size());
}
Example #2
Source File: DataNode.java From hadoop-gpu with Apache License 2.0
public Daemon recoverBlocks(final Block[] blocks, final DatanodeInfo[][] targets) {
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    public void run() {
      for (int i = 0; i < blocks.length; i++) {
        try {
          logRecoverBlock("NameNode", blocks[i], targets[i]);
          recoverBlock(blocks[i], false, targets[i], true);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED, blocks[" + i + "]=" + blocks[i], e);
        }
      }
    }
  });
  d.start();
  return d;
}
Example #3
Source File: TestSequentialBlockId.java From hadoop with Apache License 2.0
/**
 * Test that the block type (legacy or not) can be correctly detected
 * based on its generation stamp.
 *
 * @throws IOException
 */
@Test
public void testBlockTypeDetection() throws IOException {
  // Setup a mock object and stub out a few routines to
  // retrieve the generation stamp counters.
  BlockIdManager bid = mock(BlockIdManager.class);
  final long maxGenStampForLegacyBlocks = 10000;

  when(bid.getGenerationStampV1Limit())
      .thenReturn(maxGenStampForLegacyBlocks);

  Block legacyBlock = spy(new Block());
  when(legacyBlock.getGenerationStamp())
      .thenReturn(maxGenStampForLegacyBlocks / 2);

  Block newBlock = spy(new Block());
  when(newBlock.getGenerationStamp())
      .thenReturn(maxGenStampForLegacyBlocks + 1);

  // Make sure that isLegacyBlock() can correctly detect
  // legacy and new blocks.
  when(bid.isLegacyBlock(any(Block.class))).thenCallRealMethod();
  assertThat(bid.isLegacyBlock(legacyBlock), is(true));
  assertThat(bid.isLegacyBlock(newBlock), is(false));
}
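The mocked test above only pins down the contract; as a rough restatement, the legacy check boils down to comparing a block's generation stamp against the generation-stamp-V1 limit. The sketch below is an assumption about what isLegacyBlock() computes, not a copy of it, and the threshold value is hypothetical.

import org.apache.hadoop.hdfs.protocol.Block;

public class LegacyBlockCheck {
  public static void main(String[] args) {
    // Hypothetical V1 limit, mirroring the 10000 used in the test above.
    final long generationStampV1Limit = 10000L;

    Block candidate = new Block(1L, 0L, 5000L);

    // Assumed rule: stamps issued before the V1 limit mark a legacy block.
    boolean isLegacy = candidate.getGenerationStamp() < generationStampV1Limit;
    System.out.println(isLegacy);  // true for a stamp of 5000
  }
}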
Example #4
Source File: FSEditLogOp.java From hadoop with Apache License 2.0
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
  path = FSImageSerialization.readString(in);
  Block[] blocks = FSImageSerialization.readCompactBlockArray(in, logVersion);
  Preconditions.checkState(blocks.length == 2 || blocks.length == 1);
  penultimateBlock = blocks.length == 1 ? null : blocks[0];
  lastBlock = blocks[blocks.length - 1];
  readRpcIds(in, logVersion);
}
Example #5
Source File: TestPBHelper.java From hadoop with Apache License 2.0
private static BlockWithLocations getBlockWithLocations(int bid) {
  final String[] datanodeUuids = {"dn1", "dn2", "dn3"};
  final String[] storageIDs = {"s1", "s2", "s3"};
  final StorageType[] storageTypes = {
      StorageType.DISK, StorageType.DISK, StorageType.DISK};
  return new BlockWithLocations(new Block(bid, 0, 1),
      datanodeUuids, storageIDs, storageTypes);
}
Example #6
Source File: BlockManager.java From hadoop with Apache License 2.0
boolean blockHasEnoughRacks(Block b) {
  if (!this.shouldCheckForEnoughRacks) {
    return true;
  }
  boolean enoughRacks = false;
  Collection<DatanodeDescriptor> corruptNodes = corruptReplicas.getNodes(b);
  int numExpectedReplicas = getReplication(b);
  String rackName = null;
  for (DatanodeStorageInfo storage : blocksMap.getStorages(b)) {
    final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
    if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
      if ((corruptNodes == null) || !corruptNodes.contains(cur)) {
        if (numExpectedReplicas == 1 ||
            (numExpectedReplicas > 1 &&
                !datanodeManager.hasClusterEverBeenMultiRack())) {
          enoughRacks = true;
          break;
        }
        String rackNameNew = cur.getNetworkLocation();
        if (rackName == null) {
          rackName = rackNameNew;
        } else if (!rackName.equals(rackNameNew)) {
          enoughRacks = true;
          break;
        }
      }
    }
  }
  return enoughRacks;
}
Example #7
Source File: InvalidateBlocks.java From hadoop with Apache License 2.0
/** Remove the block from the specified storage. */
synchronized void remove(final DatanodeInfo dn, final Block block) {
  final LightWeightHashSet<Block> v = node2blocks.get(dn);
  if (v != null && v.remove(block)) {
    numBlocks--;
    if (v.isEmpty()) {
      node2blocks.remove(dn);
    }
  }
}
Example #8
Source File: CorruptReplicasMap.java From hadoop with Apache License 2.0
/**
 * Return the reason why a replica of the given block on the given
 * datanode was marked corrupt.
 * @param block block that has a corrupted replica
 * @param node datanode that contains this corrupted replica
 * @return the reason, or null if none is recorded
 */
String getCorruptReason(Block block, DatanodeDescriptor node) {
  Reason reason = null;
  if (corruptReplicasMap.containsKey(block)) {
    if (corruptReplicasMap.get(block).containsKey(node)) {
      reason = corruptReplicasMap.get(block).get(node);
    }
  }
  if (reason != null) {
    return reason.toString();
  } else {
    return null;
  }
}
Example #9
Source File: ReplicaMap.java From big-c with Apache License 2.0
/**
 * Get the meta information of the replica that matches both block id
 * and generation stamp
 * @param bpid block pool id
 * @param block block with its id as the key
 * @return the replica's meta information
 * @throws IllegalArgumentException if the input block or block pool is null
 */
ReplicaInfo get(String bpid, Block block) {
  checkBlockPool(bpid);
  checkBlock(block);
  ReplicaInfo replicaInfo = get(bpid, block.getBlockId());
  if (replicaInfo != null &&
      block.getGenerationStamp() == replicaInfo.getGenerationStamp()) {
    return replicaInfo;
  }
  return null;
}
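The explicit generation-stamp comparison above matters because, in the Hadoop 2.x sources these examples are drawn from, Block equality and hashing are keyed on the block id alone (an assumption worth verifying against your version), so a lookup by id can hand back a stale replica. A small illustrative sketch with made-up values:

import org.apache.hadoop.hdfs.protocol.Block;

public class BlockIdentitySketch {
  public static void main(String[] args) {
    // Two hypothetical blocks sharing an id but carrying different generation stamps.
    Block current = new Block(7L, 1024L, 2001L);
    Block stale = new Block(7L, 1024L, 1001L);

    // Expected to print true on versions where equals() ignores the generation
    // stamp; callers that care about freshness compare stamps explicitly,
    // exactly as ReplicaMap.get(...) does above.
    System.out.println(current.equals(stale));
    System.out.println(current.getGenerationStamp() == stale.getGenerationStamp()); // false
  }
}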
Example #10
Source File: CorruptReplicasMap.java From big-c with Apache License 2.0
/**
 * Get the nodes which have corrupt replicas of the given block.
 *
 * @param blk Block for which nodes are requested
 * @return collection of nodes, or null if none exists
 */
Collection<DatanodeDescriptor> getNodes(Block blk) {
  Map<DatanodeDescriptor, Reason> nodes = corruptReplicasMap.get(blk);
  if (nodes == null)
    return null;
  return nodes.keySet();
}
Example #11
Source File: BlockManager.java From big-c with Apache License 2.0
/**
 * Queue the given reported block for later processing in the
 * standby node. @see PendingDataNodeMessages.
 * @param reason a textual reason to report in the debug logs
 */
private void queueReportedBlock(DatanodeStorageInfo storageInfo, Block block,
    ReplicaState reportedState, String reason) {
  assert shouldPostponeBlocksFromFuture;

  if (LOG.isDebugEnabled()) {
    LOG.debug("Queueing reported block " + block +
        " in state " + reportedState +
        " from datanode " + storageInfo.getDatanodeDescriptor() +
        " for later processing because " + reason + ".");
  }
  pendingDNMessages.enqueueReportedBlock(storageInfo, block, reportedState);
}
Example #12
Source File: TestBlockManager.java From hadoop with Apache License 2.0
private BlockInfoContiguous blockOnNodes(long blkId, List<DatanodeDescriptor> nodes) {
  Block block = new Block(blkId);
  BlockInfoContiguous blockInfo = new BlockInfoContiguous(block, (short) 3);

  for (DatanodeDescriptor dn : nodes) {
    for (DatanodeStorageInfo storage : dn.getStorageInfos()) {
      blockInfo.addStorage(storage);
    }
  }
  return blockInfo;
}
Example #13
Source File: PendingReplicationBlocks.java From hadoop with Apache License 2.0
/**
 * Add a block to the list of pending replications
 * @param block The corresponding block
 * @param targets The DataNodes where replicas of the block should be placed
 */
void increment(Block block, DatanodeDescriptor[] targets) {
  synchronized (pendingReplications) {
    PendingBlockInfo found = pendingReplications.get(block);
    if (found == null) {
      pendingReplications.put(block, new PendingBlockInfo(targets));
    } else {
      found.incrementReplicas(targets);
      found.setTimeStamp();
    }
  }
}
Example #14
Source File: RemoteBlockReader.java From hadoop with Apache License 2.0
private RemoteBlockReader(String file, String bpid, long blockId,
    DataInputStream in, DataChecksum checksum, boolean verifyChecksum,
    long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache) {
  // Path is used only for printing block and file information in debug
  super(new Path("/" + Block.BLOCK_FILE_PREFIX + blockId +
          ":" + bpid + ":of:" + file)/*too non path-like?*/,
      1, verifyChecksum,
      checksum.getChecksumSize() > 0 ? checksum : null,
      checksum.getBytesPerChecksum(),
      checksum.getChecksumSize());

  this.isLocal = DFSClient.isLocalAddress(NetUtils.
      createSocketAddr(datanodeID.getXferAddr()));

  this.peer = peer;
  this.datanodeID = datanodeID;
  this.in = in;
  this.checksum = checksum;
  this.startOffset = Math.max(startOffset, 0);
  this.blockId = blockId;

  // The total number of bytes that we need to transfer from the DN is
  // the amount that the user wants (bytesToRead), plus the padding at
  // the beginning in order to chunk-align. Note that the DN may elect
  // to send more than this amount if the read starts/ends mid-chunk.
  this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);

  this.firstChunkOffset = firstChunkOffset;
  lastChunkOffset = firstChunkOffset;
  lastChunkLen = -1;

  bytesPerChecksum = this.checksum.getBytesPerChecksum();
  checksumSize = this.checksum.getChecksumSize();
  this.peerCache = peerCache;
}
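The bytesNeededToFinish arithmetic in the constructor above is easier to see with concrete numbers. The values below are made up; the point is that the reader must fetch the caller's bytes plus the padding back to the start of the first checksum chunk.

public class ChunkAlignmentExample {
  public static void main(String[] args) {
    // Hypothetical read: 4000 bytes wanted, starting at offset 1000, while the
    // enclosing checksum chunk begins at offset 512.
    long startOffset = 1000L;
    long firstChunkOffset = 512L;
    long bytesToRead = 4000L;

    // Same formula as the constructor above: requested bytes plus leading padding.
    long bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
    System.out.println(bytesNeededToFinish);  // 4488
  }
}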
Example #15
Source File: DistributedAvatarFileSystem.java From RDFS with Apache License 2.0
@Override
public LocatedBlockWithMetaInfo addBlockAndFetchMetaInfo(final String src,
    final String clientName, final DatanodeInfo[] excludedNodes,
    final DatanodeInfo[] favoredNodes, final long startPos,
    final Block lastBlock) throws IOException {
  return (new MutableFSCaller<LocatedBlockWithMetaInfo>() {
    @Override
    LocatedBlockWithMetaInfo call(int retries) throws IOException {
      if (retries > 0 && lastBlock == null) {
        FileStatus info = namenode.getFileInfo(src);
        if (info != null) {
          LocatedBlocks blocks = namenode.getBlockLocations(src, 0,
              info.getLen());
          if (blocks.locatedBlockCount() > 0) {
            LocatedBlock last = blocks.get(blocks.locatedBlockCount() - 1);
            if (last.getBlockSize() == 0) {
              // This one has not been written to
              namenode.abandonBlock(last.getBlock(), src, clientName);
            }
          }
        }
      }
      return namenode.addBlockAndFetchMetaInfo(src, clientName,
          excludedNodes, favoredNodes, startPos, lastBlock);
    }
  }).callFS();
}
Example #16
Source File: BlockPoolSlice.java From lucene-solr with Apache License 2.0
/**
 * Temporary files. They get moved to the finalized block directory when
 * the block is finalized.
 */
File createTmpFile(Block b) throws IOException {
  File f = new File(tmpDir, b.getBlockName());
  File tmpFile = DatanodeUtil.createFileWithExistsCheck(
      volume, b, f, fileIoProvider);
  // If any exception occurs during creation, the counter is expected not to be
  // incremented, so there is no need to decrement it.
  incrNumBlocks();
  return tmpFile;
}
Example #17
Source File: BlockManager.java From hadoop with Apache License 2.0
private void addToExcessReplicate(DatanodeInfo dn, Block block) {
  assert namesystem.hasWriteLock();
  LightWeightLinkedSet<Block> excessBlocks = excessReplicateMap.get(
      dn.getDatanodeUuid());
  if (excessBlocks == null) {
    excessBlocks = new LightWeightLinkedSet<Block>();
    excessReplicateMap.put(dn.getDatanodeUuid(), excessBlocks);
  }
  if (excessBlocks.add(block)) {
    excessBlocksCount.incrementAndGet();
    blockLog.debug("BLOCK* addToExcessReplicate: ({}, {}) is added to"
        + " excessReplicateMap", dn, block);
  }
}
Example #18
Source File: TestCorruptReplicaInfo.java From big-c with Apache License 2.0
private Block getBlock(Long block_id) {
  if (!block_map.containsKey(block_id)) {
    block_map.put(block_id, new Block(block_id, 0, 0));
  }
  return block_map.get(block_id);
}
Example #19
Source File: TestCommitBlockSynchronization.java From big-c with Apache License 2.0
private FSNamesystem makeNameSystemSpy(Block block, INodeFile file)
    throws IOException {
  Configuration conf = new Configuration();
  FSImage image = new FSImage(conf);
  final DatanodeStorageInfo[] targets = {};

  FSNamesystem namesystem = new FSNamesystem(conf, image);
  namesystem.setImageLoaded(true);

  // set file's parent as root and put the file to inodeMap, so
  // FSNamesystem's isFileDeleted() method will return false on this file
  if (file.getParent() == null) {
    INodeDirectory mparent = mock(INodeDirectory.class);
    INodeDirectory parent = new INodeDirectory(mparent.getId(), new byte[0],
        mparent.getPermissionStatus(), mparent.getAccessTime());
    parent.setLocalName(new byte[0]);
    parent.addChild(file);
    file.setParent(parent);
  }
  namesystem.dir.getINodeMap().put(file);

  FSNamesystem namesystemSpy = spy(namesystem);
  BlockInfoContiguousUnderConstruction blockInfo =
      new BlockInfoContiguousUnderConstruction(
          block, (short) 1,
          HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
  blockInfo.setBlockCollection(file);
  blockInfo.setGenerationStamp(genStamp);
  blockInfo.initializeBlockRecovery(genStamp);
  doReturn(true).when(file).removeLastBlock(any(Block.class));
  doReturn(true).when(file).isUnderConstruction();
  doReturn(new BlockInfoContiguous[1]).when(file).getBlocks();

  doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
  doReturn(blockInfo).when(file).getLastBlock();
  doReturn("").when(namesystemSpy).closeFileCommitBlocks(
      any(INodeFile.class), any(BlockInfoContiguous.class));
  doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();

  return namesystemSpy;
}
Example #20
Source File: PendingReplicationBlocks.java From big-c with Apache License 2.0
/**
 * Add a block to the list of pending replications
 * @param block The corresponding block
 * @param targets The DataNodes where replicas of the block should be placed
 */
void increment(Block block, DatanodeDescriptor[] targets) {
  synchronized (pendingReplications) {
    PendingBlockInfo found = pendingReplications.get(block);
    if (found == null) {
      pendingReplications.put(block, new PendingBlockInfo(targets));
    } else {
      found.incrementReplicas(targets);
      found.setTimeStamp();
    }
  }
}
Example #21
Source File: SimulatedFSDataset.java From hadoop with Apache License 2.0
/** Get a map for a given block pool Id */
private Map<Block, BInfo> getMap(String bpid) throws IOException {
  final Map<Block, BInfo> map = blockMap.get(bpid);
  if (map == null) {
    throw new IOException("Non existent blockpool " + bpid);
  }
  return map;
}
Example #22
Source File: SimulatedFSDataset.java From RDFS with Apache License 2.0
/**
 * Returns metaData of block b as an input stream
 * @param b - the block for which the metadata is desired
 * @return metaData of block b as an input stream
 * @throws IOException - block does not exist or problems accessing
 *     the meta file
 */
private synchronized InputStream getMetaDataInStream(int namespaceId, Block b)
    throws IOException {
  BInfo binfo = getBlockMap(namespaceId).get(b);
  if (binfo == null) {
    throw new IOException("No such Block " + b);
  }
  if (!binfo.finalized) {
    throw new IOException("Block " + b +
        " is being written, its meta cannot be read");
  }
  return binfo.getMetaIStream();
}
Example #23
Source File: TestBlockReplicationQueue.java From RDFS with Apache License 2.0
private void containsBlocks(boolean l) {
  LOG.info("Contains blocks...");
  long start = System.currentTimeMillis();
  for (Block b : blockList) {
    if (!l)
      assertTrue(queue.contains(b));
    else
      assertTrue(queueL.contains(b));
  }
  long stop = System.currentTimeMillis();
  LOG.info("Contains blocks... DONE. TIME: " + (stop - start));
}
Example #24
Source File: FSDataset.java From RDFS with Apache License 2.0
/**
 * Turn the block identifier into a filename.
 */
public File getFile(int namespaceId, Block b) {
  lock.readLock().lock();
  try {
    DatanodeBlockInfo info = volumeMap.get(namespaceId, b);
    if (info != null) {
      return info.getFile();
    }
    return null;
  } finally {
    lock.readLock().unlock();
  }
}
Example #25
Source File: SimulatedFSDataset.java From hadoop-gpu with Apache License 2.0
public synchronized long getMetaDataLength(Block b) throws IOException {
  BInfo binfo = blockMap.get(b);
  if (binfo == null) {
    throw new IOException("No such Block " + b);
  }
  if (!binfo.finalized) {
    throw new IOException("Block " + b +
        " is being written, its metalength cannot be read");
  }
  return binfo.getMetaIStream().getLength();
}
Example #26
Source File: TestBlockUnderConstruction.java From hadoop with Apache License 2.0
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for (int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoContiguousUnderConstruction);

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }

  // close file
  out.close();
}
Example #27
Source File: SimulatedFSDataset.java From hadoop with Apache License 2.0
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  ExtendedBlock b = rBlock.getBlock();
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new IOException("No such Block " + b);
  }
  return new ReplicaRecoveryInfo(binfo.getBlockId(), binfo.getBytesOnDisk(),
      binfo.getGenerationStamp(),
      binfo.isFinalized() ? ReplicaState.FINALIZED : ReplicaState.RBW);
}
Example #28
Source File: BlocksMap.java From hadoop with Apache License 2.0
/**
 * Remove the block from the block map;
 * remove it from all data-node lists it belongs to;
 * and remove all data-node locations associated with the block.
 */
void removeBlock(Block block) {
  BlockInfoContiguous blockInfo = blocks.remove(block);
  if (blockInfo == null)
    return;

  blockInfo.setBlockCollection(null);
  for (int idx = blockInfo.numNodes() - 1; idx >= 0; idx--) {
    DatanodeDescriptor dn = blockInfo.getDatanode(idx);
    dn.removeBlock(blockInfo); // remove from the list and wipe the location
  }
}
Example #29
Source File: BlockManager.java From hadoop with Apache License 2.0
/** Remove the blocks associated to the given datanode. */
void removeBlocksAssociatedTo(final DatanodeDescriptor node) {
  final Iterator<? extends Block> it = node.getBlockIterator();
  while (it.hasNext()) {
    removeStoredBlock(it.next(), node);
  }
  // Remove all pending DN messages referencing this DN.
  pendingDNMessages.removeAllMessagesForDatanode(node);

  node.resetBlocks();
  invalidateBlocks.remove(node);
}
Example #30
Source File: TestDFSUtil.java From RDFS with Apache License 2.0
/**
 * Test conversion of LocatedBlock to BlockLocation
 */
@Test
public void testLocatedBlocks2Locations() {
  DatanodeInfo d = new DatanodeInfo();
  DatanodeInfo[] ds = new DatanodeInfo[1];
  ds[0] = d;

  // ok
  Block b1 = new Block(1, 1, 1);
  LocatedBlock l1 = new LocatedBlock(b1, ds, 0, false);

  // corrupt
  Block b2 = new Block(2, 1, 1);
  LocatedBlock l2 = new LocatedBlock(b2, ds, 0, true);

  List<LocatedBlock> ls = Arrays.asList(l1, l2);
  LocatedBlocks lbs = new LocatedBlocks(10, ls, false);

  BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);

  assertTrue("expected 2 blocks but got " + bs.length,
      bs.length == 2);

  int corruptCount = 0;
  for (BlockLocation b : bs) {
    if (b.isCorrupt()) {
      corruptCount++;
    }
  }

  assertTrue("expected 1 corrupt files but got " + corruptCount,
      corruptCount == 1);

  // test an empty location
  bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
  assertEquals(0, bs.length);
}