Java Code Examples for org.apache.hadoop.hdfs.protocol.Block

The following examples show how to use org.apache.hadoop.hdfs.protocol.Block. They are extracted from open source projects; the source project, source file, and license are noted above each example.
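Before the project examples, here is a minimal orientation sketch that constructs a Block and reads the fields used throughout the examples below. It is an illustrative snippet, not code from any of the listed projects; the three-argument constructor and the accessors shown are assumed from their usage in the examples (e.g. new Block(id, length, generationStamp), getBlockId(), getGenerationStamp(), getBlockName()), and getNumBytes() is assumed by analogy.

import org.apache.hadoop.hdfs.protocol.Block;

public class BlockUsageSketch {
  public static void main(String[] args) {
    // A Block is identified by an id, a length in bytes, and a generation stamp.
    Block block = new Block(1073741825L, 134217728L, 1001L);

    System.out.println("block id         = " + block.getBlockId());
    System.out.println("length (bytes)   = " + block.getNumBytes());
    System.out.println("generation stamp = " + block.getGenerationStamp());
    // getBlockName() yields the on-disk file name, e.g. "blk_1073741825"
    System.out.println("block name       = " + block.getBlockName());
  }
}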
Example 1
Source Project: big-c   Source File: TestReplicationPolicy.java   License: Apache License 2.0
/** Asserts that the chosen blocks contain the expected number of blocks in each priority queue. */
private void assertTheChosenBlocks(
    List<List<Block>> chosenBlocks, int firstPrioritySize,
    int secondPrioritySize, int thirdPrioritySize, int fourthPrioritySize,
    int fifthPrioritySize) {
  assertEquals(
      "Not returned the expected number of QUEUE_HIGHEST_PRIORITY blocks",
      firstPrioritySize, chosenBlocks.get(
          UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY).size());
  assertEquals(
      "Not returned the expected number of QUEUE_VERY_UNDER_REPLICATED blocks",
      secondPrioritySize, chosenBlocks.get(
          UnderReplicatedBlocks.QUEUE_VERY_UNDER_REPLICATED).size());
  assertEquals(
      "Not returned the expected number of QUEUE_UNDER_REPLICATED blocks",
      thirdPrioritySize, chosenBlocks.get(
          UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED).size());
  assertEquals(
      "Not returned the expected number of QUEUE_REPLICAS_BADLY_DISTRIBUTED blocks",
      fourthPrioritySize, chosenBlocks.get(
          UnderReplicatedBlocks.QUEUE_REPLICAS_BADLY_DISTRIBUTED).size());
  assertEquals(
      "Not returned the expected number of QUEUE_WITH_CORRUPT_BLOCKS blocks",
      fifthPrioritySize, chosenBlocks.get(
          UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS).size());
}
 
Example 2
Source Project: hadoop   Source File: TestSequentialBlockId.java   License: Apache License 2.0
/**
 * Test that the block type (legacy or not) can be correctly detected
 * based on its generation stamp.
 *
 * @throws IOException
 */
@Test
public void testBlockTypeDetection() throws IOException {

  // Setup a mock object and stub out a few routines to
  // retrieve the generation stamp counters.
  BlockIdManager bid = mock(BlockIdManager.class);
  final long maxGenStampForLegacyBlocks = 10000;

  when(bid.getGenerationStampV1Limit())
      .thenReturn(maxGenStampForLegacyBlocks);

  Block legacyBlock = spy(new Block());
  when(legacyBlock.getGenerationStamp())
      .thenReturn(maxGenStampForLegacyBlocks/2);

  Block newBlock = spy(new Block());
  when(newBlock.getGenerationStamp())
      .thenReturn(maxGenStampForLegacyBlocks+1);

  // Make sure that isLegacyBlock() can correctly detect
  // legacy and new blocks.
  when(bid.isLegacyBlock(any(Block.class))).thenCallRealMethod();
  assertThat(bid.isLegacyBlock(legacyBlock), is(true));
  assertThat(bid.isLegacyBlock(newBlock), is(false));
}
 
Example 3
Source Project: hadoop-gpu   Source File: DataNode.java   License: Apache License 2.0
public Daemon recoverBlocks(final Block[] blocks, final DatanodeInfo[][] targets) {
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    public void run() {
      for(int i = 0; i < blocks.length; i++) {
        try {
          logRecoverBlock("NameNode", blocks[i], targets[i]);
          recoverBlock(blocks[i], false, targets[i], true);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED, blocks[" + i + "]=" + blocks[i], e);
        }
      }
    }
  });
  d.start();
  return d;
}
 
Example 4
Source Project: hadoop   Source File: RemoteBlockReader.java   License: Apache License 2.0
private RemoteBlockReader(String file, String bpid, long blockId,
    DataInputStream in, DataChecksum checksum, boolean verifyChecksum,
    long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache) {
  // Path is used only for printing block and file information in debug
  super(new Path("/" + Block.BLOCK_FILE_PREFIX + blockId +
                  ":" + bpid + ":of:"+ file)/*too non path-like?*/,
        1, verifyChecksum,
        checksum.getChecksumSize() > 0? checksum : null, 
        checksum.getBytesPerChecksum(),
        checksum.getChecksumSize());

  this.isLocal = DFSClient.isLocalAddress(NetUtils.
      createSocketAddr(datanodeID.getXferAddr()));
  
  this.peer = peer;
  this.datanodeID = datanodeID;
  this.in = in;
  this.checksum = checksum;
  this.startOffset = Math.max( startOffset, 0 );
  this.blockId = blockId;

  // The total number of bytes that we need to transfer from the DN is
  // the amount that the user wants (bytesToRead), plus the padding at
  // the beginning in order to chunk-align. Note that the DN may elect
  // to send more than this amount if the read starts/ends mid-chunk.
  this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);

  this.firstChunkOffset = firstChunkOffset;
  lastChunkOffset = firstChunkOffset;
  lastChunkLen = -1;

  bytesPerChecksum = this.checksum.getBytesPerChecksum();
  checksumSize = this.checksum.getChecksumSize();
  this.peerCache = peerCache;
}
 
Example 5
Source Project: RDFS   Source File: FSDirectory.java   License: Apache License 2.0
/**
 * Add the given file (or a directory, when blocks is null) to the namespace
 * and register its blocks in the blocks map. Returns the added inode, or
 * null if the add fails.
 */
INode unprotectedAddFile( String path, 
                          PermissionStatus permissions,
                          Block[] blocks, 
                          short replication,
                          long modificationTime,
                          long atime,
                          long preferredBlockSize) {
  INode newNode;
  long diskspace = -1; // unknown
  if (blocks == null)
    newNode = new INodeDirectory(permissions, modificationTime);
  else {
    newNode = new INodeFile(permissions, blocks.length, replication,
                            modificationTime, atime, preferredBlockSize);
    diskspace = ((INodeFile)newNode).diskspaceConsumed(blocks);
  }
  writeLock();
  try {
    try {
      newNode = addNode(path, newNode, diskspace, false);
      if(newNode != null && blocks != null) {
        int nrBlocks = blocks.length;
        // Add file->block mapping
        INodeFile newF = (INodeFile)newNode;
        for (int i = 0; i < nrBlocks; i++) {
          newF.setBlock(i, getFSNamesystem().blocksMap.addINode(blocks[i], newF));
        }
      }
    } catch (IOException e) {
      return null;
    }
    return newNode;
  } finally {
    writeUnlock();
  }
}
 
Example 6
Source Project: hadoop   Source File: InvalidateBlocks.java   License: Apache License 2.0
/** Remove the block from the specified storage. */
synchronized void remove(final DatanodeInfo dn, final Block block) {
  final LightWeightHashSet<Block> v = node2blocks.get(dn);
  if (v != null && v.remove(block)) {
    numBlocks--;
    if (v.isEmpty()) {
      node2blocks.remove(dn);
    }
  }
}
 
Example 7
Source Project: hadoop   Source File: BlockManager.java   License: Apache License 2.0
boolean blockHasEnoughRacks(Block b) {
  if (!this.shouldCheckForEnoughRacks) {
    return true;
  }
  boolean enoughRacks = false;
  Collection<DatanodeDescriptor> corruptNodes = 
                                corruptReplicas.getNodes(b);
  int numExpectedReplicas = getReplication(b);
  String rackName = null;
  for(DatanodeStorageInfo storage : blocksMap.getStorages(b)) {
    final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
    if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
      if ((corruptNodes == null ) || !corruptNodes.contains(cur)) {
        if (numExpectedReplicas == 1 ||
            (numExpectedReplicas > 1 &&
                !datanodeManager.hasClusterEverBeenMultiRack())) {
          enoughRacks = true;
          break;
        }
        String rackNameNew = cur.getNetworkLocation();
        if (rackName == null) {
          rackName = rackNameNew;
        } else if (!rackName.equals(rackNameNew)) {
          enoughRacks = true;
          break;
        }
      }
    }
  }
  return enoughRacks;
}
 
Example 8
Source Project: RDFS   Source File: FSDirectory.java   License: Apache License 2.0
/**
 * Get the blocks associated with the file.
 */
Block[] getFileBlocks(String src) {
  waitForReady();
  readLock();
  try {
    INode targetNode = rootDir.getNode(src);
    if (targetNode == null)
      return null;
    if(targetNode.isDirectory())
      return null;
    return ((INodeFile)targetNode).getBlocks();
  } finally {
    readUnlock();
  }
}
 
Example 9
Source Project: big-c   Source File: TestReplicationPolicy.java   License: Apache License 2.0
/**
 * Test that high priority blocks are processed before low priority blocks.
 */
@Test(timeout = 60000)
public void testReplicationWithPriority() throws Exception {
  int DFS_NAMENODE_REPLICATION_INTERVAL = 1000;
  int HIGH_PRIORITY = 0;
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
      .format(true).build();
  try {
    cluster.waitActive();
    final UnderReplicatedBlocks neededReplications = cluster.getNameNode()
        .getNamesystem().getBlockManager().neededReplications;
    for (int i = 0; i < 100; i++) {
      // Adding the blocks directly to normal priority
      neededReplications.add(new Block(random.nextLong()), 2, 0, 3);
    }
    // Wait for the replication interval so that processing of the normal
    // priority blocks starts
    Thread.sleep(DFS_NAMENODE_REPLICATION_INTERVAL);
    
    // Adding the block directly to high priority list
    neededReplications.add(new Block(random.nextLong()), 1, 0, 3);
    
    // Wait for the replication interval
    Thread.sleep(DFS_NAMENODE_REPLICATION_INTERVAL);

    // Check that replication completed successfully; there is no need to wait
    // until all 100 normal blocks are processed.
    assertFalse("Not able to clear the element from high priority list",
        neededReplications.iterator(HIGH_PRIORITY).hasNext());
  } finally {
    cluster.shutdown();
  }
}
 
Example 10
Source Project: hadoop   Source File: PendingReplicationBlocks.java   License: Apache License 2.0
/**
 * Returns a list of blocks that have timed out their 
 * replication requests. Returns null if no blocks have
 * timed out.
 */
Block[] getTimedOutBlocks() {
  synchronized (timedOutItems) {
    if (timedOutItems.size() <= 0) {
      return null;
    }
    Block[] blockList = timedOutItems.toArray(
        new Block[timedOutItems.size()]);
    timedOutItems.clear();
    return blockList;
  }
}
 
Example 11
Source Project: RDFS   Source File: BlockPlacementPolicyRaid.java   License: Apache License 2.0
/**
 * Obtain the companion blocks of the given block.
 * Companion blocks are defined as the blocks that can help recover each
 * other by using the raid decoder.
 * @param path the path of the file that contains the block
 * @param info the info of this file
 * @param block the given block, or null if it is the block that is
 *              currently being written to
 * @return the block locations of the companion blocks
 */
List<LocatedBlock> getCompanionBlocks(String path, FileInfo info, Block block)
    throws IOException {
  Codec codec = info.codec;
  switch (info.type) {
    case NOT_RAID:
      return Collections.emptyList();
    case HAR_TEMP_PARITY:
      return getCompanionBlocksForHarParityBlock(
          path, codec.parityLength, block);
    case TEMP_PARITY:
      return getCompanionBlocksForParityBlock(
          getSourceFile(path, codec.tmpParityDirectory),
          path, codec.parityLength, codec.stripeLength, block,
          codec.isDirRaid);
    case PARITY:
      return getCompanionBlocksForParityBlock(
          getSourceFile(path, codec.parityDirectory),
          path, codec.parityLength, codec.stripeLength, block, 
          codec.isDirRaid);
    case SOURCE:
      return getCompanionBlocksForSourceBlock(
          path,
          getParityFile(codec, path),
          codec.parityLength, codec.stripeLength, block,
          codec.isDirRaid);
  }
  return Collections.emptyList();
}
 
Example 12
Source Project: big-c   Source File: FSEditLogOp.java   License: Apache License 2.0
@Override void fromXml(Stanza st) throws InvalidXmlException {
  this.path = st.getValue("PATH");
  List<Stanza> blocks = st.getChildren("BLOCK");
  this.blocks = new Block[blocks.size()];
  for (int i = 0; i < blocks.size(); i++) {
    this.blocks[i] = FSEditLogOp.blockFromXml(blocks.get(i));
  }
  readRpcIdsFromXml(st);
}
 
Example 13
Source Project: hadoop   Source File: FSEditLogOp.java   License: Apache License 2.0
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
  path = FSImageSerialization.readString(in);
  Block[] blocks = FSImageSerialization.readCompactBlockArray(in,
      logVersion);
  Preconditions.checkState(blocks.length == 2 || blocks.length == 1);
  penultimateBlock = blocks.length == 1 ? null : blocks[0];
  lastBlock = blocks[blocks.length - 1];
  readRpcIds(in, logVersion);
}
 
Example 14
Source Project: hadoop   Source File: BlockManager.java   License: Apache License 2.0
/**
 * The given node is reporting that it received a certain block.
 */
@VisibleForTesting
void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint)
    throws IOException {
  DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
  // Decrement number of blocks scheduled to this datanode.
  // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with 
  // RECEIVED_BLOCK), we currently also decrease the approximate number. 
  node.decrementBlocksScheduled(storageInfo.getStorageType());

  // get the deletion hint node
  DatanodeDescriptor delHintNode = null;
  if (delHint != null && delHint.length() != 0) {
    delHintNode = datanodeManager.getDatanode(delHint);
    if (delHintNode == null) {
      blockLog.warn("BLOCK* blockReceived: {} is expected to be removed " +
          "from an unrecorded node {}", block, delHint);
    }
  }

  //
  // Modify the blocks->datanode map and node's map.
  //
  pendingReplications.decrement(block, node);
  processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
      delHintNode);
}
 
Example 15
Source Project: RDFS   Source File: SimulatedFSDataset.java   License: Apache License 2.0
public synchronized void setChannelPosition(int namespaceId, Block b, BlockWriteStreams stream, 
                                            long dataOffset, long ckOffset)
                                            throws IOException {
  BInfo binfo = getBlockMap(namespaceId).get(b);
  if (binfo == null) {
    throw new IOException("No such Block " + b );
  }
  binfo.setlength(dataOffset);
}
 
Example 16
Source Project: hadoop-gpu   Source File: DataNode.java   License: Apache License 2.0
private static void logRecoverBlock(String who,
    Block block, DatanodeID[] targets) {
  StringBuilder msg = new StringBuilder(targets[0].getName());
  for (int i = 1; i < targets.length; i++) {
    msg.append(", " + targets[i].getName());
  }
  LOG.info(who + " calls recoverBlock(block=" + block
      + ", targets=[" + msg + "])");
}
 
Example 17
Source Project: hadoop   Source File: CorruptReplicasMap.java   License: Apache License 2.0
/**
 * Return the reason a replica of the given block on the given
 * datanode was marked corrupt.
 * @param block block that has a corrupted replica
 * @param node datanode that contains this corrupted replica
 * @return the reason, or null if none is recorded
 */
String getCorruptReason(Block block, DatanodeDescriptor node) {
  Reason reason = null;
  if(corruptReplicasMap.containsKey(block)) {
    if (corruptReplicasMap.get(block).containsKey(node)) {
      reason = corruptReplicasMap.get(block).get(node);
    }
  }
  if (reason != null) {
    return reason.toString();
  } else {
    return null;
  }
}
 
Example 18
Source Project: big-c   Source File: ReplicaMap.java   License: Apache License 2.0
/**
 * Get the meta information of the replica that matches both block id 
 * and generation stamp
 * @param bpid block pool id
 * @param block block with its id as the key
 * @return the replica's meta information
 * @throws IllegalArgumentException if the input block or block pool is null
 */
ReplicaInfo get(String bpid, Block block) {
  checkBlockPool(bpid);
  checkBlock(block);
  ReplicaInfo replicaInfo = get(bpid, block.getBlockId());
  if (replicaInfo != null && 
      block.getGenerationStamp() == replicaInfo.getGenerationStamp()) {
    return replicaInfo;
  }
  return null;
}
 
Example 19
Source Project: big-c   Source File: CorruptReplicasMap.java   License: Apache License 2.0
/**
 * Get the nodes that have corrupt replicas of the given block.
 *
 * @param blk Block for which nodes are requested
 * @return collection of nodes, or null if none exist
 */
Collection<DatanodeDescriptor> getNodes(Block blk) {
  Map <DatanodeDescriptor, Reason> nodes = corruptReplicasMap.get(blk);
  if (nodes == null)
    return null;
  return nodes.keySet();
}
 
Example 20
Source Project: big-c   Source File: BlockManager.java   License: Apache License 2.0
/**
 * Queue the given reported block for later processing in the
 * standby node. @see PendingDataNodeMessages.
 * @param reason a textual reason to report in the debug logs
 */
private void queueReportedBlock(DatanodeStorageInfo storageInfo, Block block,
    ReplicaState reportedState, String reason) {
  assert shouldPostponeBlocksFromFuture;
  
  if (LOG.isDebugEnabled()) {
    LOG.debug("Queueing reported block " + block +
        " in state " + reportedState + 
        " from datanode " + storageInfo.getDatanodeDescriptor() +
        " for later processing because " + reason + ".");
  }
  pendingDNMessages.enqueueReportedBlock(storageInfo, block, reportedState);
}
 
Example 21
Source Project: hadoop   Source File: TestBlockManager.java   License: Apache License 2.0
private BlockInfoContiguous blockOnNodes(long blkId, List<DatanodeDescriptor> nodes) {
  Block block = new Block(blkId);
  BlockInfoContiguous blockInfo = new BlockInfoContiguous(block, (short) 3);

  for (DatanodeDescriptor dn : nodes) {
    for (DatanodeStorageInfo storage : dn.getStorageInfos()) {
      blockInfo.addStorage(storage);
    }
  }
  return blockInfo;
}
 
Example 22
Source Project: hadoop   Source File: PendingReplicationBlocks.java   License: Apache License 2.0
/**
 * Add a block to the list of pending Replications
 * @param block The corresponding block
 * @param targets The DataNodes where replicas of the block should be placed
 */
void increment(Block block, DatanodeDescriptor[] targets) {
  synchronized (pendingReplications) {
    PendingBlockInfo found = pendingReplications.get(block);
    if (found == null) {
      pendingReplications.put(block, new PendingBlockInfo(targets));
    } else {
      found.incrementReplicas(targets);
      found.setTimeStamp();
    }
  }
}
 
Example 23
Source Project: hadoop   Source File: TestPBHelper.java   License: Apache License 2.0
private static BlockWithLocations getBlockWithLocations(int bid) {
  final String[] datanodeUuids = {"dn1", "dn2", "dn3"};
  final String[] storageIDs = {"s1", "s2", "s3"};
  final StorageType[] storageTypes = {
      StorageType.DISK, StorageType.DISK, StorageType.DISK};
  return new BlockWithLocations(new Block(bid, 0, 1),
      datanodeUuids, storageIDs, storageTypes);
}
 
Example 24
Source Project: RDFS   Source File: DistributedAvatarFileSystem.java   License: Apache License 2.0
@Override
public LocatedBlockWithMetaInfo addBlockAndFetchMetaInfo(final String src,
    final String clientName, final DatanodeInfo[] excludedNodes,
    final DatanodeInfo[] favoredNodes, final long startPos,
    final Block lastBlock)
    throws IOException {
  return (new MutableFSCaller<LocatedBlockWithMetaInfo>() {
    @Override
    LocatedBlockWithMetaInfo call(int retries) throws IOException {
      if (retries > 0 && lastBlock == null) {
        FileStatus info = namenode.getFileInfo(src);
        if (info != null) {
          LocatedBlocks blocks = namenode.getBlockLocations(src, 0, info
              .getLen());
          if (blocks.locatedBlockCount() > 0 ) {
            LocatedBlock last = blocks.get(blocks.locatedBlockCount() - 1);
            if (last.getBlockSize() == 0) {
              // This one has not been written to
              namenode.abandonBlock(last.getBlock(), src, clientName);
            }
          }
        }
      }
      return namenode.addBlockAndFetchMetaInfo(src, clientName,
          excludedNodes, favoredNodes, startPos, lastBlock);
    }

  }).callFS();
}
 
Example 25
Source Project: lucene-solr   Source File: BlockPoolSlice.java   License: Apache License 2.0
/**
 * Temporary files. They get moved to the finalized block directory when
 * the block is finalized.
 */
File createTmpFile(Block b) throws IOException {
  File f = new File(tmpDir, b.getBlockName());
  File tmpFile = DatanodeUtil.createFileWithExistsCheck(
      volume, b, f, fileIoProvider);
  // If an exception occurs during creation the counter will not have been
  // incremented, so there is no need to decrement it.
  incrNumBlocks();
  return tmpFile;
}
 
Example 26
Source Project: hadoop   Source File: BlockManager.java   License: Apache License 2.0
private void addToExcessReplicate(DatanodeInfo dn, Block block) {
  assert namesystem.hasWriteLock();
  LightWeightLinkedSet<Block> excessBlocks = excessReplicateMap.get(dn.getDatanodeUuid());
  if (excessBlocks == null) {
    excessBlocks = new LightWeightLinkedSet<Block>();
    excessReplicateMap.put(dn.getDatanodeUuid(), excessBlocks);
  }
  if (excessBlocks.add(block)) {
    excessBlocksCount.incrementAndGet();
    blockLog.debug("BLOCK* addToExcessReplicate: ({}, {}) is added to"
        + " excessReplicateMap", dn, block);
  }
}
 
Example 27
Source Project: big-c   Source File: TestCorruptReplicaInfo.java   License: Apache License 2.0
private Block getBlock(Long block_id) {
  if (!block_map.containsKey(block_id)) {
    block_map.put(block_id, new Block(block_id,0,0));
  }
  
  return block_map.get(block_id);
}
 
Example 28
Source Project: big-c   Source File: TestCommitBlockSynchronization.java   License: Apache License 2.0
private FSNamesystem makeNameSystemSpy(Block block, INodeFile file)
    throws IOException {
  Configuration conf = new Configuration();
  FSImage image = new FSImage(conf);
  final DatanodeStorageInfo[] targets = {};

  FSNamesystem namesystem = new FSNamesystem(conf, image);
  namesystem.setImageLoaded(true);

  // set file's parent as root and put the file to inodeMap, so
  // FSNamesystem's isFileDeleted() method will return false on this file
  if (file.getParent() == null) {
    INodeDirectory mparent = mock(INodeDirectory.class);
    INodeDirectory parent = new INodeDirectory(mparent.getId(), new byte[0],
        mparent.getPermissionStatus(), mparent.getAccessTime());
    parent.setLocalName(new byte[0]);
    parent.addChild(file);
    file.setParent(parent);
  }
  namesystem.dir.getINodeMap().put(file);

  FSNamesystem namesystemSpy = spy(namesystem);
  BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
      block, (short) 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
  blockInfo.setBlockCollection(file);
  blockInfo.setGenerationStamp(genStamp);
  blockInfo.initializeBlockRecovery(genStamp);
  doReturn(true).when(file).removeLastBlock(any(Block.class));
  doReturn(true).when(file).isUnderConstruction();
  doReturn(new BlockInfoContiguous[1]).when(file).getBlocks();

  doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
  doReturn(blockInfo).when(file).getLastBlock();
  doReturn("").when(namesystemSpy).closeFileCommitBlocks(
      any(INodeFile.class), any(BlockInfoContiguous.class));
  doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();

  return namesystemSpy;
}
 
Example 29
Source Project: big-c   Source File: PendingReplicationBlocks.java   License: Apache License 2.0
/**
 * Add a block to the list of pending Replications
 * @param block The corresponding block
 * @param targets The DataNodes where replicas of the block should be placed
 */
void increment(Block block, DatanodeDescriptor[] targets) {
  synchronized (pendingReplications) {
    PendingBlockInfo found = pendingReplications.get(block);
    if (found == null) {
      pendingReplications.put(block, new PendingBlockInfo(targets));
    } else {
      found.incrementReplicas(targets);
      found.setTimeStamp();
    }
  }
}
 
Example 30
Source Project: hadoop   Source File: SimulatedFSDataset.java   License: Apache License 2.0
/** Get a map for a given block pool Id */
private Map<Block, BInfo> getMap(String bpid) throws IOException {
  final Map<Block, BInfo> map = blockMap.get(bpid);
  if (map == null) {
    throw new IOException("Non existent blockpool " + bpid);
  }
  return map;
}