Java Code Examples for org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState

The following examples show how to use org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState. These examples are extracted from open source projects; the project, source file, and license are noted above each example.
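
The enum exposes the five replica states used throughout these examples (FINALIZED, RBW, RWR, RUR and TEMPORARY), together with getValue() and getState(int), which the block-report code below uses to encode and decode a state as an integer. The following is a minimal sketch, not taken from any of the listed projects, that simply prints that mapping:

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;

public class ReplicaStateValues {
  public static void main(String[] args) {
    for (ReplicaState state : ReplicaState.values()) {
      // getValue() returns the numeric code used when a state is serialized;
      // getState(int) is the inverse lookup used when decoding block reports.
      int code = state.getValue();
      System.out.println(state + " -> " + code + " -> " + ReplicaState.getState(code));
    }
  }
}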
Example 1
Source Project: big-c   Source File: BlockManager.java    License: Apache License 2.0
private boolean isBlockUnderConstruction(BlockInfoContiguous storedBlock,
    BlockUCState ucState, ReplicaState reportedState) {
  switch(reportedState) {
  case FINALIZED:
    switch(ucState) {
    case UNDER_CONSTRUCTION:
    case UNDER_RECOVERY:
      return true;
    default:
      return false;
    }
  case RBW:
  case RWR:
    return (!storedBlock.isComplete());
  case RUR:       // should not be reported
  case TEMPORARY: // should not be reported
  default:
    return false;
  }
}
 
Example 2
Source Project: big-c   Source File: TestBlockRecovery.java    License: Apache License 2.0
/**
 * BlockRecovery_02.11.
 * Two replicas are RBW.
 * @throws IOException in case of an error
 */
@Test
public void testRBWReplicas() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, 
      REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RBW);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, 
      REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RBW);

  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);

  long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
  testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
  verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
}
 
Example 3
Source Project: big-c   Source File: TestBlockRecovery.java    License: Apache License 2.0
/**
 * BlockRecoveryFI_07. Max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
      initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(
      block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
 
Example 4
void addReplicaIfNotPresent(DatanodeStorageInfo storage,
                   Block block,
                   ReplicaState rState) {
  Iterator<ReplicaUnderConstruction> it = replicas.iterator();
  while (it.hasNext()) {
    ReplicaUnderConstruction r = it.next();
    DatanodeStorageInfo expectedLocation = r.getExpectedStorageLocation();
    if(expectedLocation == storage) {
      // Record the gen stamp from the report
      r.setGenerationStamp(block.getGenerationStamp());
      return;
    } else if (expectedLocation != null &&
               expectedLocation.getDatanodeDescriptor() ==
                   storage.getDatanodeDescriptor()) {

      // The Datanode reported that the block is on a different storage
      // than the one chosen by BlockPlacementPolicy. This can occur as
      // we allow Datanodes to choose the target storage. Update our
      // state by removing the stale entry and adding a new one.
      it.remove();
      break;
    }
  }
  replicas.add(new ReplicaUnderConstruction(block, storage, rState));
}
 
Example 5
Source Project: hadoop   Source File: FsDatasetImpl.java    License: Apache License 2.0
/**
 * Remove the temporary block file (if any)
 */
@Override // FsDatasetSpi
public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException {
  ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
      b.getLocalBlock());
  if (replicaInfo != null && replicaInfo.getState() == ReplicaState.TEMPORARY) {
    // remove from volumeMap
    volumeMap.remove(b.getBlockPoolId(), b.getLocalBlock());
    
    // delete the on-disk temp file
    if (delBlockFromDisk(replicaInfo.getBlockFile(), 
        replicaInfo.getMetaFile(), b.getLocalBlock())) {
      LOG.warn("Block " + b + " unfinalized and removed. " );
    }
    if (replicaInfo.getVolume().isTransientStorage()) {
      ramDiskReplicaTracker.discardReplica(b.getBlockPoolId(), b.getBlockId(), true);
    }
  }
}
 
Example 6
Source Project: big-c   Source File: FsDatasetImpl.java    License: Apache License 2.0
@Override  // FsDatasetSpi
public synchronized ReplicaHandler recoverAppend(
    ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
  LOG.info("Recover failed append to " + b);

  ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);

  FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
  ReplicaBeingWritten replica;
  try {
    // change the replica's state/gs etc.
    if (replicaInfo.getState() == ReplicaState.FINALIZED) {
      replica = append(b.getBlockPoolId(), (FinalizedReplica) replicaInfo,
                       newGS, b.getNumBytes());
    } else { //RBW
      bumpReplicaGS(replicaInfo, newGS);
      replica = (ReplicaBeingWritten) replicaInfo;
    }
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
  return new ReplicaHandler(replica, ref);
}
 
Example 7
Source Project: hadoop   Source File: TestBlockListAsLongs.java    License: Apache License 2.0
@Test
public void testMix() {
  BlockListAsLongs blocks = checkReport(
      new FinalizedReplica(b1, null, null),
      new FinalizedReplica(b2, null, null),
      new ReplicaBeingWritten(b3, null, null, null),
      new ReplicaWaitingToBeRecovered(b4, null, null));
  assertArrayEquals(
      new long[] {
          2, 2,
          1, 11, 111,
          2, 22, 222,
          -1, -1, -1,
          3, 33, 333, ReplicaState.RBW.getValue(),
          4, 44, 444, ReplicaState.RWR.getValue() },
      blocks.getBlockListAsLongs());
}
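
The asserted long[] above implies the legacy block-report layout: two header longs (the number of finalized and of under-construction replicas), then three longs per finalized replica (block id, length, generation stamp), a -1,-1,-1 delimiter, and finally four longs per under-construction replica whose last entry is the ReplicaState code. Below is a small decoding sketch under that assumption (the layout here is inferred from the test, not from documentation):

static void printUnderConstruction(long[] report) {
  int finalizedCount = (int) report[0];
  int ucCount = (int) report[1];
  // skip the two header longs, the finalized triples, and the -1,-1,-1 delimiter
  int ucStart = 2 + 3 * finalizedCount + 3;
  for (int i = 0; i < ucCount; i++) {
    long blockId = report[ucStart + 4 * i];
    long stateCode = report[ucStart + 4 * i + 3];
    System.out.println(blockId + " is " + ReplicaState.getState((int) stateCode));
  }
}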
 
Example 8
Source Project: big-c   Source File: FsDatasetImpl.java    License: Apache License 2.0
/**
 * Check if a block is valid.
 *
 * @param b           The block to check.
 * @param minLength   The minimum length that the block must have.  May be 0.
 * @param state       If this is null, it is ignored.  If it is non-null, we
 *                        will check that the replica has this state.
 *
 * @throws ReplicaNotFoundException          If the replica is not found 
 *
 * @throws UnexpectedReplicaStateException   If the replica is not in the 
 *                                             expected state.
 * @throws FileNotFoundException             If the block file is not found or there
 *                                              was an error locating it.
 * @throws EOFException                      If the replica length is too short.
 * 
 * @throws IOException                       May be thrown from the methods called. 
 */
public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
    throws ReplicaNotFoundException, UnexpectedReplicaStateException,
    FileNotFoundException, EOFException, IOException {
  final ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
      b.getLocalBlock());
  if (replicaInfo == null) {
    throw new ReplicaNotFoundException(b);
  }
  if (replicaInfo.getState() != state) {
    throw new UnexpectedReplicaStateException(b, state);
  }
  if (!replicaInfo.getBlockFile().exists()) {
    throw new FileNotFoundException(replicaInfo.getBlockFile().getPath());
  }
  long onDiskLength = getLength(b);
  if (onDiskLength < minLength) {
    throw new EOFException(b + "'s on-disk length " + onDiskLength
        + " is shorter than minLength " + minLength);
  }
}
 
Example 9
Source Project: big-c   Source File: TestBlockRecovery.java    License: Apache License 2.0
/**
 * BlockRecovery_02.13. 
 * Two replicas are RWR.
 * @throws IOException in case of an error
 */
@Test
public void testRWRReplicas() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, 
      REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RWR);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, 
      REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RWR);

  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);

  long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
  testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
  
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
  verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
}
 
Example 10
Source Project: big-c   Source File: BlockListAsLongs.java    License: Apache License 2.0
public void add(Replica replica) {
  try {
    // zig-zag to reduce size of legacy blocks
    cos.writeSInt64NoTag(replica.getBlockId());
    cos.writeRawVarint64(replica.getBytesOnDisk());
    cos.writeRawVarint64(replica.getGenerationStamp());
    ReplicaState state = replica.getState();
    // although state is not a 64-bit value, using a long varint to
    // allow for future use of the upper bits
    cos.writeRawVarint64(state.getValue());
    if (state == ReplicaState.FINALIZED) {
      numFinalized++;
    }
    numBlocks++;
  } catch (IOException ioe) {
    // shouldn't happen, ByteString.Output doesn't throw IOE
    throw new IllegalStateException(ioe);
  }
}
 
Example 11
Source Project: lucene-solr   Source File: BlockPoolSlice.java    License: Apache License 2.0
/**
 * Move a persisted replica from lazypersist directory to a subdirectory
 * under finalized.
 */
ReplicaInfo activateSavedReplica(ReplicaInfo replicaInfo,
                                 RamDiskReplica replicaState) throws IOException {
  File metaFile = replicaState.getSavedMetaFile();
  File blockFile = replicaState.getSavedBlockFile();
  final long blockId = replicaInfo.getBlockId();
  final File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, blockId);
  final File targetBlockFile = new File(blockDir, blockFile.getName());
  final File targetMetaFile = new File(blockDir, metaFile.getName());
  fileIoProvider.moveFile(volume, blockFile, targetBlockFile);
  FsDatasetImpl.LOG.info("Moved " + blockFile + " to " + targetBlockFile);
  fileIoProvider.moveFile(volume, metaFile, targetMetaFile);
  FsDatasetImpl.LOG.info("Moved " + metaFile + " to " + targetMetaFile);

  ReplicaInfo newReplicaInfo =
      new ReplicaBuilder(ReplicaState.FINALIZED)
          .setBlockId(blockId)
          .setLength(replicaInfo.getBytesOnDisk())
          .setGenerationStamp(replicaInfo.getGenerationStamp())
          .setFsVolume(replicaState.getLazyPersistVolume())
          .setDirectoryToUse(targetBlockFile.getParentFile())
          .build();
  return newReplicaInfo;
}
 
Example 12
Source Project: hadoop   Source File: BlockListAsLongs.java    License: Apache License 2.0
@Override
public Iterator<BlockReportReplica> iterator() {
  return new Iterator<BlockReportReplica>() {
    final BlockReportReplica block = new BlockReportReplica();
    final CodedInputStream cis = buffer.newCodedInput();
    private int currentBlockIndex = 0;

    @Override
    public boolean hasNext() {
      return currentBlockIndex < numBlocks;
    }

    @Override
    public BlockReportReplica next() {
      currentBlockIndex++;
      try {
        // zig-zag to reduce size of legacy blocks and mask off bits
        // we don't (yet) understand
        block.setBlockId(cis.readSInt64());
        block.setNumBytes(cis.readRawVarint64() & NUM_BYTES_MASK);
        block.setGenerationStamp(cis.readRawVarint64());
        long state = cis.readRawVarint64() & REPLICA_STATE_MASK;
        block.setState(ReplicaState.getState((int)state));
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
      return block;
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }
  };
}
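
Because the class overrides iterator() (and BlockReportReplica exposes getState(), as the next example shows), a decoded report can be walked directly. The sketch below assumes only that the enclosing BlockListAsLongs is Iterable<BlockReportReplica>; it tallies replicas by state using java.util.EnumMap:

static EnumMap<ReplicaState, Integer> countByState(Iterable<BlockReportReplica> report) {
  EnumMap<ReplicaState, Integer> counts =
      new EnumMap<ReplicaState, Integer>(ReplicaState.class);
  for (BlockReportReplica replica : report) {
    // tally each reported replica under its decoded state
    Integer current = counts.get(replica.getState());
    counts.put(replica.getState(), current == null ? 1 : current + 1);
  }
  return counts;
}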
 
Example 13
Source Project: hadoop   Source File: BlockListAsLongs.java    License: Apache License 2.0
public BlockReportReplica(Block block) {
  super(block);
  if (block instanceof BlockReportReplica) {
    this.state = ((BlockReportReplica)block).getState();
  } else {
    this.state = ReplicaState.FINALIZED;
  }
}
 
Example 14
Source Project: big-c   Source File: TestTransferRbw.java    License: Apache License 2.0
private static ReplicaInPipeline getReplica(final DataNode datanode,
    final String bpid, final ReplicaState expectedState) throws InterruptedException {
  final Collection<ReplicaInfo> replicas = FsDatasetTestUtil.getReplicas(
      datanode.getFSDataset(), bpid);
  for(int i = 0; i < 5 && replicas.size() == 0; i++) {
    LOG.info("wait since replicas.size() == 0; i=" + i);
    Thread.sleep(1000);
  }
  Assert.assertEquals(1, replicas.size());
  final ReplicaInfo r = replicas.iterator().next();
  Assert.assertEquals(expectedState, r.getState());
  return (ReplicaInPipeline)r;
}
 
Example 15
Source Project: big-c   Source File: FsDatasetImpl.java    License: Apache License 2.0
@Override  // FsDatasetSpi
public synchronized ReplicaHandler append(ExtendedBlock b,
    long newGS, long expectedBlockLen) throws IOException {
  // This can happen if the block was successfully finalized because all
  // packets were processed at the Datanode, but the ack for some of the
  // packets was not received by the client; the client then re-opens the
  // connection and retries sending those packets.
  // The other reason is that an "append" is occurring to this block.
  
  // check the validity of the parameter
  if (newGS < b.getGenerationStamp()) {
    throw new IOException("The new generation stamp " + newGS + 
        " should be greater than the replica " + b + "'s generation stamp");
  }
  ReplicaInfo replicaInfo = getReplicaInfo(b);
  LOG.info("Appending to " + replicaInfo);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
  }
  if (replicaInfo.getNumBytes() != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo + 
        " with a length of " + replicaInfo.getNumBytes() + 
        " expected length is " + expectedBlockLen);
  }

  FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
  ReplicaBeingWritten replica = null;
  try {
    replica = append(b.getBlockPoolId(), (FinalizedReplica)replicaInfo, newGS,
        b.getNumBytes());
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
  return new ReplicaHandler(replica, ref);
}
 
Example 16
Source Project: hadoop   Source File: BlockManager.java    License: Apache License 2.0
/**
 * Queue the given reported block for later processing in the
 * standby node. @see PendingDataNodeMessages.
 * @param reason a textual reason to report in the debug logs
 */
private void queueReportedBlock(DatanodeStorageInfo storageInfo, Block block,
    ReplicaState reportedState, String reason) {
  assert shouldPostponeBlocksFromFuture;
  
  if (LOG.isDebugEnabled()) {
    LOG.debug("Queueing reported block " + block +
        " in state " + reportedState + 
        " from datanode " + storageInfo.getDatanodeDescriptor() +
        " for later processing because " + reason + ".");
  }
  pendingDNMessages.enqueueReportedBlock(storageInfo, block, reportedState);
}
 
Example 17
Source Project: big-c   Source File: SimulatedFSDataset.java    License: Apache License 2.0
@Override // FsDatasetSpi
public synchronized boolean isValidBlock(ExtendedBlock b) {
  try {
    checkBlock(b, 0, ReplicaState.FINALIZED);
  } catch (IOException e) {
    return false;
  }
  return true;
}
 
Example 18
Source Project: hadoop   Source File: BlockManager.java    License: Apache License 2.0
void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock,
    DatanodeStorageInfo storageInfo) throws IOException {
  BlockInfoContiguousUnderConstruction block = ucBlock.storedBlock;
  block.addReplicaIfNotPresent(
      storageInfo, ucBlock.reportedBlock, ucBlock.reportedState);

  if (ucBlock.reportedState == ReplicaState.FINALIZED &&
      !block.findDatanode(storageInfo.getDatanodeDescriptor())) {
    addStoredBlock(block, storageInfo, null, true);
  }
}
 
Example 19
Source Project: hadoop   Source File: BlockManager.java    License: Apache License 2.0
private void processAndHandleReportedBlock(
    DatanodeStorageInfo storageInfo, Block block,
    ReplicaState reportedState, DatanodeDescriptor delHintNode)
    throws IOException {
  // blockReceived reports a finalized block
  Collection<BlockInfoContiguous> toAdd = new LinkedList<BlockInfoContiguous>();
  Collection<Block> toInvalidate = new LinkedList<Block>();
  Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<BlockToMarkCorrupt>();
  Collection<StatefulBlockInfo> toUC = new LinkedList<StatefulBlockInfo>();
  final DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();

  processReportedBlock(storageInfo, block, reportedState,
                            toAdd, toInvalidate, toCorrupt, toUC);
  // the block is only in one of the to-do lists
  // if it is in none then data-node already has it
  assert toUC.size() + toAdd.size() + toInvalidate.size() + toCorrupt.size() <= 1
    : "The block should be only in one of the lists.";

  for (StatefulBlockInfo b : toUC) { 
    addStoredBlockUnderConstruction(b, storageInfo);
  }
  long numBlocksLogged = 0;
  for (BlockInfoContiguous b : toAdd) {
    addStoredBlock(b, storageInfo, delHintNode, numBlocksLogged < maxNumBlocksToLog);
    numBlocksLogged++;
  }
  if (numBlocksLogged > maxNumBlocksToLog) {
    blockLog.info("BLOCK* addBlock: logged info for {} of {} reported.",
        maxNumBlocksToLog, numBlocksLogged);
  }
  for (Block b : toInvalidate) {
    blockLog.info("BLOCK* addBlock: block {} on node {} size {} does not " +
        "belong to any file", b, node, b.getNumBytes());
    addToInvalidates(b, node);
  }
  for (BlockToMarkCorrupt b : toCorrupt) {
    markBlockAsCorrupt(b, storageInfo, node);
  }
}
 
Example 20
ReplicaUnderConstruction(Block block,
                         DatanodeStorageInfo target,
                         ReplicaState state) {
  super(block);
  this.expectedLocation = target;
  this.state = state;
  this.chosenAsPrimary = false;
}
 
Example 21
Source Project: big-c   Source File: BlockListAsLongs.java    License: Apache License 2.0
public BlockReportReplica(Block block) {
  super(block);
  if (block instanceof BlockReportReplica) {
    this.state = ((BlockReportReplica)block).getState();
  } else {
    this.state = ReplicaState.FINALIZED;
  }
}
 
Example 22
Source Project: big-c   Source File: PBHelper.java    License: Apache License 2.0
public static ReplicaStateProto convert(ReplicaState state) {
  switch (state) {
  case RBW:
    return ReplicaStateProto.RBW;
  case RUR:
    return ReplicaStateProto.RUR;
  case RWR:
    return ReplicaStateProto.RWR;
  case TEMPORARY:
    return ReplicaStateProto.TEMPORARY;
  case FINALIZED:
  default:
    return ReplicaStateProto.FINALIZED;
  }
}
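
The inverse direction (ReplicaStateProto back to ReplicaState) is the mirror image of the switch above. The following is a sketch of such a conversion rather than the exact body of PBHelper's reverse overload:

public static ReplicaState convert(ReplicaStateProto state) {
  switch (state) {
  case RBW:
    return ReplicaState.RBW;
  case RUR:
    return ReplicaState.RUR;
  case RWR:
    return ReplicaState.RWR;
  case TEMPORARY:
    return ReplicaState.TEMPORARY;
  case FINALIZED:
  default:
    return ReplicaState.FINALIZED;
  }
}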
 
Example 23
Source Project: hadoop   Source File: FsDatasetImpl.java    License: Apache License 2.0
@Override  // FsDatasetSpi
public synchronized ReplicaHandler append(ExtendedBlock b,
    long newGS, long expectedBlockLen) throws IOException {
  // This can happen if the block was successfully finalized because all
  // packets were processed at the Datanode, but the ack for some of the
  // packets was not received by the client; the client then re-opens the
  // connection and retries sending those packets.
  // The other reason is that an "append" is occurring to this block.
  
  // check the validity of the parameter
  if (newGS < b.getGenerationStamp()) {
    throw new IOException("The new generation stamp " + newGS + 
        " should be greater than the replica " + b + "'s generation stamp");
  }
  ReplicaInfo replicaInfo = getReplicaInfo(b);
  LOG.info("Appending to " + replicaInfo);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
  }
  if (replicaInfo.getNumBytes() != expectedBlockLen) {
    throw new IOException("Corrupted replica " + replicaInfo + 
        " with a length of " + replicaInfo.getNumBytes() + 
        " expected length is " + expectedBlockLen);
  }

  FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
  ReplicaBeingWritten replica = null;
  try {
    replica = append(b.getBlockPoolId(), (FinalizedReplica)replicaInfo, newGS,
        b.getNumBytes());
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
  return new ReplicaHandler(replica, ref);
}
 
Example 24
Source Project: big-c   Source File: FsDatasetImpl.java    License: Apache License 2.0
/** Does the block exist and have the given state? */
private boolean isValid(final ExtendedBlock b, final ReplicaState state) {
  try {
    checkBlock(b, 0, state);
  } catch (IOException e) {
    return false;
  }
  return true;
}
 
Example 25
Source Project: hadoop   Source File: FsDatasetImpl.java    License: Apache License 2.0
/**
 * Complete the block write!
 */
@Override // FsDatasetSpi
public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
  if (Thread.interrupted()) {
    // Don't allow data modifications from interrupted threads
    throw new IOException("Cannot finalize block from Interrupted Thread");
  }
  ReplicaInfo replicaInfo = getReplicaInfo(b);
  if (replicaInfo.getState() == ReplicaState.FINALIZED) {
    // this is legal, when recovery happens on a file that has
    // been opened for append but never modified
    return;
  }
  finalizeReplica(b.getBlockPoolId(), replicaInfo);
}
 
Example 26
Source Project: hadoop   Source File: FsDatasetImpl.java    License: Apache License 2.0
/**
 * Get the list of finalized blocks from in-memory blockmap for a block pool.
 */
@Override
public synchronized List<FinalizedReplica> getFinalizedBlocks(String bpid) {
  ArrayList<FinalizedReplica> finalized =
      new ArrayList<FinalizedReplica>(volumeMap.size(bpid));
  for (ReplicaInfo b : volumeMap.replicas(bpid)) {
    if(b.getState() == ReplicaState.FINALIZED) {
      finalized.add(new FinalizedReplica((FinalizedReplica)b));
    }
  }
  return finalized;
}
 
Example 27
Source Project: hadoop   Source File: FsDatasetImpl.java    License: Apache License 2.0
/**
 * Get the list of finalized blocks on persistent storage from the in-memory blockmap for a block pool.
 */
@Override
public synchronized List<FinalizedReplica> getFinalizedBlocksOnPersistentStorage(String bpid) {
  ArrayList<FinalizedReplica> finalized =
      new ArrayList<FinalizedReplica>(volumeMap.size(bpid));
  for (ReplicaInfo b : volumeMap.replicas(bpid)) {
    if(!b.getVolume().isTransientStorage() &&
       b.getState() == ReplicaState.FINALIZED) {
      finalized.add(new FinalizedReplica((FinalizedReplica)b));
    }
  }
  return finalized;
}
 
Example 28
Source Project: big-c   Source File: SimulatedFSDataset.java    License: Apache License 2.0
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
throws IOException {
  ExtendedBlock b = rBlock.getBlock();
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new IOException("No such Block " + b );  
  }

  return new ReplicaRecoveryInfo(binfo.getBlockId(), binfo.getBytesOnDisk(), 
      binfo.getGenerationStamp(), 
      binfo.isFinalized() ? ReplicaState.FINALIZED : ReplicaState.RBW);
}
 
Example 29
ReplicaUnderConstruction(Block block,
                         DatanodeStorageInfo target,
                         ReplicaState state) {
  super(block);
  this.expectedLocation = target;
  this.state = state;
  this.chosenAsPrimary = false;
}
 
Example 30
Source Project: big-c   Source File: FsDatasetImpl.java    License: Apache License 2.0
/**
 * Get the list of finalized blocks from in-memory blockmap for a block pool.
 */
@Override
public synchronized List<FinalizedReplica> getFinalizedBlocks(String bpid) {
  ArrayList<FinalizedReplica> finalized =
      new ArrayList<FinalizedReplica>(volumeMap.size(bpid));
  for (ReplicaInfo b : volumeMap.replicas(bpid)) {
    if(b.getState() == ReplicaState.FINALIZED) {
      finalized.add(new FinalizedReplica((FinalizedReplica)b));
    }
  }
  return finalized;
}