org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState. Each example notes the source file it was taken from, along with the originating project and its license.
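
Before the individual examples, here is a minimal standalone sketch (written for this page, not taken from any of the projects below) that lists the enum values and round-trips one of them through getValue() and getState(int), the two accessors the block-report examples below depend on. The numeric codes it prints are an implementation detail of your Hadoop version, not a contract.

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;

public class ReplicaStateDemo {
  public static void main(String[] args) {
    // Print every replica lifecycle state together with its numeric wire code.
    for (ReplicaState s : ReplicaState.values()) {
      System.out.println(s + " -> " + s.getValue());
    }
    // getState(int) is the inverse of getValue(), as used when decoding block reports.
    ReplicaState decoded = ReplicaState.getState(ReplicaState.RBW.getValue());
    System.out.println("decoded: " + decoded); // expected: RBW
  }
}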
Example #1
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
@Override  // FsDatasetSpi
public synchronized ReplicaHandler recoverAppend(
    ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
  LOG.info("Recover failed append to " + b);

  ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);

  FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
  ReplicaBeingWritten replica;
  try {
    // change the replica's state/gs etc.
    if (replicaInfo.getState() == ReplicaState.FINALIZED) {
      replica = append(b.getBlockPoolId(), (FinalizedReplica) replicaInfo,
                       newGS, b.getNumBytes());
    } else { //RBW
      bumpReplicaGS(replicaInfo, newGS);
      replica = (ReplicaBeingWritten) replicaInfo;
    }
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
  return new ReplicaHandler(replica, ref);
}
 
Example #2
Source File: BlockManager.java    From big-c with Apache License 2.0
private boolean isBlockUnderConstruction(BlockInfoContiguous storedBlock,
    BlockUCState ucState, ReplicaState reportedState) {
  switch(reportedState) {
  case FINALIZED:
    switch(ucState) {
    case UNDER_CONSTRUCTION:
    case UNDER_RECOVERY:
      return true;
    default:
      return false;
    }
  case RBW:
  case RWR:
    return (!storedBlock.isComplete());
  case RUR:       // should not be reported
  case TEMPORARY: // should not be reported
  default:
    return false;
  }
}
 
Example #3
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
/**
 * Remove the temporary block file (if any)
 */
@Override // FsDatasetSpi
public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException {
  ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
      b.getLocalBlock());
  if (replicaInfo != null && replicaInfo.getState() == ReplicaState.TEMPORARY) {
    // remove from volumeMap
    volumeMap.remove(b.getBlockPoolId(), b.getLocalBlock());
    
    // delete the on-disk temp file
    if (delBlockFromDisk(replicaInfo.getBlockFile(), 
        replicaInfo.getMetaFile(), b.getLocalBlock())) {
      LOG.warn("Block " + b + " unfinalized and removed. " );
    }
    if (replicaInfo.getVolume().isTransientStorage()) {
      ramDiskReplicaTracker.discardReplica(b.getBlockPoolId(), b.getBlockId(), true);
    }
  }
}
 
Example #4
Source File: BlockInfoContiguousUnderConstruction.java    From big-c with Apache License 2.0
void addReplicaIfNotPresent(DatanodeStorageInfo storage,
                   Block block,
                   ReplicaState rState) {
  Iterator<ReplicaUnderConstruction> it = replicas.iterator();
  while (it.hasNext()) {
    ReplicaUnderConstruction r = it.next();
    DatanodeStorageInfo expectedLocation = r.getExpectedStorageLocation();
    if(expectedLocation == storage) {
      // Record the gen stamp from the report
      r.setGenerationStamp(block.getGenerationStamp());
      return;
    } else if (expectedLocation != null &&
               expectedLocation.getDatanodeDescriptor() ==
                   storage.getDatanodeDescriptor()) {

      // The Datanode reported that the block is on a different storage
      // than the one chosen by BlockPlacementPolicy. This can occur as
      // we allow Datanodes to choose the target storage. Update our
      // state by removing the stale entry and adding a new one.
      it.remove();
      break;
    }
  }
  replicas.add(new ReplicaUnderConstruction(block, storage, rState));
}
 
Example #5
Source File: TestBlockRecovery.java    From big-c with Apache License 2.0
/**
 * BlockRecoveryFI_07. max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
      initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(
      block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
 
Example #6
Source File: TestBlockRecovery.java    From big-c with Apache License 2.0
/**
 * BlockRecovery_02.13. 
 * Two replicas are RWR.
 * @throws IOException in case of an error
 */
@Test
public void testRWRReplicas() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, 
      REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RWR);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, 
      REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RWR);

  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);

  long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
  testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
  
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
  verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
}
 
Example #7
Source File: TestBlockRecovery.java    From big-c with Apache License 2.0
/**
 * BlockRecovery_02.11.
 * Two replicas are RBW.
 * @throws IOException in case of an error
 */
@Test
public void testRBWReplicas() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, 
      REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RBW);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, 
      REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RBW);

  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);

  long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
  testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
  verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
}
 
Example #8
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
/**
 * Check if a block is valid.
 *
 * @param b           The block to check.
 * @param minLength   The minimum length that the block must have.  May be 0.
 * @param state       If this is null, it is ignored.  If it is non-null, we
 *                        will check that the replica has this state.
 *
 * @throws ReplicaNotFoundException          If the replica is not found 
 *
 * @throws UnexpectedReplicaStateException   If the replica is not in the 
 *                                             expected state.
 * @throws FileNotFoundException             If the block file is not found or there
 *                                              was an error locating it.
 * @throws EOFException                      If the replica length is too short.
 * 
 * @throws IOException                       May be thrown from the methods called. 
 */
public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
    throws ReplicaNotFoundException, UnexpectedReplicaStateException,
    FileNotFoundException, EOFException, IOException {
  final ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
      b.getLocalBlock());
  if (replicaInfo == null) {
    throw new ReplicaNotFoundException(b);
  }
  if (replicaInfo.getState() != state) {
    throw new UnexpectedReplicaStateException(b, state);
  }
  if (!replicaInfo.getBlockFile().exists()) {
    throw new FileNotFoundException(replicaInfo.getBlockFile().getPath());
  }
  long onDiskLength = getLength(b);
  if (onDiskLength < minLength) {
    throw new EOFException(b + "'s on-disk length " + onDiskLength
        + " is shorter than minLength " + minLength);
  }
}
 
Example #9
Source File: BlockPoolSlice.java    From lucene-solr with Apache License 2.0
/**
 * Move a persisted replica from lazypersist directory to a subdirectory
 * under finalized.
 */
ReplicaInfo activateSavedReplica(ReplicaInfo replicaInfo,
                                 RamDiskReplica replicaState) throws IOException {
  File metaFile = replicaState.getSavedMetaFile();
  File blockFile = replicaState.getSavedBlockFile();
  final long blockId = replicaInfo.getBlockId();
  final File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, blockId);
  final File targetBlockFile = new File(blockDir, blockFile.getName());
  final File targetMetaFile = new File(blockDir, metaFile.getName());
  fileIoProvider.moveFile(volume, blockFile, targetBlockFile);
  FsDatasetImpl.LOG.info("Moved " + blockFile + " to " + targetBlockFile);
  fileIoProvider.moveFile(volume, metaFile, targetMetaFile);
  FsDatasetImpl.LOG.info("Moved " + metaFile + " to " + targetMetaFile);

  ReplicaInfo newReplicaInfo =
      new ReplicaBuilder(ReplicaState.FINALIZED)
          .setBlockId(blockId)
          .setLength(replicaInfo.getBytesOnDisk())
          .setGenerationStamp(replicaInfo.getGenerationStamp())
          .setFsVolume(replicaState.getLazyPersistVolume())
          .setDirectoryToUse(targetBlockFile.getParentFile())
          .build();
  return newReplicaInfo;
}
 
Example #10
Source File: TestBlockListAsLongs.java    From hadoop with Apache License 2.0
@Test
public void testMix() {
  BlockListAsLongs blocks = checkReport(
      new FinalizedReplica(b1, null, null),
      new FinalizedReplica(b2, null, null),
      new ReplicaBeingWritten(b3, null, null, null),
      new ReplicaWaitingToBeRecovered(b4, null, null));
  assertArrayEquals(
      new long[] {
          2, 2,
          1, 11, 111,
          2, 22, 222,
          -1, -1, -1,
          3, 33, 333, ReplicaState.RBW.getValue(),
          4, 44, 444, ReplicaState.RWR.getValue() },
      blocks.getBlockListAsLongs());
}
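
The expected long[] in this assertion illustrates the legacy block-report layout: a two-long header (number of finalized and number of under-construction replicas), finalized replicas as (blockId, numBytes, genStamp) triplets, a -1,-1,-1 delimiter, and under-construction replicas as quadruplets that append the ReplicaState code. As a rough illustration inferred from the test above (plain array walking, not Hadoop API), the under-construction section can be read back like this:

long[] longs = {
    2, 2,                                     // header: #finalized, #under-construction
    1, 11, 111,                               // finalized: blockId, numBytes, genStamp
    2, 22, 222,
    -1, -1, -1,                               // delimiter between the two sections
    3, 33, 333, ReplicaState.RBW.getValue(),  // under construction: triplet plus state
    4, 44, 444, ReplicaState.RWR.getValue() };
int numFinalized = (int) longs[0];
int numUc = (int) longs[1];
int i = 2 + numFinalized * 3 + 3;             // skip header, finalized triplets, delimiter
for (int n = 0; n < numUc; n++, i += 4) {
  ReplicaState state = ReplicaState.getState((int) longs[i + 3]);
  System.out.println("blockId=" + longs[i] + " len=" + longs[i + 1] + " state=" + state);
}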
 
Example #11
Source File: BlockListAsLongs.java    From big-c with Apache License 2.0
public void add(Replica replica) {
  try {
    // zig-zag to reduce size of legacy blocks
    cos.writeSInt64NoTag(replica.getBlockId());
    cos.writeRawVarint64(replica.getBytesOnDisk());
    cos.writeRawVarint64(replica.getGenerationStamp());
    ReplicaState state = replica.getState();
    // although state is not a 64-bit value, using a long varint to
    // allow for future use of the upper bits
    cos.writeRawVarint64(state.getValue());
    if (state == ReplicaState.FINALIZED) {
      numFinalized++;
    }
    numBlocks++;
  } catch (IOException ioe) {
    // shouldn't happen, ByteString.Output doesn't throw IOE
    throw new IllegalStateException(ioe);
  }
}
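
The "zig-zag" comment above refers to the protobuf signed-varint trick: ids of small magnitude, positive or negative, are mapped to small unsigned values so the varint stays short. Purely as an illustration of that mapping (writeSInt64NoTag performs it internally; the helpers below are not Hadoop or protobuf API):

// Illustration of the zig-zag transform applied by writeSInt64NoTag.
static long zigZagEncode(long n) {
  return (n << 1) ^ (n >> 63);   // arithmetic shift copies the sign into every bit
}

static long zigZagDecode(long n) {
  return (n >>> 1) ^ -(n & 1);   // undo the interleaving of positives and negatives
}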
 
Example #12
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
/**
 * Get the list of finalized blocks from in-memory blockmap for a block pool.
 */
@Override
public synchronized List<FinalizedReplica> getFinalizedBlocks(String bpid) {
  ArrayList<FinalizedReplica> finalized =
      new ArrayList<FinalizedReplica>(volumeMap.size(bpid));
  for (ReplicaInfo b : volumeMap.replicas(bpid)) {
    if(b.getState() == ReplicaState.FINALIZED) {
      finalized.add(new FinalizedReplica((FinalizedReplica)b));
    }
  }
  return finalized;
}
 
Example #13
Source File: BlockManager.java    From hadoop with Apache License 2.0
void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock,
    DatanodeStorageInfo storageInfo) throws IOException {
  BlockInfoContiguousUnderConstruction block = ucBlock.storedBlock;
  block.addReplicaIfNotPresent(
      storageInfo, ucBlock.reportedBlock, ucBlock.reportedState);

  if (ucBlock.reportedState == ReplicaState.FINALIZED &&
      !block.findDatanode(storageInfo.getDatanodeDescriptor())) {
    addStoredBlock(block, storageInfo, null, true);
  }
}
 
Example #14
Source File: BlockListAsLongs.java    From big-c with Apache License 2.0
@Override
public Iterator<BlockReportReplica> iterator() {
  return new Iterator<BlockReportReplica>() {
    final BlockReportReplica block = new BlockReportReplica();
    final CodedInputStream cis = buffer.newCodedInput();
    private int currentBlockIndex = 0;

    @Override
    public boolean hasNext() {
      return currentBlockIndex < numBlocks;
    }

    @Override
    public BlockReportReplica next() {
      currentBlockIndex++;
      try {
        // zig-zag to reduce size of legacy blocks and mask off bits
        // we don't (yet) understand
        block.setBlockId(cis.readSInt64());
        block.setNumBytes(cis.readRawVarint64() & NUM_BYTES_MASK);
        block.setGenerationStamp(cis.readRawVarint64());
        long state = cis.readRawVarint64() & REPLICA_STATE_MASK;
        block.setState(ReplicaState.getState((int)state));
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
      return block;
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }
  };
}
 
Example #15
Source File: BlockListAsLongs.java    From hadoop with Apache License 2.0
public BlockReportReplica(Block block) {
  super(block);
  if (block instanceof BlockReportReplica) {
    this.state = ((BlockReportReplica)block).getState();
  } else {
    this.state = ReplicaState.FINALIZED;
  }
}
 
Example #16
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
/**
 * Complete the block write!
 */
@Override // FsDatasetSpi
public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
  if (Thread.interrupted()) {
    // Don't allow data modifications from interrupted threads
    throw new IOException("Cannot finalize block from Interrupted Thread");
  }
  ReplicaInfo replicaInfo = getReplicaInfo(b);
  if (replicaInfo.getState() == ReplicaState.FINALIZED) {
    // this is legal, when recovery happens on a file that has
    // been opened for append but never modified
    return;
  }
  finalizeReplica(b.getBlockPoolId(), replicaInfo);
}
 
Example #17
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
/** Does the block exist and have the given state? */
private boolean isValid(final ExtendedBlock b, final ReplicaState state) {
  try {
    checkBlock(b, 0, state);
  } catch (IOException e) {
    return false;
  }
  return true;
}
 
Example #18
Source File: ReplicaUnderRecovery.java    From hadoop with Apache License 2.0
public ReplicaUnderRecovery(ReplicaInfo replica, long recoveryId) {
  super(replica, replica.getVolume(), replica.getDir());
  if ( replica.getState() != ReplicaState.FINALIZED &&
       replica.getState() != ReplicaState.RBW &&
       replica.getState() != ReplicaState.RWR ) {
    throw new IllegalArgumentException("Cannot recover replica: " + replica);
  }
  this.original = replica;
  this.recoveryId = recoveryId;
}
 
Example #19
Source File: SimulatedFSDataset.java    From hadoop with Apache License 2.0
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
throws IOException {
  ExtendedBlock b = rBlock.getBlock();
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new IOException("No such Block " + b );  
  }

  return new ReplicaRecoveryInfo(binfo.getBlockId(), binfo.getBytesOnDisk(), 
      binfo.getGenerationStamp(), 
      binfo.isFinalized() ? ReplicaState.FINALIZED : ReplicaState.RBW);
}
 
Example #20
Source File: BlockManager.java    From hadoop with Apache License 2.0
private void processAndHandleReportedBlock(
    DatanodeStorageInfo storageInfo, Block block,
    ReplicaState reportedState, DatanodeDescriptor delHintNode)
    throws IOException {
  // blockReceived reports a finalized block
  Collection<BlockInfoContiguous> toAdd = new LinkedList<BlockInfoContiguous>();
  Collection<Block> toInvalidate = new LinkedList<Block>();
  Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<BlockToMarkCorrupt>();
  Collection<StatefulBlockInfo> toUC = new LinkedList<StatefulBlockInfo>();
  final DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();

  processReportedBlock(storageInfo, block, reportedState,
                            toAdd, toInvalidate, toCorrupt, toUC);
  // the block is only in one of the to-do lists
  // if it is in none then data-node already has it
  assert toUC.size() + toAdd.size() + toInvalidate.size() + toCorrupt.size() <= 1
    : "The block should be only in one of the lists.";

  for (StatefulBlockInfo b : toUC) { 
    addStoredBlockUnderConstruction(b, storageInfo);
  }
  long numBlocksLogged = 0;
  for (BlockInfoContiguous b : toAdd) {
    addStoredBlock(b, storageInfo, delHintNode, numBlocksLogged < maxNumBlocksToLog);
    numBlocksLogged++;
  }
  if (numBlocksLogged > maxNumBlocksToLog) {
    blockLog.info("BLOCK* addBlock: logged info for {} of {} reported.",
        maxNumBlocksToLog, numBlocksLogged);
  }
  for (Block b : toInvalidate) {
    blockLog.info("BLOCK* addBlock: block {} on node {} size {} does not " +
        "belong to any file", b, node, b.getNumBytes());
    addToInvalidates(b, node);
  }
  for (BlockToMarkCorrupt b : toCorrupt) {
    markBlockAsCorrupt(b, storageInfo, node);
  }
}
 
Example #21
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
/**
 * Get the list of finalized blocks on persistent (non-transient) storage from the
 * in-memory blockmap for a block pool.
 */
@Override
public synchronized List<FinalizedReplica> getFinalizedBlocksOnPersistentStorage(String bpid) {
  ArrayList<FinalizedReplica> finalized =
      new ArrayList<FinalizedReplica>(volumeMap.size(bpid));
  for (ReplicaInfo b : volumeMap.replicas(bpid)) {
    if(!b.getVolume().isTransientStorage() &&
       b.getState() == ReplicaState.FINALIZED) {
      finalized.add(new FinalizedReplica((FinalizedReplica)b));
    }
  }
  return finalized;
}
 
Example #22
Source File: SimulatedFSDataset.java    From big-c with Apache License 2.0
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
throws IOException {
  ExtendedBlock b = rBlock.getBlock();
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new IOException("No such Block " + b );  
  }

  return new ReplicaRecoveryInfo(binfo.getBlockId(), binfo.getBytesOnDisk(), 
      binfo.getGenerationStamp(), 
      binfo.isFinalized() ? ReplicaState.FINALIZED : ReplicaState.RBW);
}
 
Example #23
Source File: BlockInfoContiguousUnderConstruction.java    From big-c with Apache License 2.0
ReplicaUnderConstruction(Block block,
                         DatanodeStorageInfo target,
                         ReplicaState state) {
  super(block);
  this.expectedLocation = target;
  this.state = state;
  this.chosenAsPrimary = false;
}
 
Example #24
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
/**
 * Get the list of finalized blocks from in-memory blockmap for a block pool.
 */
@Override
public synchronized List<FinalizedReplica> getFinalizedBlocks(String bpid) {
  ArrayList<FinalizedReplica> finalized =
      new ArrayList<FinalizedReplica>(volumeMap.size(bpid));
  for (ReplicaInfo b : volumeMap.replicas(bpid)) {
    if(b.getState() == ReplicaState.FINALIZED) {
      finalized.add(new FinalizedReplica((FinalizedReplica)b));
    }
  }
  return finalized;
}
 
Example #25
Source File: BlockListAsLongs.java    From hadoop with Apache License 2.0
@Override
public Iterator<BlockReportReplica> iterator() {
  return new Iterator<BlockReportReplica>() {
    final BlockReportReplica block = new BlockReportReplica();
    final CodedInputStream cis = buffer.newCodedInput();
    private int currentBlockIndex = 0;

    @Override
    public boolean hasNext() {
      return currentBlockIndex < numBlocks;
    }

    @Override
    public BlockReportReplica next() {
      currentBlockIndex++;
      try {
        // zig-zag to reduce size of legacy blocks and mask off bits
        // we don't (yet) understand
        block.setBlockId(cis.readSInt64());
        block.setNumBytes(cis.readRawVarint64() & NUM_BYTES_MASK);
        block.setGenerationStamp(cis.readRawVarint64());
        long state = cis.readRawVarint64() & REPLICA_STATE_MASK;
        block.setState(ReplicaState.getState((int)state));
      } catch (IOException e) {
        throw new IllegalStateException(e);
      }
      return block;
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }
  };
}
 
Example #26
Source File: SimulatedFSDataset.java    From big-c with Apache License 2.0
/**
 * Check if a block is valid.
 *
 * @param b           The block to check.
 * @param minLength   The minimum length that the block must have.  May be 0.
 * @param state       If this is null, it is ignored.  If it is non-null, we
 *                        will check that the replica has this state.
 *
 * @throws ReplicaNotFoundException          If the replica is not found
 *
 * @throws UnexpectedReplicaStateException   If the replica is not in the 
 *                                             expected state.
 */
@Override // {@link FsDatasetSpi}
public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
    throws ReplicaNotFoundException, UnexpectedReplicaStateException {
  final BInfo binfo = getBInfo(b);
  
  if (binfo == null) {
    throw new ReplicaNotFoundException(b);
  }
  if ((state == ReplicaState.FINALIZED && !binfo.isFinalized()) ||
      (state != ReplicaState.FINALIZED && binfo.isFinalized())) {
    throw new UnexpectedReplicaStateException(b, state);
  }
}
 
Example #27
Source File: TestBlockRecovery.java    From hadoop with Apache License 2.0
/**
 * BlockRecovery_02.9.
 * One replica is Finalized and another is RBW. 
 * @throws IOException in case of an error
 */
@Test
public void testFinalizedRbwReplicas() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  
  // rbw and finalized replicas have the same length
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, 
      REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, 
      REPLICA_LEN1, GEN_STAMP-2, ReplicaState.RBW);

  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);

  testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
      REPLICA_LEN1);
  verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
      REPLICA_LEN1);
  
  // rbw replica has a different length from the finalized one
  replica1 = new ReplicaRecoveryInfo(BLOCK_ID, 
      REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED);
  replica2 = new ReplicaRecoveryInfo(BLOCK_ID, 
      REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RBW);

  dn1 = mock(InterDatanodeProtocol.class);
  dn2 = mock(InterDatanodeProtocol.class);

  testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
  verify(dn2, never()).updateReplicaUnderRecovery(
      block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
}
 
Example #28
Source File: TestBlockRecovery.java    From hadoop with Apache License 2.0
/**
 * BlockRecovery_02.8.
 * Two replicas are in Finalized state
 * @throws IOException in case of an error
 */
@Test
public void testFinalizedReplicas () throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID, 
      REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED);
  ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID, 
      REPLICA_LEN1, GEN_STAMP-2, ReplicaState.FINALIZED);

  InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
  InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);

  testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
  verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
      REPLICA_LEN1);
  verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
      REPLICA_LEN1);

  // two finalized replicas have different length
  replica1 = new ReplicaRecoveryInfo(BLOCK_ID, 
      REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED);
  replica2 = new ReplicaRecoveryInfo(BLOCK_ID, 
      REPLICA_LEN2, GEN_STAMP-2, ReplicaState.FINALIZED);

  try {
    testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
    Assert.fail("Two finalized replicas should not have different lengths!");
  } catch (IOException e) {
    Assert.assertTrue(e.getMessage().startsWith(
        "Inconsistent size of finalized replicas. "));
  }
}
 
Example #29
Source File: BlockManager.java    From big-c with Apache License 2.0
void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock,
    DatanodeStorageInfo storageInfo) throws IOException {
  BlockInfoContiguousUnderConstruction block = ucBlock.storedBlock;
  block.addReplicaIfNotPresent(
      storageInfo, ucBlock.reportedBlock, ucBlock.reportedState);

  if (ucBlock.reportedState == ReplicaState.FINALIZED &&
      !block.findDatanode(storageInfo.getDatanodeDescriptor())) {
    addStoredBlock(block, storageInfo, null, true);
  }
}
 
Example #30
Source File: SimulatedFSDataset.java    From hadoop with Apache License 2.0
@Override
public synchronized boolean isValidRbw(ExtendedBlock b) {
  try {
    checkBlock(b, 0, ReplicaState.RBW);
  } catch (IOException e) {
    return false;
  }
  return true;
}