org.apache.hadoop.hdfs.server.datanode.DataNode.BlockRecord Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.datanode.DataNode.BlockRecord. They are drawn from open-source projects; the source file and project each example comes from are noted above it.
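As the examples below show, a BlockRecord bundles the three pieces of state the recovery code needs for each replica: the DatanodeID of the datanode holding the replica, an InterDatanodeProtocol proxy to that datanode, and the ReplicaRecoveryInfo describing the replica itself. The following is a minimal construction sketch, assuming the Hadoop 2.x package layout and Mockito for the proxy; the block id, length, and generation stamp are placeholder values, and since BlockRecord is package-private this only compiles from inside org.apache.hadoop.hdfs.server.datanode, as the tests below do.

package org.apache.hadoop.hdfs.server.datanode;

import static org.mockito.Mockito.mock;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.DataNode.BlockRecord;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;

class BlockRecordSketch {
  static BlockRecord newRecord() {
    // Placeholder replica metadata: block id, on-disk length, generation stamp, state.
    ReplicaRecoveryInfo rInfo =
        new ReplicaRecoveryInfo(1L, 1024L, 1000L, ReplicaState.FINALIZED);
    // Mocked datanode identity and inter-datanode proxy, as in the tests below.
    DatanodeInfo id = mock(DatanodeInfo.class);
    InterDatanodeProtocol proxy = mock(InterDatanodeProtocol.class);
    return new BlockRecord(id, proxy, rInfo);
  }
}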
Example #1
Source File: TestBlockRecovery.java    From hadoop with Apache License 2.0
/** Sync two replicas */
private void testSyncReplicas(ReplicaRecoveryInfo replica1, 
    ReplicaRecoveryInfo replica2,
    InterDatanodeProtocol dn1,
    InterDatanodeProtocol dn2,
    long expectLen) throws IOException {
  
  // Two mocked replica locations for the block under recovery.
  DatanodeInfo[] locs = new DatanodeInfo[]{
      mock(DatanodeInfo.class), mock(DatanodeInfo.class)};
  RecoveringBlock rBlock = new RecoveringBlock(block, 
      locs, RECOVERY_ID);
  // One BlockRecord per replica: datanode identity, inter-datanode proxy,
  // and the replica's recovery info.
  ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
  BlockRecord record1 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
  BlockRecord record2 = new BlockRecord(
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
  syncList.add(record1);
  syncList.add(record2);
  
  // Stub the proxies so updateReplicaUnderRecovery reports a storage ID,
  // then drive the DataNode's block synchronization.
  when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
      anyLong(), anyLong())).thenReturn("storage1");
  when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
      anyLong(), anyLong())).thenReturn("storage2");
  dn.syncBlock(rBlock, syncList);
}
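A typical call site for this helper in the surrounding test class builds two ReplicaRecoveryInfo objects and two mocked proxies, then verifies that each proxy was asked to update its replica to the agreed length. The sketch below is illustrative only: blockId, gs, and replicaLen stand in for the test's class-level constants, while block and RECOVERY_ID are the fields already used above.

// Illustrative values; the real test uses class-level constants.
long blockId = 1L, gs = 2000L, replicaLen = 1024L;
ReplicaRecoveryInfo replica1 =
    new ReplicaRecoveryInfo(blockId, replicaLen, gs - 1, ReplicaState.FINALIZED);
ReplicaRecoveryInfo replica2 =
    new ReplicaRecoveryInfo(blockId, replicaLen, gs - 2, ReplicaState.FINALIZED);
InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);

testSyncReplicas(replica1, replica2, dn1, dn2, replicaLen);

// Both replicas are FINALIZED, so both datanodes should be told to update
// to the recovery id and the agreed-upon length.
verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, blockId, replicaLen);
verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, blockId, replicaLen);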
 
Example #2
Source File: TestBlockRecovery.java    From hadoop with Apache License 2.0
/**
 * BlockRecoveryFI_05. One DN throws RecoveryInProgressException.
 *
 * @throws IOException
 *           in case of an error
 */
@Test
public void testRecoveryInProgressException()
  throws IOException, InterruptedException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doThrow(new RecoveryInProgressException("Replica recovery is in progress")).
     when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  verify(spyDN, never()).syncBlock(
      any(RecoveringBlock.class), anyListOf(BlockRecord.class));
}
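The initRecoveringBlocks() helper used by this test is not reproduced on this page. Conceptually it just wraps the block under test in a small collection of RecoveringBlock entries for recoverBlocks() to process; a hypothetical reconstruction, reusing the block and RECOVERY_ID fields from Example #1, might look like this:

// Hypothetical reconstruction; the real helper lives in TestBlockRecovery.
private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
  Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
  DatanodeInfo[] locs = new DatanodeInfo[] {
      mock(DatanodeInfo.class), mock(DatanodeInfo.class) };
  blocks.add(new RecoveringBlock(block, locs, RECOVERY_ID));
  return blocks;
}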
 
Example #3
Source File: TestBlockRecovery.java    From hadoop with Apache License 2.0
/**
 * BlockRecoveryFI_06. All datanodes throw an exception.
 *
 * @throws IOException
 *           in case of an error
 */
@Test
public void testErrorReplicas() throws IOException, InterruptedException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doThrow(new IOException()).
     when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  verify(spyDN, never()).syncBlock(
      any(RecoveringBlock.class), anyListOf(BlockRecord.class));
}
 
Example #4
Source File: TestBlockRecovery.java    From hadoop with Apache License 2.0
private List<BlockRecord> initBlockRecords(DataNode spyDN) throws IOException {
  List<BlockRecord> blocks = new ArrayList<BlockRecord>(1);
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(block.getBlockPoolId());
  BlockRecord blockRecord = new BlockRecord(
      new DatanodeID(dnR), spyDN,
      new ReplicaRecoveryInfo(block.getBlockId(), block.getNumBytes(),
          block.getGenerationStamp(), ReplicaState.FINALIZED));
  blocks.add(blockRecord);
  return blocks;
}
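The records built here are typically handed straight to DataNode#syncBlock together with a RecoveringBlock, from within the same test class. A minimal sketch of such a caller, with the RecoveringBlock assembled as in Example #1:

// Sketch of a typical caller inside the test class.
DataNode spyDN = spy(dn);
RecoveringBlock rBlock = new RecoveringBlock(block,
    new DatanodeInfo[] { mock(DatanodeInfo.class) }, RECOVERY_ID);
spyDN.syncBlock(rBlock, initBlockRecords(spyDN));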
 
Example #5
Source File: NamespaceService.java    From RDFS with Apache License 2.0
abstract LocatedBlock syncBlock(Block block, List<BlockRecord> syncList,
    boolean closeFile, List<InterDatanodeProtocol> datanodeProxies,
    long deadline) throws IOException;
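This abstract method from the RDFS (Facebook Hadoop) fork uses the same per-replica BlockRecord list on the namespace side: an implementation is expected to synchronize every replica in syncList to a consistent state for block via the supplied inter-datanode proxies, optionally close the file, give up once deadline passes, and return the resulting LocatedBlock. A rough, purely illustrative invocation, with the assembly of syncList and datanodeProxies omitted and the variable names assumed:

// Illustrative only: syncList and datanodeProxies would be gathered while
// contacting each datanode that reports a replica of the block.
LocatedBlock recovered = namespace.syncBlock(
    block, syncList, true /* closeFile */, datanodeProxies, deadline);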