org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo. They are taken from the Apache Hadoop codebase and from the big-c project, and each example is attributed to its original source file, project, and license.
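As a primer for the examples below, here is a minimal sketch of the pattern most of them revolve around: a ReceivedDeletedBlockInfo describes a single block event (received, receiving, or deleted), a StorageReceivedDeletedBlocks groups such events per storage, and the DataNode ships the result to the NameNode via DatanodeProtocol.blockReceivedAndDeleted. The helper method and its parameters are hypothetical, and the imports assume the Hadoop 2.x package layout; this is only a sketch of the pattern the real examples use.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;

public class IncrementalBlockReportSketch {

  /**
   * Report a single newly received block to the NameNode. All parameters are
   * assumed to come from the surrounding DataNode context.
   */
  static void reportReceivedBlock(DatanodeProtocol nameNode,
      DatanodeRegistration reg, String blockPoolId, String storageUuid,
      Block block) throws IOException {
    // One entry per block event: the block, its status, and an optional delete hint.
    ReceivedDeletedBlockInfo[] rdBlocks = {
        new ReceivedDeletedBlockInfo(block, BlockStatus.RECEIVED_BLOCK, null)
    };
    // Group the entries by the storage (volume) that holds the replica.
    StorageReceivedDeletedBlocks[] report = {
        new StorageReceivedDeletedBlocks(storageUuid, rdBlocks)
    };
    // Send the incremental block report for this block pool.
    nameNode.blockReceivedAndDeleted(reg, blockPoolId, report);
  }
}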
Example #1
Source File: TestBPOfferService.java    From big-c with Apache License 2.0
/**
 * Wait until the given mock NameNode has received a blockReceivedAndDeleted
 * call for this block's pool, then return the reported block infos.
 */
private ReceivedDeletedBlockInfo[] waitForBlockReceived(
    final ExtendedBlock fakeBlock,
    final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
  final String fakeBlockPoolId = fakeBlock.getBlockPoolId();
  final ArgumentCaptor<StorageReceivedDeletedBlocks[]> captor =
    ArgumentCaptor.forClass(StorageReceivedDeletedBlocks[].class);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {

    @Override
    public Boolean get() {
      try {
        Mockito.verify(mockNN).blockReceivedAndDeleted(
          Mockito.<DatanodeRegistration>anyObject(),
          Mockito.eq(fakeBlockPoolId),
          captor.capture());
        return true;
      } catch (Throwable t) {
        return false;
      }
    }
  }, 100, 10000);
  return captor.getValue()[0].getBlocks();
}
 
Example #2
Source File: TestBPOfferService.java    From hadoop with Apache License 2.0
/**
 * Wait until the given mock NameNode has received a blockReceivedAndDeleted
 * call for this block's pool, then return the reported block infos.
 */
private ReceivedDeletedBlockInfo[] waitForBlockReceived(
    final ExtendedBlock fakeBlock,
    final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
  final String fakeBlockPoolId = fakeBlock.getBlockPoolId();
  final ArgumentCaptor<StorageReceivedDeletedBlocks[]> captor =
    ArgumentCaptor.forClass(StorageReceivedDeletedBlocks[].class);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {

    @Override
    public Boolean get() {
      try {
        Mockito.verify(mockNN).blockReceivedAndDeleted(
          Mockito.<DatanodeRegistration>anyObject(),
          Mockito.eq(fakeBlockPoolId),
          captor.capture());
        return true;
      } catch (Throwable t) {
        return false;
      }
    }
  }, 100, 10000);
  return captor.getValue()[0].getBlocks();
}
 
Example #3
Source File: NNThroughputBenchmark.java    From hadoop with Apache License 2.0
/**
 * Add blocksPerFile blocks to the file and notify the NameNode that each
 * replica was received, mirroring what real DataNodes would report.
 */
private ExtendedBlock addBlocks(String fileName, String clientName)
throws IOException {
  ExtendedBlock prevBlock = null;
  for(int jdx = 0; jdx < blocksPerFile; jdx++) {
    LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName,
        prevBlock, null, INodeId.GRANDFATHER_INODE_ID, null);
    prevBlock = loc.getBlock();
    for(DatanodeInfo dnInfo : loc.getLocations()) {
      int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());
      datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
      ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
          loc.getBlock().getLocalBlock(),
          ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          datanodes[dnIdx].storage.getStorageID(), rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, loc
          .getBlock().getBlockPoolId(), report);
    }
  }
  return prevBlock;
}
 
Example #4
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static ReceivedDeletedBlockInfo convert(
    ReceivedDeletedBlockInfoProto proto) {
  ReceivedDeletedBlockInfo.BlockStatus status = null;
  switch (proto.getStatus()) {
  case RECEIVING:
    status = BlockStatus.RECEIVING_BLOCK;
    break;
  case RECEIVED:
    status = BlockStatus.RECEIVED_BLOCK;
    break;
  case DELETED:
    status = BlockStatus.DELETED_BLOCK;
    break;
  }
  return new ReceivedDeletedBlockInfo(
      PBHelper.convert(proto.getBlock()),
      status,
      proto.hasDeleteHint() ? proto.getDeleteHint() : null);
}
 
Example #5
Source File: DatanodeProtocolClientSideTranslatorPB.java    From big-c with Apache License 2.0
@Override
public void blockReceivedAndDeleted(DatanodeRegistration registration,
    String poolId, StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks)
    throws IOException {
  BlockReceivedAndDeletedRequestProto.Builder builder = 
      BlockReceivedAndDeletedRequestProto.newBuilder()
      .setRegistration(PBHelper.convert(registration))
      .setBlockPoolId(poolId);
  for (StorageReceivedDeletedBlocks storageBlock : receivedAndDeletedBlocks) {
    StorageReceivedDeletedBlocksProto.Builder repBuilder = 
        StorageReceivedDeletedBlocksProto.newBuilder();
    repBuilder.setStorageUuid(storageBlock.getStorage().getStorageID());  // Set for wire compatibility.
    repBuilder.setStorage(PBHelper.convert(storageBlock.getStorage()));
    for (ReceivedDeletedBlockInfo rdBlock : storageBlock.getBlocks()) {
      repBuilder.addBlocks(PBHelper.convert(rdBlock));
    }
    builder.addBlocks(repBuilder.build());
  }
  try {
    rpcProxy.blockReceivedAndDeleted(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
}
 
Example #6
Source File: DatanodeProtocolClientSideTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public void blockReceivedAndDeleted(DatanodeRegistration registration,
    String poolId, StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks)
    throws IOException {
  BlockReceivedAndDeletedRequestProto.Builder builder = 
      BlockReceivedAndDeletedRequestProto.newBuilder()
      .setRegistration(PBHelper.convert(registration))
      .setBlockPoolId(poolId);
  for (StorageReceivedDeletedBlocks storageBlock : receivedAndDeletedBlocks) {
    StorageReceivedDeletedBlocksProto.Builder repBuilder = 
        StorageReceivedDeletedBlocksProto.newBuilder();
    repBuilder.setStorageUuid(storageBlock.getStorage().getStorageID());  // Set for wire compatibility.
    repBuilder.setStorage(PBHelper.convert(storageBlock.getStorage()));
    for (ReceivedDeletedBlockInfo rdBlock : storageBlock.getBlocks()) {
      repBuilder.addBlocks(PBHelper.convert(rdBlock));
    }
    builder.addBlocks(repBuilder.build());
  }
  try {
    rpcProxy.blockReceivedAndDeleted(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
}
 
Example #7
Source File: PBHelper.java    From big-c with Apache License 2.0
public static ReceivedDeletedBlockInfo convert(
    ReceivedDeletedBlockInfoProto proto) {
  ReceivedDeletedBlockInfo.BlockStatus status = null;
  switch (proto.getStatus()) {
  case RECEIVING:
    status = BlockStatus.RECEIVING_BLOCK;
    break;
  case RECEIVED:
    status = BlockStatus.RECEIVED_BLOCK;
    break;
  case DELETED:
    status = BlockStatus.DELETED_BLOCK;
    break;
  }
  return new ReceivedDeletedBlockInfo(
      PBHelper.convert(proto.getBlock()),
      status,
      proto.hasDeleteHint() ? proto.getDeleteHint() : null);
}
 
Example #8
Source File: NNThroughputBenchmark.java    From big-c with Apache License 2.0
/**
 * Add blocksPerFile blocks to the file and notify the NameNode that each
 * replica was received, mirroring what real DataNodes would report.
 */
private ExtendedBlock addBlocks(String fileName, String clientName)
throws IOException {
  ExtendedBlock prevBlock = null;
  for(int jdx = 0; jdx < blocksPerFile; jdx++) {
    LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName,
        prevBlock, null, INodeId.GRANDFATHER_INODE_ID, null);
    prevBlock = loc.getBlock();
    for(DatanodeInfo dnInfo : loc.getLocations()) {
      int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());
      datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
      ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
          loc.getBlock().getLocalBlock(),
          ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
      StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
          datanodes[dnIdx].storage.getStorageID(), rdBlocks) };
      nameNodeProto.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, loc
          .getBlock().getBlockPoolId(), report);
    }
  }
  return prevBlock;
}
 
Example #9
Source File: PBHelper.java    From big-c with Apache License 2.0
public static ReceivedDeletedBlockInfoProto convert(
    ReceivedDeletedBlockInfo receivedDeletedBlockInfo) {
  ReceivedDeletedBlockInfoProto.Builder builder = 
      ReceivedDeletedBlockInfoProto.newBuilder();
  
  ReceivedDeletedBlockInfoProto.BlockStatus status;
  switch (receivedDeletedBlockInfo.getStatus()) {
  case RECEIVING_BLOCK:
    status = ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVING;
    break;
  case RECEIVED_BLOCK:
    status = ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVED;
    break;
  case DELETED_BLOCK:
    status = ReceivedDeletedBlockInfoProto.BlockStatus.DELETED;
    break;
  default:
    throw new IllegalArgumentException("Bad status: " +
        receivedDeletedBlockInfo.getStatus());
  }
  builder.setStatus(status);
  
  if (receivedDeletedBlockInfo.getDelHints() != null) {
    builder.setDeleteHint(receivedDeletedBlockInfo.getDelHints());
  }
  return builder.setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
      .build();
}
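Since Example #9 converts a ReceivedDeletedBlockInfo to its protobuf form and Example #4 converts it back, a quick round trip is an easy way to see what survives on the wire. This is a minimal sketch rather than Hadoop source: the class name and main method are made up, the imports assume the Hadoop 2.x package layout, and the block values are arbitrary.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;

public class ReceivedDeletedBlockInfoRoundTrip {
  public static void main(String[] args) {
    // Arbitrary block: id 1234, 512 bytes, generation stamp 1000.
    ReceivedDeletedBlockInfo original = new ReceivedDeletedBlockInfo(
        new Block(1234L, 512L, 1000L), BlockStatus.DELETED_BLOCK, null);

    // POJO -> protobuf -> POJO, using the converters shown above.
    ReceivedDeletedBlockInfoProto proto = PBHelper.convert(original);
    ReceivedDeletedBlockInfo copy = PBHelper.convert(proto);

    // The status and block identity survive the round trip.
    System.out.println(copy.getStatus());              // DELETED_BLOCK
    System.out.println(copy.getBlock().getBlockId());  // 1234
  }
}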
 
Example #10
Source File: DatanodeProtocolServerSideTranslatorPB.java    From big-c with Apache License 2.0
@Override
public BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
    RpcController controller, BlockReceivedAndDeletedRequestProto request)
    throws ServiceException {
  List<StorageReceivedDeletedBlocksProto> sBlocks = request.getBlocksList();
  StorageReceivedDeletedBlocks[] info = 
      new StorageReceivedDeletedBlocks[sBlocks.size()];
  for (int i = 0; i < sBlocks.size(); i++) {
    StorageReceivedDeletedBlocksProto sBlock = sBlocks.get(i);
    List<ReceivedDeletedBlockInfoProto> list = sBlock.getBlocksList();
    ReceivedDeletedBlockInfo[] rdBlocks = 
        new ReceivedDeletedBlockInfo[list.size()];
    for (int j = 0; j < list.size(); j++) {
      rdBlocks[j] = PBHelper.convert(list.get(j));
    }
    if (sBlock.hasStorage()) {
      info[i] = new StorageReceivedDeletedBlocks(
          PBHelper.convert(sBlock.getStorage()), rdBlocks);
    } else {
      info[i] = new StorageReceivedDeletedBlocks(sBlock.getStorageUuid(), rdBlocks);
    }
  }
  try {
    impl.blockReceivedAndDeleted(PBHelper.convert(request.getRegistration()),
        request.getBlockPoolId(), info);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE;
}
 
Example #11
Source File: BPServiceActor.java    From big-c with Apache License 2.0
/**
 * Add blocks from blockArray to pendingIncrementalBR, unless the
 * block already exists in pendingIncrementalBR.
 * @param blockArray list of blocks to add.
 * @return the number of missing blocks that we added.
 */
int putMissingBlockInfos(ReceivedDeletedBlockInfo[] blockArray) {
  int blocksPut = 0;
  for (ReceivedDeletedBlockInfo rdbi : blockArray) {
    if (!pendingIncrementalBR.containsKey(rdbi.getBlock().getBlockId())) {
      pendingIncrementalBR.put(rdbi.getBlock().getBlockId(), rdbi);
      ++blocksPut;
    }
  }
  return blocksPut;
}
 
Example #12
Source File: TestBPOfferService.java    From big-c with Apache License 2.0
/**
 * Test that the BPOS can register to talk to two different NNs,
 * sends block reports to both, etc.
 */
@Test
public void testBasicFunctionality() throws Exception {
  BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
  bpos.start();
  try {
    waitForInitialization(bpos);
    
    // The DN should have registered with both NNs.
    Mockito.verify(mockNN1).registerDatanode(
        Mockito.any(DatanodeRegistration.class));
    Mockito.verify(mockNN2).registerDatanode(
        Mockito.any(DatanodeRegistration.class));
    
    // Should get block reports from both NNs
    waitForBlockReport(mockNN1);
    waitForBlockReport(mockNN2);

    // When we receive a block, it should report it to both NNs
    bpos.notifyNamenodeReceivedBlock(FAKE_BLOCK, "", "");

    ReceivedDeletedBlockInfo[] ret = waitForBlockReceived(FAKE_BLOCK, mockNN1);
    assertEquals(1, ret.length);
    assertEquals(FAKE_BLOCK.getLocalBlock(), ret[0].getBlock());
    
    ret = waitForBlockReceived(FAKE_BLOCK, mockNN2);
    assertEquals(1, ret.length);
    assertEquals(FAKE_BLOCK.getLocalBlock(), ret[0].getBlock());

  } finally {
    bpos.stop();
  }
}
 
Example #13
Source File: BPServiceActor.java    From big-c with Apache License 2.0
/**
 * Dequeue and return all pending incremental block report state.
 * @return the dequeued ReceivedDeletedBlockInfo entries; the pending map is cleared.
 */
ReceivedDeletedBlockInfo[] dequeueBlockInfos() {
  ReceivedDeletedBlockInfo[] blockInfos =
      pendingIncrementalBR.values().toArray(
          new ReceivedDeletedBlockInfo[getBlockInfoCount()]);

  pendingIncrementalBR.clear();
  return blockInfos;
}
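Examples #11 and #13 (and their hadoop counterparts later in this list) are two halves of one retry pattern inside BPServiceActor: drain the pending map with dequeueBlockInfos, attempt the incremental report RPC, and, if the send fails, re-queue the drained entries with putMissingBlockInfos so they go out with the next heartbeat. The sketch below models that pattern with a plain LinkedHashMap instead of the real PerStoragePendingIncrementalBR class; the class name, the queue method, and the sendToNameNode stub are assumptions made to keep the example self-contained.

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;

public class PendingIbrRetrySketch {

  // Simplified stand-in for the pending map, keyed by block id like the real class.
  private final Map<Long, ReceivedDeletedBlockInfo> pendingIncrementalBR =
      new LinkedHashMap<>();

  void queue(ReceivedDeletedBlockInfo rdbi) {
    pendingIncrementalBR.put(rdbi.getBlock().getBlockId(), rdbi);
  }

  // Mirrors Example #13: drain and return everything that is pending.
  ReceivedDeletedBlockInfo[] dequeueBlockInfos() {
    ReceivedDeletedBlockInfo[] blockInfos =
        pendingIncrementalBR.values().toArray(new ReceivedDeletedBlockInfo[0]);
    pendingIncrementalBR.clear();
    return blockInfos;
  }

  // Mirrors Example #11: re-queue entries that are not already pending again.
  int putMissingBlockInfos(ReceivedDeletedBlockInfo[] blockArray) {
    int blocksPut = 0;
    for (ReceivedDeletedBlockInfo rdbi : blockArray) {
      if (!pendingIncrementalBR.containsKey(rdbi.getBlock().getBlockId())) {
        pendingIncrementalBR.put(rdbi.getBlock().getBlockId(), rdbi);
        ++blocksPut;
      }
    }
    return blocksPut;
  }

  // The retry pattern: drain, try to send, and put back on failure.
  void reportOnce() {
    ReceivedDeletedBlockInfo[] drained = dequeueBlockInfos();
    if (drained.length == 0) {
      return;
    }
    boolean success = false;
    try {
      success = sendToNameNode(drained);   // hypothetical RPC stub
    } finally {
      if (!success) {
        putMissingBlockInfos(drained);     // retried on the next heartbeat
      }
    }
  }

  // Stand-in for the blockReceivedAndDeleted RPC shown in other examples.
  private boolean sendToNameNode(ReceivedDeletedBlockInfo[] blocks) {
    return blocks.length > 0;
  }

  public static void main(String[] args) {
    PendingIbrRetrySketch sketch = new PendingIbrRetrySketch();
    sketch.queue(new ReceivedDeletedBlockInfo(
        new Block(42L, 512L, 1000L), BlockStatus.RECEIVED_BLOCK, null));
    sketch.reportOnce();
  }
}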
 
Example #14
Source File: BPServiceActor.java    From big-c with Apache License 2.0
void notifyNamenodeDeletedBlock(
    ReceivedDeletedBlockInfo bInfo, String storageUuid) {
  synchronized (pendingIncrementalBRperStorage) {
    addPendingReplicationBlockInfo(
        bInfo, dn.getFSDataset().getStorage(storageUuid));
  }
}
 
Example #15
Source File: BPServiceActor.java    From hadoop with Apache License 2.0
void notifyNamenodeBlock(ReceivedDeletedBlockInfo bInfo,
    String storageUuid, boolean now) {
  synchronized (pendingIncrementalBRperStorage) {
    addPendingReplicationBlockInfo(
        bInfo, dn.getFSDataset().getStorage(storageUuid));
    sendImmediateIBR = true;
    // If now is true, the report is sent right away.
    // Otherwise, it will be sent out in the next heartbeat.
    if (now) {
      pendingIncrementalBRperStorage.notifyAll();
    }
  }
}
 
Example #16
Source File: BPServiceActor.java    From big-c with Apache License 2.0
void notifyNamenodeBlock(ReceivedDeletedBlockInfo bInfo,
    String storageUuid, boolean now) {
  synchronized (pendingIncrementalBRperStorage) {
    addPendingReplicationBlockInfo(
        bInfo, dn.getFSDataset().getStorage(storageUuid));
    sendImmediateIBR = true;
    // If now is true, the report is sent right away.
    // Otherwise, it will be sent out in the next heartbeat.
    if (now) {
      pendingIncrementalBRperStorage.notifyAll();
    }
  }
}
 
Example #17
Source File: BPServiceActor.java    From big-c with Apache License 2.0
/**
 * Add a blockInfo for notification to NameNode. If another entry
 * exists for the same block it is removed.
 *
 * Caller must synchronize access using pendingIncrementalBRperStorage.
 */
void addPendingReplicationBlockInfo(ReceivedDeletedBlockInfo bInfo,
    DatanodeStorage storage) {
  // Make sure another entry for the same block is first removed.
  // There may only be one such entry.
  for (Map.Entry<DatanodeStorage, PerStoragePendingIncrementalBR> entry :
        pendingIncrementalBRperStorage.entrySet()) {
    if (entry.getValue().removeBlockInfo(bInfo)) {
      break;
    }
  }
  getIncrementalBRMapForStorage(storage).putBlockInfo(bInfo);
}
 
Example #18
Source File: TestBPOfferService.java    From hadoop with Apache License 2.0
/**
 * Test that the BPOS can register to talk to two different NNs,
 * sends block reports to both, etc.
 */
@Test
public void testBasicFunctionality() throws Exception {
  BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
  bpos.start();
  try {
    waitForInitialization(bpos);
    
    // The DN should have registered with both NNs.
    Mockito.verify(mockNN1).registerDatanode(
        Mockito.any(DatanodeRegistration.class));
    Mockito.verify(mockNN2).registerDatanode(
        Mockito.any(DatanodeRegistration.class));
    
    // Should get block reports from both NNs
    waitForBlockReport(mockNN1);
    waitForBlockReport(mockNN2);

    // When we receive a block, it should report it to both NNs
    bpos.notifyNamenodeReceivedBlock(FAKE_BLOCK, "", "");

    ReceivedDeletedBlockInfo[] ret = waitForBlockReceived(FAKE_BLOCK, mockNN1);
    assertEquals(1, ret.length);
    assertEquals(FAKE_BLOCK.getLocalBlock(), ret[0].getBlock());
    
    ret = waitForBlockReceived(FAKE_BLOCK, mockNN2);
    assertEquals(1, ret.length);
    assertEquals(FAKE_BLOCK.getLocalBlock(), ret[0].getBlock());

  } finally {
    bpos.stop();
  }
}
 
Example #19
Source File: DatanodeProtocolServerSideTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
    RpcController controller, BlockReceivedAndDeletedRequestProto request)
    throws ServiceException {
  List<StorageReceivedDeletedBlocksProto> sBlocks = request.getBlocksList();
  StorageReceivedDeletedBlocks[] info = 
      new StorageReceivedDeletedBlocks[sBlocks.size()];
  for (int i = 0; i < sBlocks.size(); i++) {
    StorageReceivedDeletedBlocksProto sBlock = sBlocks.get(i);
    List<ReceivedDeletedBlockInfoProto> list = sBlock.getBlocksList();
    ReceivedDeletedBlockInfo[] rdBlocks = 
        new ReceivedDeletedBlockInfo[list.size()];
    for (int j = 0; j < list.size(); j++) {
      rdBlocks[j] = PBHelper.convert(list.get(j));
    }
    if (sBlock.hasStorage()) {
      info[i] = new StorageReceivedDeletedBlocks(
          PBHelper.convert(sBlock.getStorage()), rdBlocks);
    } else {
      info[i] = new StorageReceivedDeletedBlocks(sBlock.getStorageUuid(), rdBlocks);
    }
  }
  try {
    impl.blockReceivedAndDeleted(PBHelper.convert(request.getRegistration()),
        request.getBlockPoolId(), info);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE;
}
 
Example #20
Source File: BPServiceActor.java    From hadoop with Apache License 2.0
/**
 * Add a blockInfo for notification to NameNode. If another entry
 * exists for the same block it is removed.
 *
 * Caller must synchronize access using pendingIncrementalBRperStorage.
 */
void addPendingReplicationBlockInfo(ReceivedDeletedBlockInfo bInfo,
    DatanodeStorage storage) {
  // Make sure another entry for the same block is first removed.
  // There may only be one such entry.
  for (Map.Entry<DatanodeStorage, PerStoragePendingIncrementalBR> entry :
        pendingIncrementalBRperStorage.entrySet()) {
    if (entry.getValue().removeBlockInfo(bInfo)) {
      break;
    }
  }
  getIncrementalBRMapForStorage(storage).putBlockInfo(bInfo);
}
 
Example #21
Source File: BPServiceActor.java    From hadoop with Apache License 2.0
void notifyNamenodeDeletedBlock(
    ReceivedDeletedBlockInfo bInfo, String storageUuid) {
  synchronized (pendingIncrementalBRperStorage) {
    addPendingReplicationBlockInfo(
        bInfo, dn.getFSDataset().getStorage(storageUuid));
  }
}
 
Example #22
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static ReceivedDeletedBlockInfoProto convert(
    ReceivedDeletedBlockInfo receivedDeletedBlockInfo) {
  ReceivedDeletedBlockInfoProto.Builder builder = 
      ReceivedDeletedBlockInfoProto.newBuilder();
  
  ReceivedDeletedBlockInfoProto.BlockStatus status;
  switch (receivedDeletedBlockInfo.getStatus()) {
  case RECEIVING_BLOCK:
    status = ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVING;
    break;
  case RECEIVED_BLOCK:
    status = ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVED;
    break;
  case DELETED_BLOCK:
    status = ReceivedDeletedBlockInfoProto.BlockStatus.DELETED;
    break;
  default:
    throw new IllegalArgumentException("Bad status: " +
        receivedDeletedBlockInfo.getStatus());
  }
  builder.setStatus(status);
  
  if (receivedDeletedBlockInfo.getDelHints() != null) {
    builder.setDeleteHint(receivedDeletedBlockInfo.getDelHints());
  }
  return builder.setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
      .build();
}
 
Example #23
Source File: BPServiceActor.java    From hadoop with Apache License 2.0
/**
 * Dequeue and return all pending incremental block report state.
 * @return the dequeued ReceivedDeletedBlockInfo entries; the pending map is cleared.
 */
ReceivedDeletedBlockInfo[] dequeueBlockInfos() {
  ReceivedDeletedBlockInfo[] blockInfos =
      pendingIncrementalBR.values().toArray(
          new ReceivedDeletedBlockInfo[getBlockInfoCount()]);

  pendingIncrementalBR.clear();
  return blockInfos;
}
 
Example #24
Source File: BPServiceActor.java    From hadoop with Apache License 2.0
/**
 * Add blocks from blockArray to pendingIncrementalBR, unless the
 * block already exists in pendingIncrementalBR.
 * @param blockArray list of blocks to add.
 * @return the number of missing blocks that we added.
 */
int putMissingBlockInfos(ReceivedDeletedBlockInfo[] blockArray) {
  int blocksPut = 0;
  for (ReceivedDeletedBlockInfo rdbi : blockArray) {
    if (!pendingIncrementalBR.containsKey(rdbi.getBlock().getBlockId())) {
      pendingIncrementalBR.put(rdbi.getBlock().getBlockId(), rdbi);
      ++blocksPut;
    }
  }
  return blocksPut;
}
 
Example #25
Source File: TestIncrementalBlockReports.java    From hadoop with Apache License 2.0
/**
 * Inject a fake 'deleted' block into the BPServiceActor state.
 */
private void injectBlockDeleted() {
  ReceivedDeletedBlockInfo rdbi = new ReceivedDeletedBlockInfo(
      getDummyBlock(), BlockStatus.DELETED_BLOCK, null);
  actor.notifyNamenodeDeletedBlock(rdbi, storageUuid);
}
 
Example #26
Source File: TestTriggerBlockReport.java    From big-c with Apache License 2.0
private void testTriggerBlockReport(boolean incremental) throws Exception {
  Configuration conf = new HdfsConfiguration();

  // Set a really long value for dfs.blockreport.intervalMsec and
  // dfs.heartbeat.interval, so that incremental block reports and heartbeats
  // won't be sent during this test unless they're triggered
  // manually.
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10800000L);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1080L);

  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  DatanodeProtocolClientSideTranslatorPB spy =
      DataNodeTestUtils.spyOnBposToNN(
          cluster.getDataNodes().get(0), cluster.getNameNode());
  DFSTestUtil.createFile(fs, new Path("/abc"), 16, (short) 1, 1L);

  // We should get 1 incremental block report.
  Mockito.verify(spy, timeout(60000).times(1)).blockReceivedAndDeleted(
      any(DatanodeRegistration.class),
      anyString(),
      any(StorageReceivedDeletedBlocks[].class));

  // We should not receive any more incremental or full block reports,
  // since the interval we configured is so long.
  for (int i = 0; i < 3; i++) {
    Thread.sleep(10);
    Mockito.verify(spy, times(0)).blockReport(
        any(DatanodeRegistration.class),
        anyString(),
        any(StorageBlockReport[].class),
        Mockito.<BlockReportContext>anyObject());
    Mockito.verify(spy, times(1)).blockReceivedAndDeleted(
        any(DatanodeRegistration.class),
        anyString(),
        any(StorageReceivedDeletedBlocks[].class));
  }

  // Create a fake block deletion notification on the DataNode.
  // This will be sent with the next incremental block report.
  ReceivedDeletedBlockInfo rdbi = new ReceivedDeletedBlockInfo(
      new Block(5678, 512, 1000),  BlockStatus.DELETED_BLOCK, null);
  DataNode datanode = cluster.getDataNodes().get(0);
  BPServiceActor actor =
      datanode.getAllBpOs()[0].getBPServiceActors().get(0);
  String storageUuid =
      datanode.getFSDataset().getVolumes().get(0).getStorageID();
  actor.notifyNamenodeDeletedBlock(rdbi, storageUuid);

  // Manually trigger a block report.
  datanode.triggerBlockReport(
      new BlockReportOptions.Factory().
          setIncremental(incremental).
          build()
  );

  // triggerBlockReport returns before the block report is
  // actually sent.  Wait for it to be sent here.
  if (incremental) {
    Mockito.verify(spy, timeout(60000).times(2)).
        blockReceivedAndDeleted(
            any(DatanodeRegistration.class),
            anyString(),
            any(StorageReceivedDeletedBlocks[].class));
  } else {
    Mockito.verify(spy, timeout(60000)).blockReport(
        any(DatanodeRegistration.class),
        anyString(),
        any(StorageBlockReport[].class),
        Mockito.<BlockReportContext>anyObject());
  }

  cluster.shutdown();
}
 
Example #27
Source File: TestIncrementalBlockReports.java    From big-c with Apache License 2.0
/**
 * Inject a fake 'deleted' block into the BPServiceActor state.
 */
private void injectBlockDeleted() {
  ReceivedDeletedBlockInfo rdbi = new ReceivedDeletedBlockInfo(
      getDummyBlock(), BlockStatus.DELETED_BLOCK, null);
  actor.notifyNamenodeDeletedBlock(rdbi, storageUuid);
}
 
Example #28
Source File: TestIncrementalBlockReports.java    From big-c with Apache License 2.0
/**
 * Inject a fake 'received' block into the BPServiceActor state.
 */
private void injectBlockReceived() {
  ReceivedDeletedBlockInfo rdbi = new ReceivedDeletedBlockInfo(
      getDummyBlock(), BlockStatus.RECEIVED_BLOCK, null);
  actor.notifyNamenodeBlock(rdbi, storageUuid, true);
}
 
Example #29
Source File: TestBlockManager.java    From big-c with Apache License 2.0
/**
 * Test that when the NN starts and is in safe mode, it first receives an
 * incremental block report and then the first full block report.
 */
@Test
public void testSafeModeIBRBeforeFirstFullBR() throws Exception {
  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  DatanodeDescriptor node = nodes.get(0);
  DatanodeStorageInfo ds = node.getStorageInfos()[0];
  node.isAlive = true;
  DatanodeRegistration nodeReg =  new DatanodeRegistration(node, null, null, "");

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node);
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());
  // Build an incremental report
  List<ReceivedDeletedBlockInfo> rdbiList = new ArrayList<>();
  // Build a full report
  BlockListAsLongs.Builder builder = BlockListAsLongs.builder();

  // blk_42 is finalized.
  long receivedBlockId = 42;  // arbitrary
  BlockInfoContiguous receivedBlock = addBlockToBM(receivedBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  builder.add(new FinalizedReplica(receivedBlock, null, null));

  // blk_43 is under construction.
  long receivingBlockId = 43;
  BlockInfoContiguous receivingBlock = addUcBlockToBM(receivingBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
  builder.add(new ReplicaBeingWritten(receivingBlock, null, null, null));

  // blk_44 has 2 records in IBR. It's finalized. So full BR has 1 record.
  long receivingReceivedBlockId = 44;
  BlockInfoContiguous receivingReceivedBlock = addBlockToBM(receivingReceivedBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  builder.add(new FinalizedReplica(receivingReceivedBlock, null, null));

  // blk_45 is not in full BR, because it's deleted.
  long ReceivedDeletedBlockId = 45;
  rdbiList.add(new ReceivedDeletedBlockInfo(
      new Block(ReceivedDeletedBlockId),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  rdbiList.add(new ReceivedDeletedBlockInfo(
      new Block(ReceivedDeletedBlockId),
      ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null));

  // blk_46 exists in DN for a long time, so it's in full BR, but not in IBR.
  long existedBlockId = 46;
  BlockInfoContiguous existedBlock = addBlockToBM(existedBlockId);
  builder.add(new FinalizedReplica(existedBlock, null, null));

  // process IBR and full BR
  StorageReceivedDeletedBlocks srdb =
      new StorageReceivedDeletedBlocks(new DatanodeStorage(ds.getStorageID()),
          rdbiList.toArray(new ReceivedDeletedBlockInfo[rdbiList.size()]));
  bm.processIncrementalBlockReport(node, srdb);
  // Make sure it's the first full report
  assertEquals(0, ds.getBlockReportCount());
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      builder.build(), null, false);
  assertEquals(1, ds.getBlockReportCount());

  // verify the storage info is correct
  assertTrue(bm.getStoredBlock(new Block(receivedBlockId)).findStorageInfo
      (ds) >= 0);
  assertTrue(((BlockInfoContiguousUnderConstruction) bm.
      getStoredBlock(new Block(receivingBlockId))).getNumExpectedLocations() > 0);
  assertTrue(bm.getStoredBlock(new Block(receivingReceivedBlockId))
      .findStorageInfo(ds) >= 0);
  assertNull(bm.getStoredBlock(new Block(ReceivedDeletedBlockId)));
  assertTrue(bm.getStoredBlock(new Block(existedBlock)).findStorageInfo
      (ds) >= 0);
}
 
Example #30
Source File: TestBlockManager.java    From hadoop with Apache License 2.0
/**
 * Test that when the NN starts and is in safe mode, it first receives an
 * incremental block report and then the first full block report.
 */
@Test
public void testSafeModeIBRBeforeFirstFullBR() throws Exception {
  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  DatanodeDescriptor node = nodes.get(0);
  DatanodeStorageInfo ds = node.getStorageInfos()[0];
  node.isAlive = true;
  DatanodeRegistration nodeReg =  new DatanodeRegistration(node, null, null, "");

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node);
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());
  // Build an incremental report
  List<ReceivedDeletedBlockInfo> rdbiList = new ArrayList<>();
  // Build a full report
  BlockListAsLongs.Builder builder = BlockListAsLongs.builder();

  // blk_42 is finalized.
  long receivedBlockId = 42;  // arbitrary
  BlockInfoContiguous receivedBlock = addBlockToBM(receivedBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  builder.add(new FinalizedReplica(receivedBlock, null, null));

  // blk_43 is under construction.
  long receivingBlockId = 43;
  BlockInfoContiguous receivingBlock = addUcBlockToBM(receivingBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
  builder.add(new ReplicaBeingWritten(receivingBlock, null, null, null));

  // blk_44 has 2 records in IBR. It's finalized. So full BR has 1 record.
  long receivingReceivedBlockId = 44;
  BlockInfoContiguous receivingReceivedBlock = addBlockToBM(receivingReceivedBlockId);
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
  rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  builder.add(new FinalizedReplica(receivingReceivedBlock, null, null));

  // blk_45 is not in full BR, because it's deleted.
  long ReceivedDeletedBlockId = 45;
  rdbiList.add(new ReceivedDeletedBlockInfo(
      new Block(ReceivedDeletedBlockId),
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
  rdbiList.add(new ReceivedDeletedBlockInfo(
      new Block(ReceivedDeletedBlockId),
      ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null));

  // blk_46 exists in DN for a long time, so it's in full BR, but not in IBR.
  long existedBlockId = 46;
  BlockInfoContiguous existedBlock = addBlockToBM(existedBlockId);
  builder.add(new FinalizedReplica(existedBlock, null, null));

  // process IBR and full BR
  StorageReceivedDeletedBlocks srdb =
      new StorageReceivedDeletedBlocks(new DatanodeStorage(ds.getStorageID()),
          rdbiList.toArray(new ReceivedDeletedBlockInfo[rdbiList.size()]));
  bm.processIncrementalBlockReport(node, srdb);
  // Make sure it's the first full report
  assertEquals(0, ds.getBlockReportCount());
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      builder.build(), null, false);
  assertEquals(1, ds.getBlockReportCount());

  // verify the storage info is correct
  assertTrue(bm.getStoredBlock(new Block(receivedBlockId)).findStorageInfo
      (ds) >= 0);
  assertTrue(((BlockInfoContiguousUnderConstruction) bm.
      getStoredBlock(new Block(receivingBlockId))).getNumExpectedLocations() > 0);
  assertTrue(bm.getStoredBlock(new Block(receivingReceivedBlockId))
      .findStorageInfo(ds) >= 0);
  assertNull(bm.getStoredBlock(new Block(ReceivedDeletedBlockId)));
  assertTrue(bm.getStoredBlock(new Block(existedBlock)).findStorageInfo
      (ds) >= 0);
}