org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration. Each example is drawn from an open-source project; the source file and license are noted above each snippet.
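Before working through the examples, here is a minimal sketch of how a DatanodeRegistration can be assembled by hand in test or benchmark code, modeled on Examples #4 and #6 below. It assumes the Hadoop 2.x constructors those examples use; the class name, UUID, ports, and version string are illustrative, not taken from any project.

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.security.token.block.ExportedBlockKeys;

public class RegistrationSketch {
  static DatanodeRegistration newFakeRegistration() {
    // Identity of the (fake) datanode: IP, hostname, UUID, then the
    // transfer, info, secure-info, and IPC ports.
    DatanodeID dnId = new DatanodeID("127.0.0.1", "localhost",
        "fake-datanode-uuid",          // hypothetical UUID for illustration
        50010, 50075, 50475, 50020);   // stock default ports
    // A null StorageInfo and empty block keys suffice for tests that never
    // touch storage state; Example #6 passes nulls the same way.
    return new DatanodeRegistration(dnId, null, new ExportedBlockKeys(),
        "2.7.0" /* software version string; an assumption */);
  }
}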
Example #1
Source File: NameNode.java    From RDFS with Apache License 2.0
@Override
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
                                   long[] blocks) throws IOException {
  verifyRequest(nodeReg);
  myMetrics.numBlockReport.inc();
  BlockListAsLongs blist = new BlockListAsLongs(blocks);
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
           +"from "+nodeReg.getName()+" "+blist.getNumberOfBlocks() +" blocks");
  }

  namesystem.processReport(nodeReg, blist);
  if (getFSImage().isUpgradeFinalized())
    return DatanodeCommand.FINALIZE;
  return null;
}
 
Example #2
Source File: AvatarNode.java    From RDFS with Apache License 2.0
public DatanodeCommand blockReportNew(DatanodeRegistration nodeReg, BlockReport rep) throws IOException {
  if (runInfo.shutdown || !runInfo.isRunning) {
    return null;
  }
  if (ignoreDatanodes()) {
    LOG.info("Standby fell behind. Telling " + nodeReg.toString() +
              " to back off");
    // Do not process block reports yet as the ingest thread is catching up
    return AvatarDatanodeCommand.BACKOFF;
  }

  if (currentAvatar == Avatar.STANDBY) {
    Collection<Block> failed = super.blockReportWithRetries(nodeReg, rep);

    BlockCommand bCmd = new BlockCommand(DatanodeProtocols.DNA_RETRY,
        failed.toArray(new Block[failed.size()]));
    return bCmd;
  } else {
    return super.blockReport(nodeReg, rep);
  }
}
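Note the two modes here: while the standby's ingest thread is catching up, it answers every report with AvatarDatanodeCommand.BACKOFF, and once it does process a report it returns the blocks that failed in a DNA_RETRY BlockCommand so the datanode resends only those.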
 
Example #3
Source File: ReportBadBlockAction.java    From big-c with Apache License 2.0
@Override
public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode, 
  DatanodeRegistration bpRegistration) throws BPServiceActorActionException {
  if (bpRegistration == null) {
    return;
  }
  DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
  String[] uuids = { storageUuid };
  StorageType[] types = { storageType };
  LocatedBlock[] locatedBlock = { new LocatedBlock(block,
      dnArr, uuids, types) };

  try {
    bpNamenode.reportBadBlocks(locatedBlock);
  } catch (RemoteException re) {
    DataNode.LOG.info("reportBadBlock encountered RemoteException for "
        + "block:  " + block , re);
  } catch (IOException e) {
    throw new BPServiceActorActionException("Failed to report bad block "
        + block + " to namenode: ");
  }
}
 
Example #4
Source File: NNThroughputBenchmark.java    From hadoop with Apache License 2.0
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
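The handshake above mirrors what a real datanode does at startup: fetch namespace info with versionRequest(), register via registerDatanode() (which can return an updated registration, hence the reassignment), and then send a first, empty block report for a freshly generated storage.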
 
Example #5
Source File: DatanodeProtocolClientSideTranslatorPB.java    From big-c with Apache License 2.0
@Override
public void blockReceivedAndDeleted(DatanodeRegistration registration,
    String poolId, StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks)
    throws IOException {
  BlockReceivedAndDeletedRequestProto.Builder builder = 
      BlockReceivedAndDeletedRequestProto.newBuilder()
      .setRegistration(PBHelper.convert(registration))
      .setBlockPoolId(poolId);
  for (StorageReceivedDeletedBlocks storageBlock : receivedAndDeletedBlocks) {
    StorageReceivedDeletedBlocksProto.Builder repBuilder = 
        StorageReceivedDeletedBlocksProto.newBuilder();
    repBuilder.setStorageUuid(storageBlock.getStorage().getStorageID());  // Set for wire compatibility.
    repBuilder.setStorage(PBHelper.convert(storageBlock.getStorage()));
    for (ReceivedDeletedBlockInfo rdBlock : storageBlock.getBlocks()) {
      repBuilder.addBlocks(PBHelper.convert(rdBlock));
    }
    builder.addBlocks(repBuilder.build());
  }
  try {
    rpcProxy.blockReceivedAndDeleted(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
}
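This is the client-side translator pattern used across the datanode protocol: the plain Java call is repackaged into a protobuf request (the storage UUID is set redundantly, as the comment notes, for wire compatibility with older namenodes), and a ServiceException from the RPC layer is unwrapped back into the underlying remote IOException.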
 
Example #6
Source File: TestBlockManager.java    From hadoop with Apache License 2.0
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  DatanodeStorageInfo ds = node.getStorageInfos()[0];

  node.isAlive = true;

  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy    
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());
  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      BlockListAsLongs.EMPTY, null, false);
  assertEquals(1, ds.getBlockReportCount());
}
 
Example #7
Source File: TestBPOfferService.java    From big-c with Apache License 2.0
private ReceivedDeletedBlockInfo[] waitForBlockReceived(
    final ExtendedBlock fakeBlock,
    final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
  final String fakeBlockPoolId = fakeBlock.getBlockPoolId();
  final ArgumentCaptor<StorageReceivedDeletedBlocks[]> captor =
    ArgumentCaptor.forClass(StorageReceivedDeletedBlocks[].class);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {

    @Override
    public Boolean get() {
      try {
        Mockito.verify(mockNN).blockReceivedAndDeleted(
          Mockito.<DatanodeRegistration>anyObject(),
          Mockito.eq(fakeBlockPoolId),
          captor.capture());
        return true;
      } catch (Throwable t) {
        return false;
      }
    }
  }, 100, 10000);
  return captor.getValue()[0].getBlocks();
}
 
Example #8
Source File: DataNode.java    From hadoop with Apache License 2.0
/**
 * Check that the registration returned from a NameNode is consistent
 * with the information in the storage. If the storage is fresh/unformatted,
 * sets the storage ID based on this registration.
 * Also updates the block pool's state in the secret manager.
 */
synchronized void bpRegistrationSucceeded(DatanodeRegistration bpRegistration,
    String blockPoolId) throws IOException {
  // Set the ID if we haven't already
  if (null == id) {
    id = bpRegistration;
  }

  if(!storage.getDatanodeUuid().equals(bpRegistration.getDatanodeUuid())) {
    throw new IOException("Inconsistent Datanode IDs. Name-node returned "
        + bpRegistration.getDatanodeUuid()
        + ". Expecting " + storage.getDatanodeUuid());
  }
  
  registerBlockPoolWithSecretManager(bpRegistration, blockPoolId);
}
 
Example #9
Source File: TestBlockManager.java    From big-c with Apache License 2.0
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  DatanodeStorageInfo ds = node.getStorageInfos()[0];

  node.isAlive = true;

  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy    
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());
  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      BlockListAsLongs.EMPTY, null, false);
  assertEquals(1, ds.getBlockReportCount());
}
 
Example #10
Source File: TestBPOfferService.java    From hadoop with Apache License 2.0
private void waitForBlockReport(
    final DatanodeProtocolClientSideTranslatorPB mockNN1,
    final DatanodeProtocolClientSideTranslatorPB mockNN2)
        throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return get(mockNN1) || get(mockNN2);
    }

    private Boolean get(DatanodeProtocolClientSideTranslatorPB mockNN) {
      try {
        Mockito.verify(mockNN).blockReport(
                Mockito.<DatanodeRegistration>anyObject(),
                Mockito.eq(FAKE_BPID),
                Mockito.<StorageBlockReport[]>anyObject(),
                Mockito.<BlockReportContext>anyObject());
        return true;
      } catch (Throwable t) {
        LOG.info("waiting on block report: " + t.getMessage());
        return false;
      }
    }
  }, 500, 10000);
}
 
Example #11
Source File: TestBPOfferService.java    From big-c with Apache License 2.0
private void waitForBlockReport(final DatanodeProtocolClientSideTranslatorPB mockNN)
    throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        Mockito.verify(mockNN).blockReport(
            Mockito.<DatanodeRegistration>anyObject(),  
            Mockito.eq(FAKE_BPID),
            Mockito.<StorageBlockReport[]>anyObject(),
            Mockito.<BlockReportContext>anyObject());
        return true;
      } catch (Throwable t) {
        LOG.info("waiting on block report: " + t.getMessage());
        return false;
      }
    }
  }, 500, 10000);
}
 
Example #12
Source File: FSNamesystem.java    From hadoop-gpu with Apache License 2.0
/**
 * Checks whether the node is on the hosts list; if it is not, it will be
 * ignored and the registration rejected. If the node is in the hosts list
 * but is also on the exclude list, it will be registered and then
 * decommissioned.
 * Returns FALSE if the node is rejected for registration.
 * Returns TRUE if the node is registered (including when it is on the
 * exclude list and is being decommissioned).
 */
private synchronized boolean verifyNodeRegistration(DatanodeRegistration nodeReg, String ipAddr) 
  throws IOException {
  if (!inHostsList(nodeReg, ipAddr)) {
    return false;    
  }
  if (inExcludedHostsList(nodeReg, ipAddr)) {
    DatanodeDescriptor node = getDatanode(nodeReg);
    if (node == null) {
      throw new IOException("verifyNodeRegistration: unknown datanode " +
                            nodeReg.getName());
    }
    if (!checkDecommissionStateInternal(node)) {
      startDecommission(node);
    }
  } 
  return true;
}
 
Example #13
Source File: TestNNHandlesBlockReportPerStorage.java    From hadoop with Apache License 2.0
@Override
protected void sendBlockReports(DatanodeRegistration dnR, String poolId,
    StorageBlockReport[] reports) throws IOException {
  int i = 0;
  for (StorageBlockReport report : reports) {
    LOG.info("Sending block report for storage " + report.getStorage().getStorageID());
    StorageBlockReport[] singletonReport = { report };
    cluster.getNameNodeRpc().blockReport(dnR, poolId, singletonReport,
        new BlockReportContext(reports.length, i, System.nanoTime()));
    i++;
  }
}
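The BlockReportContext arguments here are, in order, the total number of RPCs the logical report is split into (reports.length), the zero-based index of this RPC (i), and a report ID; System.nanoTime() serves as the ID that ties the per-storage RPCs of one report together.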
 
Example #14
Source File: AvatarDataNode.java    From RDFS with Apache License 2.0
private ServicePair(InetSocketAddress nameAddr1, InetSocketAddress nameAddr2,
    InetSocketAddress avatarAddr1, InetSocketAddress avatarAddr2,
    InetSocketAddress defaultAddr, String nameserviceId) {
  this.nameAddr1 = nameAddr1;
  this.nameAddr2 = nameAddr2;
  this.avatarAddr1 = avatarAddr1;
  this.avatarAddr2 = avatarAddr2;
  this.defaultAddr = defaultAddr.getHostName() + ":" + defaultAddr.getPort();
  this.nameserviceId = nameserviceId;
  zkClient = new AvatarZooKeeperClient(getConf(), null);
  this.nsRegistration = new DatanodeRegistration(getMachineName());
}
 
Example #15
Source File: TestDnRespectsBlockReportSplitThreshold.java    From hadoop with Apache License 2.0
/**
 * Tests the behavior when the count of blocks is exactly one less than
 * the threshold.
 */
@Test(timeout=300000)
public void testCornerCaseUnderThreshold() throws IOException, InterruptedException {
  startUpCluster(BLOCKS_IN_FILE + 1);
  NameNode nn = cluster.getNameNode();
  DataNode dn = cluster.getDataNodes().get(0);

  // Create a file with a few blocks.
  createFile(GenericTestUtils.getMethodName(), BLOCKS_IN_FILE);

  // Insert a spy object for the NN RPC.
  DatanodeProtocolClientSideTranslatorPB nnSpy =
      DataNodeTestUtils.spyOnBposToNN(dn, nn);

  // Trigger a block report so there is an interaction with the spy
  // object.
  DataNodeTestUtils.triggerBlockReport(dn);

  ArgumentCaptor<StorageBlockReport[]> captor =
      ArgumentCaptor.forClass(StorageBlockReport[].class);

  Mockito.verify(nnSpy, times(1)).blockReport(
      any(DatanodeRegistration.class),
      anyString(),
      captor.capture(),  Mockito.<BlockReportContext>anyObject());

  verifyCapturedArguments(captor, cluster.getStoragesPerDatanode(), BLOCKS_IN_FILE);
}
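The spy-and-captor pattern above recurs throughout these tests: DataNodeTestUtils.spyOnBposToNN() wraps the datanode's NN-facing translator in a Mockito spy, so a manually triggered block report can be intercepted and its StorageBlockReport[] argument captured and verified without stubbing out the RPC layer.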
 
Example #16
Source File: NameNode.java    From hadoop-gpu with Apache License 2.0
public void blockReceived(DatanodeRegistration nodeReg, 
                          Block blocks[],
                          String delHints[]) throws IOException {
  verifyRequest(nodeReg);
  stateChangeLog.debug("*BLOCK* NameNode.blockReceived: "
                       +"from "+nodeReg.getName()+" "+blocks.length+" blocks.");
  for (int i = 0; i < blocks.length; i++) {
    namesystem.blockReceived(nodeReg, blocks[i], delHints[i]);
  }
}
 
Example #17
Source File: TestDataNodeHotSwapVolumes.java    From hadoop with Apache License 2.0
/** Test that a full block report is sent after hot swapping volumes */
@Test(timeout=100000)
public void testFullBlockReportAfterRemovingVolumes()
    throws IOException, ReconfigurationException {

  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);

  // Similar to TestTriggerBlockReport, set a really long value for
  // dfs.heartbeat.interval, so that incremental block reports and heartbeats
  // won't be sent during this test unless they're triggered
  // manually.
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10800000L);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1080L);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  cluster.waitActive();

  final DataNode dn = cluster.getDataNodes().get(0);
  DatanodeProtocolClientSideTranslatorPB spy =
      DataNodeTestUtils.spyOnBposToNN(dn, cluster.getNameNode());

  // Remove a data dir from datanode
  File dataDirToKeep = new File(cluster.getDataDirectory(), "data1");
  dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, dataDirToKeep.toString());

  // We should get 1 full report
  Mockito.verify(spy, timeout(60000).times(1)).blockReport(
      any(DatanodeRegistration.class),
      anyString(),
      any(StorageBlockReport[].class),
      any(BlockReportContext.class));
}
 
Example #18
Source File: BlockReportTestBase.java    From big-c with Apache License 2.0
/**
 * Similar to BlockReport_03() but works with two DNs
 * Test writes a file and closes it.
 * The second datanode is started in the cluster.
 * As soon as the replication process is completed, the test finds a block on
 * the second DN and sets its generation stamp (GS) to be less than that of
 * the original.
 * This is markBlockAsCorrupt case 3, so we expect one pending deletion.
 * A block report is forced and the count of corrupt blocks is checked.
 * Another block is then chosen and its length is set to less than the
 * original; a check for another corrupt block is performed after yet
 * another block report.
 *
 * @throws IOException in case of an error
 */
@Test(timeout=300000)
public void blockReport_07() throws Exception {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  final int DN_N1 = DN_N0 + 1;

  // write file and start second node to be "older" than the original
  writeFile(METHOD_NAME, FILE_SIZE, filePath);
  startDNandWait(filePath, true);

  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N1);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();

  assertThat("Wrong number of corrupt blocks",
             cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L));
  assertThat("Wrong number of PendingDeletion blocks",
             cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
  assertThat("Wrong number of PendingReplication blocks",
             cluster.getNamesystem().getPendingReplicationBlocks(), is(0L));

  reports = getBlockReports(dn, poolId, false, true);
  sendBlockReports(dnR, poolId, reports);
  printStats();

  assertThat("Wrong number of corrupt blocks",
             cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
  assertThat("Wrong number of PendingDeletion blocks",
             cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
  assertThat("Wrong number of PendingReplication blocks",
             cluster.getNamesystem().getPendingReplicationBlocks(), is(0L));

  printStats();

}
 
Example #19
Source File: TestBPOfferService.java    From hadoop with Apache License 2.0
/**
 * This test case tests the {@link BPOfferService#trySendErrorReport} method:
 * if a call to the standby namenode times out, it should not affect
 * heartbeat processing on the active namenode, since these calls are
 * made under the write lock.
 * @throws Exception
 */
@Test
public void testTrySendErrorReportWhenStandbyNNTimesOut() throws Exception {
  BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
  bpos.start();
  try {
    waitForInitialization(bpos);
    // Should start with neither NN as active.
    assertNull(bpos.getActiveNN());
    // Have NN1 claim active at txid 1
    mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1);
    bpos.triggerHeartbeatForTests();
    // Now mockNN1 is acting like active namenode and mockNN2 as Standby
    assertSame(mockNN1, bpos.getActiveNN());
    Mockito.doAnswer(new BPOfferServiceSynchronousCallAnswer(0))
        .when(mockNN1).errorReport(Mockito.any(DatanodeRegistration.class),
        Mockito.anyInt(), Mockito.anyString());
    Mockito.doAnswer(new BPOfferServiceSynchronousCallAnswer(1))
        .when(mockNN2).errorReport(Mockito.any(DatanodeRegistration.class),
        Mockito.anyInt(), Mockito.anyString());
    String errorString = "Can't send invalid block " + FAKE_BLOCK;
    bpos.trySendErrorReport(DatanodeProtocol.INVALID_BLOCK, errorString);
    bpos.trySendErrorReport(DatanodeProtocol.INVALID_BLOCK, errorString);
    Thread.sleep(10000);
    long difference = secondCallTime - firstCallTime;
    assertTrue("Active namenode trySendErrorReport processing "
        + "should be independent of standby namenode trySendErrorReport"
        + " processing ", difference < 5000);
  } finally {
    bpos.stop();
  }
}
 
Example #20
Source File: NameNode.java    From RDFS with Apache License 2.0
/**
* Add new replica blocks to the inode-to-target mapping,
* and add the inode file to the DatanodeDescriptor.
*/
public void blocksBeingWrittenReport(DatanodeRegistration nodeReg,
    BlockReport blocks) throws IOException {
  verifyRequest(nodeReg);
  long[] blocksAsLong = blocks.getBlockReportInLongs();
  BlockListAsLongs blist = new BlockListAsLongs(blocksAsLong);
  namesystem.processBlocksBeingWrittenReport(nodeReg, blist);
      
  stateChangeLog.info("*BLOCK* NameNode.blocksBeingWrittenReport: "
      +"from "+nodeReg.getName()+" "+blist.getNumberOfBlocks() +" blocks");
}
 
Example #21
Source File: TestIncrementalBlockReports.java    From hadoop with Apache License 2.0
/**
 * Add a received block entry and then replace it. Ensure that a single
 * IBR is generated and that pending receive request state is cleared.
 * This test case verifies the failure in HDFS-5922.
 *
 * @throws InterruptedException
 * @throws IOException
 */
@Test (timeout=60000)
public void testReplaceReceivedBlock() throws InterruptedException, IOException {
  try {
    // Spy on calls from the DN to the NN
    DatanodeProtocolClientSideTranslatorPB nnSpy = spyOnDnCallsToNn();
    injectBlockReceived();
    injectBlockReceived();    // Overwrite the existing entry.

    // Sleep for a very short time since IBR is generated
    // asynchronously.
    Thread.sleep(2000);

    // Ensure that the received block is reported.
    Mockito.verify(nnSpy, atLeastOnce()).blockReceivedAndDeleted(
        any(DatanodeRegistration.class),
        anyString(),
        any(StorageReceivedDeletedBlocks[].class));

    // Ensure that no more IBRs are pending.
    assertFalse(actor.hasPendingIBR());

  } finally {
    cluster.shutdown();
    cluster = null;
  }
}
 
Example #22
Source File: DatanodeProtocolClientSideTranslatorPB.java    From big-c with Apache License 2.0
@Override
public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
    StorageReport[] reports, long cacheCapacity, long cacheUsed,
    int xmitsInProgress, int xceiverCount, int failedVolumes,
    VolumeFailureSummary volumeFailureSummary) throws IOException {
  HeartbeatRequestProto.Builder builder = HeartbeatRequestProto.newBuilder()
      .setRegistration(PBHelper.convert(registration))
      .setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount)
      .setFailedVolumes(failedVolumes);
  builder.addAllReports(PBHelper.convertStorageReports(reports));
  if (cacheCapacity != 0) {
    builder.setCacheCapacity(cacheCapacity);
  }
  if (cacheUsed != 0) {
    builder.setCacheUsed(cacheUsed);
  }
  if (volumeFailureSummary != null) {
    builder.setVolumeFailureSummary(PBHelper.convertVolumeFailureSummary(
        volumeFailureSummary));
  }
  HeartbeatResponseProto resp;
  try {
    resp = rpcProxy.sendHeartbeat(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  DatanodeCommand[] cmds = new DatanodeCommand[resp.getCmdsList().size()];
  int index = 0;
  for (DatanodeCommandProto p : resp.getCmdsList()) {
    cmds[index] = PBHelper.convert(p);
    index++;
  }
  RollingUpgradeStatus rollingUpdateStatus = null;
  if (resp.hasRollingUpgradeStatus()) {
    rollingUpdateStatus = PBHelper.convert(resp.getRollingUpgradeStatus());
  }
  return new HeartbeatResponse(cmds, PBHelper.convert(resp.getHaStatus()),
      rollingUpdateStatus);
}
 
Example #23
Source File: NameNodeRpcServer.java    From big-c with Apache License 2.0
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
      String poolId, StorageBlockReport[] reports,
      BlockReportContext context) throws IOException {
  checkNNStartup();
  verifyRequest(nodeReg);
  if(blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
         + "from " + nodeReg + ", reports.length=" + reports.length);
  }
  final BlockManager bm = namesystem.getBlockManager(); 
  boolean noStaleStorages = false;
  for (int r = 0; r < reports.length; r++) {
    final BlockListAsLongs blocks = reports[r].getBlocks();
    //
    // BlockManager.processReport accumulates information of prior calls
    // for the same node and storage, so the value returned by the last
    // call of this loop is the final updated value for noStaleStorage.
    //
    noStaleStorages = bm.processReport(nodeReg, reports[r].getStorage(),
        blocks, context, (r == reports.length - 1));
    metrics.incrStorageBlockReportOps();
  }

  if (nn.getFSImage().isUpgradeFinalized() &&
      !namesystem.isRollingUpgrade() &&
      !nn.isStandbyState() &&
      noStaleStorages) {
    return new FinalizeCommand(poolId);
  }

  return null;
}
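A FinalizeCommand is returned only when every storage in the report is known to be fresh and the namenode is active, has a finalized upgrade, and is not in a rolling upgrade; in all other cases the datanode receives null and simply carries on.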
 
Example #24
Source File: DatanodeProtocolClientSideTranslatorPB.java    From big-c with Apache License 2.0
@Override
public DatanodeCommand blockReport(DatanodeRegistration registration,
    String poolId, StorageBlockReport[] reports, BlockReportContext context)
      throws IOException {
  BlockReportRequestProto.Builder builder = BlockReportRequestProto
      .newBuilder().setRegistration(PBHelper.convert(registration))
      .setBlockPoolId(poolId);
  
  boolean useBlocksBuffer = registration.getNamespaceInfo()
      .isCapabilitySupported(Capability.STORAGE_BLOCK_REPORT_BUFFERS);

  for (StorageBlockReport r : reports) {
    StorageBlockReportProto.Builder reportBuilder = StorageBlockReportProto
        .newBuilder().setStorage(PBHelper.convert(r.getStorage()));
    BlockListAsLongs blocks = r.getBlocks();
    if (useBlocksBuffer) {
      reportBuilder.setNumberOfBlocks(blocks.getNumberOfBlocks());
      reportBuilder.addAllBlocksBuffers(blocks.getBlocksBuffers());
    } else {
      for (long value : blocks.getBlockListAsLongs()) {
        reportBuilder.addBlocks(value);
      }
    }
    builder.addReports(reportBuilder.build());
  }
  builder.setContext(PBHelper.convert(context));
  BlockReportResponseProto resp;
  try {
    resp = rpcProxy.blockReport(NULL_CONTROLLER, builder.build());
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
  return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}
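When the namenode has advertised the STORAGE_BLOCK_REPORT_BUFFERS capability (carried in the NamespaceInfo obtained at registration), the block list is sent as pre-encoded buffers plus a block count; otherwise the translator falls back to the older encoding, one long value at a time.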
 
Example #25
Source File: TestStorageReport.java    From big-c with Apache License 2.0
/**
 * Ensure that storage type and storage state are propagated
 * in Storage Reports.
 */
@Test
public void testStorageReportHasStorageTypeAndState() throws IOException {

  // Make sure we are not testing with the default type, that would not
  // be a very good test.
  assertNotSame(storageType, StorageType.DEFAULT);
  NameNode nn = cluster.getNameNode();
  DataNode dn = cluster.getDataNodes().get(0);

  // Insert a spy object for the NN RPC.
  DatanodeProtocolClientSideTranslatorPB nnSpy =
      DataNodeTestUtils.spyOnBposToNN(dn, nn);

  // Trigger a heartbeat so there is an interaction with the spy
  // object.
  DataNodeTestUtils.triggerHeartbeat(dn);

  // Verify that the callback passed in the expected parameters.
  ArgumentCaptor<StorageReport[]> captor =
      ArgumentCaptor.forClass(StorageReport[].class);

  Mockito.verify(nnSpy).sendHeartbeat(
      any(DatanodeRegistration.class),
      captor.capture(),
      anyLong(), anyLong(), anyInt(), anyInt(), anyInt(),
      Mockito.any(VolumeFailureSummary.class));

  StorageReport[] reports = captor.getValue();

  for (StorageReport report: reports) {
    assertThat(report.getStorage().getStorageType(), is(storageType));
    assertThat(report.getStorage().getState(), is(DatanodeStorage.State.NORMAL));
  }
}
 
Example #26
Source File: BlockReceiver.java    From big-c with Apache License 2.0
/**
 * Finalize the block and close the block file
 * @param startTime time when BlockReceiver started receiving the block
 */
private void finalizeBlock(long startTime) throws IOException {
  long endTime = 0;
  // Hold a volume reference to finalize block.
  try (ReplicaHandler handler = BlockReceiver.this.claimReplicaHandler()) {
    BlockReceiver.this.close();
    endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
    block.setNumBytes(replicaInfo.getNumBytes());
    datanode.data.finalizeBlock(block);
  }

  if (pinning) {
    datanode.data.setPinning(block);
  }
  
  datanode.closeBlock(
      block, DataNode.EMPTY_DEL_HINT, replicaInfo.getStorageUuid());
  if (ClientTraceLog.isInfoEnabled() && isClient) {
    long offset = 0;
    DatanodeRegistration dnR = datanode.getDNRegistrationForBP(block
        .getBlockPoolId());
    ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT, inAddr,
        myAddr, block.getNumBytes(), "HDFS_WRITE", clientname, offset,
        dnR.getDatanodeUuid(), block, endTime - startTime));
  } else {
    LOG.info("Received " + block + " size " + block.getNumBytes()
        + " from " + inAddr);
  }
}
 
Example #27
Source File: BlockReportTestBase.java    From hadoop with Apache License 2.0
/**
 * Test writes a file and closes it.
 * Block reported is generated with an extra block.
 * Block report is forced and the check for # of pendingdeletion
 * blocks is performed.
 *
 * @throws IOException in case of an error
 */
@Test(timeout=300000)
public void blockReport_04() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  DFSTestUtil.createFile(fs, filePath,
                         FILE_SIZE, REPL_FACTOR, rand.nextLong());


  DataNode dn = cluster.getDataNodes().get(DN_N0);
  // all blocks belong to the same file, hence same BP
  String poolId = cluster.getNamesystem().getBlockPoolId();

  // Create a bogus new block which will not be present on the namenode.
  ExtendedBlock b = new ExtendedBlock(
      poolId, rand.nextLong(), 1024L, rand.nextLong());
  dn.getFSDataset().createRbw(StorageType.DEFAULT, b, false);

  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();

  assertThat("Wrong number of corrupt blocks",
             cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L));
  assertThat("Wrong number of PendingDeletion blocks",
             cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
}
 
Example #28
Source File: BlockReportTestBase.java    From big-c with Apache License 2.0
/**
 * Test writes a file and closes it.
 * Block reported is generated with an extra block.
 * Block report is forced and the check for # of pendingdeletion
 * blocks is performed.
 *
 * @throws IOException in case of an error
 */
@Test(timeout=300000)
public void blockReport_04() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  DFSTestUtil.createFile(fs, filePath,
                         FILE_SIZE, REPL_FACTOR, rand.nextLong());


  DataNode dn = cluster.getDataNodes().get(DN_N0);
  // all blocks belong to the same file, hence same BP
  String poolId = cluster.getNamesystem().getBlockPoolId();

  // Create a bogus new block which will not be present on the namenode.
  ExtendedBlock b = new ExtendedBlock(
      poolId, rand.nextLong(), 1024L, rand.nextLong());
  dn.getFSDataset().createRbw(StorageType.DEFAULT, b, false);

  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();

  assertThat("Wrong number of corrupt blocks",
             cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L));
  assertThat("Wrong number of PendingDeletion blocks",
             cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
}
 
Example #29
Source File: TestBPOfferService.java    From hadoop with Apache License 2.0
/**
 * Test that the BPOS can register to talk to two different NNs,
 * sends block reports to both, etc.
 */
@Test
public void testBasicFunctionality() throws Exception {
  BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
  bpos.start();
  try {
    waitForInitialization(bpos);
    
    // The DN should have register to both NNs.
    Mockito.verify(mockNN1).registerDatanode(
        Mockito.any(DatanodeRegistration.class));
    Mockito.verify(mockNN2).registerDatanode(
        Mockito.any(DatanodeRegistration.class));
    
    // Should get block reports from both NNs
    waitForBlockReport(mockNN1);
    waitForBlockReport(mockNN2);

    // When we receive a block, it should report it to both NNs
    bpos.notifyNamenodeReceivedBlock(FAKE_BLOCK, "", "");

    ReceivedDeletedBlockInfo[] ret = waitForBlockReceived(FAKE_BLOCK, mockNN1);
    assertEquals(1, ret.length);
    assertEquals(FAKE_BLOCK.getLocalBlock(), ret[0].getBlock());
    
    ret = waitForBlockReceived(FAKE_BLOCK, mockNN2);
    assertEquals(1, ret.length);
    assertEquals(FAKE_BLOCK.getLocalBlock(), ret[0].getBlock());

  } finally {
    bpos.stop();
  }
}
 
Example #30
Source File: DataNode.java    From hadoop-gpu with Apache License 2.0
public static void setNewStorageID(DatanodeRegistration dnReg) {
  /* Set the storage ID to
   * "DS-randInt-ipaddr-currentTimeMillis".
   * It is considered extremely rare for all these numbers to match
   * on a different machine accidentally, because:
   * a) SecureRandom(INT_MAX) is pretty much random (1 in 2 billion), and
   * b) there is a good chance the IP address would be different, and
   * c) even on the same machine, the Datanode is designed to use different
   *    ports, and
   * d) there is a good chance that these are started at different times.
   * For a conflict to occur, all four of the above have to match!
   * The format of this string can be changed anytime in the future without
   * affecting its functionality.
   */
  String ip = "unknownIP";
  try {
    ip = DNS.getDefaultIP("default");
  } catch (UnknownHostException ignored) {
    LOG.warn("Could not find ip address of \"default\" inteface.");
  }
  
  int rand = 0;
  try {
    rand = SecureRandom.getInstance("SHA1PRNG").nextInt(Integer.MAX_VALUE);
  } catch (NoSuchAlgorithmException e) {
    LOG.warn("Could not use SecureRandom");
    rand = R.nextInt(Integer.MAX_VALUE);
  }
  dnReg.storageID = "DS-" + rand + "-"+ ip + "-" + dnReg.getPort() + "-" + 
                    System.currentTimeMillis();
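With this scheme, a generated ID looks something like DS-1804289383-192.168.1.10-50010-1432345678901 (the values here are illustrative): the random component, IP address, port, and timestamp each independently reduce the chance of a collision.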
}