Java Code Examples for org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter#getDatanode()

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter#getDatanode(). All of them come from the Hadoop test sources; you can go to the original project or source file by following the links above each example.
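Before the examples, here is a minimal sketch of the basic call pattern. NameNodeAdapter is a test-side helper that exposes NameNode internals, so this assumes the hadoop-hdfs test artifacts (which provide both MiniDFSCluster and NameNodeAdapter) are on the classpath; the class name GetDatanodeSketch and the variable names are illustrative, not taken from the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

public class GetDatanodeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      cluster.waitActive();
      FSNamesystem fsn = cluster.getNamesystem();
      // Any DatanodeID serves as the lookup key; here it comes from the
      // first DataNode the mini cluster started.
      DatanodeID dnId = cluster.getDataNodes().get(0).getDatanodeId();
      // Returns the NameNode's live DatanodeDescriptor for that node.
      DatanodeDescriptor dd = NameNodeAdapter.getDatanode(fsn, dnId);
      System.out.println("Live descriptor: " + dd);
    } finally {
      cluster.shutdown();
    }
  }
}

Because the returned descriptor is the NameNode's own view of the DataNode rather than a client-side snapshot, the tests below can mutate heartbeat and admin state on it directly.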
Example 1
Source File: MiniDFSCluster.java    From hadoop with Apache License 2.0
/**
 * Expire a DataNode heartbeat on the NameNode.
 * @param dnId ID of the DataNode whose heartbeat should be expired
 * @throws IOException if the DataNode lookup on the NameNode fails
 */
public void setDataNodeDead(DatanodeID dnId) throws IOException {
  DatanodeDescriptor dnd =
      NameNodeAdapter.getDatanode(getNamesystem(), dnId);
  DFSTestUtil.setDatanodeDead(dnd);
  BlockManagerTestUtil.checkHeartbeat(getNamesystem().getBlockManager());
}
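A brief, hypothetical call site for this helper; "cluster" is assumed to be an already-started MiniDFSCluster.

// Hypothetical usage; setDataNodeDead expires the heartbeat and forces a
// heartbeat check, so the NameNode now counts this DataNode as dead.
cluster.waitActive();
DatanodeID dnId = cluster.getDataNodes().get(0).getDatanodeId();
cluster.setDataNodeDead(dnId);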
 
Example 2
Source File: TestDecommission.java    From hadoop with Apache License 2.0
public void testClusterStats(int numNameNodes) throws IOException,
    InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf);
  
  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);
    
    FSNamesystem fsn = cluster.getNamesystem(i);
    NameNode namenode = cluster.getNameNode(i);
    
    DatanodeInfo decomInfo = decommissionNode(i, null, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    DataNode decomNode = getDataNode(decomInfo);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, decomInfo, decomNode, true);
    
    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    refreshNodes(fsn, conf);
    DatanodeInfo retInfo = NameNodeAdapter.getDatanode(fsn, decomInfo);
    DataNode retNode = getDataNode(decomInfo);
    waitNodeState(retInfo, AdminStates.NORMAL);
    verifyStats(namenode, fsn, retInfo, retNode, false);
  }
}
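Note the re-fetch after refreshNodes: NameNodeAdapter.getDatanode returns the NameNode-side descriptor for the node, so waitNodeState polls the NameNode's own admin state for the transition back to NORMAL rather than a client-side datanode report.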
 
Example 3
Source File: TestHeartbeatHandling.java    From hadoop with Apache License 2.0
/**
 * Test that {@link FSNamesystem#handleHeartbeat} picks up replication
 * and/or invalidation requests and observes the configured max limits.
 */
@Test
public void testHeartbeat() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();
    final HeartbeatManager hm = namesystem.getBlockManager(
        ).getDatanodeManager().getHeartbeatManager();
    final String poolId = namesystem.getBlockPoolId();
    final DatanodeRegistration nodeReg =
      DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
    final String storageID = DatanodeStorage.generateUuid();
    dd.updateStorage(new DatanodeStorage(storageID));

    final int REMAINING_BLOCKS = 1;
    final int MAX_REPLICATE_LIMIT =
      conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 2);
    final int MAX_INVALIDATE_LIMIT = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
    final int MAX_INVALIDATE_BLOCKS = 2*MAX_INVALIDATE_LIMIT+REMAINING_BLOCKS;
    final int MAX_REPLICATE_BLOCKS = 2*MAX_REPLICATE_LIMIT+REMAINING_BLOCKS;
    final DatanodeStorageInfo[] ONE_TARGET = {dd.getStorageInfo(storageID)};

    try {
      namesystem.writeLock();
      synchronized(hm) {
        for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
          dd.addBlockToBeReplicated(
              new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP),
              ONE_TARGET);
        }
        DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd,
            namesystem).getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);

        ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
        for (int i=0; i<MAX_INVALIDATE_BLOCKS; i++) {
          blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
        }
        dd.addBlocksToBeInvalidated(blockList);
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(2, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
        assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
        
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(2, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
        assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
        
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
        assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);

        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(0, cmds.length);
      }
    } finally {
      namesystem.writeUnlock();
    }
  } finally {
    cluster.shutdown();
  }
}
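Note the locking discipline: the test holds the namesystem write lock and synchronizes on the HeartbeatManager while queuing work on the descriptor and driving heartbeats through NameNodeAdapter.sendHeartBeat, which keeps the background heartbeat monitor from racing with the hand-crafted heartbeats.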
 
Example 4
Source File: TestDecommission.java    From hadoop with Apache License 2.0
private DatanodeInfo decommissionNode(int nnIndex,
                                String datanodeUuid,
                                ArrayList<DatanodeInfo> decommissionedNodes,
                                AdminStates waitForState)
  throws IOException {
  DFSClient client = getDfsClient(cluster.getNameNode(nnIndex), conf);
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);

  //
  // pick one datanode randomly unless the caller specifies one.
  //
  int index = 0;
  if (datanodeUuid == null) {
    boolean found = false;
    while (!found) {
      index = myrand.nextInt(info.length);
      if (!info[index].isDecommissioned()) {
        found = true;
      }
    }
  } else {
    // The caller specifies a DN
    for (; index < info.length; index++) {
      if (info[index].getDatanodeUuid().equals(datanodeUuid)) {
        break;
      }
    }
    if (index == info.length) {
      throw new IOException("invalid datanodeUuid " + datanodeUuid);
    }
  }
  String nodename = info[index].getXferAddr();
  LOG.info("Decommissioning node: " + nodename);

  // write nodename into the exclude file.
  ArrayList<String> nodes = new ArrayList<String>();
  if (decommissionedNodes != null) {
    for (DatanodeInfo dn : decommissionedNodes) {
      nodes.add(dn.getName());
    }
  }
  nodes.add(nodename);
  writeConfigFile(excludeFile, nodes);
  refreshNodes(cluster.getNamesystem(nnIndex), conf);
  DatanodeInfo ret = NameNodeAdapter.getDatanode(
      cluster.getNamesystem(nnIndex), info[index]);
  waitNodeState(ret, waitForState);
  return ret;
}
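A hypothetical call site inside the same test class (the helper is private), which blocks until the NameNode reports the node fully decommissioned:

// Decommission one random live DataNode on namenode 0 and wait for the
// DECOMMISSIONED state. Illustrative only; not from the Hadoop sources.
DatanodeInfo decommissioned = decommissionNode(0, null, null,
    AdminStates.DECOMMISSIONED);
LOG.info("Node " + decommissioned.getXferAddr() + " is decommissioned");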
 