Java Code Examples for org.apache.hadoop.hdfs.server.namenode.FSNamesystem#getBlockPoolId()

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.FSNamesystem#getBlockPoolId(). Each example is taken from the project and source file named in its header.
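Before the full test examples, here is a minimal, self-contained sketch of the call itself. It assumes only the MiniDFSCluster setup used throughout the examples below; the class name BlockPoolIdSketch and running it as a standalone main are illustrative choices, not part of any project shown here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

public class BlockPoolIdSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      cluster.waitActive();
      FSNamesystem namesystem = cluster.getNamesystem();
      // The block pool ID names this namespace's block pool; the tests below
      // pass it to datanode-side helpers such as getBlockFile and
      // getDNRegistrationForBP.
      String bpid = namesystem.getBlockPoolId();
      System.out.println("Block pool ID: " + bpid);
    } finally {
      cluster.shutdown();
    }
  }
}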
Example 1
Source File: TestRBWBlockInvalidation.java    From hadoop with Apache License 2.0
/**
 * Test that when a block's replica is removed from the RBW folder on one of
 * the datanodes, the namenode asks to invalidate the corrupted block and
 * schedules replication of one more replica for the under-replicated block.
 */
@Test(timeout=600000)
public void testBlockInvalidationWhenRBWReplicaMissedInDN()
    throws IOException, InterruptedException {
  // This test cannot pass on Windows due to file locking enforcement: the OS
  // will reject the attempt to delete the block file from the RBW folder.
  assumeTrue(!Path.WINDOWS);

  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 300);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
      .build();
  FSDataOutputStream out = null;
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/TestRBWBlockInvalidation", "foo1");
    out = fs.create(testPath, (short) 2);
    out.writeBytes("HDFS-3157: " + testPath);
    out.hsync();
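    // Bring up a third datanode so that re-replication of the
    // soon-to-be-corrupted block has a target.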
    cluster.startDataNodes(conf, 1, true, null, null, null);
    String bpid = namesystem.getBlockPoolId();
    ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, testPath);
    Block block = blk.getLocalBlock();
    DataNode dn = cluster.getDataNodes().get(0);

    // Delete partial block and its meta information from the RBW folder
    // of first datanode.
    File blockFile = DataNodeTestUtils.getBlockFile(dn, bpid, block);
    File metaFile = DataNodeTestUtils.getMetaFile(dn, bpid, block);
    assertTrue("Could not delete the block file from the RBW folder",
        blockFile.delete());
    assertTrue("Could not delete the block meta file from the RBW folder",
        metaFile.delete());

    out.close();
    
    int liveReplicas = 0;
    while (true) {
      if ((liveReplicas = countReplicas(namesystem, blk).liveReplicas()) < 2) {
        // This confirms we have a corrupt replica
        LOG.info("Live Replicas after corruption: " + liveReplicas);
        break;
      }
      Thread.sleep(100);
    }
    assertEquals("There should be less than 2 replicas in the "
        + "liveReplicasMap", 1, liveReplicas);
    
    while (true) {
      if ((liveReplicas =
            countReplicas(namesystem, blk).liveReplicas()) > 1) {
        // Wait until the live replica count equals the replication factor.
        LOG.info("Live Replicas after Rereplication: " + liveReplicas);
        break;
      }
      Thread.sleep(100);
    }
    assertEquals("There should be two live replicas", 2, liveReplicas);

    while (true) {
      Thread.sleep(100);
      if (countReplicas(namesystem, blk).corruptReplicas() == 0) {
        LOG.info("Corrupt Replicas becomes 0");
        break;
      }
    }
  } finally {
    if (out != null) {
      out.close();
    }
    cluster.shutdown();
  }
}
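
Note that both polling loops call a countReplicas helper that is not shown in the snippet. A minimal sketch of what such a helper can look like, assuming the BlockManager#countNodes(Block) accessor available in this Hadoop 2.x line:

// Sketch of the helper assumed by the test above; assumes
// BlockManager#countNodes(Block) returning a NumberReplicas with
// liveReplicas() and corruptReplicas() accessors, as in this Hadoop 2.x line.
private static NumberReplicas countReplicas(final FSNamesystem namesystem,
    ExtendedBlock block) {
  return namesystem.getBlockManager().countNodes(block.getLocalBlock());
}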
 
Example 2
Source File: TestHeartbeatHandling.java    From hadoop with Apache License 2.0
/**
 * Test that
 * {@link FSNamesystem#handleHeartbeat}
 * picks up replication and/or invalidation requests and observes the
 * configured maximum limits.
 */
@Test
public void testHeartbeat() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();
    final HeartbeatManager hm = namesystem.getBlockManager(
        ).getDatanodeManager().getHeartbeatManager();
    final String poolId = namesystem.getBlockPoolId();
    final DatanodeRegistration nodeReg =
      DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
    final String storageID = DatanodeStorage.generateUuid();
    dd.updateStorage(new DatanodeStorage(storageID));

    final int REMAINING_BLOCKS = 1;
    final int MAX_REPLICATE_LIMIT =
      conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 2);
    final int MAX_INVALIDATE_LIMIT = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
    final int MAX_INVALIDATE_BLOCKS = 2*MAX_INVALIDATE_LIMIT+REMAINING_BLOCKS;
    final int MAX_REPLICATE_BLOCKS = 2*MAX_REPLICATE_LIMIT+REMAINING_BLOCKS;
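    // Queuing 2*limit+1 blocks makes each queue drain over exactly three
    // heartbeats: a full limit, another full limit, then the one remaining block.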
    final DatanodeStorageInfo[] ONE_TARGET = {dd.getStorageInfo(storageID)};

    try {
      namesystem.writeLock();
      synchronized(hm) {
        for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
          dd.addBlockToBeReplicated(
              new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP),
              ONE_TARGET);
        }
        DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd,
            namesystem).getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);

        ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
        for (int i=0; i<MAX_INVALIDATE_BLOCKS; i++) {
          blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
        }
        dd.addBlocksToBeInvalidated(blockList);
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(2, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
        assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
        
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(2, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
        assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
        
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
        assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);

        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(0, cmds.length);
      }
    } finally {
      namesystem.writeUnlock();
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example 3
Source File: TestRBWBlockInvalidation.java    From big-c with Apache License 2.0

The big-c project carries this test verbatim; the code is identical to Example 1 above.

Example 4
Source File: TestHeartbeatHandling.java    From big-c with Apache License 2.0

The big-c project carries this test verbatim; the code is identical to Example 2 above.