Java Code Examples for org.apache.hadoop.hdfs.MiniDFSCluster#triggerBlockReports()

The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster#triggerBlockReports(). The original project and source file for each example are noted above its code.
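Before the full examples, here is a minimal sketch of the call in context. triggerBlockReports() forces every DataNode in the mini cluster to send a block report to the NameNode(s) immediately, rather than waiting for the periodic reporting interval, so a test can assert on the NameNode's block state deterministically. The path, file size, and cluster settings below are illustrative, not taken from the examples that follow.

Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(1)
    .build();
try {
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  // Write a small file so the DataNode has a block worth reporting.
  DFSTestUtil.createFile(fs, new Path("/sketch"), 1024L, (short) 1, 0L);
  // Send block reports now instead of waiting for the periodic interval.
  cluster.triggerBlockReports();
} finally {
  cluster.shutdown();
}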
Example 1
Source File: TestHAAppend.java    From hadoop with Apache License 2.0
/**
 * Test to verify the processing of the PendingDataNodeMessageQueue in the
 * case of append. One block will be marked as corrupt if the OP_ADD and
 * OP_UPDATE_BLOCKS edits arrive in one edit log segment and the OP_CLOSE
 * edit arrives in the next segment, which is loaded during failover.
 * Regression test for HDFS-3605.
 */
@Test
public void testMultipleAppendsDuringCatchupTailing() throws Exception {
  Configuration conf = new Configuration();
  
  // Set a lengthy edits tailing period and disable automatic log rolling,
  // so we can control the standby's ingest of edits for this test.
  conf.set(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, "5000");
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, -1);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3).build();
  FileSystem fs = null;
  try {
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);

    Path fileToAppend = new Path("/FileToAppend");
    Path fileToTruncate = new Path("/FileToTruncate");
    
    final byte[] data = new byte[1 << 16];
    DFSUtil.getRandom().nextBytes(data);
    final int[] appendPos = AppendTestUtil.randomFilePartition(
        data.length, COUNT);
    final int[] truncatePos = AppendTestUtil.randomFilePartition(
        data.length, 1);

    // Create file, write some data, and hflush so that the first
    // block is in the edit log prior to roll.
    FSDataOutputStream out = createAndHflush(
        fs, fileToAppend, data, appendPos[0]);

    FSDataOutputStream out4Truncate = createAndHflush(
        fs, fileToTruncate, data, data.length);
    
    // Let the standby NameNode catch up with the creation of the file.
    cluster.getNameNode(0).getRpcServer().rollEditLog();
    cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
    out.close();
    out4Truncate.close();

    // Append and re-close a few times, so that many block entries are queued.
    for (int i = 0; i < COUNT; i++) {
      int end = i < COUNT - 1 ? appendPos[i + 1] : data.length;
      out = fs.append(fileToAppend);
      out.write(data, appendPos[i], end - appendPos[i]);
      out.close();
    }
    boolean isTruncateReady = fs.truncate(fileToTruncate, truncatePos[0]);

    // Ensure that blocks have been reported to the SBN ahead of the edits
    // arriving.
    cluster.triggerBlockReports();

    // Fail over: shut down the active NN and make the standby active.
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    
    // Check that fsck doesn't detect any bad blocks on the SBN.
    int rc = ToolRunner.run(new DFSck(cluster.getConfiguration(1)),
        new String[] { "/", "-files", "-blocks" });
    assertEquals(0, rc);
    
    assertEquals("CorruptBlocks should be empty.", 0, cluster.getNameNode(1)
        .getNamesystem().getCorruptReplicaBlocks());

    AppendTestUtil.checkFullFile(fs, fileToAppend, data.length, data,
        fileToAppend.toString());

    if (!isTruncateReady) {
      TestFileTruncate.checkBlockRecovery(fileToTruncate,
          cluster.getFileSystem(1));
    }
    AppendTestUtil.checkFullFile(fs, fileToTruncate, truncatePos[0], data,
        fileToTruncate.toString());
  } finally {
    if (null != cluster) {
      cluster.shutdown();
    }
    if (null != fs) {
      fs.close();
    }
  }
}
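The COUNT constant and the createAndHflush helper referenced above are defined elsewhere in TestHAAppend. A sketch of what they look like (an illustrative approximation, not the verbatim source):

private static final int COUNT = 5;

private FSDataOutputStream createAndHflush(FileSystem fs, Path p,
    byte[] data, int length) throws IOException {
  FSDataOutputStream out = fs.create(p, false, 4096, (short) 3, 1024);
  out.write(data, 0, length);
  // hflush publishes the written bytes (and the block allocation in the
  // edit log) without closing the file.
  out.hflush();
  return out;
}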
 
Example 2
Source File: TestStandbyIsHot.java    From hadoop with Apache License 2.0
@Test(timeout=60000)
public void testStandbyIsHot() throws Exception {
  Configuration conf = new Configuration();
  // We read from the standby to watch block locations
  HAUtil.setAllowStandbyReads(conf, true);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(3)
    .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    
    Thread.sleep(1000);
    System.err.println("==================================");
    DFSTestUtil.writeFile(fs, TEST_FILE_PATH, TEST_FILE_DATA);
    // Have to force an edit log roll so that the standby catches up
    nn1.getRpcServer().rollEditLog();
    System.err.println("==================================");

    // Block locations should show up on standby.
    LOG.info("Waiting for block locations to appear on standby node");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 3);

    // Trigger immediate heartbeats and block reports so
    // that the active "trusts" all of the DNs
    cluster.triggerHeartbeats();
    cluster.triggerBlockReports();

    // Change replication
    LOG.info("Changing replication to 1");
    fs.setReplication(TEST_FILE_PATH, (short)1);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());
    waitForBlockLocations(cluster, nn1, TEST_FILE, 1);

    nn1.getRpcServer().rollEditLog();
    
    LOG.info("Waiting for lowered replication to show up on standby");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 1);
    
    // Change back to 3
    LOG.info("Changing replication to 3");
    fs.setReplication(TEST_FILE_PATH, (short)3);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());
    nn1.getRpcServer().rollEditLog();
    
    LOG.info("Waiting for higher replication to show up on standby");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 3);
    
  } finally {
    cluster.shutdown();
  }
}
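The waitForBlockLocations helper (and the TEST_FILE* constants) used above are defined elsewhere in TestStandbyIsHot. A simplified sketch of such a polling helper, assuming GenericTestUtils.waitFor, NameNodeAdapter.getBlockLocations, and Guava's Supplier:

static void waitForBlockLocations(final MiniDFSCluster cluster,
    final NameNode nn, final String path, final int expectedReplicas)
    throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        LocatedBlocks locs =
            NameNodeAdapter.getBlockLocations(nn, path, 0, 1000);
        // Count the replica locations this NameNode currently knows about.
        DatanodeInfo[] dnis = locs.getLastLocatedBlock().getLocations();
        return dnis != null && dnis.length == expectedReplicas;
      } catch (IOException e) {
        // The file may not be visible on this NameNode yet.
        return false;
      }
    }
  }, 500, 20000);
}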
 
Example 3
Source File: TestStandbyBlockManagement.java    From hadoop with Apache License 2.0
@Test(timeout=60000)
public void testInvalidateBlock() throws Exception {
  Configuration conf = new Configuration();
  HAUtil.setAllowStandbyReads(conf, true);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3)
      .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);

    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);

    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

    Thread.sleep(1000);
    LOG.info("==================================");
    DFSTestUtil.writeFile(fs, TEST_FILE_PATH, TEST_FILE_DATA);
    // Have to force an edit log roll so that the standby catches up
    nn1.getRpcServer().rollEditLog();
    LOG.info("==================================");

    // delete the file
    fs.delete(TEST_FILE_PATH, false);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());

    nn1.getRpcServer().rollEditLog();

    // The standby NN should not have queued any blocks for invalidation.
    assertEquals(0,
        nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());

    cluster.triggerHeartbeats();
    cluster.triggerBlockReports();

    // Even after heartbeats and block reports, the standby should still
    // have no blocks queued for invalidation.
    assertEquals(0,
        nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());

  } finally {
    cluster.shutdown();
  }
}
 