Java Code Examples for org.apache.hadoop.hdfs.MiniDFSCluster#triggerHeartbeats()

The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster#triggerHeartbeats(). Each example notes the project and source file it was taken from.
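
All of the examples share the same shape: build a MiniDFSCluster, change the namespace, then call triggerHeartbeats() (often together with triggerBlockReports()) so the DataNodes report to the NameNode immediately instead of waiting for the next heartbeat interval. The sketch below distills that pattern; the class name, file path, and file contents are illustrative and not taken from any of the examples.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;

public class TriggerHeartbeatsSketch {
  @Test
  public void triggerHeartbeatsPattern() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
        .numDataNodes(3)
        .build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      DFSTestUtil.writeFile(fs, new Path("/sketch"), "sketch data");
      // Force every DataNode to heartbeat (and report its blocks) now,
      // so the NameNode's view is current before any assertions run.
      cluster.triggerHeartbeats();
      cluster.triggerBlockReports();
    } finally {
      cluster.shutdown();
    }
  }
}
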
Example 1
Source File: TestStandbyIsHot.java    From hadoop with Apache License 2.0
@Test(timeout=60000)
public void testStandbyIsHot() throws Exception {
  Configuration conf = new Configuration();
  // We read from the standby to watch block locations
  HAUtil.setAllowStandbyReads(conf, true);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(3)
    .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    
    Thread.sleep(1000);
    System.err.println("==================================");
    DFSTestUtil.writeFile(fs, TEST_FILE_PATH, TEST_FILE_DATA);
    // Have to force an edit log roll so that the standby catches up
    nn1.getRpcServer().rollEditLog();
    System.err.println("==================================");

    // Block locations should show up on standby.
    LOG.info("Waiting for block locations to appear on standby node");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 3);

    // Trigger immediate heartbeats and block reports so
    // that the active "trusts" all of the DNs
    cluster.triggerHeartbeats();
    cluster.triggerBlockReports();

    // Change replication
    LOG.info("Changing replication to 1");
    fs.setReplication(TEST_FILE_PATH, (short)1);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());
    waitForBlockLocations(cluster, nn1, TEST_FILE, 1);

    nn1.getRpcServer().rollEditLog();
    
    LOG.info("Waiting for lowered replication to show up on standby");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 1);
    
    // Change back to 3
    LOG.info("Changing replication to 3");
    fs.setReplication(TEST_FILE_PATH, (short)3);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());
    nn1.getRpcServer().rollEditLog();
    
    LOG.info("Waiting for higher replication to show up on standby");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 3);
    
  } finally {
    cluster.shutdown();
  }
}
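
The waitForBlockLocations helper is defined elsewhere in TestStandbyIsHot and is not shown above. Below is a plausible sketch of such a helper, polling the NameNode until the file's last block reports the expected replica count. It assumes the Hadoop test utilities NameNodeAdapter.getBlockLocations and GenericTestUtils.waitFor; the real implementation in the source file may differ in its details.

import java.io.IOException;
import com.google.common.base.Supplier;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.test.GenericTestUtils;

// Inside the test class. 'cluster' is unused in this sketch but kept so
// the signature matches the call sites in the example above.
static void waitForBlockLocations(final MiniDFSCluster cluster,
    final NameNode nn, final String path, final int expectedReplicas)
    throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        LocatedBlocks locs =
            NameNodeAdapter.getBlockLocations(nn, path, 0, 1000);
        LocatedBlock last = locs.getLastLocatedBlock();
        if (last == null) {
          return false; // file has no complete blocks yet
        }
        DatanodeInfo[] locations = last.getLocations();
        return locations.length == expectedReplicas;
      } catch (IOException e) {
        return false; // transient lookup failure: not there yet
      }
    }
  }, 500, 20000);
}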
 
Example 2
Source File: TestStandbyBlockManagement.java    From hadoop with Apache License 2.0
@Test(timeout=60000)
public void testInvalidateBlock() throws Exception {
  Configuration conf = new Configuration();
  HAUtil.setAllowStandbyReads(conf, true);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3)
      .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);

    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);

    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

    Thread.sleep(1000);
    LOG.info("==================================");
    DFSTestUtil.writeFile(fs, TEST_FILE_PATH, TEST_FILE_DATA);
    // Have to force an edit log roll so that the standby catches up
    nn1.getRpcServer().rollEditLog();
    LOG.info("==================================");

    // delete the file
    fs.delete(TEST_FILE_PATH, false);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());

    nn1.getRpcServer().rollEditLog();

    // The standby NN should not have queued any block invalidations.
    assertEquals(0,
        nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());

    cluster.triggerHeartbeats();
    cluster.triggerBlockReports();

    // Even after forced heartbeats and block reports, the standby
    // still has no pending block deletions.
    assertEquals(0,
        nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());

  } finally {
    cluster.shutdown();
  }
}
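
cluster.triggerHeartbeats() simply fans out over every DataNode in the cluster. When a test only cares about a single DataNode, the per-node helpers in DataNodeTestUtils (part of the HDFS test jar) can be used instead. A minimal fragment, assuming a running MiniDFSCluster named cluster as in the example above:

import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;

DataNode dn = cluster.getDataNodes().get(0);
DataNodeTestUtils.triggerHeartbeat(dn);   // heartbeat from just this DN
DataNodeTestUtils.triggerBlockReport(dn); // block report from just this DN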
 
Example 3
Source File: TestBalancer.java    From hadoop with Apache License 2.0
/**
 * Test a special case: two replicas of the same block must not end up on
 * the same node. We have two nodes, and a block with replicas in
 * (DN0,SSD) and (DN1,DISK). The replica in (DN0,SSD) should not be moved
 * to (DN1,SSD); otherwise DN1 would hold two replicas of the block.
 */
@Test(timeout=100000)
public void testTwoReplicaShouldNotInSameDN() throws Exception {
  final Configuration conf = new HdfsConfiguration();

  int blockSize = 5 * 1024 * 1024;
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);

  int numOfDatanodes = 2;
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .racks(new String[]{"/default/rack0", "/default/rack0"})
      .storagesPerDatanode(2)
      .storageTypes(new StorageType[][]{
          {StorageType.SSD, StorageType.DISK},
          {StorageType.SSD, StorageType.DISK}})
      .storageCapacities(new long[][]{
          {100 * blockSize, 20 * blockSize},
          {20 * blockSize, 100 * blockSize}})
      .build();

  try {
    cluster.waitActive();

    //set "/bar" directory with ONE_SSD storage policy.
    DistributedFileSystem fs = cluster.getFileSystem();
    Path barDir = new Path("/bar");
    fs.mkdir(barDir, new FsPermission((short) 777));
    fs.setStoragePolicy(barDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

    // Insert 30 blocks. So (DN0,SSD) and (DN1,DISK) are about half full,
    // and (DN0,DISK) and (DN1,SSD) are about 15% full.
    long fileLen  = 30 * blockSize;
    // fooFile has the ONE_SSD policy, so for each block the two replicas
    // land either on (DN0,SSD) and (DN1,DISK)
    // or on (DN0,DISK) and (DN1,SSD).
    Path fooFile = new Path(barDir, "foo");
    createFile(cluster, fooFile, fileLen, (short) numOfDatanodes, 0);
    // update space info
    cluster.triggerHeartbeats();

    Balancer.Parameters p = Balancer.Parameters.DEFAULT;
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    final int r = Balancer.run(namenodes, p, conf);

    // The replica in (DN0,SSD) was not moved to (DN1,SSD), because
    // (DN1,DISK) already holds a replica of the same block and DN1 would
    // otherwise end up with two replicas. For the same reason, no
    // replicas were moved at all.
    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);

  } finally {
    cluster.shutdown();
  }
}
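
Here triggerHeartbeats() serves a different purpose than in the HA examples: it refreshes the NameNode's per-storage space accounting (the "update space info" comment) so the balancer computes utilization from current numbers instead of waiting up to a heartbeat interval for them. A small fragment that makes the effect visible, assuming the same cluster and fileLen as above (getCapacityUsed is the FSNamesystem metric; the exact value depends on replication and overhead):

// Without forced heartbeats, the used-capacity metric can lag behind
// the data just written; after them it reflects the new blocks.
cluster.triggerHeartbeats();
long used = cluster.getNamesystem().getCapacityUsed();
assertTrue("space usage should be visible after forced heartbeats",
    used >= fileLen);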
 