Java Code Examples for org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil#configureFailoverFs()

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil#configureFailoverFs(). Each example is taken from an open-source project; the source file and license are noted above each example.
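
Most of the examples share one pattern: build a MiniDFSCluster with a two-NameNode HA topology, wait for it to come up, transition one NameNode to active, and then call HATestUtil.configureFailoverFs() to obtain a client FileSystem bound to the cluster's logical nameservice URI, so the client fails over between NameNodes transparently. The sketch below condenses that pattern into one self-contained JUnit 4 test; the class and test names are illustrative and not taken from any of the projects quoted here.

import static org.junit.Assert.assertTrue;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.junit.Test;

// Illustrative sketch, not from any of the projects below.
public class FailoverFsSketchTest {
  @Test
  public void sketchFailoverClient() throws Exception {
    Configuration conf = new Configuration();
    // Two NameNodes in a single nameservice; no DataNodes are needed
    // for this metadata-only sketch.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();
    try {
      cluster.waitActive();
      cluster.transitionToActive(0);

      // configureFailoverFs() adds failover client settings to conf and
      // returns a FileSystem bound to the cluster's logical URI.
      FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
      Path dir = new Path("/sketch");
      assertTrue(fs.mkdirs(dir));

      // The same client instance keeps working across a failover.
      cluster.shutdownNameNode(0);
      cluster.transitionToActive(1);
      assertTrue(fs.exists(dir));
    } finally {
      cluster.shutdown();
    }
  }
}
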
Example 1
Source File: TestDFSClientFailover.java    From hadoop with Apache License 2.0
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  
  DFSTestUtil.createFile(fs, TEST_FILE,
      FILE_LENGTH_TO_VERIFY, (short)1, 1L);
  
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  
  // Check that it functions even if the URL becomes canonicalized
  // to include a port number.
  Path withPort = new Path("hdfs://" +
      HATestUtil.getLogicalHostname(cluster) + ":" +
      NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));

  fs.close();
}
 
Example 2
Source File: TestDFSClientFailover.java    From hadoop with Apache License 2.0
/**
 * Test that even a non-idempotent method will properly fail-over if the
 * first IPC attempt times out trying to connect. Regression test for
 * HDFS-4404. 
 */
@Test
public void testFailoverOnConnectTimeout() throws Exception {
  conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      InjectingSocketFactory.class, SocketFactory.class);
  // Set up the InjectingSocketFactory to throw a ConnectTimeoutException
  // when connecting to the first NN.
  InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);

  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  
  // Make the second NN the active one.
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  
  // Call a non-idempotent method, and ensure the failover of the call proceeds
  // successfully.
  IOUtils.closeStream(fs.create(TEST_FILE));
}
 
Example 3
Source File: TestDFSClientFailover.java    From hadoop with Apache License 2.0
/**
 * Same as testDoesntDnsResolveLogicalURI (Example 11 below), but for
 * FileContext instead of FileSystem.
 */
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  NameService spyNS = spyOnNameService();
  String logicalHost = fs.getUri().getHost();
  Configuration haClientConf = fs.getConf();
  
  FileContext fc = FileContext.getFileContext(haClientConf);
  Path root = new Path("/");
  fc.listStatus(root);
  fc.listStatus(fc.makeQualified(root));
  fc.getDefaultFileSystem().getCanonicalServiceName();

  // Ensure that the logical hostname was never resolved.
  Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
 
Example 4
Source File: TestNameNodeRetryCacheMetrics.java    From big-c with Apache License 2.0
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
 
Example 5
Source File: TestBookKeeperHACheckpoints.java    From big-c with Apache License 2.0
@SuppressWarnings("rawtypes")
@Override
@Before
public void setupCluster() throws Exception {
  Configuration conf = setupCommonConfig();
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
           BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
           .toString());
  BKJMUtil.addJournalManagerDefinition(conf);
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
      .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
      .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(1)
    .manageNameDfsSharedDirs(false)
    .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
 
Example 6
Source File: TestEncryptionZonesWithHA.java    From hadoop with Apache License 2.0
@Before
public void setupCluster() throws Exception {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" +
      new Path(testRootDir.toString(), "test.jks").toUri()
  );

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .build();
  cluster.waitActive();
  cluster.transitionToActive(0);

  fs = (DistributedFileSystem)HATestUtil.configureFailoverFs(cluster, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
  dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
  KeyProviderCryptoExtension nn0Provider =
      cluster.getNameNode(0).getNamesystem().getProvider();
  fs.getClient().setKeyProvider(nn0Provider);
}
 
Example 7
Source File: TestEditLogAutoroll.java    From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  // Stall the standby checkpointer in two ways
  conf.setLong(DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, Long.MAX_VALUE);
  conf.setLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 20);
  // Make it autoroll after 10 edits
  conf.setFloat(DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD, 0.5f);
  conf.setInt(DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS, 100);

  int retryCount = 0;
  while (true) {
    try {
      int basePort = 10060 + random.nextInt(100) * 2;
      MiniDFSNNTopology topology = new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));

      cluster = new MiniDFSCluster.Builder(conf)
          .nnTopology(topology)
          .numDataNodes(0)
          .build();
      cluster.waitActive();

      nn0 = cluster.getNameNode(0);
      fs = HATestUtil.configureFailoverFs(cluster, conf);

      cluster.transitionToActive(0);

      fs = cluster.getFileSystem(0);
      editLog = nn0.getNamesystem().getEditLog();
      ++retryCount;
      break;
    } catch (BindException e) {
      LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
          + retryCount + " times");
    }
  }
}
 
Example 8
Source File: TestBookKeeperAsHASharedDir.java    From hadoop with Apache License 2.0
private void assertCanStartHANameNodes(MiniDFSCluster cluster,
    Configuration conf, String path) throws ServiceFailedException,
    IOException, URISyntaxException, InterruptedException {
  // Now should be able to start both NNs. Pass "false" here so that we don't
  // try to waitActive on all NNs, since the second NN doesn't exist yet.
  cluster.restartNameNode(0, false);
  cluster.restartNameNode(1, true);

  // Make sure HA is working.
  cluster
      .getNameNode(0)
      .getRpcServer()
      .transitionToActive(
          new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
  FileSystem fs = null;
  try {
    Path newPath = new Path(path);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(newPath));
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
        cluster.getNameNode(1));
    assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
        newPath.toString(), false).isDir());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
 
Example 9
Source File: TestBookKeeperAsHASharedDir.java    From big-c with Apache License 2.0
/**
 * Test simple HA failover usecase with BK
 */
@Test
public void testFailoverWithBK() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
             BKJMUtil.createJournalURI("/hotfailover").toString());
    BKJMUtil.addJournalManagerDefinition(conf);

    cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0)
      .manageNameDfsSharedDirs(false)
      .build();
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);

    cluster.waitActive();
    cluster.transitionToActive(0);

    Path p = new Path("/testBKJMfailover");

    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

    fs.mkdirs(p);
    cluster.shutdownNameNode(0);

    cluster.transitionToActive(1);

    assertTrue(fs.exists(p));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 10
Source File: TestDFSInotifyEventInputStream.java    From hadoop with Apache License 2.0
@Test(timeout = 120000)
public void testNNFailover() throws IOException, URISyntaxException,
    MissingEventsException {
  Configuration conf = new HdfsConfiguration();
  MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build();

  try {
    cluster.getDfsCluster().waitActive();
    cluster.getDfsCluster().transitionToActive(0);
    DFSClient client = ((DistributedFileSystem) HATestUtil
        .configureFailoverFs(cluster.getDfsCluster(), conf)).dfs;
    DFSInotifyEventInputStream eis = client.getInotifyEventStream();
    for (int i = 0; i < 10; i++) {
      client.mkdirs("/dir" + i, null, false);
    }
    cluster.getDfsCluster().shutdownNameNode(0);
    cluster.getDfsCluster().transitionToActive(1);
    EventBatch batch = null;
    // we can read all of the edits logged by the old active from the new
    // active
    for (int i = 0; i < 10; i++) {
      batch = waitForNextEvents(eis);
      Assert.assertEquals(1, batch.getEvents().length);
      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
      Assert.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
          i));
    }
    Assert.assertTrue(eis.poll() == null);
  } finally {
    cluster.shutdown();
  }
}
 
Example 11
Source File: TestDFSClientFailover.java    From hadoop with Apache License 2.0
/**
 * Test that the client doesn't ever try to DNS-resolve the logical URI.
 * Regression test for HADOOP-9150.
 */
@Test
public void testDoesntDnsResolveLogicalURI() throws Exception {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  NameService spyNS = spyOnNameService();
  String logicalHost = fs.getUri().getHost();
  Path qualifiedRoot = fs.makeQualified(new Path("/"));
  
  // Make a few calls against the filesystem.
  fs.getCanonicalServiceName();
  fs.listStatus(qualifiedRoot);
  
  // Ensure that the logical hostname was never resolved.
  Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
 
Example 12
Source File: TestBookKeeperAsHASharedDir.java    From big-c with Apache License 2.0
/**
 * Test that two namenodes can't continue as primary
 */
@Test
public void testMultiplePrimariesStarted() throws Exception {
  Path p1 = new Path("/testBKJMMultiplePrimary");

  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
             BKJMUtil.createJournalURI("/hotfailoverMultiple").toString());
    BKJMUtil.addJournalManagerDefinition(conf);

    cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0)
      .manageNameDfsSharedDirs(false)
      .checkExitOnShutdown(false)
      .build();
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    cluster.waitActive();
    cluster.transitionToActive(0);

    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    fs.mkdirs(p1);
    nn1.getRpcServer().rollEditLog();
    cluster.transitionToActive(1);
    fs = cluster.getFileSystem(0); // get the older active server.

    try {
      fs.delete(p1, true);
      fail("Log update on older active should cause it to exit");
    } catch (RemoteException re) {
      assertTrue(re.getClassName().contains("ExitException"));
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 