Java Code Examples for org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil#setFailoverConfigurations()

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil#setFailoverConfigurations(). Each example is taken from an open-source project; the source file and license are noted above each snippet, and you can refer back to the original project for the full context.
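As the examples below suggest, setFailoverConfigurations() comes in several overloads: (cluster, conf), (cluster, conf, logicalName), (cluster, conf, logicalName, nsIndex), and (conf, logicalName, nnAddr1, nnAddr2). Each writes the client-side HA settings for the given MiniDFSCluster into the supplied Configuration: the nameservice, the NameNode RPC addresses, the failover proxy provider, and a logical default filesystem URI. The sketch below is a minimal illustration of the common pattern; it is not taken from any of the projects listed here, and the class name and the nameservice name "mycluster" are placeholders.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;

public class SetFailoverConfigurationsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Two NameNodes (nn0/nn1) in a single nameservice, plus one DataNode.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();

      // Wire the client-side HA settings for this cluster into conf under
      // the logical nameservice "mycluster" (an arbitrary name).
      HATestUtil.setFailoverConfigurations(cluster, conf, "mycluster");

      cluster.transitionToActive(0);

      // Clients address the nameservice by its logical URI; the failover
      // proxy provider resolves it to whichever NameNode is active.
      FileSystem fs = FileSystem.get(new URI("hdfs://mycluster"), conf);
      fs.mkdirs(new Path("/test"));
    } finally {
      cluster.shutdown();
    }
  }
}

The two-argument overload used in Examples 3, 6, and 14 follows the same pattern but effectively derives a default logical host name from the cluster itself.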
Example 1
Source File: TestWebHDFSForHA.java    From big-c with Apache License 2.0
@Test
public void testMultipleNamespacesConfigured() throws Exception {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();
    DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
    DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");

    fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
    Assert.assertEquals(2, fs.getResolvedNNAddr().length);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 2
Source File: TestMover.java    From big-c with Apache License 2.0
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
 
Example 3
Source File: TestNameNodeRetryCacheMetrics.java    From big-c with Apache License 2.0
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
 
Example 4
Source File: TestDFSClientFailover.java    From big-c with Apache License 2.0
/**
 * Test to verify legacy proxy providers are correctly wrapped.
 */
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
  // setup the config with the dummy provider class
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      DummyLegacyFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://" + logicalName + "/");

  // do not use the IP address for the token service
  SecurityUtil.setTokenServiceUseIp(false);

  // Logical URI should be used.
  assertTrue("Legacy proxy providers should use logical URI.",
      HAUtil.useLogicalUri(config, p.toUri()));
}
 
Example 5
Source File: TestWebHDFSForHA.java    From hadoop with Apache License 2.0
@Test
public void testMultipleNamespacesConfigured() throws Exception {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();
    DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
    DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");

    fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
    Assert.assertEquals(2, fs.getResolvedNNAddr().length);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 6
Source File: TestNameNodeRetryCacheMetrics.java    From hadoop with Apache License 2.0
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
 
Example 7
Source File: TestDFSClientFailover.java    From hadoop with Apache License 2.0
/**
 * Test to verify legacy proxy providers are correctly wrapped.
 */
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
  // setup the config with the dummy provider class
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      DummyLegacyFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://" + logicalName + "/");

  // do not use the IP address for the token service
  SecurityUtil.setTokenServiceUseIp(false);

  // Logical URI should be used.
  assertTrue("Legacy proxy providers should use logical URI.",
      HAUtil.useLogicalUri(config, p.toUri()));
}
 
Example 8
Source File: TestMover.java    From hadoop with Apache License 2.0
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
 
Example 9
Source File: TestDFSClientFailover.java    From big-c with Apache License 2.0
/**
 * Regression test for HDFS-2683.
 */
@Test
public void testLogicalUriShouldNotHavePorts() {
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  Path p = new Path("hdfs://" + logicalName + ":12345/");
  try {
    p.getFileSystem(config).exists(p);
    fail("Did not fail with fake FS");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "does not use port information", ioe);
  }
}
 
Example 10
Source File: TestWebHDFSForHA.java    From big-c with Apache License 2.0
@Test
public void testSecureHAToken() throws IOException, InterruptedException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.setBoolean(DFSConfigKeys
          .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    fs = spy((WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf));
    FileSystemTestHelper.addFileSystemForTesting(WEBHDFS_URI, conf, fs);

    cluster.transitionToActive(0);
    Token<?> token = fs.getDelegationToken(null);

    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    token.renew(conf);
    token.cancel(conf);
    verify(fs).renewDelegationToken(token);
    verify(fs).cancelDelegationToken(token);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 11
Source File: TestAllowFormat.java    From hadoop with Apache License 2.0
/**
 * Test that format is skipped for a configured directory with a non-file scheme.
 *
 * @throws Exception
 */
@Test
public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
  Configuration conf = new HdfsConfiguration();
  String logicalName = "mycluster";

  // DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify that the NameNode
  // is configured for HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
  // considered.
  String localhost = "127.0.0.1";
  InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
  InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
  HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);

  conf.set(DFS_NAMENODE_NAME_DIR_KEY,
      new File(DFS_BASE_DIR, "name").getAbsolutePath());
  conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
      DummyJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
      + localhost + ":2181/ledgers");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");

  // An internal assert verifies that the test behaves as expected.
  NameNode.format(conf);
}
 
Example 12
Source File: TestDFSClientFailover.java    From hadoop with Apache License 2.0
/**
 * Regression test for HDFS-2683.
 */
@Test
public void testLogicalUriShouldNotHavePorts() {
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  Path p = new Path("hdfs://" + logicalName + ":12345/");
  try {
    p.getFileSystem(config).exists(p);
    fail("Did not fail with fake FS");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "does not use port information", ioe);
  }
}
 
Example 13
Source File: TestWebHDFSForHA.java    From hadoop with Apache License 2.0
@Test
public void testFailoverAfterOpen() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.set(FS_DEFAULT_NAME_KEY, HdfsConstants.HDFS_URI_SCHEME +
      "://" + LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  final Path p = new Path("/test");
  final byte[] data = "Hello".getBytes();

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();

    fs = FileSystem.get(WEBHDFS_URI, conf);
    cluster.transitionToActive(1);

    FSDataOutputStream out = fs.create(p);
    cluster.shutdownNameNode(1);
    cluster.transitionToActive(0);

    out.write(data);
    out.close();
    FSDataInputStream in = fs.open(p);
    byte[] buf = new byte[data.length];
    IOUtils.readFully(in, buf, 0, buf.length);
    Assert.assertArrayEquals(data, buf);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 14
Source File: HdfsTestUtil.java    From lucene-solr with Apache License 2.0
public static Configuration getClientConfiguration(MiniDFSCluster dfsCluster) {
  Configuration conf = getBasicConfiguration(dfsCluster.getConfiguration(0));
  if (dfsCluster.getNumNameNodes() > 1) {
    HATestUtil.setFailoverConfigurations(dfsCluster, conf);
  }
  return conf;
}
 
Example 15
Source File: TestWebHDFSForHA.java    From hadoop with Apache License 2.0
@Test
public void testHA() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();

    fs = FileSystem.get(WEBHDFS_URI, conf);
    cluster.transitionToActive(0);

    final Path dir = new Path("/test");
    Assert.assertTrue(fs.mkdirs(dir));

    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);

    final Path dir2 = new Path("/test2");
    Assert.assertTrue(fs.mkdirs(dir2));
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 16
Source File: TestWebHDFSForHA.java    From hadoop with Apache License 2.0
/**
 * Make sure the WebHdfsFileSystem will retry based on RetriableException when
 * rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
 */
@Test (timeout=120000)
public void testRetryWhileNNStartup() throws Exception {
  final Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  final Map<String, Boolean> resultMap = new HashMap<String, Boolean>();

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();
    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();
    cluster.transitionToActive(0);

    final NameNode namenode = cluster.getNameNode(0);
    final NamenodeProtocols rpcServer = namenode.getRpcServer();
    Whitebox.setInternalState(namenode, "rpcServer", null);

    new Thread() {
      @Override
      public void run() {
        boolean result = false;
        FileSystem fs = null;
        try {
          fs = FileSystem.get(WEBHDFS_URI, conf);
          final Path dir = new Path("/test");
          result = fs.mkdirs(dir);
        } catch (IOException e) {
          result = false;
        } finally {
          IOUtils.cleanup(null, fs);
        }
        synchronized (TestWebHDFSForHA.this) {
          resultMap.put("mkdirs", result);
          TestWebHDFSForHA.this.notifyAll();
        }
      }
    }.start();

    Thread.sleep(1000);
    Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
    synchronized (this) {
      while (!resultMap.containsKey("mkdirs")) {
        this.wait();
      }
      Assert.assertTrue(resultMap.get("mkdirs"));
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 17
Source File: TestBalancerWithHANameNodes.java    From hadoop with Apache License 2.0
/**
 * Test a cluster with an even distribution to which a new empty node is then
 * added. The test starts a cluster with the specified number of nodes, fills
 * it to 30% capacity (with a single file replicated identically to all
 * datanodes), then adds one new empty node and starts balancing.
 */
@Test(timeout = 60000)
public void testBalancerWithHANameNodes() throws Exception {
  Configuration conf = new HdfsConfiguration();
  TestBalancer.initConf(conf);
  long newNodeCapacity = TestBalancer.CAPACITY; // new node's capacity
  String newNodeRack = TestBalancer.RACK2; // new node's rack
  // array of racks for original nodes in cluster
  String[] racks = new String[] { TestBalancer.RACK0, TestBalancer.RACK1 };
  // array of capacities of original nodes in cluster
  long[] capacities = new long[] { TestBalancer.CAPACITY,
      TestBalancer.CAPACITY };
  assertEquals(capacities.length, racks.length);
  int numOfDatanodes = capacities.length;
  NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
  nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
  Configuration copiedConf = new Configuration(conf);
  cluster = new MiniDFSCluster.Builder(copiedConf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(capacities.length)
      .racks(racks)
      .simulatedCapacities(capacities)
      .build();
  HATestUtil.setFailoverConfigurations(cluster, conf);
  try {
    cluster.waitActive();
    cluster.transitionToActive(1);
    Thread.sleep(500);
    client = NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
        ClientProtocol.class).getProxy();
    long totalCapacity = TestBalancer.sum(capacities);
    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster, TestBalancer.filePath, totalUsedSpace
        / numOfDatanodes, (short) numOfDatanodes, 1);

    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(conf, 1, true, null, new String[] { newNodeRack },
        new long[] { newNodeCapacity });
    totalCapacity += newNodeCapacity;
    TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client,
        cluster);
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    assertEquals(1, namenodes.size());
    assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
    TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client,
        cluster, Balancer.Parameters.DEFAULT);
  } finally {
    cluster.shutdown();
  }
}
 
Example 18
Source File: TestBalancerWithHANameNodes.java    From big-c with Apache License 2.0
/**
 * Test a cluster with an even distribution to which a new empty node is then
 * added. The test starts a cluster with the specified number of nodes, fills
 * it to 30% capacity (with a single file replicated identically to all
 * datanodes), then adds one new empty node and starts balancing.
 */
@Test(timeout = 60000)
public void testBalancerWithHANameNodes() throws Exception {
  Configuration conf = new HdfsConfiguration();
  TestBalancer.initConf(conf);
  long newNodeCapacity = TestBalancer.CAPACITY; // new node's capacity
  String newNodeRack = TestBalancer.RACK2; // new node's rack
  // array of racks for original nodes in cluster
  String[] racks = new String[] { TestBalancer.RACK0, TestBalancer.RACK1 };
  // array of capacities of original nodes in cluster
  long[] capacities = new long[] { TestBalancer.CAPACITY,
      TestBalancer.CAPACITY };
  assertEquals(capacities.length, racks.length);
  int numOfDatanodes = capacities.length;
  NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
  nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
  Configuration copiedConf = new Configuration(conf);
  cluster = new MiniDFSCluster.Builder(copiedConf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(capacities.length)
      .racks(racks)
      .simulatedCapacities(capacities)
      .build();
  HATestUtil.setFailoverConfigurations(cluster, conf);
  try {
    cluster.waitActive();
    cluster.transitionToActive(1);
    Thread.sleep(500);
    client = NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
        ClientProtocol.class).getProxy();
    long totalCapacity = TestBalancer.sum(capacities);
    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster, TestBalancer.filePath, totalUsedSpace
        / numOfDatanodes, (short) numOfDatanodes, 1);

    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(conf, 1, true, null, new String[] { newNodeRack },
        new long[] { newNodeCapacity });
    totalCapacity += newNodeCapacity;
    TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client,
        cluster);
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    assertEquals(1, namenodes.size());
    assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
    TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client,
        cluster, Balancer.Parameters.DEFAULT);
  } finally {
    cluster.shutdown();
  }
}
 
Example 19
Source File: TestWebHDFSForHA.java    From big-c with Apache License 2.0
/**
 * Make sure the WebHdfsFileSystem will retry based on RetriableException when
 * rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
 */
@Test (timeout=120000)
public void testRetryWhileNNStartup() throws Exception {
  final Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  final Map<String, Boolean> resultMap = new HashMap<String, Boolean>();

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();
    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();
    cluster.transitionToActive(0);

    final NameNode namenode = cluster.getNameNode(0);
    final NamenodeProtocols rpcServer = namenode.getRpcServer();
    Whitebox.setInternalState(namenode, "rpcServer", null);

    new Thread() {
      @Override
      public void run() {
        boolean result = false;
        FileSystem fs = null;
        try {
          fs = FileSystem.get(WEBHDFS_URI, conf);
          final Path dir = new Path("/test");
          result = fs.mkdirs(dir);
        } catch (IOException e) {
          result = false;
        } finally {
          IOUtils.cleanup(null, fs);
        }
        synchronized (TestWebHDFSForHA.this) {
          resultMap.put("mkdirs", result);
          TestWebHDFSForHA.this.notifyAll();
        }
      }
    }.start();

    Thread.sleep(1000);
    Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
    synchronized (this) {
      while (!resultMap.containsKey("mkdirs")) {
        this.wait();
      }
      Assert.assertTrue(resultMap.get("mkdirs"));
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 20
Source File: TestMiniClusterHadoopNNAWithStreamEngine.java    From NNAnalytics with Apache License 2.0
@BeforeClass
public static void beforeClass() throws Exception {
  RANDOM.nextBytes(TINY_FILE_BYTES);
  RANDOM.nextBytes(SMALL_FILE_BYTES);
  RANDOM.nextBytes(MEDIUM_FILE_BYTES);

  // Speed up editlog tailing.
  CONF.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  CONF.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  CONF.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
  CONF.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  CONF.setBoolean("fs.hdfs.impl.disable.cache", true);
  CONF.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NAMESERVICE);

  MiniQJMHACluster.Builder qjmBuilder = new MiniQJMHACluster.Builder(CONF);
  qjmBuilder.getDfsBuilder().numDataNodes(NUMDATANODES);
  cluster = qjmBuilder.build();
  cluster.getDfsCluster().waitActive();
  cluster.getDfsCluster().transitionToActive(0);

  HATestUtil.setFailoverConfigurations(cluster.getDfsCluster(), CONF, NAMESERVICE, 0);
  CONF.set("dfs.nameservice.id", NAMESERVICE);

  nna = new HadoopWebServerMain();
  nnaConf = new ApplicationConfiguration();
  nnaConf.set("nna.support.bootstrap.overrides", "true");
  nnaConf.set("ldap.enable", "false");
  nnaConf.set("authorization.enable", "false");
  nnaConf.set("nna.historical", "true");
  nnaConf.set("nna.base.dir", MiniDFSCluster.getBaseDirectory());
  nnaConf.set("nna.web.base.dir", "src/main/resources/webapps/nna");
  nnaConf.set("nna.query.engine.impl", JavaStreamQueryEngine.class.getCanonicalName());
  nna.init(nnaConf, null, CONF);
  hostPort = new HttpHost("localhost", 4567);
  client = new DefaultHttpClient();

  // Fetch NNA Namespace.
  HttpGet fetch = new HttpGet("http://localhost:4567/fetchNamespace");
  HttpResponse fetchRes = client.execute(hostPort, fetch);
  assertThat(fetchRes.getStatusLine().getStatusCode(), is(200));
  IOUtils.readLines(fetchRes.getEntity().getContent());

  // Reload NNA Namespace.
  HttpGet reload = new HttpGet("http://localhost:4567/reloadNamespace");
  HttpResponse reloadRes = client.execute(hostPort, reload);
  assertThat(reloadRes.getStatusLine().getStatusCode(), is(200));
  IOUtils.readLines(reloadRes.getEntity().getContent());
}