Java Code Examples for org.apache.hadoop.hdfs.MiniDFSNNTopology

The following examples show how to use org.apache.hadoop.hdfs.MiniDFSNNTopology. The examples are extracted from open source projects; the originating project and source file are noted above each example.
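All of the examples feed a MiniDFSNNTopology into MiniDFSCluster.Builder#nnTopology to control how many nameservices and NameNodes the mini cluster starts. As a quick orientation, here is a minimal sketch (assuming the usual org.apache.hadoop.hdfs and org.apache.hadoop.conf imports; the port numbers are placeholders) of the three construction styles that recur below:

private static MiniDFSCluster startSketchCluster(Configuration conf)
    throws IOException {
  // HA: one nameservice with two NameNodes. Ports are left at 0, so the
  // mini cluster binds free ephemeral ports.
  MiniDFSNNTopology ha = MiniDFSNNTopology.simpleHATopology();

  // Federation: three independent nameservices ("ns1".."ns3"), one NN each.
  MiniDFSNNTopology federated = MiniDFSNNTopology.simpleFederatedTopology(3);

  // Hand-built: explicit nameservice and NameNode IDs with fixed IPC ports,
  // used when the NNs must know each other's addresses up front.
  MiniDFSNNTopology custom = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
          .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10031))
          .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10032)));

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(ha)  // or federated / custom
      .numDataNodes(1)
      .build();
  cluster.waitActive();
  return cluster;
}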
Example 1
Source Project: hadoop   Source File: TestWebHdfsWithMultipleNameNodes.java    License: Apache License 2.0
private static void setupCluster(final int nNameNodes, final int nDataNodes)
    throws Exception {
  LOG.info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);

  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
      .numDataNodes(nDataNodes)
      .build();
  cluster.waitActive();
  
  webhdfs = new WebHdfsFileSystem[nNameNodes];
  for(int i = 0; i < webhdfs.length; i++) {
    final InetSocketAddress addr = cluster.getNameNode(i).getHttpAddress();
    final String uri = WebHdfsFileSystem.SCHEME  + "://"
        + addr.getHostName() + ":" + addr.getPort() + "/";
    webhdfs[i] = (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
  }
}
 
Example 2
Source Project: big-c   Source File: TestInitializeSharedEdits.java    License: Apache License 2.0
@Before
public void setupCluster() throws IOException {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  
  MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
  
  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(0)
    .build();
  cluster.waitActive();

  shutdownClusterAndRemoveSharedEditsDir();
}
 
Example 3
Source Project: hadoop   Source File: TestRetryCacheWithHA.java    License: Apache License 2.0
@Before
public void setup() throws Exception {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, ResponseSize);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, ResponseSize);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(DataNodes).build();
  cluster.waitActive();
  cluster.transitionToActive(0);
  // setup the configuration
  HATestUtil.setFailoverConfigurations(cluster, conf);
  dfs = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
}
 
Example 4
Source Project: hadoop   Source File: TestHarFileSystemWithHA.java    License: Apache License 2.0
/**
 * Test that the HarFileSystem works with underlying HDFS URIs that have no
 * port specified, as is often the case with an HA setup.
 */
@Test
public void testHarUriWithHaUriWithNoPort() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .build();
    cluster.transitionToActive(0);
    HATestUtil.setFailoverConfigurations(cluster, conf);
    
    createEmptyHarArchive(HATestUtil.configureFailoverFs(cluster, conf),
        TEST_HAR_PATH);
    
    URI failoverUri = FileSystem.getDefaultUri(conf);
    Path p = new Path("har://hdfs-" + failoverUri.getAuthority() + TEST_HAR_PATH);
    p.getFileSystem(conf);
  } finally {
    // Guard against an NPE if cluster construction itself failed.
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
}
 
Example 5
Source Project: hadoop   Source File: TestHASafeMode.java    License: Apache License 2.0
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(3)
    .waitSafeMode(false)
    .build();
  cluster.waitActive();
  
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
 
Example 6
private MiniDFSCluster initMiniHACluster(int nn1port, int nn2port)
    throws IOException {
  Configuration confForMiniDFS = new Configuration();
  
  Builder builder = new MiniDFSCluster.Builder(confForMiniDFS)
      .nnTopology(new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(nn1port))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(nn2port))))
      .numDataNodes(1);
  
  MiniDFSCluster cluster = builder.build();
  cluster.waitActive();

  NameNode nnode1 = cluster.getNameNode(0);
  assertTrue(nnode1.isStandbyState());
  NameNode nnode2 = cluster.getNameNode(1);
  assertTrue(nnode2.isStandbyState());

  cluster.transitionToActive(0);
  assertFalse(nnode1.isStandbyState());
  return cluster;
}
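Note: the hand-built topology above pins the NameNode IPC ports; with the simpleHATopology() helper the ports default to 0 and MiniDFSCluster binds free ephemeral ports instead.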
 
Example 7
Source Project: hadoop   Source File: TestEditLogTailer.java    License: Apache License 2.0
private static void testStandbyTriggersLogRolls(int activeIndex)
    throws Exception {
  Configuration conf = new Configuration();
  // Roll every 1s
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  
  // Have to specify IPC ports so the NNs can talk to each other.
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
      .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10031))
      .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10032)));

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(0)
    .build();
  try {
    cluster.transitionToActive(activeIndex);
    waitForLogRollInSharedDir(cluster, 3);
  } finally {
    cluster.shutdown();
  }
}
 
Example 8
Source Project: hadoop   Source File: TestQuotasWithHA.java    License: Apache License 2.0
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  HAUtil.setAllowStandbyReads(conf, true);
  
  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .waitSafeMode(false)
    .build();
  cluster.waitActive();
  
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);
  
  cluster.transitionToActive(0);
}
 
Example 9
Source Project: hadoop   Source File: TestNameNodeRetryCacheMetrics.java    License: Apache License 2.0
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
 
Example 10
Source Project: big-c   Source File: TestBootstrapStandby.java    License: Apache License 2.0
@Before
public void setupCluster() throws IOException {
  Configuration conf = new Configuration();

  MiniDFSNNTopology topology = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
      .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(20001))
      .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(20002)));
  
  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(0)
    .build();
  cluster.waitActive();
  
  nn0 = cluster.getNameNode(0);
  cluster.transitionToActive(0);
  cluster.shutdownNameNode(1);
}
 
Example 11
Source Project: big-c   Source File: TestRetryCacheWithHA.java    License: Apache License 2.0
@Before
public void setup() throws Exception {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, ResponseSize);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, ResponseSize);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(DataNodes).build();
  cluster.waitActive();
  cluster.transitionToActive(0);
  // setup the configuration
  HATestUtil.setFailoverConfigurations(cluster, conf);
  dfs = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
}
 
Example 12
Source Project: hadoop   Source File: TestBootstrapStandbyWithBKJM.java    License: Apache License 2.0
@Before
public void setUp() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
      .createJournalURI("/bootstrapStandby").toString());
  BKJMUtil.addJournalManagerDefinition(conf);
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
      SlowCodec.class.getCanonicalName());
  CompressionCodecFactory.setCodecClasses(conf,
      ImmutableList.<Class> of(SlowCodec.class));
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(
          new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001)).addNN(
          new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
  cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
      .numDataNodes(1).manageNameDfsSharedDirs(false).build();
  cluster.waitActive();
}
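Note: manageNameDfsSharedDirs(false) stops MiniDFSCluster from creating and managing its own shared-edits directory; the shared edits location is supplied here through dfs.namenode.shared.edits.dir, which points at a BookKeeper journal URI.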
 
Example 13
Source Project: big-c   Source File: TestWebHdfsWithMultipleNameNodes.java    License: Apache License 2.0
private static void setupCluster(final int nNameNodes, final int nDataNodes)
    throws Exception {
  LOG.info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);

  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
      .numDataNodes(nDataNodes)
      .build();
  cluster.waitActive();
  
  webhdfs = new WebHdfsFileSystem[nNameNodes];
  for(int i = 0; i < webhdfs.length; i++) {
    final InetSocketAddress addr = cluster.getNameNode(i).getHttpAddress();
    final String uri = WebHdfsFileSystem.SCHEME  + "://"
        + addr.getHostName() + ":" + addr.getPort() + "/";
    webhdfs[i] = (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
  }
}
 
Example 14
Source Project: big-c   Source File: TestMover.java    License: Apache License 2.0
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
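Note: HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster") wires the logical nameservice "MyCluster" (including its failover proxy provider) into conf, which is why the Mover CLI resolves the single logical URI hdfs://MyCluster rather than a physical NameNode address.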
 
Example 15
Source Project: big-c   Source File: TestEditLogTailer.java    License: Apache License 2.0
private static void testStandbyTriggersLogRolls(int activeIndex)
    throws Exception {
  Configuration conf = new Configuration();
  // Roll every 1s
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  
  // Have to specify IPC ports so the NNs can talk to each other.
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
      .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10031))
      .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10032)));

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(0)
    .build();
  try {
    cluster.transitionToActive(activeIndex);
    waitForLogRollInSharedDir(cluster, 3);
  } finally {
    cluster.shutdown();
  }
}
 
Example 16
Source Project: hadoop   Source File: TestViewFsWithAcls.java    License: Apache License 2.0
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
  clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(clusterConf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .numDataNodes(2)
      .build();
  cluster.waitClusterUp();

  fc = FileContext.getFileContext(cluster.getURI(0), clusterConf);
  fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
}
 
Example 17
Source Project: hadoop   Source File: TestViewFsWithXAttrs.java    License: Apache License 2.0
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
  cluster = new MiniDFSCluster.Builder(clusterConf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .numDataNodes(2)
      .build();
  cluster.waitClusterUp();

  fc = FileContext.getFileContext(cluster.getURI(0), clusterConf);
  fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
}
 
Example 18
Source Project: hadoop   Source File: TestViewFileSystemHdfs.java    License: Apache License 2.0
@BeforeClass
public static void clusterSetupAtBegining() throws IOException,
    LoginException, URISyntaxException {
  SupportsBlocks = true;
  CONF.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  
  cluster =
      new MiniDFSCluster.Builder(CONF).nnTopology(
              MiniDFSNNTopology.simpleFederatedTopology(2))
          .numDataNodes(2)
          .build();
  cluster.waitClusterUp();
  
  fHdfs = cluster.getFileSystem(0);
  fHdfs2 = cluster.getFileSystem(1);
  fHdfs.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
      FsConstants.VIEWFS_URI.toString());
  fHdfs2.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
      FsConstants.VIEWFS_URI.toString());

  defaultWorkingDirectory = fHdfs.makeQualified( new Path("/user/" + 
      UserGroupInformation.getCurrentUser().getShortUserName()));
  defaultWorkingDirectory2 = fHdfs2.makeQualified( new Path("/user/" + 
      UserGroupInformation.getCurrentUser().getShortUserName()));
  
  fHdfs.mkdirs(defaultWorkingDirectory);
  fHdfs2.mkdirs(defaultWorkingDirectory2);
}
 
Example 19
Source Project: hadoop   Source File: TestViewFileSystemWithAcls.java    License: Apache License 2.0
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
  clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(clusterConf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .numDataNodes(2)
      .build();
  cluster.waitClusterUp();

  fHdfs = cluster.getFileSystem(0);
  fHdfs2 = cluster.getFileSystem(1);
}
 
Example 20
Source Project: big-c   Source File: TestDataNodeExit.java    License: Apache License 2.0
@Before
public void setUp() throws IOException {
  conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 100);
  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
    .build();
  for (int i = 0; i < 3; i++) {
    cluster.waitActive(i);
  }
}
 
Example 21
Source Project: big-c   Source File: TestBookKeeperHACheckpoints.java    License: Apache License 2.0
@SuppressWarnings("rawtypes")
@Override
@Before
public void setupCluster() throws Exception {
  Configuration conf = setupCommonConfig();
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
           BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
           .toString());
  BKJMUtil.addJournalManagerDefinition(conf);
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
      .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
      .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(1)
    .manageNameDfsSharedDirs(false)
    .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
 
Example 22
Source Project: big-c   Source File: TestStandbyCheckpoints.java    License: Apache License 2.0
@SuppressWarnings("rawtypes")
@Before
public void setupCluster() throws Exception {
  Configuration conf = setupCommonConfig();

  // Dial down the retention of extra edits and checkpoints. This is to
  // help catch regressions of HDFS-4238 (SBN should not purge shared edits)
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);

  int retryCount = 0;
  while (true) {
    try {
      int basePort = 10060 + random.nextInt(100) * 2;
      MiniDFSNNTopology topology = new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));

      cluster = new MiniDFSCluster.Builder(conf)
          .nnTopology(topology)
          .numDataNodes(1)
          .build();
      cluster.waitActive();

      nn0 = cluster.getNameNode(0);
      nn1 = cluster.getNameNode(1);
      fs = HATestUtil.configureFailoverFs(cluster, conf);

      cluster.transitionToActive(0);
      ++retryCount;
      break;
    } catch (BindException e) {
      LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
          + retryCount + " times");
    }
  }
}
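Note: because this topology pins HTTP ports, cluster startup can fail if a port is already in use; the loop retries with a fresh random base port whenever MiniDFSCluster construction throws a BindException.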
 
Example 23
Source Project: hadoop   Source File: TestNNHealthCheck.java    License: Apache License 2.0
@Test
public void testNNHealthCheckWithLifelineAddress() throws IOException {
  // Enable the lifeline RPC server on an ephemeral port, which is what
  // this test exercises.
  conf.set(DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY, "0.0.0.0:0");
  cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .build();
  doNNHealthCheckTest();
}
 
Example 24
Source Project: big-c   Source File: TestBlockScanner.java    License: Apache License 2.0
TestContext(Configuration conf, int numNameServices) throws Exception {
  this.numNameServices = numNameServices;
  MiniDFSCluster.Builder bld = new MiniDFSCluster.Builder(conf).
      numDataNodes(1).
      storagesPerDatanode(1);
  if (numNameServices > 1) {
    bld.nnTopology(MiniDFSNNTopology.
          simpleFederatedTopology(numNameServices));
  }
  cluster = bld.build();
  cluster.waitActive();
  dfs = new DistributedFileSystem[numNameServices];
  for (int i = 0; i < numNameServices; i++) {
    dfs[i] = cluster.getFileSystem(i);
  }
  bpids = new String[numNameServices];
  for (int i = 0; i < numNameServices; i++) {
    bpids[i] = cluster.getNamesystem(i).getBlockPoolId();
  }
  datanode = cluster.getDataNodes().get(0);
  blockScanner = datanode.getBlockScanner();
  for (int i = 0; i < numNameServices; i++) {
    dfs[i].mkdirs(new Path("/test"));
  }
  data = datanode.getFSDataset();
  volumes = data.getVolumes();
}
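Note: with a federated topology each nameservice gets its own block pool on the single shared DataNode, which is why the constructor collects one DistributedFileSystem and one block pool ID per nameservice.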
 
Example 25
Source Project: big-c   Source File: TestDNFencing.java    License: Apache License 2.0
@Before
public void setupCluster() throws Exception {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK);
  // Bump up replication interval so that we only run replication
  // checks explicitly.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 600);
  // Increase max streams so that we re-replicate quickly.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 1000);
  // See RandomDeleterPolicy javadoc.
  conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
      RandomDeleterPolicy.class, BlockPlacementPolicy.class); 
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(3)
    .build();
  nn1 = cluster.getNameNode(0);
  nn2 = cluster.getNameNode(1);
  
  cluster.waitActive();
  cluster.transitionToActive(0);
  // Trigger block reports so that the first NN trusts all
  // of the DNs, and will issue deletions
  cluster.triggerBlockReports();
  fs = HATestUtil.configureFailoverFs(cluster, conf);
}
 
Example 26
Source Project: big-c   Source File: TestBookKeeperAsHASharedDir.java    License: Apache License 2.0
/**
 * NameNode should load the edits correctly if the applicable edits are
 * present in the BKJM.
 */
@Test
public void testNameNodeMultipleSwitchesUsingBKJM() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
        .createJournalURI("/correctEditLogSelection").toString());
    BKJMUtil.addJournalManagerDefinition(conf);

    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
        .manageNameDfsSharedDirs(false).build();
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    cluster.waitActive();
    cluster.transitionToActive(0);
    nn1.getRpcServer().rollEditLog(); // Roll Edits from current Active.
    // Transition to standby current active gracefully.
    cluster.transitionToStandby(0);
    // Make the other Active and Roll edits multiple times
    cluster.transitionToActive(1);
    nn2.getRpcServer().rollEditLog();
    nn2.getRpcServer().rollEditLog();
    // Now One more failover. So NN1 should be able to failover successfully.
    cluster.transitionToStandby(1);
    cluster.transitionToActive(0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 27
Source Project: hadoop   Source File: TestStandbyCheckpoints.java    License: Apache License 2.0
@SuppressWarnings("rawtypes")
@Before
public void setupCluster() throws Exception {
  Configuration conf = setupCommonConfig();

  // Dial down the retention of extra edits and checkpoints. This is to
  // help catch regressions of HDFS-4238 (SBN should not purge shared edits)
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);

  int retryCount = 0;
  while (true) {
    try {
      int basePort = 10060 + random.nextInt(100) * 2;
      MiniDFSNNTopology topology = new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));

      cluster = new MiniDFSCluster.Builder(conf)
          .nnTopology(topology)
          .numDataNodes(1)
          .build();
      cluster.waitActive();

      nn0 = cluster.getNameNode(0);
      nn1 = cluster.getNameNode(1);
      fs = HATestUtil.configureFailoverFs(cluster, conf);

      cluster.transitionToActive(0);
      ++retryCount;
      break;
    } catch (BindException e) {
      LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
          + retryCount + " times");
    }
  }
}
 
Example 28
Source Project: hadoop   Source File: TestGetGroupsWithHA.java    License: Apache License 2.0
@Before
public void setUpNameNode() throws IOException {
  conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf);
}
 
Example 29
Source Project: hadoop   Source File: HAStressTestHarness.java    License: Apache License 2.0
/**
 * Start and return the MiniDFSCluster.
 */
public MiniDFSCluster startCluster() throws IOException {
  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(3)
    .build();
  return cluster;
}
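 
A closing remark rather than another extracted example: most of the @Before methods above store the MiniDFSCluster in a field, and the corresponding tests shut it down afterwards. A minimal teardown sketch (not taken from any of the projects above) looks like this:

@After
public void shutdownCluster() {
  // Stop NameNodes and DataNodes and release their ports.
  if (cluster != null) {
    cluster.shutdown();
    cluster = null;
  }
}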