Java Code Examples for org.apache.hadoop.hdfs.MiniDFSCluster#getConfiguration()

The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster#getConfiguration(). Each example is taken from a real open-source project; the source file and its license are noted above each snippet.
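For orientation, here is a minimal self-contained sketch, written for this page rather than taken from any of the projects below (the class name is illustrative). It shows the pattern the examples share: MiniDFSCluster binds its NameNode ports dynamically, so tests typically call getConfiguration(0) to obtain the first namenode's live Configuration, with fs.defaultFS pointing at the running mini cluster, instead of reusing the Configuration originally passed to the builder.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsGetConfigurationSketch {
  public static void main(String[] args) throws IOException {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(1).build();
    try {
      cluster.waitActive();
      // The argument is the namenode index; 0 selects the first (here, only) one.
      // The returned conf carries the cluster's dynamically assigned addresses.
      Configuration conf = cluster.getConfiguration(0);
      FileSystem fs = FileSystem.get(conf);
      fs.mkdirs(new Path("/example"));
    } finally {
      cluster.shutdown();
    }
  }
}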
Example 1
Source File: TestHdfsHelper.java    From hadoop with Apache License 2.0
@Override
public void evaluate() throws Throwable {
  MiniDFSCluster miniHdfs = null;
  Configuration conf = HadoopUsersConfTestHelper.getBaseConf();
  if (Boolean.parseBoolean(System.getProperty(HADOOP_MINI_HDFS, "true"))) {
    miniHdfs = startMiniHdfs(conf);
    conf = miniHdfs.getConfiguration(0);
  }
  try {
    HDFS_CONF_TL.set(conf);
    HDFS_TEST_DIR_TL.set(resetHdfsTestDir(conf));
    statement.evaluate();
  } finally {
    HDFS_CONF_TL.remove();
    HDFS_TEST_DIR_TL.remove();
  }
}
 
Example 2
Source File: TestHistoryServerFileSystemStateStoreService.java    From hadoop with Apache License 2.0
@Test
public void testTokenStoreHdfs() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  conf = cluster.getConfiguration(0);
  try {
    testTokenStore("/tmp/historystore");
  } finally {
    cluster.shutdown();
  }
}
 
Example 3
Source File: TestMover.java    From hadoop with Apache License 2.0
/**
 * Test the Mover CLI by specifying a list of files/directories using the
 * "-p" option. Only one namenode (and hence one name service) is specified
 * in the conf.
 */
@Test
public void testMoverCli() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration()).numDataNodes(0).build();
  try {
    final Configuration conf = cluster.getConfiguration(0);
    try {
      Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "bar");
      Assert.fail("Expected exception for illegal path bar");
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("bar is not absolute", e);
    }

    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf);
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertTrue(movePaths.containsKey(nn));
    Assert.assertNull(movePaths.get(nn));

    movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar");
    namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, movePaths.size());
    nn = namenodes.iterator().next();
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
 
Example 4
Source File: TestCheckpoint.java    From hadoop with Apache License 2.0
/**
 * Starts two namenodes and two secondary namenodes, verifies that secondary
 * namenodes are configured correctly to talk to their respective namenodes
 * and can do the checkpoint.
 * 
 * @throws IOException
 */
@Test
public void testMultipleSecondaryNamenodes() throws IOException {
  Configuration conf = new HdfsConfiguration();
  String nameserviceId1 = "ns1";
  String nameserviceId2 = "ns2";
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, nameserviceId1
      + "," + nameserviceId2);
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary1 = null;
  SecondaryNameNode secondary2 = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
            conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
        .build();
    Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
    Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));
    InetSocketAddress nn1RpcAddress = cluster.getNameNode(0)
        .getNameNodeAddress();
    InetSocketAddress nn2RpcAddress = cluster.getNameNode(1)
        .getNameNodeAddress();
    String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
    String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();

    // Set the Service Rpc address to empty to make sure the node specific
    // setting works
    snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
    snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");

    // Set the nameserviceIds
    snConf1.set(DFSUtil.addKeySuffixes(
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId1),
        nn1);
    snConf2.set(DFSUtil.addKeySuffixes(
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId2),
        nn2);

    secondary1 = startSecondaryNameNode(snConf1);
    secondary2 = startSecondaryNameNode(snConf2);

    // make sure the two secondary namenodes are talking to the correct namenodes.
    assertEquals(secondary1.getNameNodeAddress().getPort(),
        nn1RpcAddress.getPort());
    assertEquals(secondary2.getNameNodeAddress().getPort(),
        nn2RpcAddress.getPort());
    assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2
        .getNameNodeAddress().getPort());

    // both should checkpoint.
    secondary1.doCheckpoint();
    secondary2.doCheckpoint();
  } finally {
    cleanup(secondary1);
    secondary1 = null;
    cleanup(secondary2);
    secondary2 = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Example 5
Source File: TestDeleteBlockPool.java    From hadoop with Apache License 2.0
@Test
public void testDfsAdminDeleteBlockPool() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    conf.set(DFSConfigKeys.DFS_NAMESERVICES,
        "namesServerId1,namesServerId2");
    cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
          conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
      .numDataNodes(1).build();

    cluster.waitActive();

    FileSystem fs1 = cluster.getFileSystem(0);
    FileSystem fs2 = cluster.getFileSystem(1);

    DFSTestUtil.createFile(fs1, new Path("/alpha"), 1024, (short) 1, 54);
    DFSTestUtil.createFile(fs2, new Path("/beta"), 1024, (short) 1, 54);

    DataNode dn1 = cluster.getDataNodes().get(0);

    String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
    String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
    
    File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
    File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
    
    Configuration nn1Conf = cluster.getConfiguration(0);
    nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
    dn1.refreshNamenodes(nn1Conf);
    assertEquals(1, dn1.getAllBpOs().length);
    
    DFSAdmin admin = new DFSAdmin(nn1Conf);
    String dn1Address = dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort();
    String[] args = { "-deleteBlockPool", dn1Address, bpid2 };
    
    int ret = admin.run(args);
    assertFalse(0 == ret);

    verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
    verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
    
    String[] forceArgs = { "-deleteBlockPool", dn1Address, bpid2, "force" };
    ret = admin.run(forceArgs);
    assertEquals(0, ret);
    
    verifyBlockPoolDirectories(false, dn1StorageDir1, bpid2);
    verifyBlockPoolDirectories(false, dn1StorageDir2, bpid2);
    
    //bpid1 remains good
    verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
    verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
    
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
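As the test demonstrates, this is the programmatic form of the shell command hdfs dfsadmin -deleteBlockPool datanode-host:ipc-port blockPoolId [force]: without the trailing force argument the command refuses to remove a non-empty block pool directory (hence the non-zero return code above), while force deletes the directory together with its contents.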
 
Example 6
Source File: TestDataNodeMultipleRegistrations.java    From hadoop with Apache License 2.0
@Test
public void testDNWithInvalidStorageWithHA() throws Exception {
  MiniDFSNNTopology top = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
      .addNN(new MiniDFSNNTopology.NNConf("nn0").setClusterId("cluster-1"))
      .addNN(new MiniDFSNNTopology.NNConf("nn1").setClusterId("cluster-1")));

  top.setFederation(true);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(top)
      .numDataNodes(0).build();
  try {
    cluster.startDataNodes(conf, 1, true, null, null);
    // give the initialization time to complete
    Thread.sleep(10000);
    DataNode dn = cluster.getDataNodes().get(0);
    assertTrue("Datanode should be running", dn.isDatanodeUp());
    assertEquals("BPOfferService should be running", 1,
        dn.getAllBpOs().length);
    DataNodeProperties dnProp = cluster.stopDataNode(0);

    cluster.getNameNode(0).stop();
    cluster.getNameNode(1).stop();
    Configuration nn1 = cluster.getConfiguration(0);
    Configuration nn2 = cluster.getConfiguration(1);
    // setting up invalid cluster
    StartupOption.FORMAT.setClusterId("cluster-2");
    DFSTestUtil.formatNameNode(nn1);
    MiniDFSCluster.copyNameDirs(FSNamesystem.getNamespaceDirs(nn1),
        FSNamesystem.getNamespaceDirs(nn2), nn2);
    cluster.restartNameNode(0, false);
    cluster.restartNameNode(1, false);
    cluster.restartDataNode(dnProp);
    
    // give the initialization time to complete
    Thread.sleep(10000);
    dn = cluster.getDataNodes().get(0);
    assertFalse("Datanode should have shutdown as only service failed",
        dn.isDatanodeUp());
  } finally {
    cluster.shutdown();
  }
}
 
Example 7
Source File: TestBookKeeperAsHASharedDir.java    From hadoop with Apache License 2.0
/**
 * Use the NameNode INITIALIZESHAREDEDITS startup option to initialize the
 * shared edits, i.e. copy the existing edit log segments to the new BKJM
 * shared edits directory.
 *
 * @throws Exception
 */
@Test
public void testInitializeBKSharedEdits() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    HAUtil.setAllowStandbyReads(conf, true);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);

    MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
        .numDataNodes(0).build();
    cluster.waitActive();
    // Shut down the namenodes and clear the current file-based shared dir.
    cluster.shutdownNameNodes();
    File shareddir = new File(cluster.getSharedEditsDir(0, 1));
    assertTrue("Initial Shared edits dir not fully deleted",
        FileUtil.fullyDelete(shareddir));

    // The namenodes should not be able to start without the shared dir.
    assertCanNotStartNamenode(cluster, 0);
    assertCanNotStartNamenode(cluster, 1);

    // Configure bkjm as new shared edits dir in both namenodes
    Configuration nn1Conf = cluster.getConfiguration(0);
    Configuration nn2Conf = cluster.getConfiguration(1);
    nn1Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
        .createJournalURI("/initializeSharedEdits").toString());
    nn2Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
        .createJournalURI("/initializeSharedEdits").toString());
    BKJMUtil.addJournalManagerDefinition(nn1Conf);
    BKJMUtil.addJournalManagerDefinition(nn2Conf);

    // Initialize the BKJM shared edits.
    assertFalse(NameNode.initializeSharedEdits(nn1Conf));

    // NameNode should be able to start and should be in sync with BKJM as
    // shared dir
    assertCanStartHANameNodes(cluster, conf, "/testBKJMInitialize");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 8
Source File: MiniCluster.java    From spork with Apache License 2.0
@Override
protected void setupMiniDfsAndMrClusters() {
    try {
        final int dataNodes = 4;     // There will be 4 data nodes
        final int taskTrackers = 4;  // There will be 4 task tracker nodes

        System.setProperty("hadoop.log.dir", "build/test/logs");
        // Create the dir that holds hadoop-site.xml file
        // Delete if hadoop-site.xml exists already
        CONF_DIR.mkdirs();
        if(CONF_FILE.exists()) {
            CONF_FILE.delete();
        }

        // Builds and starts the mini dfs and mapreduce clusters
        Configuration config = new Configuration();
        config.set("yarn.scheduler.capacity.root.queues", "default");
        config.set("yarn.scheduler.capacity.root.default.capacity", "100");
        m_dfs = new MiniDFSCluster(config, dataNodes, true, null);
        m_fileSys = m_dfs.getFileSystem();
        m_dfs_conf = m_dfs.getConfiguration(0);

        //Create user home directory
        m_fileSys.mkdirs(m_fileSys.getWorkingDirectory());

        m_mr = new MiniMRYarnCluster("PigMiniCluster", taskTrackers);
        m_mr.init(m_dfs_conf);
        m_mr.start();

        // Write the necessary config info to hadoop-site.xml
        m_mr_conf = new Configuration(m_mr.getConfig());

        m_conf = m_mr_conf;
        m_conf.set("fs.default.name", m_dfs_conf.get("fs.default.name"));
        m_conf.unset(MRConfiguration.JOB_CACHE_FILES);

        m_conf.setInt(MRConfiguration.IO_SORT_MB, 200);
        m_conf.set(MRConfiguration.CHILD_JAVA_OPTS, "-Xmx512m");

        m_conf.setInt(MRConfiguration.SUMIT_REPLICATION, 2);
        m_conf.setInt(MRConfiguration.MAP_MAX_ATTEMPTS, 2);
        m_conf.setInt(MRConfiguration.REDUCE_MAX_ATTEMPTS, 2);
        m_conf.set("dfs.datanode.address", "0.0.0.0:0");
        m_conf.set("dfs.datanode.http.address", "0.0.0.0:0");
        m_conf.set("pig.jobcontrol.sleep", "100");
        m_conf.writeXml(new FileOutputStream(CONF_FILE));
        m_fileSys.copyFromLocalFile(new Path(CONF_FILE.getAbsoluteFile().toString()),
                new Path("/pigtest/conf/hadoop-site.xml"));
        DistributedCache.addFileToClassPath(new Path("/pigtest/conf/hadoop-site.xml"), m_conf);

        System.err.println("XXX: Setting fs.default.name to: " + m_dfs_conf.get("fs.default.name"));
        // Set the system properties needed by Pig
        System.setProperty("cluster", m_conf.get(MRConfiguration.JOB_TRACKER));
        //System.setProperty("namenode", m_dfs_conf.get("fs.default.name"));
        System.setProperty("namenode", m_conf.get("fs.default.name"));
        System.setProperty("junit.hadoop.conf", CONF_DIR.getPath());
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
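The snippet above only sets the clusters up; the original class presumably tears them down elsewhere. As a rough sketch of the corresponding cleanup (the method name and field access are assumptions for illustration, not part of the original file), shutdown might look like this:

protected void shutdownMiniDfsAndMrClusters() {
    // Hypothetical companion to setupMiniDfsAndMrClusters(); names assumed.
    try {
        if (m_fileSys != null) {
            m_fileSys.close();
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
    if (m_mr != null) {
        m_mr.stop();       // MiniMRYarnCluster is a Hadoop Service
        m_mr = null;
    }
    if (m_dfs != null) {
        m_dfs.shutdown();  // stops the namenode and datanodes
        m_dfs = null;
    }
}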