Java Code Examples for org.apache.hadoop.hdfs.MiniDFSCluster#getSharedEditsDir()

The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster#getSharedEditsDir(). Each example comes from an open source project; the line above each example gives the original source file, project, and license.
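Before the project examples, here is a minimal, self-contained sketch of the method in context (not taken from any project below; the class name SharedEditsDirExample is made up): build a two-NameNode HA mini cluster and ask it for the shared edits directory used by the NN pair.

import java.io.File;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;

public class SharedEditsDirExample {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster = null;
    try {
      // A two-NameNode HA topology; the builder provisions a file-based
      // shared edits directory for the pair automatically.
      cluster = new MiniDFSCluster.Builder(new Configuration())
          .nnTopology(MiniDFSNNTopology.simpleHATopology())
          .numDataNodes(0)
          .build();

      // getSharedEditsDir(minNN, maxNN) returns the URI of the shared
      // edits dir configured for NameNodes minNN through maxNN.
      URI sharedUri = cluster.getSharedEditsDir(0, 1);
      File sharedDir = new File(sharedUri.getPath());
      System.out.println("Shared edits dir: " + sharedDir);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}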
Example 1
Source File: TestEditLogTailer.java    From hadoop with Apache License 2.0
private static void waitForLogRollInSharedDir(MiniDFSCluster cluster,
    long startTxId) throws Exception {
  URI sharedUri = cluster.getSharedEditsDir(0, 1);
  File sharedDir = new File(sharedUri.getPath(), "current");
  final File expectedLog = new File(sharedDir,
      NNStorage.getInProgressEditsFileName(startTxId));
  
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return expectedLog.exists();
    }
  }, 100, 10000);
}
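On Java 8 and later the anonymous Supplier above collapses into a lambda. A sketch of the equivalent helper, assuming the same expectedLog file and org.apache.hadoop.test.GenericTestUtils (the method name waitForLogRoll is made up):

private static void waitForLogRoll(final File expectedLog) throws Exception {
  // Poll every 100 ms for the in-progress edits file; time out after 10 s.
  GenericTestUtils.waitFor(() -> expectedLog.exists(), 100, 10000);
}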
 
Example 2
Source File: TestDFSUpgradeWithHA.java    From hadoop with Apache License 2.0
/**
 * Ensure that an admin cannot finalize an HA upgrade without at least one NN
 * being active.
 */
@Test
public void testCannotFinalizeIfNoActive() throws IOException,
    URISyntaxException {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();

    File sharedDir = new File(cluster.getSharedEditsDir(0, 1));
    
    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkPreviousDirExistence(sharedDir, false);
    
    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    
    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkPreviousDirExistence(sharedDir, true);
    
    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));
    
    // Restart NN0 without the -upgrade flag, to make sure that works.
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
    cluster.restartNameNode(0, false);
    
    // Make sure we can still do FS ops after upgrading.
    cluster.transitionToActive(0);
    assertTrue(fs.mkdirs(new Path("/foo3")));
    
    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    
    // Now restart NN1 and make sure that we can do ops against that as well.
    cluster.restartNameNode(1);
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    assertTrue(fs.mkdirs(new Path("/foo4")));
    
    assertCTimesEqual(cluster);
    
    // Now there's no active NN.
    cluster.transitionToStandby(1);

    try {
      runFinalizeCommand(cluster);
      fail("Should not have been able to finalize upgrade with no NN active");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "Cannot finalize with no NameNode active", ioe);
    }
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
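runFinalizeCommand is a private helper of TestDFSUpgradeWithHA that is not shown on this page. A plausible sketch of its shape, assuming the real HATestUtil and DFSAdmin classes and the test's conf field (the body in the Hadoop source may differ):

private void runFinalizeCommand(MiniDFSCluster cluster) throws IOException {
  // Point client failover config at the mini cluster's nameservice, then
  // issue the equivalent of `hdfs dfsadmin -finalizeUpgrade`.
  HATestUtil.setFailoverConfigurations(cluster, conf);
  new DFSAdmin(conf).finalizeUpgrade();
}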
 
Example 3
Source File: TestDFSUpgradeWithHA.java    From hadoop with Apache License 2.0
/**
 * Make sure that an HA NN with NFS-based HA can successfully start and
 * upgrade.
 */
@Test
public void testNfsUpgrade() throws IOException, URISyntaxException {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();
    
    File sharedDir = new File(cluster.getSharedEditsDir(0, 1));
    
    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkPreviousDirExistence(sharedDir, false);
    
    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    
    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkPreviousDirExistence(sharedDir, true);
    
    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));
    
    // Restart NN0 without the -upgrade flag, to make sure that works.
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
    cluster.restartNameNode(0, false);
    
    // Make sure we can still do FS ops after upgrading.
    cluster.transitionToActive(0);
    assertTrue(fs.mkdirs(new Path("/foo3")));
    
    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    
    // Now restart NN1 and make sure that we can do ops against that as well.
    cluster.restartNameNode(1);
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    assertTrue(fs.mkdirs(new Path("/foo4")));
    
    assertCTimesEqual(cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 4
Source File: TestDFSUpgradeWithHA.java    From hadoop with Apache License 2.0
/**
 * Test rollback with NFS shared dir.
 */
@Test
public void testRollbackWithNfs() throws Exception {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();

    File sharedDir = new File(cluster.getSharedEditsDir(0, 1));
    
    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkPreviousDirExistence(sharedDir, false);
    
    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    
    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkPreviousDirExistence(sharedDir, true);
    
    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));
    
    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    
    cluster.restartNameNode(1);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, true);
    checkPreviousDirExistence(sharedDir, true);
    assertCTimesEqual(cluster);
    
    // Now shut down the cluster and do the rollback.
    Collection<URI> nn1NameDirs = cluster.getNameDirs(0);
    cluster.shutdown();

    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf, false);

    // The rollback operation should have rolled back the first NN's local
    // dirs, and the shared dir, but not the other NN's dirs. Those have to be
    // done by bootstrapping the standby.
    checkNnPreviousDirExistence(cluster, 0, false);
    checkPreviousDirExistence(sharedDir, false);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
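As the closing comment notes, NN1's dirs are not rolled back here and must be rebuilt by bootstrapping the standby. A hypothetical continuation, reusing the BootstrapStandby call from the upgrade tests above (it assumes the cluster has been restarted first):

// Rebuild the standby's storage from the rolled-back NN0 by forcing a
// fresh bootstrap, exactly as in the upgrade path.
int rc = BootstrapStandby.run(new String[]{"-force"},
    cluster.getConfiguration(1));
assertEquals(0, rc);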
 
Example 5
Source File: TestDFSUpgradeWithHA.java    From hadoop with Apache License 2.0
/**
 * Make sure that starting a second NN with the -upgrade flag fails if the
 * other NN has already done that.
 */
@Test
public void testCannotUpgradeSecondNameNode() throws IOException,
    URISyntaxException {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(0)
    .build();

    File sharedDir = new File(cluster.getSharedEditsDir(0, 1));
    
    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkPreviousDirExistence(sharedDir, false);
    
    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    
    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkPreviousDirExistence(sharedDir, true);
    
    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));
    
    // Restart NN0 without the -upgrade flag, to make sure that works.
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
    cluster.restartNameNode(0, false);
    
    // Make sure we can still do FS ops after upgrading.
    cluster.transitionToActive(0);
    assertTrue(fs.mkdirs(new Path("/foo3")));
    
    // Make sure that starting the second NN with the -upgrade flag fails.
    cluster.getNameNodeInfos()[1].setStartOpt(StartupOption.UPGRADE);
    try {
      cluster.restartNameNode(1, false);
      fail("Should not have been able to start second NN with -upgrade");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "It looks like the shared log is already being upgraded", ioe);
    }
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 6
Source File: TestBookKeeperAsHASharedDir.java    From hadoop with Apache License 2.0
/**
 * Use NameNode INITIALIZESHAREDEDITS to initialize the shared edits, i.e.
 * copy the edit log segments to the new BKJM shared edits dir.
 * 
 * @throws Exception
 */
@Test
public void testInitializeBKSharedEdits() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    HAUtil.setAllowStandbyReads(conf, true);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);

    MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
        .numDataNodes(0).build();
    cluster.waitActive();
    // Shut down the NameNodes and delete the current file-based shared dir.
    cluster.shutdownNameNodes();
    File shareddir = new File(cluster.getSharedEditsDir(0, 1));
    assertTrue("Initial Shared edits dir not fully deleted",
        FileUtil.fullyDelete(shareddir));

    // Check that the NameNodes cannot start without the shared dir.
    assertCanNotStartNamenode(cluster, 0);
    assertCanNotStartNamenode(cluster, 1);

    // Configure BKJM as the new shared edits dir in both NameNodes.
    Configuration nn1Conf = cluster.getConfiguration(0);
    Configuration nn2Conf = cluster.getConfiguration(1);
    nn1Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
        .createJournalURI("/initializeSharedEdits").toString());
    nn2Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
        .createJournalURI("/initializeSharedEdits").toString());
    BKJMUtil.addJournalManagerDefinition(nn1Conf);
    BKJMUtil.addJournalManagerDefinition(nn2Conf);

    // Initialize the BKJM shared edits.
    assertFalse(NameNode.initializeSharedEdits(nn1Conf));

    // The NameNodes should now be able to start, in sync with BKJM as the
    // shared dir.
    assertCanStartHANameNodes(cluster, conf, "/testBKJMInitialize");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
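assertCanNotStartNamenode is another private helper of the test class. A sketch of its likely shape, assuming it simply attempts a restart and checks for a storage-related IOException (the exact message match is an assumption):

private void assertCanNotStartNamenode(MiniDFSCluster cluster, int nnIndex) {
  try {
    cluster.restartNameNode(nnIndex, false);
    fail("Should not have been able to start NN" + nnIndex
        + " without shared dir");
  } catch (IOException ioe) {
    // Expected: the NN refuses to start when its shared edits storage is
    // missing.
    GenericTestUtils.assertExceptionContains(
        "storage directory does not exist or is not accessible", ioe);
  }
}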
 