Java Code Examples for org.apache.hadoop.test.GenericTestUtils#assertExists()

The following examples show how to use org.apache.hadoop.test.GenericTestUtils#assertExists(). They are taken from the Apache Hadoop project and its derivatives; the source file, originating project, and license are noted above each example.
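Before the project examples, here is a minimal, self-contained sketch of how the helper behaves: assertExists(File) fails the surrounding test if the given file or directory does not exist, and passes silently otherwise. The test class name, temporary-directory setup, and file names below are illustrative assumptions, not taken from any of the projects listed; the sketch assumes JUnit 4 and the hadoop-common test artifact on the classpath.

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;

public class AssertExistsExample {

  @Test
  public void testExistingPathsPass() throws IOException {
    // Create a real directory and a file inside it; assertExists
    // should pass silently for both.
    File dir = Files.createTempDirectory("assert-exists-example").toFile();
    File file = new File(dir, "data.txt");
    assertTrue(file.createNewFile());

    GenericTestUtils.assertExists(dir);   // works for directories
    GenericTestUtils.assertExists(file);  // and for regular files
  }

  @Test(expected = AssertionError.class)
  public void testMissingPathFails() {
    // A path that was never created fails the assertion, so the
    // surrounding test fails with a message naming the missing path.
    File missing = new File(System.getProperty("java.io.tmpdir"),
        "no-such-file-for-assert-exists-example");
    GenericTestUtils.assertExists(missing);
  }
}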
Example 1
Source File: TestEditLogsDuringFailover.java    From hadoop with Apache License 2.0
/**
 * Check that the given edits files are present in the given storage
 * dirs.
 */
private void assertEditFiles(Iterable<URI> dirs, String ... files)
    throws IOException {
  for (URI u : dirs) {
    File editDirRoot = new File(u.getPath());
    File editDir = new File(editDirRoot, "current");
    GenericTestUtils.assertExists(editDir);
    if (files.length == 0) {
      LOG.info("Checking no edit files exist in " + editDir);
    } else {
      LOG.info("Checking for following edit files in " + editDir
          + ": " + Joiner.on(",").join(files));
    }
    
    GenericTestUtils.assertGlobEquals(editDir, "edits_.*", files);
  }
}
 
Example 2
Source File: TestEditLogsDuringFailover.java    From big-c with Apache License 2.0
/**
 * Check that the given edits files are present in the given storage
 * dirs.
 */
private void assertEditFiles(Iterable<URI> dirs, String ... files)
    throws IOException {
  for (URI u : dirs) {
    File editDirRoot = new File(u.getPath());
    File editDir = new File(editDirRoot, "current");
    GenericTestUtils.assertExists(editDir);
    if (files.length == 0) {
      LOG.info("Checking no edit files exist in " + editDir);
    } else {
      LOG.info("Checking for following edit files in " + editDir
          + ": " + Joiner.on(",").join(files));
    }
    
    GenericTestUtils.assertGlobEquals(editDir, "edits_.*", files);
  }
}
 
Example 3
Source File: TestJournal.java    From big-c with Apache License 2.0
/**
 * Assume that a client is writing to a journal, but loses its connection
 * in the middle of a segment. Thus, any future journal() calls in that
 * segment may fail, because some txns were missed while the connection was
 * down.
 *
 * Eventually, the connection comes back, and the NN tries to start a new
 * segment at a higher txid. This should abort the old one and succeed.
 */
@Test (timeout = 10000)
public void testAbortOldSegmentIfFinalizeIsMissed() throws Exception {
  journal.newEpoch(FAKE_NSINFO, 1);
  
  // Start a segment at txid 1, and write a batch of 3 txns.
  journal.startLogSegment(makeRI(1), 1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  journal.journal(makeRI(2), 1, 1, 3,
      QJMTestUtil.createTxnData(1, 3));

  GenericTestUtils.assertExists(
      journal.getStorage().getInProgressEditLog(1));
  
  // Try to start new segment at txid 6, this should abort old segment and
  // then succeed, allowing us to write txid 6-9.
  journal.startLogSegment(makeRI(3), 6,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  journal.journal(makeRI(4), 6, 6, 3,
      QJMTestUtil.createTxnData(6, 3));

  // The old segment should *not* be finalized.
  GenericTestUtils.assertExists(
      journal.getStorage().getInProgressEditLog(1));
  GenericTestUtils.assertExists(
      journal.getStorage().getInProgressEditLog(6));
}
 
Example 4
Source File: TestBootstrapStandby.java    From hadoop with Apache License 2.0
/**
 * Test for the case where the shared edits dir doesn't have
 * all of the recent edit logs.
 */
@Test
public void testSharedEditsMissingLogs() throws Exception {
  removeStandbyNameDirs();
  
  CheckpointSignature sig = nn0.getRpcServer().rollEditLog();
  assertEquals(3, sig.getCurSegmentTxId());
  
  // Should have created edits_1-2 in shared edits dir
  URI editsUri = cluster.getSharedEditsDir(0, 1);
  File editsDir = new File(editsUri);
  File editsSegment = new File(new File(editsDir, "current"),
      NNStorage.getFinalizedEditsFileName(1, 2));
  GenericTestUtils.assertExists(editsSegment);

  // Delete the segment.
  assertTrue(editsSegment.delete());
  
  // Trying to bootstrap standby should now fail since the edit
  // logs aren't available in the shared dir.
  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(BootstrapStandby.class));
  try {
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE, rc);
  } finally {
    logs.stopCapturing();
  }
  GenericTestUtils.assertMatches(logs.getOutput(),
      "FATAL.*Unable to read transaction ids 1-3 from the configured shared");
}
 
Example 5
Source File: TestJournal.java    From big-c with Apache License 2.0
@Test (timeout = 10000)
public void testJournalLocking() throws Exception {
  Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
  StorageDirectory sd = journal.getStorage().getStorageDir(0);
  File lockFile = new File(sd.getRoot(), Storage.STORAGE_FILE_LOCK);
  
  // Journal should be locked, since the format() call locks it.
  GenericTestUtils.assertExists(lockFile);

  journal.newEpoch(FAKE_NSINFO,  1);
  try {
    new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
        mockErrorReporter);
    fail("Did not fail to create another journal in same dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Cannot lock storage", ioe);
  }
  
  journal.close();
  
  // Journal should no longer be locked after the close() call.
  // Hence, should be able to create a new Journal in the same dir.
  Journal journal2 = new Journal(conf, TEST_LOG_DIR, JID,
      StartupOption.REGULAR, mockErrorReporter);
  journal2.newEpoch(FAKE_NSINFO, 2);
  journal2.close();
}
 
Example 6
Source File: TestFileAppendRestart.java    From big-c with Apache License 2.0
/**
 * Earlier versions of HDFS had a bug (HDFS-2991) which caused
 * append(), when called exactly at a block boundary,
 * to not log an OP_ADD. This test ensures that we can read from
 * such buggy versions correctly, by loading a namesystem image
 * created with 0.23.1-rc2 that exhibits the issue.
 */
@Test
public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
  final Configuration conf = new HdfsConfiguration();

  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_23_BROKEN_APPEND_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-buggy-append");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .numDataNodes(0)
    .waitSafeMode(false)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/io_data/test_io_0");
    assertEquals(2*1024*1024, fs.getFileStatus(testPath).getLen());
  } finally {
    cluster.shutdown();
  }
}
 
Example 7
Source File: TestFSImage.java    From big-c with Apache License 2.0
/**
 * In this test case, an image has been created with a file having
 * preferredBlockSize = 0. We try to read this image (a file with
 * preferredBlockSize = 0 was allowed prior to the 2.1.0-beta release).
 * A namenode at version 2.6 or later will not be able to read this
 * particular file.
 * See HDFS-7788 for more information.
 * @throws Exception
 */
@Test
public void testZeroBlockSize() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-zero-block-size");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));
  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, 
      nameDir.getAbsolutePath());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .waitSafeMode(false)
      .startupOption(StartupOption.UPGRADE)
      .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/zeroBlockFile");
    assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath));
    assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
  } finally {
    cluster.shutdown();
    //Clean up
    FileUtil.fullyDelete(dfsDir);
  }
}
 
Example 8
Source File: TestValidateConfigurationSettings.java    From big-c with Apache License 2.0
/**
 * HDFS-3013: NameNode format command doesn't pick up
 * dfs.namenode.name.dir.NameServiceId configuration.
 */
@Test(timeout = 300000)
public void testGenericKeysForNameNodeFormat()
    throws IOException {
  Configuration conf = new HdfsConfiguration();

  // Set ephemeral ports 
  conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "127.0.0.1:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
      "127.0.0.1:0");
  
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
  
  // Set a nameservice-specific configuration for name dir
  File dir = new File(MiniDFSCluster.getBaseDirectory(),
      "testGenericKeysForNameNodeFormat");
  if (dir.exists()) {
    FileUtil.fullyDelete(dir);
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + ".ns1",
      dir.getAbsolutePath());
  
  // Format and verify the right dir is formatted.
  DFSTestUtil.formatNameNode(conf);
  GenericTestUtils.assertExists(dir);

  // Ensure that the same dir is picked up by the running NN
  NameNode nameNode = new NameNode(conf);
  nameNode.stop();
}
 
Example 9
Source File: TestBootstrapStandby.java    From big-c with Apache License 2.0
/**
 * Test for the case where the shared edits dir doesn't have
 * all of the recent edit logs.
 */
@Test
public void testSharedEditsMissingLogs() throws Exception {
  removeStandbyNameDirs();
  
  CheckpointSignature sig = nn0.getRpcServer().rollEditLog();
  assertEquals(3, sig.getCurSegmentTxId());
  
  // Should have created edits_1-2 in shared edits dir
  URI editsUri = cluster.getSharedEditsDir(0, 1);
  File editsDir = new File(editsUri);
  File editsSegment = new File(new File(editsDir, "current"),
      NNStorage.getFinalizedEditsFileName(1, 2));
  GenericTestUtils.assertExists(editsSegment);

  // Delete the segment.
  assertTrue(editsSegment.delete());
  
  // Trying to bootstrap standby should now fail since the edit
  // logs aren't available in the shared dir.
  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(BootstrapStandby.class));
  try {
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE, rc);
  } finally {
    logs.stopCapturing();
  }
  GenericTestUtils.assertMatches(logs.getOutput(),
      "FATAL.*Unable to read transaction ids 1-3 from the configured shared");
}
 
Example 10
Source File: TestJournal.java    From hadoop with Apache License 2.0
/**
 * Assume that a client is writing to a journal, but loses its connection
 * in the middle of a segment. Thus, any future journal() calls in that
 * segment may fail, because some txns were missed while the connection was
 * down.
 *
 * Eventually, the connection comes back, and the NN tries to start a new
 * segment at a higher txid. This should abort the old one and succeed.
 */
@Test (timeout = 10000)
public void testAbortOldSegmentIfFinalizeIsMissed() throws Exception {
  journal.newEpoch(FAKE_NSINFO, 1);
  
  // Start a segment at txid 1, and write a batch of 3 txns.
  journal.startLogSegment(makeRI(1), 1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  journal.journal(makeRI(2), 1, 1, 3,
      QJMTestUtil.createTxnData(1, 3));

  GenericTestUtils.assertExists(
      journal.getStorage().getInProgressEditLog(1));
  
  // Try to start new segment at txid 6, this should abort old segment and
  // then succeed, allowing us to write txid 6-9.
  journal.startLogSegment(makeRI(3), 6,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  journal.journal(makeRI(4), 6, 6, 3,
      QJMTestUtil.createTxnData(6, 3));

  // The old segment should *not* be finalized.
  GenericTestUtils.assertExists(
      journal.getStorage().getInProgressEditLog(1));
  GenericTestUtils.assertExists(
      journal.getStorage().getInProgressEditLog(6));
}
 
Example 11
Source File: TestJournal.java    From hadoop with Apache License 2.0
@Test (timeout = 10000)
public void testJournalLocking() throws Exception {
  Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
  StorageDirectory sd = journal.getStorage().getStorageDir(0);
  File lockFile = new File(sd.getRoot(), Storage.STORAGE_FILE_LOCK);
  
  // Journal should be locked, since the format() call locks it.
  GenericTestUtils.assertExists(lockFile);

  journal.newEpoch(FAKE_NSINFO,  1);
  try {
    new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
        mockErrorReporter);
    fail("Did not fail to create another journal in same dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Cannot lock storage", ioe);
  }
  
  journal.close();
  
  // Journal should no longer be locked after the close() call.
  // Hence, should be able to create a new Journal in the same dir.
  Journal journal2 = new Journal(conf, TEST_LOG_DIR, JID,
      StartupOption.REGULAR, mockErrorReporter);
  journal2.newEpoch(FAKE_NSINFO, 2);
  journal2.close();
}
 
Example 12
Source File: TestFileAppendRestart.java    From hadoop with Apache License 2.0
/**
 * Earlier versions of HDFS had a bug (HDFS-2991) which caused
 * append(), when called exactly at a block boundary,
 * to not log an OP_ADD. This test ensures that we can read from
 * such buggy versions correctly, by loading a namesystem image
 * created with 0.23.1-rc2 that exhibits the issue.
 */
@Test
public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
  final Configuration conf = new HdfsConfiguration();

  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_23_BROKEN_APPEND_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-buggy-append");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .numDataNodes(0)
    .waitSafeMode(false)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/io_data/test_io_0");
    assertEquals(2*1024*1024, fs.getFileStatus(testPath).getLen());
  } finally {
    cluster.shutdown();
  }
}
 
Example 13
Source File: TestFSImage.java    From hadoop with Apache License 2.0
/**
 * In this test case, an image has been created with a file having
 * preferredBlockSize = 0. We try to read this image (a file with
 * preferredBlockSize = 0 was allowed prior to the 2.1.0-beta release).
 * A namenode at version 2.6 or later will not be able to read this
 * particular file.
 * See HDFS-7788 for more information.
 * @throws Exception
 */
@Test
public void testZeroBlockSize() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-zero-block-size");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));
  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, 
      nameDir.getAbsolutePath());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .waitSafeMode(false)
      .startupOption(StartupOption.UPGRADE)
      .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/zeroBlockFile");
    assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath));
    assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
  } finally {
    cluster.shutdown();
    //Clean up
    FileUtil.fullyDelete(dfsDir);
  }
}
 
Example 14
Source File: TestValidateConfigurationSettings.java    From hadoop with Apache License 2.0
/**
 * HDFS-3013: NameNode format command doesn't pick up
 * dfs.namenode.name.dir.NameServiceId configuration.
 */
@Test(timeout = 300000)
public void testGenericKeysForNameNodeFormat()
    throws IOException {
  Configuration conf = new HdfsConfiguration();

  // Set ephemeral ports 
  conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "127.0.0.1:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
      "127.0.0.1:0");
  
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
  
  // Set a nameservice-specific configuration for name dir
  File dir = new File(MiniDFSCluster.getBaseDirectory(),
      "testGenericKeysForNameNodeFormat");
  if (dir.exists()) {
    FileUtil.fullyDelete(dir);
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + ".ns1",
      dir.getAbsolutePath());
  
  // Format and verify the right dir is formatted.
  DFSTestUtil.formatNameNode(conf);
  GenericTestUtils.assertExists(dir);

  // Ensure that the same dir is picked up by the running NN
  NameNode nameNode = new NameNode(conf);
  nameNode.stop();
}
 
Example 15
Source File: TestQuorumJournalManager.java    From hadoop with Apache License 2.0
@Test
public void testPurgeLogs() throws Exception {
  for (int txid = 1; txid <= 5; txid++) {
    writeSegment(cluster, qjm, txid, 1, true);
  }
  File curDir = cluster.getCurrentDir(0, JID);
  GenericTestUtils.assertGlobEquals(curDir, "edits_.*",
      NNStorage.getFinalizedEditsFileName(1, 1),
      NNStorage.getFinalizedEditsFileName(2, 2),
      NNStorage.getFinalizedEditsFileName(3, 3),
      NNStorage.getFinalizedEditsFileName(4, 4),
      NNStorage.getFinalizedEditsFileName(5, 5));
  File paxosDir = new File(curDir, "paxos");
  GenericTestUtils.assertExists(paxosDir);

  // Create new files in the paxos directory, which should get purged too.
  assertTrue(new File(paxosDir, "1").createNewFile());
  assertTrue(new File(paxosDir, "3").createNewFile());
  
  GenericTestUtils.assertGlobEquals(paxosDir, "\\d+",
      "1", "3");
  
  // Create some temporary files of the sort that are used during recovery.
  assertTrue(new File(curDir,
      "edits_inprogress_0000000000000000001.epoch=140").createNewFile());
  assertTrue(new File(curDir,
      "edits_inprogress_0000000000000000002.empty").createNewFile());
  
  qjm.purgeLogsOlderThan(3);
  
  // Log purging is asynchronous, so we have to wait for the calls
  // to be sent and respond before verifying.
  waitForAllPendingCalls(qjm.getLoggerSetForTests());
  
  // Older edits should be purged
  GenericTestUtils.assertGlobEquals(curDir, "edits_.*",
      NNStorage.getFinalizedEditsFileName(3, 3),
      NNStorage.getFinalizedEditsFileName(4, 4),
      NNStorage.getFinalizedEditsFileName(5, 5));
 
  // Older paxos files should be purged
  GenericTestUtils.assertGlobEquals(paxosDir, "\\d+",
      "3");
}
 
Example 16
Source File: TestCheckpoint.java    From big-c with Apache License 2.0
/**
 * Test that, if a storage directory is failed when a checkpoint occurs,
 * the non-failed storage directory receives the checkpoint.
 */
@Test
public void testCheckpointWithFailedStorageDir() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  
  Configuration conf = new HdfsConfiguration();

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN experiences failure of a volume -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    StorageDirectory sd1 = storage.getStorageDir(1);
    
    currentDir = sd0.getCurrentDir();
    FileUtil.setExecutable(currentDir, false);

    // Upload checkpoint when NN has a bad storage dir. This should
    // succeed and create the checkpoint in the good dir.
    secondary.doCheckpoint();
    
    GenericTestUtils.assertExists(
        new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
    
    // Restore the good dir
    FileUtil.setExecutable(currentDir, true);
    nn.restoreFailedStorage("true");
    nn.rollEditLog();

    // Checkpoint again -- this should upload to both dirs
    secondary.doCheckpoint();
    
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.setExecutable(currentDir, true);
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Example 17
Source File: TestPersistBlocks.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Earlier versions of HDFS didn't persist block allocation to the edit log.
 * This makes sure that we can still load an edit log when the OP_CLOSE
 * is the opcode which adds all of the blocks. This is a regression
 * test for HDFS-2773.
 * This test uses a tarred pseudo-distributed cluster from Hadoop 1.0
 * which has a multi-block file. This is similar to the tests in
 * {@link TestDFSUpgradeFromImage} but none of those images include
 * a multi-block file.
 */
@Test
public void testEarlierVersionEditLog() throws Exception {
  final Configuration conf = new HdfsConfiguration();
      
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_1_0_MULTIBLOCK_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-1.0");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  File dataDir = new File(dfsDir, "data");
  GenericTestUtils.assertExists(dataDir);
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir.getAbsolutePath());
  
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .numDataNodes(1)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/user/todd/4blocks");
    // Read it without caring about the actual data within - we just need
    // to make sure that the block states and locations are OK.
    DFSTestUtil.readFile(fs, testPath);
    
    // Ensure that we can append to it - if the blocks were in some funny
    // state we'd get some kind of issue here. 
    FSDataOutputStream stm = fs.append(testPath);
    try {
      stm.write(1);
    } finally {
      IOUtils.closeStream(stm);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example 18
Source File: TestPersistBlocks.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Earlier versions of HDFS didn't persist block allocation to the edit log.
 * This makes sure that we can still load an edit log when the OP_CLOSE
 * is the opcode which adds all of the blocks. This is a regression
 * test for HDFS-2773.
 * This test uses a tarred pseudo-distributed cluster from Hadoop 1.0
 * which has a multi-block file. This is similar to the tests in
 * {@link TestDFSUpgradeFromImage} but none of those images include
 * a multi-block file.
 */
@Test
public void testEarlierVersionEditLog() throws Exception {
  final Configuration conf = new HdfsConfiguration();
      
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_1_0_MULTIBLOCK_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-1.0");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  File dataDir = new File(dfsDir, "data");
  GenericTestUtils.assertExists(dataDir);
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir.getAbsolutePath());
  
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .numDataNodes(1)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/user/todd/4blocks");
    // Read it without caring about the actual data within - we just need
    // to make sure that the block states and locations are OK.
    DFSTestUtil.readFile(fs, testPath);
    
    // Ensure that we can append to it - if the blocks were in some funny
    // state we'd get some kind of issue here. 
    FSDataOutputStream stm = fs.append(testPath);
    try {
      stm.write(1);
    } finally {
      IOUtils.closeStream(stm);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example 19
Source File: TestQuorumJournalManager.java    From big-c with Apache License 2.0 4 votes vote down vote up
@Test
public void testPurgeLogs() throws Exception {
  for (int txid = 1; txid <= 5; txid++) {
    writeSegment(cluster, qjm, txid, 1, true);
  }
  File curDir = cluster.getCurrentDir(0, JID);
  GenericTestUtils.assertGlobEquals(curDir, "edits_.*",
      NNStorage.getFinalizedEditsFileName(1, 1),
      NNStorage.getFinalizedEditsFileName(2, 2),
      NNStorage.getFinalizedEditsFileName(3, 3),
      NNStorage.getFinalizedEditsFileName(4, 4),
      NNStorage.getFinalizedEditsFileName(5, 5));
  File paxosDir = new File(curDir, "paxos");
  GenericTestUtils.assertExists(paxosDir);

  // Create new files in the paxos directory, which should get purged too.
  assertTrue(new File(paxosDir, "1").createNewFile());
  assertTrue(new File(paxosDir, "3").createNewFile());
  
  GenericTestUtils.assertGlobEquals(paxosDir, "\\d+",
      "1", "3");
  
  // Create some temporary files of the sort that are used during recovery.
  assertTrue(new File(curDir,
      "edits_inprogress_0000000000000000001.epoch=140").createNewFile());
  assertTrue(new File(curDir,
      "edits_inprogress_0000000000000000002.empty").createNewFile());
  
  qjm.purgeLogsOlderThan(3);
  
  // Log purging is asynchronous, so we have to wait for the calls
  // to be sent and respond before verifying.
  waitForAllPendingCalls(qjm.getLoggerSetForTests());
  
  // Older edits should be purged
  GenericTestUtils.assertGlobEquals(curDir, "edits_.*",
      NNStorage.getFinalizedEditsFileName(3, 3),
      NNStorage.getFinalizedEditsFileName(4, 4),
      NNStorage.getFinalizedEditsFileName(5, 5));
 
  // Older paxos files should be purged
  GenericTestUtils.assertGlobEquals(paxosDir, "\\d+",
      "3");
}
 
Example 20
Source File: TestCheckpoint.java    From hadoop with Apache License 2.0
/**
 * Test that, if a storage directory is failed when a checkpoint occurs,
 * the non-failed storage directory receives the checkpoint.
 */
@Test
public void testCheckpointWithFailedStorageDir() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  
  Configuration conf = new HdfsConfiguration();

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN experiences failure of a volume -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    StorageDirectory sd1 = storage.getStorageDir(1);
    
    currentDir = sd0.getCurrentDir();
    FileUtil.setExecutable(currentDir, false);

    // Upload checkpoint when NN has a bad storage dir. This should
    // succeed and create the checkpoint in the good dir.
    secondary.doCheckpoint();
    
    GenericTestUtils.assertExists(
        new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
    
    // Restore the good dir
    FileUtil.setExecutable(currentDir, true);
    nn.restoreFailedStorage("true");
    nn.rollEditLog();

    // Checkpoint again -- this should upload to both dirs
    secondary.doCheckpoint();
    
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.setExecutable(currentDir, true);
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}