Java Code Examples for org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#lock()

The following examples show how to use org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#lock(). You can vote up the examples you like or vote down the ones you don't, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: TestCheckpoint.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Assert that, if sdToLock is locked, the cluster is not allowed to start up.
 * @param conf cluster conf to use
 * @param sdToLock the storage directory to lock
 */
private static void assertClusterStartFailsWhenDirLocked(
    Configuration conf, StorageDirectory sdToLock) throws IOException {
  // Grab the lock ourselves first, then verify that a NameNode started
  // against the same storage cannot come up.
  sdToLock.lock();
  MiniDFSCluster startedCluster = null;
  try {
    startedCluster = new MiniDFSCluster.Builder(conf)
        .format(false)
        .manageNameDfsDirs(false)
        .numDataNodes(0)
        .build();
    // Reaching this point without an exception is only acceptable on
    // platforms where file locking isn't supported at all.
    assertFalse("cluster should fail to start after locking " +
        sdToLock, sdToLock.isLockSupported());
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("already locked", ioe);
  } finally {
    cleanup(startedCluster);
    startedCluster = null;
    sdToLock.unlock();
  }
}
 
Example 2
Source File: TestCheckpoint.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Assert that, if sdToLock is locked, the cluster is not allowed to start up.
 * @param conf cluster conf to use
 * @param sdToLock the storage directory to lock
 */
private static void assertClusterStartFailsWhenDirLocked(
    Configuration conf, StorageDirectory sdToLock) throws IOException {
  // Hold the lock on the directory, then attempt a NameNode startup
  // against the same storage; it must not succeed.
  sdToLock.lock();
  MiniDFSCluster nn = null;
  try {
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    builder.format(false).manageNameDfsDirs(false).numDataNodes(0);
    nn = builder.build();
    // Startup succeeded: only legal when the platform cannot take
    // file locks in the first place.
    assertFalse("cluster should fail to start after locking " +
        sdToLock, sdToLock.isLockSupported());
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("already locked", ioe);
  } finally {
    cleanup(nn);
    nn = null;
    sdToLock.unlock();
  }
}
 
Example 3
Source File: TestCheckpoint.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Test that an attempt to lock a storage directory that is already held
 * logs an error message containing the JVM name of the process that
 * holds the lock.
 */
@Test
public void testStorageAlreadyLockedErrorMessage() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  StorageDirectory savedSd = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    // The running NN holds every storage dir; keep a reference to one.
    for (StorageDirectory sd : storage.dirIterable(null)) {
      assertLockFails(sd);
      savedSd = sd;
    }

    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(Storage.class));
    try {
      // Re-locking a directory the NN already holds must fail.
      savedSd.lock();
      fail("Namenode should not be able to lock a storage" +
          " that is already locked");
    } catch (IOException ioe) {
      // cannot read lock file on Windows, so message cannot get JVM name
      String lockingJvmName;
      if (Path.WINDOWS) {
        lockingJvmName = "";
      } else {
        lockingJvmName = " " + ManagementFactory.getRuntimeMXBean().getName();
      }
      String expectedLogMessage = "It appears that another node "
          + lockingJvmName + " has already locked the storage directory";
      assertTrue("Log output does not contain expected log message: "
          + expectedLogMessage,
          logs.getOutput().contains(expectedLogMessage));
    }
  } finally {
    cleanup(cluster);
    cluster = null;
  }
}
 
Example 4
Source File: TestCheckpoint.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Assert that the given storage directory can't be locked, because
 * it's already locked.
 */
private static void assertLockFails(StorageDirectory sd) {
  try {
    sd.lock();
    // No exception was thrown: that is only legal on platforms where
    // file locking is unsupported.
    assertFalse(sd.isLockSupported());
  } catch (IOException expected) {
    GenericTestUtils.assertExceptionContains("already locked", expected);
  }
}
 
Example 5
Source File: TestCheckpoint.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Verify that trying to lock an already-locked storage directory logs
 * an error message that names the JVM holding the lock.
 */
@Test
public void testStorageAlreadyLockedErrorMessage() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  StorageDirectory savedSd = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    // Every dir is held by the running NN; remember the last one seen.
    for (StorageDirectory sd : storage.dirIterable(null)) {
      assertLockFails(sd);
      savedSd = sd;
    }

    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(Storage.class));
    try {
      // This second lock attempt must throw.
      savedSd.lock();
      fail("Namenode should not be able to lock a storage" +
          " that is already locked");
    } catch (IOException ioe) {
      // cannot read lock file on Windows, so message cannot get JVM name
      final String lockingJvmName = Path.WINDOWS
          ? ""
          : " " + ManagementFactory.getRuntimeMXBean().getName();
      final String expectedLogMessage = "It appears that another node "
          + lockingJvmName + " has already locked the storage directory";
      assertTrue("Log output does not contain expected log message: "
          + expectedLogMessage,
          logs.getOutput().contains(expectedLogMessage));
    }
  } finally {
    cleanup(cluster);
    cluster = null;
  }
}
 
Example 6
Source File: TestCheckpoint.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Assert that the given storage directory can't be locked, because
 * it's already locked.
 */
private static void assertLockFails(StorageDirectory sd) {
  try {
    sd.lock();
    // lock() returned normally — acceptable only when the platform
    // does not support file locking.
    assertFalse(sd.isLockSupported());
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("already locked", e);
  }
}
 
Example 7
Source File: TestCheckpoint.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Test that the SecondaryNameNode properly locks its storage directories.
 */
@Test
public void testSecondaryNameNodeLocking() throws Exception {
  // Start a primary NN so that the secondary will start successfully
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    StorageDirectory savedSd = null;
    // Start a secondary NN, then make sure that all of its storage
    // dirs got locked.
    secondary = startSecondaryNameNode(conf);
    
    NNStorage storage = secondary.getFSImage().getStorage();
    for (StorageDirectory sd : storage.dirIterable(null)) {
      assertLockFails(sd);
      savedSd = sd;
    }
    LOG.info("===> Shutting down first 2NN");
    secondary.shutdown();
    secondary = null;

    LOG.info("===> Locking a dir, starting second 2NN");
    // Lock one of its dirs, make sure it fails to start
    // (fixed: missing space between "lock" and the directory name)
    LOG.info("Trying to lock " + savedSd);
    savedSd.lock();
    try {
      secondary = startSecondaryNameNode(conf);
      // If the 2NN came up anyway, that is only legal on platforms
      // where file locking is unsupported.
      assertFalse("Should fail to start 2NN when " + savedSd + " is locked",
          savedSd.isLockSupported());
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains("already locked", ioe);
    } finally {
      savedSd.unlock();
    }
    
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Example 8
Source File: TestCheckpoint.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Test that the SecondaryNameNode properly locks its storage directories.
 */
@Test
public void testSecondaryNameNodeLocking() throws Exception {
  // Start a primary NN so that the secondary will start successfully
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    StorageDirectory savedSd = null;
    // Start a secondary NN, then make sure that all of its storage
    // dirs got locked.
    secondary = startSecondaryNameNode(conf);
    
    NNStorage storage = secondary.getFSImage().getStorage();
    for (StorageDirectory sd : storage.dirIterable(null)) {
      assertLockFails(sd);
      savedSd = sd;
    }
    LOG.info("===> Shutting down first 2NN");
    secondary.shutdown();
    secondary = null;

    LOG.info("===> Locking a dir, starting second 2NN");
    // Lock one of its dirs, make sure it fails to start
    LOG.info("Trying to lock" + savedSd);
    savedSd.lock();
    try {
      secondary = startSecondaryNameNode(conf);
      assertFalse("Should fail to start 2NN when " + savedSd + " is locked",
          savedSd.isLockSupported());
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains("already locked", ioe);
    } finally {
      savedSd.unlock();
    }
    
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}