Java Code Examples for org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#unlock()

The following examples show how to use org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory#unlock(). Each example notes the project and source file it was taken from.
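Common to all of these examples is one discipline: a lock taken on a StorageDirectory with lock() (backed by the directory's in_use.lock file) must be released with unlock() on every path, including failure paths. A minimal sketch of that pattern, assuming an already-configured StorageDirectory:

import java.io.IOException;

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;

public class StorageLockSketch {
  static void withLockedDir(StorageDirectory sd) throws IOException {
    sd.lock();        // throws IOException if another process holds the lock
    try {
      // ... operate on the directory's contents while holding the lock ...
    } finally {
      sd.unlock();    // always release, even if the work above failed
    }
  }
}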
Example 1
Source File: TestCheckpoint.java    From hadoop with Apache License 2.0
/**
 * Assert that, if sdToLock is locked, the cluster is not allowed to start up.
 * @param conf cluster conf to use
 * @param sdToLock the storage directory to lock
 */
private static void assertClusterStartFailsWhenDirLocked(
    Configuration conf, StorageDirectory sdToLock) throws IOException {
  // Lock the edits dir, then start the NN, and make sure it fails to start
  sdToLock.lock();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).format(false)
        .manageNameDfsDirs(false).numDataNodes(0).build();
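    // If build() returns instead of throwing, the NN started despite the
    // lock; that is only acceptable when file locking is unsupported on
    // this storage, which is exactly what the assert below checks.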
    assertFalse("cluster should fail to start after locking " +
        sdToLock, sdToLock.isLockSupported());
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("already locked", ioe);
  } finally {
    cleanup(cluster);
    cluster = null;
    sdToLock.unlock();
  }
}
 
Example 2
Source File: BackupImage.java    From hadoop with Apache License 2.0
/**
 * Analyze backup storage directories for consistency.<br>
 * Recover from incomplete checkpoints if required.<br>
 * Read VERSION and fstime files if they exist.<br>
 * Do not load image or edits.
 *
 * @throws IOException if the node should shut down.
 */
void recoverCreateRead() throws IOException {
  for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    StorageState curState;
    try {
      curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
      // sd is locked but not opened
      switch(curState) {
      case NON_EXISTENT:
        // fail if any of the configured storage dirs are inaccessible
        throw new InconsistentFSStateException(sd.getRoot(),
              "checkpoint directory does not exist or is not accessible.");
      case NOT_FORMATTED:
        // for backup node all directories may be unformatted initially
        LOG.info("Storage directory " + sd.getRoot() + " is not formatted.");
        LOG.info("Formatting ...");
        sd.clearDirectory(); // create empty current
        break;
      case NORMAL:
        break;
      default:  // recovery is possible
        sd.doRecover(curState);
      }
      if(curState != StorageState.NOT_FORMATTED) {
        // read and verify consistency with other directories
        storage.readProperties(sd);
      }
    } catch(IOException ioe) {
      sd.unlock();
      throw ioe;
    }
  }
}
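
Note the catch block: analyzeStorage() acquires the directory's lock ("locked but not opened"), so any later failure must explicitly release it with unlock() before the exception propagates; on success the directory simply stays locked for the lifetime of the node.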
 
Example 3
Source File: FSImage.java    From hadoop with Apache License 2.0
/**
 * For each storage directory, performs recovery of incomplete transitions
 * (e.g. upgrade, rollback, checkpoint) and inserts the directory's storage
 * state into the dataDirStates map.
 * @param dataDirStates output of storage directory states
 * @return true if there is at least one valid formatted storage directory
 */
public static boolean recoverStorageDirs(StartupOption startOpt,
    NNStorage storage, Map<StorageDirectory, StorageState> dataDirStates)
    throws IOException {
  boolean isFormatted = false;
  // This loop needs to be over all storage dirs, even shared dirs, to make
  // sure that we properly examine their state, but we make sure we don't
  // mutate the shared dir below in the actual loop.
  for (Iterator<StorageDirectory> it = 
                    storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    StorageState curState;
    if (startOpt == StartupOption.METADATAVERSION) {
      /* All we need is the layout version. */
      storage.readProperties(sd);
      return true;
    }

    try {
      curState = sd.analyzeStorage(startOpt, storage);
      // sd is locked but not opened
      switch(curState) {
      case NON_EXISTENT:
        // name-node fails if any of the configured storage dirs are missing
        throw new InconsistentFSStateException(sd.getRoot(),
                    "storage directory does not exist or is not accessible.");
      case NOT_FORMATTED:
        break;
      case NORMAL:
        break;
      default:  // recovery is possible
        sd.doRecover(curState);
      }
      if (curState != StorageState.NOT_FORMATTED 
          && startOpt != StartupOption.ROLLBACK) {
        // read and verify consistency with other directories
        storage.readProperties(sd, startOpt);
        isFormatted = true;
      }
      if (startOpt == StartupOption.IMPORT && isFormatted)
        // import of a checkpoint is allowed only into empty image directories
        throw new IOException("Cannot import image from a checkpoint. "
            + "NameNode already contains an image in " + sd.getRoot());
    } catch (IOException ioe) {
      sd.unlock();
      throw ioe;
    }
    dataDirStates.put(sd, curState);
  }
  return isFormatted;
}
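
The same unlock-on-failure discipline applies per directory: a directory that passes analysis stays locked and is recorded in dataDirStates, while any failure unlocks that directory and aborts startup. The boolean result reports whether at least one formatted directory was found, which the caller uses to refuse startup on completely unformatted storage; note also that the METADATAVERSION branch short-circuits after reading only the first directory's layout version.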
 
Example 4
Source File: SecondaryNameNode.java    From hadoop with Apache License 2.0
/**
 * Analyze checkpoint directories.
 * Create directories if they do not exist.
 * Recover from an unsuccessful checkpoint if necessary.
 *
 * @throws IOException
 */
void recoverCreate(boolean format) throws IOException {
  storage.attemptRestoreRemovedStorage();
  storage.unlockAll();

  for (Iterator<StorageDirectory> it = 
               storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    boolean isAccessible = true;
    try { // create directories if they don't exist yet
      if(!sd.getRoot().mkdirs()) {
        // do nothing, directory is already created
      }
    } catch(SecurityException se) {
      isAccessible = false;
    }
    if(!isAccessible)
      throw new InconsistentFSStateException(sd.getRoot(),
          "cannot access checkpoint directory.");
    
    if (format) {
      // Don't confirm, since this is just the secondary namenode.
      LOG.info("Formatting storage directory " + sd);
      sd.clearDirectory();
    }
    
    StorageState curState;
    try {
      curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
      // sd is locked but not opened
      switch(curState) {
      case NON_EXISTENT:
        // fail if any of the configured checkpoint dirs are inaccessible 
        throw new InconsistentFSStateException(sd.getRoot(),
              "checkpoint directory does not exist or is not accessible.");
      case NOT_FORMATTED:
        break;  // it's ok since initially there is no current and VERSION
      case NORMAL:
        // Read the VERSION file. This verifies that:
        // (a) the VERSION file for each of the directories is the same,
        // and (b) when we connect to a NN, we can verify that the remote
        // node matches the same namespace that we ran on previously.
        storage.readProperties(sd);
        break;
      default:  // recovery is possible
        sd.doRecover(curState);
      }
    } catch (IOException ioe) {
      sd.unlock();
      throw ioe;
    }
  }
}
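
Two details are easy to miss here: unlockAll() first releases any locks this Storage instance may already hold, so analyzeStorage() can re-take each directory's lock cleanly, and NOT_FORMATTED is tolerated because a fresh checkpoint directory legitimately has no current/ or VERSION yet.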
 
Example 5
Source File: TestCheckpoint.java    From hadoop with Apache License 2.0
/**
 * Test that the SecondaryNameNode properly locks its storage directories.
 */
@Test
public void testSecondaryNameNodeLocking() throws Exception {
  // Start a primary NN so that the secondary will start successfully
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    StorageDirectory savedSd = null;
    // Start a secondary NN, then make sure that all of its storage
    // dirs got locked.
    secondary = startSecondaryNameNode(conf);
    
    NNStorage storage = secondary.getFSImage().getStorage();
    for (StorageDirectory sd : storage.dirIterable(null)) {
      assertLockFails(sd);
      savedSd = sd;
    }
    LOG.info("===> Shutting down first 2NN");
    secondary.shutdown();
    secondary = null;

    LOG.info("===> Locking a dir, starting second 2NN");
    // Lock one of its dirs, make sure it fails to start
    LOG.info("Trying to lock " + savedSd);
    savedSd.lock();
    try {
      secondary = startSecondaryNameNode(conf);
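      // As in Example 1: reaching the assert below means the 2NN started
      // despite the lock, which is only acceptable if locking is
      // unsupported on this storage.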
      assertFalse("Should fail to start 2NN when " + savedSd + " is locked",
          savedSd.isLockSupported());
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains("already locked", ioe);
    } finally {
      savedSd.unlock();
    }
    
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Example 6
Source File: FSEditLog.java    From RDFS with Apache License 2.0
/**
 * Closes the current edit log and opens edits.new.
 */
synchronized void rollEditLog() throws IOException {
  //
  // If edits.new already exists in some directory, verify it
  // exists in all directories.
  //
  if (existsNew()) {
    for (Iterator<StorageDirectory> it = 
             fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
      File editsNew = getEditNewFile(it.next());
      if (!editsNew.exists()) {
        throw new IOException("Inconsistent existence of edits.new " +
                              editsNew);
      }
    }
    return; // nothing to do, edits.new exists!
  }

  close();                     // close existing edit log

  fsimage.attemptRestoreRemovedStorage();
  
  //
  // Open edits.new
  //
  boolean failedSd = false;
  for (Iterator<StorageDirectory> it = 
         fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
    StorageDirectory sd = it.next();
    try {
      EditLogFileOutputStream eStream = 
           new EditLogFileOutputStream(getEditNewFile(sd), metrics);
      eStream.create();
      editStreams.add(eStream);
    } catch (IOException e) {
      failedSd = true;
      // remove stream and this storage directory from list
      FSImage.LOG.warn("rollEditLog: removing storage " + sd.getRoot().getPath(), e);
      sd.unlock();
      fsimage.removedStorageDirs.add(sd);
      it.remove();
    }
  }
  if(failedSd)
    fsimage.incrementCheckpointTime();  // update time for the valid ones
}
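
Unlike the earlier examples, unlock() here is not error-path cleanup: the failed directory is evicted from the active set and released so another process (or a later restore attempt via removedStorageDirs) can reclaim it. A hedged sketch of that evict-while-iterating shape, where tryOpen(), dirs, and removed are hypothetical stand-ins for the stream creation and fsimage fields above:

import java.io.IOException;
import java.util.Iterator;
import java.util.List;

import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;

class EvictOnFailureSketch {
  // Hypothetical stand-in for creating the edits.new output stream.
  void tryOpen(StorageDirectory sd) throws IOException { /* no-op */ }

  void openAll(List<StorageDirectory> dirs,
               List<StorageDirectory> removed) throws IOException {
    for (Iterator<StorageDirectory> it = dirs.iterator(); it.hasNext();) {
      StorageDirectory sd = it.next();
      try {
        tryOpen(sd);
      } catch (IOException e) {
        sd.unlock();       // release in_use.lock so the dir can be reclaimed
        removed.add(sd);   // remember it for a later restore attempt
        it.remove();       // stop writing to this directory for now
      }
    }
  }
}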