org.apache.hadoop.hdfs.server.common.InconsistentFSStateException Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.common.InconsistentFSStateException. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: DataStorage.java    From RDFS with Apache License 2.0 6 votes vote down vote up
/**
 * Read data-node storage fields from the VERSION-file {@code props} of
 * storage directory {@code sd} and validate them against in-memory state.
 *
 * @param props properties read from the directory's VERSION file
 * @param sd storage directory the properties came from
 * @throws IOException if the stored storage ID conflicts with this storage
 */
protected void getFields(Properties props, 
                         StorageDirectory sd 
                         ) throws IOException {
  setLayoutVersion(props, sd);
  setStorageType(props, sd);

  // Read NamespaceID in version before federation
  if (layoutVersion > FSConstants.FEDERATION_VERSION) {
    setNamespaceID(props, sd);
    setcTime(props, sd);
  }

  // Storage ID must be present; if both the stored and in-memory IDs are
  // non-empty they must agree.
  String ssid = props.getProperty(STORAGE_ID);
  if (ssid == null ||
      !("".equals(storageID) || "".equals(ssid) ||
        storageID.equals(ssid)))
    throw new InconsistentFSStateException(sd.getRoot(),
        "has incompatible storage Id.");
  if ("".equals(storageID)) // update id only if it was empty
    storageID = ssid;
}
 
Example #2
Source File: FSImage.java    From RDFS with Apache License 2.0 6 votes vote down vote up
/**
 * Decide whether this storage directory still carries a pre-upgrade
 * "image" layout that needs to be converted.
 *
 * @param sd storage directory to inspect
 * @return true if the legacy image's layout version requires conversion
 * @throws IOException if the directory is in an inconsistent state or the
 *         old image file cannot be read
 */
public boolean isConversionNeeded(StorageDirectory sd) throws IOException {
  File legacyImageDir = new File(sd.getRoot(), "image");
  if (!legacyImageDir.exists()) {
    // A VERSION file with no legacy image directory is inconsistent.
    if (sd.getVersionFile().exists()) {
      throw new InconsistentFSStateException(sd.getRoot(),
          legacyImageDir + " does not exist.");
    }
    return false;
  }
  // Peek at the layout version stored at the head of the old image file.
  File legacyImage = new File(legacyImageDir, "fsimage");
  RandomAccessFile imageFile = new RandomAccessFile(legacyImage, "rws");
  try {
    imageFile.seek(0);
    int storedVersion = imageFile.readInt();
    if (storedVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION) {
      return false;
    }
  } finally {
    imageFile.close();
  }
  return true;
}
 
Example #3
Source File: FSImage.java    From hadoop-gpu with Apache License 2.0 6 votes vote down vote up
/**
 * Check whether the storage directory holds an old-format "image" layout
 * whose version indicates a conversion is still required.
 *
 * @param sd storage directory to inspect
 * @return true if conversion is needed, false otherwise
 * @throws IOException on inconsistent state or read failure
 */
public boolean isConversionNeeded(StorageDirectory sd) throws IOException {
  File imageDir = new File(sd.getRoot(), "image");
  if (!imageDir.exists()) {
    // Having a VERSION file but no old image directory is inconsistent.
    if (sd.getVersionFile().exists()) {
      throw new InconsistentFSStateException(sd.getRoot(),
          imageDir + " does not exist.");
    }
    return false;
  }
  // Read the layout version recorded at the start of the old image file;
  // anything older than LAST_PRE_UPGRADE_LAYOUT_VERSION needs no conversion.
  RandomAccessFile in =
      new RandomAccessFile(new File(imageDir, "fsimage"), "rws");
  try {
    in.seek(0);
    return in.readInt() >= LAST_PRE_UPGRADE_LAYOUT_VERSION;
  } finally {
    in.close();
  }
}
 
Example #4
Source File: NameSpaceSliceStorage.java    From RDFS with Apache License 2.0 5 votes vote down vote up
/**
 * Read namespace-slice storage fields from the VERSION-file {@code props}
 * and validate the namespace ID and layout version.
 *
 * @param props properties read from the VERSION file
 * @param sd storage directory the properties came from
 * @throws IOException if no layout version can be determined or the
 *         recorded version is from the future
 */
@Override
protected void getFields(Properties props, StorageDirectory sd)
    throws IOException {
  setNamespaceID(props, sd);
  setcTime(props, sd);
  
  String snsid = props.getProperty(NAMESPACE_ID);
  setNameSpaceID(sd.getRoot(), snsid);

  String property = props.getProperty(LAYOUT_VERSION);
  int lv;
  if (property == null) {
    // No NS-level layout version recorded; fall back to the top-level one.
    Integer topLayout = getTopLevelLayout(sd);
    if (topLayout == null) {
      throw new InconsistentFSStateException(sd.getRoot(),
          "Top level layout and NS level layout do not exist");
    }
    lv = topLayout;
  } else {
    lv = Integer.parseInt(property);
  }
  // Layout versions are negative and decrease over time, so a value below
  // the current LAYOUT_VERSION was written by a newer release.
  if (lv < FSConstants.LAYOUT_VERSION) { // future version
    throw new InconsistentFSStateException(sd.getRoot(),
        "has future layout version : " + lv);
  }
  layoutVersion = lv;
}
 
Example #5
Source File: FSImage.java    From RDFS with Apache License 2.0 5 votes vote down vote up
/**
 * Read FSImage fields from the VERSION-file {@code props}: layout version
 * (via super), distributed-upgrade state, optional image MD5 digest, and
 * the checkpoint time.
 *
 * @param props properties read from the VERSION file
 * @param sd storage directory the properties came from
 * @throws IOException if the directory is unformatted or the digest
 *         presence does not match the layout version
 */
protected void getFields(Properties props, 
                         StorageDirectory sd 
                         ) throws IOException {
  super.getFields(props, sd);
  // A zero layout version means the directory was never formatted.
  if (layoutVersion == 0)
    throw new IOException("NameNode directory " 
                          + sd.getRoot() + " is not formatted.");
  String sDUS, sDUV;
  sDUS = props.getProperty("distributedUpgradeState"); 
  sDUV = props.getProperty("distributedUpgradeVersion");
  // Missing properties default to "no upgrade in progress" at the
  // current layout version.
  setDistributedUpgradeState(
      sDUS == null? false : Boolean.parseBoolean(sDUS),
      sDUV == null? getLayoutVersion() : Integer.parseInt(sDUV));
  String sMd5 = props.getProperty(MESSAGE_DIGEST_PROPERTY);
  // Layout -26 and later must carry the image MD5 digest; earlier
  // layouts must not.
  if (layoutVersion <= -26) {
    if (sMd5 == null) {
      throw new InconsistentFSStateException(sd.getRoot(),
          "file " + STORAGE_FILE_VERSION + " does not have MD5 image digest.");
    }
    this.setImageDigest(new MD5Hash(sMd5));
  } else if (sMd5 != null) {
    throw new InconsistentFSStateException(sd.getRoot(),
        "file " + STORAGE_FILE_VERSION + 
        " has image MD5 digest when version is " + layoutVersion);
  }
  this.checkpointTime = readCheckpointTime(sd);
}
 
Example #6
Source File: JNStorage.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Read the layout version from the VERSION properties without range
 * checking it.
 *
 * @param props properties read from the VERSION file
 * @param sd storage directory the properties came from
 */
@Override
protected void setLayoutVersion(Properties props, StorageDirectory sd)
    throws IncorrectVersionException, InconsistentFSStateException {
  // For journal node, since it now does not decode but just scan through the
  // edits, it can handle edits with future version in most of the cases.
  // Thus currently we may skip the layoutVersion check here.
  String storedVersion = getProperty(props, sd, "layoutVersion");
  layoutVersion = Integer.parseInt(storedVersion);
}
 
Example #7
Source File: NameSpaceSliceStorage.java    From RDFS with Apache License 2.0 5 votes vote down vote up
/**
 * Validate and set the namespace ID.
 *
 * @param storage root of the storage directory, used in error reporting
 * @param nsid namespace ID string read from the VERSION file
 * @throws InconsistentFSStateException if the ID is missing or conflicts
 *         with a previously established namespace ID
 */
private void setNameSpaceID(File storage, String nsid)
    throws InconsistentFSStateException {
  if (nsid == null || nsid.equals("")) {
    throw new InconsistentFSStateException(storage, "file "
        + STORAGE_FILE_VERSION + " is invalid.");
  }
  int newNsId = Integer.parseInt(nsid);
  // Only the first directory may establish the ID; later ones must match.
  // Fixed message: "Unexepcted ... . Expected" -> "Unexpected ... . Expected"
  // (matches the wording used by the blockpoolID checks in this codebase).
  if (namespaceID > 0 && namespaceID != newNsId) {
    throw new InconsistentFSStateException(storage,
        "Unexpected namespaceID " + nsid + ". Expected " + namespaceID);
  }
  namespaceID = newNsId;
}
 
Example #8
Source File: BlockPoolSliceStorage.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Validate and set the block pool ID.
 *
 * @param storage root of the storage directory, used in error reporting
 * @param bpid block pool ID string read from the VERSION file
 * @throws InconsistentFSStateException if the ID is missing or conflicts
 *         with the ID already held by this storage
 */
private void setBlockPoolID(File storage, String bpid)
    throws InconsistentFSStateException {
  // An absent or empty ID means the VERSION file is unusable.
  if (bpid == null || bpid.equals("")) {
    throw new InconsistentFSStateException(storage, "file "
        + STORAGE_FILE_VERSION + " is invalid.");
  }

  boolean alreadySet = !blockpoolID.equals("");
  if (alreadySet && !blockpoolID.equals(bpid)) {
    throw new InconsistentFSStateException(storage,
        "Unexpected blockpoolID " + bpid + ". Expected " + blockpoolID);
  }
  blockpoolID = bpid;
}
 
Example #9
Source File: BackupImage.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Analyze backup storage directories for consistency.<br>
 * Recover from incomplete checkpoints if required.<br>
 * Read VERSION and fstime files if exist.<br>
 * Do not load image or edits.
 *
 * @throws IOException if the node should shutdown.
 */
void recoverCreateRead() throws IOException {
  for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    StorageState curState;
    try {
      curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
      // sd is locked but not opened
      switch(curState) {
      case NON_EXISTENT:
        // fail if any of the configured storage dirs are inaccessible
        throw new InconsistentFSStateException(sd.getRoot(),
              "checkpoint directory does not exist or is not accessible.");
      case NOT_FORMATTED:
        // for backup node all directories may be unformatted initially
        LOG.info("Storage directory " + sd.getRoot() + " is not formatted.");
        LOG.info("Formatting ...");
        sd.clearDirectory(); // create empty current
        break;
      case NORMAL:
        break;
      default:  // recovery is possible
        sd.doRecover(curState);
      }
      if(curState != StorageState.NOT_FORMATTED) {
        // read and verify consistency with other directories
        storage.readProperties(sd);
      }
    } catch(IOException ioe) {
      // Release the directory lock before propagating the failure.
      sd.unlock();
      throw ioe;
    }
  }
}
 
Example #10
Source File: NNStorage.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/** Validate and set block pool ID */
private void setBlockPoolID(File storage, String bpid)
    throws InconsistentFSStateException {
  if (bpid == null || bpid.equals("")) {
    throw new InconsistentFSStateException(storage, "file "
        + Storage.STORAGE_FILE_VERSION + " has no block pool Id.");
  }
  
  if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
    throw new InconsistentFSStateException(storage,
        "Unexepcted blockpoolID " + bpid + " . Expected " + blockpoolID);
  }
  setBlockPoolID(bpid);
}
 
Example #11
Source File: FSImage.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Load the given image file into the namesystem, choosing the MD5
 * verification strategy based on the storage layout version.
 *
 * @param target namesystem to load the image into
 * @param recovery metadata recovery context, may drive error handling
 * @param imageFile image file (and its storage directory) to load
 * @param startupOption startup option; ROLLBACK of a rolling upgrade is
 *        handled specially
 * @throws IOException if properties cannot be read or the image fails to load
 */
void loadFSImageFile(FSNamesystem target, MetaRecoveryContext recovery,
    FSImageFile imageFile, StartupOption startupOption) throws IOException {
  LOG.debug("Planning to load image :\n" + imageFile);
  StorageDirectory sdForProperties = imageFile.sd;
  storage.readProperties(sdForProperties, startupOption);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
    // For txid-based layout, we should have a .md5 file
    // next to the image file
    boolean isRollingRollback = RollingUpgradeStartupOption.ROLLBACK
        .matches(startupOption);
    loadFSImage(imageFile.getFile(), target, recovery, isRollingRollback);
  } else if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.FSIMAGE_CHECKSUM, getLayoutVersion())) {
    // In 0.22, we have the checksum stored in the VERSION file.
    String md5 = storage.getDeprecatedProperty(
        NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY);
    if (md5 == null) {
      throw new InconsistentFSStateException(sdForProperties.getRoot(),
          "Message digest property " +
          NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY +
          " not set for storage directory " + sdForProperties.getRoot());
    }
    loadFSImage(imageFile.getFile(), new MD5Hash(md5), target, recovery,
        false);
  } else {
    // We don't have any record of the md5sum
    loadFSImage(imageFile.getFile(), null, target, recovery, false);
  }
}
 
Example #12
Source File: FSImage.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Check if an upgrade is in progress. An upgrade (regular or rolling) is
 * allowed only when no storage directory still carries a previous fs
 * state; such a state must be finalized or rolled back first.
 *
 * @param storage namenode storage whose directories are inspected
 * @throws IOException if any directory still has a previous fs state
 */
public static void checkUpgrade(NNStorage storage) throws IOException {
  Iterator<StorageDirectory> dirs = storage.dirIterator(false);
  while (dirs.hasNext()) {
    StorageDirectory sd = dirs.next();
    if (sd.getPreviousDir().exists()) {
      throw new InconsistentFSStateException(sd.getRoot(),
          "previous fs state should not exist during upgrade. "
          + "Finalize or rollback first.");
    }
  }
}
 
Example #13
Source File: NNStorage.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Validate and set the block pool ID.
 *
 * @param storage root of the storage directory, used in error reporting
 * @param bpid block pool ID string read from the VERSION file
 * @throws InconsistentFSStateException if the ID is missing or conflicts
 *         with the ID already held by this storage
 */
private void setBlockPoolID(File storage, String bpid)
    throws InconsistentFSStateException {
  if (bpid == null || bpid.equals("")) {
    throw new InconsistentFSStateException(storage, "file "
        + Storage.STORAGE_FILE_VERSION + " has no block pool Id.");
  }
  
  // Fixed message typo: "Unexepcted ... . Expected" -> "Unexpected ...
  // . Expected" (matches BlockPoolSliceStorage's wording).
  if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
    throw new InconsistentFSStateException(storage,
        "Unexpected blockpoolID " + bpid + ". Expected " + blockpoolID);
  }
  setBlockPoolID(bpid);
}
 
Example #14
Source File: JNStorage.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Store the layout version from the VERSION properties as-is.
 *
 * @param props properties read from the VERSION file
 * @param sd storage directory the properties came from
 */
@Override
protected void setLayoutVersion(Properties props, StorageDirectory sd)
    throws IncorrectVersionException, InconsistentFSStateException {
  // For journal node, since it now does not decode but just scan through the
  // edits, it can handle edits with future version in most of the cases.
  // Thus currently we may skip the layoutVersion check here.
  layoutVersion = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
}
 
Example #15
Source File: BlockPoolSliceStorage.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Validate and set the block pool ID.
 *
 * @param storage root of the storage directory, used in error reporting
 * @param bpid block pool ID string read from the VERSION file
 * @throws InconsistentFSStateException if the ID is missing or conflicts
 *         with the ID already held by this storage
 */
private void setBlockPoolID(File storage, String bpid)
    throws InconsistentFSStateException {
  // Reject a missing or empty ID outright — the VERSION file is unusable.
  if (bpid == null || bpid.equals("")) {
    throw new InconsistentFSStateException(storage, "file "
        + STORAGE_FILE_VERSION + " is invalid.");
  }

  // Once set, the in-memory ID must match what the VERSION file records.
  if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
    throw new InconsistentFSStateException(storage,
        "Unexpected blockpoolID " + bpid + ". Expected " + blockpoolID);
  }
  blockpoolID = bpid;
}
 
Example #16
Source File: BackupImage.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Analyze backup storage directories for consistency.<br>
 * Recover from incomplete checkpoints if required.<br>
 * Read VERSION and fstime files if exist.<br>
 * Do not load image or edits.
 *
 * @throws IOException if the node should shutdown.
 */
void recoverCreateRead() throws IOException {
  for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    StorageState curState;
    try {
      curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
      // sd is locked but not opened
      switch(curState) {
      case NON_EXISTENT:
        // fail if any of the configured storage dirs are inaccessible
        throw new InconsistentFSStateException(sd.getRoot(),
              "checkpoint directory does not exist or is not accessible.");
      case NOT_FORMATTED:
        // for backup node all directories may be unformatted initially
        LOG.info("Storage directory " + sd.getRoot() + " is not formatted.");
        LOG.info("Formatting ...");
        sd.clearDirectory(); // create empty current
        break;
      case NORMAL:
        break;
      default:  // recovery is possible
        sd.doRecover(curState);
      }
      if(curState != StorageState.NOT_FORMATTED) {
        // read and verify consistency with other directories
        storage.readProperties(sd);
      }
    } catch(IOException ioe) {
      // Release the directory lock before propagating the failure.
      sd.unlock();
      throw ioe;
    }
  }
}
 
Example #17
Source File: FSImage.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Load the given image file into the namesystem, choosing the MD5
 * verification strategy based on the storage layout version.
 *
 * @param target namesystem to load the image into
 * @param recovery metadata recovery context, may drive error handling
 * @param imageFile image file (and its storage directory) to load
 * @param startupOption startup option; ROLLBACK of a rolling upgrade is
 *        handled specially
 * @throws IOException if properties cannot be read or the image fails to load
 */
void loadFSImageFile(FSNamesystem target, MetaRecoveryContext recovery,
    FSImageFile imageFile, StartupOption startupOption) throws IOException {
  LOG.debug("Planning to load image :\n" + imageFile);
  StorageDirectory sdForProperties = imageFile.sd;
  storage.readProperties(sdForProperties, startupOption);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
    // For txid-based layout, we should have a .md5 file
    // next to the image file
    boolean isRollingRollback = RollingUpgradeStartupOption.ROLLBACK
        .matches(startupOption);
    loadFSImage(imageFile.getFile(), target, recovery, isRollingRollback);
  } else if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.FSIMAGE_CHECKSUM, getLayoutVersion())) {
    // In 0.22, we have the checksum stored in the VERSION file.
    String md5 = storage.getDeprecatedProperty(
        NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY);
    if (md5 == null) {
      throw new InconsistentFSStateException(sdForProperties.getRoot(),
          "Message digest property " +
          NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY +
          " not set for storage directory " + sdForProperties.getRoot());
    }
    loadFSImage(imageFile.getFile(), new MD5Hash(md5), target, recovery,
        false);
  } else {
    // We don't have any record of the md5sum
    loadFSImage(imageFile.getFile(), null, target, recovery, false);
  }
}
 
Example #18
Source File: FSImage.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/** Check if upgrade is in progress. */
public static void checkUpgrade(NNStorage storage) throws IOException {
  // Upgrade or rolling upgrade is allowed only if there are 
  // no previous fs states in any of the directories
  for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext();) {
    StorageDirectory sd = it.next();
    if (sd.getPreviousDir().exists())
      throw new InconsistentFSStateException(sd.getRoot(),
          "previous fs state should not exist during upgrade. "
          + "Finalize or rollback first.");
  }
}
 
Example #19
Source File: DataStorage.java    From hadoop-gpu with Apache License 2.0 5 votes vote down vote up
protected void getFields(Properties props, 
                         StorageDirectory sd 
                         ) throws IOException {
  super.getFields(props, sd);
  String ssid = props.getProperty("storageID");
  if (ssid == null ||
      !("".equals(storageID) || "".equals(ssid) ||
        storageID.equals(ssid)))
    throw new InconsistentFSStateException(sd.getRoot(),
                                           "has incompatible storage Id.");
  if ("".equals(storageID)) // update id only if it was empty
    storageID = ssid;
}
 
Example #20
Source File: SecondaryNameNode.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Analyze checkpoint directories.
 * Create directories if they do not exist.
 * Recover from an unsuccessful checkpoint if necessary.
 *
 * @param format if true, clear each storage directory before analysis
 * @throws IOException
 */
void recoverCreate(boolean format) throws IOException {
  // Re-add any previously removed directories, then drop stale locks.
  storage.attemptRestoreRemovedStorage();
  storage.unlockAll();

  for (Iterator<StorageDirectory> it = 
               storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    boolean isAccessible = true;
    try { // create directories if don't exist yet
      if(!sd.getRoot().mkdirs()) {
        // do nothing, directory is already created
      }
    } catch(SecurityException se) {
      isAccessible = false;
    }
    if(!isAccessible)
      throw new InconsistentFSStateException(sd.getRoot(),
          "cannot access checkpoint directory.");
    
    if (format) {
      // Don't confirm, since this is just the secondary namenode.
      LOG.info("Formatting storage directory " + sd)
;
      sd.clearDirectory();
    }
    
    StorageState curState;
    try {
      curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
      // sd is locked but not opened
      switch(curState) {
      case NON_EXISTENT:
        // fail if any of the configured checkpoint dirs are inaccessible 
        throw new InconsistentFSStateException(sd.getRoot(),
              "checkpoint directory does not exist or is not accessible.");
      case NOT_FORMATTED:
        break;  // it's ok since initially there is no current and VERSION
      case NORMAL:
        // Read the VERSION file. This verifies that:
        // (a) the VERSION file for each of the directories is the same,
        // and (b) when we connect to a NN, we can verify that the remote
        // node matches the same namespace that we ran on previously.
        storage.readProperties(sd);
        break;
      default:  // recovery is possible
        sd.doRecover(curState);
      }
    } catch (IOException ioe) {
      // Release the directory lock before propagating the failure.
      sd.unlock();
      throw ioe;
    }
  }
}
 
Example #21
Source File: SecondaryNameNode.java    From hadoop-gpu with Apache License 2.0 4 votes vote down vote up
/**
 * Analyze checkpoint directories.
 * Create directories if they do not exist.
 * Recover from an unsuccessful checkpoint if necessary.
 *
 * @param dataDirs checkpoint data directories
 * @param editsDirs checkpoint edits directories
 * @throws IOException
 */
void recoverCreate(Collection<File> dataDirs,
                   Collection<File> editsDirs) throws IOException {
  // Work on copies so the caller's collections are not mutated.
  Collection<File> tempDataDirs = new ArrayList<File>(dataDirs);
  Collection<File> tempEditsDirs = new ArrayList<File>(editsDirs);
  this.storageDirs = new ArrayList<StorageDirectory>();
  setStorageDirectories(tempDataDirs, tempEditsDirs);
  for (Iterator<StorageDirectory> it = 
               dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    boolean isAccessible = true;
    try { // create directories if don't exist yet
      if(!sd.getRoot().mkdirs()) {
        // do nothing, directory is already created
      }
    } catch(SecurityException se) {
      isAccessible = false;
    }
    if(!isAccessible)
      throw new InconsistentFSStateException(sd.getRoot(),
          "cannot access checkpoint directory.");
    StorageState curState;
    try {
      curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR);
      // sd is locked but not opened
      switch(curState) {
      case NON_EXISTENT:
        // fail if any of the configured checkpoint dirs are inaccessible 
        throw new InconsistentFSStateException(sd.getRoot(),
              "checkpoint directory does not exist or is not accessible.");
      case NOT_FORMATTED:
        break;  // it's ok since initially there is no current and VERSION
      case NORMAL:
        break;
      default:  // recovery is possible
        sd.doRecover(curState);
      }
    } catch (IOException ioe) {
      // Release the directory lock before propagating the failure.
      sd.unlock();
      throw ioe;
    }
  }
}
 
Example #22
Source File: SecondaryNameNode.java    From RDFS with Apache License 2.0 4 votes vote down vote up
/**
 * Analyze checkpoint directories.
 * Create directories if they do not exist.
 * Recover from an unsuccessful checkpoint is necessary. 
 * 
 * @param dataDirs
 * @param editsDirs
 * @throws IOException
 */
void recoverCreate(Collection<File> dataDirs,
                   Collection<File> editsDirs) throws IOException {
  Collection<File> tempDataDirs = new ArrayList<File>(dataDirs);
  Collection<File> tempEditsDirs = new ArrayList<File>(editsDirs);
  this.storageDirs = new ArrayList<StorageDirectory>();
  setStorageDirectories(tempDataDirs, tempEditsDirs);
  for (Iterator<StorageDirectory> it = 
               dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    boolean isAccessible = true;
    try { // create directories if don't exist yet
      if(!sd.getRoot().mkdirs()) {
        // do nothing, directory is already created
      }
    } catch(SecurityException se) {
      isAccessible = false;
    }
    if(!isAccessible)
      throw new InconsistentFSStateException(sd.getRoot(),
          "cannot access checkpoint directory.");
    StorageState curState;
    try {
      curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR);
      // sd is locked but not opened
      switch(curState) {
      case NON_EXISTENT:
        // fail if any of the configured checkpoint dirs are inaccessible 
        throw new InconsistentFSStateException(sd.getRoot(),
              "checkpoint directory does not exist or is not accessible.");
      case NOT_FORMATTED:
        break;  // it's ok since initially there is no current and VERSION
      case NORMAL:
        break;
      default:  // recovery is possible
        sd.doRecover(curState);
      }
    } catch (IOException ioe) {
      sd.unlock();
      throw ioe;
    }
  }
}
 
Example #23
Source File: DataStorage.java    From big-c with Apache License 2.0 4 votes vote down vote up
private void setFieldsFromProperties(Properties props, StorageDirectory sd,
    boolean overrideLayoutVersion, int toLayoutVersion) throws IOException {
  if (overrideLayoutVersion) {
    this.layoutVersion = toLayoutVersion;
  } else {
    setLayoutVersion(props, sd);
  }
  setcTime(props, sd);
  checkStorageType(props, sd);
  setClusterId(props, layoutVersion, sd);
  
  // Read NamespaceID in version before federation
  if (!DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.FEDERATION, layoutVersion)) {
    setNamespaceID(props, sd);
  }
  

  // valid storage id, storage id may be empty
  String ssid = props.getProperty("storageID");
  if (ssid == null) {
    throw new InconsistentFSStateException(sd.getRoot(), "file "
        + STORAGE_FILE_VERSION + " is invalid.");
  }
  String sid = sd.getStorageUuid();
  if (!(sid == null || sid.equals("") ||
        ssid.equals("") || sid.equals(ssid))) {
    throw new InconsistentFSStateException(sd.getRoot(),
        "has incompatible storage Id.");
  }

  if (sid == null) { // update id only if it was null
    sd.setStorageUuid(ssid);
  }

  // Update the datanode UUID if present.
  if (props.getProperty("datanodeUuid") != null) {
    String dnUuid = props.getProperty("datanodeUuid");

    if (getDatanodeUuid() == null) {
      setDatanodeUuid(dnUuid);
    } else if (getDatanodeUuid().compareTo(dnUuid) != 0) {
      throw new InconsistentFSStateException(sd.getRoot(),
          "Root " + sd.getRoot() + ": DatanodeUuid=" + dnUuid +
          ", does not match " + getDatanodeUuid() + " from other" +
          " StorageDirectory.");
    }
  }
}
 
Example #24
Source File: FSImage.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * For each storage directory, performs recovery of incomplete transitions
 * (eg. upgrade, rollback, checkpoint) and inserts the directory's storage
 * state into the dataDirStates map.
 * @param dataDirStates output of storage directory states
 * @return true if there is at least one valid formatted storage directory
 */
public static boolean recoverStorageDirs(StartupOption startOpt,
    NNStorage storage, Map<StorageDirectory, StorageState> dataDirStates)
    throws IOException {
  boolean isFormatted = false;
  // This loop needs to be over all storage dirs, even shared dirs, to make
  // sure that we properly examine their state, but we make sure we don't
  // mutate the shared dir below in the actual loop.
  for (Iterator<StorageDirectory> it = 
                    storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    StorageState curState;
    if (startOpt == StartupOption.METADATAVERSION) {
      /* All we need is the layout version. */
      storage.readProperties(sd);
      return true;
    }

    try {
      curState = sd.analyzeStorage(startOpt, storage);
      // sd is locked but not opened
      switch(curState) {
      case NON_EXISTENT:
        // name-node fails if any of the configured storage dirs are missing
        throw new InconsistentFSStateException(sd.getRoot(),
                    "storage directory does not exist or is not accessible.");
      case NOT_FORMATTED:
        break;
      case NORMAL:
        break;
      default:  // recovery is possible
        sd.doRecover(curState);
      }
      if (curState != StorageState.NOT_FORMATTED 
          && startOpt != StartupOption.ROLLBACK) {
        // read and verify consistency with other directories
        storage.readProperties(sd, startOpt);
        isFormatted = true;
      }
      if (startOpt == StartupOption.IMPORT && isFormatted)
        // import of a checkpoint is allowed only into empty image directories
        throw new IOException("Cannot import image from a checkpoint. " 
            + " NameNode already contains an image in " + sd.getRoot());
    } catch (IOException ioe) {
      sd.unlock();
      throw ioe;
    }
    dataDirStates.put(sd,curState);
  }
  return isFormatted;
}
 
Example #25
Source File: DataStorage.java    From hadoop with Apache License 2.0 4 votes vote down vote up
private void setFieldsFromProperties(Properties props, StorageDirectory sd,
    boolean overrideLayoutVersion, int toLayoutVersion) throws IOException {
  if (overrideLayoutVersion) {
    this.layoutVersion = toLayoutVersion;
  } else {
    setLayoutVersion(props, sd);
  }
  setcTime(props, sd);
  checkStorageType(props, sd);
  setClusterId(props, layoutVersion, sd);
  
  // Read NamespaceID in version before federation
  if (!DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.FEDERATION, layoutVersion)) {
    setNamespaceID(props, sd);
  }
  

  // valid storage id, storage id may be empty
  String ssid = props.getProperty("storageID");
  if (ssid == null) {
    throw new InconsistentFSStateException(sd.getRoot(), "file "
        + STORAGE_FILE_VERSION + " is invalid.");
  }
  String sid = sd.getStorageUuid();
  if (!(sid == null || sid.equals("") ||
        ssid.equals("") || sid.equals(ssid))) {
    throw new InconsistentFSStateException(sd.getRoot(),
        "has incompatible storage Id.");
  }

  if (sid == null) { // update id only if it was null
    sd.setStorageUuid(ssid);
  }

  // Update the datanode UUID if present.
  if (props.getProperty("datanodeUuid") != null) {
    String dnUuid = props.getProperty("datanodeUuid");

    if (getDatanodeUuid() == null) {
      setDatanodeUuid(dnUuid);
    } else if (getDatanodeUuid().compareTo(dnUuid) != 0) {
      throw new InconsistentFSStateException(sd.getRoot(),
          "Root " + sd.getRoot() + ": DatanodeUuid=" + dnUuid +
          ", does not match " + getDatanodeUuid() + " from other" +
          " StorageDirectory.");
    }
  }
}
 
Example #26
Source File: SecondaryNameNode.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Analyze checkpoint directories.
 * Create directories if they do not exist.
 * Recover from an unsuccessful checkpoint if necessary.
 *
 * @throws IOException
 */
void recoverCreate(boolean format) throws IOException {
  storage.attemptRestoreRemovedStorage();
  storage.unlockAll();

  for (Iterator<StorageDirectory> it = 
               storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    boolean isAccessible = true;
    try { // create directories if don't exist yet
      if(!sd.getRoot().mkdirs()) {
        // do nothing, directory is already created
      }
    } catch(SecurityException se) {
      isAccessible = false;
    }
    if(!isAccessible)
      throw new InconsistentFSStateException(sd.getRoot(),
          "cannot access checkpoint directory.");
    
    if (format) {
      // Don't confirm, since this is just the secondary namenode.
      LOG.info("Formatting storage directory " + sd);
      sd.clearDirectory();
    }
    
    StorageState curState;
    try {
      curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
      // sd is locked but not opened
      switch(curState) {
      case NON_EXISTENT:
        // fail if any of the configured checkpoint dirs are inaccessible 
        throw new InconsistentFSStateException(sd.getRoot(),
              "checkpoint directory does not exist or is not accessible.");
      case NOT_FORMATTED:
        break;  // it's ok since initially there is no current and VERSION
      case NORMAL:
        // Read the VERSION file. This verifies that:
        // (a) the VERSION file for each of the directories is the same,
        // and (b) when we connect to a NN, we can verify that the remote
        // node matches the same namespace that we ran on previously.
        storage.readProperties(sd);
        break;
      default:  // recovery is possible
        sd.doRecover(curState);
      }
    } catch (IOException ioe) {
      sd.unlock();
      throw ioe;
    }
  }
}
 
Example #27
Source File: FSImage.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * For each storage directory, performs recovery of incomplete transitions
 * (eg. upgrade, rollback, checkpoint) and inserts the directory's storage
 * state into the dataDirStates map.
 * @param dataDirStates output of storage directory states
 * @return true if there is at least one valid formatted storage directory
 */
public static boolean recoverStorageDirs(StartupOption startOpt,
    NNStorage storage, Map<StorageDirectory, StorageState> dataDirStates)
    throws IOException {
  boolean isFormatted = false;
  // This loop needs to be over all storage dirs, even shared dirs, to make
  // sure that we properly examine their state, but we make sure we don't
  // mutate the shared dir below in the actual loop.
  for (Iterator<StorageDirectory> it = 
                    storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    StorageState curState;
    if (startOpt == StartupOption.METADATAVERSION) {
      /* All we need is the layout version. */
      storage.readProperties(sd);
      return true;
    }

    try {
      curState = sd.analyzeStorage(startOpt, storage);
      // sd is locked but not opened
      switch(curState) {
      case NON_EXISTENT:
        // name-node fails if any of the configured storage dirs are missing
        throw new InconsistentFSStateException(sd.getRoot(),
                    "storage directory does not exist or is not accessible.");
      case NOT_FORMATTED:
        break;
      case NORMAL:
        break;
      default:  // recovery is possible
        sd.doRecover(curState);
      }
      if (curState != StorageState.NOT_FORMATTED 
          && startOpt != StartupOption.ROLLBACK) {
        // read and verify consistency with other directories
        storage.readProperties(sd, startOpt);
        isFormatted = true;
      }
      if (startOpt == StartupOption.IMPORT && isFormatted)
        // import of a checkpoint is allowed only into empty image directories
        throw new IOException("Cannot import image from a checkpoint. " 
            + " NameNode already contains an image in " + sd.getRoot());
    } catch (IOException ioe) {
      // Release the directory lock before propagating the failure.
      sd.unlock();
      throw ioe;
    }
    dataDirStates.put(sd,curState);
  }
  return isFormatted;
}