org.apache.hadoop.hdfs.server.common.Storage Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.common.Storage. Each example notes the source file and project it was taken from.
Example #1
Source File: TestFsDatasetImpl.java    From hadoop with Apache License 2.0
private static void createStorageDirs(DataStorage storage, Configuration conf,
    int numDirs) throws IOException {
  List<Storage.StorageDirectory> dirs =
      new ArrayList<Storage.StorageDirectory>();
  List<String> dirStrings = new ArrayList<String>();
  for (int i = 0; i < numDirs; i++) {
    File loc = new File(BASE_DIR + "/data" + i);
    dirStrings.add(new Path(loc.toString()).toUri().toString());
    loc.mkdirs();
    dirs.add(createStorageDirectory(loc));
    when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
  }

  String dataDir = StringUtils.join(",", dirStrings);
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir);
  when(storage.dirIterator()).thenReturn(dirs.iterator());
  when(storage.getNumStorageDirs()).thenReturn(numDirs);
}
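
The createStorageDirectory helper referenced above is not shown on this page. A minimal stand-in, assuming no storage ID needs to be assigned to the directory, might simply wrap the root in a Storage.StorageDirectory:

// Hypothetical stand-in for the createStorageDirectory helper used above;
// the real test helper may additionally assign a storage ID to the directory.
private static Storage.StorageDirectory createStorageDirectory(File root) {
  return new Storage.StorageDirectory(root);
}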
 
Example #2
Source File: TestBlockPoolSliceStorage.java    From big-c with Apache License 2.0
/**
 * Test conversion from a block file path to its target trash
 * directory.
 */
public void getTrashDirectoryForBlockFile(String fileName, int nestingLevel) {
  final String blockFileSubdir = makeRandomBlockFileSubdir(nestingLevel);
  final String blockFileName = fileName;

  String testFilePath =
      storage.getSingularStorageDir().getRoot() + File.separator +
          Storage.STORAGE_DIR_CURRENT +
          blockFileSubdir + blockFileName;

  String expectedTrashPath =
      storage.getSingularStorageDir().getRoot() + File.separator +
          BlockPoolSliceStorage.TRASH_ROOT_DIR +
          blockFileSubdir.substring(0, blockFileSubdir.length() - 1);

  LOG.info("Got subdir " + blockFileSubdir);
  LOG.info("Generated file path " + testFilePath);
  assertThat(storage.getTrashDirectory(new File(testFilePath)), is(expectedTrashPath));
}
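
For concreteness, the mapping exercised above might look like the following, with made-up paths (the real subdir chain is generated randomly by the test):

// Hypothetical values only: a block file under current/ maps to a trash
// directory with the same subdir chain, dropping the file name and the
// trailing separator.
String blockFile = "/data/dfs/data/current/BP-1/current/subdir0/subdir1/blk_1234";
String trashDir  = "/data/dfs/data/current/BP-1/trash/subdir0/subdir1";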
 
Example #3
Source File: TestDFSStorageStateRecovery.java    From hadoop with Apache License 2.0
/**
 * For a block pool, verify that the current and/or previous directories exist
 * as indicated by the method parameters.  If previous exists, verify that
 * it hasn't been modified by comparing the checksums of all its
 * contained files with their original checksums.  It is assumed that
 * the server has recovered.
 * @param baseDirs directories pointing to block pool storage
 * @param currentShouldExist whether the current directory should exist under storage
 * @param previousShouldExist whether the previous directory should exist under storage
 */
void checkResultBlockPool(String[] baseDirs, boolean currentShouldExist,
    boolean previousShouldExist) throws IOException
{
  if (currentShouldExist) {
    for (int i = 0; i < baseDirs.length; i++) {
      File bpCurDir = new File(baseDirs[i], Storage.STORAGE_DIR_CURRENT);
      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurDir,
              false), UpgradeUtilities.checksumMasterBlockPoolContents());
    }
  }
  if (previousShouldExist) {
    for (int i = 0; i < baseDirs.length; i++) {
      File bpPrevDir = new File(baseDirs[i], Storage.STORAGE_DIR_PREVIOUS);
      assertTrue(bpPrevDir.isDirectory());
      assertEquals(
                   UpgradeUtilities.checksumContents(DATA_NODE, bpPrevDir,
                   false), UpgradeUtilities.checksumMasterBlockPoolContents());
    }
  }
}
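
The two constants used above resolve to the standard HDFS directory names. A minimal sketch of the layout the assertions walk, with a hypothetical base directory:

// Storage.STORAGE_DIR_CURRENT is "current" and Storage.STORAGE_DIR_PREVIOUS is
// "previous"; the base directory below is hypothetical.
File base = new File("/data/dfs/data/current/BP-1");
File bpCurDir  = new File(base, Storage.STORAGE_DIR_CURRENT);   // checked when currentShouldExist
File bpPrevDir = new File(base, Storage.STORAGE_DIR_PREVIOUS);  // checked when previousShouldExist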
 
Example #4
Source File: TestBlockPoolSliceStorage.java    From big-c with Apache License 2.0
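/**
 * Test conversion from a deleted block file path in trash back to its
 * restore directory under the current directory.
 */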
public void getRestoreDirectoryForBlockFile(String fileName, int nestingLevel) {
  BlockPoolSliceStorage storage = makeBlockPoolStorage();
  final String blockFileSubdir = makeRandomBlockFileSubdir(nestingLevel);
  final String blockFileName = fileName;

  String deletedFilePath =
      storage.getSingularStorageDir().getRoot() + File.separator +
      BlockPoolSliceStorage.TRASH_ROOT_DIR +
      blockFileSubdir + blockFileName;

  String expectedRestorePath =
      storage.getSingularStorageDir().getRoot() + File.separator +
          Storage.STORAGE_DIR_CURRENT +
          blockFileSubdir.substring(0, blockFileSubdir.length() - 1);

  LOG.info("Generated deleted file path " + deletedFilePath);
  assertThat(storage.getRestoreDirectory(new File(deletedFilePath)),
             is(expectedRestorePath));

}
 
Example #5
Source File: UpgradeUtilities.java    From big-c with Apache License 2.0
/**
 * Simulate the {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} of a 
 * populated DFS filesystem.
 * For each parent directory, this method populates <code>parent/dirName</code>
 * with the contents of a block pool storage directory taken from a singleton
 * datanode master (which contains version and block files). If the destination
 * directory does not exist, it will be created.  If the directory already
 * exists, it will first be deleted.
 * 
 * @param parents parent directory where {@code dirName} is created
 * @param dirName directory under which storage directory is created
 * @param bpid block pool id for which the storage directory is created.
 * @return the array of created directories
 */
public static File[] createBlockPoolStorageDirs(String[] parents,
    String dirName, String bpid) throws Exception {
  File[] retVal = new File[parents.length];
  Path bpCurDir = new Path(MiniDFSCluster.getBPDir(datanodeStorage,
      bpid, Storage.STORAGE_DIR_CURRENT));
  for (int i = 0; i < parents.length; i++) {
    File newDir = new File(parents[i] + "/current/" + bpid, dirName);
    createEmptyDirs(new String[] {newDir.toString()});
    LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
    localFS.copyToLocalFile(bpCurDir,
                            new Path(newDir.toString()),
                            false);
    retVal[i] = newDir;
  }
  return retVal;
}
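
A hypothetical invocation, with made-up parent directories and block pool id:

// Illustrative only; paths and block pool id are invented for this sketch.
File[] created = UpgradeUtilities.createBlockPoolStorageDirs(
    new String[] { "/tmp/dfs/data1", "/tmp/dfs/data2" },
    "current",
    "BP-1234567-127.0.0.1-1234567890123");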
 
Example #6
Source File: TestBlockPoolSliceStorage.java    From hadoop with Apache License 2.0
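/**
 * Test conversion from a deleted block file path in trash back to its
 * restore directory under the current directory.
 */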
public void getRestoreDirectoryForBlockFile(String fileName, int nestingLevel) {
  BlockPoolSliceStorage storage = makeBlockPoolStorage();
  final String blockFileSubdir = makeRandomBlockFileSubdir(nestingLevel);
  final String blockFileName = fileName;

  String deletedFilePath =
      storage.getSingularStorageDir().getRoot() + File.separator +
      BlockPoolSliceStorage.TRASH_ROOT_DIR +
      blockFileSubdir + blockFileName;

  String expectedRestorePath =
      storage.getSingularStorageDir().getRoot() + File.separator +
          Storage.STORAGE_DIR_CURRENT +
          blockFileSubdir.substring(0, blockFileSubdir.length() - 1);

  LOG.info("Generated deleted file path " + deletedFilePath);
  assertThat(storage.getRestoreDirectory(new File(deletedFilePath)),
             is(expectedRestorePath));

}
 
Example #7
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
/**
 * Gets initial volume failure information for all volumes that failed
 * immediately at startup.  The method works by determining the set difference
 * between all configured storage locations and the actual storage locations in
 * use after attempting to put all of them into service.
 *
 * @return each storage location that has failed
 */
private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
    Collection<StorageLocation> dataLocations, DataStorage storage) {
  Set<String> failedLocationSet = Sets.newHashSetWithExpectedSize(
      dataLocations.size());
  for (StorageLocation sl: dataLocations) {
    failedLocationSet.add(sl.getFile().getAbsolutePath());
  }
  for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
       it.hasNext(); ) {
    Storage.StorageDirectory sd = it.next();
    failedLocationSet.remove(sd.getRoot().getAbsolutePath());
  }
  List<VolumeFailureInfo> volumeFailureInfos = Lists.newArrayListWithCapacity(
      failedLocationSet.size());
  long failureDate = Time.now();
  for (String failedStorageLocation: failedLocationSet) {
    volumeFailureInfos.add(new VolumeFailureInfo(failedStorageLocation,
        failureDate));
  }
  return volumeFailureInfos;
}
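
The set difference described in the javadoc can be illustrated with plain collections; a sketch with hypothetical paths:

// Hypothetical paths: every configured location that was not brought into
// service is reported as a failed volume.
Set<String> configured = new HashSet<>(Arrays.asList("/d1", "/d2", "/d3"));
Set<String> inService  = new HashSet<>(Arrays.asList("/d1", "/d3"));
configured.removeAll(inService);   // leaves {"/d2"}, the failed location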
 
Example #8
Source File: TransferFsImage.java    From big-c with Apache License 2.0
public static MD5Hash downloadImageToStorage(URL fsName, long imageTxId,
    Storage dstStorage, boolean needDigest) throws IOException {
  String fileid = ImageServlet.getParamStringForImage(null,
      imageTxId, dstStorage);
  String fileName = NNStorage.getCheckpointImageFileName(imageTxId);
  
  List<File> dstFiles = dstStorage.getFiles(
      NameNodeDirType.IMAGE, fileName);
  if (dstFiles.isEmpty()) {
    throw new IOException("No targets in destination storage!");
  }
  
  MD5Hash hash = getFileClient(fsName, fileid, dstFiles, dstStorage, needDigest);
  LOG.info("Downloaded file " + dstFiles.get(0).getName() + " size " +
      dstFiles.get(0).length() + " bytes.");
  return hash;
}
 
Example #9
Source File: TestBlockPoolSliceStorage.java    From hadoop with Apache License 2.0
/**
 * Test conversion from a block file path to its target trash
 * directory.
 */
public void getTrashDirectoryForBlockFile(String fileName, int nestingLevel) {
  final String blockFileSubdir = makeRandomBlockFileSubdir(nestingLevel);
  final String blockFileName = fileName;

  String testFilePath =
      storage.getSingularStorageDir().getRoot() + File.separator +
          Storage.STORAGE_DIR_CURRENT +
          blockFileSubdir + blockFileName;

  String expectedTrashPath =
      storage.getSingularStorageDir().getRoot() + File.separator +
          BlockPoolSliceStorage.TRASH_ROOT_DIR +
          blockFileSubdir.substring(0, blockFileSubdir.length() - 1);

  LOG.info("Got subdir " + blockFileSubdir);
  LOG.info("Generated file path " + testFilePath);
  assertThat(storage.getTrashDirectory(new File(testFilePath)), is(expectedTrashPath));
}
 
Example #10
Source File: TestDFSUpgradeWithHA.java    From hadoop with Apache License 2.0
/**
 * Make sure that an HA NN will start if a previous upgrade was in progress.
 */
@Test
public void testStartingWithUpgradeInProgressSucceeds() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();

    // Simulate an upgrade having started.
    for (int i = 0; i < 2; i++) {
      for (URI uri : cluster.getNameDirs(i)) {
        File prevTmp = new File(new File(uri), Storage.STORAGE_TMP_PREVIOUS);
        LOG.info("creating previous tmp dir: " + prevTmp);
        assertTrue(prevTmp.mkdirs());
      }
    }

    cluster.restartNameNodes();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example #11
Source File: EditLogBackupOutputStream.java    From big-c with Apache License 2.0
EditLogBackupOutputStream(NamenodeRegistration bnReg, // backup node
                          JournalInfo journalInfo) // active name-node
throws IOException {
  super();
  this.bnRegistration = bnReg;
  this.journalInfo = journalInfo;
  InetSocketAddress bnAddress =
    NetUtils.createSocketAddr(bnRegistration.getAddress());
  try {
    this.backupNode = NameNodeProxies.createNonHAProxy(new HdfsConfiguration(),
        bnAddress, JournalProtocol.class, UserGroupInformation.getCurrentUser(),
        true).getProxy();
  } catch(IOException e) {
    Storage.LOG.error("Error connecting to: " + bnAddress, e);
    throw e;
  }
  this.doubleBuf = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE);
  this.out = new DataOutputBuffer(DEFAULT_BUFFER_SIZE);
}
 
Example #12
Source File: EditLogFileInputStream.java    From big-c with Apache License 2.0
/**
 * Read the header of the fsedit log.
 * @param in fsedit stream
 * @return the edit log version number
 * @throws IOException if an error occurs
 * @throws LogHeaderCorruptException if the header cannot be read or is invalid
 */
@VisibleForTesting
static int readLogVersion(DataInputStream in, boolean verifyLayoutVersion)
    throws IOException, LogHeaderCorruptException {
  int logVersion;
  try {
    logVersion = in.readInt();
  } catch (EOFException eofe) {
    throw new LogHeaderCorruptException(
        "Reached EOF when reading log header");
  }
  if (verifyLayoutVersion &&
      (logVersion < HdfsConstants.NAMENODE_LAYOUT_VERSION || // future version
       logVersion > Storage.LAST_UPGRADABLE_LAYOUT_VERSION)) { // unsupported
    throw new LogHeaderCorruptException(
        "Unexpected version of the file system log file: "
        + logVersion + ". Current version = "
        + HdfsConstants.NAMENODE_LAYOUT_VERSION + ".");
  }
  return logVersion;
}
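
HDFS layout versions are negative integers that decrease as new layout features are added, which is why a value smaller than NAMENODE_LAYOUT_VERSION is treated as coming from a future release. A sketch with hypothetical numbers:

// Hypothetical layout versions: suppose the current version is -60.
int current = -60;
int fromNewerRelease = -63;   // written by a newer release
boolean isFuture = fromNewerRelease < current;   // true: rejected as a future version
// The second check above (logVersion > LAST_UPGRADABLE_LAYOUT_VERSION) similarly
// rejects logs that are too old to upgrade from.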
 
Example #13
Source File: EditLogFileInputStream.java    From hadoop with Apache License 2.0
/**
 * Read the header of the fsedit log.
 * @param in fsedit stream
 * @return the edit log version number
 * @throws IOException if an error occurs
 * @throws LogHeaderCorruptException if the header cannot be read or is invalid
 */
@VisibleForTesting
static int readLogVersion(DataInputStream in, boolean verifyLayoutVersion)
    throws IOException, LogHeaderCorruptException {
  int logVersion;
  try {
    logVersion = in.readInt();
  } catch (EOFException eofe) {
    throw new LogHeaderCorruptException(
        "Reached EOF when reading log header");
  }
  if (verifyLayoutVersion &&
      (logVersion < HdfsConstants.NAMENODE_LAYOUT_VERSION || // future version
       logVersion > Storage.LAST_UPGRADABLE_LAYOUT_VERSION)) { // unsupported
    throw new LogHeaderCorruptException(
        "Unexpected version of the file system log file: "
        + logVersion + ". Current version = "
        + HdfsConstants.NAMENODE_LAYOUT_VERSION + ".");
  }
  return logVersion;
}
 
Example #14
Source File: NNUpgradeUtil.java    From big-c with Apache License 2.0
/**
 * Perform the upgrade of the storage dir to the given storage info. The new
 * storage info is written into the current directory, and the previous.tmp
 * directory is renamed to previous.
 * 
 * @param sd the storage directory to upgrade
 * @param storage info about the new, upgraded version
 * @throws IOException in the event of error
 */
public static void doUpgrade(StorageDirectory sd, Storage storage)
    throws IOException {
  LOG.info("Performing upgrade of storage directory " + sd.getRoot());
  try {
    // Write the version file, since saveFsImage only makes the
    // fsimage_<txid>, and the directory is otherwise empty.
    storage.writeProperties(sd);

    File prevDir = sd.getPreviousDir();
    File tmpDir = sd.getPreviousTmp();
    Preconditions.checkState(!prevDir.exists(),
        "previous directory must not exist for upgrade.");
    Preconditions.checkState(tmpDir.exists(),
        "previous.tmp directory must exist for upgrade.");

    // rename tmp to previous
    NNStorage.rename(tmpDir, prevDir);
  } catch (IOException ioe) {
    LOG.error("Unable to rename temp to previous for " + sd.getRoot(), ioe);
    throw ioe;
  }
}
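
A minimal sketch of the rename step, using File.renameTo as a stand-in for NNStorage.rename and hypothetical paths:

// Hypothetical paths; NNStorage.rename additionally wraps failures in an IOException.
File tmpDir  = new File("/name/previous.tmp");
File prevDir = new File("/name/previous");
if (prevDir.exists() || !tmpDir.exists()) {
  throw new IllegalStateException("unexpected upgrade state");
}
if (!tmpDir.renameTo(prevDir)) {
  throw new IOException("Rename of " + tmpDir + " to " + prevDir + " failed");
}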
 
Example #15
Source File: EditLogBackupOutputStream.java    From hadoop with Apache License 2.0
EditLogBackupOutputStream(NamenodeRegistration bnReg, // backup node
                          JournalInfo journalInfo) // active name-node
throws IOException {
  super();
  this.bnRegistration = bnReg;
  this.journalInfo = journalInfo;
  InetSocketAddress bnAddress =
    NetUtils.createSocketAddr(bnRegistration.getAddress());
  try {
    this.backupNode = NameNodeProxies.createNonHAProxy(new HdfsConfiguration(),
        bnAddress, JournalProtocol.class, UserGroupInformation.getCurrentUser(),
        true).getProxy();
  } catch(IOException e) {
    Storage.LOG.error("Error connecting to: " + bnAddress, e);
    throw e;
  }
  this.doubleBuf = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE);
  this.out = new DataOutputBuffer(DEFAULT_BUFFER_SIZE);
}
 
Example #16
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
/**
 * Gets initial volume failure information for all volumes that failed
 * immediately at startup.  The method works by determining the set difference
 * between all configured storage locations and the actual storage locations in
 * use after attempting to put all of them into service.
 *
 * @return each storage location that has failed
 */
private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
    Collection<StorageLocation> dataLocations, DataStorage storage) {
  Set<String> failedLocationSet = Sets.newHashSetWithExpectedSize(
      dataLocations.size());
  for (StorageLocation sl: dataLocations) {
    failedLocationSet.add(sl.getFile().getAbsolutePath());
  }
  for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
       it.hasNext(); ) {
    Storage.StorageDirectory sd = it.next();
    failedLocationSet.remove(sd.getRoot().getAbsolutePath());
  }
  List<VolumeFailureInfo> volumeFailureInfos = Lists.newArrayListWithCapacity(
      failedLocationSet.size());
  long failureDate = Time.now();
  for (String failedStorageLocation: failedLocationSet) {
    volumeFailureInfos.add(new VolumeFailureInfo(failedStorageLocation,
        failureDate));
  }
  return volumeFailureInfos;
}
 
Example #17
Source File: UpgradeUtilities.java    From RDFS with Apache License 2.0
public static void createFederatedDatanodesVersionFiles(File[] parents,
    int namespaceId, StorageInfo version, String dirName) throws IOException {
  for (File parent : parents) {
    File nsRoot = NameSpaceSliceStorage.getNsRoot(namespaceId, parent);
    Properties props = new Properties();
    props.setProperty(NameSpaceSliceStorage.NAMESPACE_ID,
        String.valueOf(version.getNamespaceID()));
    props.setProperty(NameSpaceSliceStorage.CHECK_TIME,
        String.valueOf(version.getCTime()));
    props.setProperty(NameSpaceSliceStorage.LAYOUT_VERSION,
        String.valueOf(version.getLayoutVersion()));
    File nsVersionFile = new File(new File(nsRoot,
        dirName), "VERSION");
    Storage.writeProps(nsVersionFile, props);
  }
}
 
Example #18
Source File: TestStartup.java    From hadoop with Apache License 2.0
/**
 * Corrupts the MD5 sum of the fsimage.
 * 
 * @param corruptAll
 *          whether to corrupt one or all of the MD5 sums in the configured
 *          namedirs
 * @throws IOException
 */
private void corruptFSImageMD5(boolean corruptAll) throws IOException {
  List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config);
  // Corrupt the md5 files in all the namedirs
  for (URI uri: nameDirs) {
    // Directory layout looks like:
    // test/data/dfs/nameN/current/{fsimage,edits,...}
    File nameDir = new File(uri.getPath());
    File dfsDir = nameDir.getParentFile();
    assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir
    // Set the md5 file to all zeros
    File imageFile = new File(nameDir,
        Storage.STORAGE_DIR_CURRENT + "/"
        + NNStorage.getImageFileName(0));
    MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16]));
    // Only need to corrupt one if !corruptAll
    if (!corruptAll) {
      break;
    }
  }
}
 
Example #19
Source File: TransferFsImage.java    From hadoop with Apache License 2.0
static MD5Hash handleUploadImageRequest(HttpServletRequest request,
    long imageTxId, Storage dstStorage, InputStream stream,
    long advertisedSize, DataTransferThrottler throttler) throws IOException {

  String fileName = NNStorage.getCheckpointImageFileName(imageTxId);

  List<File> dstFiles = dstStorage.getFiles(NameNodeDirType.IMAGE, fileName);
  if (dstFiles.isEmpty()) {
    throw new IOException("No targets in destination storage!");
  }

  MD5Hash advertisedDigest = parseMD5Header(request);
  MD5Hash hash = receiveFile(fileName, dstFiles, dstStorage, true,
      advertisedSize, advertisedDigest, fileName, stream, throttler);
  LOG.info("Downloaded file " + dstFiles.get(0).getName() + " size "
      + dstFiles.get(0).length() + " bytes.");
  return hash;
}
 
Example #20
Source File: TransferFsImage.java    From hadoop with Apache License 2.0
public static MD5Hash downloadImageToStorage(URL fsName, long imageTxId,
    Storage dstStorage, boolean needDigest) throws IOException {
  String fileid = ImageServlet.getParamStringForImage(null,
      imageTxId, dstStorage);
  String fileName = NNStorage.getCheckpointImageFileName(imageTxId);
  
  List<File> dstFiles = dstStorage.getFiles(
      NameNodeDirType.IMAGE, fileName);
  if (dstFiles.isEmpty()) {
    throw new IOException("No targets in destination storage!");
  }
  
  MD5Hash hash = getFileClient(fsName, fileid, dstFiles, dstStorage, needDigest);
  LOG.info("Downloaded file " + dstFiles.get(0).getName() + " size " +
      dstFiles.get(0).length() + " bytes.");
  return hash;
}
 
Example #21
Source File: TransferFsImage.java    From big-c with Apache License 2.0
/**
 * Client-side method to fetch a file from a server.
 * Copies the response from the URL to a list of local files.
 * @param dstStorage if an error occurs writing to one of the files,
 *                   this storage object will be notified.
 * @return a digest of the received file if getChecksum is true
 */
static MD5Hash getFileClient(URL infoServer,
    String queryString, List<File> localPaths,
    Storage dstStorage, boolean getChecksum) throws IOException {
  URL url = new URL(infoServer, ImageServlet.PATH_SPEC + "?" + queryString);
  LOG.info("Opening connection to " + url);
  return doGetUrl(url, localPaths, dstStorage, getChecksum);
}
 
Example #22
Source File: TestDataStorage.java    From big-c with Apache License 2.0
/** Check whether the root is a valid BlockPoolSlice storage. */
private static void checkDir(File root, String bpid) {
  Storage.StorageDirectory sd = new Storage.StorageDirectory(root);
  File bpRoot = new File(sd.getCurrentDir(), bpid);
  Storage.StorageDirectory bpSd = new Storage.StorageDirectory(bpRoot);
  assertTrue(bpSd.getRoot().isDirectory());
  assertTrue(bpSd.getCurrentDir().isDirectory());
  assertTrue(bpSd.getVersionFile().isFile());
}
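
A hypothetical invocation, with a made-up data directory and block pool id:

// Illustrative only: verifies that <root>/current/<bpid>/current exists and
// contains a VERSION file.
checkDir(new File("/data/dfs/data1"), "BP-1234567-127.0.0.1-1234567890123");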
 
Example #23
Source File: ImageServlet.java    From big-c with Apache License 2.0
/**
 * Set the required parameters for uploading an image.
 *
 * @param storage the storage whose info is sent as a colon-separated string
 * @param txid txid of the image
 * @param imageFileSize size of the image file to be uploaded
 * @param nnf NameNodeFile type
 * @return map of parameters to be used with the PUT request
 */
static Map<String, String> getParamsForPutImage(Storage storage, long txid,
    long imageFileSize, NameNodeFile nnf) {
  Map<String, String> params = new HashMap<String, String>();
  params.put(TXID_PARAM, Long.toString(txid));
  params.put(STORAGEINFO_PARAM, storage.toColonSeparatedString());
  // setting the length of the file to be uploaded in separate property as
  // Content-Length only supports up to 2GB
  params.put(TransferFsImage.FILE_LENGTH, Long.toString(imageFileSize));
  params.put(IMAGE_FILE_TYPE, nnf.name());
  return params;
}
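
Illustrative only: joining a parameter map like the one returned above into a query string. The keys below are hypothetical stand-ins, not the real request parameter constants:

// Hypothetical keys and values; the actual upload request is assembled elsewhere.
Map<String, String> params = new HashMap<String, String>();
params.put("txid", "42");
params.put("imageFile", "IMAGE");
StringBuilder query = new StringBuilder();
for (Map.Entry<String, String> e : params.entrySet()) {
  if (query.length() > 0) {
    query.append('&');
  }
  query.append(e.getKey()).append('=').append(e.getValue());
}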
 
Example #24
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
/**
 * Removes a set of volumes from FsDataset.
 * @param volumesToRemove a set of the absolute root paths of the volumes to remove.
 * @param clearFailure set true to clear failure information.
 *
 * DataNode should call this function before calling
 * {@link DataStorage#removeVolumes(java.util.Collection)}.
 */
@Override
public synchronized void removeVolumes(
    Set<File> volumesToRemove, boolean clearFailure) {
  // Make sure that all volumes are absolute path.
  for (File vol : volumesToRemove) {
    Preconditions.checkArgument(vol.isAbsolute(),
        String.format("%s is not absolute path.", vol.getPath()));
  }
  for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
    Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
    final File absRoot = sd.getRoot().getAbsoluteFile();
    if (volumesToRemove.contains(absRoot)) {
      LOG.info("Removing " + absRoot + " from FsDataset.");

      // Disable the volume from the service.
      asyncDiskService.removeVolume(sd.getCurrentDir());
      volumes.removeVolume(absRoot, clearFailure);

      // Remove all replica information for the blocks on the volume. Unlike
      // updating the volumeMap in addVolume(), this operation does not scan
      // disks.
      for (String bpid : volumeMap.getBlockPoolList()) {
        for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
             it.hasNext(); ) {
          ReplicaInfo block = it.next();
          final File absBasePath =
              new File(block.getVolume().getBasePath()).getAbsoluteFile();
          if (absBasePath.equals(absRoot)) {
            invalidate(bpid, block);
            it.remove();
          }
        }
      }

      storageMap.remove(sd.getStorageUuid());
    }
  }
  setupAsyncLazyPersistThreads();
}
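
A hedged sketch of the calling order described in the javadoc, using hypothetical object names for the dataset and its storage:

// Hypothetical objects; per the javadoc above, the dataset is updated first,
// then DataStorage#removeVolumes is called for the same volumes (its exact
// parameter types are not shown on this page).
Set<File> volumesToRemove = Collections.singleton(new File("/data/disk3"));
fsDataset.removeVolumes(volumesToRemove, true);   // drop replicas and volume objects
// ... followed by the corresponding DataStorage removeVolumes call.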
 
Example #25
Source File: NNStorage.java    From big-c with Apache License 2.0
/** Validate and set block pool ID */
private void setBlockPoolID(File storage, String bpid)
    throws InconsistentFSStateException {
  if (bpid == null || bpid.equals("")) {
    throw new InconsistentFSStateException(storage, "file "
        + Storage.STORAGE_FILE_VERSION + " has no block pool Id.");
  }
  
  if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
    throw new InconsistentFSStateException(storage,
        "Unexepcted blockpoolID " + bpid + " . Expected " + blockpoolID);
  }
  setBlockPoolID(bpid);
}
 
Example #26
Source File: TestFsDatasetImpl.java    From big-c with Apache License 2.0
@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
  final int numExistingVolumes = dataset.getVolumes().size();
  List<NamespaceInfo> nsInfos = new ArrayList<>();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
  StorageLocation loc = StorageLocation.parse(newVolumePath);

  Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
  DataStorage.VolumeBuilder builder =
      new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
      anyListOf(NamespaceInfo.class)))
      .thenReturn(builder);

  dataset.addVolume(loc, nsInfos);
  assertEquals(numExistingVolumes + 1, dataset.getVolumes().size());

  when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
  when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
  Set<File> volumesToRemove = new HashSet<>();
  volumesToRemove.add(loc.getFile());
  dataset.removeVolumes(volumesToRemove, true);
  assertEquals(numExistingVolumes, dataset.getVolumes().size());
}
 
Example #27
Source File: TestFsDatasetImpl.java    From hadoop with Apache License 2.0
@Test
public void testAddVolumes() throws IOException {
  final int numNewVolumes = 3;
  final int numExistingVolumes = dataset.getVolumes().size();
  final int totalVolumes = numNewVolumes + numExistingVolumes;
  Set<String> expectedVolumes = new HashSet<String>();
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  for (int i = 0; i < numNewVolumes; i++) {
    String path = BASE_DIR + "/newData" + i;
    String pathUri = new Path(path).toUri().toString();
    expectedVolumes.add(new File(pathUri).toString());
    StorageLocation loc = StorageLocation.parse(pathUri);
    Storage.StorageDirectory sd = createStorageDirectory(new File(path));
    DataStorage.VolumeBuilder builder =
        new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
        anyListOf(NamespaceInfo.class)))
        .thenReturn(builder);

    dataset.addVolume(loc, nsInfos);
  }

  assertEquals(totalVolumes, dataset.getVolumes().size());
  assertEquals(totalVolumes, dataset.storageMap.size());

  Set<String> actualVolumes = new HashSet<String>();
  for (int i = 0; i < numNewVolumes; i++) {
    actualVolumes.add(
        dataset.getVolumes().get(numExistingVolumes + i).getBasePath());
  }
  assertEquals(actualVolumes.size(), expectedVolumes.size());
  assertTrue(actualVolumes.containsAll(expectedVolumes));
}
 
Example #28
Source File: UpgradeUtilities.java    From big-c with Apache License 2.0
public static void createBlockPoolVersionFile(File bpDir,
    StorageInfo version, String bpid) throws IOException {
  // Create block pool version files
  if (DataNodeLayoutVersion.supports(
      LayoutVersion.Feature.FEDERATION, version.layoutVersion)) {
    File bpCurDir = new File(bpDir, Storage.STORAGE_DIR_CURRENT);
    BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(version,
        bpid);
    File versionFile = new File(bpCurDir, "VERSION");
    StorageDirectory sd = new StorageDirectory(bpDir);
    bpStorage.writeProperties(versionFile, sd);
  }
}
 
Example #29
Source File: TestFsDatasetImpl.java    From hadoop with Apache License 2.0
@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
  final int numExistingVolumes = dataset.getVolumes().size();
  List<NamespaceInfo> nsInfos = new ArrayList<>();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
  StorageLocation loc = StorageLocation.parse(newVolumePath);

  Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
  DataStorage.VolumeBuilder builder =
      new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
      anyListOf(NamespaceInfo.class)))
      .thenReturn(builder);

  dataset.addVolume(loc, nsInfos);
  assertEquals(numExistingVolumes + 1, dataset.getVolumes().size());

  when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
  when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
  Set<File> volumesToRemove = new HashSet<>();
  volumesToRemove.add(loc.getFile());
  dataset.removeVolumes(volumesToRemove, true);
  assertEquals(numExistingVolumes, dataset.getVolumes().size());
}
 
Example #30
Source File: TestDFSStartupVersions.java    From hadoop-gpu with Apache License 2.0
/**
 * Initialize the versions array.  This array stores all combinations 
 * of cross product:
 *  {oldLayoutVersion,currentLayoutVersion,futureLayoutVersion} X
 *    {currentNamespaceId,incorrectNamespaceId} X
 *      {pastFsscTime,currentFsscTime,futureFsscTime}
 */
private StorageInfo[] initializeVersions() throws Exception {
  int layoutVersionOld = Storage.LAST_UPGRADABLE_LAYOUT_VERSION;
  int layoutVersionCur = UpgradeUtilities.getCurrentLayoutVersion();
  int layoutVersionNew = Integer.MIN_VALUE;
  int namespaceIdCur = UpgradeUtilities.getCurrentNamespaceID(null);
  int namespaceIdOld = Integer.MIN_VALUE;
  long fsscTimeOld = Long.MIN_VALUE;
  long fsscTimeCur = UpgradeUtilities.getCurrentFsscTime(null);
  long fsscTimeNew = Long.MAX_VALUE;
  
  return new StorageInfo[] {
    new StorageInfo(layoutVersionOld, namespaceIdCur, fsscTimeOld), // 0
    new StorageInfo(layoutVersionOld, namespaceIdCur, fsscTimeCur), // 1
    new StorageInfo(layoutVersionOld, namespaceIdCur, fsscTimeNew), // 2
    new StorageInfo(layoutVersionOld, namespaceIdOld, fsscTimeOld), // 3
    new StorageInfo(layoutVersionOld, namespaceIdOld, fsscTimeCur), // 4
    new StorageInfo(layoutVersionOld, namespaceIdOld, fsscTimeNew), // 5
    new StorageInfo(layoutVersionCur, namespaceIdCur, fsscTimeOld), // 6
    new StorageInfo(layoutVersionCur, namespaceIdCur, fsscTimeCur), // 7
    new StorageInfo(layoutVersionCur, namespaceIdCur, fsscTimeNew), // 8
    new StorageInfo(layoutVersionCur, namespaceIdOld, fsscTimeOld), // 9
    new StorageInfo(layoutVersionCur, namespaceIdOld, fsscTimeCur), // 10
    new StorageInfo(layoutVersionCur, namespaceIdOld, fsscTimeNew), // 11
    new StorageInfo(layoutVersionNew, namespaceIdCur, fsscTimeOld), // 12
    new StorageInfo(layoutVersionNew, namespaceIdCur, fsscTimeCur), // 13
    new StorageInfo(layoutVersionNew, namespaceIdCur, fsscTimeNew), // 14
    new StorageInfo(layoutVersionNew, namespaceIdOld, fsscTimeOld), // 15
    new StorageInfo(layoutVersionNew, namespaceIdOld, fsscTimeCur), // 16
    new StorageInfo(layoutVersionNew, namespaceIdOld, fsscTimeNew), // 17
  };
}
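
The 18 entries above are the full 3 x 2 x 3 cross product; a sketch of generating the same list programmatically, assuming the same three-argument StorageInfo constructor:

// Equivalent to the hand-written array above (same ordering: layout version
// outermost, then namespace id, then fssc time).
int[]  layouts = { layoutVersionOld, layoutVersionCur, layoutVersionNew };
int[]  nsIds   = { namespaceIdCur, namespaceIdOld };
long[] times   = { fsscTimeOld, fsscTimeCur, fsscTimeNew };
List<StorageInfo> all = new ArrayList<StorageInfo>();
for (int lv : layouts) {
  for (int ns : nsIds) {
    for (long t : times) {
      all.add(new StorageInfo(lv, ns, t));
    }
  }
}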