org.apache.hadoop.hdfs.server.datanode.DatanodeUtil Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.datanode.DatanodeUtil. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: BlockPoolSlice.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Relocate a replica previously saved under the lazypersist directory into
 * its id-derived subdirectory under finalized, then build and return the
 * FINALIZED replica record describing it.
 */
ReplicaInfo activateSavedReplica(ReplicaInfo replicaInfo,
                                 RamDiskReplica replicaState) throws IOException {
  final File savedMeta = replicaState.getSavedMetaFile();
  final File savedBlock = replicaState.getSavedBlockFile();
  final long blockId = replicaInfo.getBlockId();
  final File destDir = DatanodeUtil.idToBlockDir(finalizedDir, blockId);
  final File destBlock = new File(destDir, savedBlock.getName());
  final File destMeta = new File(destDir, savedMeta.getName());

  // Move the block file first, then its metadata, logging each move.
  fileIoProvider.moveFile(volume, savedBlock, destBlock);
  FsDatasetImpl.LOG.info("Moved " + savedBlock + " to " + destBlock);
  fileIoProvider.moveFile(volume, savedMeta, destMeta);
  FsDatasetImpl.LOG.info("Moved " + savedMeta + " to " + destMeta);

  return new ReplicaBuilder(ReplicaState.FINALIZED)
      .setBlockId(blockId)
      .setLength(replicaInfo.getBytesOnDisk())
      .setGenerationStamp(replicaInfo.getGenerationStamp())
      .setFsVolume(replicaState.getLazyPersistVolume())
      .setDirectoryToUse(destBlock.getParentFile())
      .build();
}
 
Example #2
Source File: LazyPersistTestCase.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Verify that for every located block, neither the block file nor its meta
 * file still exists under {@code dir}.
 *
 * @return true iff all block and meta files have been removed.
 */
protected final boolean verifyBlockDeletedFromDir(File dir,
    LocatedBlocks locatedBlocks) {

  for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
    File blockDir = DatanodeUtil.idToBlockDir(dir, lb.getBlock().getBlockId());

    // The block file itself must be gone.
    File blockFile = new File(blockDir, lb.getBlock().getBlockName());
    if (blockFile.exists()) {
      LOG.warn("blockFile: " + blockFile.getAbsolutePath() +
        " exists after deletion.");
      return false;
    }

    // So must the corresponding metadata file.
    String metaName = DatanodeUtil.getMetaName(
        lb.getBlock().getBlockName(), lb.getBlock().getGenerationStamp());
    File metaFile = new File(blockDir, metaName);
    if (metaFile.exists()) {
      LOG.warn("metaFile: " + metaFile.getAbsolutePath() +
        " exists after deletion.");
      return false;
    }
  }
  return true;
}
 
Example #3
Source File: LazyPersistTestCase.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Verify that for every located block, neither the block file nor its meta
 * file still exists under {@code dir}.
 *
 * @return true iff all block and meta files have been removed.
 */
protected final boolean verifyBlockDeletedFromDir(File dir,
    LocatedBlocks locatedBlocks) {

  for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
    File blockDir = DatanodeUtil.idToBlockDir(dir, lb.getBlock().getBlockId());

    // The block file itself must be gone.
    File blockFile = new File(blockDir, lb.getBlock().getBlockName());
    if (blockFile.exists()) {
      LOG.warn("blockFile: " + blockFile.getAbsolutePath() +
        " exists after deletion.");
      return false;
    }

    // So must the corresponding metadata file.
    String metaName = DatanodeUtil.getMetaName(
        lb.getBlock().getBlockName(), lb.getBlock().getGenerationStamp());
    File metaFile = new File(blockDir, metaName);
    if (metaFile.exists()) {
      LOG.warn("metaFile: " + metaFile.getAbsolutePath() +
        " exists after deletion.");
      return false;
    }
  }
  return true;
}
 
Example #4
Source File: FsVolumeImpl.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Report whether this volume holds no replica data for the given block
 * pool: no files anywhere under finalized, and nothing in rbw.
 */
boolean isBPDirEmpty(String bpid) throws IOException {
  File bpCurrentDir = new File(new File(this.getCurrentDir(), bpid),
      DataStorage.STORAGE_DIR_CURRENT);
  File finalizedDir = new File(bpCurrentDir,
      DataStorage.STORAGE_DIR_FINALIZED);
  File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);

  // finalized may contain nested id-based subdirs; require no files anywhere.
  if (finalizedDir.exists()
      && !DatanodeUtil.dirNoFilesRecursive(finalizedDir)) {
    return false;
  }
  // rbw is checked only for direct entries.
  return !(rbwDir.exists() && FileUtil.list(rbwDir).length != 0);
}
 
Example #5
Source File: TestDatanodeRestart.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Create the unlink-tmp companion of one of the replica's files, either by
 * renaming the original in place or by copying its contents.
 *
 * @param replicaInfo     replica whose file is renamed/duplicated
 * @param changeBlockFile true to operate on the block file, false for the
 *                        meta file
 * @param isRename        true to rename in place, false to copy the bytes
 * @throws IOException if the copy fails
 */
private static void createUnlinkTmpFile(ReplicaInfo replicaInfo,
    boolean changeBlockFile,
    boolean isRename) throws IOException {
  File src = changeBlockFile
      ? replicaInfo.getBlockFile()
      : replicaInfo.getMetaFile();
  File dst = DatanodeUtil.getUnlinkTmpFile(src);
  if (isRename) {
    // NOTE: matches original behavior — a failed rename is silently ignored.
    src.renameTo(dst);
  } else {
    // try-with-resources replaces the nested try/finally close pattern;
    // streams are closed in reverse order (out, then in), same as before.
    try (FileInputStream in = new FileInputStream(src);
         FileOutputStream out = new FileOutputStream(dst)) {
      IOUtils.copyBytes(in, out, 1);
    }
  }
}
 
Example #6
Source File: TestDatanodeRestart.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Create the unlink-tmp companion of one of the replica's files, either by
 * renaming the original in place or by copying its contents.
 *
 * @param replicaInfo     replica whose file is renamed/duplicated
 * @param changeBlockFile true to operate on the block file, false for the
 *                        meta file
 * @param isRename        true to rename in place, false to copy the bytes
 * @throws IOException if the copy fails
 */
private static void createUnlinkTmpFile(ReplicaInfo replicaInfo,
    boolean changeBlockFile,
    boolean isRename) throws IOException {
  File src = changeBlockFile
      ? replicaInfo.getBlockFile()
      : replicaInfo.getMetaFile();
  File dst = DatanodeUtil.getUnlinkTmpFile(src);
  if (isRename) {
    // NOTE: matches original behavior — a failed rename is silently ignored.
    src.renameTo(dst);
  } else {
    // try-with-resources replaces the nested try/finally close pattern;
    // streams are closed in reverse order (out, then in), same as before.
    try (FileInputStream in = new FileInputStream(src);
         FileOutputStream out = new FileOutputStream(dst)) {
      IOUtils.copyBytes(in, out, 1);
    }
  }
}
 
Example #7
Source File: FsVolumeImpl.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Report whether this volume holds no replica data for the given block
 * pool: no files anywhere under finalized, and nothing in rbw.
 */
boolean isBPDirEmpty(String bpid) throws IOException {
  File bpCurrentDir = new File(new File(this.getCurrentDir(), bpid),
      DataStorage.STORAGE_DIR_CURRENT);
  File finalizedDir = new File(bpCurrentDir,
      DataStorage.STORAGE_DIR_FINALIZED);
  File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);

  // finalized may contain nested id-based subdirs; require no files anywhere.
  if (finalizedDir.exists()
      && !DatanodeUtil.dirNoFilesRecursive(finalizedDir)) {
    return false;
  }
  // rbw is checked only for direct entries.
  return !(rbwDir.exists() && FileUtil.list(rbwDir).length != 0);
}
 
Example #8
Source File: FsDatasetUtil.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Strip the unlink-tmp suffix from {@code unlinkTmpFile}'s name and return
 * the original file it was derived from, in the same directory.
 *
 * @throws IllegalArgumentException if the name lacks the suffix
 */
static File getOrigFile(File unlinkTmpFile) {
  final String tmpName = unlinkTmpFile.getName();
  final String suffix = DatanodeUtil.UNLINK_BLOCK_SUFFIX;
  if (!tmpName.endsWith(suffix)) {
    throw new IllegalArgumentException("unlinkTmpFile=" + unlinkTmpFile
        + " does not end with " + suffix);
  }
  final String origName =
      tmpName.substring(0, tmpName.length() - suffix.length());
  return new File(unlinkTmpFile.getParentFile(), origName);
}
 
Example #9
Source File: FsDatasetUtil.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Strip the unlink-tmp suffix from {@code unlinkTmpFile}'s name and return
 * the original file it was derived from, in the same directory.
 *
 * @throws IllegalArgumentException if the name lacks the suffix
 */
static File getOrigFile(File unlinkTmpFile) {
  final String tmpName = unlinkTmpFile.getName();
  final String suffix = DatanodeUtil.UNLINK_BLOCK_SUFFIX;
  if (!tmpName.endsWith(suffix)) {
    throw new IllegalArgumentException("unlinkTmpFile=" + unlinkTmpFile
        + " does not end with " + suffix);
  }
  final String origName =
      tmpName.substring(0, tmpName.length() - suffix.length());
  return new File(unlinkTmpFile.getParentFile(), origName);
}
 
Example #10
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Copy the block and meta files for the given block to the given
 * destination root; the copies land in the id-derived subdirectory.
 *
 * @return the new meta and block files.
 * @throws IOException if the copy fails
 */
static File[] copyBlockFiles(long blockId, long genStamp, File srcMeta,
    File srcFile, File destRoot, boolean calculateChecksum)
    throws IOException {
  final File dir = DatanodeUtil.idToBlockDir(destRoot, blockId);
  final File blockCopy = new File(dir, srcFile.getName());
  final File metaCopy = FsDatasetUtil.getMetaFile(blockCopy, genStamp);
  return copyBlockFiles(srcMeta, srcFile, metaCopy, blockCopy,
      calculateChecksum);
}
 
Example #11
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Copy the block and meta files of a replica under recovery into the tmp
 * directory of a newly selected volume, naming the copies for the new
 * block id and generation stamp.
 *
 * @param replicaInfo replica whose files are copied
 * @param bpid        block pool the replica belongs to
 * @param newBlkId    block id used for the destination file names
 * @param newGS       generation stamp used for the destination meta name
 * @return the files produced by copyBlockFiles (meta and block copies)
 * @throws IOException if volume selection or the copy fails
 */
private File[] copyReplicaWithNewBlockIdAndGS(
    ReplicaUnderRecovery replicaInfo, String bpid, long newBlkId, long newGS)
    throws IOException {
  String blockFileName = Block.BLOCK_FILE_PREFIX + newBlkId;
  // Pick a destination volume matching the replica's storage type and size.
  FsVolumeReference v = volumes.getNextVolume(
      replicaInfo.getVolume().getStorageType(), replicaInfo.getNumBytes());
  // NOTE(review): the FsVolumeReference obtained here is never released in
  // this method — confirm the caller (or reference lifecycle) closes it.
  final File tmpDir = ((FsVolumeImpl) v.getVolume())
      .getBlockPoolSlice(bpid).getTmpDir();
  final File destDir = DatanodeUtil.idToBlockDir(tmpDir, newBlkId);
  final File dstBlockFile = new File(destDir, blockFileName);
  final File dstMetaFile = FsDatasetUtil.getMetaFile(dstBlockFile, newGS);
  // Final 'true' is the calculateChecksum flag of the copyBlockFiles overload.
  return copyBlockFiles(replicaInfo.getMetaFile(), replicaInfo.getBlockFile(),
      dstMetaFile, dstBlockFile, true);
}
 
Example #12
Source File: BlockPoolSlice.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Move a block's files into the id-derived subdirectory under finalized
 * and account for the added disk usage.
 *
 * @param b block being added
 * @param f current location of the block file
 * @return the block file at its final location
 * @throws IOException if the directory cannot be created or the move fails
 */
File addBlock(Block b, File f) throws IOException {
  File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  // mkdirs() followed by isDirectory() avoids the exists()/mkdirs() TOCTOU
  // race: a concurrent creator makes mkdirs() return false even though the
  // directory now exists.
  if (!blockDir.mkdirs() && !blockDir.isDirectory()) {
    throw new IOException("Failed to mkdirs " + blockDir);
  }
  File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir);
  File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
  dfsUsage.incDfsUsed(b.getNumBytes() + metaFile.length());
  return blockFile;
}
 
Example #13
Source File: BlockPoolSlice.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Move a persisted replica from the lazypersist directory to its
 * id-derived subdirectory under finalized.
 *
 * @return the block file at its new location
 */
File activateSavedReplica(Block b, File metaFile, File blockFile)
    throws IOException {
  final File destDir =
      DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  final File movedBlockFile = new File(destDir, blockFile.getName());
  final File movedMetaFile = new File(destDir, metaFile.getName());

  FileUtils.moveFile(blockFile, movedBlockFile);
  FsDatasetImpl.LOG.info("Moved " + blockFile + " to " + movedBlockFile);
  FileUtils.moveFile(metaFile, movedMetaFile);
  FsDatasetImpl.LOG.info("Moved " + metaFile + " to " + movedMetaFile);
  return movedBlockFile;
}
 
Example #14
Source File: MiniDFSCluster.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Rename a block's meta file on the given datanode so that it carries the
 * new generation stamp.
 *
 * @return true iff the rename succeeded
 */
public boolean changeGenStampOfBlock(int dnIndex, ExtendedBlock blk,
    long newGenStamp) throws IOException {
  File blockFile = getBlockFile(dnIndex, blk);
  File oldMetaFile = FsDatasetUtil.findMetaFile(blockFile);
  String newMetaName = DatanodeUtil.getMetaName(
      blockFile.getAbsolutePath(), newGenStamp);
  return oldMetaFile.renameTo(new File(newMetaName));
}
 
Example #15
Source File: LazyPersistTestCase.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Make sure at least one non-transient volume has a saved copy of every
 * replica. Polls indefinitely at one-second intervals until each block is
 * found; the caller is expected to enforce a timeout on this method.
 */
protected final void ensureLazyPersistBlocksAreSaved(
    LocatedBlocks locatedBlocks) throws IOException, InterruptedException {
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  List<? extends FsVolumeSpi> volumes =
    cluster.getDataNodes().get(0).getFSDataset().getVolumes();
  final Set<Long> found = new HashSet<Long>();

  while (found.size() < locatedBlocks.getLocatedBlocks().size()) {
    // Pause between polls to let async lazy-persist tasks make progress.
    Thread.sleep(1000);

    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
      long blockId = lb.getBlock().getBlockId();
      for (FsVolumeSpi v : volumes) {
        if (v.isTransientStorage()) {
          continue;  // only persistent volumes count
        }

        File lazyPersistDir =
            ((FsVolumeImpl) v).getBlockPoolSlice(bpid).getLazypersistDir();
        File saved = new File(
            DatanodeUtil.idToBlockDir(lazyPersistDir, blockId),
            lb.getBlock().getBlockName());
        if (saved.exists()) {
          // Found a persisted copy for this block.
          found.add(blockId);
        }
      }
    }
  }

  // Every located block must now have a persisted copy.
  assertThat(found.size(), is(locatedBlocks.getLocatedBlocks().size()));
}
 
Example #16
Source File: BlockPoolSlice.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * Create a temporary file for the block under tmpDir. Temporary files get
 * moved to the finalized block directory when the block is finalized.
 */
File createTmpFile(Block b) throws IOException {
  final File target = new File(tmpDir, b.getBlockName());
  final File created = DatanodeUtil.createFileWithExistsCheck(
      volume, b, target, fileIoProvider);
  // Only reached on success; a creation failure propagates before the
  // counter is touched, so no decrement is ever needed.
  incrNumBlocks();
  return created;
}
 
Example #17
Source File: BlockPoolSlice.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * Create an RBW file for the block under rbwDir. RBW files get moved to
 * the finalized block directory when the block is finalized.
 */
File createRbwFile(Block b) throws IOException {
  final File target = new File(rbwDir, b.getBlockName());
  final File created = DatanodeUtil.createFileWithExistsCheck(
      volume, b, target, fileIoProvider);
  // Only reached on success; a creation failure propagates before the
  // counter is touched, so no decrement is ever needed.
  incrNumBlocks();
  return created;
}
 
Example #18
Source File: LazyPersistTestCase.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Make sure at least one non-transient volume has a saved copy of every
 * replica. Polls indefinitely at one-second intervals until each block is
 * found; the caller is expected to enforce a timeout on this method.
 */
protected final void ensureLazyPersistBlocksAreSaved(
    LocatedBlocks locatedBlocks) throws IOException, InterruptedException {
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  List<? extends FsVolumeSpi> volumes =
    cluster.getDataNodes().get(0).getFSDataset().getVolumes();
  final Set<Long> found = new HashSet<Long>();

  while (found.size() < locatedBlocks.getLocatedBlocks().size()) {
    // Pause between polls to let async lazy-persist tasks make progress.
    Thread.sleep(1000);

    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
      long blockId = lb.getBlock().getBlockId();
      for (FsVolumeSpi v : volumes) {
        if (v.isTransientStorage()) {
          continue;  // only persistent volumes count
        }

        File lazyPersistDir =
            ((FsVolumeImpl) v).getBlockPoolSlice(bpid).getLazypersistDir();
        File saved = new File(
            DatanodeUtil.idToBlockDir(lazyPersistDir, blockId),
            lb.getBlock().getBlockName());
        if (saved.exists()) {
          // Found a persisted copy for this block.
          found.add(blockId);
        }
      }
    }
  }

  // Every located block must now have a persisted copy.
  assertThat(found.size(), is(locatedBlocks.getLocatedBlocks().size()));
}
 
Example #19
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Copy the block and meta files for the given block to the given
 * destination root; the copies land in the id-derived subdirectory.
 *
 * @return the new meta and block files.
 * @throws IOException if the copy fails
 */
static File[] copyBlockFiles(long blockId, long genStamp, File srcMeta,
    File srcFile, File destRoot, boolean calculateChecksum)
    throws IOException {
  final File dir = DatanodeUtil.idToBlockDir(destRoot, blockId);
  final File blockCopy = new File(dir, srcFile.getName());
  final File metaCopy = FsDatasetUtil.getMetaFile(blockCopy, genStamp);
  return copyBlockFiles(srcMeta, srcFile, metaCopy, blockCopy,
      calculateChecksum);
}
 
Example #20
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Copy the block and meta files of a replica under recovery into the tmp
 * directory of a newly selected volume, naming the copies for the new
 * block id and generation stamp.
 *
 * @param replicaInfo replica whose files are copied
 * @param bpid        block pool the replica belongs to
 * @param newBlkId    block id used for the destination file names
 * @param newGS       generation stamp used for the destination meta name
 * @return the files produced by copyBlockFiles (meta and block copies)
 * @throws IOException if volume selection or the copy fails
 */
private File[] copyReplicaWithNewBlockIdAndGS(
    ReplicaUnderRecovery replicaInfo, String bpid, long newBlkId, long newGS)
    throws IOException {
  String blockFileName = Block.BLOCK_FILE_PREFIX + newBlkId;
  // Pick a destination volume matching the replica's storage type and size.
  FsVolumeReference v = volumes.getNextVolume(
      replicaInfo.getVolume().getStorageType(), replicaInfo.getNumBytes());
  // NOTE(review): the FsVolumeReference obtained here is never released in
  // this method — confirm the caller (or reference lifecycle) closes it.
  final File tmpDir = ((FsVolumeImpl) v.getVolume())
      .getBlockPoolSlice(bpid).getTmpDir();
  final File destDir = DatanodeUtil.idToBlockDir(tmpDir, newBlkId);
  final File dstBlockFile = new File(destDir, blockFileName);
  final File dstMetaFile = FsDatasetUtil.getMetaFile(dstBlockFile, newGS);
  // Final 'true' is the calculateChecksum flag of the copyBlockFiles overload.
  return copyBlockFiles(replicaInfo.getMetaFile(), replicaInfo.getBlockFile(),
      dstMetaFile, dstBlockFile, true);
}
 
Example #21
Source File: MiniDFSCluster.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Rename a block's meta file on the given datanode so that it carries the
 * new generation stamp.
 *
 * @return true iff the rename succeeded
 */
public boolean changeGenStampOfBlock(int dnIndex, ExtendedBlock blk,
    long newGenStamp) throws IOException {
  File blockFile = getBlockFile(dnIndex, blk);
  File oldMetaFile = FsDatasetUtil.findMetaFile(blockFile);
  String newMetaName = DatanodeUtil.getMetaName(
      blockFile.getAbsolutePath(), newGenStamp);
  return oldMetaFile.renameTo(new File(newMetaName));
}
 
Example #22
Source File: BlockPoolSlice.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Move a persisted replica from the lazypersist directory to its
 * id-derived subdirectory under finalized.
 *
 * @return the block file at its new location
 */
File activateSavedReplica(Block b, File metaFile, File blockFile)
    throws IOException {
  final File destDir =
      DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  final File movedBlockFile = new File(destDir, blockFile.getName());
  final File movedMetaFile = new File(destDir, metaFile.getName());

  FileUtils.moveFile(blockFile, movedBlockFile);
  FsDatasetImpl.LOG.info("Moved " + blockFile + " to " + movedBlockFile);
  FileUtils.moveFile(metaFile, movedMetaFile);
  FsDatasetImpl.LOG.info("Moved " + metaFile + " to " + movedMetaFile);
  return movedBlockFile;
}
 
Example #23
Source File: BlockPoolSlice.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Move a block's files into the id-derived subdirectory under finalized
 * and account for the added disk usage.
 *
 * @param b block being added
 * @param f current location of the block file
 * @return the block file at its final location
 * @throws IOException if the directory cannot be created or the move fails
 */
File addBlock(Block b, File f) throws IOException {
  File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  // mkdirs() followed by isDirectory() avoids the exists()/mkdirs() TOCTOU
  // race: a concurrent creator makes mkdirs() return false even though the
  // directory now exists.
  if (!blockDir.mkdirs() && !blockDir.isDirectory()) {
    throw new IOException("Failed to mkdirs " + blockDir);
  }
  File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir);
  File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
  dfsUsage.incDfsUsed(b.getNumBytes() + metaFile.length());
  return blockFile;
}
 
Example #24
Source File: FsDatasetUtil.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * @return true iff {@code f}'s name carries the unlink-tmp suffix.
 */
static boolean isUnlinkTmpFile(File f) {
  final String name = f.getName();
  return name.endsWith(DatanodeUtil.UNLINK_BLOCK_SUFFIX);
}
 
Example #25
Source File: FsDatasetUtil.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * @return true iff {@code f}'s name carries the unlink-tmp suffix.
 */
static boolean isUnlinkTmpFile(File f) {
  final String name = f.getName();
  return name.endsWith(DatanodeUtil.UNLINK_BLOCK_SUFFIX);
}
 
Example #26
Source File: BlockPoolSlice.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
/**
 * Move a finalized replica's files into the id-derived subdirectory under
 * finalized, creating the directory if necessary.
 */
File addFinalizedBlock(Block b, ReplicaInfo replicaInfo) throws IOException {
  final File destDir =
      DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  fileIoProvider.mkdirsWithExistsCheck(volume, destDir);
  return FsDatasetImpl.moveBlockFiles(b, replicaInfo, destDir);
}
 
Example #27
Source File: FsDatasetUtil.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * @return the meta file that pairs with block file {@code f} at generation
 *         stamp {@code gs}, located in the same directory.
 */
static File getMetaFile(File f, long gs) {
  final String metaName = DatanodeUtil.getMetaName(f.getName(), gs);
  return new File(f.getParent(), metaName);
}
 
Example #28
Source File: BlockPoolSlice.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Create a temporary file for the block under tmpDir. Temporary files get
 * moved to the finalized block directory when the block is finalized.
 */
File createTmpFile(Block b) throws IOException {
  final File target = new File(tmpDir, b.getBlockName());
  return DatanodeUtil.createTmpFile(b, target);
}
 
Example #29
Source File: BlockPoolSlice.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Create an RBW file for the block under rbwDir. RBW files get moved to
 * the finalized block directory when the block is finalized.
 */
File createRbwFile(Block b) throws IOException {
  final File target = new File(rbwDir, b.getBlockName());
  return DatanodeUtil.createTmpFile(b, target);
}
 
Example #30
Source File: BlockPoolSlice.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Create an RBW file for the block under rbwDir. RBW files get moved to
 * the finalized block directory when the block is finalized.
 */
File createRbwFile(Block b) throws IOException {
  final File target = new File(rbwDir, b.getBlockName());
  return DatanodeUtil.createTmpFile(b, target);
}