Java Code Examples for org.apache.hadoop.hdfs.server.datanode.DataStorage#STORAGE_DIR_FINALIZED

The following examples show how to use org.apache.hadoop.hdfs.server.datanode.DataStorage#STORAGE_DIR_FINALIZED. This constant names the finalized subdirectory of a block pool's current directory, where the datanode keeps replicas that have been fully written and committed. You can go to the original project or source file by following the links above each example.
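
Before the examples, here is a minimal sketch of how this constant composes into the datanode's on-disk layout. Only the DataStorage constants come from Hadoop; the base path, class name, and variable names are illustrative.

import java.io.File;

import org.apache.hadoop.hdfs.server.datanode.DataStorage;

public class FinalizedDirLayoutSketch {
  public static void main(String[] args) {
    // Illustrative block-pool directory; a real one lives under a
    // dfs.datanode.data.dir volume's "current" directory.
    File bpDir = new File("/data/dfs/dn/current/BP-1-127.0.0.1-1000000000000");

    // <bpDir>/current
    File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
    // <bpDir>/current/finalized holds completed, committed replicas
    File finalizedDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_FINALIZED);
    // <bpDir>/current/rbw holds replicas still being written
    File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);

    System.out.println(finalizedDir); // .../current/finalized
    System.out.println(rbwDir);       // .../current/rbw
  }
}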
Example 1
Source File: FsVolumeImpl.java    From hadoop with Apache License 2.0 (an identical copy appears in big-c)
boolean isBPDirEmpty(String bpid) throws IOException {
  File volumeCurrentDir = this.getCurrentDir();
  File bpDir = new File(volumeCurrentDir, bpid);
  File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  File finalizedDir = new File(bpCurrentDir,
      DataStorage.STORAGE_DIR_FINALIZED);
  File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
  if (finalizedDir.exists() && !DatanodeUtil.dirNoFilesRecursive(
      finalizedDir)) {
    return false;
  }
  if (rbwDir.exists() && FileUtil.list(rbwDir).length != 0) {
    return false;
  }
  return true;
}
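
This helper reports a block-pool directory as empty only when the finalized tree contains no files at all and the rbw directory has no entries, which lets the caller decide whether the block pool can safely be removed from the volume. DatanodeUtil.dirNoFilesRecursive walks the entire finalized tree; a minimal equivalent, shown here as a sketch rather than the actual Hadoop implementation, looks like this:

import java.io.File;
import java.io.IOException;

class DirCheckSketch {
  /** Returns true iff no regular file exists anywhere under dir. */
  static boolean dirNoFilesRecursive(File dir) throws IOException {
    File[] entries = dir.listFiles();
    if (entries == null) {
      throw new IOException("Cannot list contents of " + dir);
    }
    for (File entry : entries) {
      if (entry.isFile()) {
        return false;
      }
      if (entry.isDirectory() && !dirNoFilesRecursive(entry)) {
        return false;
      }
    }
    return true;
  }
}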
 
Example 2
Source File: TestDFSFinalize.java    From hadoop with Apache License 2.0 (an identical copy appears in big-c)
/**
 * Verify that the current directory exists and that the previous directory
 * does not exist.  Verify that current hasn't been modified by comparing
 * the checksums of the files it contains with their original checksums.
 */
static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs,
  String bpid) throws Exception {
  List<File> dirs = Lists.newArrayList();
  for (int i = 0; i < nameNodeDirs.length; i++) {
    File curDir = new File(nameNodeDirs[i], "current");
    dirs.add(curDir);
    FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
  }
  
  FSImageTestUtil.assertParallelFilesAreIdentical(
      dirs, Collections.<String>emptySet());
  
  File dnCurDirs[] = new File[dataNodeDirs.length];
  for (int i = 0; i < dataNodeDirs.length; i++) {
    dnCurDirs[i] = new File(dataNodeDirs[i],"current");
    assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i],
            false), UpgradeUtilities.checksumMasterDataNodeContents());
  }
  for (int i = 0; i < nameNodeDirs.length; i++) {
    assertFalse(new File(nameNodeDirs[i],"previous").isDirectory());
  }

  if (bpid == null) {
    for (int i = 0; i < dataNodeDirs.length; i++) {
      assertFalse(new File(dataNodeDirs[i],"previous").isDirectory());
    }
  } else {
    for (int i = 0; i < dataNodeDirs.length; i++) {
      File bpRoot = BlockPoolSliceStorage.getBpRoot(bpid, dnCurDirs[i]);
      assertFalse(new File(bpRoot,"previous").isDirectory());
      
      File bpCurFinalizeDir = new File(bpRoot,"current/"+DataStorage.STORAGE_DIR_FINALIZED);
      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
              bpCurFinalizeDir, true),
              UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
    }
  }
}
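
The bpid argument selects between the two datanode layouts. With no block pool id, the test only asserts that <dataDir>/previous is gone; with a block pool id, it asserts the same for the block pool's own previous directory and additionally verifies the contents of current/finalized against the master checksum recorded by UpgradeUtilities (Example 4 below).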
 
 
Example 3
Source File: BlockPoolSlice.java    From hadoop with Apache License 2.0 (an identical copy appears in big-c)
/**
 * Create a block pool slice
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPoolSlice belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  this.finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);
  this.lazypersistDir = new File(currentDir, DataStorage.STORAGE_DIR_LAZY_PERSIST);
  if (!this.finalizedDir.exists()) {
    if (!this.finalizedDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + this.finalizedDir);
    }
  }

  this.deleteDuplicateReplicas = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends = conf.getBoolean(
      DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
      DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  // Use cached value initially if available. Or the following call will
  // block until the initial du command completes.
  this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
  this.dfsUsage.start();

  // Make the dfs usage to be saved during shutdown.
  ShutdownHookManager.get().addShutdownHook(
    new Runnable() {
      @Override
      public void run() {
        if (!dfsUsedSaved) {
          saveDfsUsed();
        }
      }
    }, SHUTDOWN_HOOK_PRIORITY);
}
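
Note the startup policy encoded here: the tmp directory is always wiped, because replicas that were mid-write at the last shutdown are discarded (the comment leaves room for future datanode-local recovery); the rbw directory is wiped only when appends are disabled; and disk usage tracking starts from a cached value when one is available, so the constructor does not have to block on an initial du. A shutdown hook saves the usage figure so the next start can reuse it.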
 
Example 4
Source File: UpgradeUtilities.java    From hadoop with Apache License 2.0 (an identical copy appears in big-c)
/**
 * Initialize the data structures used by this class.  
 * IMPORTANT NOTE: This method must be called once before calling 
 *                 any other public method on this class.  
 * <p>
 * Creates a singleton master populated storage
 * directory for a Namenode (contains edits, fsimage,
 * version, and time files) and a Datanode (contains version and
 * block files).  This can be a lengthy operation.
 */
public static void initialize() throws Exception {
  createEmptyDirs(new String[] {TEST_ROOT_DIR.toString()});
  Configuration config = new HdfsConfiguration();
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeStorage.toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeStorage.toString());
  config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, datanodeStorage.toString());
  MiniDFSCluster cluster = null;
  String bpid = null;
  try {
    // format data-node
    createEmptyDirs(new String[] {datanodeStorage.toString()});
    
    // format and start NameNode and start DataNode
    DFSTestUtil.formatNameNode(config);
    cluster =  new MiniDFSCluster.Builder(config)
                                 .numDataNodes(1)
                                 .startupOption(StartupOption.REGULAR)
                                 .format(false)
                                 .manageDataDfsDirs(false)
                                 .manageNameDfsDirs(false)
                                 .build();
      
    NamenodeProtocols namenode = cluster.getNameNodeRpc();
    namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
    namenodeStorageFsscTime = namenode.versionRequest().getCTime();
    namenodeStorageClusterID = namenode.versionRequest().getClusterID();
    namenodeStorageBlockPoolID = namenode.versionRequest().getBlockPoolID();
    
    FileSystem fs = FileSystem.get(config);
    Path baseDir = new Path("/TestUpgrade");
    fs.mkdirs(baseDir);
    
    // write some files
    int bufferSize = 4096;
    byte[] buffer = new byte[bufferSize];
    for(int i=0; i < bufferSize; i++)
      buffer[i] = (byte)('0' + i % 50);
    writeFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
    writeFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);
    
    // save image
    namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    namenode.saveNamespace();
    namenode.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
    
    // write more files
    writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
    writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
    bpid = cluster.getNamesystem(0).getBlockPoolId();
  } finally {
    // shutdown
    if (cluster != null) cluster.shutdown();
    FileUtil.fullyDelete(new File(namenodeStorage,"in_use.lock"));
    FileUtil.fullyDelete(new File(datanodeStorage,"in_use.lock"));
  }
  namenodeStorageChecksum = checksumContents(NAME_NODE, 
      new File(namenodeStorage, "current"), false);
  File dnCurDir = new File(datanodeStorage, "current");
  datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir, false);
  
  File bpCurDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
      "current");
  blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir, false);
  
  File bpCurFinalizeDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
      "current/"+DataStorage.STORAGE_DIR_FINALIZED);
  blockPoolFinalizedStorageChecksum = checksumContents(DATA_NODE,
      bpCurFinalizeDir, true);
  
  File bpCurRbwDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
      "current/"+DataStorage.STORAGE_DIR_RBW);
  blockPoolRbwStorageChecksum = checksumContents(DATA_NODE, bpCurRbwDir,
      false);
}
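
Everything after the finally block records the master checksums: the namenode and datanode current directories, plus the block pool's current, finalized, and rbw directories. Tests such as TestDFSFinalize (Example 2 above) compare post-operation state against these baselines. The in_use.lock files left by the mini-cluster are deleted first so the master storage directories can be reused cleanly.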
 
 
 
Example 5
Source File: BlockPoolSlice.java    From lucene-solr with Apache License 2.0
/**
 * Create a block pool slice
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPoolSlice belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @param timer include methods for getting time
 * @throws IOException Error making directories
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
               Configuration conf, Timer timer) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.fileIoProvider = volume.getFileIoProvider();
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  this.finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);
  this.lazypersistDir = new File(currentDir, DataStorage.STORAGE_DIR_LAZY_PERSIST);
  if (!this.finalizedDir.exists()) {
    if (!this.finalizedDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + this.finalizedDir);
    }
  }

  this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(conf);

  this.deleteDuplicateReplicas = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);

  this.cachedDfsUsedCheckTime =
      conf.getLong(
          DFSConfigKeys.DFS_DN_CACHED_DFSUSED_CHECK_INTERVAL_MS,
          DFSConfigKeys.DFS_DN_CACHED_DFSUSED_CHECK_INTERVAL_DEFAULT_MS);

  this.maxDataLength = conf.getInt(
      CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH,
      CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT);

  this.timer = timer;

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    fileIoProvider.fullyDelete(volume, tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);

  // create the rbw and tmp directories if they don't exist.
  fileIoProvider.mkdirs(volume, rbwDir);
  fileIoProvider.mkdirs(volume, tmpDir);

  if (addReplicaThreadPool == null) {
    // initialize add replica fork join pool
    initializeAddReplicaPool(conf);
  }
  // Make the dfs usage to be saved during shutdown.
  shutdownHook = new Runnable() {
    @Override
    public void run() {
      addReplicaThreadPool.shutdownNow();
    }
  };
  ShutdownHookManager.get().addShutdownHook(shutdownHook,
      SHUTDOWN_HOOK_PRIORITY);
}
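
Compared with Example 3, this later revision routes file operations through the volume's FileIoProvider, drops the dfs.support.append branch and simply mkdirs the rbw and tmp directories, takes an injectable Timer for testability, and replaces the usage-saving shutdown hook with one that shuts down the shared add-replica thread pool.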
 
Example 6
Source File: MiniDFSCluster.java    From hadoop with Apache License 2.0 (an identical copy appears in big-c)
/**
 * Get finalized directory for a block pool
 * @param storageDir storage directory
 * @param bpid Block pool Id
 * @return finalized directory for a block pool
 */
public static File getFinalizedDir(File storageDir, String bpid) {
  return new File(getBPDir(storageDir, bpid, Storage.STORAGE_DIR_CURRENT)
      + DataStorage.STORAGE_DIR_FINALIZED );
}
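
Note that getBPDir(storageDir, bpid, Storage.STORAGE_DIR_CURRENT) returns a string ending in a path separator, which is why plain concatenation with STORAGE_DIR_FINALIZED produces a valid .../current/finalized path. A typical test-side use might look like the following sketch; the MiniDFSCluster accessors follow the Hadoop test harness, but the helper itself is illustrative, not taken from the projects above.

// Print the finalized entries for datanode 0, volume 0, in a running
// MiniDFSCluster. Illustrative helper, not part of MiniDFSCluster.
static void printFinalizedEntries(MiniDFSCluster cluster) {
  String bpid = cluster.getNamesystem().getBlockPoolId();
  File storageDir = cluster.getInstanceStorageDir(0, 0);
  File finalizedDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
  File[] entries = finalizedDir.listFiles();
  if (entries != null) {
    for (File entry : entries) {
      System.out.println("finalized entry: " + entry.getName());
    }
  }
}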
 