org.apache.hadoop.fs.DU Java Examples

The following examples show how to use org.apache.hadoop.fs.DU, which periodically runs the Unix du command to track the disk space consumed by a directory. Each example is taken from an open-source project; the source file, originating project, and license are noted above it.
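Before the project examples, a minimal self-contained sketch of the DU API may help (it is not taken from any project below, and the /tmp path is only a placeholder): start() launches the background refresh thread, getUsed() returns the most recent measurement, and shutdown() stops the thread. The refresh interval comes from the fs.du.interval configuration key.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DU;

public class DUUsageExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Refresh interval is read from fs.du.interval (milliseconds).
    DU du = new DU(new File("/tmp"), conf);  // placeholder directory
    du.start();                              // launch the background refresh thread
    try {
      System.out.println("Bytes used: " + du.getUsed());
    } finally {
      du.shutdown();                         // stop the refresh thread
    }
  }
}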
Example #1
Source File: FSDataset.java    From RDFS with Apache License 2.0
FSVolume(FSDataset dataset, File currentDir, Configuration conf) throws IOException {
  this.currentDir = currentDir;
  File parent = currentDir.getParentFile();
  // DF reports capacity and free space for the partition holding this volume.
  this.usage = new DF(parent, conf);
  this.reserved = usage.getReserved();
  this.dataset = dataset;
  this.namespaceMap = new NamespaceMap();
  // DU tracks the bytes consumed under currentDir on a background thread.
  this.dfsUsage = new DU(currentDir, conf);
  this.dfsUsage.start();
}
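The constructor above pairs DF (which wraps the Unix df command and reports partition-level capacity) with DU (which tracks the bytes actually consumed under currentDir). As a hedged sketch of why both are needed, a free-space method on such a volume, using the usage, reserved, and dfsUsage fields initialized above, might look roughly like this (a simplification, not the verbatim FSDataset code):

long getAvailable() throws IOException {
  // Capacity left for DFS after subtracting the reserved bytes and what
  // this volume's data already consumes (as measured by DU).
  long remaining = usage.getCapacity() - reserved - dfsUsage.getUsed();
  // Never report more than the partition actually has free right now.
  long available = usage.getAvailable();
  if (remaining > available) {
    remaining = available;
  }
  return remaining > 0 ? remaining : 0;
}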
 
Example #2
Source File: BlockPoolSlice.java    From hadoop with Apache License 2.0
/**
 * Create a block pool slice.
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @throws IOException
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
  this.finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);
  this.lazypersistDir = new File(currentDir, DataStorage.STORAGE_DIR_LAZY_PERSIST);
  if (!this.finalizedDir.exists()) {
    if (!this.finalizedDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + this.finalizedDir);
    }
  }

  this.deleteDuplicateReplicas = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    FileUtil.fullyDelete(tmpDir);
  }
  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
  final boolean supportAppends = conf.getBoolean(
      DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
      DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
  if (rbwDir.exists() && !supportAppends) {
    FileUtil.fullyDelete(rbwDir);
  }
  if (!rbwDir.mkdirs()) {  // create the rbw directory if it does not exist
    if (!rbwDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + rbwDir.toString());
    }
  }
  if (!tmpDir.mkdirs()) {
    if (!tmpDir.isDirectory()) {
      throw new IOException("Mkdirs failed to create " + tmpDir.toString());
    }
  }
  // Use the cached value initially, if available; otherwise the following
  // call will block until the initial du command completes.
  this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
  this.dfsUsage.start();

  // Make sure the dfs usage value is saved during shutdown.
  ShutdownHookManager.get().addShutdownHook(
    new Runnable() {
      @Override
      public void run() {
        if (!dfsUsedSaved) {
          saveDfsUsed();
        }
      }
    }, SHUTDOWN_HOOK_PRIORITY);
}
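Note the three-argument constructor DU(File, Configuration, long) used above: seeding DU with a value persisted by an earlier run lets the constructor return without blocking on a full du scan, and the shutdown hook saves the latest measurement for the next startup. A minimal sketch of that pattern outside BlockPoolSlice follows; readCachedValue and persistValue are hypothetical stand-ins for the example's loadDfsUsed() and saveDfsUsed():

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DU;
import org.apache.hadoop.util.ShutdownHookManager;

public class CachedDUExample {
  // Hypothetical cache helpers standing in for loadDfsUsed()/saveDfsUsed().
  static long readCachedValue(File dir) { return -1; }      // -1 means no valid cache
  static void persistValue(File dir, long used) { /* write the value to disk */ }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    final File dir = new File("/data/bp");                  // placeholder directory
    // A negative seed makes DU run du immediately (blocking); a cached
    // non-negative value is used as-is, so construction does not block.
    final DU usage = new DU(dir, conf, readCachedValue(dir));
    usage.start();

    // Persist the latest measurement on shutdown so the next start is fast.
    ShutdownHookManager.get().addShutdownHook(new Runnable() {
      @Override
      public void run() {
        try {
          persistValue(dir, usage.getUsed());
        } catch (IOException e) {
          // best effort: skip saving the cache on failure
        }
      }
    }, 10);                                                 // shutdown hook priority
  }
}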
 
Example #3
Source File: BlockPoolSlice.java    From big-c with Apache License 2.0

The big-c copy of this constructor is identical, line for line, to the hadoop version shown in Example #2 above, so the code is not repeated here.