Java Code Examples for org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite

The following examples show how to use org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite. These examples are extracted from open-source projects. You can vote up the examples you find helpful or vote down the ones you don't, and you can visit the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source Project: big-c   Source File: INodeDirectory.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Destroy this directory and its entire subtree, collecting the blocks of
 * all descendants so the caller can remove them from the blocksMap.
 *
 * @param bsps storage policy suite passed through to child cleanup
 * @param collectedBlocks accumulates blocks to be removed from the blocksMap
 * @param removedINodes accumulates inodes removed from the namespace
 */
@Override
public void destroyAndCollectBlocks(final BlockStoragePolicySuite bsps,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  // Clear snapshot bookkeeping first so snapshot diffs can contribute
  // their blocks/inodes to the collections.
  final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
  if (sf != null) {
    sf.clear(bsps, this, collectedBlocks, removedINodes);
  }
  // Recursively destroy the children visible in the current state.
  for (INode child : getChildrenList(Snapshot.CURRENT_STATE_ID)) {
    child.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
  }
  // Release the shared ACL reference, if any, before clearing this inode.
  if (getAclFeature() != null) {
    AclStorage.removeAclFeature(getAclFeature());
  }
  clear();
  removedINodes.add(this);
}
 
Example 2
Source Project: hadoop   Source File: Mover.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Build a Mover wired to the given NameNode connector, reading its tuning
 * parameters (window width, thread count, per-node move limit, retry limit)
 * from the configuration.
 */
Mover(NameNodeConnector nnc, Configuration conf, AtomicInteger retryCount) {
  final long windowWidthMs = conf.getLong(
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY,
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_DEFAULT);
  final int dispatcherThreads = conf.getInt(
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY,
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_DEFAULT);
  final int maxMovesPerNode = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
  this.retryMaxAttempts = conf.getInt(
      DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY,
      DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT);
  this.retryCount = retryCount;
  // No nodes are explicitly included or excluded; the 6th argument is 0.
  this.dispatcher = new Dispatcher(nnc, Collections.<String> emptySet(),
      Collections.<String> emptySet(), windowWidthMs, dispatcherThreads, 0,
      maxMovesPerNode, conf);
  this.storages = new StorageMap();
  this.targetPaths = nnc.getTargetPaths();
  // One slot per possible policy id (ids are ID_BIT_LENGTH bits wide).
  this.blockStoragePolicies =
      new BlockStoragePolicy[1 << BlockStoragePolicySuite.ID_BIT_LENGTH];
}
 
Example 3
Source Project: hadoop   Source File: TestStoragePolicySummary.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verify that files under the HOT policy with differing DISK replica counts
 * (1 through 4) are tallied in separate storage-distribution buckets, one
 * count each.
 */
@Test
public void testMultipleHots() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  // One file each with 1..4 DISK replicas, all under the HOT policy.
  sts.add(new StorageType[]{StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK, StorageType.DISK}, hot);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4, actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  // Use uppercase 'L' for long literals; lowercase 'l' is easily read as '1'.
  expectedOutput.put("HOT|DISK:1(HOT)", 1L);
  expectedOutput.put("HOT|DISK:2(HOT)", 1L);
  expectedOutput.put("HOT|DISK:3(HOT)", 1L);
  expectedOutput.put("HOT|DISK:4(HOT)", 1L);
  Assert.assertEquals(expectedOutput, actualOutput);
}
 
Example 4
Source Project: hadoop   Source File: FSDirStatAndListingOp.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Resolve {@code src} and return its {@link HdfsFileStatus}, or null if the
 * path does not exist.
 *
 * @param fsd the directory tree to resolve against
 * @param src the path to look up
 * @param resolveLink whether to follow a trailing symlink
 * @param isRawPath whether the original path was in the raw namespace
 * @param includeStoragePolicy whether to include the storage policy id
 * @throws IOException if path resolution fails
 */
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath,
    boolean includeStoragePolicy)
  throws IOException {
  String srcs = FSDirectory.normalizePath(src);
  // The ".snapshot" directory is synthetic: when its inode resolves,
  // fabricate an empty directory status instead of resolving normally.
  if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
    if (fsd.getINode4DotSnapshot(srcs) != null) {
      return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
          HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
          BlockStoragePolicySuite.ID_UNSPECIFIED);
    }
    return null;
  }

  // Normal lookup happens under the directory read lock.
  fsd.readLock();
  try {
    final INodesInPath iip = fsd.getINodesInPath(srcs, resolveLink);
    return getFileInfo(fsd, src, iip, isRawPath, includeStoragePolicy);
  } finally {
    fsd.readUnlock();
  }
}
 
Example 5
Source Project: hbase   Source File: TestHFileOutputFormat2.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Look up the storage policy name of {@code path} on an old HDFS version,
 * where the unspecified-policy sentinel lives on BlockStoragePolicySuite and
 * must be read reflectively. Returns null when the policy cannot be
 * determined (non-DFS filesystem, missing file, or any lookup failure).
 */
private String getStoragePolicyNameForOldHDFSVersion(FileSystem fs, Path path) {
  try {
    if (!(fs instanceof DistributedFileSystem)) {
      return null;
    }
    final DistributedFileSystem dfs = (DistributedFileSystem) fs;
    final HdfsFileStatus fileStatus =
        dfs.getClient().getFileInfo(path.toUri().getPath());
    if (fileStatus != null) {
      final byte policyId = fileStatus.getStoragePolicy();
      // Read the static ID_UNSPECIFIED field via reflection since it is not
      // present on all HDFS versions this code must run against.
      final Field idUnspecified =
          BlockStoragePolicySuite.class.getField("ID_UNSPECIFIED");
      if (policyId != idUnspecified.getByte(BlockStoragePolicySuite.class)) {
        // Scan the suite for the policy matching the file's id.
        for (BlockStoragePolicy policy : dfs.getStoragePolicies()) {
          if (policy.getId() == policyId) {
            return policy.getName();
          }
        }
      }
    }
  } catch (Throwable e) {
    // Best effort: reflection or RPC may fail on incompatible versions.
    LOG.warn("failed to get block storage policy of [" + path + "]", e);
  }

  return null;
}
 
Example 6
Source Project: hadoop   Source File: INodeReference.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Compute quota usage of the referred inode, never using its cached quota
 * and bounding the computation by this node's own last snapshot id.
 */
@Override
public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
    byte blockStoragePolicyId, QuotaCounts counts, boolean useCache,
    int lastSnapshotId) {
  // if this.lastSnapshotId < lastSnapshotId, the rename of the referred 
  // node happened before the rename of its ancestor. This should be 
  // impossible since for WithName node we only count its children at the 
  // time of the rename. 
  Preconditions.checkState(lastSnapshotId == Snapshot.CURRENT_STATE_ID
      || this.lastSnapshotId >= lastSnapshotId);
  final INode referred = this.getReferredINode().asReference()
      .getReferredINode();
  // We will continue the quota usage computation using the same snapshot id
  // as time line (if the given snapshot id is valid). Also, we cannot use 
  // cache for the referred node since its cached quota may have already 
  // been updated by changes in the current tree.
  int id = lastSnapshotId != Snapshot.CURRENT_STATE_ID ? 
      lastSnapshotId : this.lastSnapshotId;
  return referred.computeQuotaUsage(bsps, blockStoragePolicyId, counts,
      false, id);
}
 
Example 7
Source Project: big-c   Source File: Mover.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Build a Mover wired to the given NameNode connector, reading its tuning
 * parameters (window width, thread count, per-node move limit, retry limit)
 * from the configuration.
 */
Mover(NameNodeConnector nnc, Configuration conf, AtomicInteger retryCount) {
  final long windowWidthMs = conf.getLong(
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY,
      DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_DEFAULT);
  final int dispatcherThreads = conf.getInt(
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY,
      DFSConfigKeys.DFS_MOVER_MOVERTHREADS_DEFAULT);
  final int maxMovesPerNode = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
  this.retryMaxAttempts = conf.getInt(
      DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY,
      DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT);
  this.retryCount = retryCount;
  // No nodes are explicitly included or excluded; the 6th argument is 0.
  this.dispatcher = new Dispatcher(nnc, Collections.<String> emptySet(),
      Collections.<String> emptySet(), windowWidthMs, dispatcherThreads, 0,
      maxMovesPerNode, conf);
  this.storages = new StorageMap();
  this.targetPaths = nnc.getTargetPaths();
  // One slot per possible policy id (ids are ID_BIT_LENGTH bits wide).
  this.blockStoragePolicies =
      new BlockStoragePolicy[1 << BlockStoragePolicySuite.ID_BIT_LENGTH];
}
 
Example 8
Source Project: hadoop   Source File: FSDirRenameOp.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verify quota for rename operation where srcInodes[srcInodes.length-1] moves
 * dstInodes[dstInodes.length-1]
 *
 * @param fsd the directory tree
 * @param src resolved path of the rename source
 * @param dst resolved path of the rename destination
 * @throws QuotaExceededException if the rename would exceed a quota in the
 *         destination tree
 */
private static void verifyQuotaForRename(FSDirectory fsd, INodesInPath src,
    INodesInPath dst) throws QuotaExceededException {
  if (!fsd.getFSNamesystem().isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
    // Do not check quota if edits log is still being processed
    return;
  }
  // Walk down from the root until the two paths diverge.
  int i = 0;
  while(src.getINode(i) == dst.getINode(i)) { i++; }
  // src[i - 1] is the last common ancestor.
  BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
  final QuotaCounts delta = src.getLastINode().computeQuotaUsage(bsps);

  // Reduce the required quota by dst that is being removed
  final INode dstINode = dst.getLastINode();
  if (dstINode != null) {
    delta.subtract(dstINode.computeQuotaUsage(bsps));
  }
  // Only the destination path below the common ancestor needs checking;
  // usage above the ancestor is unchanged by the rename.
  FSDirectory.verifyQuota(dst, dst.length() - 1, delta, src.getINode(i - 1));
}
 
Example 9
Source Project: big-c   Source File: FSDirRenameOp.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Clean up the inode previously at the rename destination, collecting its
 * blocks for removal from the blocksMap.
 *
 * @param bsps storage policy suite passed through to subtree cleanup
 * @param collectedBlocks accumulates blocks to be removed from the blocksMap
 * @return true if the destination's files were actually deleted — either it
 *         was not retained by any snapshot (destroyed outright) or the
 *         subtree cleanup released a non-negative namespace count
 * @throws QuotaExceededException if quota bookkeeping fails during cleanup
 */
boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks)
    throws QuotaExceededException {
  Preconditions.checkState(oldDstChild != null);
  List<INode> removedINodes = new ChunkedArrayList<>();
  final boolean filesDeleted;
  if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
    // Not referenced by any snapshot: destroy the whole subtree now.
    oldDstChild.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    filesDeleted = true;
  } else {
    // Still referenced by the latest snapshot: only clean the current state.
    filesDeleted = oldDstChild.cleanSubtree(bsps, Snapshot.CURRENT_STATE_ID,
        dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes)
        .getNameSpace() >= 0;
  }
  fsd.getFSNamesystem().removeLeasesAndINodes(src, removedINodes, false);
  return filesDeleted;
}
 
Example 10
Source Project: big-c   Source File: FSDirRenameOp.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verify quota for rename operation where srcInodes[srcInodes.length-1] moves
 * dstInodes[dstInodes.length-1]
 *
 * @param fsd the directory tree
 * @param src resolved path of the rename source
 * @param dst resolved path of the rename destination
 * @throws QuotaExceededException if the rename would exceed a quota in the
 *         destination tree
 */
private static void verifyQuotaForRename(FSDirectory fsd, INodesInPath src,
    INodesInPath dst) throws QuotaExceededException {
  if (!fsd.getFSNamesystem().isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
    // Do not check quota if edits log is still being processed
    return;
  }
  // Walk down from the root until the two paths diverge.
  int i = 0;
  while(src.getINode(i) == dst.getINode(i)) { i++; }
  // src[i - 1] is the last common ancestor.
  BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
  final QuotaCounts delta = src.getLastINode().computeQuotaUsage(bsps);

  // Reduce the required quota by dst that is being removed
  final INode dstINode = dst.getLastINode();
  if (dstINode != null) {
    delta.subtract(dstINode.computeQuotaUsage(bsps));
  }
  // Only the destination path below the common ancestor needs checking;
  // usage above the ancestor is unchanged by the rename.
  FSDirectory.verifyQuota(dst, dst.length() - 1, delta, src.getINode(i - 1));
}
 
Example 11
Source Project: hadoop   Source File: DirectoryWithSnapshotFeature.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Clear the created list: destroy every inode recorded as created in this
 * diff, collect their blocks, and detach them from the current directory.
 *
 * @return the quota usage released by the destroyed inodes
 */
private QuotaCounts destroyCreatedList(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory currentINode,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  final List<INode> createdList = getList(ListType.CREATED);
  for (INode c : createdList) {
    // Tally the usage being released before destroying the inode.
    c.computeQuotaUsage(bsps, counts, true);
    c.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    // c should be contained in the children list, remove it
    currentINode.removeChild(c);
  }
  createdList.clear();
  return counts;
}
 
Example 12
Source Project: big-c   Source File: DirectoryWithSnapshotFeature.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Merge the posterior directory diff into this one; inodes that become
 * unreferenced by the merge are destroyed and their blocks collected.
 *
 * @return the quota usage released by the destroyed inodes
 */
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory currentDir, final DirectoryDiff posterior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final QuotaCounts counts = new QuotaCounts.Builder().build();
  diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
    /** Collect blocks for deleted files. */
    @Override
    public void process(INode inode) {
      if (inode != null) {
        // Account for the released usage, then destroy the inode.
        inode.computeQuotaUsage(bsps, counts, false);
        inode.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
      }
    }
  });
  return counts;
}
 
Example 13
Source Project: hadoop   Source File: TestStoragePolicySummary.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verify that HOT-policy files with the same DISK replica count are
 * aggregated into one bucket with the correct tally (here 1, 2, 2, 1 for
 * replica counts 1 through 4).
 */
@Test
public void testMultipleHotsWithDifferentCounts() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  // 1x single-replica, 2x two-replica, 2x three-replica, 1x four-replica.
  sts.add(new StorageType[]{StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK, StorageType.DISK}, hot);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4, actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  // Use uppercase 'L' for long literals; lowercase 'l' is easily read as '1'.
  expectedOutput.put("HOT|DISK:1(HOT)", 1L);
  expectedOutput.put("HOT|DISK:2(HOT)", 2L);
  expectedOutput.put("HOT|DISK:3(HOT)", 2L);
  expectedOutput.put("HOT|DISK:4(HOT)", 1L);
  Assert.assertEquals(expectedOutput, actualOutput);
}
 
Example 14
Source Project: hadoop   Source File: INodeDirectory.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Set or update the namespace/storage-space quota on this directory.
 *
 * @param bsps suite used to compute current usage when the quota feature is
 *             first added
 * @param nsQuota new namespace quota (only applied when {@code type} is null)
 * @param ssQuota new storage-space quota, or the per-type quota when
 *                {@code type} is given
 * @param type storage type for a typed quota, or null for aggregate quotas
 */
void setQuota(BlockStoragePolicySuite bsps, long nsQuota, long ssQuota, StorageType type) {
  DirectoryWithQuotaFeature quota = getDirectoryWithQuotaFeature();
  if (quota != null) {
    // already has quota; so set the quota to the new values
    if (type != null) {
      quota.setQuota(ssQuota, type);
    } else {
      quota.setQuota(nsQuota, ssQuota);
    }
    // If the update effectively removed all quotas, drop the feature
    // entirely — except on the root, which always keeps it.
    if (!isQuotaSet() && !isRoot()) {
      removeFeature(quota);
    }
  } else {
    // First quota on this directory: compute the current usage so the new
    // feature starts with accurate consumed-space counters.
    final QuotaCounts c = computeQuotaUsage(bsps);
    DirectoryWithQuotaFeature.Builder builder =
        new DirectoryWithQuotaFeature.Builder().nameSpaceQuota(nsQuota);
    if (type != null) {
      builder.typeQuota(type, ssQuota);
    } else {
      builder.storageSpaceQuota(ssQuota);
    }
    addDirectoryWithQuotaFeature(builder.build()).setSpaceConsumed(c);
  }
}
 
Example 15
Source Project: big-c   Source File: INodeSymlink.java    License: Apache License 2.0 5 votes vote down vote up
/** A symlink contributes exactly one namespace entry and no storage space. */
@Override
public QuotaCounts computeQuotaUsage(
    BlockStoragePolicySuite bsps, byte blockStoragePolicyId,
    QuotaCounts counts, boolean useCache, int lastSnapshotId) {
  // None of the parameters influence the result: always +1 namespace.
  counts.addNameSpace(1);
  return counts;
}
 
Example 16
Source Project: big-c   Source File: INodeDirectory.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Compute namespace/storage usage of this directory, either scoped to a
 * specific snapshot (when one is given and a snapshot feature exists) or
 * for the current tree, using the cached quota when permitted.
 */
@Override
public QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
    byte blockStoragePolicyId, QuotaCounts counts, boolean useCache,
    int lastSnapshotId) {
  final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();

  // we are computing the quota usage for a specific snapshot here, i.e., the
  // computation only includes files/directories that exist at the time of the
  // given snapshot
  if (sf != null && lastSnapshotId != Snapshot.CURRENT_STATE_ID
      && !(useCache && isQuotaSet())) {
    ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshotId);
    for (INode child : childrenList) {
      // Resolve the policy id to charge the child with — presumably the
      // child's own policy when set, else ours (see the helper's name).
      final byte childPolicyId = child.getStoragePolicyIDForQuota(blockStoragePolicyId);
      child.computeQuotaUsage(bsps, childPolicyId, counts, useCache,
          lastSnapshotId);
    }
    counts.addNameSpace(1);
    return counts;
  }
  
  // compute the quota usage in the scope of the current directory tree
  final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
  if (useCache && q != null && q.isQuotaSet()) { // use the cached quota
    return q.AddCurrentSpaceUsage(counts);
  } else {
    // A quota feature with no quota set means its cached usage is not
    // authoritative, so force a full recomputation below this point.
    useCache = q != null && !q.isQuotaSet() ? false : useCache;
    return computeDirectoryQuotaUsage(bsps, blockStoragePolicyId, counts,
        useCache, lastSnapshotId);
  }
}
 
Example 17
Source Project: big-c   Source File: DirectorySnapshottableFeature.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Remove the snapshot with the given name from {@link #snapshotsByNames},
 * and delete all the corresponding DirectoryDiff.
 *
 * @param bsps The block storage policy suite used during subtree cleaning
 * @param snapshotRoot The directory where we take snapshots
 * @param snapshotName The name of the snapshot to be removed
 * @param collectedBlocks Used to collect information to update blocksMap
 * @param removedINodes Used to collect inodes removed from the namespace
 * @return The removed snapshot.
 * @throws SnapshotException if no snapshot with the given name exists
 */
public Snapshot removeSnapshot(BlockStoragePolicySuite bsps, INodeDirectory snapshotRoot,
    String snapshotName, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) throws SnapshotException {
  final int i = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
  if (i < 0) {
    throw new SnapshotException("Cannot delete snapshot " + snapshotName
        + " from path " + snapshotRoot.getFullPathName()
        + ": the snapshot does not exist.");
  } else {
    final Snapshot snapshot = snapshotsByNames.get(i);
    // The snapshot immediately preceding the one being removed bounds the
    // range of diffs that get cleaned.
    int prior = Snapshot.findLatestSnapshot(snapshotRoot, snapshot.getId());
    try {
      QuotaCounts counts = snapshotRoot.cleanSubtree(bsps, snapshot.getId(),
          prior, collectedBlocks, removedINodes);
      INodeDirectory parent = snapshotRoot.getParent();
      if (parent != null) {
        // there will not be any WithName node corresponding to the deleted
        // snapshot, thus only update the quota usage in the current tree
        parent.addSpaceConsumed(counts.negation(), true);
      }
    } catch(QuotaExceededException e) {
      // Deleting a snapshot should only ever release usage, never add it.
      INode.LOG.error("BUG: removeSnapshot increases namespace usage.", e);
    }
    // remove from snapshotsByNames after successfully cleaning the subtree
    snapshotsByNames.remove(i);
    return snapshot;
  }
}
 
Example 18
Source Project: big-c   Source File: DirectorySnapshottableFeature.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Compute the content summary of the snapshottable directory's subtree,
 * then add the snapshot count and mark it as one snapshottable directory.
 * NOTE(review): {@code bsps} is unused by this implementation; it appears
 * to be kept for signature parity with callers — confirm before removing.
 */
public ContentSummaryComputationContext computeContentSummary(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory snapshotRoot,
    final ContentSummaryComputationContext summary) {
  snapshotRoot.computeContentSummary(summary);
  summary.getCounts().addContent(Content.SNAPSHOT, snapshotsByNames.size());
  summary.getCounts().addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
  return summary;
}
 
Example 19
Source Project: big-c   Source File: INode.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Count subtree {@link Quota#NAMESPACE} and {@link Quota#STORAGESPACE} usages.
 * Entry point for FSDirectory where blockStoragePolicyId is given its initial
 * value.
 */
public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps) {
  // Symlinks start with an unspecified policy id; everything else starts
  // from its own effective storage policy id.
  final byte initialPolicyId;
  if (isSymlink()) {
    initialPolicyId = BlockStoragePolicySuite.ID_UNSPECIFIED;
  } else {
    initialPolicyId = getStoragePolicyID();
  }
  return computeQuotaUsage(bsps, initialPolicyId,
      new QuotaCounts.Builder().build(), true, Snapshot.CURRENT_STATE_ID);
}
 
Example 20
Source Project: NNAnalytics   Source File: GSetGenerator.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Recursively populate {@code newGSet} with a synthetic directory tree:
 * {@code numOfDirsPerDepth} directories per level down to {@code depth}
 * levels, each holding {@code filesPerDir} generated files. Roughly half
 * the directories get a quota feature with randomized consumed space.
 */
public void generateGSet(
    GSet<INode, INodeWithAdditionalFields> newGSet,
    INodeDirectory parent,
    int filesPerDir,
    int numOfDirsPerDepth,
    short depth)
    throws IOException {
  // Base case: nothing to create below this level.
  if (depth == 0) {
    return;
  }
  final List<INodeDirectory> subDirs = new ArrayList<>();
  for (int dirIndex = 1; dirIndex <= numOfDirsPerDepth; dirIndex++) {
    // Random modification/access time within the last year.
    final long timestamp = now - rand.nextLong(TimeUnit.DAYS.toMillis(365));
    final INodeDirectory dir = new INodeDirectory(
        ID++, ("dir" + dirIndex).getBytes(CHARSET), status, timestamp);
    final boolean attached = parent.addChild(dir);
    // About half of the directories carry an explicit quota feature.
    if (rand.nextBoolean()) {
      dir.setQuota(BlockStoragePolicySuite.createDefaultSuite(), 9000L, 9999999999L, null);
      dir.getDirectoryWithQuotaFeature()
          .setSpaceConsumed(rand.nextLong(5000L), rand.nextLong(9999999991L), null);
    }
    if (attached) {
      generateFilesForDirectory(gset, dir, filesPerDir);
      dir.setParent(parent);
      if (newGSet.put(dir) == null) {
        DIRS_MADE++;
      }
    }
    subDirs.add(dir);
  }
  // Recurse one level deeper under every generated directory.
  for (INodeDirectory subDir : subDirs) {
    generateGSet(newGSet, subDir, filesPerDir, numOfDirsPerDepth, (short) (depth - 1));
  }
}
 
Example 21
Source Project: NNAnalytics   Source File: GSetGenerator.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Recursively populate {@code newGSet} with a synthetic directory tree:
 * {@code numOfDirsPerDepth} directories per level down to {@code depth}
 * levels, each holding {@code filesPerDir} generated files. Roughly half
 * the directories get a quota feature with randomized consumed space.
 */
public void generateGSet(
    GSet<INode, INodeWithAdditionalFields> newGSet,
    INodeDirectory parent,
    int filesPerDir,
    int numOfDirsPerDepth,
    short depth)
    throws IOException {
  // Base case: nothing to create below this level.
  if (depth == 0) {
    return;
  }
  final List<INodeDirectory> subDirs = new ArrayList<>();
  for (int dirIndex = 1; dirIndex <= numOfDirsPerDepth; dirIndex++) {
    // Random modification/access time within the last year.
    final long timestamp = now - rand.nextLong(TimeUnit.DAYS.toMillis(365));
    final INodeDirectory dir = new INodeDirectory(
        ID++, ("dir" + dirIndex).getBytes(CHARSET), status, timestamp);
    final boolean attached = parent.addChild(dir);
    // About half of the directories carry an explicit quota feature.
    if (rand.nextBoolean()) {
      dir.setQuota(BlockStoragePolicySuite.createDefaultSuite(), 9000L, 9999999999L, null);
      dir.getDirectoryWithQuotaFeature()
          .setSpaceConsumed(rand.nextLong(5000L), rand.nextLong(9999999991L), null);
    }
    if (attached) {
      generateFilesForDirectory(gset, dir, filesPerDir);
      dir.setParent(parent);
      if (newGSet.put(dir) == null) {
        DIRS_MADE++;
      }
    }
    subDirs.add(dir);
  }
  // Recurse one level deeper under every generated directory.
  for (INodeDirectory subDir : subDirs) {
    generateGSet(newGSet, subDir, filesPerDir, numOfDirsPerDepth, (short) (depth - 1));
  }
}
 
Example 22
Source Project: NNAnalytics   Source File: GSetGenerator.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Recursively populate {@code newGSet} with a synthetic directory tree:
 * {@code numOfDirsPerDepth} directories per level down to {@code depth}
 * levels, each holding {@code filesPerDir} generated files. Roughly half
 * the directories get a quota feature with randomized consumed space.
 */
public void generateGSet(
    GSet<INode, INodeWithAdditionalFields> newGSet,
    INodeDirectory parent,
    int filesPerDir,
    int numOfDirsPerDepth,
    short depth)
    throws IOException {
  // Base case: nothing to create below this level.
  if (depth == 0) {
    return;
  }
  final List<INodeDirectory> subDirs = new ArrayList<>();
  for (int dirIndex = 1; dirIndex <= numOfDirsPerDepth; dirIndex++) {
    // Random modification/access time within the last year.
    final long timestamp = now - rand.nextLong(TimeUnit.DAYS.toMillis(365));
    final INodeDirectory dir = new INodeDirectory(
        ID++, ("dir" + dirIndex).getBytes(CHARSET), status, timestamp);
    final boolean attached = parent.addChild(dir);
    // About half of the directories carry an explicit quota feature.
    if (rand.nextBoolean()) {
      dir.setQuota(BlockStoragePolicySuite.createDefaultSuite(), 9000L, 9999999999L, null);
      dir.getDirectoryWithQuotaFeature()
          .setSpaceConsumed(rand.nextLong(5000L), rand.nextLong(9999999991L), null);
    }
    if (attached) {
      generateFilesForDirectory(gset, dir, filesPerDir);
      dir.setParent(parent);
      if (newGSet.put(dir) == null) {
        DIRS_MADE++;
      }
    }
    subDirs.add(dir);
  }
  // Recurse one level deeper under every generated directory.
  for (INodeDirectory subDir : subDirs) {
    generateGSet(newGSet, subDir, filesPerDir, numOfDirsPerDepth, (short) (depth - 1));
  }
}
 
Example 23
Source Project: NNAnalytics   Source File: GSetGenerator.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Recursively populate {@code newGSet} with a synthetic directory tree:
 * {@code numOfDirsPerDepth} directories per level down to {@code depth}
 * levels, each holding {@code filesPerDir} generated files. Roughly half
 * the directories get a quota feature with randomized consumed space.
 */
public void generateGSet(
    GSet<INode, INodeWithAdditionalFields> newGSet,
    INodeDirectory parent,
    int filesPerDir,
    int numOfDirsPerDepth,
    short depth)
    throws IOException {
  // Base case: nothing to create below this level.
  if (depth == 0) {
    return;
  }
  final List<INodeDirectory> subDirs = new ArrayList<>();
  for (int dirIndex = 1; dirIndex <= numOfDirsPerDepth; dirIndex++) {
    // Random modification/access time within the last year.
    final long timestamp = now - rand.nextLong(TimeUnit.DAYS.toMillis(365));
    final INodeDirectory dir = new INodeDirectory(
        ID++, ("dir" + dirIndex).getBytes(CHARSET), status, timestamp);
    final boolean attached = parent.addChild(dir);
    // About half of the directories carry an explicit quota feature.
    if (rand.nextBoolean()) {
      dir.setQuota(BlockStoragePolicySuite.createDefaultSuite(), 9000L, 9999999999L, null);
      dir.getDirectoryWithQuotaFeature()
          .setSpaceConsumed(rand.nextLong(5000L), rand.nextLong(9999999991L), null);
    }
    if (attached) {
      generateFilesForDirectory(gset, dir, filesPerDir);
      dir.setParent(parent);
      if (newGSet.put(dir) == null) {
        DIRS_MADE++;
      }
    }
    subDirs.add(dir);
  }
  // Recurse one level deeper under every generated directory.
  for (INodeDirectory subDir : subDirs) {
    generateGSet(newGSet, subDir, filesPerDir, numOfDirsPerDepth, (short) (depth - 1));
  }
}
 
Example 24
Source Project: big-c   Source File: INodeReference.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Clean the subtree under this reference for the given snapshot range.
 * When the whole current state (with no prior snapshot) is being removed,
 * this reference is destroyed outright; otherwise cleanup is delegated to
 * the referred inode.
 */
@Override
public QuotaCounts cleanSubtree(BlockStoragePolicySuite bsps, int snapshot, int prior,
    BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes) {
  if (snapshot == Snapshot.CURRENT_STATE_ID
      && prior == Snapshot.NO_SNAPSHOT_ID) {
    // Deleting the current state with nothing prior: destroy everything and
    // report the usage that was released.
    QuotaCounts counts = new QuotaCounts.Builder().build();
    this.computeQuotaUsage(bsps, counts, true);
    destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    return counts;
  } else {
    // if prior is NO_SNAPSHOT_ID, we need to check snapshot belonging to 
    // the previous WithName instance
    if (prior == Snapshot.NO_SNAPSHOT_ID) {
      prior = getPriorSnapshot(this);
    }
    // if prior is not NO_SNAPSHOT_ID, and prior is not before the
    // to-be-deleted snapshot, we can quit here and leave the snapshot
    // deletion work to the src tree of rename
    if (snapshot != Snapshot.CURRENT_STATE_ID
        && prior != Snapshot.NO_SNAPSHOT_ID
        && Snapshot.ID_INTEGER_COMPARATOR.compare(snapshot, prior) <= 0) {
      return new QuotaCounts.Builder().build();
    }
    return getReferredINode().cleanSubtree(bsps, snapshot, prior,
        collectedBlocks, removedINodes);
  }
}
 
Example 25
Source Project: hadoop   Source File: JsonUtil.java    License: Apache License 2.0 5 votes vote down vote up
/** Convert a Json map to a HdfsFileStatus object. */
public static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
  if (json == null) {
    return null;
  }

  final Map<?, ?> m = includesType ? 
      (Map<?, ?>)json.get(FileStatus.class.getSimpleName()) : json;
  final String localName = (String) m.get("pathSuffix");
  final PathType type = PathType.valueOf((String) m.get("type"));
  final byte[] symlink = type != PathType.SYMLINK? null
      : DFSUtil.string2Bytes((String)m.get("symlink"));

  final long len = ((Number) m.get("length")).longValue();
  final String owner = (String) m.get("owner");
  final String group = (String) m.get("group");
  final FsPermission permission = toFsPermission((String) m.get("permission"),
    (Boolean)m.get("aclBit"), (Boolean)m.get("encBit"));
  final long aTime = ((Number) m.get("accessTime")).longValue();
  final long mTime = ((Number) m.get("modificationTime")).longValue();
  final long blockSize = ((Number) m.get("blockSize")).longValue();
  final short replication = ((Number) m.get("replication")).shortValue();
  final long fileId = m.containsKey("fileId") ?
      ((Number) m.get("fileId")).longValue() : INodeId.GRANDFATHER_INODE_ID;
  final int childrenNum = getInt(m, "childrenNum", -1);
  final byte storagePolicy = m.containsKey("storagePolicy") ?
      (byte) ((Number) m.get("storagePolicy")).longValue() :
      BlockStoragePolicySuite.ID_UNSPECIFIED;
  return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
      blockSize, mTime, aTime, permission, owner, group, symlink,
      DFSUtil.string2Bytes(localName), fileId, childrenNum, null, storagePolicy);
}
 
Example 26
Source Project: big-c   Source File: INodeSymlink.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Clean this symlink's subtree. A symlink has no children, so the only
 * work is destroying the symlink itself when the current state is removed
 * with no prior snapshot; the released quota is always one namespace entry.
 */
@Override
public QuotaCounts cleanSubtree(BlockStoragePolicySuite bsps,
    final int snapshotId, int priorSnapshotId,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final boolean removingCurrentState =
      snapshotId == Snapshot.CURRENT_STATE_ID
          && priorSnapshotId == Snapshot.NO_SNAPSHOT_ID;
  if (removingCurrentState) {
    destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
  }
  return new QuotaCounts.Builder().nameSpace(1).build();
}
 
Example 27
Source Project: hadoop   Source File: FSDirStatAndListingOp.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Get a listing of all the snapshots of a snapshottable directory
 *
 * @param fsd the directory tree (read lock must already be held)
 * @param src path ending in the ".snapshot" suffix
 * @param startAfter list entries strictly after this snapshot name
 * @throws SnapshotException if the directory is not snapshottable
 * @throws IOException if path resolution fails
 */
private static DirectoryListing getSnapshotsListing(
    FSDirectory fsd, String src, byte[] startAfter)
    throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  // Strip the ".snapshot" suffix to get the snapshottable directory path.
  final String dirPath = FSDirectory.normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = fsd.getINode(dirPath);
  final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
  final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException(
        "Directory is not a snapshottable directory: " + dirPath);
  }
  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  // binarySearch returns (-insertionPoint - 1) when not found; either way
  // the listing starts strictly after 'startAfter'.
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  // Cap the page size by the configured ls limit.
  int numOfListing = Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
  final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(fsd, src, sRoot.getLocalNameBytes(), sRoot,
        BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
        false, INodesInPath.fromINode(sRoot));
  }
  // The second argument is the number of remaining (unlisted) entries.
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
 
Example 28
Source Project: big-c   Source File: StoragePolicyAdmin.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Resolve and print the storage policy of the file/directory given via the
 * {@code -path} argument.
 *
 * @return 0 on success (policy printed or reported unspecified), 1 when the
 *         -path argument is missing, 2 on any lookup failure
 * @throws IOException if obtaining the DistributedFileSystem fails
 */
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  final String path = StringUtils.popOptionWithArgument("-path", args);
  if (path == null) {
    System.err.println("Please specify the path with -path.\nUsage:" +
        getLongUsage());
    return 1;
  }

  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    HdfsFileStatus status = dfs.getClient().getFileInfo(path);
    if (status == null) {
      System.err.println("File/Directory does not exist: " + path);
      return 2;
    }
    byte storagePolicyId = status.getStoragePolicy();
    // An unspecified policy is a normal state, not an error.
    if (storagePolicyId == BlockStoragePolicySuite.ID_UNSPECIFIED) {
      System.out.println("The storage policy of " + path + " is unspecified");
      return 0;
    }
    // Match the file's policy id against the suite known to the cluster.
    BlockStoragePolicy[] policies = dfs.getStoragePolicies();
    for (BlockStoragePolicy p : policies) {
      if (p.getId() == storagePolicyId) {
        System.out.println("The storage policy of " + path + ":\n" + p);
        return 0;
      }
    }
  } catch (Exception e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  // The id did not match any known policy.
  System.err.println("Cannot identify the storage policy for " + path);
  return 2;
}
 
Example 29
Source Project: hadoop   Source File: INodeSymlink.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Clean this symlink's subtree. A symlink has no children, so the only
 * work is destroying the symlink itself when the current state is removed
 * with no prior snapshot; the released quota is always one namespace entry.
 */
@Override
public QuotaCounts cleanSubtree(BlockStoragePolicySuite bsps,
    final int snapshotId, int priorSnapshotId,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final boolean removingCurrentState =
      snapshotId == Snapshot.CURRENT_STATE_ID
          && priorSnapshotId == Snapshot.NO_SNAPSHOT_ID;
  if (removingCurrentState) {
    destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
  }
  return new QuotaCounts.Builder().nameSpace(1).build();
}
 
Example 30
Source Project: big-c   Source File: FileDiff.java    License: Apache License 2.0 5 votes vote down vote up
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
    BlockStoragePolicySuite bsps, INodeFile currentINode,
    FileDiff posterior, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  FileWithSnapshotFeature sf = currentINode.getFileWithSnapshotFeature();
  assert sf != null : "FileWithSnapshotFeature is null";
  return sf.updateQuotaAndCollectBlocks(
      bsps, currentINode, posterior, collectedBlocks, removedINodes);
}