Java Code Examples for org.apache.hadoop.hdfs.util.ReadOnlyList

The following examples show how to use org.apache.hadoop.hdfs.util.ReadOnlyList. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage in the sidebar.
Example 1
Source Project: hadoop   Source File: FSPermissionChecker.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Recursively checks {@code access} on every non-empty directory in the
 * subtree rooted at {@code inode}. Guarded by {@link FSNamesystem#readLock()}.
 *
 * @param pathByNameArr path components of the original request, used to
 *                      resolve per-inode attributes
 * @param pathIdx index of {@code inode} within {@code pathByNameArr}
 * @param inode subtree root; non-directories are ignored
 * @param snapshotId snapshot to resolve children lists against
 * @param access the FsAction required on each directory
 * @param ignoreEmptyDir if true, empty directories are not checked
 * @throws AccessControlException if any checked directory denies access
 */
private void checkSubAccess(byte[][] pathByNameArr, int pathIdx, INode inode,
    int snapshotId, FsAction access, boolean ignoreEmptyDir)
    throws AccessControlException {
  if (inode == null || !inode.isDirectory()) {
    return;
  }

  // Iterative DFS to avoid deep recursion on deep directory trees.
  Stack<INodeDirectory> directories = new Stack<INodeDirectory>();
  for(directories.push(inode.asDirectory()); !directories.isEmpty(); ) {
    INodeDirectory d = directories.pop();
    ReadOnlyList<INode> cList = d.getChildrenList(snapshotId);
    if (!(cList.isEmpty() && ignoreEmptyDir)) {
      //TODO have to figure this out with inodeattribute provider
      // Fix: report the directory actually being checked (d), not the
      // traversal root (inode); otherwise every AccessControlException in
      // the subtree names the root path instead of the offending directory.
      check(getINodeAttrs(pathByNameArr, pathIdx, d, snapshotId),
          d.getFullPathName(), access);
    }

    for(INode child : cList) {
      if (child.isDirectory()) {
        directories.push(child.asDirectory());
      }
    }
  }
}
 
Example 2
Source Project: hadoop   Source File: FSImageFormat.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Save children INodes.
 * @param children The list of children INodes
 * @param out The DataOutputStream to write
 * @param inSnapshot Whether the parent directory or its ancestor is in
 *                   the deleted list of some snapshot (caused by rename or
 *                   deletion)
 * @param counter Counter to increment for namenode startup progress
 * @return Number of children that are directory
 */
private int saveChildren(ReadOnlyList<INode> children,
    DataOutputStream out, boolean inSnapshot, Counter counter)
    throws IOException {
  // Write normal children INode.
  // Format: child count first, then each child inode in list order.
  out.writeInt(children.size());
  int dirNum = 0;
  for(INode child : children) {
    // print all children first
    // TODO: for HDFS-5428, we cannot change the format/content of fsimage
    // here, thus even if the parent directory is in snapshot, we still
    // do not handle INodeUC as those stored in deleted list
    saveINode2Image(child, out, false, referenceMap, counter);
    if (child.isDirectory()) {
      dirNum++;
    } else if (inSnapshot && child.isFile()
        && child.asFile().isUnderConstruction()) {
      // Remember under-construction files seen inside a snapshot so they
      // can be serialized separately later.
      this.snapshotUCMap.put(child.getId(), child.asFile());
    }
    // Periodically give a cancelled checkpoint a chance to abort.
    if (checkCancelCounter++ % CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  return dirNum;
}
 
Example 3
Source Project: hadoop   Source File: TestSnapshotRename.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Check the correctness of snapshot list within snapshottable dir:
 * snapshots must match the expected names both in name order and in
 * creation-time (diff list) order.
 */
private void checkSnapshotList(INodeDirectory srcRoot,
    String[] sortedNames, String[] names) {
  assertTrue(srcRoot.isSnapshottable());

  // Verify the by-name ordering of the snapshot list.
  ReadOnlyList<Snapshot> listByName = srcRoot
      .getDirectorySnapshottableFeature().getSnapshotList();
  assertEquals(sortedNames.length, listByName.size());
  int idx = 0;
  for (Snapshot s : listByName) {
    assertEquals(sortedNames[idx++], s.getRoot().getLocalName());
  }

  // Verify the by-creation-time ordering via the directory diff list.
  List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
  assertEquals(names.length, listByTime.size());
  idx = 0;
  for (DirectoryDiff diff : listByTime) {
    Snapshot s = srcRoot.getDirectorySnapshottableFeature()
        .getSnapshotById(diff.getSnapshotId());
    assertEquals(names[idx++], s.getRoot().getLocalName());
  }
}
 
Example 4
Source Project: big-c   Source File: FSPermissionChecker.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Recursively checks {@code access} on every non-empty directory in the
 * subtree rooted at {@code inode}. Guarded by {@link FSNamesystem#readLock()}.
 *
 * @param pathByNameArr path components of the original request, used to
 *                      resolve per-inode attributes
 * @param pathIdx index of {@code inode} within {@code pathByNameArr}
 * @param inode subtree root; non-directories are ignored
 * @param snapshotId snapshot to resolve children lists against
 * @param access the FsAction required on each directory
 * @param ignoreEmptyDir if true, empty directories are not checked
 * @throws AccessControlException if any checked directory denies access
 */
private void checkSubAccess(byte[][] pathByNameArr, int pathIdx, INode inode,
    int snapshotId, FsAction access, boolean ignoreEmptyDir)
    throws AccessControlException {
  if (inode == null || !inode.isDirectory()) {
    return;
  }

  // Iterative DFS to avoid deep recursion on deep directory trees.
  Stack<INodeDirectory> directories = new Stack<INodeDirectory>();
  for(directories.push(inode.asDirectory()); !directories.isEmpty(); ) {
    INodeDirectory d = directories.pop();
    ReadOnlyList<INode> cList = d.getChildrenList(snapshotId);
    if (!(cList.isEmpty() && ignoreEmptyDir)) {
      //TODO have to figure this out with inodeattribute provider
      // Fix: report the directory actually being checked (d), not the
      // traversal root (inode); otherwise every AccessControlException in
      // the subtree names the root path instead of the offending directory.
      check(getINodeAttrs(pathByNameArr, pathIdx, d, snapshotId),
          d.getFullPathName(), access);
    }

    for(INode child : cList) {
      if (child.isDirectory()) {
        directories.push(child.asDirectory());
      }
    }
  }
}
 
Example 5
Source Project: big-c   Source File: FSImageFormat.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Save children INodes.
 * @param children The list of children INodes
 * @param out The DataOutputStream to write
 * @param inSnapshot Whether the parent directory or its ancestor is in
 *                   the deleted list of some snapshot (caused by rename or
 *                   deletion)
 * @param counter Counter to increment for namenode startup progress
 * @return Number of children that are directory
 */
private int saveChildren(ReadOnlyList<INode> children,
    DataOutputStream out, boolean inSnapshot, Counter counter)
    throws IOException {
  // Write normal children INode.
  // Format: child count first, then each child inode in list order.
  out.writeInt(children.size());
  int dirNum = 0;
  for(INode child : children) {
    // print all children first
    // TODO: for HDFS-5428, we cannot change the format/content of fsimage
    // here, thus even if the parent directory is in snapshot, we still
    // do not handle INodeUC as those stored in deleted list
    saveINode2Image(child, out, false, referenceMap, counter);
    if (child.isDirectory()) {
      dirNum++;
    } else if (inSnapshot && child.isFile()
        && child.asFile().isUnderConstruction()) {
      // Remember under-construction files seen inside a snapshot so they
      // can be serialized separately later.
      this.snapshotUCMap.put(child.getId(), child.asFile());
    }
    // Periodically give a cancelled checkpoint a chance to abort.
    if (checkCancelCounter++ % CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  return dirNum;
}
 
Example 6
Source Project: big-c   Source File: TestSnapshotRename.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Check the correctness of snapshot list within snapshottable dir:
 * snapshots must match the expected names both in name order and in
 * creation-time (diff list) order.
 */
private void checkSnapshotList(INodeDirectory srcRoot,
    String[] sortedNames, String[] names) {
  assertTrue(srcRoot.isSnapshottable());

  // Verify the by-name ordering of the snapshot list.
  ReadOnlyList<Snapshot> listByName = srcRoot
      .getDirectorySnapshottableFeature().getSnapshotList();
  assertEquals(sortedNames.length, listByName.size());
  int idx = 0;
  for (Snapshot s : listByName) {
    assertEquals(sortedNames[idx++], s.getRoot().getLocalName());
  }

  // Verify the by-creation-time ordering via the directory diff list.
  List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
  assertEquals(names.length, listByTime.size());
  idx = 0;
  for (DirectoryDiff diff : listByTime) {
    Snapshot s = srcRoot.getDirectorySnapshottableFeature()
        .getSnapshotById(diff.getSnapshotId());
    assertEquals(names[idx++], s.getRoot().getLocalName());
  }
}
 
Example 7
Source Project: hadoop   Source File: FSDirStatAndListingOp.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Get a listing of all the snapshots of a snapshottable directory,
 * resuming after {@code startAfter} and capped by the directory's ls limit.
 */
private static DirectoryListing getSnapshotsListing(
    FSDirectory fsd, String src, byte[] startAfter)
    throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  // Strip the trailing ".snapshot" component to get the snapshottable dir.
  final String dirPath = FSDirectory.normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = fsd.getINode(dirPath);
  final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
  final DirectorySnapshottableFeature sf =
      dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException(
        "Directory is not a snapshottable directory: " + dirPath);
  }

  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  // binarySearch returns the index when found, or (-insertionPoint - 1)
  // when not found; either way resume listing just after startAfter.
  final int pos = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  final int skipSize = pos < 0 ? -pos - 1 : pos + 1;
  final int numOfListing =
      Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
  final HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    final Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(fsd, src, sRoot.getLocalNameBytes(), sRoot,
        BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
        false, INodesInPath.fromINode(sRoot));
  }
  // The second argument tells the client how many entries remain.
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
 
Example 8
Source Project: hadoop   Source File: FSDirRenameOp.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Validates that an existing rename destination may be replaced by the
 * source: both must be the same kind (dir vs non-dir), the overwrite flag
 * must be set, and a directory destination must be empty.
 *
 * @throws IOException if source/destination kinds differ or the destination
 *         directory is non-empty
 * @throws FileAlreadyExistsException if overwrite was not requested
 */
private static void validateOverwrite(
    String src, String dst, boolean overwrite, INode srcInode, INode dstInode)
    throws IOException {
  // It's OK to rename a file to a symlink and vice versa
  if (dstInode.isDirectory() != srcInode.isDirectory()) {
    final String error = "Source " + src + " and destination " + dst
        + " must both be directories";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
        + error);
    throw new IOException(error);
  }
  // If destination exists, overwrite flag must be true
  if (!overwrite) {
    final String error = "rename destination " + dst + " already exists";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
        + error);
    throw new FileAlreadyExistsException(error);
  }
  // A directory destination may only be overwritten when it is empty.
  if (dstInode.isDirectory()) {
    final ReadOnlyList<INode> children = dstInode.asDirectory()
        .getChildrenList(Snapshot.CURRENT_STATE_ID);
    if (!children.isEmpty()) {
      final String error = "rename destination directory is not empty: " + dst;
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + error);
      throw new IOException(error);
    }
  }
}
 
Example 9
Source Project: hadoop   Source File: FSImageFormatPBINode.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Serializes the INode-directory section of the fsimage: for every
 * directory inode in the inode map, writes one delimited DirEntry listing
 * the ids of its non-reference children and, for reference children, their
 * index in the saver context's reference list.
 */
void serializeINodeDirectorySection(OutputStream out) throws IOException {
  Iterator<INodeWithAdditionalFields> iter = fsn.getFSDirectory()
      .getINodeMap().getMapIterator();
  final ArrayList<INodeReference> refList = parent.getSaverContext()
      .getRefList();
  int i = 0;
  while (iter.hasNext()) {
    INodeWithAdditionalFields n = iter.next();
    // Only directories contribute entries to this section.
    if (!n.isDirectory()) {
      continue;
    }

    // Children as of the current state.
    ReadOnlyList<INode> children = n.asDirectory().getChildrenList(
        Snapshot.CURRENT_STATE_ID);
    if (children.size() > 0) {
      INodeDirectorySection.DirEntry.Builder b = INodeDirectorySection.
          DirEntry.newBuilder().setParent(n.getId());
      for (INode inode : children) {
        if (!inode.isReference()) {
          b.addChildren(inode.getId());
        } else {
          // References are serialized by their position in refList.
          refList.add(inode.asReference());
          b.addRefChildren(refList.size() - 1);
        }
      }
      INodeDirectorySection.DirEntry e = b.build();
      e.writeDelimitedTo(out);
    }

    ++i;
    // Periodically check whether the image save has been cancelled.
    if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(summary,
      FSImageFormatProtobuf.SectionName.INODE_DIR);
}
 
Example 10
Source Project: hadoop   Source File: CacheManager.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Computes the needed number of bytes and files for a path.
 * @return CacheDirectiveStats describing the needed stats for this path
 */
private CacheDirectiveStats computeNeeded(String path, short replication) {
  final FSDirectory fsDir = namesystem.getFSDirectory();
  final CacheDirectiveStats.Builder builder = new CacheDirectiveStats.Builder();

  final INode node;
  try {
    node = fsDir.getINode(path);
  } catch (UnresolvedLinkException e) {
    // We don't cache through symlinks
    return builder.build();
  }
  if (node == null) {
    return builder.build();
  }

  long requestedBytes = 0;
  long requestedFiles = 0;
  if (node.isFile()) {
    // A single file: its full size is needed.
    requestedFiles = 1;
    requestedBytes = node.asFile().computeFileSize();
  } else if (node.isDirectory()) {
    // One level only: sum the sizes of the direct file children.
    final ReadOnlyList<INode> children = node.asDirectory()
        .getChildrenList(Snapshot.CURRENT_STATE_ID);
    requestedFiles = children.size();
    for (INode child : children) {
      if (child.isFile()) {
        requestedBytes += child.asFile().computeFileSize();
      }
    }
  }
  // NOTE(review): the "needed" file count is stored via setFilesCached();
  // confirm this is the intended builder setter rather than a
  // setFilesNeeded()-style counterpart.
  return new CacheDirectiveStats.Builder()
      .setBytesNeeded(requestedBytes)
      .setFilesCached(requestedFiles)
      .build();
}
 
Example 11
Source Project: hadoop   Source File: SnapshotFSImageFormat.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Save snapshots and snapshot quota for a snapshottable directory.
 * @param current The directory that the snapshots belongs to.
 * @param out The {@link DataOutput} to write.
 * @throws IOException
 */
public static void saveSnapshots(INodeDirectory current, DataOutput out)
    throws IOException {
  final DirectorySnapshottableFeature sf =
      current.getDirectorySnapshottableFeature();
  Preconditions.checkArgument(sf != null);

  // list of snapshots in snapshotsByNames: count followed by each id
  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  out.writeInt(snapshots.size());
  for (Snapshot snapshot : snapshots) {
    out.writeInt(snapshot.getId());
  }
  // snapshot quota
  out.writeInt(sf.getSnapshotQuota());
}
 
Example 12
Source Project: hadoop   Source File: DirectoryWithSnapshotFeature.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * @return The children list of a directory in a snapshot.
 *         Since the snapshot is read-only, the logical view of the list is
 *         never changed although the internal data structure may mutate.
 */
private ReadOnlyList<INode> getChildrenList(final INodeDirectory currentDir) {
  return new ReadOnlyList<INode>() {
    // Lazily materialized view; built on first iterator()/get() call.
    private List<INode> children = null;

    // Builds the snapshot's children list by combining this diff with all
    // posterior diffs and applying the combined diff to the current list.
    private List<INode> initChildren() {
      if (children == null) {
        final ChildrenDiff combined = new ChildrenDiff();
        for (DirectoryDiff d = DirectoryDiff.this; d != null; 
            d = d.getPosterior()) {
          combined.combinePosterior(d.diff, null);
        }
        children = combined.apply2Current(ReadOnlyList.Util.asList(
            currentDir.getChildrenList(Snapshot.CURRENT_STATE_ID)));
      }
      return children;
    }

    @Override
    public Iterator<INode> iterator() {
      return initChildren().iterator();
    }

    @Override
    public boolean isEmpty() {
      // Size is recorded in the diff, so no need to materialize the list.
      return childrenSize == 0;
    }

    @Override
    public int size() {
      return childrenSize;
    }

    @Override
    public INode get(int i) {
      return initChildren().get(i);
    }
  };
}
 
Example 13
Source Project: hadoop   Source File: DirectoryWithSnapshotFeature.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * @return If there is no corresponding directory diff for the given
 *         snapshot, this means that the current children list should be
 *         returned for the snapshot. Otherwise we calculate the children
 *         list for the snapshot and return it.
 */
public ReadOnlyList<INode> getChildrenList(INodeDirectory currentINode,
    final int snapshotId) {
  final DirectoryDiff diff = diffs.getDiffById(snapshotId);
  if (diff == null) {
    // No diff recorded: the snapshot's view equals the current list.
    return currentINode.getChildrenList(Snapshot.CURRENT_STATE_ID);
  }
  return diff.getChildrenList(currentINode);
}
 
Example 14
Source Project: hadoop   Source File: INodeDirectory.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * @param name the name of the child
 * @param snapshotId
 *          if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
 *          from the corresponding snapshot; otherwise, get the result from
 *          the current directory.
 * @return the child inode.
 */
public INode getChild(byte[] name, int snapshotId) {
  if (snapshotId != Snapshot.CURRENT_STATE_ID) {
    final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
    if (sf != null) {
      // Snapshot lookup is delegated to the snapshot feature.
      return sf.getChild(this, name, snapshotId);
    }
  }
  // Current state (or no snapshot feature): binary search the sorted
  // children list directly.
  final ReadOnlyList<INode> children = getCurrentChildrenList();
  final int pos = ReadOnlyList.Util.binarySearch(children, name);
  return pos < 0 ? null : children.get(pos);
}
 
Example 15
Source Project: hadoop   Source File: INodeDirectory.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Given a child's name, return the index of the next child
 *
 * @param name a child's name
 * @return the index of the next child
 */
static int nextChild(ReadOnlyList<INode> children, byte[] name) {
  // An empty name sorts before every child: resume from the first one.
  if (name.length == 0) {
    return 0;
  }
  final int pos = ReadOnlyList.Util.binarySearch(children, name);
  // Found: the next child is one past the match. Not found: binarySearch
  // returns (-insertionPoint - 1), and the insertion point itself is the
  // index of the next child.
  return pos >= 0 ? pos + 1 : -(pos + 1);
}
 
Example 16
Source Project: hadoop   Source File: INodeDirectory.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
    byte blockStoragePolicyId, QuotaCounts counts, boolean useCache,
    int lastSnapshotId) {
  final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();

  // we are computing the quota usage for a specific snapshot here, i.e., the
  // computation only includes files/directories that exist at the time of the
  // given snapshot
  if (sf != null && lastSnapshotId != Snapshot.CURRENT_STATE_ID
      && !(useCache && isQuotaSet())) {
    // Recurse over the children as they existed in that snapshot, resolving
    // each child's effective storage policy from this directory's.
    ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshotId);
    for (INode child : childrenList) {
      final byte childPolicyId = child.getStoragePolicyIDForQuota(blockStoragePolicyId);
      child.computeQuotaUsage(bsps, childPolicyId, counts, useCache,
          lastSnapshotId);
    }
    // Count this directory itself in the namespace usage.
    counts.addNameSpace(1);
    return counts;
  }
  
  // compute the quota usage in the scope of the current directory tree
  final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
  if (useCache && q != null && q.isQuotaSet()) { // use the cached quota
    return q.AddCurrentSpaceUsage(counts);
  } else {
    // NOTE(review): when a quota feature exists but no quota is set, the
    // cache is bypassed for the subtree — presumably its cached usage is
    // not maintained in that state; confirm against DirectoryWithQuotaFeature.
    useCache = q != null && !q.isQuotaSet() ? false : useCache;
    return computeDirectoryQuotaUsage(bsps, blockStoragePolicyId, counts,
        useCache, lastSnapshotId);
  }
}
 
Example 17
Source Project: hadoop   Source File: INodeDirectory.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Computes the content summary for this directory by traversing its
 * children in the given snapshot, tolerating lock relinquish/reacquire in
 * the middle of the traversal by repositioning on the (possibly changed)
 * children list.
 */
protected ContentSummaryComputationContext computeDirectoryContentSummary(
    ContentSummaryComputationContext summary, int snapshotId) {
  ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
  // Explicit traversing is done to enable repositioning after relinquishing
  // and reacquiring locks.
  for (int i = 0;  i < childrenList.size(); i++) {
    INode child = childrenList.get(i);
    byte[] childName = child.getLocalNameBytes();

    long lastYieldCount = summary.getYieldCount();
    child.computeContentSummary(summary);

    // Check whether the computation was paused in the subtree.
    // The counts may be off, but traversing the rest of children
    // should be made safe.
    if (lastYieldCount == summary.getYieldCount()) {
      continue;
    }
    // The locks were released and reacquired. Check parent first.
    if (getParent() == null) {
      // Stop further counting and return whatever we have so far.
      break;
    }
    // Obtain the children list again since it may have been modified.
    childrenList = getChildrenList(snapshotId);
    // Reposition in case the children list is changed. Decrement by 1
    // since it will be incremented when loops.
    i = nextChild(childrenList, childName) - 1;
  }

  // Increment the directory count for this directory.
  summary.getCounts().addContent(Content.DIRECTORY, 1);
  // Relinquish and reacquire locks if necessary.
  summary.yield();
  return summary;
}
 
Example 18
Source Project: big-c   Source File: FSDirStatAndListingOp.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Get a listing of all the snapshots of a snapshottable directory,
 * resuming after {@code startAfter} and capped by the directory's ls limit.
 */
private static DirectoryListing getSnapshotsListing(
    FSDirectory fsd, String src, byte[] startAfter)
    throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  // Strip the trailing ".snapshot" component to get the snapshottable dir.
  final String dirPath = FSDirectory.normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = fsd.getINode(dirPath);
  final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
  final DirectorySnapshottableFeature sf =
      dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException(
        "Directory is not a snapshottable directory: " + dirPath);
  }

  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  // binarySearch returns the index when found, or (-insertionPoint - 1)
  // when not found; either way resume listing just after startAfter.
  final int pos = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  final int skipSize = pos < 0 ? -pos - 1 : pos + 1;
  final int numOfListing =
      Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
  final HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    final Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(fsd, src, sRoot.getLocalNameBytes(), sRoot,
        BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
        false, INodesInPath.fromINode(sRoot));
  }
  // The second argument tells the client how many entries remain.
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
 
Example 19
Source Project: big-c   Source File: FSDirRenameOp.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Validates that an existing rename destination may be replaced by the
 * source: both must be the same kind (dir vs non-dir), the overwrite flag
 * must be set, and a directory destination must be empty.
 *
 * @throws IOException if source/destination kinds differ or the destination
 *         directory is non-empty
 * @throws FileAlreadyExistsException if overwrite was not requested
 */
private static void validateOverwrite(
    String src, String dst, boolean overwrite, INode srcInode, INode dstInode)
    throws IOException {
  // It's OK to rename a file to a symlink and vice versa
  if (dstInode.isDirectory() != srcInode.isDirectory()) {
    final String error = "Source " + src + " and destination " + dst
        + " must both be directories";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
        + error);
    throw new IOException(error);
  }
  // If destination exists, overwrite flag must be true
  if (!overwrite) {
    final String error = "rename destination " + dst + " already exists";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
        + error);
    throw new FileAlreadyExistsException(error);
  }
  // A directory destination may only be overwritten when it is empty.
  if (dstInode.isDirectory()) {
    final ReadOnlyList<INode> children = dstInode.asDirectory()
        .getChildrenList(Snapshot.CURRENT_STATE_ID);
    if (!children.isEmpty()) {
      final String error = "rename destination directory is not empty: " + dst;
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + error);
      throw new IOException(error);
    }
  }
}
 
Example 20
Source Project: big-c   Source File: FSImageFormatPBINode.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Serializes the INode-directory section of the fsimage: for every
 * directory inode in the inode map, writes one delimited DirEntry listing
 * the ids of its non-reference children and, for reference children, their
 * index in the saver context's reference list.
 */
void serializeINodeDirectorySection(OutputStream out) throws IOException {
  Iterator<INodeWithAdditionalFields> iter = fsn.getFSDirectory()
      .getINodeMap().getMapIterator();
  final ArrayList<INodeReference> refList = parent.getSaverContext()
      .getRefList();
  int i = 0;
  while (iter.hasNext()) {
    INodeWithAdditionalFields n = iter.next();
    // Only directories contribute entries to this section.
    if (!n.isDirectory()) {
      continue;
    }

    // Children as of the current state.
    ReadOnlyList<INode> children = n.asDirectory().getChildrenList(
        Snapshot.CURRENT_STATE_ID);
    if (children.size() > 0) {
      INodeDirectorySection.DirEntry.Builder b = INodeDirectorySection.
          DirEntry.newBuilder().setParent(n.getId());
      for (INode inode : children) {
        if (!inode.isReference()) {
          b.addChildren(inode.getId());
        } else {
          // References are serialized by their position in refList.
          refList.add(inode.asReference());
          b.addRefChildren(refList.size() - 1);
        }
      }
      INodeDirectorySection.DirEntry e = b.build();
      e.writeDelimitedTo(out);
    }

    ++i;
    // Periodically check whether the image save has been cancelled.
    if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(summary,
      FSImageFormatProtobuf.SectionName.INODE_DIR);
}
 
Example 21
Source Project: big-c   Source File: CacheManager.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Computes the needed number of bytes and files for a path.
 * @return CacheDirectiveStats describing the needed stats for this path
 */
private CacheDirectiveStats computeNeeded(String path, short replication) {
  final FSDirectory fsDir = namesystem.getFSDirectory();
  final CacheDirectiveStats.Builder builder = new CacheDirectiveStats.Builder();

  final INode node;
  try {
    node = fsDir.getINode(path);
  } catch (UnresolvedLinkException e) {
    // We don't cache through symlinks
    return builder.build();
  }
  if (node == null) {
    return builder.build();
  }

  long requestedBytes = 0;
  long requestedFiles = 0;
  if (node.isFile()) {
    // A single file: its full size is needed.
    requestedFiles = 1;
    requestedBytes = node.asFile().computeFileSize();
  } else if (node.isDirectory()) {
    // One level only: sum the sizes of the direct file children.
    final ReadOnlyList<INode> children = node.asDirectory()
        .getChildrenList(Snapshot.CURRENT_STATE_ID);
    requestedFiles = children.size();
    for (INode child : children) {
      if (child.isFile()) {
        requestedBytes += child.asFile().computeFileSize();
      }
    }
  }
  // NOTE(review): the "needed" file count is stored via setFilesCached();
  // confirm this is the intended builder setter rather than a
  // setFilesNeeded()-style counterpart.
  return new CacheDirectiveStats.Builder()
      .setBytesNeeded(requestedBytes)
      .setFilesCached(requestedFiles)
      .build();
}
 
Example 22
Source Project: big-c   Source File: SnapshotFSImageFormat.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Save snapshots and snapshot quota for a snapshottable directory.
 * @param current The directory that the snapshots belongs to.
 * @param out The {@link DataOutput} to write.
 * @throws IOException
 */
public static void saveSnapshots(INodeDirectory current, DataOutput out)
    throws IOException {
  final DirectorySnapshottableFeature sf =
      current.getDirectorySnapshottableFeature();
  Preconditions.checkArgument(sf != null);

  // list of snapshots in snapshotsByNames: count followed by each id
  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  out.writeInt(snapshots.size());
  for (Snapshot snapshot : snapshots) {
    out.writeInt(snapshot.getId());
  }
  // snapshot quota
  out.writeInt(sf.getSnapshotQuota());
}
 
Example 23
Source Project: big-c   Source File: DirectoryWithSnapshotFeature.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * @return The children list of a directory in a snapshot.
 *         Since the snapshot is read-only, the logical view of the list is
 *         never changed although the internal data structure may mutate.
 */
private ReadOnlyList<INode> getChildrenList(final INodeDirectory currentDir) {
  return new ReadOnlyList<INode>() {
    // Lazily materialized view; built on first iterator()/get() call.
    private List<INode> children = null;

    // Builds the snapshot's children list by combining this diff with all
    // posterior diffs and applying the combined diff to the current list.
    private List<INode> initChildren() {
      if (children == null) {
        final ChildrenDiff combined = new ChildrenDiff();
        for (DirectoryDiff d = DirectoryDiff.this; d != null; 
            d = d.getPosterior()) {
          combined.combinePosterior(d.diff, null);
        }
        children = combined.apply2Current(ReadOnlyList.Util.asList(
            currentDir.getChildrenList(Snapshot.CURRENT_STATE_ID)));
      }
      return children;
    }

    @Override
    public Iterator<INode> iterator() {
      return initChildren().iterator();
    }

    @Override
    public boolean isEmpty() {
      // Size is recorded in the diff, so no need to materialize the list.
      return childrenSize == 0;
    }

    @Override
    public int size() {
      return childrenSize;
    }

    @Override
    public INode get(int i) {
      return initChildren().get(i);
    }
  };
}
 
Example 24
Source Project: big-c   Source File: DirectoryWithSnapshotFeature.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * @return If there is no corresponding directory diff for the given
 *         snapshot, this means that the current children list should be
 *         returned for the snapshot. Otherwise we calculate the children
 *         list for the snapshot and return it.
 */
public ReadOnlyList<INode> getChildrenList(INodeDirectory currentINode,
    final int snapshotId) {
  final DirectoryDiff diff = diffs.getDiffById(snapshotId);
  if (diff == null) {
    // No diff recorded: the snapshot's view equals the current list.
    return currentINode.getChildrenList(Snapshot.CURRENT_STATE_ID);
  }
  return diff.getChildrenList(currentINode);
}
 
Example 25
Source Project: big-c   Source File: INodeDirectory.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * @param name the name of the child
 * @param snapshotId
 *          if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
 *          from the corresponding snapshot; otherwise, get the result from
 *          the current directory.
 * @return the child inode.
 */
public INode getChild(byte[] name, int snapshotId) {
  if (snapshotId != Snapshot.CURRENT_STATE_ID) {
    final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
    if (sf != null) {
      // Snapshot lookup is delegated to the snapshot feature.
      return sf.getChild(this, name, snapshotId);
    }
  }
  // Current state (or no snapshot feature): binary search the sorted
  // children list directly.
  final ReadOnlyList<INode> children = getCurrentChildrenList();
  final int pos = ReadOnlyList.Util.binarySearch(children, name);
  return pos < 0 ? null : children.get(pos);
}
 
Example 26
Source Project: big-c   Source File: INodeDirectory.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Given a child's name, return the index of the next child
 *
 * @param name a child's name
 * @return the index of the next child
 */
static int nextChild(ReadOnlyList<INode> children, byte[] name) {
  // An empty name sorts before every child: resume from the first one.
  if (name.length == 0) {
    return 0;
  }
  final int pos = ReadOnlyList.Util.binarySearch(children, name);
  // Found: the next child is one past the match. Not found: binarySearch
  // returns (-insertionPoint - 1), and the insertion point itself is the
  // index of the next child.
  return pos >= 0 ? pos + 1 : -(pos + 1);
}
 
Example 27
Source Project: big-c   Source File: INodeDirectory.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
    byte blockStoragePolicyId, QuotaCounts counts, boolean useCache,
    int lastSnapshotId) {
  final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();

  // we are computing the quota usage for a specific snapshot here, i.e., the
  // computation only includes files/directories that exist at the time of the
  // given snapshot
  if (sf != null && lastSnapshotId != Snapshot.CURRENT_STATE_ID
      && !(useCache && isQuotaSet())) {
    // Recurse over the children as they existed in that snapshot, resolving
    // each child's effective storage policy from this directory's.
    ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshotId);
    for (INode child : childrenList) {
      final byte childPolicyId = child.getStoragePolicyIDForQuota(blockStoragePolicyId);
      child.computeQuotaUsage(bsps, childPolicyId, counts, useCache,
          lastSnapshotId);
    }
    // Count this directory itself in the namespace usage.
    counts.addNameSpace(1);
    return counts;
  }
  
  // compute the quota usage in the scope of the current directory tree
  final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
  if (useCache && q != null && q.isQuotaSet()) { // use the cached quota
    return q.AddCurrentSpaceUsage(counts);
  } else {
    // NOTE(review): when a quota feature exists but no quota is set, the
    // cache is bypassed for the subtree — presumably its cached usage is
    // not maintained in that state; confirm against DirectoryWithQuotaFeature.
    useCache = q != null && !q.isQuotaSet() ? false : useCache;
    return computeDirectoryQuotaUsage(bsps, blockStoragePolicyId, counts,
        useCache, lastSnapshotId);
  }
}
 
Example 28
Source Project: big-c   Source File: INodeDirectory.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Compute the content summary (file/directory/space counts) for this
 * directory's children at the given snapshot, cooperating with lock
 * relinquishment: if a child's computation yielded (released and
 * reacquired locks), the children list is re-fetched and traversal is
 * repositioned by name so the walk stays safe against concurrent edits.
 *
 * @param summary the accumulating computation context (also tracks yields)
 * @param snapshotId snapshot scope for the children list
 * @return the same {@code summary}, updated with this subtree's counts
 */
protected ContentSummaryComputationContext computeDirectoryContentSummary(
    ContentSummaryComputationContext summary, int snapshotId) {
  ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
  // Explicit traversing is done to enable repositioning after relinquishing
  // and reacquiring locks.
  for (int i = 0;  i < childrenList.size(); i++) {
    INode child = childrenList.get(i);
    // Remember the name before recursing so we can re-find our position if
    // the list changes while locks are released.
    byte[] childName = child.getLocalNameBytes();

    long lastYieldCount = summary.getYieldCount();
    child.computeContentSummary(summary);

    // Check whether the computation was paused in the subtree.
    // The counts may be off, but traversing the rest of children
    // should be made safe.
    if (lastYieldCount == summary.getYieldCount()) {
      continue;
    }
    // The locks were released and reacquired. Check parent first.
    if (getParent() == null) {
      // Stop further counting and return whatever we have so far.
      break;
    }
    // Obtain the children list again since it may have been modified.
    childrenList = getChildrenList(snapshotId);
    // Reposition in case the children list is changed. Decrement by 1
    // since it will be incremented when loops.
    i = nextChild(childrenList, childName) - 1;
  }

  // Increment the directory count for this directory.
  summary.getCounts().addContent(Content.DIRECTORY, 1);
  // Relinquish and reacquire locks if necessary.
  summary.yield();
  return summary;
}
 
Example 29
Source Project: hadoop   Source File: FSDirStatAndListingOp.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Get a partial listing of the indicated directory
 *
 * We will stop when any of the following conditions is met:
 * 1) this.lsLimit files have been added
 * 2) needLocation is true AND enough files have been added such
 * that at least this.lsLimit block locations are in the response
 *
 * @param fsd FSDirectory
 * @param iip the INodesInPath instance containing all the INodes along the
 *            path
 * @param src the directory name
 * @param startAfter the name to start listing after
 * @param needLocation if block locations are returned
 * @param isSuperUser whether the caller may see storage-policy ids
 * @return a partial listing starting after startAfter, or null if the
 *         target path does not exist
 * @throws IOException if the listing cannot be produced
 */
private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
    String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
    throws IOException {
  String srcs = FSDirectory.normalizePath(src);
  final boolean isRawPath = FSDirectory.isReservedRawName(src);

  fsd.readLock();
  try {
    // Listing of the ".snapshot" pseudo-directory is handled separately.
    if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
      return getSnapshotsListing(fsd, srcs, startAfter);
    }
    final int snapshot = iip.getPathSnapshotId();
    final INode targetNode = iip.getLastINode();
    if (targetNode == null)
      return null;
    // Only superusers are shown the real storage policy id.
    byte parentStoragePolicy = isSuperUser ?
        targetNode.getStoragePolicyID() : BlockStoragePolicySuite
        .ID_UNSPECIFIED;

    // A non-directory target yields a single-entry listing for itself.
    if (!targetNode.isDirectory()) {
      return new DirectoryListing(
          new HdfsFileStatus[]{createFileStatus(fsd, src,
              HdfsFileStatus.EMPTY_NAME, targetNode, needLocation,
              parentStoragePolicy, snapshot, isRawPath, iip)}, 0);
    }

    final INodeDirectory dirInode = targetNode.asDirectory();
    final ReadOnlyList<INode> contents = dirInode.getChildrenList(snapshot);
    // Resume the listing just after the startAfter name.
    int startChild = INodeDirectory.nextChild(contents, startAfter);
    int totalNumChildren = contents.size();
    int numOfListing = Math.min(totalNumChildren - startChild,
        fsd.getLsLimit());
    int locationBudget = fsd.getLsLimit();
    int listingCnt = 0;
    HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
    for (int i=0; i<numOfListing && locationBudget>0; i++) {
      INode cur = contents.get(startChild+i);
      // Symlinks have no storage policy of their own.
      byte curPolicy = isSuperUser && !cur.isSymlink()?
          cur.getLocalStoragePolicyID():
          BlockStoragePolicySuite.ID_UNSPECIFIED;
      listing[i] = createFileStatus(fsd, src, cur.getLocalNameBytes(), cur,
          needLocation, getStoragePolicyID(curPolicy,
              parentStoragePolicy), snapshot, isRawPath, iip);
      listingCnt++;
      if (needLocation) {
          // Once we hit lsLimit locations, stop.
          // This helps to prevent excessively large response payloads.
          // Approximate #locations with locatedBlockCount() * repl_factor
          LocatedBlocks blks =
              ((HdfsLocatedFileStatus)listing[i]).getBlockLocations();
          locationBudget -= (blks == null) ? 0 :
             blks.locatedBlockCount() * listing[i].getReplication();
      }
    }
    // truncate return array if necessary (the location budget ran out
    // before numOfListing entries were produced)
    if (listingCnt < numOfListing) {
        listing = Arrays.copyOf(listing, listingCnt);
    }
    // The second argument is the count of remaining (unlisted) children.
    return new DirectoryListing(
        listing, totalNumChildren-startChild-listingCnt);
  } finally {
    fsd.readUnlock();
  }
}
 
Example 30
Source Project: hadoop   Source File: Snapshot.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Returns the children visible at the given snapshot by delegating to the
 * parent directory's snapshot-aware children list.
 */
@Override
public ReadOnlyList<INode> getChildrenList(int snapshotId) {
  final INodeDirectory parentDir = getParent();
  return parentDir.getChildrenList(snapshotId);
}