org.apache.hadoop.hdfs.util.ReadOnlyList Java Examples

The following examples show how to use org.apache.hadoop.hdfs.util.ReadOnlyList. You can vote up the examples you like or vote down the ones you don't like, and follow the links above each example to the original project or source file. You may check out the related API usage in the sidebar.
Example #1
Source File: FSPermissionChecker.java — from the big-c project (Apache License 2.0)
/**
 * Recursively check {@code access} on every directory in the subtree rooted
 * at {@code inode}, using an explicit stack instead of recursion.
 * Guarded by {@link FSNamesystem#readLock()}.
 *
 * @param pathByNameArr path components used to resolve inode attributes
 * @param pathIdx index of {@code inode} within {@code pathByNameArr}
 * @param inode subtree root; non-directories (and null) are ignored
 * @param snapshotId snapshot to evaluate, or Snapshot.CURRENT_STATE_ID
 * @param access the FsAction being checked on each directory
 * @param ignoreEmptyDir when true, directories with no children are skipped
 * @throws AccessControlException if any directory in the subtree denies access
 */
private void checkSubAccess(byte[][] pathByNameArr, int pathIdx, INode inode,
    int snapshotId, FsAction access, boolean ignoreEmptyDir)
    throws AccessControlException {
  if (inode == null || !inode.isDirectory()) {
    return;
  }

  // Iterative DFS. ArrayDeque replaces the legacy java.util.Stack (a
  // synchronized Vector subclass); it is the recommended stack type and
  // avoids needless per-call locking. Fully qualified so no import is
  // required in this file.
  java.util.ArrayDeque<INodeDirectory> directories =
      new java.util.ArrayDeque<INodeDirectory>();
  directories.push(inode.asDirectory());
  while (!directories.isEmpty()) {
    INodeDirectory d = directories.pop();
    ReadOnlyList<INode> cList = d.getChildrenList(snapshotId);
    // Skip the check only when the directory is empty AND the caller asked
    // to ignore empty directories.
    if (!(cList.isEmpty() && ignoreEmptyDir)) {
      //TODO have to figure this out with inodeattribute provider
      // NOTE(review): the path reported here is the subtree root (inode),
      // not the directory being checked (d) — confirm this is intended.
      check(getINodeAttrs(pathByNameArr, pathIdx, d, snapshotId),
          inode.getFullPathName(), access);
    }

    // Queue child directories for traversal.
    for (INode child : cList) {
      if (child.isDirectory()) {
        directories.push(child.asDirectory());
      }
    }
  }
}
 
Example #2
Source File: FSImageFormat.java — from the hadoop project (Apache License 2.0)
/**
 * Serialize the children of a directory to the fsimage stream.
 *
 * @param children the children INodes to write
 * @param out the DataOutputStream to write to
 * @param inSnapshot whether the parent directory or an ancestor sits in the
 *                   deleted list of some snapshot (caused by rename or
 *                   deletion)
 * @param counter namenode startup-progress counter, bumped per INode saved
 * @return how many of the children are directories
 */
private int saveChildren(ReadOnlyList<INode> children,
    DataOutputStream out, boolean inSnapshot, Counter counter)
    throws IOException {
  // The child count goes first so the loader knows how many records follow.
  out.writeInt(children.size());
  int directoryCount = 0;
  for (INode child : children) {
    // print all children first
    // TODO: for HDFS-5428, we cannot change the format/content of fsimage
    // here, thus even if the parent directory is in snapshot, we still
    // do not handle INodeUC as those stored in deleted list
    saveINode2Image(child, out, false, referenceMap, counter);
    if (child.isDirectory()) {
      directoryCount++;
    } else if (inSnapshot && child.isFile()
        && child.asFile().isUnderConstruction()) {
      // Remember under-construction files beneath a snapshotted parent;
      // they are handled separately.
      this.snapshotUCMap.put(child.getId(), child.asFile());
    }
    // Periodically give a cancelled save a chance to abort.
    if (checkCancelCounter % CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
    checkCancelCounter++;
  }
  return directoryCount;
}
 
Example #3
Source File: TestSnapshotRename.java — from the hadoop project (Apache License 2.0)
/**
 * Verify that a snapshottable directory's snapshot lists are correct, both
 * in name order and in creation-time (diff) order.
 */
private void checkSnapshotList(INodeDirectory srcRoot,
    String[] sortedNames, String[] names) {
  assertTrue(srcRoot.isSnapshottable());
  // Snapshots as ordered by name.
  final ReadOnlyList<Snapshot> byName =
      srcRoot.getDirectorySnapshottableFeature().getSnapshotList();
  assertEquals(sortedNames.length, byName.size());
  int idx = 0;
  for (Snapshot s : byName) {
    assertEquals(sortedNames[idx++], s.getRoot().getLocalName());
  }
  // Snapshots as ordered by creation time (directory diff order).
  final List<DirectoryDiff> byTime = srcRoot.getDiffs().asList();
  assertEquals(names.length, byTime.size());
  for (int i = 0; i < byTime.size(); i++) {
    final Snapshot s = srcRoot.getDirectorySnapshottableFeature()
        .getSnapshotById(byTime.get(i).getSnapshotId());
    assertEquals(names[i], s.getRoot().getLocalName());
  }
}
 
Example #4
Source File: TestSnapshotRename.java — from the big-c project (Apache License 2.0)
/**
 * Verify that a snapshottable directory's snapshot lists are correct, both
 * in name order and in creation-time (diff) order.
 */
private void checkSnapshotList(INodeDirectory srcRoot,
    String[] sortedNames, String[] names) {
  assertTrue(srcRoot.isSnapshottable());
  // Snapshots as ordered by name.
  final ReadOnlyList<Snapshot> byName =
      srcRoot.getDirectorySnapshottableFeature().getSnapshotList();
  assertEquals(sortedNames.length, byName.size());
  int idx = 0;
  for (Snapshot s : byName) {
    assertEquals(sortedNames[idx++], s.getRoot().getLocalName());
  }
  // Snapshots as ordered by creation time (directory diff order).
  final List<DirectoryDiff> byTime = srcRoot.getDiffs().asList();
  assertEquals(names.length, byTime.size());
  for (int i = 0; i < byTime.size(); i++) {
    final Snapshot s = srcRoot.getDirectorySnapshottableFeature()
        .getSnapshotById(byTime.get(i).getSnapshotId());
    assertEquals(names[i], s.getRoot().getLocalName());
  }
}
 
Example #5
Source File: FSImageFormat.java — from the big-c project (Apache License 2.0)
/**
 * Serialize the children of a directory to the fsimage stream.
 *
 * @param children the children INodes to write
 * @param out the DataOutputStream to write to
 * @param inSnapshot whether the parent directory or an ancestor sits in the
 *                   deleted list of some snapshot (caused by rename or
 *                   deletion)
 * @param counter namenode startup-progress counter, bumped per INode saved
 * @return how many of the children are directories
 */
private int saveChildren(ReadOnlyList<INode> children,
    DataOutputStream out, boolean inSnapshot, Counter counter)
    throws IOException {
  // The child count goes first so the loader knows how many records follow.
  out.writeInt(children.size());
  int directoryCount = 0;
  for (INode child : children) {
    // print all children first
    // TODO: for HDFS-5428, we cannot change the format/content of fsimage
    // here, thus even if the parent directory is in snapshot, we still
    // do not handle INodeUC as those stored in deleted list
    saveINode2Image(child, out, false, referenceMap, counter);
    if (child.isDirectory()) {
      directoryCount++;
    } else if (inSnapshot && child.isFile()
        && child.asFile().isUnderConstruction()) {
      // Remember under-construction files beneath a snapshotted parent;
      // they are handled separately.
      this.snapshotUCMap.put(child.getId(), child.asFile());
    }
    // Periodically give a cancelled save a chance to abort.
    if (checkCancelCounter % CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
    checkCancelCounter++;
  }
  return directoryCount;
}
 
Example #6
Source File: FSPermissionChecker.java — from the hadoop project (Apache License 2.0)
/**
 * Recursively check {@code access} on every directory in the subtree rooted
 * at {@code inode}, using an explicit stack instead of recursion.
 * Guarded by {@link FSNamesystem#readLock()}.
 *
 * @param pathByNameArr path components used to resolve inode attributes
 * @param pathIdx index of {@code inode} within {@code pathByNameArr}
 * @param inode subtree root; non-directories (and null) are ignored
 * @param snapshotId snapshot to evaluate, or Snapshot.CURRENT_STATE_ID
 * @param access the FsAction being checked on each directory
 * @param ignoreEmptyDir when true, directories with no children are skipped
 * @throws AccessControlException if any directory in the subtree denies access
 */
private void checkSubAccess(byte[][] pathByNameArr, int pathIdx, INode inode,
    int snapshotId, FsAction access, boolean ignoreEmptyDir)
    throws AccessControlException {
  if (inode == null || !inode.isDirectory()) {
    return;
  }

  // Iterative DFS. ArrayDeque replaces the legacy java.util.Stack (a
  // synchronized Vector subclass); it is the recommended stack type and
  // avoids needless per-call locking. Fully qualified so no import is
  // required in this file.
  java.util.ArrayDeque<INodeDirectory> directories =
      new java.util.ArrayDeque<INodeDirectory>();
  directories.push(inode.asDirectory());
  while (!directories.isEmpty()) {
    INodeDirectory d = directories.pop();
    ReadOnlyList<INode> cList = d.getChildrenList(snapshotId);
    // Skip the check only when the directory is empty AND the caller asked
    // to ignore empty directories.
    if (!(cList.isEmpty() && ignoreEmptyDir)) {
      //TODO have to figure this out with inodeattribute provider
      // NOTE(review): the path reported here is the subtree root (inode),
      // not the directory being checked (d) — confirm this is intended.
      check(getINodeAttrs(pathByNameArr, pathIdx, d, snapshotId),
          inode.getFullPathName(), access);
    }

    // Queue child directories for traversal.
    for (INode child : cList) {
      if (child.isDirectory()) {
        directories.push(child.asDirectory());
      }
    }
  }
}
 
Example #7
Source File: CacheManager.java — from the big-c project (Apache License 2.0)
/**
 * Computes the needed number of bytes and files for a path.
 * @param path the path to evaluate
 * @param replication desired replication (currently unused here)
 * @return CacheDirectiveStats describing the needed stats for this path
 */
private CacheDirectiveStats computeNeeded(String path, short replication) {
  final FSDirectory fsDir = namesystem.getFSDirectory();
  final CacheDirectiveStats.Builder builder = new CacheDirectiveStats.Builder();

  final INode node;
  try {
    node = fsDir.getINode(path);
  } catch (UnresolvedLinkException e) {
    // We don't cache through symlinks
    return builder.build();
  }
  if (node == null) {
    // Nonexistent path: nothing is needed.
    return builder.build();
  }

  long requestedBytes = 0;
  long requestedFiles = 0;
  if (node.isFile()) {
    // A single file: its completed length is what is needed.
    requestedFiles = 1;
    requestedBytes = node.asFile().computeFileSize();
  } else if (node.isDirectory()) {
    // A directory: count direct children, summing the sizes of the files
    // among them (non-recursive).
    final ReadOnlyList<INode> children = node.asDirectory()
        .getChildrenList(Snapshot.CURRENT_STATE_ID);
    requestedFiles = children.size();
    for (INode child : children) {
      if (child.isFile()) {
        requestedBytes += child.asFile().computeFileSize();
      }
    }
  }
  // NOTE(review): the file count is stored via setFilesCached although this
  // method computes *needed* stats — confirm setFilesNeeded was not intended.
  return new CacheDirectiveStats.Builder()
      .setBytesNeeded(requestedBytes)
      .setFilesCached(requestedFiles)
      .build();
}
 
Example #8
Source File: FSImageFormatPBINode.java — from the big-c project (Apache License 2.0)
/**
 * Write the INODE_DIR fsimage section: for every directory with at least
 * one child, emit a DirEntry listing its children — inline ids for plain
 * inodes, reference-list indices for INodeReferences.
 */
void serializeINodeDirectorySection(OutputStream out) throws IOException {
  final Iterator<INodeWithAdditionalFields> iter = fsn.getFSDirectory()
      .getINodeMap().getMapIterator();
  final ArrayList<INodeReference> refList = parent.getSaverContext()
      .getRefList();
  int dirsVisited = 0;
  while (iter.hasNext()) {
    final INodeWithAdditionalFields inode = iter.next();
    if (!inode.isDirectory()) {
      continue;
    }

    final ReadOnlyList<INode> children = inode.asDirectory()
        .getChildrenList(Snapshot.CURRENT_STATE_ID);
    // Directories with no children produce no DirEntry at all.
    if (!children.isEmpty()) {
      final INodeDirectorySection.DirEntry.Builder entry =
          INodeDirectorySection.DirEntry.newBuilder().setParent(inode.getId());
      for (INode child : children) {
        if (child.isReference()) {
          // References are stored once in the shared refList and pointed
          // to by their index.
          refList.add(child.asReference());
          entry.addRefChildren(refList.size() - 1);
        } else {
          entry.addChildren(child.getId());
        }
      }
      entry.build().writeDelimitedTo(out);
    }

    dirsVisited++;
    // Periodically allow a cancelled save to abort.
    if (dirsVisited % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(summary,
      FSImageFormatProtobuf.SectionName.INODE_DIR);
}
 
Example #9
Source File: FSDirRenameOp.java — from the big-c project (Apache License 2.0)
/**
 * Validate that an existing rename destination may be overwritten:
 * source and destination must agree on directory-ness, the overwrite flag
 * must be set, and a directory destination must be empty.
 *
 * @throws FileAlreadyExistsException when overwrite was not requested
 * @throws IOException for type mismatch or a non-empty directory destination
 */
private static void validateOverwrite(
    String src, String dst, boolean overwrite, INode srcInode, INode dstInode)
    throws IOException {
  // It's OK to rename a file to a symlink and vice versa
  if (dstInode.isDirectory() != srcInode.isDirectory()) {
    final String error = "Source " + src + " and destination " + dst
        + " must both be directories";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
        + error);
    throw new IOException(error);
  }
  // If destination exists, overwrite flag must be true
  if (!overwrite) {
    final String error = "rename destination " + dst + " already exists";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
        + error);
    throw new FileAlreadyExistsException(error);
  }
  // A directory destination may only be overwritten when it is empty.
  if (dstInode.isDirectory()) {
    final ReadOnlyList<INode> children = dstInode.asDirectory()
        .getChildrenList(Snapshot.CURRENT_STATE_ID);
    if (!children.isEmpty()) {
      final String error = "rename destination directory is not empty: " + dst;
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + error);
      throw new IOException(error);
    }
  }
}
 
Example #10
Source File: DirectoryWithSnapshotFeature.java — from the big-c project (Apache License 2.0)
/**
 * Look up the children list as it appeared in the given snapshot.
 *
 * @return If there is no corresponding directory diff for the given
 *         snapshot, the current children list is returned (the directory
 *         has not changed since that snapshot). Otherwise the children
 *         list is computed from the diff and returned.
 */
public ReadOnlyList<INode> getChildrenList(INodeDirectory currentINode,
    final int snapshotId) {
  final DirectoryDiff diff = diffs.getDiffById(snapshotId);
  if (diff == null) {
    // No diff recorded: the current state is the snapshot view.
    return currentINode.getChildrenList(Snapshot.CURRENT_STATE_ID);
  }
  return diff.getChildrenList(currentINode);
}
 
Example #11
Source File: FSDirStatAndListingOp.java — from the big-c project (Apache License 2.0)
/**
 * Get a listing of all the snapshots of a snapshottable directory,
 * starting after {@code startAfter} and capped at the listing limit.
 *
 * @throws SnapshotException when the directory is not snapshottable
 */
private static DirectoryListing getSnapshotsListing(
    FSDirectory fsd, String src, byte[] startAfter)
    throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  // Strip the trailing ".snapshot" component to recover the directory path.
  final String dirPath = FSDirectory.normalizePath(
      src.substring(0, src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INodeDirectory dirNode =
      INodeDirectory.valueOf(fsd.getINode(dirPath), dirPath);
  final DirectorySnapshottableFeature sf =
      dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException(
        "Directory is not a snapshottable directory: " + dirPath);
  }

  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  // Find the first snapshot strictly after startAfter: binarySearch yields
  // (-insertionPoint - 1) on a miss, the match index on a hit.
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;

  final int numOfListing =
      Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
  final HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    final Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(fsd, src, sRoot.getLocalNameBytes(), sRoot,
        BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
        false, INodesInPath.fromINode(sRoot));
  }
  // The second argument tells the client how many more entries remain.
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
 
Example #12
Source File: INodeDirectory.java — from the big-c project (Apache License 2.0)
/**
 * Given a child's name, return the index of the next child.
 *
 * @param children the sorted children list to search
 * @param name a child's name
 * @return the index of the first child that sorts after {@code name}
 */
static int nextChild(ReadOnlyList<INode> children, byte[] name) {
  // An empty name sorts before everything, so the next child is the first.
  if (name.length == 0) {
    return 0;
  }
  final int found = ReadOnlyList.Util.binarySearch(children, name);
  // Hit: the next child follows the match. Miss: binarySearch encodes the
  // insertion point as -(insertionPoint + 1), and the insertion point is
  // exactly the index of the first child greater than 'name'.
  return found >= 0 ? found + 1 : -(found + 1);
}
 
Example #13
Source File: INodeDirectory.java — from the big-c project (Apache License 2.0)
/**
 * Accumulate this directory's quota usage into {@code counts}, either for a
 * specific snapshot (walking the snapshot's children) or for the current
 * tree (using the cached quota when permitted).
 */
@Override
public QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
    byte blockStoragePolicyId, QuotaCounts counts, boolean useCache,
    int lastSnapshotId) {
  final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();

  // Snapshot-scoped computation: only files/directories that existed at the
  // time of the given snapshot are counted, so the cached quota (which
  // reflects the current tree) cannot be used.
  final boolean snapshotScoped = sf != null
      && lastSnapshotId != Snapshot.CURRENT_STATE_ID
      && !(useCache && isQuotaSet());
  if (snapshotScoped) {
    for (INode child : getChildrenList(lastSnapshotId)) {
      child.computeQuotaUsage(bsps,
          child.getStoragePolicyIDForQuota(blockStoragePolicyId), counts,
          useCache, lastSnapshotId);
    }
    // One namespace entry for this directory itself.
    counts.addNameSpace(1);
    return counts;
  }

  // Current-tree computation: prefer the cached quota when available.
  final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
  if (useCache && q != null && q.isQuotaSet()) {
    return q.AddCurrentSpaceUsage(counts);
  }
  // A quota feature without a quota set invalidates the cache for the
  // subtree below this directory.
  if (q != null && !q.isQuotaSet()) {
    useCache = false;
  }
  return computeDirectoryQuotaUsage(bsps, blockStoragePolicyId, counts,
      useCache, lastSnapshotId);
}
 
Example #14
Source File: INodeDirectory.java — from the big-c project (Apache License 2.0)
/**
 * Compute the content summary (file/directory/byte counts) of this
 * directory for the given snapshot, cooperating with lock yielding: the
 * summary context may release and reacquire the namesystem locks during a
 * child's subtree computation, in which case the children list is
 * re-fetched and the loop repositioned by child name.
 */
protected ContentSummaryComputationContext computeDirectoryContentSummary(
    ContentSummaryComputationContext summary, int snapshotId) {
  ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
  // Explicit traversing is done to enable repositioning after relinquishing
  // and reacquiring locks.
  for (int i = 0;  i < childrenList.size(); i++) {
    INode child = childrenList.get(i);
    // Remember the child's name so we can find our position again if the
    // children list changes while the locks are released.
    byte[] childName = child.getLocalNameBytes();

    // A change in the yield count after the recursive call means the
    // locks were dropped somewhere inside the subtree computation.
    long lastYieldCount = summary.getYieldCount();
    child.computeContentSummary(summary);

    // Check whether the computation was paused in the subtree.
    // The counts may be off, but traversing the rest of children
    // should be made safe.
    if (lastYieldCount == summary.getYieldCount()) {
      continue;
    }
    // The locks were released and reacquired. Check parent first.
    if (getParent() == null) {
      // Stop further counting and return whatever we have so far.
      break;
    }
    // Obtain the children list again since it may have been modified.
    childrenList = getChildrenList(snapshotId);
    // Reposition in case the children list is changed. Decrement by 1
    // since it will be incremented when loops.
    i = nextChild(childrenList, childName) - 1;
  }

  // Increment the directory count for this directory.
  summary.getCounts().addContent(Content.DIRECTORY, 1);
  // Relinquish and reacquire locks if necessary.
  summary.yield();
  return summary;
}
 
Example #15
Source File: SnapshotFSImageFormat.java — from the big-c project (Apache License 2.0)
/**
 * Save snapshots and snapshot quota for a snapshottable directory.
 * @param current The directory that the snapshots belong to.
 * @param out The {@link DataOutput} to write.
 * @throws IOException on write failure
 */
public static void saveSnapshots(INodeDirectory current, DataOutput out)
    throws IOException {
  final DirectorySnapshottableFeature sf =
      current.getDirectorySnapshottableFeature();
  Preconditions.checkArgument(sf != null);
  // Write the snapshot count followed by each snapshot id, in the
  // snapshotsByNames order maintained by the feature.
  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  out.writeInt(snapshots.size());
  for (Snapshot snapshot : snapshots) {
    out.writeInt(snapshot.getId());
  }
  // Trailing snapshot quota.
  out.writeInt(sf.getSnapshotQuota());
}
 
Example #16
Source File: INodeDirectory.java — from the big-c project (Apache License 2.0)
/**
 * @param name the name of the child
 * @param snapshotId
 *          if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
 *          from the corresponding snapshot; otherwise, get the result from
 *          the current directory.
 * @return the child inode, or null if no child with that name exists.
 */
public INode getChild(byte[] name, int snapshotId) {
  if (snapshotId != Snapshot.CURRENT_STATE_ID) {
    final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
    if (sf != null) {
      // Snapshot lookups are delegated to the snapshot feature.
      return sf.getChild(this, name, snapshotId);
    }
  }
  // Current state (or no snapshot feature present): binary-search the
  // sorted current children list.
  final ReadOnlyList<INode> current = getCurrentChildrenList();
  final int pos = ReadOnlyList.Util.binarySearch(current, name);
  return pos < 0 ? null : current.get(pos);
}
 
Example #17
Source File: DirectoryWithSnapshotFeature.java — from the big-c project (Apache License 2.0)
/**
 * @return The children list of a directory in a snapshot.
 *         Since the snapshot is read-only, the logical view of the list is
 *         never changed although the internal data structure may mutate.
 */
private ReadOnlyList<INode> getChildrenList(final INodeDirectory currentDir) {
  return new ReadOnlyList<INode>() {
    // Lazily materialized snapshot view: stays null until first iteration
    // or random access. size()/isEmpty() answer from the enclosing
    // DirectoryDiff's recorded childrenSize without materializing.
    private List<INode> children = null;

    // Build the snapshot-time children list by combining this diff with
    // all posterior diffs and applying the result to the current children
    // list; the outcome is memoized in 'children'.
    private List<INode> initChildren() {
      if (children == null) {
        final ChildrenDiff combined = new ChildrenDiff();
        for (DirectoryDiff d = DirectoryDiff.this; d != null; 
            d = d.getPosterior()) {
          combined.combinePosterior(d.diff, null);
        }
        children = combined.apply2Current(ReadOnlyList.Util.asList(
            currentDir.getChildrenList(Snapshot.CURRENT_STATE_ID)));
      }
      return children;
    }

    @Override
    public Iterator<INode> iterator() {
      return initChildren().iterator();
    }

    @Override
    public boolean isEmpty() {
      // Answered from the recorded size; never triggers materialization.
      return childrenSize == 0;
    }

    @Override
    public int size() {
      return childrenSize;
    }

    @Override
    public INode get(int i) {
      return initChildren().get(i);
    }
  };
}
 
Example #18
Source File: INodeDirectory.java — from the hadoop project (Apache License 2.0)
/**
 * Compute the content summary (file/directory/byte counts) of this
 * directory for the given snapshot, cooperating with lock yielding: the
 * summary context may release and reacquire the namesystem locks during a
 * child's subtree computation, in which case the children list is
 * re-fetched and the loop repositioned by child name.
 */
protected ContentSummaryComputationContext computeDirectoryContentSummary(
    ContentSummaryComputationContext summary, int snapshotId) {
  ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
  // Explicit traversing is done to enable repositioning after relinquishing
  // and reacquiring locks.
  for (int i = 0;  i < childrenList.size(); i++) {
    INode child = childrenList.get(i);
    // Remember the child's name so we can find our position again if the
    // children list changes while the locks are released.
    byte[] childName = child.getLocalNameBytes();

    // A change in the yield count after the recursive call means the
    // locks were dropped somewhere inside the subtree computation.
    long lastYieldCount = summary.getYieldCount();
    child.computeContentSummary(summary);

    // Check whether the computation was paused in the subtree.
    // The counts may be off, but traversing the rest of children
    // should be made safe.
    if (lastYieldCount == summary.getYieldCount()) {
      continue;
    }
    // The locks were released and reacquired. Check parent first.
    if (getParent() == null) {
      // Stop further counting and return whatever we have so far.
      break;
    }
    // Obtain the children list again since it may have been modified.
    childrenList = getChildrenList(snapshotId);
    // Reposition in case the children list is changed. Decrement by 1
    // since it will be incremented when loops.
    i = nextChild(childrenList, childName) - 1;
  }

  // Increment the directory count for this directory.
  summary.getCounts().addContent(Content.DIRECTORY, 1);
  // Relinquish and reacquire locks if necessary.
  summary.yield();
  return summary;
}
 
Example #19
Source File: INodeDirectory.java — from the hadoop project (Apache License 2.0)
/**
 * Accumulate this directory's quota usage into {@code counts}, either for a
 * specific snapshot (walking the snapshot's children) or for the current
 * tree (using the cached quota when permitted).
 */
@Override
public QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
    byte blockStoragePolicyId, QuotaCounts counts, boolean useCache,
    int lastSnapshotId) {
  final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();

  // Snapshot-scoped computation: only files/directories that existed at the
  // time of the given snapshot are counted, so the cached quota (which
  // reflects the current tree) cannot be used.
  final boolean snapshotScoped = sf != null
      && lastSnapshotId != Snapshot.CURRENT_STATE_ID
      && !(useCache && isQuotaSet());
  if (snapshotScoped) {
    for (INode child : getChildrenList(lastSnapshotId)) {
      child.computeQuotaUsage(bsps,
          child.getStoragePolicyIDForQuota(blockStoragePolicyId), counts,
          useCache, lastSnapshotId);
    }
    // One namespace entry for this directory itself.
    counts.addNameSpace(1);
    return counts;
  }

  // Current-tree computation: prefer the cached quota when available.
  final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
  if (useCache && q != null && q.isQuotaSet()) {
    return q.AddCurrentSpaceUsage(counts);
  }
  // A quota feature without a quota set invalidates the cache for the
  // subtree below this directory.
  if (q != null && !q.isQuotaSet()) {
    useCache = false;
  }
  return computeDirectoryQuotaUsage(bsps, blockStoragePolicyId, counts,
      useCache, lastSnapshotId);
}
 
Example #20
Source File: FSDirStatAndListingOp.java — from the hadoop project (Apache License 2.0)
/**
 * Get a listing of all the snapshots of a snapshottable directory,
 * starting after {@code startAfter} and capped at the listing limit.
 *
 * @throws SnapshotException when the directory is not snapshottable
 */
private static DirectoryListing getSnapshotsListing(
    FSDirectory fsd, String src, byte[] startAfter)
    throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  // Strip the trailing ".snapshot" component to recover the directory path.
  final String dirPath = FSDirectory.normalizePath(
      src.substring(0, src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INodeDirectory dirNode =
      INodeDirectory.valueOf(fsd.getINode(dirPath), dirPath);
  final DirectorySnapshottableFeature sf =
      dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException(
        "Directory is not a snapshottable directory: " + dirPath);
  }

  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  // Find the first snapshot strictly after startAfter: binarySearch yields
  // (-insertionPoint - 1) on a miss, the match index on a hit.
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;

  final int numOfListing =
      Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
  final HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    final Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(fsd, src, sRoot.getLocalNameBytes(), sRoot,
        BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
        false, INodesInPath.fromINode(sRoot));
  }
  // The second argument tells the client how many more entries remain.
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
 
Example #21
Source File: FSDirRenameOp.java — from the hadoop project (Apache License 2.0)
/**
 * Validate that an existing rename destination may be overwritten:
 * source and destination must agree on directory-ness, the overwrite flag
 * must be set, and a directory destination must be empty.
 *
 * @throws FileAlreadyExistsException when overwrite was not requested
 * @throws IOException for type mismatch or a non-empty directory destination
 */
private static void validateOverwrite(
    String src, String dst, boolean overwrite, INode srcInode, INode dstInode)
    throws IOException {
  // It's OK to rename a file to a symlink and vice versa
  if (dstInode.isDirectory() != srcInode.isDirectory()) {
    final String error = "Source " + src + " and destination " + dst
        + " must both be directories";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
        + error);
    throw new IOException(error);
  }
  // If destination exists, overwrite flag must be true
  if (!overwrite) {
    final String error = "rename destination " + dst + " already exists";
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
        + error);
    throw new FileAlreadyExistsException(error);
  }
  // A directory destination may only be overwritten when it is empty.
  if (dstInode.isDirectory()) {
    final ReadOnlyList<INode> children = dstInode.asDirectory()
        .getChildrenList(Snapshot.CURRENT_STATE_ID);
    if (!children.isEmpty()) {
      final String error = "rename destination directory is not empty: " + dst;
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + error);
      throw new IOException(error);
    }
  }
}
 
Example #22
Source File: FSImageFormatPBINode.java — from the hadoop project (Apache License 2.0)
/**
 * Write the INODE_DIR fsimage section: for every directory with at least
 * one child, emit a DirEntry listing its children — inline ids for plain
 * inodes, reference-list indices for INodeReferences.
 */
void serializeINodeDirectorySection(OutputStream out) throws IOException {
  final Iterator<INodeWithAdditionalFields> iter = fsn.getFSDirectory()
      .getINodeMap().getMapIterator();
  final ArrayList<INodeReference> refList = parent.getSaverContext()
      .getRefList();
  int dirsVisited = 0;
  while (iter.hasNext()) {
    final INodeWithAdditionalFields inode = iter.next();
    if (!inode.isDirectory()) {
      continue;
    }

    final ReadOnlyList<INode> children = inode.asDirectory()
        .getChildrenList(Snapshot.CURRENT_STATE_ID);
    // Directories with no children produce no DirEntry at all.
    if (!children.isEmpty()) {
      final INodeDirectorySection.DirEntry.Builder entry =
          INodeDirectorySection.DirEntry.newBuilder().setParent(inode.getId());
      for (INode child : children) {
        if (child.isReference()) {
          // References are stored once in the shared refList and pointed
          // to by their index.
          refList.add(child.asReference());
          entry.addRefChildren(refList.size() - 1);
        } else {
          entry.addChildren(child.getId());
        }
      }
      entry.build().writeDelimitedTo(out);
    }

    dirsVisited++;
    // Periodically allow a cancelled save to abort.
    if (dirsVisited % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(summary,
      FSImageFormatProtobuf.SectionName.INODE_DIR);
}
 
Example #23
Source File: INodeDirectory.java — from the hadoop project (Apache License 2.0)
/**
 * Given a child's name, return the index of the next child.
 *
 * @param children the sorted children list to search
 * @param name a child's name
 * @return the index of the first child that sorts after {@code name}
 */
static int nextChild(ReadOnlyList<INode> children, byte[] name) {
  // An empty name sorts before everything, so the next child is the first.
  if (name.length == 0) {
    return 0;
  }
  final int found = ReadOnlyList.Util.binarySearch(children, name);
  // Hit: the next child follows the match. Miss: binarySearch encodes the
  // insertion point as -(insertionPoint + 1), and the insertion point is
  // exactly the index of the first child greater than 'name'.
  return found >= 0 ? found + 1 : -(found + 1);
}
 
Example #24
Source File: CacheManager.java — from the hadoop project (Apache License 2.0)
/**
 * Computes the needed number of bytes and files for a path.
 * @param path the path to evaluate
 * @param replication desired replication (currently unused here)
 * @return CacheDirectiveStats describing the needed stats for this path
 */
private CacheDirectiveStats computeNeeded(String path, short replication) {
  final FSDirectory fsDir = namesystem.getFSDirectory();
  final CacheDirectiveStats.Builder builder = new CacheDirectiveStats.Builder();

  final INode node;
  try {
    node = fsDir.getINode(path);
  } catch (UnresolvedLinkException e) {
    // We don't cache through symlinks
    return builder.build();
  }
  if (node == null) {
    // Nonexistent path: nothing is needed.
    return builder.build();
  }

  long requestedBytes = 0;
  long requestedFiles = 0;
  if (node.isFile()) {
    // A single file: its completed length is what is needed.
    requestedFiles = 1;
    requestedBytes = node.asFile().computeFileSize();
  } else if (node.isDirectory()) {
    // A directory: count direct children, summing the sizes of the files
    // among them (non-recursive).
    final ReadOnlyList<INode> children = node.asDirectory()
        .getChildrenList(Snapshot.CURRENT_STATE_ID);
    requestedFiles = children.size();
    for (INode child : children) {
      if (child.isFile()) {
        requestedBytes += child.asFile().computeFileSize();
      }
    }
  }
  // NOTE(review): the file count is stored via setFilesCached although this
  // method computes *needed* stats — confirm setFilesNeeded was not intended.
  return new CacheDirectiveStats.Builder()
      .setBytesNeeded(requestedBytes)
      .setFilesCached(requestedFiles)
      .build();
}
 
Example #25
Source File: INodeDirectory.java — from the hadoop project (Apache License 2.0)
/**
 * @param name the name of the child
 * @param snapshotId
 *          if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
 *          from the corresponding snapshot; otherwise, get the result from
 *          the current directory.
 * @return the child inode, or null if no child with that name exists.
 */
public INode getChild(byte[] name, int snapshotId) {
  if (snapshotId != Snapshot.CURRENT_STATE_ID) {
    final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
    if (sf != null) {
      // Snapshot lookups are delegated to the snapshot feature.
      return sf.getChild(this, name, snapshotId);
    }
  }
  // Current state (or no snapshot feature present): binary-search the
  // sorted current children list.
  final ReadOnlyList<INode> current = getCurrentChildrenList();
  final int pos = ReadOnlyList.Util.binarySearch(current, name);
  return pos < 0 ? null : current.get(pos);
}
 
Example #26
Source File: DirectoryWithSnapshotFeature.java — from the hadoop project (Apache License 2.0)
/**
 * Look up the children list as it appeared in the given snapshot.
 *
 * @return If there is no corresponding directory diff for the given
 *         snapshot, the current children list is returned (the directory
 *         has not changed since that snapshot). Otherwise the children
 *         list is computed from the diff and returned.
 */
public ReadOnlyList<INode> getChildrenList(INodeDirectory currentINode,
    final int snapshotId) {
  final DirectoryDiff diff = diffs.getDiffById(snapshotId);
  if (diff == null) {
    // No diff recorded: the current state is the snapshot view.
    return currentINode.getChildrenList(Snapshot.CURRENT_STATE_ID);
  }
  return diff.getChildrenList(currentINode);
}
 
Example #27
Source File: DirectoryWithSnapshotFeature.java — from the hadoop project (Apache License 2.0)
/**
 * @return The children list of a directory in a snapshot.
 *         Since the snapshot is read-only, the logical view of the list is
 *         never changed although the internal data structure may mutate.
 */
private ReadOnlyList<INode> getChildrenList(final INodeDirectory currentDir) {
  return new ReadOnlyList<INode>() {
    // Lazily materialized snapshot view: stays null until first iteration
    // or random access. size()/isEmpty() answer from the enclosing
    // DirectoryDiff's recorded childrenSize without materializing.
    private List<INode> children = null;

    // Build the snapshot-time children list by combining this diff with
    // all posterior diffs and applying the result to the current children
    // list; the outcome is memoized in 'children'.
    private List<INode> initChildren() {
      if (children == null) {
        final ChildrenDiff combined = new ChildrenDiff();
        for (DirectoryDiff d = DirectoryDiff.this; d != null; 
            d = d.getPosterior()) {
          combined.combinePosterior(d.diff, null);
        }
        children = combined.apply2Current(ReadOnlyList.Util.asList(
            currentDir.getChildrenList(Snapshot.CURRENT_STATE_ID)));
      }
      return children;
    }

    @Override
    public Iterator<INode> iterator() {
      return initChildren().iterator();
    }

    @Override
    public boolean isEmpty() {
      // Answered from the recorded size; never triggers materialization.
      return childrenSize == 0;
    }

    @Override
    public int size() {
      return childrenSize;
    }

    @Override
    public INode get(int i) {
      return initChildren().get(i);
    }
  };
}
 
Example #28
Source File: SnapshotFSImageFormat.java — from the hadoop project (Apache License 2.0)
/**
 * Save snapshots and snapshot quota for a snapshottable directory.
 * @param current The directory that the snapshots belong to.
 * @param out The {@link DataOutput} to write.
 * @throws IOException on write failure
 */
public static void saveSnapshots(INodeDirectory current, DataOutput out)
    throws IOException {
  final DirectorySnapshottableFeature sf =
      current.getDirectorySnapshottableFeature();
  Preconditions.checkArgument(sf != null);
  // Write the snapshot count followed by each snapshot id, in the
  // snapshotsByNames order maintained by the feature.
  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  out.writeInt(snapshots.size());
  for (Snapshot snapshot : snapshots) {
    out.writeInt(snapshot.getId());
  }
  // Trailing snapshot quota.
  out.writeInt(sf.getSnapshotQuota());
}
 
Example #29
Source File: TestRenameWithSnapshots.java — from the big-c project, Apache License 2.0
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * 
 * Make sure we only delete the snapshot s under the renamed dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
  // Set up /dir1/foo/bar and an empty /dir2.
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);
  
  // Snapshot both trees before the first rename.
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  
  // First rename: move foo from the src tree into the dst tree.
  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);
  
  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
  
  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");
  
  // rename foo2 again
  hdfs.rename(foo2, foo);
  // delete snapshot s3
  hdfs.deleteSnapshot(sdir2, "s3");
  
  // check
  // Verify namespace quota usage on both trees after s3 is removed.
  final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  // sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
  QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(7, q1.getNameSpace());
  final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(1, q2.getNameSpace());
  
  // The snapshot copy of foo under s1 should be a WithName reference whose
  // referred inode is shared (count 2) with the renamed-back foo.
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  final INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc = 
      (WithCount) fooRef.asReference().getReferredINode();
  assertEquals(2, wc.getReferenceCount());
  // foo still contains all three children; only snapshot s3's metadata
  // should have been removed, not the files themselves.
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(3, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  assertEquals(bar2.getName(), children.get(1).getLocalName());
  assertEquals(bar3.getName(), children.get(2).getLocalName());
  // Only the s1 diff should remain on foo after s3 is deleted.
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  // bar2 and bar3 in the created list
  assertEquals(2, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());
  
  // The live foo is a DstReference sharing the same WithCount node.
  final INode fooRef2 = fsdir.getINode4Write(foo.toString());
  assertTrue(fooRef2 instanceof INodeReference.DstReference);
  INodeReference.WithCount wc2 = 
      (WithCount) fooRef2.asReference().getReferredINode();
  assertSame(wc, wc2);
  assertSame(fooRef2, wc.getParentReference());
  
  // Ensure the namespace state survives a save/load of the fsimage.
  restartClusterAndCheckImage(true);
}
 
Example #30
Source File: INodeDirectory.java — from the big-c project, Apache License 2.0
/** Return the directory's current children as a read-only view, or an
 *  empty read-only list when no child has been created yet. */
private ReadOnlyList<INode> getCurrentChildrenList() {
  if (children == null) {
    return ReadOnlyList.Util.<INode> emptyList();
  }
  return ReadOnlyList.Util.asReadOnlyList(children);
}