Java Code Examples for org.apache.hadoop.hdfs.server.namenode.INode#isDirectory()

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.INode#isDirectory(). Each example notes its source file, project, and license.
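Every example below follows the same idiom: test the inode type with isDirectory() and only then downcast with asDirectory() to reach directory-only methods such as getChildrenList(). The sketch below is a minimal illustration of that pattern, not code from Hadoop itself: the DirectoryCounter and countDirectories names are hypothetical, and it assumes access to the NameNode-internal INode classes (they are not part of the public HDFS API).

import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;

/**
 * Hypothetical helper that distills the isDirectory()/asDirectory() idiom
 * shared by the examples below. Uses NameNode-internal classes, not public API.
 */
public class DirectoryCounter {

  /** Count directory inodes in the subtree rooted at the given inode. */
  public static int countDirectories(INode inode) {
    if (!inode.isDirectory()) {
      return 0; // only directories are counted and descended into
    }
    // asDirectory() is only safe once isDirectory() has returned true
    INodeDirectory dir = inode.asDirectory();
    int count = 1;
    for (INode child : dir.getChildrenList(Snapshot.CURRENT_STATE_ID)) {
      count += countDirectories(child);
    }
    return count;
  }
}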
Example 1
Source File: SnapshotDiffInfo.java    From hadoop with Apache License 2.0
/**
 * Generate a {@link SnapshotDiffReport} based on detailed diff information.
 * @return A {@link SnapshotDiffReport} describing the difference
 */
public SnapshotDiffReport generateReport() {
  List<DiffReportEntry> diffReportList = new ArrayList<DiffReportEntry>();
  for (Map.Entry<INode,byte[][]> drEntry : diffMap.entrySet()) {
    INode node = drEntry.getKey();
    byte[][] path = drEntry.getValue();
    diffReportList.add(new DiffReportEntry(DiffType.MODIFY, path, null));
    if (node.isDirectory()) {
      List<DiffReportEntry> subList = generateReport(dirDiffMap.get(node),
          path, isFromEarlier(), renameMap);
      diffReportList.addAll(subList);
    }
  }
  return new SnapshotDiffReport(snapshotRoot.getFullPathName(),
      Snapshot.getSnapshotName(from), Snapshot.getSnapshotName(to),
      diffReportList);
}
 
Example 2
Source File: SnapshotDiffInfo.java    From big-c with Apache License 2.0
/**
 * Generate a {@link SnapshotDiffReport} based on detailed diff information.
 * @return A {@link SnapshotDiffReport} describing the difference
 */
public SnapshotDiffReport generateReport() {
  List<DiffReportEntry> diffReportList = new ArrayList<DiffReportEntry>();
  for (Map.Entry<INode,byte[][]> drEntry : diffMap.entrySet()) {
    INode node = drEntry.getKey();
    byte[][] path = drEntry.getValue();
    diffReportList.add(new DiffReportEntry(DiffType.MODIFY, path, null));
    if (node.isDirectory()) {
      List<DiffReportEntry> subList = generateReport(dirDiffMap.get(node),
          path, isFromEarlier(), renameMap);
      diffReportList.addAll(subList);
    }
  }
  return new SnapshotDiffReport(snapshotRoot.getFullPathName(),
      Snapshot.getSnapshotName(from), Snapshot.getSnapshotName(to),
      diffReportList);
}
 
Example 3
Source File: Snapshot.java    From hadoop with Apache License 2.0
/**
 * Find the latest snapshot that 1) covers the given inode (which means the
 * snapshot was either taken on the inode or taken on an ancestor of the
 * inode), and 2) was taken before the given snapshot (if the given snapshot 
 * is not null).
 * 
 * @param inode the given inode that the returned snapshot needs to cover
 * @param anchor the returned snapshot should be taken before this given id.
 * @return id of the latest snapshot that covers the given inode and was taken 
 *         before the given snapshot (if it is not null).
 */
public static int findLatestSnapshot(INode inode, final int anchor) {
  int latest = NO_SNAPSHOT_ID;
  for(; inode != null; inode = inode.getParent()) {
    if (inode.isDirectory()) {
      final INodeDirectory dir = inode.asDirectory();
      if (dir.isWithSnapshot()) {
        latest = dir.getDiffs().updatePrior(anchor, latest);
      }
    }
  }
  return latest;
}
 
Example 4
Source File: DirectoryWithSnapshotFeature.java    From hadoop with Apache License 2.0
/** Get the list of INodeDirectory contained in the deleted list */
private void getDirsInDeleted(List<INodeDirectory> dirList) {
  for (INode node : getList(ListType.DELETED)) {
    if (node.isDirectory()) {
      dirList.add(node.asDirectory());
    }
  }
}
 
Example 5
Source File: Snapshot.java    From big-c with Apache License 2.0
/**
 * Find the latest snapshot that 1) covers the given inode (which means the
 * snapshot was either taken on the inode or taken on an ancestor of the
 * inode), and 2) was taken before the given snapshot (if the given snapshot 
 * is not null).
 * 
 * @param inode the given inode that the returned snapshot needs to cover
 * @param anchor the returned snapshot should be taken before this given id.
 * @return id of the latest snapshot that covers the given inode and was taken 
 *         before the given snapshot (if it is not null).
 */
public static int findLatestSnapshot(INode inode, final int anchor) {
  int latest = NO_SNAPSHOT_ID;
  for(; inode != null; inode = inode.getParent()) {
    if (inode.isDirectory()) {
      final INodeDirectory dir = inode.asDirectory();
      if (dir.isWithSnapshot()) {
        latest = dir.getDiffs().updatePrior(anchor, latest);
      }
    }
  }
  return latest;
}
 
Example 6
Source File: DirectoryWithSnapshotFeature.java    From big-c with Apache License 2.0
/** Get the list of INodeDirectory contained in the deleted list */
private void getDirsInDeleted(List<INodeDirectory> dirList) {
  for (INode node : getList(ListType.DELETED)) {
    if (node.isDirectory()) {
      dirList.add(node.asDirectory());
    }
  }
}
 
Example 7
Source File: DirectoryWithSnapshotFeature.java    From hadoop with Apache License 2.0
/**
 * Destroy a subtree under a DstReference node.
 */
public static void destroyDstSubtree(
    final BlockStoragePolicySuite bsps, INode inode, final int snapshot,
    final int prior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) throws QuotaExceededException {
  Preconditions.checkArgument(prior != Snapshot.NO_SNAPSHOT_ID);
  if (inode.isReference()) {
    if (inode instanceof INodeReference.WithName
        && snapshot != Snapshot.CURRENT_STATE_ID) {
      // this inode has been renamed before the deletion of the DstReference
      // subtree
      inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
    } else { 
      // for DstReference node, continue this process to its subtree
      destroyDstSubtree(bsps, inode.asReference().getReferredINode(), snapshot,
          prior, collectedBlocks, removedINodes);
    }
  } else if (inode.isFile()) {
    inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
  } else if (inode.isDirectory()) {
    Map<INode, INode> excludedNodes = null;
    INodeDirectory dir = inode.asDirectory();
    DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
    if (sf != null) {
      DirectoryDiffList diffList = sf.getDiffs();
      DirectoryDiff priorDiff = diffList.getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
        excludedNodes = cloneDiffList(dList);
      }
      
      if (snapshot != Snapshot.CURRENT_STATE_ID) {
        diffList.deleteSnapshotDiff(bsps, snapshot, prior, dir, collectedBlocks,
            removedINodes);
      }
      priorDiff = diffList.getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        priorDiff.diff.destroyCreatedList(bsps, dir, collectedBlocks,
            removedINodes);
      }
    }
    for (INode child : inode.asDirectory().getChildrenList(prior)) {
      if (excludedNodes != null && excludedNodes.containsKey(child)) {
        continue;
      }
      destroyDstSubtree(bsps, child, snapshot, prior, collectedBlocks,
          removedINodes);
    }
  }
}
 
Example 8
Source File: DirectoryWithSnapshotFeature.java    From hadoop with Apache License 2.0
/**
 * Clean an inode while we move it from the deleted list of post to the
 * deleted list of prior.
 * @param bsps The block storage policy suite.
 * @param inode The inode to clean.
 * @param post The post snapshot.
 * @param prior The id of the prior snapshot.
 * @param collectedBlocks Used to collect blocks for later deletion.
 * @return Quota usage update.
 */
private static QuotaCounts cleanDeletedINode(
    final BlockStoragePolicySuite bsps, INode inode,
    final int post, final int prior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  Deque<INode> queue = new ArrayDeque<INode>();
  queue.addLast(inode);
  while (!queue.isEmpty()) {
    INode topNode = queue.pollFirst();
    if (topNode instanceof INodeReference.WithName) {
      INodeReference.WithName wn = (INodeReference.WithName) topNode;
      if (wn.getLastSnapshotId() >= post) {
        INodeReference.WithCount wc =
            (INodeReference.WithCount) wn.getReferredINode();
        if (wc.getLastWithName() == wn && wc.getParentReference() == null) {
          // this wn is the last wn inside of the wc, also the dstRef node has
          // been deleted. In this case, we should treat the referred file/dir
          // as normal case
          queue.add(wc.getReferredINode());
        } else {
          wn.cleanSubtree(bsps, post, prior, collectedBlocks, removedINodes);
        }
      }
      // For DstReference node, since the node is not in the created list of
      // prior, we should treat it as regular file/dir
    } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
      INodeFile file = topNode.asFile();
      counts.add(file.getDiffs().deleteSnapshotDiff(bsps, post, prior, file,
          collectedBlocks, removedINodes));
    } else if (topNode.isDirectory()) {
      INodeDirectory dir = topNode.asDirectory();
      ChildrenDiff priorChildrenDiff = null;
      DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
      if (sf != null) {
        // delete files/dirs created after prior. Note that these
        // files/dirs, along with inode, were deleted right after post.
        DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
        if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
          priorChildrenDiff = priorDiff.getChildrenDiff();
          counts.add(priorChildrenDiff.destroyCreatedList(bsps, dir,
              collectedBlocks, removedINodes));
        }
      }
      
      for (INode child : dir.getChildrenList(prior)) {
        if (priorChildrenDiff != null
            && priorChildrenDiff.search(ListType.DELETED,
                child.getLocalNameBytes()) != null) {
          continue;
        }
        queue.addLast(child);
      }
    }
  }
  return counts;
}
 
Example 9
Source File: CacheReplicationMonitor.java    From hadoop with Apache License 2.0
/**
 * Scan all CacheDirectives.  Use the information to figure out
 * what cache replication factor each block should have.
 */
private void rescanCacheDirectives() {
  FSDirectory fsDir = namesystem.getFSDirectory();
  final long now = new Date().getTime();
  for (CacheDirective directive : cacheManager.getCacheDirectives()) {
    scannedDirectives++;
    // Skip processing this entry if it has expired
    if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
      LOG.debug("Directive {}: the directive expired at {} (now = {})",
           directive.getId(), directive.getExpiryTime(), now);
      continue;
    }
    String path = directive.getPath();
    INode node;
    try {
      node = fsDir.getINode(path);
    } catch (UnresolvedLinkException e) {
      // We don't cache through symlinks
      LOG.debug("Directive {}: got UnresolvedLinkException while resolving "
              + "path {}", directive.getId(), path
      );
      continue;
    }
    if (node == null)  {
      LOG.debug("Directive {}: No inode found at {}", directive.getId(),
          path);
    } else if (node.isDirectory()) {
      INodeDirectory dir = node.asDirectory();
      ReadOnlyList<INode> children = dir
          .getChildrenList(Snapshot.CURRENT_STATE_ID);
      for (INode child : children) {
        if (child.isFile()) {
          rescanFile(directive, child.asFile());
        }
      }
    } else if (node.isFile()) {
      rescanFile(directive, node.asFile());
    } else {
      LOG.debug("Directive {}: ignoring non-directive, non-file inode {} ",
          directive.getId(), node);
    }
  }
}
 
Example 10
Source File: DirectoryWithSnapshotFeature.java    From big-c with Apache License 2.0
/**
 * Destroy a subtree under a DstReference node.
 */
public static void destroyDstSubtree(
    final BlockStoragePolicySuite bsps, INode inode, final int snapshot,
    final int prior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) throws QuotaExceededException {
  Preconditions.checkArgument(prior != Snapshot.NO_SNAPSHOT_ID);
  if (inode.isReference()) {
    if (inode instanceof INodeReference.WithName
        && snapshot != Snapshot.CURRENT_STATE_ID) {
      // this inode has been renamed before the deletion of the DstReference
      // subtree
      inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
    } else { 
      // for DstReference node, continue this process to its subtree
      destroyDstSubtree(bsps, inode.asReference().getReferredINode(), snapshot,
          prior, collectedBlocks, removedINodes);
    }
  } else if (inode.isFile()) {
    inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
  } else if (inode.isDirectory()) {
    Map<INode, INode> excludedNodes = null;
    INodeDirectory dir = inode.asDirectory();
    DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
    if (sf != null) {
      DirectoryDiffList diffList = sf.getDiffs();
      DirectoryDiff priorDiff = diffList.getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
        excludedNodes = cloneDiffList(dList);
      }
      
      if (snapshot != Snapshot.CURRENT_STATE_ID) {
        diffList.deleteSnapshotDiff(bsps, snapshot, prior, dir, collectedBlocks,
            removedINodes);
      }
      priorDiff = diffList.getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        priorDiff.diff.destroyCreatedList(bsps, dir, collectedBlocks,
            removedINodes);
      }
    }
    for (INode child : inode.asDirectory().getChildrenList(prior)) {
      if (excludedNodes != null && excludedNodes.containsKey(child)) {
        continue;
      }
      destroyDstSubtree(bsps, child, snapshot, prior, collectedBlocks,
          removedINodes);
    }
  }
}
 
Example 11
Source File: DirectoryWithSnapshotFeature.java    From big-c with Apache License 2.0
/**
 * Clean an inode while we move it from the deleted list of post to the
 * deleted list of prior.
 * @param bsps The block storage policy suite.
 * @param inode The inode to clean.
 * @param post The post snapshot.
 * @param prior The id of the prior snapshot.
 * @param collectedBlocks Used to collect blocks for later deletion.
 * @return Quota usage update.
 */
private static QuotaCounts cleanDeletedINode(
    final BlockStoragePolicySuite bsps, INode inode,
    final int post, final int prior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  Deque<INode> queue = new ArrayDeque<INode>();
  queue.addLast(inode);
  while (!queue.isEmpty()) {
    INode topNode = queue.pollFirst();
    if (topNode instanceof INodeReference.WithName) {
      INodeReference.WithName wn = (INodeReference.WithName) topNode;
      if (wn.getLastSnapshotId() >= post) {
        wn.cleanSubtree(bsps, post, prior, collectedBlocks, removedINodes);
      }
      // For DstReference node, since the node is not in the created list of
      // prior, we should treat it as regular file/dir
    } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
      INodeFile file = topNode.asFile();
      counts.add(file.getDiffs().deleteSnapshotDiff(bsps, post, prior, file,
          collectedBlocks, removedINodes));
    } else if (topNode.isDirectory()) {
      INodeDirectory dir = topNode.asDirectory();
      ChildrenDiff priorChildrenDiff = null;
      DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
      if (sf != null) {
        // delete files/dirs created after prior. Note that these
        // files/dirs, along with inode, were deleted right after post.
        DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
        if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
          priorChildrenDiff = priorDiff.getChildrenDiff();
          counts.add(priorChildrenDiff.destroyCreatedList(bsps, dir,
              collectedBlocks, removedINodes));
        }
      }
      
      for (INode child : dir.getChildrenList(prior)) {
        if (priorChildrenDiff != null
            && priorChildrenDiff.search(ListType.DELETED,
                child.getLocalNameBytes()) != null) {
          continue;
        }
        queue.addLast(child);
      }
    }
  }
  return counts;
}
 
Example 12
Source File: CacheReplicationMonitor.java    From big-c with Apache License 2.0
/**
 * Scan all CacheDirectives.  Use the information to figure out
 * what cache replication factor each block should have.
 */
private void rescanCacheDirectives() {
  FSDirectory fsDir = namesystem.getFSDirectory();
  final long now = new Date().getTime();
  for (CacheDirective directive : cacheManager.getCacheDirectives()) {
    scannedDirectives++;
    // Skip processing this entry if it has expired
    if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
      LOG.debug("Directive {}: the directive expired at {} (now = {})",
           directive.getId(), directive.getExpiryTime(), now);
      continue;
    }
    String path = directive.getPath();
    INode node;
    try {
      node = fsDir.getINode(path);
    } catch (UnresolvedLinkException e) {
      // We don't cache through symlinks
      LOG.debug("Directive {}: got UnresolvedLinkException while resolving "
              + "path {}", directive.getId(), path
      );
      continue;
    }
    if (node == null)  {
      LOG.debug("Directive {}: No inode found at {}", directive.getId(),
          path);
    } else if (node.isDirectory()) {
      INodeDirectory dir = node.asDirectory();
      ReadOnlyList<INode> children = dir
          .getChildrenList(Snapshot.CURRENT_STATE_ID);
      for (INode child : children) {
        if (child.isFile()) {
          rescanFile(directive, child.asFile());
        }
      }
    } else if (node.isFile()) {
      rescanFile(directive, node.asFile());
    } else {
      LOG.debug("Directive {}: ignoring non-directive, non-file inode {} ",
          directive.getId(), node);
    }
  }
}