org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.
Each example notes its source file, originating project (hadoop or big-c), license, and a community vote count in the header above the snippet.
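Almost every example below hinges on two sentinel snapshot ids defined by the Snapshot class: CURRENT_STATE_ID (operate on the current state of the namespace) and NO_SNAPSHOT_ID (no snapshot applies). The following sketch is not taken from either project; it is a minimal, self-contained illustration of that dispatch pattern. The class SnapshotIdDemo, its getOwner method, and the constant values are made up for demonstration, since the real Snapshot class is internal to the NameNode.

// Minimal sketch of the id-dispatch pattern used throughout the examples below.
// CURRENT_STATE_ID / NO_SNAPSHOT_ID are stand-ins for the fields on the real
// Snapshot class; SnapshotIdDemo and getOwner are hypothetical.
import java.util.HashMap;
import java.util.Map;

public class SnapshotIdDemo {
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE; // assumed stand-in value
  static final int NO_SNAPSHOT_ID = -1;                   // assumed stand-in value

  // Current value of an attribute, plus per-snapshot copies captured earlier.
  private String owner = "hdfs";
  private final Map<Integer, String> snapshotOwners = new HashMap<>();

  // Same shape as getUserName/getAclFeature/getXAttrFeature in the examples:
  // any id other than CURRENT_STATE_ID selects the snapshot copy.
  String getOwner(int snapshotId) {
    if (snapshotId != CURRENT_STATE_ID) {
      return snapshotOwners.getOrDefault(snapshotId, owner);
    }
    return owner;
  }

  public static void main(String[] args) {
    SnapshotIdDemo demo = new SnapshotIdDemo();
    demo.snapshotOwners.put(1, "alice");   // value captured in snapshot 1
    demo.owner = "bob";                    // current value changed afterwards
    System.out.println(demo.getOwner(1));                // alice
    System.out.println(demo.getOwner(CURRENT_STATE_ID)); // bob
  }
}

With that pattern in mind, methods such as getUserName(int snapshotId) or getAclFeature(int snapshotId) in the examples below read naturally: an id other than Snapshot.CURRENT_STATE_ID redirects the lookup to the corresponding snapshot copy of the inode.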
Example #1
Source File: INodeReference.java From hadoop with Apache License 2.0 | 6 votes |
@Override
public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
    byte blockStoragePolicyId, QuotaCounts counts, boolean useCache,
    int lastSnapshotId) {
  // if this.lastSnapshotId < lastSnapshotId, the rename of the referred
  // node happened before the rename of its ancestor. This should be
  // impossible since for WithName node we only count its children at the
  // time of the rename.
  Preconditions.checkState(lastSnapshotId == Snapshot.CURRENT_STATE_ID
      || this.lastSnapshotId >= lastSnapshotId);
  final INode referred = this.getReferredINode().asReference()
      .getReferredINode();
  // We will continue the quota usage computation using the same snapshot id
  // as time line (if the given snapshot id is valid). Also, we cannot use
  // cache for the referred node since its cached quota may have already
  // been updated by changes in the current tree.
  int id = lastSnapshotId != Snapshot.CURRENT_STATE_ID ?
      lastSnapshotId : this.lastSnapshotId;
  return referred.computeQuotaUsage(bsps, blockStoragePolicyId, counts,
      false, id);
}
Example #2
Source File: FSDirRenameOp.java From hadoop with Apache License 2.0 | 6 votes |
boolean cleanDst(BlockStoragePolicySuite bsps,
    BlocksMapUpdateInfo collectedBlocks) throws QuotaExceededException {
  Preconditions.checkState(oldDstChild != null);
  List<INode> removedINodes = new ChunkedArrayList<>();
  final boolean filesDeleted;
  if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
    oldDstChild.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    filesDeleted = true;
  } else {
    filesDeleted = oldDstChild.cleanSubtree(bsps, Snapshot.CURRENT_STATE_ID,
        dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes)
        .getNameSpace() >= 0;
  }
  fsd.getFSNamesystem().removeLeasesAndINodes(src, removedINodes, false);
  return filesDeleted;
}
Example #3
Source File: INodeDirectory.java From big-c with Apache License 2.0 | 6 votes |
/** Call cleanSubtree(..) recursively down the subtree. */
public QuotaCounts cleanSubtreeRecursively(final BlockStoragePolicySuite bsps,
    final int snapshot, int prior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes, final Map<INode, INode> excludedNodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  // in case of deletion snapshot, since this call happens after we modify
  // the diff list, the snapshot to be deleted has been combined or renamed
  // to its latest previous snapshot. (besides, we also need to consider nodes
  // created after prior but before snapshot. this will be done in
  // DirectoryWithSnapshotFeature)
  int s = snapshot != Snapshot.CURRENT_STATE_ID
      && prior != Snapshot.NO_SNAPSHOT_ID ? prior : snapshot;
  for (INode child : getChildrenList(s)) {
    if (snapshot != Snapshot.CURRENT_STATE_ID && excludedNodes != null
        && excludedNodes.containsKey(child)) {
      continue;
    } else {
      QuotaCounts childCounts = child.cleanSubtree(bsps, snapshot, prior,
          collectedBlocks, removedINodes);
      counts.add(childCounts);
    }
  }
  return counts;
}
Example #4
Source File: FSDirSnapshotOp.java From big-c with Apache License 2.0 | 6 votes |
/**
 * Check if the given INode (or one of its descendants) is snapshottable and
 * already has snapshots.
 *
 * @param target The given INode
 * @param snapshottableDirs The list of directories that are snapshottable
 *                          but do not have snapshots yet
 */
static void checkSnapshot(
    INode target, List<INodeDirectory> snapshottableDirs)
    throws SnapshotException {
  if (target.isDirectory()) {
    INodeDirectory targetDir = target.asDirectory();
    DirectorySnapshottableFeature sf = targetDir
        .getDirectorySnapshottableFeature();
    if (sf != null) {
      if (sf.getNumSnapshots() > 0) {
        String fullPath = targetDir.getFullPathName();
        throw new SnapshotException("The directory " + fullPath
            + " cannot be deleted since " + fullPath
            + " is snapshottable and already has snapshots");
      } else {
        if (snapshottableDirs != null) {
          snapshottableDirs.add(targetDir);
        }
      }
    }
    for (INode child : targetDir.getChildrenList(Snapshot.CURRENT_STATE_ID)) {
      checkSnapshot(child, snapshottableDirs);
    }
  }
}
Example #5
Source File: INodeReference.java From hadoop with Apache License 2.0 | 6 votes |
private int getSelfSnapshot(final int prior) {
  WithCount wc = (WithCount) getReferredINode().asReference();
  INode referred = wc.getReferredINode();
  int lastSnapshot = Snapshot.CURRENT_STATE_ID;
  if (referred.isFile() && referred.asFile().isWithSnapshot()) {
    lastSnapshot = referred.asFile().getDiffs().getLastSnapshotId();
  } else if (referred.isDirectory()) {
    DirectoryWithSnapshotFeature sf = referred.asDirectory()
        .getDirectoryWithSnapshotFeature();
    if (sf != null) {
      lastSnapshot = sf.getLastSnapshotId();
    }
  }
  if (lastSnapshot != Snapshot.CURRENT_STATE_ID && lastSnapshot != prior) {
    return lastSnapshot;
  } else {
    return Snapshot.CURRENT_STATE_ID;
  }
}
Example #6
Source File: INodeDirectory.java From big-c with Apache License 2.0 | 6 votes |
@Override
public void destroyAndCollectBlocks(final BlockStoragePolicySuite bsps,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
  if (sf != null) {
    sf.clear(bsps, this, collectedBlocks, removedINodes);
  }
  for (INode child : getChildrenList(Snapshot.CURRENT_STATE_ID)) {
    child.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
  }
  if (getAclFeature() != null) {
    AclStorage.removeAclFeature(getAclFeature());
  }
  clear();
  removedINodes.add(this);
}
Example #7
Source File: INodeDirectory.java From big-c with Apache License 2.0 | 6 votes |
/**
 * Undo the rename operation for the dst tree, i.e., if the rename operation
 * (with OVERWRITE option) removes a file/dir from the dst tree, add it back
 * and delete possible record in the deleted list.
 */
public void undoRename4DstParent(final BlockStoragePolicySuite bsps,
    final INode deletedChild, int latestSnapshotId)
    throws QuotaExceededException {
  DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
  Preconditions.checkState(sf != null,
      "Directory does not have snapshot feature");
  boolean removeDeletedChild = sf.getDiffs().removeChild(ListType.DELETED,
      deletedChild);
  int sid = removeDeletedChild ? Snapshot.CURRENT_STATE_ID : latestSnapshotId;
  final boolean added = addChild(deletedChild, true, sid);
  // update quota usage if adding succeeds and the old child has not
  // been stored in the deleted list before
  if (added && !removeDeletedChild) {
    final QuotaCounts counts = deletedChild.computeQuotaUsage(bsps);
    addSpaceConsumed(counts, false);
  }
}
Example #8
Source File: TestINodeFile.java From big-c with Apache License 2.0 | 6 votes |
/**
 * For a given path, build a tree of INodes and return the leaf node.
 */
private INode createTreeOfInodes(String path) throws QuotaExceededException {
  byte[][] components = INode.getPathComponents(path);
  FsPermission perm = FsPermission.createImmutable((short)0755);
  PermissionStatus permstatus = PermissionStatus.createImmutable("", "", perm);

  long id = 0;
  INodeDirectory prev = new INodeDirectory(++id, new byte[0], permstatus, 0);
  INodeDirectory dir = null;
  for (byte[] component : components) {
    if (component.length == 0) {
      continue;
    }
    System.out.println("Adding component " + DFSUtil.bytes2String(component));
    dir = new INodeDirectory(++id, component, permstatus, 0);
    prev.addChild(dir, false, Snapshot.CURRENT_STATE_ID);
    prev = dir;
  }
  return dir; // Last Inode in the chain
}
Example #9
Source File: INodeReference.java From hadoop with Apache License 2.0 | 6 votes |
/**
 * When destroying a reference node (WithName or DstReference), we call this
 * method to identify the snapshot which is the latest snapshot before the
 * reference node's creation.
 */
static int getPriorSnapshot(INodeReference ref) {
  WithCount wc = (WithCount) ref.getReferredINode();
  WithName wn = null;
  if (ref instanceof DstReference) {
    wn = wc.getLastWithName();
  } else if (ref instanceof WithName) {
    wn = wc.getPriorWithName((WithName) ref);
  }
  if (wn != null) {
    INode referred = wc.getReferredINode();
    if (referred.isFile() && referred.asFile().isWithSnapshot()) {
      return referred.asFile().getDiffs().getPrior(wn.lastSnapshotId);
    } else if (referred.isDirectory()) {
      DirectoryWithSnapshotFeature sf = referred.asDirectory()
          .getDirectoryWithSnapshotFeature();
      if (sf != null) {
        return sf.getDiffs().getPrior(wn.lastSnapshotId);
      }
    }
  }
  return Snapshot.NO_SNAPSHOT_ID;
}
Example #10
Source File: INode.java From big-c with Apache License 2.0 | 6 votes |
/**
 * When {@link #recordModification} is called on a referred node,
 * this method tells which snapshot the modification should be
 * associated with: the snapshot that belongs to the SRC tree of the rename
 * operation, or the snapshot belonging to the DST tree.
 *
 * @param latestInDst
 *          id of the latest snapshot in the DST tree above the reference node
 * @return True: the modification should be recorded in the snapshot that
 *         belongs to the SRC tree. False: the modification should be
 *         recorded in the snapshot that belongs to the DST tree.
 */
public final boolean shouldRecordInSrcSnapshot(final int latestInDst) {
  Preconditions.checkState(!isReference());
  if (latestInDst == Snapshot.CURRENT_STATE_ID) {
    return true;
  }
  INodeReference withCount = getParentReference();
  if (withCount != null) {
    int dstSnapshotId = withCount.getParentReference().getDstSnapshotId();
    if (dstSnapshotId != Snapshot.CURRENT_STATE_ID
        && dstSnapshotId >= latestInDst) {
      return true;
    }
  }
  return false;
}
Example #11
Source File: FSDirRenameOp.java From big-c with Apache License 2.0 | 6 votes |
boolean cleanDst(BlockStoragePolicySuite bsps,
    BlocksMapUpdateInfo collectedBlocks) throws QuotaExceededException {
  Preconditions.checkState(oldDstChild != null);
  List<INode> removedINodes = new ChunkedArrayList<>();
  final boolean filesDeleted;
  if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
    oldDstChild.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    filesDeleted = true;
  } else {
    filesDeleted = oldDstChild.cleanSubtree(bsps, Snapshot.CURRENT_STATE_ID,
        dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes)
        .getNameSpace() >= 0;
  }
  fsd.getFSNamesystem().removeLeasesAndINodes(src, removedINodes, false);
  return filesDeleted;
}
Example #12
Source File: INodeDirectory.java From hadoop with Apache License 2.0 | 6 votes |
INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
    int latestSnapshotId) {
  Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID);
  if (oldChild instanceof INodeReference.WithName) {
    return (INodeReference.WithName)oldChild;
  }

  final INodeReference.WithCount withCount;
  if (oldChild.isReference()) {
    Preconditions.checkState(oldChild instanceof INodeReference.DstReference);
    withCount = (INodeReference.WithCount) oldChild.asReference()
        .getReferredINode();
  } else {
    withCount = new INodeReference.WithCount(null, oldChild);
  }
  final INodeReference.WithName ref = new INodeReference.WithName(this,
      withCount, oldChild.getLocalNameBytes(), latestSnapshotId);
  replaceChild(oldChild, ref, null);
  return ref;
}
Example #13
Source File: FSDirSnapshotOp.java From hadoop with Apache License 2.0 | 6 votes |
/**
 * Check if the given INode (or one of its descendants) is snapshottable and
 * already has snapshots.
 *
 * @param target The given INode
 * @param snapshottableDirs The list of directories that are snapshottable
 *                          but do not have snapshots yet
 */
static void checkSnapshot(
    INode target, List<INodeDirectory> snapshottableDirs)
    throws SnapshotException {
  if (target.isDirectory()) {
    INodeDirectory targetDir = target.asDirectory();
    DirectorySnapshottableFeature sf = targetDir
        .getDirectorySnapshottableFeature();
    if (sf != null) {
      if (sf.getNumSnapshots() > 0) {
        String fullPath = targetDir.getFullPathName();
        throw new SnapshotException("The directory " + fullPath
            + " cannot be deleted since " + fullPath
            + " is snapshottable and already has snapshots");
      } else {
        if (snapshottableDirs != null) {
          snapshottableDirs.add(targetDir);
        }
      }
    }
    for (INode child : targetDir.getChildrenList(Snapshot.CURRENT_STATE_ID)) {
      checkSnapshot(child, snapshottableDirs);
    }
  }
}
Example #14
Source File: INodeReference.java From hadoop with Apache License 2.0 | 5 votes |
@Override
public void destroyAndCollectBlocks(BlockStoragePolicySuite bsps,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  int snapshot = getSelfSnapshot();
  if (removeReference(this) <= 0) {
    getReferredINode().destroyAndCollectBlocks(bsps, collectedBlocks,
        removedINodes);
  } else {
    int prior = getPriorSnapshot(this);
    INode referred = getReferredINode().asReference().getReferredINode();
    if (snapshot != Snapshot.NO_SNAPSHOT_ID) {
      if (prior != Snapshot.NO_SNAPSHOT_ID && snapshot <= prior) {
        // the snapshot to be deleted has been deleted while traversing
        // the src tree of the previous rename operation. This usually
        // happens when rename's src and dst are under the same
        // snapshottable directory. E.g., the following operation sequence:
        // 1. create snapshot s1 on /test
        // 2. rename /test/foo/bar to /test/foo2/bar
        // 3. create snapshot s2 on /test
        // 4. rename foo2 again
        // 5. delete snapshot s2
        return;
      }
      try {
        QuotaCounts counts = referred.cleanSubtree(bsps, snapshot, prior,
            collectedBlocks, removedINodes);
        INodeReference ref = getReferredINode().getParentReference();
        if (ref != null) {
          ref.addSpaceConsumed(counts.negation(), true);
        }
      } catch (QuotaExceededException e) {
        LOG.error("should not exceed quota while snapshot deletion", e);
      }
    }
  }
}
Example #15
Source File: FSImageFormatPBINode.java From big-c with Apache License 2.0 | 5 votes |
void serializeINodeDirectorySection(OutputStream out) throws IOException {
  Iterator<INodeWithAdditionalFields> iter = fsn.getFSDirectory()
      .getINodeMap().getMapIterator();
  final ArrayList<INodeReference> refList = parent.getSaverContext()
      .getRefList();
  int i = 0;
  while (iter.hasNext()) {
    INodeWithAdditionalFields n = iter.next();
    if (!n.isDirectory()) {
      continue;
    }

    ReadOnlyList<INode> children = n.asDirectory().getChildrenList(
        Snapshot.CURRENT_STATE_ID);
    if (children.size() > 0) {
      INodeDirectorySection.DirEntry.Builder b = INodeDirectorySection.
          DirEntry.newBuilder().setParent(n.getId());
      for (INode inode : children) {
        if (!inode.isReference()) {
          b.addChildren(inode.getId());
        } else {
          refList.add(inode.asReference());
          b.addRefChildren(refList.size() - 1);
        }
      }
      INodeDirectorySection.DirEntry e = b.build();
      e.writeDelimitedTo(out);
    }

    ++i;
    if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(summary, FSImageFormatProtobuf.SectionName.INODE_DIR);
}
Example #16
Source File: CacheManager.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Computes the needed number of bytes and files for a path.
 * @return CacheDirectiveStats describing the needed stats for this path
 */
private CacheDirectiveStats computeNeeded(String path, short replication) {
  FSDirectory fsDir = namesystem.getFSDirectory();
  INode node;
  long requestedBytes = 0;
  long requestedFiles = 0;
  CacheDirectiveStats.Builder builder = new CacheDirectiveStats.Builder();
  try {
    node = fsDir.getINode(path);
  } catch (UnresolvedLinkException e) {
    // We don't cache through symlinks
    return builder.build();
  }
  if (node == null) {
    return builder.build();
  }
  if (node.isFile()) {
    requestedFiles = 1;
    INodeFile file = node.asFile();
    requestedBytes = file.computeFileSize();
  } else if (node.isDirectory()) {
    INodeDirectory dir = node.asDirectory();
    ReadOnlyList<INode> children = dir
        .getChildrenList(Snapshot.CURRENT_STATE_ID);
    requestedFiles = children.size();
    for (INode child : children) {
      if (child.isFile()) {
        requestedBytes += child.asFile().computeFileSize();
      }
    }
  }
  return new CacheDirectiveStats.Builder()
      .setBytesNeeded(requestedBytes)
      .setFilesCached(requestedFiles)
      .build();
}
Example #17
Source File: INodeWithAdditionalFields.java From big-c with Apache License 2.0 | 5 votes |
@Override
final String getUserName(int snapshotId) {
  if (snapshotId != Snapshot.CURRENT_STATE_ID) {
    return getSnapshotINode(snapshotId).getUserName();
  }
  return PermissionStatusFormat.getUser(permission);
}
Example #18
Source File: FSImageFormatPBINode.java From hadoop with Apache License 2.0 | 5 votes |
void serializeINodeDirectorySection(OutputStream out) throws IOException {
  Iterator<INodeWithAdditionalFields> iter = fsn.getFSDirectory()
      .getINodeMap().getMapIterator();
  final ArrayList<INodeReference> refList = parent.getSaverContext()
      .getRefList();
  int i = 0;
  while (iter.hasNext()) {
    INodeWithAdditionalFields n = iter.next();
    if (!n.isDirectory()) {
      continue;
    }

    ReadOnlyList<INode> children = n.asDirectory().getChildrenList(
        Snapshot.CURRENT_STATE_ID);
    if (children.size() > 0) {
      INodeDirectorySection.DirEntry.Builder b = INodeDirectorySection.
          DirEntry.newBuilder().setParent(n.getId());
      for (INode inode : children) {
        if (!inode.isReference()) {
          b.addChildren(inode.getId());
        } else {
          refList.add(inode.asReference());
          b.addRefChildren(refList.size() - 1);
        }
      }
      INodeDirectorySection.DirEntry e = b.build();
      e.writeDelimitedTo(out);
    }

    ++i;
    if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(summary, FSImageFormatProtobuf.SectionName.INODE_DIR);
}
Example #19
Source File: FSDirSnapshotOp.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Create a snapshot
 * @param snapshotRoot The directory path where the snapshot is taken
 * @param snapshotName The name of the snapshot
 */
static String createSnapshot(
    FSDirectory fsd, SnapshotManager snapshotManager, String snapshotRoot,
    String snapshotName, boolean logRetryCache) throws IOException {
  final INodesInPath iip = fsd.getINodesInPath4Write(snapshotRoot);
  if (fsd.isPermissionEnabled()) {
    FSPermissionChecker pc = fsd.getPermissionChecker();
    fsd.checkOwner(pc, iip);
  }

  if (snapshotName == null || snapshotName.isEmpty()) {
    snapshotName = Snapshot.generateDefaultSnapshotName();
  } else if (!DFSUtil.isValidNameForComponent(snapshotName)) {
    throw new InvalidPathException("Invalid snapshot name: " + snapshotName);
  }

  String snapshotPath = null;
  verifySnapshotName(fsd, snapshotName, snapshotRoot);
  fsd.writeLock();
  try {
    snapshotPath = snapshotManager.createSnapshot(iip, snapshotRoot,
        snapshotName);
  } finally {
    fsd.writeUnlock();
  }
  fsd.getEditLog().logCreateSnapshot(snapshotRoot, snapshotName,
      logRetryCache);
  return snapshotPath;
}
Example #20
Source File: INodeWithAdditionalFields.java From big-c with Apache License 2.0 | 5 votes |
@Override
public final AclFeature getAclFeature(int snapshotId) {
  if (snapshotId != Snapshot.CURRENT_STATE_ID) {
    return getSnapshotINode(snapshotId).getAclFeature();
  }
  return getFeature(AclFeature.class);
}
Example #21
Source File: INodeWithAdditionalFields.java From hadoop with Apache License 2.0 | 5 votes |
@Override
XAttrFeature getXAttrFeature(int snapshotId) {
  if (snapshotId != Snapshot.CURRENT_STATE_ID) {
    return getSnapshotINode(snapshotId).getXAttrFeature();
  }
  return getFeature(XAttrFeature.class);
}
Example #22
Source File: INode.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Count subtree {@link Quota#NAMESPACE} and {@link Quota#STORAGESPACE} usages.
 * Entry point for FSDirectory where blockStoragePolicyId is given its initial
 * value.
 */
public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps) {
  final byte storagePolicyId = isSymlink() ?
      BlockStoragePolicySuite.ID_UNSPECIFIED : getStoragePolicyID();
  return computeQuotaUsage(bsps, storagePolicyId,
      new QuotaCounts.Builder().build(), true, Snapshot.CURRENT_STATE_ID);
}
Example #23
Source File: INode.java From hadoop with Apache License 2.0 | 5 votes |
public final QuotaCounts computeQuotaUsage(
    BlockStoragePolicySuite bsps, QuotaCounts counts, boolean useCache) {
  final byte storagePolicyId = isSymlink() ?
      BlockStoragePolicySuite.ID_UNSPECIFIED : getStoragePolicyID();
  return computeQuotaUsage(bsps, storagePolicyId, counts,
      useCache, Snapshot.CURRENT_STATE_ID);
}
Example #24
Source File: INode.java From big-c with Apache License 2.0 | 5 votes |
public final QuotaCounts computeQuotaUsage(
    BlockStoragePolicySuite bsps, QuotaCounts counts, boolean useCache) {
  final byte storagePolicyId = isSymlink() ?
      BlockStoragePolicySuite.ID_UNSPECIFIED : getStoragePolicyID();
  return computeQuotaUsage(bsps, storagePolicyId, counts,
      useCache, Snapshot.CURRENT_STATE_ID);
}
Example #25
Source File: INode.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Dump the subtree starting from this inode.
 * @return a text representation of the tree.
 */
@VisibleForTesting
public final StringBuffer dumpTreeRecursively() {
  final StringWriter out = new StringWriter();
  dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(),
      Snapshot.CURRENT_STATE_ID);
  return out.getBuffer();
}
Example #26
Source File: INodeDirectory.java From hadoop with Apache License 2.0 | 5 votes |
@Override
public ContentSummaryComputationContext computeContentSummary(
    ContentSummaryComputationContext summary) {
  final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
  if (sf != null) {
    sf.computeContentSummary4Snapshot(summary.getBlockStoragePolicySuite(),
        summary.getCounts());
  }
  final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
  if (q != null) {
    return q.computeContentSummary(this, summary);
  } else {
    return computeDirectoryContentSummary(summary, Snapshot.CURRENT_STATE_ID);
  }
}
Example #27
Source File: INodeWithAdditionalFields.java From big-c with Apache License 2.0 | 5 votes |
@Override
final String getGroupName(int snapshotId) {
  if (snapshotId != Snapshot.CURRENT_STATE_ID) {
    return getSnapshotINode(snapshotId).getGroupName();
  }
  return PermissionStatusFormat.getGroup(permission);
}
Example #28
Source File: INodeDirectory.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Save the child to the latest snapshot.
 *
 * @return the child inode, which may be replaced.
 */
public INode saveChild2Snapshot(final INode child, final int latestSnapshotId,
    final INode snapshotCopy) {
  if (latestSnapshotId == Snapshot.CURRENT_STATE_ID) {
    return child;
  }

  // add snapshot feature if necessary
  DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
  if (sf == null) {
    sf = this.addSnapshotFeature(null);
  }
  return sf.saveChild2Snapshot(this, child, latestSnapshotId, snapshotCopy);
}
Example #29
Source File: INodeDirectory.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * @param name the name of the child
 * @param snapshotId
 *          if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
 *          from the corresponding snapshot; otherwise, get the result from
 *          the current directory.
 * @return the child inode.
 */
public INode getChild(byte[] name, int snapshotId) {
  DirectoryWithSnapshotFeature sf;
  if (snapshotId == Snapshot.CURRENT_STATE_ID
      || (sf = getDirectoryWithSnapshotFeature()) == null) {
    ReadOnlyList<INode> c = getCurrentChildrenList();
    final int i = ReadOnlyList.Util.binarySearch(c, name);
    return i < 0 ? null : c.get(i);
  }

  return sf.getChild(this, name, snapshotId);
}
Example #30
Source File: INodeDirectory.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Search for the given INode in the children list and the deleted lists of
 * snapshots.
 * @return {@link Snapshot#CURRENT_STATE_ID} if the inode is in the children
 *         list; {@link Snapshot#NO_SNAPSHOT_ID} if the inode is neither in
 *         the children list nor in any snapshot; otherwise the snapshot id
 *         of the corresponding snapshot diff list.
 */
public int searchChild(INode inode) {
  INode child = getChild(inode.getLocalNameBytes(), Snapshot.CURRENT_STATE_ID);
  if (child != inode) {
    // inode is not in parent's children list, thus inode must be in
    // snapshot. identify the snapshot id and later add it into the path
    DirectoryDiffList diffs = getDiffs();
    if (diffs == null) {
      return Snapshot.NO_SNAPSHOT_ID;
    }
    return diffs.findSnapshotDeleted(inode);
  } else {
    return Snapshot.CURRENT_STATE_ID;
  }
}