Java Code Examples for org.apache.hadoop.hdfs.server.namenode.INodeDirectory

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.INodeDirectory. They are extracted from open source projects; the source project, source file, and license are listed above each example.
Example 1
Source Project: NNAnalytics   Source File: Helper.java    License: Apache License 2.0
/**
 * Returns a function that maps an inode to the full path of its ancestor
 * directory at the given depth, or to "NO_MAPPING" if the inode sits
 * shallower than that depth.
 *
 * @param dirDepth the depth of the ancestor directory to fetch
 * @return a function from an INode to the ancestor directory's full path
 */
public static Function<INode, String> getDirectoryAtDepthFunction(int dirDepth) {
  return node -> {
    try {
      INodeDirectory parent = node.getParent();
      int topParentDepth = new Path(parent.getFullPathName()).depth();
      if (topParentDepth < dirDepth) {
        return "NO_MAPPING";
      }
      for (int parentTravs = topParentDepth; parentTravs > dirDepth; parentTravs--) {
        parent = parent.getParent();
      }
      return parent.getFullPathName().intern();
    } catch (Exception e) {
      return "NO_MAPPING";
    }
  };
}
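
The returned Function composes naturally with the Java streams API. A minimal usage sketch, assuming a Collection<INode> inodes gathered elsewhere (the collection and variable names are hypothetical, not part of NNAnalytics):

// Group inodes by their ancestor directory at depth 2 and count them;
// inodes whose parent chain is shallower than depth 2 land under "NO_MAPPING".
Function<INode, String> atDepth2 = Helper.getDirectoryAtDepthFunction(2);
Map<String, Long> countsByDir = inodes.stream()
    .collect(Collectors.groupingBy(atDepth2, Collectors.counting()));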
 
Example 2
Source Project: big-c   Source File: SnapshotFSImageFormat.java    License: Apache License 2.0
/**
 * Load a node stored in the created list from fsimage.
 * @param createdNodeName The name of the created node.
 * @param parent The directory that the created list belongs to.
 * @return The created node.
 */
public static INode loadCreated(byte[] createdNodeName,
    INodeDirectory parent) throws IOException {
  // the INode in the created list should be a reference to another INode
  // in posterior SnapshotDiffs or one of the current children
  for (DirectoryDiff postDiff : parent.getDiffs()) {
    final INode d = postDiff.getChildrenDiff().search(ListType.DELETED,
        createdNodeName);
    if (d != null) {
      return d;
    } // else go to the next SnapshotDiff
  } 
  // use the current child
  INode currentChild = parent.getChild(createdNodeName,
      Snapshot.CURRENT_STATE_ID);
  if (currentChild == null) {
    throw new IOException("Cannot find an INode associated with the INode "
        + DFSUtil.bytes2String(createdNodeName)
        + " in created list while loading FSImage.");
  }
  return currentChild;
}
 
Example 3
Source Project: hadoop   Source File: Snapshot.java    License: Apache License 2.0
Root(INodeDirectory other) {
  // Always preserve ACL, XAttr.
  super(other, false, Lists.newArrayList(
    Iterables.filter(Arrays.asList(other.getFeatures()), new Predicate<Feature>() {

      @Override
      public boolean apply(Feature input) {
        if (AclFeature.class.isInstance(input) 
            || XAttrFeature.class.isInstance(input)) {
          return true;
        }
        return false;
      }
      
    }))
    .toArray(new Feature[0]));
}
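
The anonymous Guava Predicate above predates lambdas. As an illustrative rewrite (a sketch, not the original Hadoop code), the same ACL/XAttr filter can be expressed with Java 8 streams:

// Keep only the AclFeature and XAttrFeature entries from the feature array.
Feature[] kept = Arrays.stream(other.getFeatures())
    .filter(f -> f instanceof AclFeature || f instanceof XAttrFeature)
    .toArray(Feature[]::new);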
 
Example 4
Source Project: hadoop   Source File: SnapshotFSImageFormat.java    License: Apache License 2.0
/**
 * Load the deleted list from the fsimage.
 * 
 * @param parent The directory that the deleted list belongs to.
 * @param createdList The created list associated with the deleted list in 
 *                    the same Diff.
 * @param in The {@link DataInput} to read.
 * @param loader The {@link Loader} instance.
 * @return The deleted list.
 */
private static List<INode> loadDeletedList(INodeDirectory parent,
    List<INode> createdList, DataInput in, FSImageFormat.Loader loader)
    throws IOException {
  int deletedSize = in.readInt();
  List<INode> deletedList = new ArrayList<INode>(deletedSize);
  for (int i = 0; i < deletedSize; i++) {
    final INode deleted = loader.loadINodeWithLocalName(true, in, true);
    deletedList.add(deleted);
    // set parent: the parent field of an INode in the deleted list is not 
    // useful, but set the parent here to be consistent with the original 
    // fsdir tree.
    deleted.setParent(parent);
    if (deleted.isFile()) {
      loader.updateBlocksMap(deleted.asFile());
    }
  }
  return deletedList;
}
 
Example 5
Source Project: hadoop   Source File: SnapshotFSImageFormat.java    License: Apache License 2.0
/**
 * Load snapshots and snapshotQuota for a Snapshottable directory.
 *
 * @param snapshottableParent
 *          The snapshottable directory for loading.
 * @param numSnapshots
 *          The number of snapshots that the directory has.
 * @param in
 *          The {@link DataInput} to read.
 * @param loader
 *          The {@link Loader} instance.
 */
public static void loadSnapshotList(INodeDirectory snapshottableParent,
    int numSnapshots, DataInput in, FSImageFormat.Loader loader)
    throws IOException {
  DirectorySnapshottableFeature sf = snapshottableParent
      .getDirectorySnapshottableFeature();
  Preconditions.checkArgument(sf != null);
  for (int i = 0; i < numSnapshots; i++) {
    // read snapshots
    final Snapshot s = loader.getSnapshot(in);
    s.getRoot().setParent(snapshottableParent);
    sf.addSnapshot(s);
  }
  int snapshotQuota = in.readInt();
  snapshottableParent.setSnapshotQuota(snapshotQuota);
}
 
Example 6
Source Project: big-c   Source File: SnapshotManager.java    License: Apache License 2.0
private void checkNestedSnapshottable(INodeDirectory dir, String path)
    throws SnapshotException {
  if (allowNestedSnapshots) {
    return;
  }

  for(INodeDirectory s : snapshottables.values()) {
    if (s.isAncestorDirectory(dir)) {
      throw new SnapshotException(
          "Nested snapshottable directories not allowed: path=" + path
          + ", the subdirectory " + s.getFullPathName()
          + " is already a snapshottable directory.");
    }
    if (dir.isAncestorDirectory(s)) {
      throw new SnapshotException(
          "Nested snapshottable directories not allowed: path=" + path
          + ", the ancestor " + s.getFullPathName()
          + " is already a snapshottable directory.");
    }
  }
}
 
Example 7
Source Project: big-c   Source File: TestSnapshotDeletion.java    License: Apache License 2.0
private void checkQuotaUsageComputation(final Path dirPath,
    final long expectedNs, final long expectedDs) throws IOException {
  INodeDirectory dirNode = getDir(fsdir, dirPath);
  assertTrue(dirNode.isQuotaSet());
  QuotaCounts q = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs,
      q.getNameSpace());
  assertEquals(dirNode.dumpTreeRecursively().toString(), expectedDs,
      q.getStorageSpace());
  QuotaCounts counts = new QuotaCounts.Builder().build();
  dirNode.computeQuotaUsage(fsdir.getBlockStoragePolicySuite(), counts, false);
  assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs,
      counts.getNameSpace());
  assertEquals(dirNode.dumpTreeRecursively().toString(), expectedDs,
      counts.getStorageSpace());
}
 
Example 8
Source Project: hadoop   Source File: DirectoryWithSnapshotFeature.java    License: Apache License 2.0
/** Clear the created list. */
private QuotaCounts destroyCreatedList(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory currentINode,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  final List<INode> createdList = getList(ListType.CREATED);
  for (INode c : createdList) {
    c.computeQuotaUsage(bsps, counts, true);
    c.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    // c should be contained in the children list, remove it
    currentINode.removeChild(c);
  }
  createdList.clear();
  return counts;
}
 
Example 9
Source Project: hadoop   Source File: DirectoryWithSnapshotFeature.java    License: Apache License 2.0
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory currentDir, final DirectoryDiff posterior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final QuotaCounts counts = new QuotaCounts.Builder().build();
  diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
    /** Collect blocks for deleted files. */
    @Override
    public void process(INode inode) {
      if (inode != null) {
        inode.computeQuotaUsage(bsps, counts, false);
        inode.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
      }
    }
  });
  return counts;
}
 
Example 10
Source Project: big-c   Source File: SnapshotFSImageFormat.java    License: Apache License 2.0
/**
 * Load {@link DirectoryDiff} from fsimage.
 * @param parent The directory that the SnapshotDiff belongs to.
 * @param in The {@link DataInput} instance to read.
 * @param loader The {@link Loader} instance that this loading procedure is 
 *               using.
 * @return A {@link DirectoryDiff}.
 */
private static DirectoryDiff loadDirectoryDiff(INodeDirectory parent,
    DataInput in, FSImageFormat.Loader loader) throws IOException {
  // 1. Read the full path of the Snapshot root to identify the Snapshot
  final Snapshot snapshot = loader.getSnapshot(in);

  // 2. Load DirectoryDiff#childrenSize
  int childrenSize = in.readInt();
  
  // 3. Load DirectoryDiff#snapshotINode 
  INodeDirectoryAttributes snapshotINode = loadSnapshotINodeInDirectoryDiff(
      snapshot, in, loader);
  
  // 4. Load the created list in SnapshotDiff#Diff
  List<INode> createdList = loadCreatedList(parent, in);
  
  // 5. Load the deleted list in SnapshotDiff#Diff
  List<INode> deletedList = loadDeletedList(parent, createdList, in, loader);
  
  // 6. Compose the SnapshotDiff
  List<DirectoryDiff> diffs = parent.getDiffs().asList();
  DirectoryDiff sdiff = new DirectoryDiff(snapshot.getId(), snapshotINode,
      diffs.isEmpty() ? null : diffs.get(0), childrenSize, createdList,
      deletedList, snapshotINode == snapshot.getRoot());
  return sdiff;
}
 
Example 11
Source Project: hadoop   Source File: FSImageFormatPBSnapshot.java    License: Apache License 2.0
/**
 * Load the snapshots section from fsimage. Also add snapshottable feature
 * to snapshottable directories.
 */
public void loadSnapshotSection(InputStream in) throws IOException {
  SnapshotManager sm = fsn.getSnapshotManager();
  SnapshotSection section = SnapshotSection.parseDelimitedFrom(in);
  int snum = section.getNumSnapshots();
  sm.setNumSnapshots(snum);
  sm.setSnapshotCounter(section.getSnapshotCounter());
  for (long sdirId : section.getSnapshottableDirList()) {
    INodeDirectory dir = fsDir.getInode(sdirId).asDirectory();
    if (!dir.isSnapshottable()) {
      dir.addSnapshottableFeature();
    } else {
      // dir is root, and admin set root to snapshottable before
      dir.setSnapshotQuota(DirectorySnapshottableFeature.SNAPSHOT_LIMIT);
    }
    sm.addSnapshottable(dir);
  }
  loadSnapshots(in, snum);
}
 
Example 12
Source Project: hadoop   Source File: TestSnapshotRename.java    License: Apache License 2.0
/**
 * Check the correctness of the snapshot list within a snapshottable dir.
 */
private void checkSnapshotList(INodeDirectory srcRoot,
    String[] sortedNames, String[] names) {
  assertTrue(srcRoot.isSnapshottable());
  ReadOnlyList<Snapshot> listByName = srcRoot
      .getDirectorySnapshottableFeature().getSnapshotList();
  assertEquals(sortedNames.length, listByName.size());
  for (int i = 0; i < listByName.size(); i++) {
    assertEquals(sortedNames[i], listByName.get(i).getRoot().getLocalName());
  }
  List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
  assertEquals(names.length, listByTime.size());
  for (int i = 0; i < listByTime.size(); i++) {
    Snapshot s = srcRoot.getDirectorySnapshottableFeature().getSnapshotById(
        listByTime.get(i).getSnapshotId());
    assertEquals(names[i], s.getRoot().getLocalName());
  }
}
 
Example 13
Source Project: hadoop   Source File: TestSnapshotRename.java    License: Apache License 2.0
/**
 * Rename snapshot(s), and check the correctness of the snapshot list within
 * {@link INodeDirectorySnapshottable}
 */
@Test (timeout=60000)
public void testSnapshotList() throws Exception {
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
  // Create three snapshots for sub1
  SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sub1, "s2");
  SnapshotTestHelper.createSnapshot(hdfs, sub1, "s3");
  
  // Rename s3 to s22
  hdfs.renameSnapshot(sub1, "s3", "s22");
  // Check the snapshots list
  INodeDirectory srcRoot = fsdir.getINode(sub1.toString()).asDirectory();
  checkSnapshotList(srcRoot, new String[] { "s1", "s2", "s22" },
      new String[] { "s1", "s2", "s22" });
  
  // Rename s1 to s4
  hdfs.renameSnapshot(sub1, "s1", "s4");
  checkSnapshotList(srcRoot, new String[] { "s2", "s22", "s4" },
      new String[] { "s4", "s2", "s22" });
  
  // Rename s22 to s0
  hdfs.renameSnapshot(sub1, "s22", "s0");
  checkSnapshotList(srcRoot, new String[] { "s0", "s2", "s4" },
      new String[] { "s4", "s2", "s0" });
}
 
Example 14
Source Project: big-c   Source File: SnapshotManager.java    License: Apache License 2.0
/**
 * Compute the difference between two snapshots of a directory, or between a
 * snapshot of the directory and its current tree.
 */
public SnapshotDiffReport diff(final INodesInPath iip,
    final String snapshotRootPath, final String from,
    final String to) throws IOException {
  // Find the source root directory path where the snapshots were taken.
  // All path checks are performed inside the valueOf method.
  final INodeDirectory snapshotRoot = getSnapshottableRoot(iip);

  if ((from == null || from.isEmpty())
      && (to == null || to.isEmpty())) {
    // both fromSnapshot and toSnapshot indicate the current tree
    return new SnapshotDiffReport(snapshotRootPath, from, to,
        Collections.<DiffReportEntry> emptyList());
  }
  final SnapshotDiffInfo diffs = snapshotRoot
      .getDirectorySnapshottableFeature().computeDiff(snapshotRoot, from, to);
  return diffs != null ? diffs.generateReport() : new SnapshotDiffReport(
      snapshotRootPath, from, to, Collections.<DiffReportEntry> emptyList());
}
 
Example 15
Source Project: big-c   Source File: SnapshotManager.java    License: Apache License 2.0
/**
 * Find the source root directory where the snapshot will be taken
 * for a given path.
 *
 * @return Snapshottable directory.
 * @throws IOException
 *           Thrown when the given path does not lead to an existing
 *           snapshottable directory.
 */
public INodeDirectory getSnapshottableRoot(final INodesInPath iip)
    throws IOException {
  final String path = iip.getPath();
  final INodeDirectory dir = INodeDirectory.valueOf(iip.getLastINode(), path);
  if (!dir.isSnapshottable()) {
    throw new SnapshotException(
        "Directory is not a snapshottable directory: " + path);
  }
  return dir;
}
 
Example 16
Source Project: big-c   Source File: SnapshotFSImageFormat.java    License: Apache License 2.0
/**
 * Load the created list from fsimage.
 * @param parent The directory that the created list belongs to.
 * @param in The {@link DataInput} to read.
 * @return The created list.
 */
private static List<INode> loadCreatedList(INodeDirectory parent,
    DataInput in) throws IOException {
  // read the size of the created list
  int createdSize = in.readInt();
  List<INode> createdList = new ArrayList<INode>(createdSize);
  for (int i = 0; i < createdSize; i++) {
    byte[] createdNodeName = FSImageSerialization.readLocalName(in);
    INode created = loadCreated(createdNodeName, parent);
    createdList.add(created);
  }
  return createdList;
}
 
Example 17
Source Project: hadoop   Source File: SnapshotManager.java    License: Apache License 2.0
/** Remove snapshottable directories from {@link #snapshottables} */
public void removeSnapshottable(List<INodeDirectory> toRemove) {
  if (toRemove != null) {
    for (INodeDirectory s : toRemove) {
      removeSnapshottable(s);
    }
  }
}
 
Example 18
Source Project: hadoop   Source File: TestRenameWithSnapshots.java    License: Apache License 2.0
/**
 * Make sure we clean the whole subtree under a DstReference node after 
 * deleting a snapshot.
 * See HDFS-5476.
 */
@Test
public void testCleanDstReference() throws Exception {
  final Path test = new Path("/test");
  final Path foo = new Path(test, "foo");
  final Path bar = new Path(foo, "bar");
  hdfs.mkdirs(bar);
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  
  // create file after s0 so that the file should not be included in s0
  final Path fileInBar = new Path(bar, "file");
  DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPL, SEED);
  // rename foo --> foo2
  final Path foo2 = new Path(test, "foo2");
  hdfs.rename(foo, foo2);
  // create snapshot s1, note the file is included in s1
  hdfs.createSnapshot(test, "s1");
  // delete bar and foo2
  hdfs.delete(new Path(foo2, "bar"), true);
  hdfs.delete(foo2, true);
  
  final Path sfileInBar = SnapshotTestHelper.getSnapshotPath(test, "s1",
      "foo2/bar/file");
  assertTrue(hdfs.exists(sfileInBar));
  
  hdfs.deleteSnapshot(test, "s1");
  assertFalse(hdfs.exists(sfileInBar));
  
  restartClusterAndCheckImage(true);
  // make sure the file under bar is deleted 
  final Path barInS0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
      "foo/bar");
  INodeDirectory barNode = fsdir.getINode(barInS0.toString()).asDirectory();
  assertEquals(0, barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
  List<DirectoryDiff> diffList = barNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertEquals(0, diff.getChildrenDiff().getList(ListType.DELETED).size());
  assertEquals(0, diff.getChildrenDiff().getList(ListType.CREATED).size());
}
 
Example 19
Source Project: big-c   Source File: SnapshotManager.java    License: Apache License 2.0
/**
 * List all the snapshottable directories that are owned by the current user.
 * @param userName Current user name.
 * @return Snapshottable directories that are owned by the current user,
 *         represented as an array of {@link SnapshottableDirectoryStatus}. If
 *         {@code userName} is null, return all the snapshottable dirs.
 */
public SnapshottableDirectoryStatus[] getSnapshottableDirListing(
    String userName) {
  if (snapshottables.isEmpty()) {
    return null;
  }
  
  List<SnapshottableDirectoryStatus> statusList = 
      new ArrayList<SnapshottableDirectoryStatus>();
  for (INodeDirectory dir : snapshottables.values()) {
    if (userName == null || userName.equals(dir.getUserName())) {
      SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
          dir.getModificationTime(), dir.getAccessTime(),
          dir.getFsPermission(), dir.getUserName(), dir.getGroupName(),
          dir.getLocalNameBytes(), dir.getId(), 
          dir.getChildrenNum(Snapshot.CURRENT_STATE_ID),
          dir.getDirectorySnapshottableFeature().getNumSnapshots(),
          dir.getDirectorySnapshottableFeature().getSnapshotQuota(),
          dir.getParent() == null ? DFSUtil.EMPTY_BYTES :
              DFSUtil.string2Bytes(dir.getParent().getFullPathName()));
      statusList.add(status);
    }
  }
  Collections.sort(statusList, SnapshottableDirectoryStatus.COMPARATOR);
  return statusList.toArray(
      new SnapshottableDirectoryStatus[statusList.size()]);
}
 
Example 20
Source Project: big-c   Source File: TestRenameWithSnapshots.java    License: Apache License 2.0
/**
 * Rename a single file across snapshottable dirs.
 */
@Test (timeout=60000)
public void testRenameFileAcrossSnapshottableDirs() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir2, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  hdfs.createSnapshot(sdir1, "s3");
  
  final Path newfoo = new Path(sdir1, "foo");
  hdfs.rename(foo, newfoo);
  
  // change the replication factor of foo
  hdfs.setReplication(newfoo, REPL_1);
  
  // /dir2/.snapshot/s2/foo should still work
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
      "foo");
  assertTrue(hdfs.exists(foo_s2));
  FileStatus status = hdfs.getFileStatus(foo_s2);
  assertEquals(REPL, status.getReplication());
  
  final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3",
      "foo");
  assertFalse(hdfs.exists(foo_s3));
  INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
  Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  INodeFile sfoo = fsdir.getINode(newfoo.toString()).asFile();
  assertEquals(s2.getId(), sfoo.getDiffs().getLastSnapshotId());
}
 
Example 21
Source Project: hadoop   Source File: SnapshotManager.java    License: Apache License 2.0
/**
 * Write {@link #snapshotCounter}, {@link #numSnapshots},
 * and all snapshots to the DataOutput.
 */
public void write(DataOutput out) throws IOException {
  out.writeInt(snapshotCounter);
  out.writeInt(numSnapshots.get());

  // write all snapshots.
  for(INodeDirectory snapshottableDir : snapshottables.values()) {
    for (Snapshot s : snapshottableDir.getDirectorySnapshottableFeature()
        .getSnapshotList()) {
      s.write(out);
    }
  }
}
 
Example 22
Source Project: big-c   Source File: TestRenameWithSnapshots.java    License: Apache License 2.0
/**
 * Test rename where the src/dst directories are both snapshottable 
 * directories without snapshots. In such a case we need to update the
 * snapshottable dir list in SnapshotManager.
 */
@Test (timeout=60000)
public void testRenameAndUpdateSnapshottableDirs() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(sdir2, "bar");
  hdfs.mkdirs(foo);
  hdfs.mkdirs(bar);
  
  hdfs.allowSnapshot(foo);
  SnapshotTestHelper.createSnapshot(hdfs, bar, snap1);
  assertEquals(2, fsn.getSnapshottableDirListing().length);
  
  INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  long fooId = fooNode.getId();
  
  try {
    hdfs.rename(foo, bar, Rename.OVERWRITE);
    fail("Expect exception since " + bar
        + " is snapshottable and already has snapshots");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(bar.toString()
        + " is snapshottable and already has snapshots", e);
  }
  
  hdfs.deleteSnapshot(bar, snap1);
  hdfs.rename(foo, bar, Rename.OVERWRITE);
  SnapshottableDirectoryStatus[] dirs = fsn.getSnapshottableDirListing();
  assertEquals(1, dirs.length);
  assertEquals(bar, dirs[0].getFullPath());
  assertEquals(fooId, dirs[0].getDirStatus().getFileId());
}
 
Example 23
Source Project: big-c   Source File: DirectoryWithSnapshotFeature.java    License: Apache License 2.0
/**
 * @return The children list of the directory for the given snapshot: if
 *         there is no corresponding directory diff for the snapshot, the
 *         current children list is returned; otherwise the children list
 *         for the snapshot is computed from the diffs and returned.
 */
public ReadOnlyList<INode> getChildrenList(INodeDirectory currentINode,
    final int snapshotId) {
  final DirectoryDiff diff = diffs.getDiffById(snapshotId);
  return diff != null ? diff.getChildrenList(currentINode) : currentINode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
}
 
Example 24
Source Project: hadoop   Source File: SnapshotManager.java    License: Apache License 2.0
public static SnapshottableDirectoryStatus.Bean toBean(INodeDirectory d) {
  return new SnapshottableDirectoryStatus.Bean(
      d.getFullPathName(),
      d.getDirectorySnapshottableFeature().getNumSnapshots(),
      d.getDirectorySnapshottableFeature().getSnapshotQuota(),
      d.getModificationTime(),
      Short.valueOf(Integer.toOctalString(
          d.getFsPermissionShort())),
      d.getUserName(),
      d.getGroupName());
}
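
Note the permission encoding in toBean(): the numeric FsPermission is rendered as an octal string and then re-parsed as a decimal short, so a mode of 0755 (493 decimal) is stored in the bean as the number 755. A standalone sketch of just that conversion:

short mode = 0755;                           // 493 in decimal
String octal = Integer.toOctalString(mode);  // "755"
short beanValue = Short.valueOf(octal);      // 755 (decimal), matching the bean field
System.out.println(beanValue);               // prints 755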
 
Example 25
Source Project: big-c   Source File: DirectorySnapshottableFeature.java    License: Apache License 2.0
public ContentSummaryComputationContext computeContentSummary(
    final BlockStoragePolicySuite bsps,
    final INodeDirectory snapshotRoot,
    final ContentSummaryComputationContext summary) {
  snapshotRoot.computeContentSummary(summary);
  summary.getCounts().addContent(Content.SNAPSHOT, snapshotsByNames.size());
  summary.getCounts().addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
  return summary;
}
 