Java Code Examples for org.apache.hadoop.hdfs.server.namenode.INodeDirectory#getSnapshot()

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.INodeDirectory#getSnapshot(). Each example is taken verbatim from the project and source file named above it.
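All of the examples follow the same basic pattern: resolve the snapshottable directory's INodeDirectory through the FSDirectory, look up a Snapshot by name with getSnapshot() (the name is passed as a byte array via DFSUtil.string2Bytes), and then compare the returned snapshot's id against ids recorded in inode diff lists. The minimal sketch below distills that pattern from the tests; it assumes an FSDirectory instance named fsdir, as in the tests, and a snapshottable directory /dir1 that already has a snapshot named s1.

// Resolve the directory inode for the snapshottable path (fsdir is the tests' FSDirectory).
INodeDirectory dirNode = fsdir.getINode("/dir1").asDirectory();
// Look up the snapshot by name; getSnapshot() expects the name as bytes.
Snapshot s1 = dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));
// The snapshot id is what the tests compare against ids stored in inode diff lists,
// e.g. fileNode.getDiffs().getLastSnapshotId() or directoryDiff.getSnapshotId().
int s1Id = s1.getId();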
Example 1
Source File: TestRenameWithSnapshots.java    From big-c with Apache License 2.0
/**
 * Rename a single file across snapshottable dirs.
 */
@Test (timeout=60000)
public void testRenameFileAcrossSnapshottableDirs() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir2, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  hdfs.createSnapshot(sdir1, "s3");
  
  final Path newfoo = new Path(sdir1, "foo");
  hdfs.rename(foo, newfoo);
  
  // change the replication factor of foo
  hdfs.setReplication(newfoo, REPL_1);
  
  // /dir2/.snapshot/s2/foo should still work
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
      "foo");
  assertTrue(hdfs.exists(foo_s2));
  FileStatus status = hdfs.getFileStatus(foo_s2);
  assertEquals(REPL, status.getReplication());
  
  final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3",
      "foo");
  assertFalse(hdfs.exists(foo_s3));
  INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
  Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  INodeFile sfoo = fsdir.getINode(newfoo.toString()).asFile();
  assertEquals(s2.getId(), sfoo.getDiffs().getLastSnapshotId());
}
 
Example 2
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * Rename a single file across snapshottable dirs.
 */
@Test (timeout=60000)
public void testRenameFileAcrossSnapshottableDirs() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir2, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  hdfs.createSnapshot(sdir1, "s3");
  
  final Path newfoo = new Path(sdir1, "foo");
  hdfs.rename(foo, newfoo);
  
  // change the replication factor of foo
  hdfs.setReplication(newfoo, REPL_1);
  
  // /dir2/.snapshot/s2/foo should still work
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
      "foo");
  assertTrue(hdfs.exists(foo_s2));
  FileStatus status = hdfs.getFileStatus(foo_s2);
  assertEquals(REPL, status.getReplication());
  
  final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3",
      "foo");
  assertFalse(hdfs.exists(foo_s3));
  INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
  Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  INodeFile sfoo = fsdir.getINode(newfoo.toString()).asFile();
  assertEquals(s2.getId(), sfoo.getDiffs().getLastSnapshotId());
}
 
Example 3
Source File: TestSetQuotaWithSnapshot.java    From hadoop with Apache License 2.0
/**
 * Test clear quota of a snapshottable dir or a dir with snapshot.
 */
@Test
public void testClearQuota() throws Exception {
  final Path dir = new Path("/TestSnapshot");
  hdfs.mkdirs(dir);
  
  hdfs.allowSnapshot(dir);
  hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET,
      HdfsConstants.QUOTA_DONT_SET);
  INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  assertEquals(0, dirNode.getDiffs().asList().size());
  
  hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET - 1,
      HdfsConstants.QUOTA_DONT_SET - 1);
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  assertEquals(0, dirNode.getDiffs().asList().size());
  
  hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  assertEquals(0, dirNode.getDiffs().asList().size());
  
  // allow snapshot on dir and create snapshot s1
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  
  // clear quota of dir
  hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
  // dir should still be a snapshottable directory
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  assertEquals(1, dirNode.getDiffs().asList().size());
  SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();
  assertEquals(1, status.length);
  assertEquals(dir, status[0].getFullPath());
  
  final Path subDir = new Path(dir, "sub");
  hdfs.mkdirs(subDir);
  hdfs.createSnapshot(dir, "s2");
  final Path file = new Path(subDir, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
  hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
  INode subNode = fsdir.getINode4Write(subDir.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());
  List<DirectoryDiff> diffList = subNode.asDirectory().getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s2 = dirNode.getSnapshot(DFSUtil.string2Bytes("s2"));
  assertEquals(s2.getId(), diffList.get(0).getSnapshotId());
  List<INode> createdList = diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
  assertEquals(1, createdList.size());
  assertSame(fsdir.getINode4Write(file.toString()), createdList.get(0));
}
 
Example 4
Source File: TestSetQuotaWithSnapshot.java    From big-c with Apache License 2.0
/**
 * Test clear quota of a snapshottable dir or a dir with snapshot.
 */
@Test
public void testClearQuota() throws Exception {
  final Path dir = new Path("/TestSnapshot");
  hdfs.mkdirs(dir);
  
  hdfs.allowSnapshot(dir);
  hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET,
      HdfsConstants.QUOTA_DONT_SET);
  INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  assertEquals(0, dirNode.getDiffs().asList().size());
  
  hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET - 1,
      HdfsConstants.QUOTA_DONT_SET - 1);
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  assertEquals(0, dirNode.getDiffs().asList().size());
  
  hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  assertEquals(0, dirNode.getDiffs().asList().size());
  
  // allow snapshot on dir and create snapshot s1
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  
  // clear quota of dir
  hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
  // dir should still be a snapshottable directory
  dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
  assertTrue(dirNode.isSnapshottable());
  assertEquals(1, dirNode.getDiffs().asList().size());
  SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();
  assertEquals(1, status.length);
  assertEquals(dir, status[0].getFullPath());
  
  final Path subDir = new Path(dir, "sub");
  hdfs.mkdirs(subDir);
  hdfs.createSnapshot(dir, "s2");
  final Path file = new Path(subDir, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
  hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
  INode subNode = fsdir.getINode4Write(subDir.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());
  List<DirectoryDiff> diffList = subNode.asDirectory().getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s2 = dirNode.getSnapshot(DFSUtil.string2Bytes("s2"));
  assertEquals(s2.getId(), diffList.get(0).getSnapshotId());
  List<INode> createdList = diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
  assertEquals(1, createdList.size());
  assertSame(fsdir.getINode4Write(file.toString()), createdList.get(0));
}
 
Example 5
Source File: TestRenameWithSnapshots.java    From big-c with Apache License 2.0
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * 
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  
  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);
  
  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
  
  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");
  
  // delete foo2
  hdfs.delete(foo2, true);
  // delete s3
  hdfs.deleteSnapshot(sdir2, "s3");
  
  // check
  final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(3, q1.getNameSpace());
  final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(1, q2.getNameSpace());
  
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc = 
      (WithCount) fooRef.asReference().getReferredINode();
  assertEquals(1, wc.getReferenceCount());
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  assertEquals(0, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());
  
  restartClusterAndCheckImage(true);
}
 
Example 6
Source File: TestRenameWithSnapshots.java    From big-c with Apache License 2.0
/**
 * Test rename to an invalid name (xxx/.snapshot)
 */
@Test
public void testRenameUndo_7() throws Exception {
  final Path root = new Path("/");
  final Path foo = new Path(root, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  
  // create a snapshot on root
  SnapshotTestHelper.createSnapshot(hdfs, root, snap1);
  
  // rename bar to /foo/.snapshot which is invalid
  final Path invalid = new Path(foo, HdfsConstants.DOT_SNAPSHOT_DIR);
  try {
    hdfs.rename(bar, invalid);
    fail("expect exception since invalid name is used for rename");
  } catch (Exception e) {
    GenericTestUtils.assertExceptionContains("\"" +
        HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name", e);
  }
  
  // check
  INodeDirectory rootNode = fsdir.getINode4Write(root.toString())
      .asDirectory();
  INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, children.size());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  // this diff is generated while renaming
  Snapshot s1 = rootNode.getSnapshot(DFSUtil.string2Bytes(snap1));
  assertEquals(s1.getId(), diff.getSnapshotId());
  // after undo, the diff should be empty
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  
  // bar was converted to filewithsnapshot while renaming
  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  assertSame(barNode, children.get(0));
  assertSame(fooNode, barNode.getParent());
  List<FileDiff> barDiffList = barNode.getDiffs().asList();
  assertEquals(1, barDiffList.size());
  FileDiff barDiff = barDiffList.get(0);
  assertEquals(s1.getId(), barDiff.getSnapshotId());
  
  // restart cluster multiple times to make sure the fsimage and edits log are
  // correct. Note that when loading fsimage, foo and bar will be converted 
  // back to normal INodeDirectory and INodeFile since they do not store any 
  // snapshot data
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPL).build();
  cluster.waitActive();
  restartClusterAndCheckImage(true);
}
 
Example 7
Source File: TestRenameWithSnapshots.java    From big-c with Apache License 2.0
/**
 * Test the undo section of the second-time rename.
 */
@Test
public void testRenameUndo_3() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path sdir3 = new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  
  INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
  INodeDirectory mockDir3 = spy(dir3);
  doReturn(false).when(mockDir3).addChild((INode) anyObject(), anyBoolean(),
          Mockito.anyInt());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
  
  final Path foo_dir2 = new Path(sdir2, "foo2");
  final Path foo_dir3 = new Path(sdir3, "foo3");
  hdfs.rename(foo, foo_dir2);
  boolean result = hdfs.rename(foo_dir2, foo_dir3);
  assertFalse(result);
  
  // check the current internal details
  INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  Snapshot s2 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  ReadOnlyList<INode> dir2Children = dir2Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir2Children.size());
  List<DirectoryDiff> dir2Diffs = dir2Node.getDiffs().asList();
  assertEquals(1, dir2Diffs.size());
  assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId());
  ChildrenDiff childrenDiff = dir2Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo2");
  assertFalse(hdfs.exists(foo_s2));
  
  INode fooNode = fsdir.getINode4Write(foo_dir2.toString());
  assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
  assertTrue(fooNode instanceof INodeReference.DstReference);
  List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
  assertEquals(1, fooDiffs.size());
  assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
  
  // create snapshot on sdir2 and rename again
  hdfs.createSnapshot(sdir2, "s3");
  result = hdfs.rename(foo_dir2, foo_dir3);
  assertFalse(result);

  // check internal details again
  dir2Node = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  Snapshot s3 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s3"));
  fooNode = fsdir.getINode4Write(foo_dir2.toString());
  dir2Children = dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir2Children.size());
  dir2Diffs = dir2Node.getDiffs().asList();
  assertEquals(2, dir2Diffs.size());
  assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId());
  assertEquals(s3.getId(), dir2Diffs.get(1).getSnapshotId());
  
  childrenDiff = dir2Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
  assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
  
  childrenDiff = dir2Diffs.get(1).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(0, childrenDiff.getList(ListType.CREATED).size());
  
  final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo2");
  assertFalse(hdfs.exists(foo_s2));
  assertTrue(hdfs.exists(foo_s3));
  
  assertTrue(fooNode instanceof INodeReference.DstReference);
  fooDiffs = fooNode.asDirectory().getDiffs().asList();
  assertEquals(2, fooDiffs.size());
  assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
  assertEquals(s3.getId(), fooDiffs.get(1).getSnapshotId());
}
 
Example 8
Source File: TestRenameWithSnapshots.java    From big-c with Apache License 2.0
/**
 * Test the undo section of rename. Before the rename, we create the renamed 
 * file/dir after taking the snapshot.
 */
@Test
public void testRenameUndo_2() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path dir2file = new Path(sdir2, "file");
  DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  
  // create foo after taking snapshot
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  
  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
          Mockito.anyInt());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
  
  final Path newfoo = new Path(sdir2, "foo");
  boolean result = hdfs.rename(foo, newfoo);
  assertFalse(result);
  
  // check the current internal details
  INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  ReadOnlyList<INode> dir1Children = dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir1Children.size());
  assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
  List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
  assertEquals(1, dir1Diffs.size());
  assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
  
  // after the undo of rename, the created list of sdir1 should contain 
  // 1 element
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
  
  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode instanceof INodeDirectory);
  assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
  
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  assertFalse(hdfs.exists(foo_s1));
  
  // check sdir2
  assertFalse(hdfs.exists(newfoo));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  assertFalse(dir2Node.isWithSnapshot());
  ReadOnlyList<INode> dir2Children = dir2Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir2Children.size());
  assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
 
Example 9
Source File: TestRenameWithSnapshots.java    From big-c with Apache License 2.0
/**
 * Test the undo section of rename. Before the rename, we create the renamed 
 * file/dir before taking the snapshot.
 */
@Test
public void testRenameUndo_1() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  final Path dir2file = new Path(sdir2, "file");
  DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  
  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
         Mockito.anyInt());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
  
  final Path newfoo = new Path(sdir2, "foo");
  boolean result = hdfs.rename(foo, newfoo);
  assertFalse(result);
  
  // check the current internal details
  INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  ReadOnlyList<INode> dir1Children = dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir1Children.size());
  assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
  List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
  assertEquals(1, dir1Diffs.size());
  assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
  
  // after the undo of rename, both the created and deleted list of sdir1
  // should be empty
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(0, childrenDiff.getList(ListType.CREATED).size());
  
  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot());
  List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
  assertEquals(1, fooDiffs.size());
  assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
  
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  INode fooNode_s1 = fsdir.getINode(foo_s1.toString());
  assertTrue(fooNode_s1 == fooNode);
  
  // check sdir2
  assertFalse(hdfs.exists(newfoo));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  assertFalse(dir2Node.isWithSnapshot());
  ReadOnlyList<INode> dir2Children = dir2Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir2Children.size());
  assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
 
Example 10
Source File: TestRenameWithSnapshots.java    From big-c with Apache License 2.0
/**
 * After rename, delete the snapshot in src
 */
@Test
public void testRenameDirAndDeleteSnapshot_2() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir2, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s3");
  
  final Path newfoo = new Path(sdir1, "foo");
  hdfs.rename(foo, newfoo);
  
  // restart the cluster and check fsimage
  restartClusterAndCheckImage(true);
  
  final Path bar2 = new Path(newfoo, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  
  hdfs.createSnapshot(sdir1, "s4");
  hdfs.delete(newfoo, true);
  
  final Path bar2_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4",
      "foo/bar2");
  assertTrue(hdfs.exists(bar2_s4));
  final Path bar_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4",
      "foo/bar");
  assertTrue(hdfs.exists(bar_s4));
      
  // delete snapshot s4. The diff of s4 should be combined to s3
  hdfs.deleteSnapshot(sdir1, "s4");
  // restart the cluster and check fsimage
  restartClusterAndCheckImage(true);
  
  Path bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar");
  assertFalse(hdfs.exists(bar_s3));
  bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo/bar");
  assertTrue(hdfs.exists(bar_s3));
  Path bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar2");
  assertFalse(hdfs.exists(bar2_s3));
  bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo/bar2");
  assertFalse(hdfs.exists(bar2_s3));
  
  // delete snapshot s3
  hdfs.deleteSnapshot(sdir2, "s3");
  final Path bar_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
      "foo/bar");
  assertTrue(hdfs.exists(bar_s2));
  
  // check internal details
  INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
  Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
  INodeReference fooRef = fsdir.getINode(foo_s2.toString()).asReference();
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount fooWC = (WithCount) fooRef.getReferredINode();
  assertEquals(1, fooWC.getReferenceCount());
  INodeDirectory fooDir = fooWC.getReferredINode().asDirectory();
  List<DirectoryDiff> diffs = fooDir.getDiffs().asList();
  assertEquals(1, diffs.size());
  assertEquals(s2.getId(), diffs.get(0).getSnapshotId());
  
  // restart the cluster and check fsimage
  restartClusterAndCheckImage(true);
  
  // delete snapshot s2.
  hdfs.deleteSnapshot(sdir2, "s2");
  assertFalse(hdfs.exists(bar_s2));
  restartClusterAndCheckImage(true);
  // make sure the whole referred subtree has been destroyed
  QuotaCounts q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(3, q.getNameSpace());
  assertEquals(0, q.getStorageSpace());
  
  hdfs.deleteSnapshot(sdir1, "s1");
  restartClusterAndCheckImage(true);
  q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(3, q.getNameSpace());
  assertEquals(0, q.getStorageSpace());
}
 
Example 11
Source File: TestRenameWithSnapshots.java    From big-c with Apache License 2.0
/**
 * Test renaming a file and then delete snapshots.
 */
@Test
public void testRenameFileAndDeleteSnapshot() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir2, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  hdfs.createSnapshot(sdir1, "s3");
  
  final Path newfoo = new Path(sdir1, "foo");
  hdfs.rename(foo, newfoo);
  
  hdfs.setReplication(newfoo, REPL_1);
  
  hdfs.createSnapshot(sdir1, "s4");
  hdfs.setReplication(newfoo, REPL_2);
  
  FileStatus status = hdfs.getFileStatus(newfoo);
  assertEquals(REPL_2, status.getReplication());
  final Path foo_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4", "foo");
  status = hdfs.getFileStatus(foo_s4);
  assertEquals(REPL_1, status.getReplication());
  
  hdfs.createSnapshot(sdir1, "s5");
  final Path foo_s5 = SnapshotTestHelper.getSnapshotPath(sdir1, "s5", "foo");
  status = hdfs.getFileStatus(foo_s5);
  assertEquals(REPL_2, status.getReplication());
  
  // delete snapshot s5.
  hdfs.deleteSnapshot(sdir1, "s5");
  restartClusterAndCheckImage(true);
  assertFalse(hdfs.exists(foo_s5));
  status = hdfs.getFileStatus(foo_s4);
  assertEquals(REPL_1, status.getReplication());
  
  // delete snapshot s4.
  hdfs.deleteSnapshot(sdir1, "s4");
  
  assertFalse(hdfs.exists(foo_s4));
  Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo");
  assertFalse(hdfs.exists(foo_s3));
  foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo");
  assertFalse(hdfs.exists(foo_s3));
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
  assertTrue(hdfs.exists(foo_s2));
  status = hdfs.getFileStatus(foo_s2);
  assertEquals(REPL, status.getReplication());
  
  INodeFile snode = fsdir.getINode(newfoo.toString()).asFile();
  assertEquals(1, snode.getDiffs().asList().size());
  INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
  Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  assertEquals(s2.getId(), snode.getDiffs().getLastSnapshotId());
  
  // restart cluster
  restartClusterAndCheckImage(true);
  
  // delete snapshot s2.
  hdfs.deleteSnapshot(sdir2, "s2");
  assertFalse(hdfs.exists(foo_s2));
  
  // restart the cluster and check fsimage
  restartClusterAndCheckImage(true);
  hdfs.deleteSnapshot(sdir1, "s3");
  restartClusterAndCheckImage(true);
  hdfs.deleteSnapshot(sdir1, "s1");
  restartClusterAndCheckImage(true);
}
 
Example 12
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * Unit test for HDFS-4842.
 */
@Test
public void testRenameDirAndDeleteSnapshot_7() throws Exception {
  fsn.getSnapshotManager().setAllowNestedSnapshots(true);
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);
  
  final Path foo = new Path(dir2, "foo");
  final Path bar = new Path(foo, "bar");
  final Path file = new Path(bar, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED);
  
  // take a snapshot s0 and s1 on /test
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  SnapshotTestHelper.createSnapshot(hdfs, test, "s1");
  // delete file so we have a snapshot copy for s1 in bar
  hdfs.delete(file, true);
  
  // create another snapshot on dir2
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  
  // rename foo from dir2 to dir1
  final Path newfoo = new Path(dir1, foo.getName());
  hdfs.rename(foo, newfoo);
  
  // delete snapshot s1
  hdfs.deleteSnapshot(test, "s1");
  
  // make sure the snapshot copy of file in s1 is merged to s0. For 
  // HDFS-4842, we need to make sure that we do not wrongly use s2 as the
  // prior snapshot of s1.
  final Path file_s2 = SnapshotTestHelper.getSnapshotPath(dir2, "s2",
      "foo/bar/file");
  assertFalse(hdfs.exists(file_s2));
  final Path file_s0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
      "dir2/foo/bar/file");
  assertTrue(hdfs.exists(file_s0));
  
  // check dir1: foo should be in the created list of s0
  INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString())
      .asDirectory();
  List<DirectoryDiff> dir1DiffList = dir1Node.getDiffs().asList();
  assertEquals(1, dir1DiffList.size());
  List<INode> dList = dir1DiffList.get(0).getChildrenDiff()
      .getList(ListType.DELETED);
  assertTrue(dList.isEmpty());
  List<INode> cList = dir1DiffList.get(0).getChildrenDiff()
      .getList(ListType.CREATED);
  assertEquals(1, cList.size());
  INode cNode = cList.get(0);
  INode fooNode = fsdir.getINode4Write(newfoo.toString());
  assertSame(cNode, fooNode);
  
  // check foo and its subtree
  final Path newbar = new Path(newfoo, bar.getName());
  INodeDirectory barNode = fsdir.getINode4Write(newbar.toString())
      .asDirectory();
  assertSame(fooNode.asDirectory(), barNode.getParent());
  // bar should only have a snapshot diff for s0
  List<DirectoryDiff> barDiffList = barNode.getDiffs().asList();
  assertEquals(1, barDiffList.size());
  DirectoryDiff diff = barDiffList.get(0);
  INodeDirectory testNode = fsdir.getINode4Write(test.toString())
      .asDirectory();
  Snapshot s0 = testNode.getSnapshot(DFSUtil.string2Bytes("s0"));
  assertEquals(s0.getId(), diff.getSnapshotId());
  // and file should be stored in the deleted list of this snapshot diff
  assertEquals("file", diff.getChildrenDiff().getList(ListType.DELETED)
      .get(0).getLocalName());
  
  // check dir2: a WithName instance for foo should be in the deleted list
  // of the snapshot diff for s2
  INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
      .asDirectory();
  List<DirectoryDiff> dir2DiffList = dir2Node.getDiffs().asList();
  // dir2Node should contain 1 snapshot diffs for s2
  assertEquals(1, dir2DiffList.size());
  dList = dir2DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
  assertEquals(1, dList.size());
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(dir2, "s2", 
      foo.getName());
  INodeReference.WithName fooNode_s2 = 
      (INodeReference.WithName) fsdir.getINode(foo_s2.toString());
  assertSame(dList.get(0), fooNode_s2);
  assertSame(fooNode.asReference().getReferredINode(),
      fooNode_s2.getReferredINode());
  
  restartClusterAndCheckImage(true);
}
 
Example 13
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * 
 * Make sure we only delete the snapshot s under the renamed dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  
  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);
  
  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
  
  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");
  
  // rename foo2 again
  hdfs.rename(foo2, foo);
  // delete snapshot s3
  hdfs.deleteSnapshot(sdir2, "s3");
  
  // check
  final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  // sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
  QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(7, q1.getNameSpace());
  final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(1, q2.getNameSpace());
  
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  final INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc = 
      (WithCount) fooRef.asReference().getReferredINode();
  assertEquals(2, wc.getReferenceCount());
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(3, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  assertEquals(bar2.getName(), children.get(1).getLocalName());
  assertEquals(bar3.getName(), children.get(2).getLocalName());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  // bar2 and bar3 in the created list
  assertEquals(2, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());
  
  final INode fooRef2 = fsdir.getINode4Write(foo.toString());
  assertTrue(fooRef2 instanceof INodeReference.DstReference);
  INodeReference.WithCount wc2 = 
      (WithCount) fooRef2.asReference().getReferredINode();
  assertSame(wc, wc2);
  assertSame(fooRef2, wc.getParentReference());
  
  restartClusterAndCheckImage(true);
}
 
Example 14
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * 
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  
  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);
  
  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
  
  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");
  
  // delete foo2
  hdfs.delete(foo2, true);
  // delete s3
  hdfs.deleteSnapshot(sdir2, "s3");
  
  // check
  final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(3, q1.getNameSpace());
  final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(1, q2.getNameSpace());
  
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc = 
      (WithCount) fooRef.asReference().getReferredINode();
  assertEquals(1, wc.getReferenceCount());
  INodeDirectory fooNode = wc.getReferredINode().asDirectory();
  ReadOnlyList<INode> children = fooNode
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  assertEquals(0, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());
  
  restartClusterAndCheckImage(true);
}
 
Example 15
Source File: TestRenameWithSnapshots.java    From big-c with Apache License 2.0
/**
 * Unit test for HDFS-4842.
 */
@Test
public void testRenameDirAndDeleteSnapshot_7() throws Exception {
  fsn.getSnapshotManager().setAllowNestedSnapshots(true);
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);
  
  final Path foo = new Path(dir2, "foo");
  final Path bar = new Path(foo, "bar");
  final Path file = new Path(bar, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED);
  
  // take a snapshot s0 and s1 on /test
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  SnapshotTestHelper.createSnapshot(hdfs, test, "s1");
  // delete file so we have a snapshot copy for s1 in bar
  hdfs.delete(file, true);
  
  // create another snapshot on dir2
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  
  // rename foo from dir2 to dir1
  final Path newfoo = new Path(dir1, foo.getName());
  hdfs.rename(foo, newfoo);
  
  // delete snapshot s1
  hdfs.deleteSnapshot(test, "s1");
  
  // make sure the snapshot copy of file in s1 is merged to s0. For 
  // HDFS-4842, we need to make sure that we do not wrongly use s2 as the
  // prior snapshot of s1.
  final Path file_s2 = SnapshotTestHelper.getSnapshotPath(dir2, "s2",
      "foo/bar/file");
  assertFalse(hdfs.exists(file_s2));
  final Path file_s0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
      "dir2/foo/bar/file");
  assertTrue(hdfs.exists(file_s0));
  
  // check dir1: foo should be in the created list of s0
  INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString())
      .asDirectory();
  List<DirectoryDiff> dir1DiffList = dir1Node.getDiffs().asList();
  assertEquals(1, dir1DiffList.size());
  List<INode> dList = dir1DiffList.get(0).getChildrenDiff()
      .getList(ListType.DELETED);
  assertTrue(dList.isEmpty());
  List<INode> cList = dir1DiffList.get(0).getChildrenDiff()
      .getList(ListType.CREATED);
  assertEquals(1, cList.size());
  INode cNode = cList.get(0);
  INode fooNode = fsdir.getINode4Write(newfoo.toString());
  assertSame(cNode, fooNode);
  
  // check foo and its subtree
  final Path newbar = new Path(newfoo, bar.getName());
  INodeDirectory barNode = fsdir.getINode4Write(newbar.toString())
      .asDirectory();
  assertSame(fooNode.asDirectory(), barNode.getParent());
  // bar should only have a snapshot diff for s0
  List<DirectoryDiff> barDiffList = barNode.getDiffs().asList();
  assertEquals(1, barDiffList.size());
  DirectoryDiff diff = barDiffList.get(0);
  INodeDirectory testNode = fsdir.getINode4Write(test.toString())
      .asDirectory();
  Snapshot s0 = testNode.getSnapshot(DFSUtil.string2Bytes("s0"));
  assertEquals(s0.getId(), diff.getSnapshotId());
  // and file should be stored in the deleted list of this snapshot diff
  assertEquals("file", diff.getChildrenDiff().getList(ListType.DELETED)
      .get(0).getLocalName());
  
  // check dir2: a WithName instance for foo should be in the deleted list
  // of the snapshot diff for s2
  INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
      .asDirectory();
  List<DirectoryDiff> dir2DiffList = dir2Node.getDiffs().asList();
  // dir2Node should contain 1 snapshot diffs for s2
  assertEquals(1, dir2DiffList.size());
  dList = dir2DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
  assertEquals(1, dList.size());
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(dir2, "s2", 
      foo.getName());
  INodeReference.WithName fooNode_s2 = 
      (INodeReference.WithName) fsdir.getINode(foo_s2.toString());
  assertSame(dList.get(0), fooNode_s2);
  assertSame(fooNode.asReference().getReferredINode(),
      fooNode_s2.getReferredINode());
  
  restartClusterAndCheckImage(true);
}
 
Example 16
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * Test the undo section of the second-time rename.
 */
@Test
public void testRenameUndo_3() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path sdir3 = new Path("/dir3");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  hdfs.mkdirs(sdir3);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  
  INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
  INodeDirectory mockDir3 = spy(dir3);
  doReturn(false).when(mockDir3).addChild((INode) anyObject(), anyBoolean(),
          Mockito.anyInt());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
  
  final Path foo_dir2 = new Path(sdir2, "foo2");
  final Path foo_dir3 = new Path(sdir3, "foo3");
  hdfs.rename(foo, foo_dir2);
  boolean result = hdfs.rename(foo_dir2, foo_dir3);
  assertFalse(result);
  
  // check the current internal details
  INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  Snapshot s2 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  ReadOnlyList<INode> dir2Children = dir2Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir2Children.size());
  List<DirectoryDiff> dir2Diffs = dir2Node.getDiffs().asList();
  assertEquals(1, dir2Diffs.size());
  assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId());
  ChildrenDiff childrenDiff = dir2Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo2");
  assertFalse(hdfs.exists(foo_s2));
  
  INode fooNode = fsdir.getINode4Write(foo_dir2.toString());
  assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
  assertTrue(fooNode instanceof INodeReference.DstReference);
  List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
  assertEquals(1, fooDiffs.size());
  assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
  
  // create snapshot on sdir2 and rename again
  hdfs.createSnapshot(sdir2, "s3");
  result = hdfs.rename(foo_dir2, foo_dir3);
  assertFalse(result);

  // check internal details again
  dir2Node = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  Snapshot s3 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s3"));
  fooNode = fsdir.getINode4Write(foo_dir2.toString());
  dir2Children = dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir2Children.size());
  dir2Diffs = dir2Node.getDiffs().asList();
  assertEquals(2, dir2Diffs.size());
  assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId());
  assertEquals(s3.getId(), dir2Diffs.get(1).getSnapshotId());
  
  childrenDiff = dir2Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
  assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
  
  childrenDiff = dir2Diffs.get(1).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(0, childrenDiff.getList(ListType.CREATED).size());
  
  final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo2");
  assertFalse(hdfs.exists(foo_s2));
  assertTrue(hdfs.exists(foo_s3));
  
  assertTrue(fooNode instanceof INodeReference.DstReference);
  fooDiffs = fooNode.asDirectory().getDiffs().asList();
  assertEquals(2, fooDiffs.size());
  assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
  assertEquals(s3.getId(), fooDiffs.get(1).getSnapshotId());
}
 
Example 17
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * Test the undo section of rename. Before the rename, we create the renamed 
 * file/dir after taking the snapshot.
 */
@Test
public void testRenameUndo_2() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path dir2file = new Path(sdir2, "file");
  DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  
  // create foo after taking snapshot
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  
  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
          Mockito.anyInt());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
  
  final Path newfoo = new Path(sdir2, "foo");
  boolean result = hdfs.rename(foo, newfoo);
  assertFalse(result);
  
  // check the current internal details
  INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  ReadOnlyList<INode> dir1Children = dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir1Children.size());
  assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
  List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
  assertEquals(1, dir1Diffs.size());
  assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
  
  // after the undo of rename, the created list of sdir1 should contain 
  // 1 element
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
  
  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode instanceof INodeDirectory);
  assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
  
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  assertFalse(hdfs.exists(foo_s1));
  
  // check sdir2
  assertFalse(hdfs.exists(newfoo));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  assertFalse(dir2Node.isWithSnapshot());
  ReadOnlyList<INode> dir2Children = dir2Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir2Children.size());
  assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
 
Example 18
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * Test the undo section of rename. Before the rename, we create the renamed 
 * file/dir before taking the snapshot.
 */
@Test
public void testRenameUndo_1() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  final Path dir2file = new Path(sdir2, "file");
  DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  
  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
         Mockito.anyInt());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
  
  final Path newfoo = new Path(sdir2, "foo");
  boolean result = hdfs.rename(foo, newfoo);
  assertFalse(result);
  
  // check the current internal details
  INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
      .asDirectory();
  Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
  ReadOnlyList<INode> dir1Children = dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir1Children.size());
  assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
  List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
  assertEquals(1, dir1Diffs.size());
  assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
  
  // after the undo of rename, both the created and deleted list of sdir1
  // should be empty
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(0, childrenDiff.getList(ListType.CREATED).size());
  
  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot());
  List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
  assertEquals(1, fooDiffs.size());
  assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
  
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  INode fooNode_s1 = fsdir.getINode(foo_s1.toString());
  assertTrue(fooNode_s1 == fooNode);
  
  // check sdir2
  assertFalse(hdfs.exists(newfoo));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  assertFalse(dir2Node.isWithSnapshot());
  ReadOnlyList<INode> dir2Children = dir2Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(1, dir2Children.size());
  assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
 
Example 19
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * After rename, delete the snapshot in src
 */
@Test
public void testRenameDirAndDeleteSnapshot_2() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir2, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s3");
  
  final Path newfoo = new Path(sdir1, "foo");
  hdfs.rename(foo, newfoo);
  
  // restart the cluster and check fsimage
  restartClusterAndCheckImage(true);
  
  final Path bar2 = new Path(newfoo, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  
  hdfs.createSnapshot(sdir1, "s4");
  hdfs.delete(newfoo, true);
  
  final Path bar2_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4",
      "foo/bar2");
  assertTrue(hdfs.exists(bar2_s4));
  final Path bar_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4",
      "foo/bar");
  assertTrue(hdfs.exists(bar_s4));
      
  // delete snapshot s4. The diff of s4 should be combined to s3
  hdfs.deleteSnapshot(sdir1, "s4");
  // restart the cluster and check fsimage
  restartClusterAndCheckImage(true);
  
  Path bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar");
  assertFalse(hdfs.exists(bar_s3));
  bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo/bar");
  assertTrue(hdfs.exists(bar_s3));
  Path bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar2");
  assertFalse(hdfs.exists(bar2_s3));
  bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo/bar2");
  assertFalse(hdfs.exists(bar2_s3));
  
  // delete snapshot s3
  hdfs.deleteSnapshot(sdir2, "s3");
  final Path bar_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
      "foo/bar");
  assertTrue(hdfs.exists(bar_s2));
  
  // check internal details
  INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
  Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
  INodeReference fooRef = fsdir.getINode(foo_s2.toString()).asReference();
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount fooWC = (WithCount) fooRef.getReferredINode();
  assertEquals(1, fooWC.getReferenceCount());
  INodeDirectory fooDir = fooWC.getReferredINode().asDirectory();
  List<DirectoryDiff> diffs = fooDir.getDiffs().asList();
  assertEquals(1, diffs.size());
  assertEquals(s2.getId(), diffs.get(0).getSnapshotId());
  
  // restart the cluster and check fsimage
  restartClusterAndCheckImage(true);
  
  // delete snapshot s2.
  hdfs.deleteSnapshot(sdir2, "s2");
  assertFalse(hdfs.exists(bar_s2));
  restartClusterAndCheckImage(true);
  // make sure the whole referred subtree has been destroyed
  QuotaCounts q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(3, q.getNameSpace());
  assertEquals(0, q.getStorageSpace());
  
  hdfs.deleteSnapshot(sdir1, "s1");
  restartClusterAndCheckImage(true);
  q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
  assertEquals(3, q.getNameSpace());
  assertEquals(0, q.getStorageSpace());
}
 
Example 20
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * Test renaming a file and then delete snapshots.
 */
@Test
public void testRenameFileAndDeleteSnapshot() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir2, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  hdfs.createSnapshot(sdir1, "s3");
  
  final Path newfoo = new Path(sdir1, "foo");
  hdfs.rename(foo, newfoo);
  
  hdfs.setReplication(newfoo, REPL_1);
  
  hdfs.createSnapshot(sdir1, "s4");
  hdfs.setReplication(newfoo, REPL_2);
  
  FileStatus status = hdfs.getFileStatus(newfoo);
  assertEquals(REPL_2, status.getReplication());
  final Path foo_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4", "foo");
  status = hdfs.getFileStatus(foo_s4);
  assertEquals(REPL_1, status.getReplication());
  
  hdfs.createSnapshot(sdir1, "s5");
  final Path foo_s5 = SnapshotTestHelper.getSnapshotPath(sdir1, "s5", "foo");
  status = hdfs.getFileStatus(foo_s5);
  assertEquals(REPL_2, status.getReplication());
  
  // delete snapshot s5.
  hdfs.deleteSnapshot(sdir1, "s5");
  restartClusterAndCheckImage(true);
  assertFalse(hdfs.exists(foo_s5));
  status = hdfs.getFileStatus(foo_s4);
  assertEquals(REPL_1, status.getReplication());
  
  // delete snapshot s4.
  hdfs.deleteSnapshot(sdir1, "s4");
  
  assertFalse(hdfs.exists(foo_s4));
  Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo");
  assertFalse(hdfs.exists(foo_s3));
  foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo");
  assertFalse(hdfs.exists(foo_s3));
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
  assertTrue(hdfs.exists(foo_s2));
  status = hdfs.getFileStatus(foo_s2);
  assertEquals(REPL, status.getReplication());
  
  INodeFile snode = fsdir.getINode(newfoo.toString()).asFile();
  assertEquals(1, snode.getDiffs().asList().size());
  INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
  Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  assertEquals(s2.getId(), snode.getDiffs().getLastSnapshotId());
  
  // restart cluster
  restartClusterAndCheckImage(true);
  
  // delete snapshot s2.
  hdfs.deleteSnapshot(sdir2, "s2");
  assertFalse(hdfs.exists(foo_s2));
  
  // restart the cluster and check fsimage
  restartClusterAndCheckImage(true);
  hdfs.deleteSnapshot(sdir1, "s3");
  restartClusterAndCheckImage(true);
  hdfs.deleteSnapshot(sdir1, "s1");
  restartClusterAndCheckImage(true);
}