Java Code Examples for org.apache.hadoop.hdfs.DistributedFileSystem#allowSnapshot()

The following examples show how to use org.apache.hadoop.hdfs.DistributedFileSystem#allowSnapshot(). Each example is drawn from an open-source project; the source file and license are noted above each snippet.
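Most of the examples follow the same basic sequence: allowSnapshot() marks a directory as snapshottable, after which snapshots can be created, renamed, and deleted under its .snapshot subtree; disallowSnapshot() reverses the operation once no snapshots remain. As a quick orientation, here is a minimal sketch of that sequence. It is not taken from any of the projects below, and the path, class name, and snapshot name are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class AllowSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalStateException("allowSnapshot() is HDFS-specific");
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    Path dir = new Path("/data");                  // hypothetical directory
    dfs.allowSnapshot(dir);                        // mark it snapshottable
    Path snapshot = dfs.createSnapshot(dir, "s0"); // creates /data/.snapshot/s0
    System.out.println("Created snapshot at " + snapshot);

    dfs.deleteSnapshot(dir, "s0");                 // remove the snapshot again
    dfs.disallowSnapshot(dir);                     // legal only once no snapshots remain
  }
}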
Example 1
Source File: TestFsck.java    From hadoop with Apache License 2.0
/**
 * Test for including the snapshot files in fsck report
 */
@Test
public void testFsckForSnapshotFiles() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .build();
  try {
    String runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots",
        "-files");
    assertTrue(runFsck.contains("HEALTHY"));
    final String fileName = "/srcdat";
    DistributedFileSystem hdfs = cluster.getFileSystem();
    Path file1 = new Path(fileName);
    DFSTestUtil.createFile(hdfs, file1, 1024, (short) 1, 1000L);
    hdfs.allowSnapshot(new Path("/"));
    hdfs.createSnapshot(new Path("/"), "mySnapShot");
    runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots", "-files");
    assertTrue(runFsck.contains("/.snapshot/mySnapShot/srcdat"));
    runFsck = runFsck(conf, 0, true, "/", "-files");
    assertFalse(runFsck.contains("mySnapShot"));
  } finally {
    cluster.shutdown();
  }
}
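The runFsck method used above is a helper defined elsewhere in TestFsck. Outside the test, the same check can be driven programmatically through the DFSck tool; a hedged sketch (DFSck prints its report to System.out by default, and an exit code of 0 indicates a healthy namespace):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

// equivalent of: hdfs fsck / -includeSnapshots -files
Configuration conf = new Configuration();
int exitCode = ToolRunner.run(new DFSck(conf),
    new String[] {"/", "-includeSnapshots", "-files"});
System.out.println("fsck exit code: " + exitCode);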
 
Example 2
Source File: DFSAdmin.java    From hadoop with Apache License 2.0
/**
 * Allow snapshot on a directory.
 * Usage: hdfs dfsadmin -allowSnapshot snapshotDir
 * @param argv List of command line parameters.
 * @exception IOException
 */
public void allowSnapshot(String[] argv) throws IOException {   
  DistributedFileSystem dfs = getDFS();
  try {
    dfs.allowSnapshot(new Path(argv[1]));
  } catch (SnapshotException e) {
    throw new RemoteException(e.getClass().getName(), e.getMessage());
  }
  System.out.println("Allowing snaphot on " + argv[1] + " succeeded");
}
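DFSAdmin pairs this with a disallowSnapshot subcommand of the same shape. The following is a hedged sketch of that counterpart, assuming the same getDFS() helper, rather than the verbatim Hadoop source:

public void disallowSnapshot(String[] argv) throws IOException {
  DistributedFileSystem dfs = getDFS();
  try {
    // fails with a SnapshotException if the directory still has snapshots
    dfs.disallowSnapshot(new Path(argv[1]));
  } catch (SnapshotException e) {
    throw new RemoteException(e.getClass().getName(), e.getMessage());
  }
  System.out.println("Disallowing snapshot on " + argv[1] + " succeeded");
}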
 
Example 3
Source File: TestWebHDFS.java    From hadoop with Apache License 2.0
/**
 * Test snapshot creation through WebHdfs
 */
@Test
public void testWebHdfsCreateSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);

    try {
      webHdfs.createSnapshot(foo);
      fail("Cannot create snapshot on a non-snapshottable directory");
    } catch (Exception e) {
      GenericTestUtils.assertExceptionContains(
          "Directory is not a snapshottable directory", e);
    }

    // allow snapshots on /foo
    dfs.allowSnapshot(foo);
    // create snapshots on foo using WebHdfs
    webHdfs.createSnapshot(foo, "s1");
    // create snapshot without specifying name
    final Path spath = webHdfs.createSnapshot(foo, null);

    Assert.assertTrue(webHdfs.exists(spath));
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 4
Source File: TestWebHDFS.java    From hadoop with Apache License 2.0
/**
 * Test snapshot deletion through WebHdfs
 */
@Test
public void testWebHdfsDeleteSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);
    dfs.allowSnapshot(foo);

    webHdfs.createSnapshot(foo, "s1");
    final Path spath = webHdfs.createSnapshot(foo, null);
    Assert.assertTrue(webHdfs.exists(spath));
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));

    // delete the two snapshots
    webHdfs.deleteSnapshot(foo, "s1");
    Assert.assertFalse(webHdfs.exists(s1path));
    webHdfs.deleteSnapshot(foo, spath.getName());
    Assert.assertFalse(webHdfs.exists(spath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 5
Source File: TestWebHDFS.java    From hadoop with Apache License 2.0
/**
 * Test snapshot rename through WebHdfs
 */
@Test
public void testWebHdfsRenameSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);
    dfs.allowSnapshot(foo);

    webHdfs.createSnapshot(foo, "s1");
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));

    // rename s1 to s2
    webHdfs.renameSnapshot(foo, "s1", "s2");
    Assert.assertFalse(webHdfs.exists(s1path));
    final Path s2path = SnapshotTestHelper.getSnapshotRoot(foo, "s2");
    Assert.assertTrue(webHdfs.exists(s2path));

    webHdfs.deleteSnapshot(foo, "s2");
    Assert.assertFalse(webHdfs.exists(s2path));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 6
Source File: SnapshotTestHelper.java    From hadoop with Apache License 2.0
/**
 * Create snapshot for a dir using a given snapshot name
 * 
 * @param hdfs DistributedFileSystem instance
 * @param snapshotRoot The dir to be snapshotted
 * @param snapshotName The name of the snapshot
 * @return The path of the snapshot root
 */
public static Path createSnapshot(DistributedFileSystem hdfs,
    Path snapshotRoot, String snapshotName) throws Exception {
  LOG.info("createSnapshot " + snapshotName + " for " + snapshotRoot);
  assertTrue(hdfs.exists(snapshotRoot));
  hdfs.allowSnapshot(snapshotRoot);
  hdfs.createSnapshot(snapshotRoot, snapshotName);
  // set quota to a large value for testing counts
  hdfs.setQuota(snapshotRoot, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
  return SnapshotTestHelper.getSnapshotRoot(snapshotRoot, snapshotName);
}
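A typical call site for this helper, sketched under the assumption of a running MiniDFSCluster field named cluster as in the tests above (the directory and snapshot names are hypothetical):

@Test
public void testCreateSnapshotWithHelper() throws Exception {
  DistributedFileSystem hdfs = cluster.getFileSystem();
  Path dir = new Path("/helperDemo");  // hypothetical directory
  hdfs.mkdirs(dir);
  // the helper allows snapshots on dir, creates the snapshot, and raises the quota
  Path snapshotRoot = SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
  assertTrue(hdfs.exists(snapshotRoot));  // /helperDemo/.snapshot/s0
}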
 
Example 7
Source File: TestSnapshotStatsMXBean.java    From hadoop with Apache License 2.0
/**
 * Test getting SnapshotStatsMXBean information
 */
@Test
public void testSnapshotStatsMXBeanInfo() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  String pathName = "/snapshot";
  Path path = new Path(pathName);

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();

    SnapshotManager sm = cluster.getNamesystem().getSnapshotManager();
    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
    dfs.mkdirs(path);
    dfs.allowSnapshot(path);
    dfs.createSnapshot(path);

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=SnapshotInfo");

    CompositeData[] directories =
        (CompositeData[]) mbs.getAttribute(
            mxbeanName, "SnapshottableDirectories");
    int numDirectories = Array.getLength(directories);
    assertEquals(sm.getNumSnapshottableDirs(), numDirectories);
    CompositeData[] snapshots =
        (CompositeData[]) mbs.getAttribute(mxbeanName, "Snapshots");
    int numSnapshots = Array.getLength(snapshots);
    assertEquals(sm.getNumSnapshots(), numSnapshots);

    CompositeData d = (CompositeData) Array.get(directories, 0);
    CompositeData s = (CompositeData) Array.get(snapshots, 0);
    assertTrue(((String) d.get("path")).contains(pathName));
    assertTrue(((String) s.get("snapshotDirectory")).contains(pathName));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
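The test reads the MBean from the in-process platform MBean server. Against a real cluster, the same attributes can be fetched over a remote JMX connection; a hedged sketch, assuming the NameNode was started with the standard com.sun.management.jmxremote options on a hypothetical port 8004:

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.openmbean.CompositeData;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public static void printSnapshottableDirs() throws Exception {
  JMXServiceURL url = new JMXServiceURL(  // hypothetical endpoint
      "service:jmx:rmi:///jndi/rmi://namenode.example.com:8004/jmxrmi");
  try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
    MBeanServerConnection mbsc = jmxc.getMBeanServerConnection();
    ObjectName name = new ObjectName("Hadoop:service=NameNode,name=SnapshotInfo");
    CompositeData[] dirs = (CompositeData[]) mbsc.getAttribute(
        name, "SnapshottableDirectories");
    for (CompositeData d : dirs) {
      System.out.println(d.get("path"));  // one line per snapshottable directory
    }
  }
}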
 
Example 8
Source File: TestOfflineImageViewer.java    From hadoop with Apache License 2.0
@BeforeClass
public static void createOriginalFSImage() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
        "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();

    // Create a reasonable namespace
    for (int i = 0; i < NUM_DIRS; i++, dirCount++) {
      Path dir = new Path("/dir" + i);
      hdfs.mkdirs(dir);
      writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
      for (int j = 0; j < FILES_PER_DIR; j++) {
        Path file = new Path(dir, "file" + j);
        FSDataOutputStream o = hdfs.create(file);
        o.write(23);
        o.close();

        writtenFiles.put(file.toString(),
            pathToFileEntry(hdfs, file.toString()));
      }
    }

    // Create an empty directory
    Path emptydir = new Path("/emptydir");
    hdfs.mkdirs(emptydir);
    dirCount++;
    writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));

    //Create a directory whose name should be escaped in XML
    Path invalidXMLDir = new Path("/dirContainingInvalidXMLChar\u0000here");
    hdfs.mkdirs(invalidXMLDir);
    dirCount++;

    // Get delegation tokens so we log the delegation token op
    Token<?>[] delegationTokens = hdfs
        .addDelegationTokens(TEST_RENEWER, null);
    for (Token<?> t : delegationTokens) {
      LOG.debug("got token " + t);
    }

    // Create INodeReference
    final Path src = new Path("/src");
    hdfs.mkdirs(src);
    dirCount++;
    writtenFiles.put(src.toString(), hdfs.getFileStatus(src));
    final Path orig = new Path("/src/orig");
    hdfs.mkdirs(orig);
    hdfs.allowSnapshot(src);
    hdfs.createSnapshot(src, "snapshot");
    final Path dst = new Path("/dst");
    hdfs.rename(orig, dst);
    dirCount++;
    writtenFiles.put(dst.toString(), hdfs.getFileStatus(dst));

    // Set XAttrs so the fsimage contains XAttr ops
    final Path xattr = new Path("/xattr");
    hdfs.mkdirs(xattr);
    dirCount++;
    hdfs.setXAttr(xattr, "user.a1", new byte[]{ 0x31, 0x32, 0x33 });
    hdfs.setXAttr(xattr, "user.a2", new byte[]{ 0x37, 0x38, 0x39 });
    // OIV should be able to handle empty value XAttrs
    hdfs.setXAttr(xattr, "user.a3", null);
    writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));

    // Write results to the fsimage file
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    hdfs.saveNamespace();

    // Determine location of fsimage file
    originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
        .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (originalFsimage == null) {
      throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    LOG.debug("original FS image file is " + originalFsimage);
  } finally {
    if (cluster != null)
      cluster.shutdown();
  }
}
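The fsimage saved here is consumed by the individual test cases through the offline image viewer. A hedged sketch of dumping it to XML with the OfflineImageViewerPB tool (the output location is hypothetical):

import org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB;

// equivalent of: hdfs oiv -i <fsimage> -o /tmp/fsimage.xml -p XML
int status = OfflineImageViewerPB.run(new String[] {
    "-i", originalFsimage.getAbsolutePath(),
    "-o", "/tmp/fsimage.xml",  // hypothetical output location
    "-p", "XML"});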
 
Example 9
Source File: TestOfflineImageViewer.java    From big-c with Apache License 2.0
@BeforeClass
public static void createOriginalFSImage() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
        "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();

    // Create a reasonable namespace
    for (int i = 0; i < NUM_DIRS; i++) {
      Path dir = new Path("/dir" + i);
      hdfs.mkdirs(dir);
      writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
      for (int j = 0; j < FILES_PER_DIR; j++) {
        Path file = new Path(dir, "file" + j);
        FSDataOutputStream o = hdfs.create(file);
        o.write(23);
        o.close();

        writtenFiles.put(file.toString(),
            pathToFileEntry(hdfs, file.toString()));
      }
    }

    // Create an empty directory
    Path emptydir = new Path("/emptydir");
    hdfs.mkdirs(emptydir);
    writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));

    //Create a directory whose name should be escaped in XML
    Path invalidXMLDir = new Path("/dirContainingInvalidXMLChar\u0000here");
    hdfs.mkdirs(invalidXMLDir);

    // Get delegation tokens so we log the delegation token op
    Token<?>[] delegationTokens = hdfs
        .addDelegationTokens(TEST_RENEWER, null);
    for (Token<?> t : delegationTokens) {
      LOG.debug("got token " + t);
    }

    final Path snapshot = new Path("/snapshot");
    hdfs.mkdirs(snapshot);
    hdfs.allowSnapshot(snapshot);
    hdfs.mkdirs(new Path("/snapshot/1"));
    hdfs.delete(snapshot, true);

    // Set XAttrs so the fsimage contains XAttr ops
    final Path xattr = new Path("/xattr");
    hdfs.mkdirs(xattr);
    hdfs.setXAttr(xattr, "user.a1", new byte[]{ 0x31, 0x32, 0x33 });
    hdfs.setXAttr(xattr, "user.a2", new byte[]{ 0x37, 0x38, 0x39 });
    // OIV should be able to handle empty value XAttrs
    hdfs.setXAttr(xattr, "user.a3", null);
    writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));

    // Write results to the fsimage file
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    hdfs.saveNamespace();

    // Determine location of fsimage file
    originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
        .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (originalFsimage == null) {
      throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    LOG.debug("original FS image file is " + originalFsimage);
  } finally {
    if (cluster != null)
      cluster.shutdown();
  }
}