Java Code Examples for org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter#saveNamespace()

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter#saveNamespace(). Each example is taken from an open-source project; the source file, originating project, and license are noted above the code.
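All of these examples share the same core idiom: the NameNode only saves its namespace while in safe mode, so saveNamespace() is bracketed by NameNodeAdapter.enterSafeMode() and NameNodeAdapter.leaveSafeMode(), usually followed by a NameNode restart to prove that the state survives in the newly written fsimage. The sketch below distills that pattern; the class name and the /example path are illustrative rather than taken from any example, and NameNodeAdapter is a test-scope helper, so this compiles only against the HDFS test artifacts.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

public class SaveNamespaceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      DistributedFileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/example"));  // illustrative state to checkpoint

      // saveNamespace() requires safe mode, hence the bracketing calls.
      NameNode nameNode = cluster.getNameNode();
      NameNodeAdapter.enterSafeMode(nameNode, false); // false = not resource-low
      NameNodeAdapter.saveNamespace(nameNode);        // write a fresh fsimage
      NameNodeAdapter.leaveSafeMode(nameNode);

      // Restart so the NameNode reloads from the fsimage just written.
      cluster.restartNameNode(true);
    } finally {
      cluster.shutdown();
    }
  }
}

restartNameNode(true) waits for the restarted NameNode to come back up before returning, which is why most of the examples below pass true.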
Example 1
Source File: TestOpenFilesWithSnapshot.java    From big-c with Apache License 2.0
@Test
public void testFilesDeletionWithCheckpoint() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.delete(new Path("/test/test/test2"), true);
  fs.delete(new Path("/test/test/test3"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
  
  // read snapshot file after restart
  String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test2");
  DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
  String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test3");
  DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
 
Example 2
Source File: TestHASafeMode.java    From big-c with Apache License 2.0
/**
 * Regression test for a bug experienced while developing
 * HDFS-2742. The scenario here is:
 * - image contains some blocks
 * - edits log contains at least one block addition, followed
 *   by deletion of more blocks than were added.
 * - When the node starts up, incorrect accounting of block
 *   totals caused an assertion failure.
 */
@Test
public void testBlocksDeletedInEditLog() throws Exception {
  banner("Starting with NN0 active and NN1 standby, creating some blocks");
  // Make 4 blocks persisted in the image.
  DFSTestUtil.createFile(fs, new Path("/test"),
      4*BLOCK_SIZE, (short) 3, 1L);
  NameNodeAdapter.enterSafeMode(nn0, false);
  NameNodeAdapter.saveNamespace(nn0);
  NameNodeAdapter.leaveSafeMode(nn0);
  
  // OP_ADD for 2 blocks
  DFSTestUtil.createFile(fs, new Path("/test2"),
      2*BLOCK_SIZE, (short) 3, 1L);
  
  // OP_DELETE for 4 blocks
  fs.delete(new Path("/test"), true);

  restartActive();
}
 
Example 3
Source File: TestOpenFilesWithSnapshot.java    From hadoop with Apache License 2.0
@Test
public void testOpenFilesWithRename() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);

  // check for zero sized blocks
  Path fileWithEmptyBlock = new Path("/test/test/test4");
  fs.create(fileWithEmptyBlock);
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  String clientName = fs.getClient().getClientName();
  // create one empty block
  nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  fs.createSnapshot(path, "s2");

  fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
  fs.delete(new Path("/test/test-renamed"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
}
 
Example 4
Source File: TestOpenFilesWithSnapshot.java    From hadoop with Apache License 2.0
@Test
public void testWithCheckpoint() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.delete(new Path("/test/test"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
  
  // read snapshot file after restart
  String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test2");
  DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
  String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test3");
  DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
 
Example 5
Source File: TestSnapshotBlocksMap.java    From hadoop with Apache License 2.0
@Test(timeout = 30000)
public void testReadSnapshotFileWithCheckpoint() throws Exception {
  Path foo = new Path("/foo");
  hdfs.mkdirs(foo);
  hdfs.allowSnapshot(foo);
  Path bar = new Path("/foo/bar");
  DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
  hdfs.createSnapshot(foo, "s1");
  assertTrue(hdfs.delete(bar, true));

  // checkpoint
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);

  // restart namenode to load snapshot files from fsimage
  cluster.restartNameNode(true);
  String snapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
  DFSTestUtil.readFile(hdfs, new Path(snapshotPath));
}
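For context, Snapshot.getSnapshotPath() simply assembles the ".snapshot" view of a path, so the read above is equivalent to reading the literal snapshot path directly (shown purely as an illustration):

// "/foo/.snapshot/s1/bar" is the snapshot view of /foo/bar in snapshot s1.
DFSTestUtil.readFile(hdfs, new Path("/foo/.snapshot/s1/bar"));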
 
Example 6
Source File: TestOpenFilesWithSnapshot.java    From big-c with Apache License 2.0
private void doTestMultipleSnapshots(boolean saveNamespace)
    throws IOException {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.createSnapshot(path, "s2");
  fs.delete(new Path("/test/test"), true);
  fs.deleteSnapshot(path, "s2");
  cluster.triggerBlockReports();
  if (saveNamespace) {
    NameNode nameNode = cluster.getNameNode();
    NameNodeAdapter.enterSafeMode(nameNode, false);
    NameNodeAdapter.saveNamespace(nameNode);
    NameNodeAdapter.leaveSafeMode(nameNode);
  }
  cluster.restartNameNode(true);
}
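Because this is a private helper parameterized on whether to checkpoint, the enclosing test class presumably invokes it once each way. Hypothetical callers (the test names below are illustrative, not part of this excerpt) would look like:

@Test
public void testMultipleSnapshotsWithCheckpoint() throws Exception {
  doTestMultipleSnapshots(true);   // checkpoint before the restart
}

@Test
public void testMultipleSnapshotsWithoutCheckpoint() throws Exception {
  doTestMultipleSnapshots(false);  // restart replays the edit log instead
}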
 
Example 7
Source File: TestXAttrWithSnapshot.java    From hadoop with Apache License 2.0
/**
 * Restart the cluster, optionally saving a new checkpoint.
 * 
 * @param checkpoint boolean true to save a new checkpoint
 * @throws Exception if restart fails
 */
private static void restart(boolean checkpoint) throws Exception {
  NameNode nameNode = cluster.getNameNode();
  if (checkpoint) {
    NameNodeAdapter.enterSafeMode(nameNode, false);
    NameNodeAdapter.saveNamespace(nameNode);
  }
  shutdown();
  initCluster(false);
}
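A caller of this helper would typically create some state and then verify that it survives both restart paths: without a checkpoint the NameNode recovers by replaying the edit log, and with one it reloads the freshly saved fsimage. A hypothetical sketch (the test name and elided assertions are placeholders):

@Test
public void testXAttrsSurviveRestart() throws Exception {
  // ... set xattrs on a test path here ...
  restart(false);  // recovered by edit-log replay
  // ... assert the xattrs are still present ...
  restart(true);   // recovered from the new fsimage
  // ... assert the xattrs are still present ...
}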
 
Example 8
Source File: TestSnapshotDeletion.java    From hadoop with Apache License 2.0
/**
 * Delete a snapshot that was taken before a directory deletion;
 * the directory diff list should be combined correctly.
 */
@Test (timeout=60000)
public void testDeleteSnapshot1() throws Exception {
  final Path root = new Path("/");

  Path dir = new Path("/dir1");
  Path file1 = new Path(dir, "file1");
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);

  hdfs.allowSnapshot(root);
  hdfs.createSnapshot(root, "s1");

  Path file2 = new Path(dir, "file2");
  DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);

  hdfs.createSnapshot(root, "s2");

  // delete file
  hdfs.delete(file1, true);
  hdfs.delete(file2, true);

  // delete directory
  assertTrue(hdfs.delete(dir, false));

  // delete second snapshot
  hdfs.deleteSnapshot(root, "s2");

  NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
  NameNodeAdapter.saveNamespace(cluster.getNameNode());

  // restart NN
  cluster.restartNameNodes();
}
 
Example 9
Source File: TestSnapshotBlocksMap.java    From hadoop with Apache License 2.0
@Test(timeout = 30000)
public void testReadRenamedSnapshotFileWithCheckpoint() throws Exception {
  final Path foo = new Path("/foo");
  final Path foo2 = new Path("/foo2");
  hdfs.mkdirs(foo);
  hdfs.mkdirs(foo2);

  hdfs.allowSnapshot(foo);
  hdfs.allowSnapshot(foo2);
  final Path bar = new Path(foo, "bar");
  final Path bar2 = new Path(foo2, "bar");
  DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
  hdfs.createSnapshot(foo, "s1");
  // rename to another snapshottable directory and take snapshot
  assertTrue(hdfs.rename(bar, bar2));
  hdfs.createSnapshot(foo2, "s2");
  // delete the original renamed file to make sure blocks are not updated by
  // the original file
  assertTrue(hdfs.delete(bar2, true));

  // checkpoint
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  // restart namenode to load snapshot files from fsimage
  cluster.restartNameNode(true);
  // file in first snapshot
  String barSnapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
  DFSTestUtil.readFile(hdfs, new Path(barSnapshotPath));
  // file in second snapshot after rename+delete
  String bar2SnapshotPath = Snapshot.getSnapshotPath(foo2.toString(),
      "s2/bar");
  DFSTestUtil.readFile(hdfs, new Path(bar2SnapshotPath));
}
 
Example 10
Source File: TestAclWithSnapshot.java    From hadoop with Apache License 2.0
/**
 * Restart the cluster, optionally saving a new checkpoint.
 *
 * @param checkpoint boolean true to save a new checkpoint
 * @throws Exception if restart fails
 */
private static void restart(boolean checkpoint) throws Exception {
  NameNode nameNode = cluster.getNameNode();
  if (checkpoint) {
    NameNodeAdapter.enterSafeMode(nameNode, false);
    NameNodeAdapter.saveNamespace(nameNode);
  }
  shutdown();
  initCluster(false);
}
 
Example 11
Source File: TestHASafeMode.java    From hadoop with Apache License 2.0
/**
 * Regression test for HDFS-2804: standby should not populate replication
 * queues when exiting safe mode.
 */
@Test
public void testNoPopulatingReplQueuesWhenExitingSafemode() throws Exception {
  DFSTestUtil.createFile(fs, new Path("/test"), 15*BLOCK_SIZE, (short)3, 1L);
  
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  
  // get some blocks in the SBN's image
  nn1.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  NameNodeAdapter.saveNamespace(nn1);
  nn1.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);

  // and some blocks in the edit logs
  DFSTestUtil.createFile(fs, new Path("/test2"), 15*BLOCK_SIZE, (short)3, 1L);
  nn0.getRpcServer().rollEditLog();
  
  cluster.stopDataNode(1);
  cluster.shutdownNameNode(1);

  //Configuration sbConf = cluster.getConfiguration(1);
  //sbConf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 1);
  cluster.restartNameNode(1, false);
  nn1 = cluster.getNameNode(1);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return !nn1.isInSafeMode();
    }
  }, 100, 10000);
  
  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  assertEquals(0L, nn1.getNamesystem().getUnderReplicatedBlocks());
  assertEquals(0L, nn1.getNamesystem().getPendingReplicationBlocks());
}
 
Example 12
Source File: TestSnapshotDeletion.java    From big-c with Apache License 2.0
/**
 * Delete a snapshot that was taken before a recursive directory deletion;
 * the directory diff list should be combined correctly.
 */
@Test (timeout=60000)
public void testDeleteSnapshot2() throws Exception {
  final Path root = new Path("/");

  Path dir = new Path("/dir1");
  Path file1 = new Path(dir, "file1");
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);

  hdfs.allowSnapshot(root);
  hdfs.createSnapshot(root, "s1");

  Path file2 = new Path(dir, "file2");
  DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
  INodeFile file2Node = fsdir.getINode(file2.toString()).asFile();
  long file2NodeId = file2Node.getId();

  hdfs.createSnapshot(root, "s2");

  // delete directory recursively
  assertTrue(hdfs.delete(dir, true));
  assertNotNull(fsdir.getInode(file2NodeId));

  // delete second snapshot
  hdfs.deleteSnapshot(root, "s2");
  assertNull(fsdir.getInode(file2NodeId));

  NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
  NameNodeAdapter.saveNamespace(cluster.getNameNode());

  // restart NN
  cluster.restartNameNodes();
}
 
Example 13
Source File: TestAclConfigFlag.java    From hadoop with Apache License 2.0
/**
 * Restart the cluster, optionally saving a new checkpoint.
 *
 * @param checkpoint boolean true to save a new checkpoint
 * @param aclsEnabled if true, ACL support is enabled
 * @throws Exception if restart fails
 */
private void restart(boolean checkpoint, boolean aclsEnabled)
    throws Exception {
  NameNode nameNode = cluster.getNameNode();
  if (checkpoint) {
    NameNodeAdapter.enterSafeMode(nameNode, false);
    NameNodeAdapter.saveNamespace(nameNode);
  }
  shutdown();
  initCluster(false, aclsEnabled);
}
 
Example 14
Source File: TestXAttrConfigFlag.java    From big-c with Apache License 2.0
/**
 * Restart the cluster, optionally saving a new checkpoint.
 *
 * @param checkpoint boolean true to save a new checkpoint
 * @param xattrsEnabled if true, XAttr support is enabled
 * @throws Exception if restart fails
 */
private void restart(boolean checkpoint, boolean xattrsEnabled)
    throws Exception {
  NameNode nameNode = cluster.getNameNode();
  if (checkpoint) {
    NameNodeAdapter.enterSafeMode(nameNode, false);
    NameNodeAdapter.saveNamespace(nameNode);
  }
  shutdown();
  initCluster(false, xattrsEnabled);
}
 
Example 15
Source File: TestBootstrapStandby.java    From big-c with Apache License 2.0
/**
 * Test that the standby can download a checkpoint made at a later
 * transaction ID from the active.
 */
@Test
public void testDownloadingLaterCheckpoint() throws Exception {
  // Roll edit logs a few times to inflate txid
  nn0.getRpcServer().rollEditLog();
  nn0.getRpcServer().rollEditLog();
  // Make checkpoint
  NameNodeAdapter.enterSafeMode(nn0, false);
  NameNodeAdapter.saveNamespace(nn0);
  NameNodeAdapter.leaveSafeMode(nn0);
  long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0)
    .getFSImage().getMostRecentCheckpointTxId();
  assertEquals(6, expectedCheckpointTxId);

  int rc = BootstrapStandby.run(
      new String[]{"-force"},
      cluster.getConfiguration(1));
  assertEquals(0, rc);
  
  // Should have copied over the namespace from the active
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1,
      ImmutableList.of((int)expectedCheckpointTxId));
  FSImageTestUtil.assertNNFilesMatch(cluster);

  // We should now be able to start the standby successfully.
  cluster.restartNameNode(1);
}
 
Example 16
Source File: TestFileCreation.java    From big-c with Apache License 2.0
/**
 * 1. Check the blocks of old file are cleaned after creating with overwrite
 * 2. Restart NN, check the file
 * 3. Save new checkpoint and restart NN, check the file
 */
@Test(timeout = 120000)
public void testFileCreationWithOverwrite() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt("dfs.blocksize", blockSize);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
      numDataNodes(3).build();
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    dfs.mkdirs(new Path("/foo/dir"));
    String file = "/foo/dir/file";
    Path filePath = new Path(file);
    
    // Case 1: Create file with overwrite, check the blocks of old file
    // are cleaned after creating with overwrite
    NameNode nn = cluster.getNameNode();
    FSNamesystem fsn = NameNodeAdapter.getNamesystem(nn);
    BlockManager bm = fsn.getBlockManager();
    
    FSDataOutputStream out = dfs.create(filePath);
    byte[] oldData = AppendTestUtil.randomBytes(seed, fileSize);
    try {
      out.write(oldData);
    } finally {
      out.close();
    }
    
    LocatedBlocks oldBlocks = NameNodeAdapter.getBlockLocations(
        nn, file, 0, fileSize);
    assertBlocks(bm, oldBlocks, true);
    
    out = dfs.create(filePath, true);
    byte[] newData = AppendTestUtil.randomBytes(seed, fileSize);
    try {
      out.write(newData);
    } finally {
      out.close();
    }
    dfs.deleteOnExit(filePath);
    
    LocatedBlocks newBlocks = NameNodeAdapter.getBlockLocations(
        nn, file, 0, fileSize);
    assertBlocks(bm, newBlocks, true);
    assertBlocks(bm, oldBlocks, false);
    
    FSDataInputStream in = dfs.open(filePath);
    byte[] result = null;
    try {
      result = readAll(in);
    } finally {
      in.close();
    }
    Assert.assertArrayEquals(newData, result);
    
    // Case 2: Restart NN, check the file
    cluster.restartNameNode();
    nn = cluster.getNameNode();
    in = dfs.open(filePath);
    try {
      result = readAll(in);
    } finally {
      in.close();
    }
    Assert.assertArrayEquals(newData, result);
    
    // Case 3: Save new checkpoint and restart NN, check the file
    NameNodeAdapter.enterSafeMode(nn, false);
    NameNodeAdapter.saveNamespace(nn);
    cluster.restartNameNode();
    nn = cluster.getNameNode();
    
    in = dfs.open(filePath);
    try {
      result = readAll(in);
    } finally {
      in.close();
    }
    Assert.assertArrayEquals(newData, result);
  } finally {
    if (dfs != null) {
      dfs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 