Java Code Examples for org.apache.hadoop.test.GenericTestUtils#getMethodName()

The following examples show how to use org.apache.hadoop.test.GenericTestUtils#getMethodName(). Each example is taken from an open-source project; the source file and license are noted above it.
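Under the hood, getMethodName() reads the caller's frame off the current thread's stack trace, so it returns the name of the test method that invokes it; the tests below rely on this to build unique, self-describing file paths. A minimal sketch of the idiom (the class name and assertions are illustrative, not taken from any of the projects below):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Test;

public class MethodNameIdiomSketch {
  @Test
  public void testUniquePathPerTest() {
    // getMethodName() resolves to the name of the calling test method,
    // so every test that uses this idiom writes to its own file.
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    Path path = new Path("/" + METHOD_NAME + ".dat");

    Assert.assertEquals("testUniquePathPerTest", METHOD_NAME);
    Assert.assertEquals("/testUniquePathPerTest.dat", path.toString());
  }
}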
Example 1
Source File: TestLazyPersistFiles.java    From hadoop with Apache License 2.0
/**
 * Appending to a lazy-persist file is denied.
 * @throws IOException
 */
@Test
public void testAppendIsDenied() throws IOException {
  startUpCluster(true, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, BLOCK_SIZE, true);

  try {
    client.append(path.toString(), BUFFER_LENGTH,
        EnumSet.of(CreateFlag.APPEND), null, null).close();
    fail("Append to LazyPersist file did not fail as expected");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
  }
}
 
Example 2
Source File: TestLazyPersistFiles.java    From hadoop with Apache License 2.0
@Test
public void testPolicyPersistenceInFsImage() throws IOException {
  startUpCluster(false, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, 0, true);
  // checkpoint
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  fs.saveNamespace();
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  cluster.restartNameNode(true);

  // Stat the file and check that the lazyPersist flag is returned.
  HdfsFileStatus status = client.getFileInfo(path.toString());
  assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
}
 
Example 3
Source File: BlockReportTestBase.java    From big-c with Apache License 2.0
/**
 * Test creates a file and closes it.
 * A second datanode is then started in the cluster.
 * As soon as the replication process completes, the test sends a
 * block report and checks that no under-replicated blocks are left.
 *
 * @throws IOException in case of an error
 */
@Test(timeout=300000)
public void blockReport_06() throws Exception {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  final int DN_N1 = DN_N0 + 1;

  writeFile(METHOD_NAME, FILE_SIZE, filePath);
  startDNandWait(filePath, true);

  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N1);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();
  assertEquals("Wrong number of PendingReplication Blocks",
    0, cluster.getNamesystem().getUnderReplicatedBlocks());
}
 
Example 4
Source File: TestFiPipelines.java    From big-c with Apache License 2.0
/**
 * Test initiates and sets actions created by the fault-injection framework.
 * The actions cover both aspects of sending acknowledgment packets in a
 * pipeline. Creates and closes a file whose length is smaller than the
 * packet size. The injected actions check whether the number of visible
 * bytes at the datanodes equals the number of acknowledged bytes.
 *
 * @throws IOException in case of an error
 */
@Test
public void pipeline_04() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + METHOD_NAME);
  }

  final PipelinesTestUtil.PipelinesTest pipst =
    (PipelinesTestUtil.PipelinesTest) PipelinesTestUtil.initTest();

  pipst.fiCallSetNumBytes.set(new PipelinesTestUtil.ReceivedCheckAction(METHOD_NAME));
  pipst.fiCallSetBytesAcked.set(new PipelinesTestUtil.AckedCheckAction(METHOD_NAME));

  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  FSDataOutputStream fsOut = fs.create(filePath);
  TestPipelines.writeData(fsOut, 2);
  fs.close();
}
 
Example 5
Source File: TestLazyPersistFiles.java    From hadoop with Apache License 2.0
@Test
public void testDnRestartWithSavedReplicas()
    throws IOException, InterruptedException {

  startUpCluster(true, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");

  makeTestFile(path1, BLOCK_SIZE, true);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);

  // Sleep for a short time to allow the lazy writer thread to do its job.
  // However the block replica should not be evicted from RAM_DISK yet.
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);

  LOG.info("Restarting the DataNode");
  cluster.restartDataNode(0, true);
  cluster.waitActive();
  triggerBlockReport();

  // Ensure that the replica is now on persistent storage.
  ensureFileReplicasOnStorageType(path1, DEFAULT);
}
 
Example 6
Source File: TestLazyPersistFiles.java    From big-c with Apache License 2.0
/**
 * RamDisk eviction after lazy persist to disk.
 * @throws Exception
 */
@Test
public void testRamDiskEviction() throws Exception {
  startUpCluster(true, 1 + EVICTION_LOW_WATERMARK);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  Path path2 = new Path("/" + METHOD_NAME + ".02.dat");

  final int SEED = 0xFADED;
  makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);

  // Sleep for a short time to allow the lazy writer thread to do its job.
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);

  // Create another file with a replica on RAM_DISK.
  makeTestFile(path2, BLOCK_SIZE, true);
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  triggerBlockReport();

  // Ensure the first file was evicted to disk, the second is still on
  // RAM_DISK.
  ensureFileReplicasOnStorageType(path2, RAM_DISK);
  ensureFileReplicasOnStorageType(path1, DEFAULT);

  verifyRamDiskJMXMetric("RamDiskBlocksEvicted", 1);
  verifyRamDiskJMXMetric("RamDiskBlocksEvictedWithoutRead", 1);
}
 
Example 7
Source File: TestLazyPersistFiles.java    From big-c with Apache License 2.0
@Test
public void testPolicyNotSetByDefault() throws IOException {
  startUpCluster(false, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, 0, false);
  // Stat the file and check that the LAZY_PERSIST policy is not
  // returned.
  HdfsFileStatus status = client.getFileInfo(path.toString());
  assertThat(status.getStoragePolicy(), not(LAZY_PERSIST_POLICY_ID));
}
 
Example 8
Source File: TestLazyPersistFiles.java    From hadoop with Apache License 2.0
/**
 * Client tries to write a LAZY_PERSIST file to a DataNode with no RAM_DISK
 * configured. The write should fall back to disk without error.
 * @throws IOException
 */
@Test
public void testFallbackToDisk() throws IOException {
  startUpCluster(false, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, BLOCK_SIZE, true);
  ensureFileReplicasOnStorageType(path, DEFAULT);
}
 
Example 9
Source File: TestLazyPersistFiles.java    From hadoop with Apache License 2.0
@Test
public void testPlacementOnSizeLimitedRamDisk() throws IOException {
  startUpCluster(true, 3);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  Path path2 = new Path("/" + METHOD_NAME + ".02.dat");

  makeTestFile(path1, BLOCK_SIZE, true);
  makeTestFile(path2, BLOCK_SIZE, true);

  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  ensureFileReplicasOnStorageType(path2, RAM_DISK);
}
 
Example 10
Source File: TestRbwSpaceReservation.java    From big-c with Apache License 2.0
@Test (timeout=300000)
public void testWithLimitedSpace() throws IOException {
  // Cluster with just enough space for a full block + meta.
  startCluster(BLOCK_SIZE, 1, 2 * BLOCK_SIZE - 1);
  final String methodName = GenericTestUtils.getMethodName();
  Path file1 = new Path("/" + methodName + ".01.dat");
  Path file2 = new Path("/" + methodName + ".02.dat");

  // Create two files.
  FSDataOutputStream os1 = null, os2 = null;

  try {
    os1 = fs.create(file1);
    os2 = fs.create(file2);

    // Write one byte to the first file.
    byte[] data = new byte[1];
    os1.write(data);
    os1.hsync();

    // Try to write one byte to the second file.
    // The block allocation must fail.
    thrown.expect(RemoteException.class);
    os2.write(data);
    os2.hsync();
  } finally {
    if (os1 != null) {
      os1.close();
    }

    // os2.close() will fail as no block was allocated.
  }
}
 
Example 11
Source File: TestLazyPersistFiles.java    From hadoop with Apache License 2.0
@Test
public void testPolicyPropagation() throws IOException {
  startUpCluster(false, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, 0, true);
  // Stat the file and check that the lazyPersist flag is returned.
  HdfsFileStatus status = client.getFileInfo(path.toString());
  assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
}
 
Example 12
Source File: TestLazyPersistFiles.java    From big-c with Apache License 2.0
/**
 * If the only available storage is RAM_DISK and the LAZY_PERSIST flag is not
 * specified, then block placement should fail.
 *
 * @throws IOException
 */
@Test
public void testRamDiskNotChosenByDefault() throws IOException {
  startUpCluster(true, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  try {
    makeTestFile(path, BLOCK_SIZE, false);
    fail("Block placement to RAM_DISK should have failed without lazyPersist flag");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
  }
}
 
Example 13
Source File: TestRbwSpaceReservation.java    From hadoop with Apache License 2.0
@SuppressWarnings("unchecked")
@Test(timeout = 30000)
public void testRBWFileCreationError() throws Exception {

  final short replication = 1;
  startCluster(BLOCK_SIZE, replication, -1);

  final FsVolumeImpl fsVolumeImpl = (FsVolumeImpl) cluster.getDataNodes()
      .get(0).getFSDataset().getVolumes().get(0);
  final String methodName = GenericTestUtils.getMethodName();
  final Path file = new Path("/" + methodName + ".01.dat");

  // Mock BlockPoolSlice so that RBW file creation throws an IOException.
  BlockPoolSlice blockPoolSlice = Mockito.mock(BlockPoolSlice.class);
  Mockito.when(blockPoolSlice.createRbwFile((Block) Mockito.any()))
      .thenThrow(new IOException("Synthetic IO Exception Through MOCK"));

  Field field = FsVolumeImpl.class.getDeclaredField("bpSlices");
  field.setAccessible(true);
  Map<String, BlockPoolSlice> bpSlices = (Map<String, BlockPoolSlice>) field
      .get(fsVolumeImpl);
  bpSlices.put(fsVolumeImpl.getBlockPoolList()[0], blockPoolSlice);

  try {
    // Write 1 byte to the file
    FSDataOutputStream os = fs.create(file, replication);
    os.write(new byte[1]);
    os.hsync();
    os.close();
    fail("Expecting IOException file creation failure");
  } catch (IOException e) {
    // Exception can be ignored (expected)
  }

  // Ensure RBW space reserved is released
  assertTrue("Expected ZERO but got " + fsVolumeImpl.getReservedForRbw(),
      fsVolumeImpl.getReservedForRbw() == 0);
}
 
Example 14
Source File: TestPipelines.java    From big-c with Apache License 2.0
/**
 * Creates and closes a file of a certain length.
 * Calls append() so that the next write() operation adds to the end of it.
 * After the write() invocation, calls hflush() to make sure the data has
 * sunk through the pipeline, then checks the state of the last block's
 * replica, which is expected to be in the RBW state.
 *
 * @throws IOException in case of an error
 */
@Test
public void pipeline_01() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + METHOD_NAME);
  }
  Path filePath = new Path("/" + METHOD_NAME + ".dat");

  DFSTestUtil.createFile(fs, filePath, FILE_SIZE, REPL_FACTOR, rand.nextLong());
  if(LOG.isDebugEnabled()) {
    LOG.debug("Invoking append but doing nothing otherwise...");
  }
  FSDataOutputStream ofs = fs.append(filePath);
  ofs.writeBytes("Some more stuff to write");
  ((DFSOutputStream) ofs.getWrappedStream()).hflush();

  List<LocatedBlock> lb = cluster.getNameNodeRpc().getBlockLocations(
    filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();

  String bpid = cluster.getNamesystem().getBlockPoolId();
  for (DataNode dn : cluster.getDataNodes()) {
    Replica r = DataNodeTestUtils.fetchReplicaInfo(dn, bpid, lb.get(0)
        .getBlock().getBlockId());

    assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
    assertEquals("Should be RBW replica on " + dn
        + " after sequence of calls append()/write()/hflush()",
        HdfsServerConstants.ReplicaState.RBW, r.getState());
  }
  ofs.close();
}
 
Example 15
Source File: BlockReportTestBase.java    From big-c with Apache License 2.0
/**
 * Test writes a file and closes it.
 * A block report is then generated with an extra (bogus) block.
 * The block report is forced and the number of pending-deletion
 * blocks is checked.
 *
 * @throws IOException in case of an error
 */
@Test(timeout=300000)
public void blockReport_04() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  DFSTestUtil.createFile(fs, filePath,
                         FILE_SIZE, REPL_FACTOR, rand.nextLong());


  DataNode dn = cluster.getDataNodes().get(DN_N0);
  // all blocks belong to the same file, hence same BP
  String poolId = cluster.getNamesystem().getBlockPoolId();

  // Create a bogus new block which will not be present on the namenode.
  ExtendedBlock b = new ExtendedBlock(
      poolId, rand.nextLong(), 1024L, rand.nextLong());
  dn.getFSDataset().createRbw(StorageType.DEFAULT, b, false);

  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();

  assertThat("Wrong number of corrupt blocks",
             cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L));
  assertThat("Wrong number of PendingDeletion blocks",
             cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
}
 
Example 16
Source File: TestScrLazyPersistFiles.java    From hadoop with Apache License 2.0
private void doShortCircuitReadAfterEvictionTest() throws IOException,
    InterruptedException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  Path path2 = new Path("/" + METHOD_NAME + ".02.dat");

  final int SEED = 0xFADED;
  makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);

  // Verify short-circuit read from RAM_DISK.
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  File metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // Sleep for a short time to allow the lazy writer thread to do its job.
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);

  // Verify short-circuit read from RAM_DISK once again.
  ensureFileReplicasOnStorageType(path1, RAM_DISK);
  metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // Create another file with a replica on RAM_DISK, which evicts the first.
  makeRandomTestFile(path2, BLOCK_SIZE, true, SEED);
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  triggerBlockReport();

  // Verify short-circuit read still works from DEFAULT storage.  This time,
  // we'll have a checksum written during lazy persistence.
  ensureFileReplicasOnStorageType(path1, DEFAULT);
  metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  assertTrue(metaFile.length() > BlockMetadataHeader.getHeaderSize());
  assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));

  // In the implementation of legacy short-circuit reads, any failure is
  // trapped silently, reverts back to a remote read, and also disables all
  // subsequent legacy short-circuit reads in the ClientContext.  If the test
  // uses legacy, then assert that it didn't get disabled.
  ClientContext clientContext = client.getClientContext();
  if (clientContext.getUseLegacyBlockReaderLocal()) {
    Assert.assertFalse(clientContext.getDisableLegacyBlockReaderLocal());
  }
}
 
Example 17
Source File: TestDataNodeRollingUpgrade.java    From big-c with Apache License 2.0
// Test that the DataXceiver server tracks correct peer/DataXceiver pairs
// for sending OOB messages.
@Test (timeout=600000)
public void testDatanodePeersXceiver() throws Exception {
  try {
    startCluster();

    // Create files in DFS.
    String testFile1 = "/" + GenericTestUtils.getMethodName() + ".01.dat";
    String testFile2 = "/" + GenericTestUtils.getMethodName() + ".02.dat";
    String testFile3 = "/" + GenericTestUtils.getMethodName() + ".03.dat";

    DFSClient client1 = new DFSClient(NameNode.getAddress(conf), conf);
    DFSClient client2 = new DFSClient(NameNode.getAddress(conf), conf);
    DFSClient client3 = new DFSClient(NameNode.getAddress(conf), conf);

    DFSOutputStream s1 = (DFSOutputStream) client1.create(testFile1, true);
    DFSOutputStream s2 = (DFSOutputStream) client2.create(testFile2, true);
    DFSOutputStream s3 = (DFSOutputStream) client3.create(testFile3, true);

    byte[] toWrite = new byte[1024*1024*8];
    Random rb = new Random(1111);
    rb.nextBytes(toWrite);
    s1.write(toWrite, 0, 1024*1024*8);
    s1.flush();
    s2.write(toWrite, 0, 1024*1024*8);
    s2.flush();
    s3.write(toWrite, 0, 1024*1024*8);
    s3.flush();       

    // Each open stream should have a one-to-one peer/DataXceiver pairing.
    assertTrue(dn0.getXferServer().getNumPeers() == dn0.getXferServer()
        .getNumPeersXceiver());
    s1.close();
    s2.close();
    s3.close();
    assertTrue(dn0.getXferServer().getNumPeers() == dn0.getXferServer()
        .getNumPeersXceiver());
    client1.close();
    client2.close();
    client3.close();      
  } finally {
    shutdownCluster();
  }
}
 
Example 18
Source File: TestLazyPersistFiles.java    From hadoop with Apache License 2.0
/**
 * Concurrently read the same file from multiple threads and verify the
 * contents.
 */
@Test
public void testConcurrentRead()
  throws Exception {
  startUpCluster(true, 2);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final Path path1 = new Path("/" + METHOD_NAME + ".dat");

  final int SEED = 0xFADED;
  final int NUM_TASKS = 5;
  makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);

  //Read from multiple clients
  final CountDownLatch latch = new CountDownLatch(NUM_TASKS);
  final AtomicBoolean testFailed = new AtomicBoolean(false);

  Runnable readerRunnable = new Runnable() {
    @Override
    public void run() {
      try {
        Assert.assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));
      } catch (Throwable e) {
        LOG.error("readerRunnable error", e);
        testFailed.set(true);
      } finally {
        latch.countDown();
      }
    }
  };

  Thread[] threads = new Thread[NUM_TASKS];
  for (int i = 0; i < NUM_TASKS; i++) {
    threads[i] = new Thread(readerRunnable);
    threads[i].start();
  }

  Thread.sleep(500);

  for (int i = 0; i < NUM_TASKS; i++) {
    Uninterruptibles.joinUninterruptibly(threads[i]);
  }
  Assert.assertFalse(testFailed.get());
}
 
Example 19
Source File: TestDataNodeRollingUpgrade.java    From big-c with Apache License 2.0
/**
 * Support for layout version change with rolling upgrade was
 * added by HDFS-6800 and HDFS-6981.
 */
@Test(timeout=300000)
public void testWithLayoutChangeAndRollback() throws Exception {
  final long seed = 0x600DF00D;
  try {
    startCluster();

    Path[] paths = new Path[3];
    File[] blockFiles = new File[3];

    // Create two files in DFS.
    for (int i = 0; i < 2; ++i) {
      paths[i] = new Path("/" + GenericTestUtils.getMethodName() + "." + i + ".dat");
      DFSTestUtil.createFile(fs, paths[i], BLOCK_SIZE, (short) 1, seed);
    }

    startRollingUpgrade();

    // Delete the first file. The DN will save its block files in trash.
    blockFiles[0] = getBlockForFile(paths[0], true);
    File trashFile0 = getTrashFileForBlock(blockFiles[0], false);
    deleteAndEnsureInTrash(paths[0], blockFiles[0], trashFile0);

    // Restart the DN with a new layout version to trigger layout upgrade.
    LOG.info("Shutting down the Datanode");
    MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0);
    DFSTestUtil.addDataNodeLayoutVersion(
        DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1,
        "Test Layout for TestDataNodeRollingUpgrade");
    LOG.info("Restarting the DataNode");
    cluster.restartDataNode(dnprop, true);
    cluster.waitActive();

    dn0 = cluster.getDataNodes().get(0);
    LOG.info("The DN has been restarted");
    assertFalse(trashFile0.exists());
    assertFalse(dn0.getStorage().getBPStorage(blockPoolId).isTrashAllowed(blockFiles[0]));

    // Ensure that the block file for the first file was moved from 'trash' to 'previous'.
    assertTrue(isBlockFileInPrevious(blockFiles[0]));
    assertFalse(isTrashRootPresent());

    // Delete the second file. Ensure that its block file is in previous.
    blockFiles[1] = getBlockForFile(paths[1], true);
    fs.delete(paths[1], false);
    assertTrue(isBlockFileInPrevious(blockFiles[1]));
    assertFalse(isTrashRootPresent());

    // Create and delete a third file. Its block file should not be
    // in either trash or previous after deletion.
    paths[2] = new Path("/" + GenericTestUtils.getMethodName() + ".2.dat");
    DFSTestUtil.createFile(fs, paths[2], BLOCK_SIZE, (short) 1, seed);
    blockFiles[2] = getBlockForFile(paths[2], true);
    fs.delete(paths[2], false);
    assertFalse(isBlockFileInPrevious(blockFiles[2]));
    assertFalse(isTrashRootPresent());

    // Rollback and ensure that the first two file contents were restored.
    rollbackRollingUpgrade();
    for (int i = 0; i < 2; ++i) {
      byte[] actual = DFSTestUtil.readFileBuffer(fs, paths[i]);
      byte[] calculated = DFSTestUtil.calculateFileContentsFromSeed(seed, BLOCK_SIZE);
      assertArrayEquals(actual, calculated);
    }

    // And none of the block files must be in previous or trash.
    assertFalse(isTrashRootPresent());
    for (int i = 0; i < 3; ++i) {
      assertFalse(isBlockFileInPrevious(blockFiles[i]));
    }
  } finally {
    shutdownCluster();
  }
}