Java Code Examples for org.apache.hadoop.hdfs.DFSTestUtil#readFileBuffer()

The following examples show how to use org.apache.hadoop.hdfs.DFSTestUtil#readFileBuffer(). Each example notes the source file and project it was taken from.
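Before the project examples, here is a minimal sketch of the typical pattern: write a file with DFSTestUtil.createFile() and read its full contents back as a byte array with DFSTestUtil.readFileBuffer(). The class name, file path, length, and seed below are illustrative and not taken from any of the projects; the sketch assumes a JUnit 4 test environment with the Hadoop HDFS test artifacts (MiniDFSCluster, DFSTestUtil) on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Assert;
import org.junit.Test;

public class ReadFileBufferExample {
  @Test
  public void readsBackWrittenBytes() throws Exception {
    Configuration conf = new Configuration();
    // Spin up a single-DataNode mini cluster for the test.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      final Path file = new Path("/example.dat");   // illustrative path
      final int fileLen = 4096;                     // illustrative length
      final long seed = 0xBEEF;                     // illustrative seed
      // Write a deterministic pseudo-random file, then read it back into memory.
      DFSTestUtil.createFile(fs, file, fileLen, (short) 1, seed);
      byte[] contents = DFSTestUtil.readFileBuffer(fs, file);
      Assert.assertEquals(fileLen, contents.length);
    } finally {
      cluster.shutdown();
    }
  }
}

Note that readFileBuffer() loads the entire file into memory, which is why the project examples below only call it on small, single-block test files.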
Example 1
Source File: TestScrLazyPersistFiles.java    From big-c with Apache License 2.0
public void doShortCircuitReadMetaFileCorruptionTest() throws IOException,
    InterruptedException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  Path path2 = new Path("/" + METHOD_NAME + ".02.dat");

  final int SEED = 0xFADED;
  makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);

  // Create another file with a replica on RAM_DISK, which evicts the first.
  makeRandomTestFile(path2, BLOCK_SIZE, true, SEED);

  // Sleep for a short time to allow the lazy writer thread to do its job.
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  triggerBlockReport();

  // Corrupt the lazy-persisted checksum file, and verify that checksum
  // verification catches it.
  ensureFileReplicasOnStorageType(path1, DEFAULT);
  File metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  MiniDFSCluster.corruptBlock(metaFile);
  exception.expect(ChecksumException.class);
  DFSTestUtil.readFileBuffer(fs, path1);
}
 
Example 2
Source File: TestScrLazyPersistFiles.java    From big-c with Apache License 2.0
public void doShortCircuitReadBlockFileCorruptionTest() throws IOException,
    InterruptedException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  Path path2 = new Path("/" + METHOD_NAME + ".02.dat");

  final int SEED = 0xFADED;
  makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);

  // Create another file with a replica on RAM_DISK, which evicts the first.
  makeRandomTestFile(path2, BLOCK_SIZE, true, SEED);

  // Sleep for a short time to allow the lazy writer thread to do its job.
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  triggerBlockReport();

  // Corrupt the lazy-persisted block file, and verify that checksum
  // verification catches it.
  ensureFileReplicasOnStorageType(path1, DEFAULT);
  cluster.corruptReplica(0, DFSTestUtil.getFirstBlock(fs, path1));
  exception.expect(ChecksumException.class);
  DFSTestUtil.readFileBuffer(fs, path1);
}
 
Example 3
Source File: TestDistCpSync.java    From hadoop with Apache License 2.0
private void verifyCopy(FileStatus s, FileStatus t, boolean compareName)
    throws Exception {
  Assert.assertEquals(s.isDirectory(), t.isDirectory());
  if (compareName) {
    Assert.assertEquals(s.getPath().getName(), t.getPath().getName());
  }
  if (!s.isDirectory()) {
    // verify the file content is the same
    byte[] sbytes = DFSTestUtil.readFileBuffer(dfs, s.getPath());
    byte[] tbytes = DFSTestUtil.readFileBuffer(dfs, t.getPath());
    Assert.assertArrayEquals(sbytes, tbytes);
  } else {
    FileStatus[] slist = dfs.listStatus(s.getPath());
    FileStatus[] tlist = dfs.listStatus(t.getPath());
    Assert.assertEquals(slist.length, tlist.length);
    for (int i = 0; i < slist.length; i++) {
      verifyCopy(slist[i], tlist[i], true);
    }
  }
}
 
Example 4
Source File: TestScrLazyPersistFiles.java    From hadoop with Apache License 2.0
public void doShortCircuitReadMetaFileCorruptionTest() throws IOException,
    InterruptedException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  Path path2 = new Path("/" + METHOD_NAME + ".02.dat");

  final int SEED = 0xFADED;
  makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);

  // Create another file with a replica on RAM_DISK, which evicts the first.
  makeRandomTestFile(path2, BLOCK_SIZE, true, SEED);

  // Sleep for a short time to allow the lazy writer thread to do its job.
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  triggerBlockReport();

  // Corrupt the lazy-persisted checksum file, and verify that checksum
  // verification catches it.
  ensureFileReplicasOnStorageType(path1, DEFAULT);
  File metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  MiniDFSCluster.corruptBlock(metaFile);
  exception.expect(ChecksumException.class);
  DFSTestUtil.readFileBuffer(fs, path1);
}
 
Example 5
Source File: TestDistCpSync.java    From big-c with Apache License 2.0
private void verifyCopy(FileStatus s, FileStatus t, boolean compareName)
    throws Exception {
  Assert.assertEquals(s.isDirectory(), t.isDirectory());
  if (compareName) {
    Assert.assertEquals(s.getPath().getName(), t.getPath().getName());
  }
  if (!s.isDirectory()) {
    // verify the file content is the same
    byte[] sbytes = DFSTestUtil.readFileBuffer(dfs, s.getPath());
    byte[] tbytes = DFSTestUtil.readFileBuffer(dfs, t.getPath());
    Assert.assertArrayEquals(sbytes, tbytes);
  } else {
    FileStatus[] slist = dfs.listStatus(s.getPath());
    FileStatus[] tlist = dfs.listStatus(t.getPath());
    Assert.assertEquals(slist.length, tlist.length);
    for (int i = 0; i < slist.length; i++) {
      verifyCopy(slist[i], tlist[i], true);
    }
  }
}
 
Example 6
Source File: AclTestHelpers.java    From hadoop with Apache License 2.0
/**
 * Asserts that permission is granted to the given fs/user for the given file.
 *
 * @param fs FileSystem to check
 * @param user UserGroupInformation owner of fs
 * @param pathToCheck Path file to check
 * @throws Exception if there is an unexpected error
 */
public static void assertFilePermissionGranted(FileSystem fs,
    UserGroupInformation user, Path pathToCheck) throws Exception {
  try {
    DFSTestUtil.readFileBuffer(fs, pathToCheck);
  } catch (AccessControlException e) {
    fail("expected permission granted for user " + user + ", path = " +
      pathToCheck);
  }
}
 
Example 7
Source File: LazyPersistTestCase.java    From hadoop with Apache License 2.0
protected final boolean verifyReadRandomFile(
    Path path, int fileLength, int seed) throws IOException {
  byte contents[] = DFSTestUtil.readFileBuffer(fs, path);
  byte expected[] = DFSTestUtil.
    calculateFileContentsFromSeed(seed, fileLength);
  return Arrays.equals(contents, expected);
}
 
Example 8
Source File: AclTestHelpers.java    From hadoop with Apache License 2.0
/**
 * Asserts that permission is denied to the given fs/user for the given file.
 *
 * @param fs FileSystem to check
 * @param user UserGroupInformation owner of fs
 * @param pathToCheck Path file to check
 * @throws Exception if there is an unexpected error
 */
public static void assertFilePermissionDenied(FileSystem fs,
    UserGroupInformation user, Path pathToCheck) throws Exception {
  try {
    DFSTestUtil.readFileBuffer(fs, pathToCheck);
    fail("expected AccessControlException for user " + user + ", path = " +
      pathToCheck);
  } catch (AccessControlException e) {
    // expected
  }
}
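A hypothetical driver for the two AclTestHelpers methods above, sketched under the assumption of a MiniDFSCluster-based fixture: the cluster field, user name, group, and path are illustrative and not from the original source, and java.security.PrivilegedExceptionAction is needed in addition to the usual Hadoop imports. The test creates a throwaway user with UserGroupInformation.createUserForTesting, obtains a FileSystem bound to that user via doAs, and then lets assertFilePermissionDenied perform the readFileBuffer() probe.

@Test
public void testOtherUserCannotReadRestrictedFile() throws Exception {
  // "cluster" is assumed to be a running MiniDFSCluster set up by the fixture,
  // and "/restricted.dat" a pre-created file whose permissions exclude "bruce".
  final Configuration conf = cluster.getConfiguration(0);
  // Create a throwaway user that is neither the owner nor in the file's group.
  UserGroupInformation bruce = UserGroupInformation.createUserForTesting(
      "bruce", new String[] { "staff" });
  // Bind a FileSystem instance to that user's credentials.
  FileSystem fsAsBruce = bruce.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(conf);
    }
  });
  Path restricted = new Path("/restricted.dat");   // illustrative path
  // The readFileBuffer() call inside the helper is expected to throw
  // AccessControlException, which assertFilePermissionDenied treats as success.
  AclTestHelpers.assertFilePermissionDenied(fsAsBruce, bruce, restricted);
}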
 
Example 9
Source File: TestShortCircuitCache.java    From hadoop with Apache License 2.0
@Test(timeout=60000)
public void testPreReceiptVerificationDfsClientCanDoScr() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testPreReceiptVerificationDfsClientCanDoScr", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  fs.getClient().getConf().brfFailureInjector =
      new TestPreReceiptVerificationFailureInjector();
  final Path TEST_PATH1 = new Path("/test_file1");
  DFSTestUtil.createFile(fs, TEST_PATH1, 4096, (short)1, 0xFADE2);
  final Path TEST_PATH2 = new Path("/test_file2");
  DFSTestUtil.createFile(fs, TEST_PATH2, 4096, (short)1, 0xFADE2);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  ShortCircuitRegistry registry =
      cluster.getDataNodes().get(0).getShortCircuitRegistry();
  registry.visit(new ShortCircuitRegistry.Visitor() {
    @Override
    public void accept(HashMap<ShmId, RegisteredShm> segments,
                       HashMultimap<ExtendedBlockId, Slot> slots) {
      Assert.assertEquals(1, segments.size());
      Assert.assertEquals(2, slots.size());
    }
  });
  cluster.shutdown();
  sockDir.close();
}
 
Example 10
Source File: TestShortCircuitCache.java    From big-c with Apache License 2.0
@Test(timeout=60000)
public void testDataXceiverCleansUpSlotsOnFailure() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testDataXceiverCleansUpSlotsOnFailure", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path TEST_PATH1 = new Path("/test_file1");
  final Path TEST_PATH2 = new Path("/test_file2");
  final int TEST_FILE_LEN = 4096;
  final int SEED = 0xFADE1;
  DFSTestUtil.createFile(fs, TEST_PATH1, TEST_FILE_LEN,
      (short)1, SEED);
  DFSTestUtil.createFile(fs, TEST_PATH2, TEST_FILE_LEN,
      (short)1, SEED);

  // The first read should allocate one shared memory segment and slot.
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);

  // The second read should fail, and we should only have 1 segment and 1 slot
  // left.
  fs.getClient().getConf().brfFailureInjector =
      new TestCleanupFailureInjector();
  try {
    DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  } catch (Throwable t) {
    GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
        "testing, but we failed to do a non-TCP read.", t);
  }
  checkNumberOfSegmentsAndSlots(1, 1,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());
  cluster.shutdown();
  sockDir.close();
}
 
Example 11
Source File: TestShortCircuitCache.java    From big-c with Apache License 2.0
@Test(timeout=60000)
public void testPreReceiptVerificationDfsClientCanDoScr() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testPreReceiptVerificationDfsClientCanDoScr", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  fs.getClient().getConf().brfFailureInjector =
      new TestPreReceiptVerificationFailureInjector();
  final Path TEST_PATH1 = new Path("/test_file1");
  DFSTestUtil.createFile(fs, TEST_PATH1, 4096, (short)1, 0xFADE2);
  final Path TEST_PATH2 = new Path("/test_file2");
  DFSTestUtil.createFile(fs, TEST_PATH2, 4096, (short)1, 0xFADE2);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
  DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  ShortCircuitRegistry registry =
      cluster.getDataNodes().get(0).getShortCircuitRegistry();
  registry.visit(new ShortCircuitRegistry.Visitor() {
    @Override
    public void accept(HashMap<ShmId, RegisteredShm> segments,
                       HashMultimap<ExtendedBlockId, Slot> slots) {
      Assert.assertEquals(1, segments.size());
      Assert.assertEquals(2, slots.size());
    }
  });
  cluster.shutdown();
  sockDir.close();
}
 
Example 12
Source File: AclTestHelpers.java    From big-c with Apache License 2.0
/**
 * Asserts that permission is granted to the given fs/user for the given file.
 *
 * @param fs FileSystem to check
 * @param user UserGroupInformation owner of fs
 * @param pathToCheck Path file to check
 * @throws Exception if there is an unexpected error
 */
public static void assertFilePermissionGranted(FileSystem fs,
    UserGroupInformation user, Path pathToCheck) throws Exception {
  try {
    DFSTestUtil.readFileBuffer(fs, pathToCheck);
  } catch (AccessControlException e) {
    fail("expected permission granted for user " + user + ", path = " +
      pathToCheck);
  }
}
 
Example 13
Source File: LazyPersistTestCase.java    From big-c with Apache License 2.0
protected final boolean verifyReadRandomFile(
    Path path, int fileLength, int seed) throws IOException {
  byte contents[] = DFSTestUtil.readFileBuffer(fs, path);
  byte expected[] = DFSTestUtil.
    calculateFileContentsFromSeed(seed, fileLength);
  return Arrays.equals(contents, expected);
}
 
Example 14
Source File: TestShortCircuitCache.java    From hadoop with Apache License 2.0
@Test(timeout=60000)
public void testDataXceiverCleansUpSlotsOnFailure() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testDataXceiverCleansUpSlotsOnFailure", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path TEST_PATH1 = new Path("/test_file1");
  final Path TEST_PATH2 = new Path("/test_file2");
  final int TEST_FILE_LEN = 4096;
  final int SEED = 0xFADE1;
  DFSTestUtil.createFile(fs, TEST_PATH1, TEST_FILE_LEN,
      (short)1, SEED);
  DFSTestUtil.createFile(fs, TEST_PATH2, TEST_FILE_LEN,
      (short)1, SEED);

  // The first read should allocate one shared memory segment and slot.
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);

  // The second read should fail, and we should only have 1 segment and 1 slot
  // left.
  fs.getClient().getConf().brfFailureInjector =
      new TestCleanupFailureInjector();
  try {
    DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  } catch (Throwable t) {
    GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
        "testing, but we failed to do a non-TCP read.", t);
  }
  checkNumberOfSegmentsAndSlots(1, 1,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());
  cluster.shutdown();
  sockDir.close();
}
 
Example 15
Source File: TestFileTruncate.java    From hadoop with Apache License 2.0
static void assertFileLength(Path file, long length) throws IOException {
  byte[] data = DFSTestUtil.readFileBuffer(fs, file);
  assertEquals("Wrong data size in snapshot.", length, data.length);
}
 
Example 16
Source File: TestDataNodeRollingUpgrade.java    From hadoop with Apache License 2.0
/**
 * Support for layout version change with rolling upgrade was
 * added by HDFS-6800 and HDFS-6981.
 */
@Test(timeout=300000)
public void testWithLayoutChangeAndRollback() throws Exception {
  final long seed = 0x600DF00D;
  try {
    startCluster();

    Path[] paths = new Path[3];
    File[] blockFiles = new File[3];

    // Create two files in DFS.
    for (int i = 0; i < 2; ++i) {
      paths[i] = new Path("/" + GenericTestUtils.getMethodName() + "." + i + ".dat");
      DFSTestUtil.createFile(fs, paths[i], BLOCK_SIZE, (short) 1, seed);
    }

    startRollingUpgrade();

    // Delete the first file. The DN will save its block files in trash.
    blockFiles[0] = getBlockForFile(paths[0], true);
    File trashFile0 = getTrashFileForBlock(blockFiles[0], false);
    deleteAndEnsureInTrash(paths[0], blockFiles[0], trashFile0);

    // Restart the DN with a new layout version to trigger layout upgrade.
    LOG.info("Shutting down the Datanode");
    MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0);
    DFSTestUtil.addDataNodeLayoutVersion(
        DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1,
        "Test Layout for TestDataNodeRollingUpgrade");
    LOG.info("Restarting the DataNode");
    cluster.restartDataNode(dnprop, true);
    cluster.waitActive();

    dn0 = cluster.getDataNodes().get(0);
    LOG.info("The DN has been restarted");
    assertFalse(trashFile0.exists());
    assertFalse(dn0.getStorage().getBPStorage(blockPoolId).isTrashAllowed(blockFiles[0]));

    // Ensure that the block file for the first file was moved from 'trash' to 'previous'.
    assertTrue(isBlockFileInPrevious(blockFiles[0]));
    assertFalse(isTrashRootPresent());

    // Delete the second file. Ensure that its block file is in previous.
    blockFiles[1] = getBlockForFile(paths[1], true);
    fs.delete(paths[1], false);
    assertTrue(isBlockFileInPrevious(blockFiles[1]));
    assertFalse(isTrashRootPresent());

    // Create and delete a third file. Its block file should not be
    // in either trash or previous after deletion.
    paths[2] = new Path("/" + GenericTestUtils.getMethodName() + ".2.dat");
    DFSTestUtil.createFile(fs, paths[2], BLOCK_SIZE, (short) 1, seed);
    blockFiles[2] = getBlockForFile(paths[2], true);
    fs.delete(paths[2], false);
    assertFalse(isBlockFileInPrevious(blockFiles[2]));
    assertFalse(isTrashRootPresent());

    // Rollback and ensure that the first two file contents were restored.
    rollbackRollingUpgrade();
    for (int i = 0; i < 2; ++i) {
      byte[] actual = DFSTestUtil.readFileBuffer(fs, paths[i]);
      byte[] calculated = DFSTestUtil.calculateFileContentsFromSeed(seed, BLOCK_SIZE);
      assertArrayEquals(actual, calculated);
    }

    // And none of the block files must be in previous or trash.
    assertFalse(isTrashRootPresent());
    for (int i = 0; i < 3; ++i) {
      assertFalse(isBlockFileInPrevious(blockFiles[i]));
    }
  } finally {
    shutdownCluster();
  }
}
 
Example 17
Source File: TestDataNodeHotSwapVolumes.java    From hadoop with Apache License 2.0
/**
 * Tests removing a data volume from a particular DataNode while the
 * volume is actively being written to.
 * @param dataNodeIdx the index of the DataNode from which to remove a volume.
 */
private void testRemoveVolumeBeingWrittenForDatanode(int dataNodeIdx)
    throws IOException, ReconfigurationException, TimeoutException,
    InterruptedException, BrokenBarrierException {
  // Starts DFS cluster with 3 DataNodes to form a pipeline.
  startDFSCluster(1, 3);

  final short REPLICATION = 3;
  final DataNode dn = cluster.getDataNodes().get(dataNodeIdx);
  final FileSystem fs = cluster.getFileSystem();
  final Path testFile = new Path("/test");
  final long lastTimeDiskErrorCheck = dn.getLastDiskErrorCheck();

  FSDataOutputStream out = fs.create(testFile, REPLICATION);

  Random rb = new Random(0);
  byte[] writeBuf = new byte[BLOCK_SIZE / 2];  // half of the block.
  rb.nextBytes(writeBuf);
  out.write(writeBuf);
  out.hflush();

  // Make FsDatasetSpi#finalizeBlock a time-consuming operation, so that if the
  // BlockReceiver releases the volume reference before finalizeBlock(), the blocks
  // on the volume are removed and finalizeBlock() throws an IOException.
  final FsDatasetSpi<? extends FsVolumeSpi> data = dn.data;
  dn.data = Mockito.spy(data);
  doAnswer(new Answer<Object>() {
        public Object answer(InvocationOnMock invocation)
            throws IOException, InterruptedException {
          Thread.sleep(1000);
          // Bypass the argument to FsDatasetImpl#finalizeBlock to verify that
          // the block is not removed, since the volume reference should not
          // be released at this point.
          data.finalizeBlock((ExtendedBlock) invocation.getArguments()[0]);
          return null;
        }
      }).when(dn.data).finalizeBlock(any(ExtendedBlock.class));

  final CyclicBarrier barrier = new CyclicBarrier(2);

  List<String> oldDirs = getDataDirs(dn);
  final String newDirs = oldDirs.get(1);  // Remove the first volume.
  final List<Exception> exceptions = new ArrayList<>();
  Thread reconfigThread = new Thread() {
    public void run() {
      try {
        barrier.await();
        dn.reconfigurePropertyImpl(
            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
      } catch (ReconfigurationException |
          InterruptedException |
          BrokenBarrierException e) {
        exceptions.add(e);
      }
    }
  };
  reconfigThread.start();

  barrier.await();
  rb.nextBytes(writeBuf);
  out.write(writeBuf);
  out.hflush();
  out.close();

  reconfigThread.join();

  // Verify the file has sufficient replications.
  DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
  // Read the content back
  byte[] content = DFSTestUtil.readFileBuffer(fs, testFile);
  assertEquals(BLOCK_SIZE, content.length);

  // If an IOException is thrown from BlockReceiver#run, it triggers
  // DataNode#checkDiskError(). So we can check whether checkDiskError() was called
  // to see whether an IOException occurred in BlockReceiver#run().
  assertEquals(lastTimeDiskErrorCheck, dn.getLastDiskErrorCheck());

  if (!exceptions.isEmpty()) {
    throw new IOException(exceptions.get(0).getCause());
  }
}
 
Example 18
Source File: TestShortCircuitCache.java    From big-c with Apache License 2.0
@Test(timeout=60000)
public void testDataXceiverHandlesRequestShortCircuitShmFailure()
    throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testDataXceiverHandlesRequestShortCircuitShmFailure", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path TEST_PATH1 = new Path("/test_file1");
  DFSTestUtil.createFile(fs, TEST_PATH1, 4096,
      (short)1, 0xFADE1);
  LOG.info("Setting failure injector and performing a read which " +
      "should fail...");
  DataNodeFaultInjector failureInjector = Mockito.mock(DataNodeFaultInjector.class);
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      throw new IOException("injected error into sendShmResponse");
    }
  }).when(failureInjector).sendShortCircuitShmResponse();
  DataNodeFaultInjector prevInjector = DataNodeFaultInjector.instance;
  DataNodeFaultInjector.instance = failureInjector;

  try {
    // The first read will try to allocate a shared memory segment and slot.
    // The shared memory segment allocation will fail because of the failure
    // injector.
    DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
    Assert.fail("expected readFileBuffer to fail, but it succeeded.");
  } catch (Throwable t) {
    GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
        "testing, but we failed to do a non-TCP read.", t);
  }

  checkNumberOfSegmentsAndSlots(0, 0,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());

  LOG.info("Clearing failure injector and performing another read...");
  DataNodeFaultInjector.instance = prevInjector;

  fs.getClient().getClientContext().getDomainSocketFactory().clearPathMap();

  // The second read should succeed.
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);

  // We should have added a new short-circuit shared memory segment and slot.
  checkNumberOfSegmentsAndSlots(1, 1,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());

  cluster.shutdown();
  sockDir.close();
}
 
Example 19
Source File: TestFileTruncate.java    From big-c with Apache License 2.0
static void assertFileLength(Path file, long length) throws IOException {
  byte[] data = DFSTestUtil.readFileBuffer(fs, file);
  assertEquals("Wrong data size in snapshot.", length, data.length);
}
 
Example 20
Source File: TestDataNodeHotSwapVolumes.java    From big-c with Apache License 2.0
/**
 * Tests removing a data volume from a particular DataNode while the
 * volume is actively being written to.
 * @param dataNodeIdx the index of the DataNode from which to remove a volume.
 */
private void testRemoveVolumeBeingWrittenForDatanode(int dataNodeIdx)
    throws IOException, ReconfigurationException, TimeoutException,
    InterruptedException, BrokenBarrierException {
  // Starts DFS cluster with 3 DataNodes to form a pipeline.
  startDFSCluster(1, 3);

  final short REPLICATION = 3;
  final DataNode dn = cluster.getDataNodes().get(dataNodeIdx);
  final FileSystem fs = cluster.getFileSystem();
  final Path testFile = new Path("/test");
  final long lastTimeDiskErrorCheck = dn.getLastDiskErrorCheck();

  FSDataOutputStream out = fs.create(testFile, REPLICATION);

  Random rb = new Random(0);
  byte[] writeBuf = new byte[BLOCK_SIZE / 2];  // half of the block.
  rb.nextBytes(writeBuf);
  out.write(writeBuf);
  out.hflush();

  // Make FsDatasetSpi#finalizeBlock a time-consuming operation, so that if the
  // BlockReceiver releases the volume reference before finalizeBlock(), the blocks
  // on the volume are removed and finalizeBlock() throws an IOException.
  final FsDatasetSpi<? extends FsVolumeSpi> data = dn.data;
  dn.data = Mockito.spy(data);
  doAnswer(new Answer<Object>() {
        public Object answer(InvocationOnMock invocation)
            throws IOException, InterruptedException {
          Thread.sleep(1000);
          // Bypass the argument to FsDatasetImpl#finalizeBlock to verify that
          // the block is not removed, since the volume reference should not
          // be released at this point.
          data.finalizeBlock((ExtendedBlock) invocation.getArguments()[0]);
          return null;
        }
      }).when(dn.data).finalizeBlock(any(ExtendedBlock.class));

  final CyclicBarrier barrier = new CyclicBarrier(2);

  List<String> oldDirs = getDataDirs(dn);
  final String newDirs = oldDirs.get(1);  // Remove the first volume.
  final List<Exception> exceptions = new ArrayList<>();
  Thread reconfigThread = new Thread() {
    public void run() {
      try {
        barrier.await();
        dn.reconfigurePropertyImpl(
            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
      } catch (ReconfigurationException |
          InterruptedException |
          BrokenBarrierException e) {
        exceptions.add(e);
      }
    }
  };
  reconfigThread.start();

  barrier.await();
  rb.nextBytes(writeBuf);
  out.write(writeBuf);
  out.hflush();
  out.close();

  reconfigThread.join();

  // Verify the file has sufficient replications.
  DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
  // Read the content back
  byte[] content = DFSTestUtil.readFileBuffer(fs, testFile);
  assertEquals(BLOCK_SIZE, content.length);

  // If an IOException is thrown from BlockReceiver#run, it triggers
  // DataNode#checkDiskError(). So we can check whether checkDiskError() was called
  // to see whether an IOException occurred in BlockReceiver#run().
  assertEquals(lastTimeDiskErrorCheck, dn.getLastDiskErrorCheck());

  if (!exceptions.isEmpty()) {
    throw new IOException(exceptions.get(0).getCause());
  }
}