org.apache.hadoop.hdfs.AppendTestUtil Java Examples

The following examples show how to use org.apache.hadoop.hdfs.AppendTestUtil, a helper class from the Hadoop HDFS test sources. Each example is taken from an open-source project; the originating source file and license are noted above each snippet.
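
Across these examples, a few static helpers do most of the work: AppendTestUtil.write(out, offset, length) writes a simple deterministic byte pattern to an output stream, AppendTestUtil.check(fs, path, length) re-reads the file and verifies both the length and that pattern, AppendTestUtil.initBuffer(size) and AppendTestUtil.randomBytes(seed, size) build reference buffers, and AppendTestUtil.checkFullFile(fs, path, length, expected, message) compares a file against such a buffer byte for byte. The minimal sketch below is not taken from any of the projects that follow; it assumes the hadoop-hdfs test artifact (which provides AppendTestUtil and MiniDFSCluster) and its JUnit dependency are on the classpath, and the class and path names are purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Illustrative sketch only -- not from any of the projects listed below.
public class AppendTestUtilSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      FileSystem fs = cluster.getFileSystem();

      // write() emits a deterministic byte pattern; check() re-reads the file
      // and asserts both the expected length and that pattern.
      Path p = new Path("/appendTestUtilSketch/patterned");
      FSDataOutputStream out = fs.create(p, true);
      AppendTestUtil.write(out, 0, 1024);
      out.hflush();   // make the data visible to readers, as in the examples below
      out.close();
      AppendTestUtil.check(fs, p, 1024);

      // initBuffer() builds a random reference buffer; checkFullFile() compares
      // a file's contents against it byte for byte.
      byte[] expected = AppendTestUtil.initBuffer(512);
      Path q = new Path("/appendTestUtilSketch/random");
      FSDataOutputStream out2 = fs.create(q, true);
      out2.write(expected);
      out2.close();
      AppendTestUtil.checkFullFile(fs, q, expected.length, expected, q.toString());
    } finally {
      cluster.shutdown();
    }
  }
}

Most of the examples below follow this same write-then-verify shape, with truncate(), hflush()/hsync(), lease recovery, or NameNode failover steps inserted between the write and the check.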
Example #1
Source File: TestPipelinesFailover.java    From hadoop with Apache License 2.0
@Override
public void doAnAction() throws Exception {
  FSDataOutputStream stm = fs.create(path, true);
  try {
    AppendTestUtil.write(stm, 0, 100);
    stm.hflush();
    loopRecoverLease(fsOtherUser, path);
    AppendTestUtil.check(fs, path, 100);
  } finally {
    try {
      stm.close();
    } catch (IOException e) {
      // should expect this since we lost the lease
    }
  }
}
 
Example #2
Source File: TestFSMainOperationsWebHdfs.java    From hadoop with Apache License 2.0
@Test
public void testTruncate() throws Exception {
  final short repl = 3;
  final int blockSize = 1024;
  final int numOfBlocks = 2;
  Path dir = getTestRootPath(fSys, "test/hadoop");
  Path file = getTestRootPath(fSys, "test/hadoop/file");

  final byte[] data = getFileData(numOfBlocks, blockSize);
  createFile(fSys, file, data, blockSize, repl);

  final int newLength = blockSize;

  boolean isReady = fSys.truncate(file, newLength);

  Assert.assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fSys.getFileStatus(file);
  Assert.assertEquals(fileStatus.getLen(), newLength);
  AppendTestUtil.checkFullFile(fSys, file, newLength, data, file.toString());

  ContentSummary cs = fSys.getContentSummary(dir);
  Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
      newLength * repl);
  Assert.assertTrue("Deleted", fSys.delete(dir, true));
}
 
Example #3
Source File: BaseTestHttpFSWith.java    From big-c with Apache License 2.0
private void testTruncate() throws Exception {
  if (!isLocalFS()) {
    final short repl = 3;
    final int blockSize = 1024;
    final int numOfBlocks = 2;
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    fs.mkdirs(getProxiedFSTestDir());
    Path file = new Path(getProxiedFSTestDir(), "foo.txt");
    final byte[] data = FileSystemTestHelper.getFileData(
        numOfBlocks, blockSize);
    FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);

    final int newLength = blockSize;

    boolean isReady = fs.truncate(file, newLength);
    Assert.assertTrue("Recovery is not expected.", isReady);

    FileStatus fileStatus = fs.getFileStatus(file);
    Assert.assertEquals(fileStatus.getLen(), newLength);
    AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());

    fs.close();
  }
}
 
Example #4
Source File: TestPipelinesFailover.java    From big-c with Apache License 2.0
@Override
public void doAnAction() throws Exception {
  FSDataOutputStream stm = fs.create(path, true);
  try {
    AppendTestUtil.write(stm, 0, 100);
    stm.hflush();
    loopRecoverLease(fsOtherUser, path);
    AppendTestUtil.check(fs, path, 100);
  } finally {
    try {
      stm.close();
    } catch (IOException e) {
      // should expect this since we lost the lease
    }
  }
}
 
Example #5
Source File: BaseTestHttpFSWith.java    From hadoop with Apache License 2.0
private void testTruncate() throws Exception {
  if (!isLocalFS()) {
    final short repl = 3;
    final int blockSize = 1024;
    final int numOfBlocks = 2;
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    fs.mkdirs(getProxiedFSTestDir());
    Path file = new Path(getProxiedFSTestDir(), "foo.txt");
    final byte[] data = FileSystemTestHelper.getFileData(
        numOfBlocks, blockSize);
    FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);

    final int newLength = blockSize;

    boolean isReady = fs.truncate(file, newLength);
    Assert.assertTrue("Recovery is not expected.", isReady);

    FileStatus fileStatus = fs.getFileStatus(file);
    Assert.assertEquals(fileStatus.getLen(), newLength);
    AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());

    fs.close();
  }
}
 
Example #6
Source File: TestFSMainOperationsWebHdfs.java    From big-c with Apache License 2.0
@Test
public void testTruncate() throws Exception {
  final short repl = 3;
  final int blockSize = 1024;
  final int numOfBlocks = 2;
  Path dir = getTestRootPath(fSys, "test/hadoop");
  Path file = getTestRootPath(fSys, "test/hadoop/file");

  final byte[] data = getFileData(numOfBlocks, blockSize);
  createFile(fSys, file, data, blockSize, repl);

  final int newLength = blockSize;

  boolean isReady = fSys.truncate(file, newLength);

  Assert.assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fSys.getFileStatus(file);
  Assert.assertEquals(fileStatus.getLen(), newLength);
  AppendTestUtil.checkFullFile(fSys, file, newLength, data, file.toString());

  ContentSummary cs = fSys.getContentSummary(dir);
  Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
      newLength * repl);
  Assert.assertTrue("Deleted", fSys.delete(dir, true));
}
 
Example #7
Source File: TestFileTruncate.java    From big-c with Apache License 2.0
/**
 * The last block is truncated at its midpoint (non copy-on-truncate),
 * and the datanodes are shut down immediately after the truncate.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesShutdownImmediately() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testTruncateWithDataNodesShutdownImmediately");

  writeContents(contents, startingFileSize, p);

  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  boolean isReady = fs.truncate(p, newLength);
  assertFalse(isReady);

  cluster.shutdownDataNodes();
  cluster.setDataNodesDead();
  try {
    for(int i = 0; i < SUCCESS_ATTEMPTS && cluster.isDataNodeUp(); i++) {
      Thread.sleep(SLEEP);
    }
    assertFalse("All DataNodes should be down.", cluster.isDataNodeUp());
    LocatedBlocks blocks = getLocatedBlocks(p);
    assertTrue(blocks.isUnderConstruction());
  } finally {
    cluster.startDataNodes(conf, DATANODE_NUM, true,
        StartupOption.REGULAR, null);
    cluster.waitActive();
  }
  checkBlockRecovery(p);

  fs.delete(parent, true);
}
 
Example #8
Source File: TestDNFencing.java    From hadoop with Apache License 2.0
/**
 * Regression test for HDFS-2742. The issue in this bug was:
 * - DN does a block report while file is open. This BR contains
 *   the block in RBW state.
 * - Standby queues the RBW state in PendingDatanodeMessages
 * - Standby processes edit logs during failover. Before fixing
 *   this bug, it was mistakenly applying the RBW reported state
 *   after the block had been completed, causing the block to get
 *   marked corrupt. Instead, we should now be applying the RBW
 *   message on OP_ADD, and then the FINALIZED message on OP_CLOSE.
 */
@Test
public void testBlockReportsWhileFileBeingWritten() throws Exception {
  FSDataOutputStream out = fs.create(TEST_FILE_PATH);
  try {
    AppendTestUtil.write(out, 0, 10);
    out.hflush();
    
    // Block report will include the RBW replica, but will be
    // queued on the StandbyNode.
    cluster.triggerBlockReports();
    
  } finally {
    IOUtils.closeStream(out);
  }

  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  
  // Verify that no replicas are marked corrupt, and that the
  // file is readable from the failed-over standby.
  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
  assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
  assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());
  
  DFSTestUtil.readFile(fs, TEST_FILE_PATH);
}
 
Example #9
Source File: TestHSync.java    From hadoop with Apache License 2.0
/** Test hsync on an exact block boundary */
@Test
public void testHSyncBlockBoundary() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs = cluster.getFileSystem();
  
  final Path p = new Path("/testHSyncBlockBoundary/foo");
  final int len = 1 << 16;
  final byte[] fileContents = AppendTestUtil.initBuffer(len);
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 1, len, null);
  // fill exactly one block (tests the SYNC_BLOCK case) and flush
  out.write(fileContents, 0, len);
  out.hflush();
  // the full block should have caused a sync
  checkSyncMetric(cluster, 1);
  out.hsync();
  // hsync at the same block boundary should not trigger another sync
  checkSyncMetric(cluster, 1);
  // write one more byte and sync again
  out.write(1);
  out.hsync();
  checkSyncMetric(cluster, 2);
  out.close();
  checkSyncMetric(cluster, 3);
  cluster.shutdown();
}
 
Example #10
Source File: TestPacketReceiver.java    From big-c with Apache License 2.0
private void doTestReceiveAndMirror(PacketReceiver pr,
    int dataLen, int checksumsLen) throws IOException {
  final byte[] DATA = AppendTestUtil.initBuffer(dataLen);
  final byte[] CHECKSUMS = AppendTestUtil.initBuffer(checksumsLen);

  byte[] packet = prepareFakePacket(DATA, CHECKSUMS);
  ByteArrayInputStream in = new ByteArrayInputStream(packet);
  
  pr.receiveNextPacket(in);
  
  ByteBuffer parsedData = pr.getDataSlice();
  assertArrayEquals(DATA, remainingAsArray(parsedData));

  ByteBuffer parsedChecksums = pr.getChecksumSlice();
  assertArrayEquals(CHECKSUMS, remainingAsArray(parsedChecksums));
  
  PacketHeader header = pr.getHeader();
  assertEquals(SEQNO, header.getSeqno());
  assertEquals(OFFSET_IN_BLOCK, header.getOffsetInBlock());
  assertEquals(dataLen + checksumsLen + Ints.BYTES, header.getPacketLen());
  
  // Mirror the packet to an output stream and make sure it matches
  // the packet we sent.
  ByteArrayOutputStream mirrored = new ByteArrayOutputStream();
  mirrored = Mockito.spy(mirrored);

  pr.mirrorPacketTo(new DataOutputStream(mirrored));
  // The write should be done in a single call. Otherwise we may hit
  // nasty interactions with nagling (eg HDFS-4049).
  Mockito.verify(mirrored, Mockito.times(1))
    .write(Mockito.<byte[]>any(), Mockito.anyInt(),
        Mockito.eq(packet.length));
  Mockito.verifyNoMoreInteractions(mirrored);

  assertArrayEquals(packet, mirrored.toByteArray());
}
 
Example #11
Source File: TestShortCircuitLocalRead.java    From big-c with Apache License 2.0
/**
 * Test that file data can be read by reading the block
 * through RemoteBlockReader
 * @throws IOException
 */
public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum, int size, String shortCircuitUser,
                                                        int readOffset, boolean shortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
           .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  // check that / exists
  Path path = new Path("/");
  URI uri = cluster.getURI();
  assertTrue("/ should be a directory", fs.getFileStatus(path)
              .isDirectory() == true);

  byte[] fileData = AppendTestUtil.randomBytes(seed, size);
  Path file1 = new Path("filelocal.dat");
  FSDataOutputStream stm = createFile(fs, file1, 1);

  stm.write(fileData);
  stm.close();
  try {
    checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, 
        conf, shortCircuitFails);
    // RemoteBlockReader does not support read(ByteBuffer bf)
    assertTrue("RemoteBlockReader unsupported method read(ByteBuffer bf) error",
                  checkUnsupportedMethod(fs, file1, fileData, readOffset));
  } catch(IOException e) {
    throw new IOException("doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
  } catch(InterruptedException inEx) {
    throw inEx;
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example #12
Source File: TestFileTruncate.java    From big-c with Apache License 2.0
/**
 * Truncate files of different sizes byte by byte.
 */
@Test
public void testBasicTruncate() throws IOException {
  int startingFileSize = 3 * BLOCK_SIZE;

  Path parent = new Path("/test");
  fs.mkdirs(parent);
  fs.setQuota(parent, 100, 1000);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  for (int fileLength = startingFileSize; fileLength > 0;
                                          fileLength -= BLOCK_SIZE - 1) {
    for (int toTruncate = 0; toTruncate <= fileLength; toTruncate++) {
      final Path p = new Path(parent, "testBasicTruncate" + fileLength);
      writeContents(contents, fileLength, p);

      int newLength = fileLength - toTruncate;
      boolean isReady = fs.truncate(p, newLength);
      LOG.info("fileLength=" + fileLength + ", newLength=" + newLength
          + ", toTruncate=" + toTruncate + ", isReady=" + isReady);

      assertEquals("File must be closed for zero truncate"
          + " or truncating at the block boundary",
          isReady, toTruncate == 0 || newLength % BLOCK_SIZE == 0);
      if (!isReady) {
        checkBlockRecovery(p);
      }

      ContentSummary cs = fs.getContentSummary(parent);
      assertEquals("Bad disk space usage",
          cs.getSpaceConsumed(), newLength * REPLICATION);
      // validate the file content
      checkFullFile(p, newLength, contents);
    }
  }
  fs.delete(parent, true);
}
 
Example #13
Source File: TestHDFSFileContextMainOperations.java    From big-c with Apache License 2.0
@Test
public void testTruncate() throws Exception {
  final short repl = 3;
  final int blockSize = 1024;
  final int numOfBlocks = 2;
  DistributedFileSystem fs = cluster.getFileSystem();
  Path dir = getTestRootPath(fc, "test/hadoop");
  Path file = getTestRootPath(fc, "test/hadoop/file");

  final byte[] data = FileSystemTestHelper.getFileData(
      numOfBlocks, blockSize);
  FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);

  final int newLength = blockSize;

  boolean isReady = fc.truncate(file, newLength);

  Assert.assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fc.getFileStatus(file);
  Assert.assertEquals(fileStatus.getLen(), newLength);
  AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());

  ContentSummary cs = fs.getContentSummary(dir);
  Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
      newLength * repl);
  Assert.assertTrue(fs.delete(dir, true));
}
 
Example #14
Source File: TestFileTruncate.java    From big-c with Apache License 2.0
/**
 * EditLogOp load test for Truncate.
 */
@Test
public void testTruncateEditLogLoad() throws IOException {
  // purge previously accumulated edits
  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  fs.saveNamespace();
  fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

  int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
  int toTruncate = 1;
  final String s = "/testTruncateEditLogLoad";
  final Path p = new Path(s);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  writeContents(contents, startingFileSize, p);

  int newLength = startingFileSize - toTruncate;
  boolean isReady = fs.truncate(p, newLength);
  assertThat("truncate should have triggered block recovery.",
      isReady, is(false));

  cluster.restartNameNode();

  String holder = UserGroupInformation.getCurrentUser().getUserName();
  cluster.getNamesystem().recoverLease(s, holder, "");

  checkBlockRecovery(p);
  checkFullFile(p, newLength, contents);
  fs.delete(p, false);
}
 
Example #15
Source File: TestFileTruncate.java    From big-c with Apache License 2.0
@Test
public void testTruncate4Symlink() throws IOException {
  final int fileLength = 3 * BLOCK_SIZE;

  final Path parent = new Path("/test");
  fs.mkdirs(parent);
  final byte[] contents = AppendTestUtil.initBuffer(fileLength);
  final Path file = new Path(parent, "testTruncate4Symlink");
  writeContents(contents, fileLength, file);

  final Path link = new Path(parent, "link");
  fs.createSymlink(file, link, false);

  final int newLength = fileLength/3;
  boolean isReady = fs.truncate(link, newLength);

  assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fs.getFileStatus(file);
  assertThat(fileStatus.getLen(), is((long) newLength));

  ContentSummary cs = fs.getContentSummary(parent);
  assertEquals("Bad disk space usage",
      cs.getSpaceConsumed(), newLength * REPLICATION);
  // validate the file content
  checkFullFile(file, newLength, contents);

  fs.delete(parent, true);
}
 
Example #16
Source File: TestDNFencing.java    From big-c with Apache License 2.0
/**
 * Regression test for HDFS-2742. The issue in this bug was:
 * - DN does a block report while file is open. This BR contains
 *   the block in RBW state.
 * - Standby queues the RBW state in PendingDatanodeMessages
 * - Standby processes edit logs during failover. Before fixing
 *   this bug, it was mistakenly applying the RBW reported state
 *   after the block had been completed, causing the block to get
 *   marked corrupt. Instead, we should now be applying the RBW
 *   message on OP_ADD, and then the FINALIZED message on OP_CLOSE.
 */
@Test
public void testBlockReportsWhileFileBeingWritten() throws Exception {
  FSDataOutputStream out = fs.create(TEST_FILE_PATH);
  try {
    AppendTestUtil.write(out, 0, 10);
    out.hflush();
    
    // Block report will include the RBW replica, but will be
    // queued on the StandbyNode.
    cluster.triggerBlockReports();
    
  } finally {
    IOUtils.closeStream(out);
  }

  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  
  // Verify that no replicas are marked corrupt, and that the
  // file is readable from the failed-over standby.
  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
  assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
  assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());
  
  DFSTestUtil.readFile(fs, TEST_FILE_PATH);
}
 
Example #17
Source File: TestHSync.java    From big-c with Apache License 2.0
/** Test hsync on an exact block boundary */
@Test
public void testHSyncBlockBoundary() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs = cluster.getFileSystem();
  
  final Path p = new Path("/testHSyncBlockBoundary/foo");
  final int len = 1 << 16;
  final byte[] fileContents = AppendTestUtil.initBuffer(len);
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 1, len, null);
  // fill exactly one block (tests the SYNC_BLOCK case) and flush
  out.write(fileContents, 0, len);
  out.hflush();
  // the full block should have caused a sync
  checkSyncMetric(cluster, 1);
  out.hsync();
  // hsync at the same block boundary should not trigger another sync
  checkSyncMetric(cluster, 1);
  // write one more byte and sync again
  out.write(1);
  out.hsync();
  checkSyncMetric(cluster, 2);
  out.close();
  checkSyncMetric(cluster, 3);
  cluster.shutdown();
}
 
Example #18
Source File: TestDFSConcurrentFileOperations.java    From RDFS with Apache License 2.0
public void testLeaseRecoveryOnTrashedFile() throws Exception {
  Configuration conf = new Configuration();
  
  conf.setLong("dfs.block.size", blockSize);
  
  init(conf);
  
  String src = "/file-1";
  String dst = "/file-2";
  Path srcPath = new Path(src);
  Path dstPath = new Path(dst);
  FSDataOutputStream fos = fs.create(srcPath);

  AppendTestUtil.write(fos, 0, writeSize);
  fos.sync();
  
  // renaming a file out from under a client will cause close to fail
  // and result in the lease remaining while the blocks are finalized on
  // the DNs
  fs.rename(srcPath, dstPath);

  try {
    fos.close();
    fail("expected IOException");
  } catch (IOException e) {
    //expected
  }

  FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
  AppendTestUtil.recoverFile(cluster, fs2, dstPath);
  AppendTestUtil.check(fs2, dstPath, writeSize);
}
 
Example #19
Source File: TestAvatarDataNodeMultipleRegistrations.java    From RDFS with Apache License 2.0
private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {}
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, fileSize);
    if (locations.length < numBlocks) {
      done = false;
      continue;
    }
    for (int idx = 0; idx < locations.length; idx++) {
      if (locations[idx].getHosts().length < repl) {
        done = false;
        break;
      }
    }
  }
  FSDataInputStream stm = fileSys.open(name);
  final byte[] expected;
  if (simulatedStorage) {
    expected = new byte[numBlocks * blockSize];
    for (int i= 0; i < expected.length; i++) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    expected = AppendTestUtil.randomBytes(seed, numBlocks*blockSize);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[numBlocks * blockSize];
  stm.readFully(0, actual);
  stm.close();
  checkData(actual, 0, expected, "Read 1");
}
 
Example #20
Source File: TestFileTruncate.java    From hadoop with Apache License 2.0
/**
 * EditLogOp load test for Truncate.
 */
@Test
public void testTruncateEditLogLoad() throws IOException {
  // purge previously accumulated edits
  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  fs.saveNamespace();
  fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

  int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
  int toTruncate = 1;
  final String s = "/testTruncateEditLogLoad";
  final Path p = new Path(s);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  writeContents(contents, startingFileSize, p);

  int newLength = startingFileSize - toTruncate;
  boolean isReady = fs.truncate(p, newLength);
  assertThat("truncate should have triggered block recovery.",
      isReady, is(false));

  cluster.restartNameNode();

  String holder = UserGroupInformation.getCurrentUser().getUserName();
  cluster.getNamesystem().recoverLease(s, holder, "");

  checkBlockRecovery(p);
  checkFullFile(p, newLength, contents);
  fs.delete(p, false);
}
 
Example #21
Source File: TestFileTruncate.java    From hadoop with Apache License 2.0
/**
 * Truncate files of different sizes byte by byte.
 */
@Test
public void testBasicTruncate() throws IOException {
  int startingFileSize = 3 * BLOCK_SIZE;

  Path parent = new Path("/test");
  fs.mkdirs(parent);
  fs.setQuota(parent, 100, 1000);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  for (int fileLength = startingFileSize; fileLength > 0;
                                          fileLength -= BLOCK_SIZE - 1) {
    for (int toTruncate = 0; toTruncate <= fileLength; toTruncate++) {
      final Path p = new Path(parent, "testBasicTruncate" + fileLength);
      writeContents(contents, fileLength, p);

      int newLength = fileLength - toTruncate;
      boolean isReady = fs.truncate(p, newLength);
      LOG.info("fileLength=" + fileLength + ", newLength=" + newLength
          + ", toTruncate=" + toTruncate + ", isReady=" + isReady);

      assertEquals("File must be closed for zero truncate"
          + " or truncating at the block boundary",
          isReady, toTruncate == 0 || newLength % BLOCK_SIZE == 0);
      if (!isReady) {
        checkBlockRecovery(p);
      }

      ContentSummary cs = fs.getContentSummary(parent);
      assertEquals("Bad disk space usage",
          cs.getSpaceConsumed(), newLength * REPLICATION);
      // validate the file content
      checkFullFile(p, newLength, contents);
    }
  }
  fs.delete(parent, true);
}
 
Example #22
Source File: TestShortCircuitLocalRead.java    From hadoop with Apache License 2.0
/**
 * Test that file data can be read by reading the block
 * through RemoteBlockReader
 * @throws IOException
 */
public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum, int size, String shortCircuitUser,
                                                        int readOffset, boolean shortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
           .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  // check that / exists
  Path path = new Path("/");
  URI uri = cluster.getURI();
  assertTrue("/ should be a directory", fs.getFileStatus(path)
              .isDirectory() == true);

  byte[] fileData = AppendTestUtil.randomBytes(seed, size);
  Path file1 = new Path("filelocal.dat");
  FSDataOutputStream stm = createFile(fs, file1, 1);

  stm.write(fileData);
  stm.close();
  try {
    checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, 
        conf, shortCircuitFails);
    // RemoteBlockReader does not support read(ByteBuffer bf)
    assertTrue("RemoteBlockReader unsupported method read(ByteBuffer bf) error",
                  checkUnsupportedMethod(fs, file1, fileData, readOffset));
  } catch(IOException e) {
    throw new IOException("doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
  } catch(InterruptedException inEx) {
    throw inEx;
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example #23
Source File: TestFileTruncate.java    From hadoop with Apache License 2.0
/**
 * The last block is truncated at its midpoint (non copy-on-truncate),
 * and the datanodes are shut down immediately after the truncate.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesShutdownImmediately() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testTruncateWithDataNodesShutdownImmediately");

  writeContents(contents, startingFileSize, p);

  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  boolean isReady = fs.truncate(p, newLength);
  assertFalse(isReady);

  cluster.shutdownDataNodes();
  cluster.setDataNodesDead();
  try {
    for(int i = 0; i < SUCCESS_ATTEMPTS && cluster.isDataNodeUp(); i++) {
      Thread.sleep(SLEEP);
    }
    assertFalse("All DataNodes should be down.", cluster.isDataNodeUp());
    LocatedBlocks blocks = getLocatedBlocks(p);
    assertTrue(blocks.isUnderConstruction());
  } finally {
    cluster.startDataNodes(conf, DATANODE_NUM, true,
        StartupOption.REGULAR, null);
    cluster.waitActive();
  }
  checkBlockRecovery(p);

  fs.delete(parent, true);
}
 
Example #24
Source File: TestFileTruncate.java    From hadoop with Apache License 2.0
@Test
public void testTruncate4Symlink() throws IOException {
  final int fileLength = 3 * BLOCK_SIZE;

  final Path parent = new Path("/test");
  fs.mkdirs(parent);
  final byte[] contents = AppendTestUtil.initBuffer(fileLength);
  final Path file = new Path(parent, "testTruncate4Symlink");
  writeContents(contents, fileLength, file);

  final Path link = new Path(parent, "link");
  fs.createSymlink(file, link, false);

  final int newLength = fileLength/3;
  boolean isReady = fs.truncate(link, newLength);

  assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fs.getFileStatus(file);
  assertThat(fileStatus.getLen(), is((long) newLength));

  ContentSummary cs = fs.getContentSummary(parent);
  assertEquals("Bad disk space usage",
      cs.getSpaceConsumed(), newLength * REPLICATION);
  // validate the file content
  checkFullFile(file, newLength, contents);

  fs.delete(parent, true);
}
 
Example #25
Source File: TestHDFSFileContextMainOperations.java    From hadoop with Apache License 2.0
@Test
public void testTruncate() throws Exception {
  final short repl = 3;
  final int blockSize = 1024;
  final int numOfBlocks = 2;
  DistributedFileSystem fs = cluster.getFileSystem();
  Path dir = getTestRootPath(fc, "test/hadoop");
  Path file = getTestRootPath(fc, "test/hadoop/file");

  final byte[] data = FileSystemTestHelper.getFileData(
      numOfBlocks, blockSize);
  FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);

  final int newLength = blockSize;

  boolean isReady = fc.truncate(file, newLength);

  Assert.assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fc.getFileStatus(file);
  Assert.assertEquals(fileStatus.getLen(), newLength);
  AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());

  ContentSummary cs = fs.getContentSummary(dir);
  Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
      newLength * repl);
  Assert.assertTrue(fs.delete(dir, true));
}
 
Example #26
Source File: TestPacketReceiver.java    From hadoop with Apache License 2.0
private void doTestReceiveAndMirror(PacketReceiver pr,
    int dataLen, int checksumsLen) throws IOException {
  final byte[] DATA = AppendTestUtil.initBuffer(dataLen);
  final byte[] CHECKSUMS = AppendTestUtil.initBuffer(checksumsLen);

  byte[] packet = prepareFakePacket(DATA, CHECKSUMS);
  ByteArrayInputStream in = new ByteArrayInputStream(packet);
  
  pr.receiveNextPacket(in);
  
  ByteBuffer parsedData = pr.getDataSlice();
  assertArrayEquals(DATA, remainingAsArray(parsedData));

  ByteBuffer parsedChecksums = pr.getChecksumSlice();
  assertArrayEquals(CHECKSUMS, remainingAsArray(parsedChecksums));
  
  PacketHeader header = pr.getHeader();
  assertEquals(SEQNO, header.getSeqno());
  assertEquals(OFFSET_IN_BLOCK, header.getOffsetInBlock());
  assertEquals(dataLen + checksumsLen + Ints.BYTES, header.getPacketLen());
  
  // Mirror the packet to an output stream and make sure it matches
  // the packet we sent.
  ByteArrayOutputStream mirrored = new ByteArrayOutputStream();
  mirrored = Mockito.spy(mirrored);

  pr.mirrorPacketTo(new DataOutputStream(mirrored));
  // The write should be done in a single call. Otherwise we may hit
  // nasty interactions with nagling (eg HDFS-4049).
  Mockito.verify(mirrored, Mockito.times(1))
    .write(Mockito.<byte[]>any(), Mockito.anyInt(),
        Mockito.eq(packet.length));
  Mockito.verifyNoMoreInteractions(mirrored);

  assertArrayEquals(packet, mirrored.toByteArray());
}
 
Example #27
Source File: TestShortCircuitLocalRead.java    From hadoop with Apache License 2.0
/**
 * Test to run benchmarks between short circuit read vs regular read with
 * specified number of threads simultaneously reading.
 * <br>
 * Run this using the following command:
 * bin/hadoop --config confdir \
 * org.apache.hadoop.hdfs.TestShortCircuitLocalRead \
 * <shortcircuit on?> <checksum on?> <Number of threads>
 */
public static void main(String[] args) throws Exception {    
  if (args.length != 3) {
    System.out.println("Usage: test shortcircuit checksum threadCount");
    System.exit(1);
  }
  boolean shortcircuit = Boolean.valueOf(args[0]);
  boolean checksum = Boolean.valueOf(args[1]);
  int threadCount = Integer.parseInt(args[2]);

  // Setup create a file
  final Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, shortcircuit);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      "/tmp/TestShortCircuitLocalRead._PORT");
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
      checksum);
  
  //Override fileSize and DATA_TO_WRITE to much larger values for benchmark test
  int fileSize = 1000 * blockSize + 100; // File with 1000 blocks
  final byte [] dataToWrite = AppendTestUtil.randomBytes(seed, fileSize);
  
  // create a new file in home directory. Do not close it.
  final Path file1 = new Path("filelocal.dat");
  final FileSystem fs = FileSystem.get(conf);
  FSDataOutputStream stm = createFile(fs, file1, 1);
  
  stm.write(dataToWrite);
  stm.close();

  long start = Time.now();
  final int iteration = 20;
  Thread[] threads = new Thread[threadCount];
  for (int i = 0; i < threadCount; i++) {
    threads[i] = new Thread() {
      @Override
      public void run() {
        for (int i = 0; i < iteration; i++) {
          try {
            String user = getCurrentUser();
            checkFileContent(fs.getUri(), file1, dataToWrite, 0, user, conf, true);
          } catch (IOException e) {
            e.printStackTrace();
          } catch (InterruptedException e) {
            e.printStackTrace();
          }
        }
      }
    };
  }
  for (int i = 0; i < threadCount; i++) {
    threads[i].start();
  }
  for (int i = 0; i < threadCount; i++) {
    threads[i].join();
  }
  long end = Time.now();
  System.out.println("Iteration " + iteration + " took " + (end - start));
  fs.delete(file1, false);
}
 
Example #28
Source File: TestFileTruncate.java    From big-c with Apache License 2.0
/**
 * Upgrade, RollBack, and restart test for Truncate.
 */
@Test
public void testUpgradeAndRestart() throws IOException {
  Path parent = new Path("/test");
  fs.mkdirs(parent);
  fs.setQuota(parent, 100, 1000);
  fs.allowSnapshot(parent);
  String truncateFile = "testUpgrade";
  final Path p = new Path(parent, truncateFile);
  int startingFileSize = 2 * BLOCK_SIZE;
  int toTruncate = 1;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  writeContents(contents, startingFileSize, p);

  Path snapshotDir = fs.createSnapshot(parent, "ss0");
  Path snapshotFile = new Path(snapshotDir, truncateFile);

  int newLengthBeforeUpgrade = startingFileSize - toTruncate;
  boolean isReady = fs.truncate(p, newLengthBeforeUpgrade);
  assertThat("truncate should have triggered block recovery.",
      isReady, is(false));

  checkBlockRecovery(p);

  checkFullFile(p, newLengthBeforeUpgrade, contents);
  assertFileLength(snapshotFile, startingFileSize);
  long totalBlockBefore = cluster.getNamesystem().getBlocksTotal();

  restartCluster(StartupOption.UPGRADE);

  assertThat("SafeMode should be OFF",
      cluster.getNamesystem().isInSafeMode(), is(false));
  assertThat("NameNode should be performing upgrade.",
      cluster.getNamesystem().isUpgradeFinalized(), is(false));
  FileStatus fileStatus = fs.getFileStatus(p);
  assertThat(fileStatus.getLen(), is((long) newLengthBeforeUpgrade));

  int newLengthAfterUpgrade = newLengthBeforeUpgrade - toTruncate;
  Block oldBlk = getLocatedBlocks(p).getLastLocatedBlock()
      .getBlock().getLocalBlock();
  isReady = fs.truncate(p, newLengthAfterUpgrade);
  assertThat("truncate should have triggered block recovery.",
      isReady, is(false));
  fileStatus = fs.getFileStatus(p);
  assertThat(fileStatus.getLen(), is((long) newLengthAfterUpgrade));
  assertThat("Should copy on truncate during upgrade",
      getLocatedBlocks(p).getLastLocatedBlock().getBlock()
      .getLocalBlock().getBlockId(), is(not(equalTo(oldBlk.getBlockId()))));

  checkBlockRecovery(p);

  checkFullFile(p, newLengthAfterUpgrade, contents);
  assertThat("Total block count should be unchanged from copy-on-truncate",
      cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));

  restartCluster(StartupOption.ROLLBACK);

  assertThat("File does not exist " + p, fs.exists(p), is(true));
  fileStatus = fs.getFileStatus(p);
  assertThat(fileStatus.getLen(), is((long) newLengthBeforeUpgrade));
  checkFullFile(p, newLengthBeforeUpgrade, contents);
  assertThat("Total block count should be unchanged from rolling back",
      cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));

  restartCluster(StartupOption.REGULAR);
  assertThat("Total block count should be unchanged from start-up",
      cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));
  checkFullFile(p, newLengthBeforeUpgrade, contents);
  assertFileLength(snapshotFile, startingFileSize);

  // empty edits and restart
  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  fs.saveNamespace();
  cluster.restartNameNode(true);
  assertThat("Total block count should be unchanged from start-up",
      cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));
  checkFullFile(p, newLengthBeforeUpgrade, contents);
  assertFileLength(snapshotFile, startingFileSize);

  fs.deleteSnapshot(parent, "ss0");
  fs.delete(parent, true);
  assertThat("File " + p + " shouldn't exist", fs.exists(p), is(false));
}
 
Example #29
Source File: TestFileTruncate.java    From big-c with Apache License 2.0
static void checkFullFile(Path p, int newLength, byte[] contents)
    throws IOException {
  AppendTestUtil.checkFullFile(fs, p, newLength, contents, p.toString());
}
 
Example #30
Source File: TestPipelinesFailover.java    From big-c with Apache License 2.0
private void doWriteOverFailoverTest(TestScenario scenario,
    MethodToTestIdempotence methodToTest) throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  // Don't check replication periodically.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  
  FSDataOutputStream stm = null;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(3)
    .build();
  try {
    int sizeWritten = 0;
    
    cluster.waitActive();
    cluster.transitionToActive(0);
    Thread.sleep(500);

    LOG.info("Starting with NN 0 active");
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    stm = fs.create(TEST_PATH);
    
    // write a block and a half
    AppendTestUtil.write(stm, 0, BLOCK_AND_A_HALF);
    sizeWritten += BLOCK_AND_A_HALF;
    
    // Make sure all of the blocks are written out before failover.
    stm.hflush();

    LOG.info("Failing over to NN 1");
    scenario.run(cluster);

    // NOTE: explicitly do *not* make any further metadata calls
    // to the NN here. The next IPC call should be to allocate the next
    // block. Any other call would notice the failover and not test
    // idempotence of the operation (HDFS-3031)
    
    FSNamesystem ns1 = cluster.getNameNode(1).getNamesystem();
    BlockManagerTestUtil.updateState(ns1.getBlockManager());
    assertEquals(0, ns1.getPendingReplicationBlocks());
    assertEquals(0, ns1.getCorruptReplicaBlocks());
    assertEquals(0, ns1.getMissingBlocksCount());

    // If we're testing allocateBlock()'s idempotence, write another
    // block and a half, so we have to allocate a new block.
    // Otherwise, don't write anything, so our next RPC will be
    // completeFile() if we're testing idempotence of that operation.
    if (methodToTest == MethodToTestIdempotence.ALLOCATE_BLOCK) {
      // write another block and a half
      AppendTestUtil.write(stm, sizeWritten, BLOCK_AND_A_HALF);
      sizeWritten += BLOCK_AND_A_HALF;
    }
    
    stm.close();
    stm = null;
    
    AppendTestUtil.check(fs, TEST_PATH, sizeWritten);
  } finally {
    IOUtils.closeStream(stm);
    cluster.shutdown();
  }
}