Java Code Examples for org.apache.hadoop.hdfs.AppendTestUtil

The following examples show how to use org.apache.hadoop.hdfs.AppendTestUtil. They are extracted from open source projects; the originating project, source file, and license are listed above each example.
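Before the individual examples, here is a minimal, self-contained sketch of the two verification patterns that recur throughout this page: writing a known buffer and comparing the file against it with checkFullFile, and writing AppendTestUtil's deterministic byte pattern and verifying it with check. The method signatures are inferred from the usages shown below; the MiniDFSCluster setup, paths, and sizes are illustrative assumptions rather than part of any listed example.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class AppendTestUtilSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      FileSystem fs = cluster.getFileSystem();

      // Pattern 1: generate a deterministic buffer, write it, then compare
      // the on-disk file against that buffer.
      Path f1 = new Path("/sketch/full.dat");
      byte[] contents = AppendTestUtil.initBuffer(4096);
      FSDataOutputStream out = fs.create(f1, true);
      out.write(contents, 0, contents.length);
      out.close();
      AppendTestUtil.checkFullFile(fs, f1, contents.length, contents, f1.toString());

      // Pattern 2: let AppendTestUtil write its own byte pattern, then verify
      // that the first N bytes of the file match that pattern.
      Path f2 = new Path("/sketch/pattern.dat");
      FSDataOutputStream stm = fs.create(f2, true);
      AppendTestUtil.write(stm, 0, 100);
      stm.hflush();
      stm.close();
      AppendTestUtil.check(fs, f2, 100);
    } finally {
      cluster.shutdown();
    }
  }
}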
Example 1
Source Project: hadoop   Source File: BaseTestHttpFSWith.java    License: Apache License 2.0
private void testTruncate() throws Exception {
  if (!isLocalFS()) {
    final short repl = 3;
    final int blockSize = 1024;
    final int numOfBlocks = 2;
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    fs.mkdirs(getProxiedFSTestDir());
    Path file = new Path(getProxiedFSTestDir(), "foo.txt");
    final byte[] data = FileSystemTestHelper.getFileData(
        numOfBlocks, blockSize);
    FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);

    final int newLength = blockSize;

    boolean isReady = fs.truncate(file, newLength);
    Assert.assertTrue("Recovery is not expected.", isReady);

    FileStatus fileStatus = fs.getFileStatus(file);
    Assert.assertEquals(fileStatus.getLen(), newLength);
    AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());

    fs.close();
  }
}
 
Example 2
Source Project: hadoop   Source File: TestFSMainOperationsWebHdfs.java    License: Apache License 2.0
@Test
public void testTruncate() throws Exception {
  final short repl = 3;
  final int blockSize = 1024;
  final int numOfBlocks = 2;
  Path dir = getTestRootPath(fSys, "test/hadoop");
  Path file = getTestRootPath(fSys, "test/hadoop/file");

  final byte[] data = getFileData(numOfBlocks, blockSize);
  createFile(fSys, file, data, blockSize, repl);

  final int newLength = blockSize;

  boolean isReady = fSys.truncate(file, newLength);

  Assert.assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fSys.getFileStatus(file);
  Assert.assertEquals(fileStatus.getLen(), newLength);
  AppendTestUtil.checkFullFile(fSys, file, newLength, data, file.toString());

  ContentSummary cs = fSys.getContentSummary(dir);
  Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
      newLength * repl);
  Assert.assertTrue("Deleted", fSys.delete(dir, true));
}
 
Example 3
Source Project: hadoop   Source File: TestPipelinesFailover.java    License: Apache License 2.0
@Override
public void doAnAction() throws Exception {
  FSDataOutputStream stm = fs.create(path, true);
  try {
    AppendTestUtil.write(stm, 0, 100);
    stm.hflush();
    loopRecoverLease(fsOtherUser, path);
    AppendTestUtil.check(fs, path, 100);
  } finally {
    try {
      stm.close();
    } catch (IOException e) {
      // should expect this since we lost the lease
    }
  }
}
 
Example 4
Source Project: hadoop   Source File: TestHDFSFileContextMainOperations.java    License: Apache License 2.0
@Test
public void testTruncate() throws Exception {
  final short repl = 3;
  final int blockSize = 1024;
  final int numOfBlocks = 2;
  DistributedFileSystem fs = cluster.getFileSystem();
  Path dir = getTestRootPath(fc, "test/hadoop");
  Path file = getTestRootPath(fc, "test/hadoop/file");

  final byte[] data = FileSystemTestHelper.getFileData(
      numOfBlocks, blockSize);
  FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);

  final int newLength = blockSize;

  boolean isReady = fc.truncate(file, newLength);

  Assert.assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fc.getFileStatus(file);
  Assert.assertEquals(fileStatus.getLen(), newLength);
  AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());

  ContentSummary cs = fs.getContentSummary(dir);
  Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
      newLength * repl);
  Assert.assertTrue(fs.delete(dir, true));
}
 
Example 5
Source Project: hadoop   Source File: TestPacketReceiver.java    License: Apache License 2.0
private void doTestReceiveAndMirror(PacketReceiver pr,
    int dataLen, int checksumsLen) throws IOException {
  final byte[] DATA = AppendTestUtil.initBuffer(dataLen);
  final byte[] CHECKSUMS = AppendTestUtil.initBuffer(checksumsLen);

  byte[] packet = prepareFakePacket(DATA, CHECKSUMS);
  ByteArrayInputStream in = new ByteArrayInputStream(packet);
  
  pr.receiveNextPacket(in);
  
  ByteBuffer parsedData = pr.getDataSlice();
  assertArrayEquals(DATA, remainingAsArray(parsedData));

  ByteBuffer parsedChecksums = pr.getChecksumSlice();
  assertArrayEquals(CHECKSUMS, remainingAsArray(parsedChecksums));
  
  PacketHeader header = pr.getHeader();
  assertEquals(SEQNO, header.getSeqno());
  assertEquals(OFFSET_IN_BLOCK, header.getOffsetInBlock());
  assertEquals(dataLen + checksumsLen + Ints.BYTES, header.getPacketLen());
  
  // Mirror the packet to an output stream and make sure it matches
  // the packet we sent.
  ByteArrayOutputStream mirrored = new ByteArrayOutputStream();
  mirrored = Mockito.spy(mirrored);

  pr.mirrorPacketTo(new DataOutputStream(mirrored));
  // The write should be done in a single call. Otherwise we may hit
  // nasty interactions with nagling (eg HDFS-4049).
  Mockito.verify(mirrored, Mockito.times(1))
    .write(Mockito.<byte[]>any(), Mockito.anyInt(),
        Mockito.eq(packet.length));
  Mockito.verifyNoMoreInteractions(mirrored);

  assertArrayEquals(packet, mirrored.toByteArray());
}
 
Example 6
Source Project: hadoop   Source File: TestShortCircuitLocalRead.java    License: Apache License 2.0
/**
 * Test that file data can be read by reading the block
 * through RemoteBlockReader
 * @throws IOException
 */
public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum, int size, String shortCircuitUser,
                                                        int readOffset, boolean shortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
           .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  // check that / exists
  Path path = new Path("/");
  URI uri = cluster.getURI();
  assertTrue("/ should be a directory", fs.getFileStatus(path)
              .isDirectory() == true);

  byte[] fileData = AppendTestUtil.randomBytes(seed, size);
  Path file1 = new Path("filelocal.dat");
  FSDataOutputStream stm = createFile(fs, file1, 1);

  stm.write(fileData);
  stm.close();
  try {
    checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, 
        conf, shortCircuitFails);
    //RemoteBlockReader have unsupported method read(ByteBuffer bf)
    assertTrue("RemoteBlockReader unsupported method read(ByteBuffer bf) error",
                  checkUnsupportedMethod(fs, file1, fileData, readOffset));
  } catch(IOException e) {
    throw new IOException("doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
  } catch(InterruptedException inEx) {
    throw inEx;
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 7
Source Project: hadoop   Source File: TestFileTruncate.java    License: Apache License 2.0
/**
 * Truncate files of different sizes byte by byte.
 */
@Test
public void testBasicTruncate() throws IOException {
  int startingFileSize = 3 * BLOCK_SIZE;

  Path parent = new Path("/test");
  fs.mkdirs(parent);
  fs.setQuota(parent, 100, 1000);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  for (int fileLength = startingFileSize; fileLength > 0;
                                          fileLength -= BLOCK_SIZE - 1) {
    for (int toTruncate = 0; toTruncate <= fileLength; toTruncate++) {
      final Path p = new Path(parent, "testBasicTruncate" + fileLength);
      writeContents(contents, fileLength, p);

      int newLength = fileLength - toTruncate;
      boolean isReady = fs.truncate(p, newLength);
      LOG.info("fileLength=" + fileLength + ", newLength=" + newLength
          + ", toTruncate=" + toTruncate + ", isReady=" + isReady);

      assertEquals("File must be closed for zero truncate"
          + " or truncating at the block boundary",
          isReady, toTruncate == 0 || newLength % BLOCK_SIZE == 0);
      if (!isReady) {
        checkBlockRecovery(p);
      }

      ContentSummary cs = fs.getContentSummary(parent);
      assertEquals("Bad disk space usage",
          cs.getSpaceConsumed(), newLength * REPLICATION);
      // validate the file content
      checkFullFile(p, newLength, contents);
    }
  }
  fs.delete(parent, true);
}
 
Example 8
Source Project: hadoop   Source File: TestFileTruncate.java    License: Apache License 2.0
/**
 * The last block is truncated at mid. (non copy-on-truncate)
 * shutdown the datanodes immediately after truncate.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesShutdownImmediately() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testTruncateWithDataNodesShutdownImmediately");

  writeContents(contents, startingFileSize, p);

  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  boolean isReady = fs.truncate(p, newLength);
  assertFalse(isReady);

  cluster.shutdownDataNodes();
  cluster.setDataNodesDead();
  try {
    for(int i = 0; i < SUCCESS_ATTEMPTS && cluster.isDataNodeUp(); i++) {
      Thread.sleep(SLEEP);
    }
    assertFalse("All DataNodes should be down.", cluster.isDataNodeUp());
    LocatedBlocks blocks = getLocatedBlocks(p);
    assertTrue(blocks.isUnderConstruction());
  } finally {
    cluster.startDataNodes(conf, DATANODE_NUM, true,
        StartupOption.REGULAR, null);
    cluster.waitActive();
  }
  checkBlockRecovery(p);

  fs.delete(parent, true);
}
 
Example 9
Source Project: hadoop   Source File: TestFileTruncate.java    License: Apache License 2.0
/**
 * EditLogOp load test for Truncate.
 */
@Test
public void testTruncateEditLogLoad() throws IOException {
  // purge previously accumulated edits
  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  fs.saveNamespace();
  fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

  int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
  int toTruncate = 1;
  final String s = "/testTruncateEditLogLoad";
  final Path p = new Path(s);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  writeContents(contents, startingFileSize, p);

  int newLength = startingFileSize - toTruncate;
  boolean isReady = fs.truncate(p, newLength);
  assertThat("truncate should have triggered block recovery.",
      isReady, is(false));

  cluster.restartNameNode();

  String holder = UserGroupInformation.getCurrentUser().getUserName();
  cluster.getNamesystem().recoverLease(s, holder, "");

  checkBlockRecovery(p);
  checkFullFile(p, newLength, contents);
  fs.delete(p, false);
}
 
Example 10
Source Project: hadoop   Source File: TestFileTruncate.java    License: Apache License 2.0
@Test
public void testTruncate4Symlink() throws IOException {
  final int fileLength = 3 * BLOCK_SIZE;

  final Path parent = new Path("/test");
  fs.mkdirs(parent);
  final byte[] contents = AppendTestUtil.initBuffer(fileLength);
  final Path file = new Path(parent, "testTruncate4Symlink");
  writeContents(contents, fileLength, file);

  final Path link = new Path(parent, "link");
  fs.createSymlink(file, link, false);

  final int newLength = fileLength/3;
  boolean isReady = fs.truncate(link, newLength);

  assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fs.getFileStatus(file);
  assertThat(fileStatus.getLen(), is((long) newLength));

  ContentSummary cs = fs.getContentSummary(parent);
  assertEquals("Bad disk space usage",
      cs.getSpaceConsumed(), newLength * REPLICATION);
  // validate the file content
  checkFullFile(file, newLength, contents);

  fs.delete(parent, true);
}
 
Example 11
Source Project: hadoop   Source File: TestDNFencing.java    License: Apache License 2.0
/**
 * Regression test for HDFS-2742. The issue in this bug was:
 * - DN does a block report while file is open. This BR contains
 *   the block in RBW state.
 * - Standby queues the RBW state in PendingDatanodeMessages
 * - Standby processes edit logs during failover. Before fixing
 *   this bug, it was mistakenly applying the RBW reported state
 *   after the block had been completed, causing the block to get
 *   marked corrupt. Instead, we should now be applying the RBW
 *   message on OP_ADD, and then the FINALIZED message on OP_CLOSE.
 */
@Test
public void testBlockReportsWhileFileBeingWritten() throws Exception {
  FSDataOutputStream out = fs.create(TEST_FILE_PATH);
  try {
    AppendTestUtil.write(out, 0, 10);
    out.hflush();
    
    // Block report will include the RBW replica, but will be
    // queued on the StandbyNode.
    cluster.triggerBlockReports();
    
  } finally {
    IOUtils.closeStream(out);
  }

  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  
  // Verify that no replicas are marked corrupt, and that the
  // file is readable from the failed-over standby.
  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
  assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
  assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());
  
  DFSTestUtil.readFile(fs, TEST_FILE_PATH);
}
 
Example 12
Source Project: hadoop   Source File: TestHSync.java    License: Apache License 2.0
/** Test hsync on an exact block boundary */
@Test
public void testHSyncBlockBoundary() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs = cluster.getFileSystem();
  
  final Path p = new Path("/testHSyncBlockBoundary/foo");
  final int len = 1 << 16;
  final byte[] fileContents = AppendTestUtil.initBuffer(len);
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 1, len, null);
  // fill exactly one block (tests the SYNC_BLOCK case) and flush
  out.write(fileContents, 0, len);
  out.hflush();
  // the full block should have caused a sync
  checkSyncMetric(cluster, 1);
  out.hsync();
  // first on block again
  checkSyncMetric(cluster, 1);
  // write one more byte and sync again
  out.write(1);
  out.hsync();
  checkSyncMetric(cluster, 2);
  out.close();
  checkSyncMetric(cluster, 3);
  cluster.shutdown();
}
 
Example 13
Source Project: RDFS   Source File: TestDFSConcurrentFileOperations.java    License: Apache License 2.0
public void testLeaseRecoveryOnTrashedFile() throws Exception {
  Configuration conf = new Configuration();
  
  conf.setLong("dfs.block.size", blockSize);
  
  init(conf);
  
  String src = "/file-1";
  String dst = "/file-2";
  Path srcPath = new Path(src);
  Path dstPath = new Path(dst);
  FSDataOutputStream fos = fs.create(srcPath);

  AppendTestUtil.write(fos, 0, writeSize);
  fos.sync();
  
  // renaming a file out from under a client will cause close to fail
  // and result in the lease remaining while the blocks are finalized on
  // the DNs
  fs.rename(srcPath, dstPath);

  try {
    fos.close();
    fail("expected IOException");
  } catch (IOException e) {
    //expected
  }

  FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
  AppendTestUtil.recoverFile(cluster, fs2, dstPath);
  AppendTestUtil.check(fs2, dstPath, writeSize);
}
 
Example 14
private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {}
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, fileSize);
    if (locations.length < numBlocks) {
      done = false;
      continue;
    }
    for (int idx = 0; idx < locations.length; idx++) {
      if (locations[idx].getHosts().length < repl) {
        done = false;
        break;
      }
    }
  }
  FSDataInputStream stm = fileSys.open(name);
  final byte[] expected;
  if (simulatedStorage) {
    expected = new byte[numBlocks * blockSize];
    for (int i = 0; i < expected.length; i++) {
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    expected = AppendTestUtil.randomBytes(seed, numBlocks*blockSize);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[numBlocks * blockSize];
  stm.readFully(0, actual);
  stm.close();
  checkData(actual, 0, expected, "Read 1");
}
 
Example 15
Source Project: hadoop   Source File: TestShortCircuitLocalRead.java    License: Apache License 2.0
/**
 * Test that file data can be read by reading the block file
 * directly from the local store.
 */
public void doTestShortCircuitReadImpl(boolean ignoreChecksum, int size,
    int readOffset, String shortCircuitUser, String readingUser,
    boolean legacyShortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
      ignoreChecksum);
  // Set a random client context name so that we don't share a cache with
  // other invocations of this function.
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,
      UUID.randomUUID().toString());
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(),
        "TestShortCircuitLocalRead._PORT.sock").getAbsolutePath());
  if (shortCircuitUser != null) {
    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
        shortCircuitUser);
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
  }
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    // check that / exists
    Path path = new Path("/");
    assertTrue("/ should be a directory", fs.getFileStatus(path)
        .isDirectory() == true);
    
    byte[] fileData = AppendTestUtil.randomBytes(seed, size);
    Path file1 = fs.makeQualified(new Path("filelocal.dat"));
    FSDataOutputStream stm = createFile(fs, file1, 1);
    stm.write(fileData);
    stm.close();
    
    URI uri = cluster.getURI();
    checkFileContent(uri, file1, fileData, readOffset, readingUser, conf,
        legacyShortCircuitFails);
    checkFileContentDirect(uri, file1, fileData, readOffset, readingUser,
        conf, legacyShortCircuitFails);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 16
Source Project: hadoop   Source File: TestShortCircuitLocalRead.java    License: Apache License 2.0
@Test(timeout=10000)
public void testSkipWithVerifyChecksum() throws IOException {
  int size = blockSize;
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      "/tmp/testSkipWithVerifyChecksum._PORT");
  DomainSocket.disableBindPathValidation();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    // check that / exists
    Path path = new Path("/");
    assertTrue("/ should be a directory", fs.getFileStatus(path)
        .isDirectory() == true);
    
    byte[] fileData = AppendTestUtil.randomBytes(seed, size*3);
    // create a new file in home directory. Do not close it.
    Path file1 = new Path("filelocal.dat");
    FSDataOutputStream stm = createFile(fs, file1, 1);

    // write to file
    stm.write(fileData);
    stm.close();
    
    // now test the skip function
    FSDataInputStream instm = fs.open(file1);
    byte[] actual = new byte[fileData.length];
    // read something from the block first, otherwise BlockReaderLocal.skip()
    // will not be invoked
    int nread = instm.read(actual, 0, 3);
    long skipped = 2*size+3;
    instm.seek(skipped);
    nread = instm.read(actual, (int)(skipped + nread), 3);
    instm.close();
      
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 17
Source Project: hadoop   Source File: TestShortCircuitLocalRead.java    License: Apache License 2.0
/**
 * Test to run benchmarks between short circuit read vs regular read with
 * specified number of threads simultaneously reading.
 * <br>
 * Run this using the following command:
 * bin/hadoop --config confdir \
 * org.apache.hadoop.hdfs.TestShortCircuitLocalRead \
 * <shortcircuit on?> <checksum on?> <Number of threads>
 */
public static void main(String[] args) throws Exception {    
  if (args.length != 3) {
    System.out.println("Usage: test shortcircuit checksum threadCount");
    System.exit(1);
  }
  boolean shortcircuit = Boolean.valueOf(args[0]);
  boolean checksum = Boolean.valueOf(args[1]);
  int threadCount = Integer.parseInt(args[2]);

  // Setup create a file
  final Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, shortcircuit);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      "/tmp/TestShortCircuitLocalRead._PORT");
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
      checksum);
  
  //Override fileSize and DATA_TO_WRITE to much larger values for benchmark test
  int fileSize = 1000 * blockSize + 100; // File with 1000 blocks
  final byte [] dataToWrite = AppendTestUtil.randomBytes(seed, fileSize);
  
  // create a new file in home directory. Do not close it.
  final Path file1 = new Path("filelocal.dat");
  final FileSystem fs = FileSystem.get(conf);
  FSDataOutputStream stm = createFile(fs, file1, 1);
  
  stm.write(dataToWrite);
  stm.close();

  long start = Time.now();
  final int iteration = 20;
  Thread[] threads = new Thread[threadCount];
  for (int i = 0; i < threadCount; i++) {
    threads[i] = new Thread() {
      @Override
      public void run() {
        for (int i = 0; i < iteration; i++) {
          try {
            String user = getCurrentUser();
            checkFileContent(fs.getUri(), file1, dataToWrite, 0, user, conf, true);
          } catch (IOException e) {
            e.printStackTrace();
          } catch (InterruptedException e) {
            e.printStackTrace();
          }
        }
      }
    };
  }
  for (int i = 0; i < threadCount; i++) {
    threads[i].start();
  }
  for (int i = 0; i < threadCount; i++) {
    threads[i].join();
  }
  long end = Time.now();
  System.out.println("Iteration " + iteration + " took " + (end - start));
  fs.delete(file1, false);
}
 
Example 18
Source Project: hadoop   Source File: TestFileTruncate.java    License: Apache License 2.0
/**
 * The last block is truncated at mid. (non copy-on-truncate)
 * dn0 is shutdown before truncate and restart after truncate successful.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesRestart() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testTruncateWithDataNodesRestart");

  writeContents(contents, startingFileSize, p);
  LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();

  int dn = 0;
  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  cluster.getDataNodes().get(dn).shutdown();
  try {
    boolean isReady = fs.truncate(p, newLength);
    assertFalse(isReady);
  } finally {
    cluster.restartDataNode(dn, true, true);
    cluster.waitActive();
  }
  checkBlockRecovery(p);

  LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
  /*
   * For non copy-on-truncate, the truncated block id is the same, but the 
   * GS should increase.
   * The truncated block will be replicated to dn0 after it restarts.
   */
  assertEquals(newBlock.getBlock().getBlockId(), 
      oldBlock.getBlock().getBlockId());
  assertEquals(newBlock.getBlock().getGenerationStamp(),
      oldBlock.getBlock().getGenerationStamp() + 1);

  // Wait replicas come to 3
  DFSTestUtil.waitReplication(fs, p, REPLICATION);
  // Old replica is disregarded and replaced with the truncated one
  assertEquals(cluster.getBlockFile(dn, newBlock.getBlock()).length(), 
      newBlock.getBlockSize());
  assertTrue(cluster.getBlockMetadataFile(dn, 
      newBlock.getBlock()).getName().endsWith(
          newBlock.getBlock().getGenerationStamp() + ".meta"));

  // Validate the file
  FileStatus fileStatus = fs.getFileStatus(p);
  assertThat(fileStatus.getLen(), is((long) newLength));
  checkFullFile(p, newLength, contents);

  fs.delete(parent, true);
}