Java Code Examples for org.apache.hadoop.hdfs.AppendTestUtil#initBuffer()

The following examples show how to use org.apache.hadoop.hdfs.AppendTestUtil#initBuffer(). Each example is taken from an open-source project; the source file, project, and license are noted above it.
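As background, AppendTestUtil.initBuffer(n) hands the tests an n-byte buffer whose contents can be verified later. Below is a minimal sketch of an equivalent helper, assuming the utility simply fills the buffer with pseudo-random bytes from a fixed seed so that runs are reproducible; the class name and seed value are illustrative, not Hadoop's actual implementation.

import java.util.Random;

public final class BufferTestSupport {
  // Fixed seed: every call produces the same byte sequence, so content
  // checks after append/truncate/mirror operations stay deterministic.
  private static final long SEED = 0xA9E57E57L;  // illustrative value

  /** Returns a size-byte buffer filled with reproducible pseudo-random data. */
  public static byte[] initBuffer(int size) {
    byte[] buffer = new byte[size];
    new Random(SEED).nextBytes(buffer);
    return buffer;
  }
}

The tests then write this buffer (or a prefix of it) to HDFS and later compare what they read back against the same array.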
Example 1
Source File: TestPacketReceiver.java    From big-c with Apache License 2.0
private void doTestReceiveAndMirror(PacketReceiver pr,
    int dataLen, int checksumsLen) throws IOException {
  final byte[] DATA = AppendTestUtil.initBuffer(dataLen);
  final byte[] CHECKSUMS = AppendTestUtil.initBuffer(checksumsLen);

  byte[] packet = prepareFakePacket(DATA, CHECKSUMS);
  ByteArrayInputStream in = new ByteArrayInputStream(packet);
  
  pr.receiveNextPacket(in);
  
  ByteBuffer parsedData = pr.getDataSlice();
  assertArrayEquals(DATA, remainingAsArray(parsedData));

  ByteBuffer parsedChecksums = pr.getChecksumSlice();
  assertArrayEquals(CHECKSUMS, remainingAsArray(parsedChecksums));
  
  PacketHeader header = pr.getHeader();
  assertEquals(SEQNO, header.getSeqno());
  assertEquals(OFFSET_IN_BLOCK, header.getOffsetInBlock());
  assertEquals(dataLen + checksumsLen + Ints.BYTES, header.getPacketLen());
  
  // Mirror the packet to an output stream and make sure it matches
  // the packet we sent.
  ByteArrayOutputStream mirrored = new ByteArrayOutputStream();
  mirrored = Mockito.spy(mirrored);

  pr.mirrorPacketTo(new DataOutputStream(mirrored));
  // The write should be done in a single call. Otherwise we may hit
  // nasty interactions with Nagle's algorithm (e.g. HDFS-4049).
  Mockito.verify(mirrored, Mockito.times(1))
    .write(Mockito.<byte[]>any(), Mockito.anyInt(),
        Mockito.eq(packet.length));
  Mockito.verifyNoMoreInteractions(mirrored);

  assertArrayEquals(packet, mirrored.toByteArray());
}
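prepareFakePacket, SEQNO, and OFFSET_IN_BLOCK are test fixtures not shown above. The assertion on getPacketLen() implies that the packet's length field counts the checksums, the data, and the 4-byte length field itself. A rough sketch of such a builder under that assumption follows; the header serialization is elided and this is not the real helper.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical stand-in for prepareFakePacket(); the layout is inferred from
// the getPacketLen() assertion above, not from the HDFS wire-protocol spec.
static byte[] buildFakePacket(byte[] headerBytes, byte[] checksums, byte[] data)
    throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DataOutputStream out = new DataOutputStream(baos);
  // The payload length counts checksums + data + the 4-byte length field.
  out.writeInt(checksums.length + data.length + 4);
  out.writeShort(headerBytes.length);  // serialized header length, then header
  out.write(headerBytes);
  out.write(checksums);                // checksums precede the data
  out.write(data);
  out.flush();
  return baos.toByteArray();
}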
 
Example 2
Source File: TestHSync.java    From big-c with Apache License 2.0
/** Test hsync on an exact block boundary */
@Test
public void testHSyncBlockBoundary() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs = cluster.getFileSystem();
  
  final Path p = new Path("/testHSyncBlockBoundary/foo");
  final int len = 1 << 16;
  final byte[] fileContents = AppendTestUtil.initBuffer(len);
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 1, len, null);
  // fill exactly one block (tests the SYNC_BLOCK case) and flush
  out.write(fileContents, 0, len);
  out.hflush();
  // the full block should have caused a sync
  checkSyncMetric(cluster, 1);
  out.hsync();
  // still at the same block boundary, so no additional sync
  checkSyncMetric(cluster, 1);
  // write one more byte and sync again
  out.write(1);
  out.hsync();
  checkSyncMetric(cluster, 2);
  out.close();
  checkSyncMetric(cluster, 3);
  cluster.shutdown();
}
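checkSyncMetric is defined elsewhere in TestHSync. A plausible reconstruction follows, assuming it asserts the first DataNode's fsync counter through Hadoop's metrics test helpers; the metric name "FsyncCount" and the single-DataNode assumption are illustrative guesses.

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

// Assumed shape of the helper: the expected value is the number of times the
// DataNode has fsync'ed block data since the cluster started.
private void checkSyncMetric(MiniDFSCluster cluster, long expected) {
  DataNode dn = cluster.getDataNodes().get(0);
  assertCounter("FsyncCount", expected, getMetrics(dn.getMetrics().name()));
}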
 
Example 3
Source File: TestFileTruncate.java    From big-c with Apache License 2.0
@Test
public void testTruncate4Symlink() throws IOException {
  final int fileLength = 3 * BLOCK_SIZE;

  final Path parent = new Path("/test");
  fs.mkdirs(parent);
  final byte[] contents = AppendTestUtil.initBuffer(fileLength);
  final Path file = new Path(parent, "testTruncate4Symlink");
  writeContents(contents, fileLength, file);

  final Path link = new Path(parent, "link");
  fs.createSymlink(file, link, false);

  final int newLength = fileLength/3;
  boolean isReady = fs.truncate(link, newLength);

  assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fs.getFileStatus(file);
  assertThat(fileStatus.getLen(), is((long) newLength));

  ContentSummary cs = fs.getContentSummary(parent);
  assertEquals("Bad disk space usage",
      cs.getSpaceConsumed(), newLength * REPLICATION);
  // validate the file content
  checkFullFile(file, newLength, contents);

  fs.delete(parent, true);
}
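writeContents is shared plumbing in TestFileTruncate and appears in most of the truncate examples below. A minimal sketch, assuming it creates (or overwrites) the file and writes only the first fileLength bytes of the buffer; fs, BLOCK_SIZE, and REPLICATION belong to the enclosing test class.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;

// Assumed writer helper: overwrite the file with the requested prefix of the
// buffer, using the test's block size and replication factor.
private void writeContents(byte[] contents, int fileLength, Path p)
    throws IOException {
  FSDataOutputStream out =
      fs.create(p, true, BLOCK_SIZE, (short) REPLICATION, BLOCK_SIZE);
  try {
    out.write(contents, 0, fileLength);
  } finally {
    out.close();
  }
}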
 
Example 4
Source File: TestFileTruncate.java    From big-c with Apache License 2.0
/**
 * EditLogOp load test for Truncate.
 */
@Test
public void testTruncateEditLogLoad() throws IOException {
  // purge previously accumulated edits
  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  fs.saveNamespace();
  fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

  int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
  int toTruncate = 1;
  final String s = "/testTruncateEditLogLoad";
  final Path p = new Path(s);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  writeContents(contents, startingFileSize, p);

  int newLength = startingFileSize - toTruncate;
  boolean isReady = fs.truncate(p, newLength);
  assertThat("truncate should have triggered block recovery.",
      isReady, is(false));

  cluster.restartNameNode();

  String holder = UserGroupInformation.getCurrentUser().getUserName();
  cluster.getNamesystem().recoverLease(s, holder, "");

  checkBlockRecovery(p);
  checkFullFile(p, newLength, contents);
  fs.delete(p, false);
}
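checkBlockRecovery blocks the test until the recovery started by truncate finishes. A sketch follows, assuming it polls the file's block locations until the last block is complete; the 300 x 100 ms polling budget is illustrative, and getLocatedBlocks is the test's own helper (sketched after Example 8).

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import static org.junit.Assert.assertTrue;

// Assumed polling loop: truncate on a non-boundary length leaves the file
// under construction until the DataNodes finish block recovery.
private void checkBlockRecovery(Path p) throws IOException {
  boolean recovered = false;
  for (int i = 0; i < 300 && !recovered; i++) {
    LocatedBlocks blocks = getLocatedBlocks(p);
    recovered = !blocks.isUnderConstruction()
        && (blocks.getLastLocatedBlock() == null
            || blocks.isLastBlockComplete());
    if (!recovered) {
      try {
        Thread.sleep(100);  // illustrative polling interval
      } catch (InterruptedException ignored) {
        Thread.currentThread().interrupt();
      }
    }
  }
  assertTrue("Block recovery did not complete in time.", recovered);
}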
 
Example 5
Source File: TestFileTruncate.java    From big-c with Apache License 2.0
/**
 * The last block is truncated in the middle (non copy-on-truncate).
 * The DataNodes are shut down immediately after the truncate.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesShutdownImmediately() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testTruncateWithDataNodesShutdownImmediately");

  writeContents(contents, startingFileSize, p);

  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  boolean isReady = fs.truncate(p, newLength);
  assertFalse(isReady);

  cluster.shutdownDataNodes();
  cluster.setDataNodesDead();
  try {
    for(int i = 0; i < SUCCESS_ATTEMPTS && cluster.isDataNodeUp(); i++) {
      Thread.sleep(SLEEP);
    }
    assertFalse("All DataNodes should be down.", cluster.isDataNodeUp());
    LocatedBlocks blocks = getLocatedBlocks(p);
    assertTrue(blocks.isUnderConstruction());
  } finally {
    cluster.startDataNodes(conf, DATANODE_NUM, true,
        StartupOption.REGULAR, null);
    cluster.waitActive();
  }
  checkBlockRecovery(p);

  fs.delete(parent, true);
}
 
Example 6
Source File: TestFileTruncate.java    From big-c with Apache License 2.0
/**
 * Truncate files of different sizes byte by byte.
 */
@Test
public void testBasicTruncate() throws IOException {
  int startingFileSize = 3 * BLOCK_SIZE;

  Path parent = new Path("/test");
  fs.mkdirs(parent);
  fs.setQuota(parent, 100, 1000);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  for (int fileLength = startingFileSize; fileLength > 0;
                                          fileLength -= BLOCK_SIZE - 1) {
    for (int toTruncate = 0; toTruncate <= fileLength; toTruncate++) {
      final Path p = new Path(parent, "testBasicTruncate" + fileLength);
      writeContents(contents, fileLength, p);

      int newLength = fileLength - toTruncate;
      boolean isReady = fs.truncate(p, newLength);
      LOG.info("fileLength=" + fileLength + ", newLength=" + newLength
          + ", toTruncate=" + toTruncate + ", isReady=" + isReady);

      assertEquals("File must be closed for zero truncate"
          + " or truncating at the block boundary",
          isReady, toTruncate == 0 || newLength % BLOCK_SIZE == 0);
      if (!isReady) {
        checkBlockRecovery(p);
      }

      ContentSummary cs = fs.getContentSummary(parent);
      assertEquals("Bad disk space usage",
          cs.getSpaceConsumed(), newLength * REPLICATION);
      // validate the file content
      checkFullFile(p, newLength, contents);
    }
  }
  fs.delete(parent, true);
}
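checkFullFile verifies that what survives the truncate is exactly the original data. A minimal sketch, assuming it reads the file back in full and compares it against the expected prefix of the buffer.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import static org.junit.Assert.assertEquals;

// Assumed content check: the file must hold exactly the first newLength
// bytes of the buffer that was originally written.
private void checkFullFile(Path p, int newLength, byte[] contents)
    throws IOException {
  byte[] actual = new byte[newLength];
  try (FSDataInputStream in = fs.open(p)) {
    in.readFully(actual);
    assertEquals("Unexpected bytes past newLength.", -1, in.read());
  }
  for (int i = 0; i < newLength; i++) {
    assertEquals("Byte mismatch at offset " + i, contents[i], actual[i]);
  }
}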
 
Example 7
Source File: TestFileTruncate.java    From hadoop with Apache License 2.0
/**
 * Upgrade, rollback, and restart test for truncate.
 */
@Test
public void testUpgradeAndRestart() throws IOException {
  Path parent = new Path("/test");
  fs.mkdirs(parent);
  fs.setQuota(parent, 100, 1000);
  fs.allowSnapshot(parent);
  String truncateFile = "testUpgrade";
  final Path p = new Path(parent, truncateFile);
  int startingFileSize = 2 * BLOCK_SIZE;
  int toTruncate = 1;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  writeContents(contents, startingFileSize, p);

  Path snapshotDir = fs.createSnapshot(parent, "ss0");
  Path snapshotFile = new Path(snapshotDir, truncateFile);

  int newLengthBeforeUpgrade = startingFileSize - toTruncate;
  boolean isReady = fs.truncate(p, newLengthBeforeUpgrade);
  assertThat("truncate should have triggered block recovery.",
      isReady, is(false));

  checkBlockRecovery(p);

  checkFullFile(p, newLengthBeforeUpgrade, contents);
  assertFileLength(snapshotFile, startingFileSize);
  long totalBlockBefore = cluster.getNamesystem().getBlocksTotal();

  restartCluster(StartupOption.UPGRADE);

  assertThat("SafeMode should be OFF",
      cluster.getNamesystem().isInSafeMode(), is(false));
  assertThat("NameNode should be performing upgrade.",
      cluster.getNamesystem().isUpgradeFinalized(), is(false));
  FileStatus fileStatus = fs.getFileStatus(p);
  assertThat(fileStatus.getLen(), is((long) newLengthBeforeUpgrade));

  int newLengthAfterUpgrade = newLengthBeforeUpgrade - toTruncate;
  Block oldBlk = getLocatedBlocks(p).getLastLocatedBlock()
      .getBlock().getLocalBlock();
  isReady = fs.truncate(p, newLengthAfterUpgrade);
  assertThat("truncate should have triggered block recovery.",
      isReady, is(false));
  fileStatus = fs.getFileStatus(p);
  assertThat(fileStatus.getLen(), is((long) newLengthAfterUpgrade));
  assertThat("Should copy on truncate during upgrade",
      getLocatedBlocks(p).getLastLocatedBlock().getBlock()
      .getLocalBlock().getBlockId(), is(not(equalTo(oldBlk.getBlockId()))));

  checkBlockRecovery(p);

  checkFullFile(p, newLengthAfterUpgrade, contents);
  assertThat("Total block count should be unchanged from copy-on-truncate",
      cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));

  restartCluster(StartupOption.ROLLBACK);

  assertThat("File does not exist " + p, fs.exists(p), is(true));
  fileStatus = fs.getFileStatus(p);
  assertThat(fileStatus.getLen(), is((long) newLengthBeforeUpgrade));
  checkFullFile(p, newLengthBeforeUpgrade, contents);
  assertThat("Total block count should be unchanged from rolling back",
      cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));

  restartCluster(StartupOption.REGULAR);
  assertThat("Total block count should be unchanged from start-up",
      cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));
  checkFullFile(p, newLengthBeforeUpgrade, contents);
  assertFileLength(snapshotFile, startingFileSize);

  // empty edits and restart
  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  fs.saveNamespace();
  cluster.restartNameNode(true);
  assertThat("Total block count should be unchanged from start-up",
      cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));
  checkFullFile(p, newLengthBeforeUpgrade, contents);
  assertFileLength(snapshotFile, startingFileSize);

  fs.deleteSnapshot(parent, "ss0");
  fs.delete(parent, true);
  assertThat("File " + p + " shouldn't exist", fs.exists(p), is(false));
}
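restartCluster tears the MiniDFSCluster down and brings it back with the requested StartupOption. A sketch under the assumption that it rebuilds on the existing storage directories, so the upgraded or rolled-back image is what comes back up; conf, cluster, fs, and DATANODE_NUM belong to the test class, and rollback may need extra NameNode handling not shown here.

import java.io.IOException;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;

// Assumed restart helper: format(false) keeps the existing name/data dirs so
// the requested startup option acts on the persisted image.
private void restartCluster(StartupOption option) throws IOException {
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(DATANODE_NUM)
      .format(false)
      .startupOption(option)
      .build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
}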
 
Example 8
Source File: TestFileTruncate.java    From big-c with Apache License 2.0
/**
 * The last block is truncated in the middle (non copy-on-truncate).
 * dn0 is shut down before the truncate and restarted after it succeeds.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesRestart() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testTruncateWithDataNodesRestart");

  writeContents(contents, startingFileSize, p);
  LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();

  int dn = 0;
  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  cluster.getDataNodes().get(dn).shutdown();
  try {
    boolean isReady = fs.truncate(p, newLength);
    assertFalse(isReady);
  } finally {
    cluster.restartDataNode(dn, true, true);
    cluster.waitActive();
  }
  checkBlockRecovery(p);

  LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
  /*
   * For non copy-on-truncate, the truncated block id is the same, but the 
   * GS should increase.
   * The truncated block will be replicated to dn0 after it restarts.
   */
  assertEquals(newBlock.getBlock().getBlockId(), 
      oldBlock.getBlock().getBlockId());
  assertEquals(newBlock.getBlock().getGenerationStamp(),
      oldBlock.getBlock().getGenerationStamp() + 1);

  // Wait for replication to reach 3
  DFSTestUtil.waitReplication(fs, p, REPLICATION);
  // Old replica is disregarded and replaced with the truncated one
  assertEquals(cluster.getBlockFile(dn, newBlock.getBlock()).length(), 
      newBlock.getBlockSize());
  assertTrue(cluster.getBlockMetadataFile(dn, 
      newBlock.getBlock()).getName().endsWith(
          newBlock.getBlock().getGenerationStamp() + ".meta"));

  // Validate the file
  FileStatus fileStatus = fs.getFileStatus(p);
  assertThat(fileStatus.getLen(), is((long) newLength));
  checkFullFile(p, newLength, contents);

  fs.delete(parent, true);
}
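getLocatedBlocks is the lookup these truncate tests use to inspect block ids and generation stamps. A sketch, assuming it asks the DFSClient for all block locations of the file; fs is the test's DistributedFileSystem instance.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Assumed lookup helper: fetch the locations of every block in the file.
private LocatedBlocks getLocatedBlocks(Path src) throws IOException {
  return fs.getClient().getLocatedBlocks(src.toString(), 0, Long.MAX_VALUE);
}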
 
Example 9
Source File: TestFileTruncate.java    From big-c with Apache License 2.0
/**
 * The last block is truncated in the middle (copy-on-truncate).
 * dn1 is shut down before the truncate and restarted after it succeeds.
 */
@Test(timeout=60000)
public void testCopyOnTruncateWithDataNodesRestart() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testCopyOnTruncateWithDataNodesRestart");

  writeContents(contents, startingFileSize, p);
  LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();
  fs.allowSnapshot(parent);
  fs.createSnapshot(parent, "ss0");

  int dn = 1;
  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  cluster.getDataNodes().get(dn).shutdown();
  try {
    boolean isReady = fs.truncate(p, newLength);
    assertFalse(isReady);
  } finally {
    cluster.restartDataNode(dn, true, true);
    cluster.waitActive();
  }
  checkBlockRecovery(p);

  LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
  /*
   * For copy-on-truncate, a new block is created with a new block id and new GS.
   * The new block initially has 2 replicas, so it will be replicated to dn1.
   */
  assertNotEquals(newBlock.getBlock().getBlockId(), 
      oldBlock.getBlock().getBlockId());
  assertEquals(newBlock.getBlock().getGenerationStamp(),
      oldBlock.getBlock().getGenerationStamp() + 1);

  // Wait for replication to reach 3
  DFSTestUtil.waitReplication(fs, p, REPLICATION);
  // New block is replicated to dn1
  assertEquals(cluster.getBlockFile(dn, newBlock.getBlock()).length(), 
      newBlock.getBlockSize());
  // Old replica exists too since there is snapshot
  assertEquals(cluster.getBlockFile(dn, oldBlock.getBlock()).length(), 
      oldBlock.getBlockSize());
  assertTrue(cluster.getBlockMetadataFile(dn, 
      oldBlock.getBlock()).getName().endsWith(
          oldBlock.getBlock().getGenerationStamp() + ".meta"));

  // Validate the file
  FileStatus fileStatus = fs.getFileStatus(p);
  assertThat(fileStatus.getLen(), is((long) newLength));
  checkFullFile(p, newLength, contents);

  fs.deleteSnapshot(parent, "ss0");
  fs.delete(parent, true);
}
 
Example 10
Source File: TestFileTruncate.java    From big-c with Apache License 2.0
/**
 * The last block is truncated in the middle (non copy-on-truncate).
 * dn0 and dn1 are restarted immediately after the truncate.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesRestartImmediately() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testTruncateWithDataNodesRestartImmediately");

  writeContents(contents, startingFileSize, p);
  LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();

  int dn0 = 0;
  int dn1 = 1;
  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  boolean isReady = fs.truncate(p, newLength);
  assertFalse(isReady);

  cluster.restartDataNode(dn0, true, true);
  cluster.restartDataNode(dn1, true, true);
  cluster.waitActive();
  checkBlockRecovery(p);

  LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
  /*
   * For non copy-on-truncate, the truncated block id is the same, but the 
   * GS should increase.
   */
  assertEquals(newBlock.getBlock().getBlockId(), 
      oldBlock.getBlock().getBlockId());
  assertEquals(newBlock.getBlock().getGenerationStamp(),
      oldBlock.getBlock().getGenerationStamp() + 1);

  // Wait for replication to reach 3
  DFSTestUtil.waitReplication(fs, p, REPLICATION);
  // Old replica is disregarded and replaced with the truncated one on dn0
  assertEquals(cluster.getBlockFile(dn0, newBlock.getBlock()).length(), 
      newBlock.getBlockSize());
  assertTrue(cluster.getBlockMetadataFile(dn0, 
      newBlock.getBlock()).getName().endsWith(
          newBlock.getBlock().getGenerationStamp() + ".meta"));

  // Old replica is disregarded and replaced with the truncated one on dn1
  assertEquals(cluster.getBlockFile(dn1, newBlock.getBlock()).length(), 
      newBlock.getBlockSize());
  assertTrue(cluster.getBlockMetadataFile(dn1, 
      newBlock.getBlock()).getName().endsWith(
          newBlock.getBlock().getGenerationStamp() + ".meta"));

  // Validate the file
  FileStatus fileStatus = fs.getFileStatus(p);
  assertThat(fileStatus.getLen(), is((long) newLength));
  checkFullFile(p, newLength, contents);

  fs.delete(parent, true);
}
 