Java Code Examples for org.apache.hadoop.hdfs.client.HdfsDataOutputStream#hsync()

The following examples show how to use org.apache.hadoop.hdfs.client.HdfsDataOutputStream#hsync(). Each snippet is taken from an open-source project; the source file and license are noted above each example.
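Before the project examples, here is a minimal standalone sketch of the pattern they all share: write to an HDFS file, then call hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)) so the NameNode records the new file length while the stream remains open. This sketch is illustrative and not taken from any of the projects below; the class name, the /tmp/hsync-demo path, and the default configuration are assumptions.

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

public class HsyncUpdateLengthSketch {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS in the configuration points at an HDFS cluster.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/hsync-demo");   // hypothetical path

    FSDataOutputStream raw = fs.create(file);
    try {
      // On HDFS the returned stream is an HdfsDataOutputStream, which
      // exposes the flag-taking hsync overload.
      if (raw instanceof HdfsDataOutputStream) {
        HdfsDataOutputStream out = (HdfsDataOutputStream) raw;
        out.write(new byte[1024]);
        // Flush to the DataNodes and have the NameNode update the visible
        // length, so readers see the synced bytes before the file is closed.
        out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
        System.out.println("visible length: " + fs.getFileStatus(file).getLen());
      }
    } finally {
      raw.close();
    }
  }
}

Without UPDATE_LENGTH, hsync() still persists the data on the DataNodes, but other readers may observe the old file length until the block is completed or the file is closed; the flag is what makes the tests below see an up-to-date length for a file under construction.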
Example 1
Source File: TestFSImageWithSnapshot.java    From hadoop with Apache License 2.0
/**
 * Test fsimage loading while there is a file under construction.
 */
@Test (timeout=60000)
public void testLoadImageWithAppending() throws Exception {
  Path sub1 = new Path(dir, "sub1");
  Path sub1file1 = new Path(sub1, "sub1file1");
  Path sub1file2 = new Path(sub1, "sub1file2");
  DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
  
  hdfs.allowSnapshot(dir);
  hdfs.createSnapshot(dir, "s0");
  
  // keep the file under construction, but sync its length to the NameNode
  HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  
  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
 
Example 2
Source File: TestSnapshot.java    From hadoop with Apache License 2.0
@Override
void modify() throws Exception {
  assertTrue(fs.exists(file));
  byte[] toAppend = new byte[appendLen];
  random.nextBytes(toAppend);

  // append random bytes, then sync the new length without closing the stream
  out = (HdfsDataOutputStream) fs.append(file);
  out.write(toAppend);
  out.hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
}
 
Example 3
Source File: TestFSImageWithSnapshot.java    From hadoop with Apache License 2.0
/**
 * Test fsimage saving/loading while a file append is in progress.
 */
@Test (timeout=60000)
public void testSaveLoadImageWithAppending() throws Exception {
  Path sub1 = new Path(dir, "sub1");
  Path sub1file1 = new Path(sub1, "sub1file1");
  Path sub1file2 = new Path(sub1, "sub1file2");
  DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
  
  // 1. create snapshot s0
  hdfs.allowSnapshot(dir);
  hdfs.createSnapshot(dir, "s0");
  
  // 2. create snapshot s1 before appending sub1file1 finishes
  HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  // also append sub1file2
  DFSTestUtil.appendFile(hdfs, sub1file2, BLOCKSIZE);
  hdfs.createSnapshot(dir, "s1");
  out.close();
  
  // 3. create snapshot s2 before appending finishes
  out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  hdfs.createSnapshot(dir, "s2");
  out.close();
  
  // 4. save fsimage before appending finishes
  out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  // dump fsdir
  File fsnBefore = dumpTree2File("before");
  // save the namesystem to a temp file
  File imageFile = saveFSImageToTempFile();
  
  // 5. load fsimage and compare
  // first restart and format the cluster
  out.close();
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(true)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
  // then load the fsimage
  loadFSImageFromTempFile(imageFile);
  
  // dump the fsdir tree again
  File fsnAfter = dumpTree2File("after");
  
  // compare the two dumped trees
  SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter, true);
}
 
Example 4
Source File: TestINodeFileUnderConstructionWithSnapshot.java    From hadoop with Apache License 2.0
/**
 * Call DFSClient#callGetBlockLocations(...) for a snapshot file. Make sure
 * only blocks within the requested size range are returned.
 */
@Test
public void testGetBlockLocations() throws Exception {
  final Path root = new Path("/");
  final Path file = new Path("/file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
  
  // take a snapshot on root
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
  
  final Path fileInSnapshot = SnapshotTestHelper.getSnapshotPath(root,
      "s1", file.getName());
  FileStatus status = hdfs.getFileStatus(fileInSnapshot);
  // make sure we record the size for the file
  assertEquals(BLOCKSIZE, status.getLen());
  
  // append data to file
  DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE - 1);
  status = hdfs.getFileStatus(fileInSnapshot);
  // the size of the snapshot file should still be BLOCKSIZE
  assertEquals(BLOCKSIZE, status.getLen());
  // the size of the file should be (2 * BLOCKSIZE - 1)
  status = hdfs.getFileStatus(file);
  assertEquals(BLOCKSIZE * 2 - 1, status.getLen());
  
  // call DFSClient#callGetBlockLocations for the file in snapshot
  LocatedBlocks blocks = DFSClientAdapter.callGetBlockLocations(
      cluster.getNameNodeRpc(), fileInSnapshot.toString(), 0, Long.MAX_VALUE);
  List<LocatedBlock> blockList = blocks.getLocatedBlocks();
  
  // should be only one block
  assertEquals(BLOCKSIZE, blocks.getFileLength());
  assertEquals(1, blockList.size());
  
  // check the last block
  LocatedBlock lastBlock = blocks.getLastLocatedBlock();
  assertEquals(0, lastBlock.getStartOffset());
  assertEquals(BLOCKSIZE, lastBlock.getBlockSize());
  
  // take another snapshot
  SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
  final Path fileInSnapshot2 = SnapshotTestHelper.getSnapshotPath(root,
      "s2", file.getName());
  
  // append data to file without closing
  HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  
  status = hdfs.getFileStatus(fileInSnapshot2);
  // the size of the snapshot file should be BLOCKSIZE * 2 - 1
  assertEquals(BLOCKSIZE * 2 - 1, status.getLen());
  // the size of the file should be (3 * BLOCKSIZE - 1)
  status = hdfs.getFileStatus(file);
  assertEquals(BLOCKSIZE * 3 - 1, status.getLen());
  
  blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
      fileInSnapshot2.toString(), 0, Long.MAX_VALUE);
  assertFalse(blocks.isUnderConstruction());
  assertTrue(blocks.isLastBlockComplete());
  blockList = blocks.getLocatedBlocks();
  
  // should be 2 blocks
  assertEquals(BLOCKSIZE * 2 - 1, blocks.getFileLength());
  assertEquals(2, blockList.size());
  
  // check the last block
  lastBlock = blocks.getLastLocatedBlock();
  assertEquals(BLOCKSIZE, lastBlock.getStartOffset());
  assertEquals(BLOCKSIZE, lastBlock.getBlockSize());
  
  blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
      fileInSnapshot2.toString(), BLOCKSIZE, 0);
  blockList = blocks.getLocatedBlocks();
  assertEquals(1, blockList.size());
  
  // check blocks for file being written
  blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
      file.toString(), 0, Long.MAX_VALUE);
  blockList = blocks.getLocatedBlocks();
  assertEquals(3, blockList.size());
  assertTrue(blocks.isUnderConstruction());
  assertFalse(blocks.isLastBlockComplete());
  
  lastBlock = blocks.getLastLocatedBlock();
  assertEquals(BLOCKSIZE * 2, lastBlock.getStartOffset());
  assertEquals(BLOCKSIZE - 1, lastBlock.getBlockSize());
  out.close();
}
 