Java Code Examples for org.apache.hadoop.hdfs.DFSTestUtil#appendFile()

The following examples show how to use org.apache.hadoop.hdfs.DFSTestUtil#appendFile(). Each example is drawn from an open-source project; the source file and project it comes from are noted above the code. A minimal usage sketch of the method itself appears first, before the project examples.
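DFSTestUtil#appendFile() has two overloads: one appends a given number of random bytes to an existing file, the other appends the bytes of a string. The sketch below is a minimal, self-contained illustration of that call pattern against a single-DataNode MiniDFSCluster; the class name TestAppendFileSketch and the constants are invented for this sketch and do not come from any of the projects listed below.

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;

public class TestAppendFileSketch {  // hypothetical class, for illustration only
  private static final int BLOCKSIZE = 1024;   // small block size keeps the test cheap
  private static final short REPLICATION = 1;
  private static final long SEED = 0L;

  @Test
  public void testAppendBytesAndString() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      Path file = new Path("/sketch/file");

      // Create a one-block file first; appendFile() requires the file to exist.
      DFSTestUtil.createFile(fs, file, BLOCKSIZE, REPLICATION, SEED);

      // Overload 1: append `length` random bytes.
      DFSTestUtil.appendFile(fs, file, BLOCKSIZE / 2);

      // Overload 2: append the bytes of a string.
      String tail = "appended by the sketch";
      DFSTestUtil.appendFile(fs, file, tail);

      long expectedLen = BLOCKSIZE + BLOCKSIZE / 2 + tail.length();
      assertEquals(expectedLen, fs.getFileStatus(file).getLen());
    } finally {
      cluster.shutdown();
    }
  }
}

In the project examples that follow, the MiniDFSCluster and fields such as dfs/hdfs, BLOCKSIZE, and REPLICATION are typically created once in a setup method of the test class rather than inside each test, which is why they appear without declarations in the snippets.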
Example 1
Source File: TestSnapshotDiffReport.java    From big-c with Apache License 2.0
/**
 * Rename a file and then append some data to it
 */
@Test
public void testDiffReportWithRenameAndAppend() throws Exception {
  final Path root = new Path("/");
  final Path foo = new Path(root, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPLICATION, seed);

  SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
  final Path bar = new Path(root, "bar");
  hdfs.rename(foo, bar);
  DFSTestUtil.appendFile(hdfs, bar, 10); // append 10 bytes
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");

  // the MODIFY entry for the file is reported under its pre-rename name,
  // before the RENAME entry
  verifyDiffReport(root, "s0", "s1",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("foo")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("foo"),
          DFSUtil.string2Bytes("bar")));
}
 
Example 2
Source File: TestSnapshotDiffReport.java    From hadoop with Apache License 2.0
/**
 * Rename a file and then append some data to it
 */
@Test
public void testDiffReportWithRenameAndAppend() throws Exception {
  final Path root = new Path("/");
  final Path foo = new Path(root, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPLICATION, seed);

  SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
  final Path bar = new Path(root, "bar");
  hdfs.rename(foo, bar);
  DFSTestUtil.appendFile(hdfs, bar, 10); // append 10 bytes
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");

  // the MODIFY entry for the file is reported under its pre-rename name,
  // before the RENAME entry
  verifyDiffReport(root, "s0", "s1",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("foo")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("foo"),
          DFSUtil.string2Bytes("bar")));
}
 
Example 3
Source File: TestDistCpSync.java    From big-c with Apache License 2.0
/**
 * make some changes under the given directory (created in the above way).
 * 1. rename dir/foo/d1 to dir/bar/d1
 * 2. delete dir/bar/d1/f3
 * 3. rename dir/foo to /dir/bar/d1/foo
 * 4. delete dir/bar/d1/foo/f1
 * 5. create file dir/bar/d1/foo/f1 whose size is 2*BLOCK_SIZE
 * 6. append one BLOCK to file dir/bar/f2
 * 7. rename dir/bar to dir/foo
 *
 * Thus after all these ops the subtree looks like this:
 *                       dir/
 *                       foo/
 *                 d1/    f2(A)    d2/
 *                foo/             f4
 *                f1(new)
 */
private void changeData(Path dir) throws Exception {
  final Path foo = new Path(dir, "foo");
  final Path bar = new Path(dir, "bar");
  final Path d1 = new Path(foo, "d1");
  final Path f2 = new Path(bar, "f2");

  final Path bar_d1 = new Path(bar, "d1");
  dfs.rename(d1, bar_d1);
  final Path f3 = new Path(bar_d1, "f3");
  dfs.delete(f3, true);
  final Path newfoo = new Path(bar_d1, "foo");
  dfs.rename(foo, newfoo);
  final Path f1 = new Path(newfoo, "f1");
  dfs.delete(f1, true);
  DFSTestUtil.createFile(dfs, f1, 2 * BLOCK_SIZE, DATA_NUM, 0);
  DFSTestUtil.appendFile(dfs, f2, (int) BLOCK_SIZE);
  dfs.rename(bar, new Path(dir, "foo"));
}
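The directory tree that changeData() rewrites ("created in the above way") is built by a setup helper that is not shown in this excerpt. As a rough sketch only, inferred from the paths referenced above and assuming every file starts out one BLOCK_SIZE long and that the helper is named initData (both assumptions), the setup could look like this:

// Hypothetical reconstruction of the setup referenced by changeData();
// the helper name and file sizes are assumptions.
private void initData(Path dir) throws Exception {
  final Path foo = new Path(dir, "foo");
  final Path bar = new Path(dir, "bar");
  // Files referenced by changeData(): foo/f1, foo/d1/f3, bar/f2, bar/d2/f4.
  final Path f1 = new Path(foo, "f1");
  final Path f3 = new Path(new Path(foo, "d1"), "f3");
  final Path f2 = new Path(bar, "f2");
  final Path f4 = new Path(new Path(bar, "d2"), "f4");

  // createFile() creates missing parent directories, so d1 and d2 come
  // into existence along with their files.
  DFSTestUtil.createFile(dfs, f1, BLOCK_SIZE, DATA_NUM, 0);
  DFSTestUtil.createFile(dfs, f3, BLOCK_SIZE, DATA_NUM, 0);
  DFSTestUtil.createFile(dfs, f2, BLOCK_SIZE, DATA_NUM, 0);
  DFSTestUtil.createFile(dfs, f4, BLOCK_SIZE, DATA_NUM, 0);
}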
 
Example 4
Source File: TestDiskspaceQuotaUpdate.java    From hadoop with Apache License 2.0
/**
 * Test if the quota can be correctly updated when file length is updated
 * through fsync
 */
@Test (timeout=60000)
public void testUpdateQuotaForFSync() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(dfs, bar, BLOCKSIZE, REPLICATION, 0L);
  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);

  FSDataOutputStream out = dfs.append(bar);
  out.write(new byte[BLOCKSIZE / 4]);
  ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet
      .of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));

  INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  QuotaCounts quota = fooNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  long ns = quota.getNameSpace();
  long ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  // the file is under construction, so its incomplete last block is
  // charged at the full block size
  assertEquals(BLOCKSIZE * 2 * REPLICATION, ds);

  out.write(new byte[BLOCKSIZE / 4]);
  out.close();

  fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns);
  assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);

  // append another block
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);

  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
}
 
Example 5
Source File: TestDiskspaceQuotaUpdate.java    From hadoop with Apache License 2.0
/**
 * Test append over storage quota does not mark file as UC or create lease
 */
@Test (timeout=60000)
public void testAppendOverStorageQuota() throws Exception {
  final Path dir = new Path("/TestAppendOverQuota");
  final Path file = new Path(dir, "file");

  // create partial block file
  dfs.mkdirs(dir);
  DFSTestUtil.createFile(dfs, file, BLOCKSIZE/2, REPLICATION, seed);

  // lower quota to cause exception when appending to partial block
  dfs.setQuota(dir, Long.MAX_VALUE - 1, 1);
  final INodeDirectory dirNode = fsdir.getINode4Write(dir.toString())
      .asDirectory();
  final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  try {
    DFSTestUtil.appendFile(dfs, file, BLOCKSIZE);
    Assert.fail("append didn't fail");
  } catch (DSQuotaExceededException e) {
    // ignore
  }

  // check that the file exists, isn't UC, and has no dangling lease
  INodeFile inode = fsdir.getINode(file.toString()).asFile();
  Assert.assertNotNull(inode);
  Assert.assertFalse("should not be UC", inode.isUnderConstruction());
  Assert.assertNull("should not have a lease", cluster.getNamesystem()
      .getLeaseManager().getLeaseByPath(file.toString()));
  // make sure the quota usage is unchanged
  final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  assertEquals(spaceUsed, newSpaceUsed);
  // make sure edits aren't corrupted
  dfs.recoverLease(file);
  cluster.restartNameNodes();
}
 
Example 6
Source File: TestDiskspaceQuotaUpdate.java    From hadoop with Apache License 2.0
/**
 * Test append over a specific type of storage quota does not mark file as
 * UC or create a lease
 */
@Test (timeout=60000)
public void testAppendOverTypeQuota() throws Exception {
  final Path dir = new Path("/TestAppendOverTypeQuota");
  final Path file = new Path(dir, "file");

  // create partial block file
  dfs.mkdirs(dir);
  // set the storage policy on dir
  dfs.setStoragePolicy(dir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  DFSTestUtil.createFile(dfs, file, BLOCKSIZE/2, REPLICATION, seed);

  // set quota of SSD to 1L
  dfs.setQuotaByStorageType(dir, StorageType.SSD, 1L);
  final INodeDirectory dirNode = fsdir.getINode4Write(dir.toString())
      .asDirectory();
  final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  try {
    DFSTestUtil.appendFile(dfs, file, BLOCKSIZE);
    Assert.fail("append didn't fail");
  } catch (RemoteException e) {
    assertTrue(e.getClassName().contains("QuotaByStorageTypeExceededException"));
  }

  // check that the file exists, isn't UC, and has no dangling lease
  INodeFile inode = fsdir.getINode(file.toString()).asFile();
  Assert.assertNotNull(inode);
  Assert.assertFalse("should not be UC", inode.isUnderConstruction());
  Assert.assertNull("should not have a lease", cluster.getNamesystem()
      .getLeaseManager().getLeaseByPath(file.toString()));
  // make sure the quota usage is unchanged
  final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  assertEquals(spaceUsed, newSpaceUsed);
  // make sure edits aren't corrupted
  dfs.recoverLease(file);
  cluster.restartNameNodes();
}
 
Example 7
Source File: TestDataNodeHotSwapVolumes.java    From big-c with Apache License 2.0
@Test(timeout=60000)
public void testAddVolumesDuringWrite()
    throws IOException, InterruptedException, TimeoutException,
    ReconfigurationException {
  startDFSCluster(1, 1);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  Path testFile = new Path("/test");
  createFile(testFile, 4);  // Each volume has 2 blocks.

  addVolumes(2);

  // Continue to write the same file, thus the new volumes will have blocks.
  DFSTestUtil.appendFile(cluster.getFileSystem(), testFile, BLOCK_SIZE * 8);
  verifyFileLength(cluster.getFileSystem(), testFile, 8 + 4);
  // After appending data, there should be [2, 2, 4, 4] blocks in each volume
  // respectively.
  List<Integer> expectedNumBlocks = Arrays.asList(2, 2, 4, 4);

  List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
      cluster.getAllBlockReports(bpid);
  assertEquals(1, blockReports.size());  // 1 DataNode
  assertEquals(4, blockReports.get(0).size());  // 4 volumes
  Map<DatanodeStorage, BlockListAsLongs> dnReport =
      blockReports.get(0);
  List<Integer> actualNumBlocks = new ArrayList<Integer>();
  for (BlockListAsLongs blockList : dnReport.values()) {
    actualNumBlocks.add(blockList.getNumberOfBlocks());
  }
  Collections.sort(actualNumBlocks);
  assertEquals(expectedNumBlocks, actualNumBlocks);
}
 
Example 8
Source File: TestTruncateQuotaUpdate.java    From big-c with Apache License 2.0
@Override
public void prepare() throws Exception {
  // original size: 2.5 blocks
  DFSTestUtil.createFile(dfs, file, BLOCKSIZE * 2 + BLOCKSIZE / 2,
      REPLICATION, 0L);
  SnapshotTestHelper.createSnapshot(dfs, dir, "s1");

  // truncate to 1.5 block
  dfs.truncate(file, BLOCKSIZE + BLOCKSIZE / 2);
  TestFileTruncate.checkBlockRecovery(file, dfs);

  // append another 1 BLOCK
  DFSTestUtil.appendFile(dfs, file, BLOCKSIZE);
}
 
Example 9
Source File: TestDataNodeHotSwapVolumes.java    From big-c with Apache License 2.0
@Test(timeout=60000)
public void testAddVolumesToFederationNN()
    throws IOException, TimeoutException, InterruptedException,
    ReconfigurationException {
  // Start a cluster with 2 NameNodes and 1 DataNode. Each DataNode has 2
  // volumes.
  final int numNameNodes = 2;
  final int numDataNodes = 1;
  startDFSCluster(numNameNodes, numDataNodes);
  Path testFile = new Path("/test");
  // Create a file on the first namespace with 4 blocks.
  createFile(0, testFile, 4);
  // Create a file on the second namespace with 4 blocks.
  createFile(1, testFile, 4);

  // Add 2 volumes to the first DataNode.
  final int numNewVolumes = 2;
  addVolumes(numNewVolumes);

  // Append to the file on the first namespace.
  DFSTestUtil.appendFile(cluster.getFileSystem(0), testFile, BLOCK_SIZE * 8);

  List<List<Integer>> actualNumBlocks = getNumBlocksReport(0);
  assertEquals(cluster.getDataNodes().size(), actualNumBlocks.size());
  List<Integer> blocksOnFirstDN = actualNumBlocks.get(0);
  Collections.sort(blocksOnFirstDN);
  assertEquals(Arrays.asList(2, 2, 4, 4), blocksOnFirstDN);

  // Verify the second namespace also has the new volumes and they are empty.
  actualNumBlocks = getNumBlocksReport(1);
  assertEquals(4, actualNumBlocks.get(0).size());
  assertEquals(numNewVolumes,
      Collections.frequency(actualNumBlocks.get(0), 0));
}
 
Example 10
Source File: TestDiskspaceQuotaUpdate.java    From big-c with Apache License 2.0
/**
 * Test append over a specific type of storage quota does not mark file as
 * UC or create a lease
 */
@Test (timeout=60000)
public void testAppendOverTypeQuota() throws Exception {
  final Path dir = new Path("/TestAppendOverTypeQuota");
  final Path file = new Path(dir, "file");

  // create partial block file
  dfs.mkdirs(dir);
  // set the storage policy on dir
  dfs.setStoragePolicy(dir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  DFSTestUtil.createFile(dfs, file, BLOCKSIZE/2, REPLICATION, seed);

  // set quota of SSD to 1L
  dfs.setQuotaByStorageType(dir, StorageType.SSD, 1L);
  final INodeDirectory dirNode = fsdir.getINode4Write(dir.toString())
      .asDirectory();
  final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  try {
    DFSTestUtil.appendFile(dfs, file, BLOCKSIZE);
    Assert.fail("append didn't fail");
  } catch (RemoteException e) {
    assertTrue(e.getClassName().contains("QuotaByStorageTypeExceededException"));
  }

  // check that the file exists, isn't UC, and has no dangling lease
  INodeFile inode = fsdir.getINode(file.toString()).asFile();
  Assert.assertNotNull(inode);
  Assert.assertFalse("should not be UC", inode.isUnderConstruction());
  Assert.assertNull("should not have a lease", cluster.getNamesystem()
      .getLeaseManager().getLeaseByPath(file.toString()));
  // make sure the quota usage is unchanged
  final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  assertEquals(spaceUsed, newSpaceUsed);
  // make sure edits aren't corrupted
  dfs.recoverLease(file);
  cluster.restartNameNodes();
}
 
Example 11
Source File: TestDiskspaceQuotaUpdate.java    From big-c with Apache License 2.0
/**
 * Test append over storage quota does not mark file as UC or create lease
 */
@Test (timeout=60000)
public void testAppendOverStorageQuota() throws Exception {
  final Path dir = new Path("/TestAppendOverQuota");
  final Path file = new Path(dir, "file");

  // create partial block file
  dfs.mkdirs(dir);
  DFSTestUtil.createFile(dfs, file, BLOCKSIZE/2, REPLICATION, seed);

  // lower quota to cause exception when appending to partial block
  dfs.setQuota(dir, Long.MAX_VALUE - 1, 1);
  final INodeDirectory dirNode = fsdir.getINode4Write(dir.toString())
      .asDirectory();
  final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  try {
    DFSTestUtil.appendFile(dfs, file, BLOCKSIZE);
    Assert.fail("append didn't fail");
  } catch (DSQuotaExceededException e) {
    // ignore
  }

  // check that the file exists, isn't UC, and has no dangling lease
  INodeFile inode = fsdir.getINode(file.toString()).asFile();
  Assert.assertNotNull(inode);
  Assert.assertFalse("should not be UC", inode.isUnderConstruction());
  Assert.assertNull("should not have a lease", cluster.getNamesystem()
      .getLeaseManager().getLeaseByPath(file.toString()));
  // make sure the quota usage is unchanged
  final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  assertEquals(spaceUsed, newSpaceUsed);
  // make sure edits aren't corrupted
  dfs.recoverLease(file);
  cluster.restartNameNodes();
}
 
Example 12
Source File: TestDiskspaceQuotaUpdate.java    From big-c with Apache License 2.0
/**
 * Test if the quota can be correctly updated when file length is updated
 * through fsync
 */
@Test (timeout=60000)
public void testUpdateQuotaForFSync() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(dfs, bar, BLOCKSIZE, REPLICATION, 0L);
  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);

  FSDataOutputStream out = dfs.append(bar);
  out.write(new byte[BLOCKSIZE / 4]);
  ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet
      .of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));

  INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  QuotaCounts quota = fooNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  long ns = quota.getNameSpace();
  long ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  // the file is under construction, so its incomplete last block is
  // charged at the full block size
  assertEquals(BLOCKSIZE * 2 * REPLICATION, ds);

  out.write(new byte[BLOCKSIZE / 4]);
  out.close();

  fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns);
  assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);

  // append another block
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);

  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
}
 
Example 13
Source File: TestDataNodeHotSwapVolumes.java    From hadoop with Apache License 2.0
@Test(timeout=60000)
public void testAddVolumesDuringWrite()
    throws IOException, InterruptedException, TimeoutException,
    ReconfigurationException {
  startDFSCluster(1, 1);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  Path testFile = new Path("/test");
  createFile(testFile, 4);  // Each volume has 2 blocks.

  addVolumes(2);

  // Continue to write the same file, thus the new volumes will have blocks.
  DFSTestUtil.appendFile(cluster.getFileSystem(), testFile, BLOCK_SIZE * 8);
  verifyFileLength(cluster.getFileSystem(), testFile, 8 + 4);
  // After appending data, there should be [2, 2, 4, 4] blocks in each volume
  // respectively.
  List<Integer> expectedNumBlocks = Arrays.asList(2, 2, 4, 4);

  List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
      cluster.getAllBlockReports(bpid);
  assertEquals(1, blockReports.size());  // 1 DataNode
  assertEquals(4, blockReports.get(0).size());  // 4 volumes
  Map<DatanodeStorage, BlockListAsLongs> dnReport =
      blockReports.get(0);
  List<Integer> actualNumBlocks = new ArrayList<Integer>();
  for (BlockListAsLongs blockList : dnReport.values()) {
    actualNumBlocks.add(blockList.getNumberOfBlocks());
  }
  Collections.sort(actualNumBlocks);
  assertEquals(expectedNumBlocks, actualNumBlocks);
}
 
Example 14
Source File: TestFSImageWithSnapshot.java    From big-c with Apache License 2.0
/**
 * Test the fsimage saving/loading while file appending.
 */
@Test (timeout=60000)
public void testSaveLoadImageWithAppending() throws Exception {
  Path sub1 = new Path(dir, "sub1");
  Path sub1file1 = new Path(sub1, "sub1file1");
  Path sub1file2 = new Path(sub1, "sub1file2");
  DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
  
  // 1. create snapshot s0
  hdfs.allowSnapshot(dir);
  hdfs.createSnapshot(dir, "s0");
  
  // 2. create snapshot s1 before appending sub1file1 finishes
  HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  // also append sub1file2
  DFSTestUtil.appendFile(hdfs, sub1file2, BLOCKSIZE);
  hdfs.createSnapshot(dir, "s1");
  out.close();
  
  // 3. create snapshot s2 before appending finishes
  out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  hdfs.createSnapshot(dir, "s2");
  out.close();
  
  // 4. save fsimage before appending finishes
  out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  // dump fsdir
  File fsnBefore = dumpTree2File("before");
  // save the namesystem to a temp file
  File imageFile = saveFSImageToTempFile();
  
  // 5. load fsimage and compare
  // first restart the cluster, and format the cluster
  out.close();
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(true)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
  // then load the fsimage
  loadFSImageFromTempFile(imageFile);
  
  // dump the fsdir tree again
  File fsnAfter = dumpTree2File("after");
  
  // compare the two dumped trees
  SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter, true);
}
 
Example 15
Source File: TestDiskspaceQuotaUpdate.java    From big-c with Apache License 2.0
/**
 * Test if the quota can be correctly updated for append
 */
@Test (timeout=60000)
public void testUpdateQuotaForAppend() throws Exception {
  final Path foo = new Path(dir, "foo");
  final Path bar = new Path(foo, "bar");
  long currentFileLen = BLOCKSIZE;
  DFSTestUtil.createFile(dfs, bar, currentFileLen, REPLICATION, seed);
  dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);

  // append half of the block data, the previous file length is at block
  // boundary
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE / 2);
  currentFileLen += (BLOCKSIZE / 2);

  INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  assertTrue(fooNode.isQuotaSet());
  QuotaCounts quota = fooNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  long ns = quota.getNameSpace();
  long ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals(currentFileLen * REPLICATION, ds);
  ContentSummary c = dfs.getContentSummary(foo);
  assertEquals(c.getSpaceConsumed(), ds);

  // append another block, the previous file length is not at block boundary
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);
  currentFileLen += BLOCKSIZE;

  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals(currentFileLen * REPLICATION, ds);
  c = dfs.getContentSummary(foo);
  assertEquals(c.getSpaceConsumed(), ds);

  // append several blocks
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE * 3 + BLOCKSIZE / 8);
  currentFileLen += (BLOCKSIZE * 3 + BLOCKSIZE / 8);

  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals(currentFileLen * REPLICATION, ds);
  c = dfs.getContentSummary(foo);
  assertEquals(c.getSpaceConsumed(), ds);
}
 
Example 16
Source File: TestSnapshotPathINodes.java    From big-c with Apache License 2.0
/**
 * Check the INodes resolved for a snapshot file path while modifying the
 * file after the snapshot is taken.
 */
@Test (timeout=15000)
public void testSnapshotPathINodesAfterModification() throws Exception {
  // First check the INode for /TestSnapshot/sub1/file1
  String[] names = INode.getPathNames(file1.toString());
  byte[][] components = INode.getPathComponents(names);
  INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
      components, false);
  // The number of inodes should be equal to components.length
  assertEquals(nodesInPath.length(), components.length);

  // The last INode should be associated with file1
  assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(),
      file1.toString());
  // record the modification time of the inode
  final long modTime = nodesInPath.getINode(nodesInPath.length() - 1)
      .getModificationTime();
  
  // Create a snapshot for the dir, and check the inodes for the path
  // pointing to a snapshot file
  hdfs.allowSnapshot(sub1);
  hdfs.createSnapshot(sub1, "s3");
  
  // Modify file1
  DFSTestUtil.appendFile(hdfs, file1, "the content for appending");

  // Check the INodes for snapshot of file1
  String snapshotPath = sub1.toString() + "/.snapshot/s3/file1";
  names = INode.getPathNames(snapshotPath);
  components = INode.getPathComponents(names);
  INodesInPath ssNodesInPath = INodesInPath.resolve(fsdir.rootDir,
      components, false);
  // The length of ssNodesInPath should be (components.length - 1), since the
  // ".snapshot" component is ignored
  assertEquals(ssNodesInPath.length(), components.length - 1);
  final Snapshot s3 = getSnapshot(ssNodesInPath, "s3", 3);
  assertSnapshot(ssNodesInPath, true, s3, 3);
  // Check the INode for snapshot of file1
  INode snapshotFileNode = ssNodesInPath.getLastINode();
  assertEquals(snapshotFileNode.getLocalName(), file1.getName());
  assertTrue(snapshotFileNode.asFile().isWithSnapshot());
  // The modification time of the snapshot INode should be the same with the
  // original INode before modification
  assertEquals(modTime,
      snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshotId()));

  // Check the INode for /TestSnapshot/sub1/file1 again
  names = INode.getPathNames(file1.toString());
  components = INode.getPathComponents(names);
  INodesInPath newNodesInPath = INodesInPath.resolve(fsdir.rootDir,
      components, false);
  assertSnapshot(newNodesInPath, false, s3, -1);
  // The number of inodes should be equal to components.length
  assertEquals(newNodesInPath.length(), components.length);
  // The last INode should be associated with file1
  final int last = components.length - 1;
  assertEquals(newNodesInPath.getINode(last).getFullPathName(),
      file1.toString());
  // The modification time of the INode for file1 should have been changed
  Assert.assertFalse(modTime == newNodesInPath.getINode(last).getModificationTime());
  hdfs.deleteSnapshot(sub1, "s3");
  hdfs.disallowSnapshot(sub1);
}
 
Example 17
Source File: TestSnapshotFileLength.java    From big-c with Apache License 2.0
/**
 * Added as part of JIRA HDFS-5343.
 * Test that the cat command on a snapshot path cannot read a file beyond
 * the snapshot file length.
 * @throws Exception
 */
@Test (timeout = 600000)
public void testSnapshotFileLengthWithCatCommand() throws Exception {

  FSDataInputStream fis = null;
  FileStatus fileStatus = null;

  int bytesRead;
  byte[] buffer = new byte[BLOCKSIZE * 8];

  hdfs.mkdirs(sub);
  Path file1 = new Path(sub, file1Name);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);

  hdfs.allowSnapshot(sub);
  hdfs.createSnapshot(sub, snapshot1);

  DFSTestUtil.appendFile(hdfs, file1, BLOCKSIZE);

  // Make sure we can read the entire file via its non-snapshot path.
  fileStatus = hdfs.getFileStatus(file1);
  assertEquals("Unexpected file length", BLOCKSIZE * 2, fileStatus.getLen());
  fis = hdfs.open(file1);
  bytesRead = fis.read(buffer, 0, buffer.length);
  assertEquals("Unexpected # bytes read", BLOCKSIZE * 2, bytesRead);
  fis.close();

  Path file1snap1 =
      SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
  fis = hdfs.open(file1snap1);
  fileStatus = hdfs.getFileStatus(file1snap1);
  assertEquals(fileStatus.getLen(), BLOCKSIZE);
  // Make sure we can only read up to the snapshot length.
  bytesRead = fis.read(buffer, 0, buffer.length);
  assertEquals("Unexpected # bytes read", BLOCKSIZE, bytesRead);
  fis.close();

  PrintStream outBackup = System.out;
  PrintStream errBackup = System.err;
  ByteArrayOutputStream bao = new ByteArrayOutputStream();
  System.setOut(new PrintStream(bao));
  System.setErr(new PrintStream(bao));
  // Make sure we can cat the file up to the snapshot length
  FsShell shell = new FsShell();
  try {
    ToolRunner.run(conf, shell, new String[] { "-cat",
    "/TestSnapshotFileLength/sub1/.snapshot/snapshot1/file1" });
    assertEquals("Unexpected # bytes from -cat", BLOCKSIZE, bao.size());
  } finally {
    System.setOut(outBackup);
    System.setErr(errBackup);
  }
}
 
Example 18
Source File: TestSnapshotFileLength.java    From hadoop with Apache License 2.0
/**
 * Added as part of JIRA HDFS-5343.
 * Test that the cat command on a snapshot path cannot read a file beyond
 * the snapshot file length.
 * @throws Exception
 */
@Test (timeout = 600000)
public void testSnapshotFileLengthWithCatCommand() throws Exception {

  FSDataInputStream fis = null;
  FileStatus fileStatus = null;

  int bytesRead;
  byte[] buffer = new byte[BLOCKSIZE * 8];

  hdfs.mkdirs(sub);
  Path file1 = new Path(sub, file1Name);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);

  hdfs.allowSnapshot(sub);
  hdfs.createSnapshot(sub, snapshot1);

  DFSTestUtil.appendFile(hdfs, file1, BLOCKSIZE);

  // Make sure we can read the entire file via its non-snapshot path.
  fileStatus = hdfs.getFileStatus(file1);
  assertEquals("Unexpected file length", BLOCKSIZE * 2, fileStatus.getLen());
  fis = hdfs.open(file1);
  bytesRead = fis.read(buffer, 0, buffer.length);
  assertEquals("Unexpected # bytes read", BLOCKSIZE * 2, bytesRead);
  fis.close();

  Path file1snap1 =
      SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
  fis = hdfs.open(file1snap1);
  fileStatus = hdfs.getFileStatus(file1snap1);
  assertEquals(fileStatus.getLen(), BLOCKSIZE);
  // Make sure we can only read up to the snapshot length.
  bytesRead = fis.read(buffer, 0, buffer.length);
  assertEquals("Unexpected # bytes read", BLOCKSIZE, bytesRead);
  fis.close();

  PrintStream outBackup = System.out;
  PrintStream errBackup = System.err;
  ByteArrayOutputStream bao = new ByteArrayOutputStream();
  System.setOut(new PrintStream(bao));
  System.setErr(new PrintStream(bao));
  // Make sure we can cat the file up to the snapshot length
  FsShell shell = new FsShell();
  try {
    ToolRunner.run(conf, shell, new String[] { "-cat",
    "/TestSnapshotFileLength/sub1/.snapshot/snapshot1/file1" });
    assertEquals("Unexpected # bytes from -cat", BLOCKSIZE, bao.size());
  } finally {
    System.setOut(outBackup);
    System.setErr(errBackup);
  }
}
 
Example 19
Source File: TestSnapshot.java    From hadoop with Apache License 2.0
@Override
void modify() throws Exception {
  assertTrue(fs.exists(file));
  DFSTestUtil.appendFile(fs, file, appendLen);
}
 
Example 20
Source File: TestSnapshot.java    From big-c with Apache License 2.0
@Override
void modify() throws Exception {
  assertTrue(fs.exists(file));
  DFSTestUtil.appendFile(fs, file, appendLen);
}