Java Code Examples for org.apache.hadoop.hdfs.DistributedFileSystem#setQuota()

The following examples show how to use org.apache.hadoop.hdfs.DistributedFileSystem#setQuota(). Each example comes from an open-source project; the source file and license are noted above it.
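In HDFS, setQuota(Path, long, long) sets two independent limits on a directory: a namespace quota, the maximum number of files and directories rooted at it (the directory itself counts toward the total), and a storage-space quota, the maximum number of bytes it may consume, counted after replication. Passing HdfsConstants.QUOTA_DONT_SET leaves a limit unchanged, and HdfsConstants.QUOTA_RESET removes it. The minimal sketch below shows typical usage; the directory path and quota values are illustrative assumptions, not taken from the examples on this page.

// Minimal usage sketch (hypothetical path and limits).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class SetQuotaSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // setQuota() is HDFS-specific; the cast fails on any other FileSystem.
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    Path dir = new Path("/data/projects"); // hypothetical directory

    // Allow at most 10,000 names under dir; leave the space quota unchanged.
    dfs.setQuota(dir, 10000L, HdfsConstants.QUOTA_DONT_SET);

    // Cap storage at 1 TB (after replication); leave the namespace quota unchanged.
    dfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET, 1024L * 1024 * 1024 * 1024);

    // Remove both quotas.
    dfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
  }
}

The current quota and consumption can be read back through a ContentSummary, as Example 7 below does with getQuota(), getSpaceQuota(), and getSpaceConsumed().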
Example 1
Source File: TestHDFSFileContextMainOperations.java    From hadoop with Apache License 2.0
/**
 * Perform operations such as setting a quota, deleting files, and renaming,
 * then ensure the system can apply the edits log during startup.
 */
@Test
public void testEditsLogOldRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);
  
  // Set a namespace quota so that dst1's parent cannot accept new files or directories
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  oldRename(src1, dst1, true, false);
  
  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
 
Example 2
Source File: TestHDFSFileContextMainOperations.java    From hadoop with Apache License 2.0
/**
 * Perform operations such as setting a quota, deleting files, and renaming,
 * then ensure the system can apply the edits log during startup.
 */
@Test
public void testEditsLogRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);
  
  // Set a namespace quota so that dst1's parent cannot accept new files or directories
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  rename(src1, dst1, true, true, false, Rename.OVERWRITE);
  
  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
 
Example 3
Source File: TestHDFSFileContextMainOperations.java    From hadoop with Apache License 2.0
@Test
public void testOldRenameWithQuota() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "test/testOldRenameWithQuota/srcdir/src1");
  Path src2 = getTestRootPath(fc, "test/testOldRenameWithQuota/srcdir/src2");
  Path dst1 = getTestRootPath(fc, "test/testOldRenameWithQuota/dstdir/dst1");
  Path dst2 = getTestRootPath(fc, "test/testOldRenameWithQuota/dstdir/dst2");
  createFile(src1);
  createFile(src2);
  fs.setQuota(src1.getParent(), HdfsConstants.QUOTA_DONT_SET,
      HdfsConstants.QUOTA_DONT_SET);
  fc.mkdir(dst1.getParent(), FileContext.DEFAULT_PERM, true);

  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  /* 
   * Test1: src does not exceed quota and dst has no quota check and hence 
   * accommodates rename
   */
  oldRename(src1, dst1, true, false);

  /*
   * Test2: src does not exceed quota and dst has *no* quota to accommodate 
   * rename. 
   */
  // dstdir's namespace quota (2) counts the directory itself plus dst1, so there is no room for dst2
  oldRename(src2, dst2, false, true);

  /*
   * Test3: src exceeds quota and dst has *no* quota to accommodate rename
   */
  // src1 has no quota to accommodate new rename node
  fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
  oldRename(dst1, src1, false, true);
}
 
Example 4
Source File: SnapshotTestHelper.java    From hadoop with Apache License 2.0
/**
 * Create snapshot for a dir using a given snapshot name
 * 
 * @param hdfs DistributedFileSystem instance
 * @param snapshotRoot The dir to be snapshotted
 * @param snapshotName The name of the snapshot
 * @return The path of the snapshot root
 */
public static Path createSnapshot(DistributedFileSystem hdfs,
    Path snapshotRoot, String snapshotName) throws Exception {
  LOG.info("createSnapshot " + snapshotName + " for " + snapshotRoot);
  assertTrue(hdfs.exists(snapshotRoot));
  hdfs.allowSnapshot(snapshotRoot);
  hdfs.createSnapshot(snapshotRoot, snapshotName);
  // set quota to a large value for testing counts
  hdfs.setQuota(snapshotRoot, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
  return SnapshotTestHelper.getSnapshotRoot(snapshotRoot, snapshotName);
}
 
Example 5
Source File: TestWithMiniClusterBase.java    From NNAnalytics with Apache License 2.0
protected void addFiles(int numOfFiles, long sleepBetweenMs) throws Exception {
  DistributedFileSystem fileSystem = (DistributedFileSystem) FileSystem.get(CONF);
  for (int i = 0; i < numOfFiles; i++) {
    int dirNumber1 = RANDOM.nextInt(10);
    Path dirPath = new Path("/dir" + dirNumber1);
    int dirNumber2 = RANDOM.nextInt(10);
    dirPath = dirPath.suffix("/dir" + dirNumber2);
    int dirNumber3 = RANDOM.nextInt(10);
    dirPath = dirPath.suffix("/dir" + dirNumber3);
    fileSystem.mkdirs(dirPath);
    Path filePath = dirPath.suffix("/file" + i);
    int fileType = RANDOM.nextInt(7);
    switch (fileType) {
      case 0:
        filePath = filePath.suffix(".zip");
        break;
      case 1:
        filePath = filePath.suffix(".avro");
        break;
      case 2:
        filePath = filePath.suffix(".orc");
        break;
      case 3:
        filePath = filePath.suffix(".txt");
        break;
      case 4:
        filePath = filePath.suffix(".json");
        break;
      case 5:
        filePath = dirPath.suffix("/part-r-" + i);
        break;
      case 6:
        filePath = filePath.suffix("_45454");
        break;
      default:
        break;
    }
    int fileSize = RANDOM.nextInt(4);
    switch (fileSize) {
      case 1:
        DFSTestUtil.writeFile(fileSystem, filePath, new String(TINY_FILE_BYTES));
        break;
      case 2:
        DFSTestUtil.writeFile(fileSystem, filePath, new String(SMALL_FILE_BYTES));
        break;
      case 3:
        DFSTestUtil.writeFile(fileSystem, filePath, new String(MEDIUM_FILE_BYTES));
        break;
      case 0:
      default:
        DFSTestUtil.writeFile(fileSystem, filePath, "");
        break;
    }
    if (dirNumber1 == 1) {
      fileSystem.setQuota(filePath.getParent(), 100L, 100000000000L);
    }
    int user = RANDOM.nextInt(3);
    switch (user) {
      case 1:
        fileSystem.setOwner(filePath, USERS[0], USERS[0]);
        break;
      case 2:
        fileSystem.setOwner(filePath, USERS[1], USERS[1]);
        break;
      case 0:
      default:
        break;
    }
    short repFactor = (short) RANDOM.nextInt(4);
    if (repFactor != 0) {
      fileSystem.setReplication(filePath, repFactor);
    }
    int weeksAgo = RANDOM.nextInt(60);
    long timeStamp = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(weeksAgo * 7);
    if (weeksAgo != 0) {
      fileSystem.setTimes(filePath, timeStamp, timeStamp);
    }
    if (sleepBetweenMs != 0L) {
      Thread.sleep(sleepBetweenMs);
    }
  }
}
 
Example 6
Source File: TestHDFSFileContextMainOperations.java    From hadoop with Apache License 2.0
@Test
public void testRenameWithQuota() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "test/testRenameWithQuota/srcdir/src1");
  Path src2 = getTestRootPath(fc, "test/testRenameWithQuota/srcdir/src2");
  Path dst1 = getTestRootPath(fc, "test/testRenameWithQuota/dstdir/dst1");
  Path dst2 = getTestRootPath(fc, "test/testRenameWithQuota/dstdir/dst2");
  createFile(src1);
  createFile(src2);
  fs.setQuota(src1.getParent(), HdfsConstants.QUOTA_DONT_SET,
      HdfsConstants.QUOTA_DONT_SET);
  fc.mkdir(dst1.getParent(), FileContext.DEFAULT_PERM, true);

  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  /* 
   * Test1: src does not exceed quota and dst has no quota check and hence 
   * accommodates rename
   */
  // rename uses dstdir quota=1
  rename(src1, dst1, false, true, false, Rename.NONE);
  // rename reuses dstdir quota=1
  rename(src2, dst1, true, true, false, Rename.OVERWRITE);

  /*
   * Test2: src does not exceed quota and dst has *no* quota to accommodate 
   * rename. 
   */
  // dstdir's namespace quota (2) counts the directory itself plus dst1, so there is no room for dst2
  createFile(src2);
  rename(src2, dst2, false, false, true, Rename.NONE);

  /*
   * Test3: src exceeds quota and dst has *no* quota to accommodate rename
   * rename to a destination that does not exist
   */
  // src1 has no quota to accommodate new rename node
  fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
  rename(dst1, src1, false, false, true, Rename.NONE);
  
  /*
   * Test4: src exceeds quota and dst has *no* quota to accommodate rename
   * rename to a destination that exists and quota freed by deletion of dst
   * is same as quota needed by src.
   */
  // src1 has no quota to accommodate new rename node
  fs.setQuota(src1.getParent(), 100, HdfsConstants.QUOTA_DONT_SET);
  createFile(src1);
  fs.setQuota(src1.getParent(), 1, HdfsConstants.QUOTA_DONT_SET);
  rename(dst1, src1, true, true, false, Rename.OVERWRITE);
}
 
Example 7
Source File: TestQuotasWithHA.java    From hadoop with Apache License 2.0
/**
 * Test that quotas are properly tracked by the standby through
 * create, append, delete.
 */
@Test(timeout=60000)
public void testQuotasTrackedOnStandby() throws Exception {
  fs.mkdirs(TEST_DIR);
  DistributedFileSystem dfs = (DistributedFileSystem)fs;
  dfs.setQuota(TEST_DIR, NS_QUOTA, DS_QUOTA);
  long expectedSize = 3 * BLOCK_SIZE + BLOCK_SIZE/2;
  DFSTestUtil.createFile(fs, TEST_FILE, expectedSize, (short)1, 1L);

  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  ContentSummary cs = nn1.getRpcServer().getContentSummary(TEST_DIR_STR);
  assertEquals(NS_QUOTA, cs.getQuota());
  assertEquals(DS_QUOTA, cs.getSpaceQuota());
  assertEquals(expectedSize, cs.getSpaceConsumed());
  assertEquals(1, cs.getDirectoryCount());
  assertEquals(1, cs.getFileCount());

  // Append to the file and make sure quota is updated correctly.
  FSDataOutputStream stm = fs.append(TEST_FILE);
  try {
    byte[] data = new byte[(int) (BLOCK_SIZE * 3 / 2)];
    stm.write(data);
    expectedSize += data.length;
  } finally {
    IOUtils.closeStream(stm);
  }
  
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  cs = nn1.getRpcServer().getContentSummary(TEST_DIR_STR);
  assertEquals(NS_QUOTA, cs.getQuota());
  assertEquals(DS_QUOTA, cs.getSpaceQuota());
  assertEquals(expectedSize, cs.getSpaceConsumed());
  assertEquals(1, cs.getDirectoryCount());
  assertEquals(1, cs.getFileCount());

  // Delete the file and make sure quota consumption drops back to zero.
  fs.delete(TEST_FILE, true);
  expectedSize = 0;
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  cs = nn1.getRpcServer().getContentSummary(TEST_DIR_STR);
  assertEquals(NS_QUOTA, cs.getQuota());
  assertEquals(DS_QUOTA, cs.getSpaceQuota());
  assertEquals(expectedSize, cs.getSpaceConsumed());
  assertEquals(1, cs.getDirectoryCount());
  assertEquals(0, cs.getFileCount());
}
 
Example 8
Source File: TestINodeFile.java    From hadoop with Apache License 2.0
/**
 * FSDirectory#unprotectedSetQuota creates a new INodeDirectoryWithQuota to
 * replace the original INodeDirectory. Before HDFS-4243, the parent field of
 * the children INodes of the target INodeDirectory was not updated to point
 * to the new INodeDirectoryWithQuota. This test covers that scenario.
 */
@Test
public void testGetFullPathNameAfterSetQuota() throws Exception {
  long fileLen = 1024;
  replication = 3;
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(replication).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    FSDirectory fsdir = fsn.getFSDirectory();
    DistributedFileSystem dfs = cluster.getFileSystem();

    // Create a file for test
    final Path dir = new Path("/dir");
    final Path file = new Path(dir, "file");
    DFSTestUtil.createFile(dfs, file, fileLen, replication, 0L);

    // Check the full path name of the INode associated with the file
    INode fnode = fsdir.getINode(file.toString());
    assertEquals(file.toString(), fnode.getFullPathName());
    
    // Call FSDirectory#unprotectedSetQuota which calls
    // INodeDirectory#replaceChild
    dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10);
    INodeDirectory dirNode = getDir(fsdir, dir);
    assertEquals(dir.toString(), dirNode.getFullPathName());
    assertTrue(dirNode.isWithQuota());
    
    final Path newDir = new Path("/newdir");
    final Path newFile = new Path(newDir, "file");
    // Also rename dir
    dfs.rename(dir, newDir, Options.Rename.OVERWRITE);
    // /dir/file now should be renamed to /newdir/file
    fnode = fsdir.getINode(newFile.toString());
    // getFullPathName can return correct result only if the parent field of
    // child node is set correctly
    assertEquals(newFile.toString(), fnode.getFullPathName());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 9
Source File: OfflineEditsViewerHelper.java    From RDFS with Apache License 2.0
/**
 * Run file operations to create edits for all op codes
 * to be tested.
 */
private void runOperations() throws IOException {

  LOG.info("Creating edits by performing fs operations");
  // no instanceof check: if the file system is not a DistributedFileSystem
  // the cast throws an exception, which is what we want
  DistributedFileSystem dfs =
    (DistributedFileSystem)cluster.getFileSystem();
  // OP_ADD 0, OP_SET_GENSTAMP 10
  Path pathFileCreate = new Path("/file_create");
  FSDataOutputStream s = dfs.create(pathFileCreate);
  // OP_CLOSE 9
  s.close();
  // OP_RENAME 1
  Path pathFileMoved = new Path("/file_moved");
  dfs.rename(pathFileCreate, pathFileMoved);
  // OP_DELETE 2
  dfs.delete(pathFileMoved, false);
  // OP_MKDIR 3
  Path pathDirectoryMkdir = new Path("/directory_mkdir");
  dfs.mkdirs(pathDirectoryMkdir);
  // OP_SET_REPLICATION 4
  s = dfs.create(pathFileCreate);
  s.close();
  dfs.setReplication(pathFileCreate, (short)1);
  // OP_SET_PERMISSIONS 7
  short permission = 0777;
  dfs.setPermission(pathFileCreate, new FsPermission(permission));
  // OP_SET_OWNER 8
  dfs.setOwner(pathFileCreate, "newOwner", null);
  // OP_CLOSE 9 see above
  // OP_SET_GENSTAMP 10 see above
  // OP_SET_NS_QUOTA 11 obsolete
  // OP_CLEAR_NS_QUOTA 12 obsolete
  // OP_TIMES 13
  long mtime = 1285195527000L; // Wed, 22 Sep 2010 22:45:27 GMT
  long atime = mtime;
  dfs.setTimes(pathFileCreate, mtime, atime);
  // OP_SET_QUOTA 14
  dfs.setQuota(pathDirectoryMkdir, 1000L, FSConstants.QUOTA_DONT_SET);
  // OP_CONCAT_DELETE 16
  Path   pathConcatTarget = new Path("/file_concat_target");
  Path[] pathConcatFiles  = new Path[2];
  pathConcatFiles[0]      = new Path("/file_concat_0");
  pathConcatFiles[1]      = new Path("/file_concat_1");

  long  length      = blockSize * 3; // multiple of blocksize for concat
  short replication = 1;
  long  seed        = 1;

  DFSTestUtil.createFile(dfs, pathConcatTarget, length, replication, seed);
  DFSTestUtil.createFile(dfs, pathConcatFiles[0], length, replication, seed);
  DFSTestUtil.createFile(dfs, pathConcatFiles[1], length, replication, seed);
  dfs.concat(pathConcatTarget, pathConcatFiles, false);

  // sync to disk, otherwise we parse partial edits
  cluster.getNameNode().getFSImage().getEditLog().logSync();
  dfs.close();
}