Java Code Examples for org.apache.hadoop.fs.FileSystem.getContentSummary()

The following Java code examples show how to use the getContentSummary() method of the org.apache.hadoop.fs.FileSystem class.
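Before the project-specific examples below, here is a minimal, self-contained sketch of the call. The path /tmp/data and the default Configuration are placeholder assumptions, not taken from the examples:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ContentSummaryDemo {
  public static void main(String[] args) throws Exception {
    // Assumes a file system reachable via the configuration on the
    // classpath (e.g. core-site.xml); the path below is a placeholder.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    try {
      ContentSummary summary = fs.getContentSummary(new Path("/tmp/data"));
      System.out.println("files: " + summary.getFileCount());
      System.out.println("directories: " + summary.getDirectoryCount());
      System.out.println("bytes: " + summary.getLength());
      System.out.println("space consumed (incl. replication): "
          + summary.getSpaceConsumed());
    } finally {
      fs.close();
    }
  }
}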
Example 1
Project: hadoop   File: BaseTestHttpFSWith.java
private void testContentSummary() throws Exception {
  FileSystem fs = FileSystem.get(getProxiedFSConf());
  Path path = new Path(getProxiedFSTestDir(), "foo.txt");
  OutputStream os = fs.create(path);
  os.write(1);
  os.close();
  ContentSummary hdfsContentSummary = fs.getContentSummary(path);
  fs.close();
  fs = getHttpFSFileSystem();
  ContentSummary httpContentSummary = fs.getContentSummary(path);
  fs.close();
  Assert.assertEquals(httpContentSummary.getDirectoryCount(), hdfsContentSummary.getDirectoryCount());
  Assert.assertEquals(httpContentSummary.getFileCount(), hdfsContentSummary.getFileCount());
  Assert.assertEquals(httpContentSummary.getLength(), hdfsContentSummary.getLength());
  Assert.assertEquals(httpContentSummary.getQuota(), hdfsContentSummary.getQuota());
  Assert.assertEquals(httpContentSummary.getSpaceConsumed(), hdfsContentSummary.getSpaceConsumed());
  Assert.assertEquals(httpContentSummary.getSpaceQuota(), hdfsContentSummary.getSpaceQuota());
}
 
Example 2
Project: alluxio   File: HdfsAndAlluxioUtils_update.java
/**
 * Returns the ContentSummary of the file at the given path.
 *
 * @param fileSystemInfo
 *            file system information
 * @param path
 *            file path
 * @return the ContentSummary, or null if an IOException occurs
 */
public static ContentSummary getContentSummary(FileSystemInfo fileSystemInfo, String path) {
	FileSystem fs = getFileSystem(fileSystemInfo);
	Path uri = new Path(path);
	try {
		pathNotExistCheck(path, fs, uri);
		return fs.getContentSummary(uri);
	} catch (IOException e) {
		e.printStackTrace();
	} finally {
		closeFileSystem(fs);
	}
	return null;
}
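getFileSystem, pathNotExistCheck, and closeFileSystem above are project-specific helpers that are not shown. A rough equivalent of the same pattern using only the standard Hadoop API might look like this (assumes imports of java.io.FileNotFoundException, java.io.IOException, and org.apache.hadoop.fs.*; the exception handling mirrors the example, though in most code you would rethrow rather than return null):

public static ContentSummary getContentSummary(FileSystem fs, String path) {
  Path uri = new Path(path);
  try {
    // Fail early with a clear message if the path is missing.
    if (!fs.exists(uri)) {
      throw new FileNotFoundException("Path does not exist: " + path);
    }
    return fs.getContentSummary(uri);
  } catch (IOException e) {
    // Mirrors the example above: log and fall through to null.
    e.printStackTrace();
    return null;
  }
}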
 
Example 3
Project: aliyun-maxcompute-data-collectors   File: TestTargetDir.java
/** Test that target-dir contains the imported files. */
public void testTargetDir() throws IOException {

  try {
    String targetDir = getWarehouseDir() + "/tempTargetDir";

    ArrayList args = getOutputArgv(true);
    args.add("--target-dir");
    args.add(targetDir);

    // delete target-dir if exists and recreate it
    FileSystem fs = FileSystem.get(getConf());
    Path outputPath = new Path(targetDir);
    if (fs.exists(outputPath)) {
      fs.delete(outputPath, true);
    }

    String[] argv = (String[]) args.toArray(new String[0]);
    runImport(argv);

    ContentSummary summ = fs.getContentSummary(outputPath);

    assertTrue("There's no new imported files in target-dir",
        summ.getFileCount() > 0);

  } catch (Exception e) {
    LOG.error("Got Exception: " + StringUtils.stringifyException(e));
    fail(e.toString());
  }
}
 
Example 4
Project: hadoop   File: TestQuota.java
/**
 * Violate a space quota using files of size < 1 block. Test that block
 * allocation conservatively assumes that for quota checking the entire
 * space of the block is used.
 */
@Test
public void testBlockAllocationAdjustsUsageConservatively()
    throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int BLOCK_SIZE = 6 * 1024;
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  MiniDFSCluster cluster =
    new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  DFSAdmin admin = new DFSAdmin(conf);

  final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  final String webhdfsuri = WebHdfsFileSystem.SCHEME + "://" + nnAddr;
  System.out.println("webhdfsuri=" + webhdfsuri);
  final FileSystem webhdfs = new Path(webhdfsuri).getFileSystem(conf);

  try {
    Path dir = new Path("/test");
    Path file1 = new Path("/test/test1");
    Path file2 = new Path("/test/test2");
    boolean exceededQuota = false;
    final int QUOTA_SIZE = 3 * BLOCK_SIZE; // total space usage including
                                           // replication
    final int FILE_SIZE = BLOCK_SIZE / 2;
    ContentSummary c;

    // Create the directory and set the quota
    assertTrue(fs.mkdirs(dir));
    runCommand(admin, false, "-setSpaceQuota", Integer.toString(QUOTA_SIZE),
        dir.toString());

    // Creating a file should use half the quota
    DFSTestUtil.createFile(fs, file1, FILE_SIZE, (short) 3, 1L);
    DFSTestUtil.waitReplication(fs, file1, (short) 3);
    c = fs.getContentSummary(dir);
    checkContentSummary(c, webhdfs.getContentSummary(dir));
    assertEquals("Quota is half consumed", QUOTA_SIZE / 2,
                 c.getSpaceConsumed());

    // We cannot create the 2nd file because even though the total space
    // used by the two files (2 * 3 * BLOCK_SIZE/2) would fit within the
    // quota (3 * BLOCK_SIZE), when a block for a file is created the space
    // used is adjusted conservatively (3 * BLOCK_SIZE, i.e. it assumes a
    // full block is written), which violates the quota since we've already
    // used half of it for the first file.
    try {
      DFSTestUtil.createFile(fs, file2, FILE_SIZE, (short) 3, 1L);
    } catch (QuotaExceededException e) {
      exceededQuota = true;
    }
    assertTrue("Quota not exceeded", exceededQuota);
  } finally {
    cluster.shutdown();
  }
}
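To make the comment in the test concrete, here is the arithmetic the assertions rely on, as a standalone sketch derived from the constants above (not code from the Hadoop source):

// Worked numbers for the quota test above:
int BLOCK_SIZE = 6 * 1024;              // 6144 bytes
int QUOTA_SIZE = 3 * BLOCK_SIZE;        // 18432 bytes, replication included
int FILE_SIZE  = BLOCK_SIZE / 2;        // 3072 bytes
int firstFileUsage = 3 * FILE_SIZE;     // 9216 = QUOTA_SIZE / 2 once written
int conservativeAlloc = 3 * BLOCK_SIZE; // 18432: a new block is charged in full
// firstFileUsage + conservativeAlloc = 27648 > QUOTA_SIZE, so the second
// create fails with QuotaExceededException, even though the final usage
// (2 * firstFileUsage = 18432) would have fit the quota exactly.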
 
Example 5
Project: hadoop   File: FSOperations.java
/**
 * Executes the filesystem operation.
 *
 * @param fs filesystem instance to use.
 *
 * @return a Map object (JSON friendly) with the content-summary.
 *
 * @throws IOException thrown if an IO error occurred.
 */
@Override
public Map execute(FileSystem fs) throws IOException {
  ContentSummary contentSummary = fs.getContentSummary(path);
  return contentSummaryToJSON(contentSummary);
}
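The contentSummaryToJSON helper is part of HttpFS and is not shown here. A hypothetical sketch of such a JSON-friendly conversion (assumes imports of java.util.LinkedHashMap and java.util.Map; the key names are illustrative, not necessarily the ones HttpFS actually uses) might be:

private static Map<String, Object> contentSummaryToJSON(ContentSummary cs) {
  // LinkedHashMap keeps a stable key order in the serialized output.
  Map<String, Object> json = new LinkedHashMap<String, Object>();
  json.put("directoryCount", cs.getDirectoryCount());
  json.put("fileCount", cs.getFileCount());
  json.put("length", cs.getLength());
  json.put("quota", cs.getQuota());
  json.put("spaceConsumed", cs.getSpaceConsumed());
  json.put("spaceQuota", cs.getSpaceQuota());
  return json;
}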