org.apache.hadoop.fs.ContentSummary Java Examples

The following examples show how to use org.apache.hadoop.fs.ContentSummary. They are drawn from a range of open-source projects; each example notes its source file, originating project, and license.
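
Before the project examples, here is a minimal, self-contained sketch of the read-side pattern most of them share: ask a FileSystem for a ContentSummary and inspect its counters. The path and configuration are hypothetical; this is illustrative, not taken from any of the projects below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ContentSummaryDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/data");  // hypothetical path
    ContentSummary cs = fs.getContentSummary(path);
    System.out.println("length          = " + cs.getLength());
    System.out.println("file count      = " + cs.getFileCount());
    System.out.println("directory count = " + cs.getDirectoryCount());
    System.out.println("space consumed  = " + cs.getSpaceConsumed());
    System.out.println("quota           = " + cs.getQuota());       // -1 when unset
    System.out.println("space quota     = " + cs.getSpaceQuota());  // -1 when unset
  }
}
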
Example #1
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static ContentSummary convert(ContentSummaryProto cs) {
  if (cs == null) return null;
  ContentSummary.Builder builder = new ContentSummary.Builder();
  builder.length(cs.getLength()).
      fileCount(cs.getFileCount()).
      directoryCount(cs.getDirectoryCount()).
      quota(cs.getQuota()).
      spaceConsumed(cs.getSpaceConsumed()).
      spaceQuota(cs.getSpaceQuota());
  if (cs.hasTypeQuotaInfos()) {
    for (HdfsProtos.StorageTypeQuotaInfoProto info :
        cs.getTypeQuotaInfos().getTypeQuotaInfoList()) {
      StorageType type = PBHelper.convertStorageType(info.getType());
      builder.typeConsumed(type, info.getConsumed());
      builder.typeQuota(type, info.getQuota());
    }
  }
  return builder.build();
}
 
Example #2
Source File: DistributedCacheTestUtil.java    From pentaho-hadoop-shims with Apache License 2.0
/**
 * Utility to attempt to stage a file to HDFS for use with Distributed Cache.
 *
 * @param ch                Distributed Cache Helper
 * @param source            File or directory to stage
 * @param fs                FileSystem to stage to
 * @param root              Root directory to clean up when this test is complete
 * @param dest              Destination path to stage to
 * @param expectedFileCount Expected number of files to exist in the destination once staged
 * @param expectedDirCount  Expected number of directories to exist in the destination once staged
 * @throws Exception
 */
static void stageForCacheTester( DistributedCacheUtilImpl ch, FileObject source, FileSystem fs, Path root, Path dest,
                                 int expectedFileCount, int expectedDirCount ) throws Exception {
  try {
    ch.stageForCache( source, fs, dest, true );

    assertTrue( fs.exists( dest ) );
    ContentSummary cs = fs.getContentSummary( dest );
    assertEquals( expectedFileCount, cs.getFileCount() );
    assertEquals( expectedDirCount, cs.getDirectoryCount() );
    assertEquals( FsPermission.createImmutable( (short) 0755 ), fs.getFileStatus( dest ).getPermission() );
  } finally {
    // Clean up after ourselves
    if ( !fs.delete( root, true ) ) {
      log.logError( "error deleting FileSystem temp dir " + root );
    }
  }
}
 
Example #3
Source File: TestUnionQuery.java    From tajo with Apache License 2.0
private void verifyResultStats(Optional<TajoResultSetBase[]> existing, long numRows) throws Exception {
  assertTrue(existing.isPresent());

  // Get TableStats using TajoResultSetBase.
  TajoResultSetBase[] resultSet = existing.get();
  QueryId qid = resultSet[0].getQueryId();
  QueryInfo queryInfo = testingCluster.getMaster().getContext().getQueryJobManager().getFinishedQuery(qid);
  TableDesc desc = queryInfo.getResultDesc();
  TableStats stats = desc.getStats();

  // Compare the expected number of rows to the number of rows in TableStats.
  assertEquals(numRows, stats.getNumRows().longValue());

  // Compare the on-disk size of the query result to the number of bytes in TableStats.
  FileSystem fs = FileSystem.get(conf);
  Path path = new Path(desc.getUri());
  assertTrue(fs.exists(path));
  ContentSummary summary = fs.getContentSummary(path);
  assertEquals(summary.getLength(), stats.getNumBytes().longValue());

  closeResultSets(resultSet);
}
 
Example #4
Source File: IgniteHadoopFileSystem.java    From ignite with Apache License 2.0
/** {@inheritDoc} */
@Override public ContentSummary getContentSummary(Path f) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        IgfsPathSummary sum = rmtClient.contentSummary(convert(f));

        return new ContentSummary(sum.totalLength(), sum.filesCount(), sum.directoriesCount(),
            -1, sum.totalLength(), rmtClient.fsStatus().spaceTotal());
    }
    finally {
        leaveBusy();
    }
}
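
A note on the constructor used here: its positional arguments map, in order, to (length, fileCount, directoryCount, quota, spaceConsumed, spaceQuota), so Ignite passes -1 to mark the namespace quota as unset and reports the file system's total capacity as the space quota.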
 
Example #5
Source File: PBHelper.java    From big-c and hadoop with Apache License 2.0
public static ContentSummaryProto convert(ContentSummary cs) {
  if (cs == null) return null;
  ContentSummaryProto.Builder builder = ContentSummaryProto.newBuilder();
      builder.setLength(cs.getLength()).
      setFileCount(cs.getFileCount()).
      setDirectoryCount(cs.getDirectoryCount()).
      setQuota(cs.getQuota()).
      setSpaceConsumed(cs.getSpaceConsumed()).
      setSpaceQuota(cs.getSpaceQuota());

  if (cs.isTypeQuotaSet() || cs.isTypeConsumedAvailable()) {
    HdfsProtos.StorageTypeQuotaInfosProto.Builder isb =
        HdfsProtos.StorageTypeQuotaInfosProto.newBuilder();
    for (StorageType t: StorageType.getTypesSupportingQuota()) {
      HdfsProtos.StorageTypeQuotaInfoProto info =
          HdfsProtos.StorageTypeQuotaInfoProto.newBuilder().
              setType(convertStorageType(t)).
              setConsumed(cs.getTypeConsumed(t)).
              setQuota(cs.getTypeQuota(t)).
              build();
      isb.addTypeQuotaInfo(info);
    }
    builder.setTypeQuotaInfos(isb);
  }
  return builder.build();
}
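
Example #1 decodes a ContentSummaryProto into a ContentSummary; this example encodes the opposite direction, so the two PBHelper overloads form an encode/decode pair. A minimal round-trip sketch with hypothetical counter values, assuming both overloads are on the classpath:

ContentSummary original = new ContentSummary.Builder().
    length(1024L).
    fileCount(3L).
    directoryCount(1L).
    quota(-1L).
    spaceConsumed(3072L).
    spaceQuota(-1L).build();
// Encode to the wire form and decode back; the scalar counters
// should survive the round trip unchanged.
ContentSummaryProto wire = PBHelper.convert(original);
ContentSummary decoded = PBHelper.convert(wire);
assert original.getLength() == decoded.getLength();
assert original.getSpaceConsumed() == decoded.getSpaceConsumed();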
 
Example #6
Source File: HttpFSFileSystem.java    From big-c and hadoop with Apache License 2.0
@Override
public ContentSummary getContentSummary(Path f) throws IOException {
  Map<String, String> params = new HashMap<String, String>();
  params.put(OP_PARAM, Operation.GETCONTENTSUMMARY.toString());
  HttpURLConnection conn =
    getConnection(Operation.GETCONTENTSUMMARY.getMethod(), params, f, true);
  HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
  JSONObject json = (JSONObject) ((JSONObject)
    HttpFSUtils.jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
  return new ContentSummary.Builder().
      length((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON)).
      fileCount((Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON)).
      directoryCount((Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON)).
      quota((Long) json.get(CONTENT_SUMMARY_QUOTA_JSON)).
      spaceConsumed((Long) json.get(CONTENT_SUMMARY_SPACE_CONSUMED_JSON)).
      spaceQuota((Long) json.get(CONTENT_SUMMARY_SPACE_QUOTA_JSON)).build();
}
 
Example #7
Source File: TestQuotaByStorageType.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testContentSummaryWithoutStoragePolicy() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);

  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(!fnode.isQuotaSet());

  // Create file of size 2 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);

  // Verify getContentSummary without any quota set
  // Expect no type quota and usage information available
  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
  for (StorageType t : StorageType.values()) {
    assertEquals(cs.getTypeConsumed(t), 0);
    assertEquals(cs.getTypeQuota(t), -1);
  }
}
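
Unset quotas are reported as -1 rather than 0, which is why the loop above expects getTypeQuota(t) == -1 for every storage type while the consumed values are 0.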
 
Example #8
Source File: DataValidationInputFormat.java    From jumbune with GNU Lesser General Public License v3.0
/**
 * Finds files inside directories recursively and adds them to fileStatusList.
 * @param job refers to JobContext that is being used to read the configurations of the job that ran
 * @param minSize refers to the minimum file block size.
 * @param maxSize refers to the maximum file block size.
 * @param splits refers to the list of splits being generated.
 * @param fileStatusList list of FileStatus
 * @throws IOException Signals that an I/O exception has occurred.
 */
public void setData(JobContext job, long minSize, long maxSize,
		List<InputSplit> splits, List<FileStatus> fileStatusList) throws IOException {
	for(FileStatus file:fileStatusList) {
		if (file.isDirectory()) {
			Path dirPath = file.getPath();
			FileStatus [] fileArray = dirPath.getFileSystem(job.getConfiguration()).listStatus(dirPath);
			setData(job, minSize, maxSize, splits, Arrays.asList(fileArray));
		} else {
			// Only generate splits for non-empty files
			Path path  = file.getPath();
			FileSystem fs = path.getFileSystem(job.getConfiguration());
			ContentSummary cs = fs.getContentSummary(path);
			if (cs.getLength() > 0) {
				generateSplits(job, minSize, maxSize, splits, file);	
			} 
	    }
	}
}
 
Example #9
Source File: TestQuotaByStorageType.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testContentSummaryWithoutQuotaByStorageType() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);

  // set storage policy on directory "foo" to ONESSD
  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(!fnode.isQuotaSet());

  // Create file of size 2 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);

  // Verify getContentSummary without any quota set
  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
  assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
  assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
}
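
The type breakdown follows from the ONESSD policy: one replica of each block is placed on SSD (file1Len) while the remaining replicas go to DISK, so the DISK assertion of file1Len * 2 implies a replication factor of 3 in this test fixture.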
 
Example #10
Source File: DistributedCacheUtilImplOSDependentTest.java    From pentaho-hadoop-shims with Apache License 2.0
@Test
public void stagePluginsForCache() throws Exception {
  DistributedCacheUtilImpl ch = new DistributedCacheUtilImpl();

  Configuration conf = new Configuration();
  FileSystem fs = DistributedCacheTestUtil.getLocalFileSystem( conf );

  Path pluginsDir = new Path( "bin/test/plugins-installation-dir" );

  FileObject pluginDir = DistributedCacheTestUtil.createTestFolderWithContent();

  try {
    ch.stagePluginsForCache( fs, pluginsDir, "bin/test/sample-folder" );
    Path pluginInstallPath = new Path( pluginsDir, "bin/test/sample-folder" );
    assertTrue( fs.exists( pluginInstallPath ) );
    ContentSummary summary = fs.getContentSummary( pluginInstallPath );
    assertEquals( 6, summary.getFileCount() );
    assertEquals( 9, summary.getDirectoryCount() );
  } finally {
    pluginDir.delete( new AllFileSelector() );
    fs.delete( pluginsDir, true );
  }
}
 
Example #11
Source File: ColumnToRowJob.java    From kylin with Apache License 2.0
private int calReducerNum(Path input) {
    try {
        long bytesPerReducer = DEFAULT_SIZE_PER_REDUCER;
        FileSystem fs = FileSystem.get(job.getConfiguration());
        ContentSummary cs = fs.getContentSummary(input);
        long totalInputFileSize = cs.getLength();

        int reducers = (int) ((totalInputFileSize + bytesPerReducer - 1) / bytesPerReducer);
        reducers = Math.max(1, reducers);
        reducers = Math.min(MAX_REDUCERS, reducers);
        logger.info("BytesPerReducer={}, maxReducers={}, totalInputFileSize={}, setReducers={}", bytesPerReducer,
                MAX_REDUCERS, totalInputFileSize, reducers);
        return reducers;
    } catch (IOException e) {
        logger.error("error when calculating reducer number", e);
    }
    return 1;
}
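
The expression (totalInputFileSize + bytesPerReducer - 1) / bytesPerReducer is integer ceiling division: any remainder adds one more reducer before the result is clamped to the [1, MAX_REDUCERS] range. A short sketch with hypothetical sizes:

// Hypothetical sizes illustrating the ceiling division above.
long bytesPerReducer = 1L << 30;            // assume 1 GiB per reducer
long exactFit = 10L << 30;                  // exactly 10 GiB
long withRemainder = (10L << 30) + 1;       // 10 GiB plus one byte
System.out.println((exactFit + bytesPerReducer - 1) / bytesPerReducer);       // 10
System.out.println((withRemainder + bytesPerReducer - 1) / bytesPerReducer);  // 11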
 
Example #12
Source File: TestFSMainOperationsWebHdfs.java    From hadoop with Apache License 2.0
@Test
public void testTruncate() throws Exception {
  final short repl = 3;
  final int blockSize = 1024;
  final int numOfBlocks = 2;
  Path dir = getTestRootPath(fSys, "test/hadoop");
  Path file = getTestRootPath(fSys, "test/hadoop/file");

  final byte[] data = getFileData(numOfBlocks, blockSize);
  createFile(fSys, file, data, blockSize, repl);

  final int newLength = blockSize;

  boolean isReady = fSys.truncate(file, newLength);

  Assert.assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fSys.getFileStatus(file);
  Assert.assertEquals(fileStatus.getLen(), newLength);
  AppendTestUtil.checkFullFile(fSys, file, newLength, data, file.toString());

  ContentSummary cs = fSys.getContentSummary(dir);
  Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
      newLength * repl);
  Assert.assertTrue("Deleted", fSys.delete(dir, true));
}
 
Example #13
Source File: INode.java    From hadoop with Apache License 2.0
/**
 * Compute {@link ContentSummary}. 
 */
public final ContentSummary computeAndConvertContentSummary(
    ContentSummaryComputationContext summary) {
  ContentCounts counts = computeContentSummary(summary).getCounts();
  final QuotaCounts q = getQuotaCounts();
  return new ContentSummary.Builder().
      length(counts.getLength()).
      fileCount(counts.getFileCount() + counts.getSymlinkCount()).
      directoryCount(counts.getDirectoryCount()).
      quota(q.getNameSpace()).
      spaceConsumed(counts.getStoragespace()).
      spaceQuota(q.getStorageSpace()).
      typeConsumed(counts.getTypeSpaces()).
      typeQuota(q.getTypeSpaces().asArray()).
      build();
}
 
Example #14
Source File: TestQuotaByStorageType.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateDelete() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);

  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  // set quota by storage type on directory "foo"
  dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 10);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());

  // Create file of size 2.5 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);

  // Verify space consumed and remaining quota
  long storageTypeConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, storageTypeConsumed);

  // Delete file and verify the consumed space of the storage type is updated
  dfs.delete(createdFile1, false);
  storageTypeConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(0, storageTypeConsumed);

  QuotaCounts counts = new QuotaCounts.Builder().build();
  fnode.computeQuotaUsage(fsn.getBlockManager().getStoragePolicySuite(), counts, true);
  assertEquals(fnode.dumpTreeRecursively().toString(), 0,
      counts.getTypeSpaces().get(StorageType.SSD));

  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), 0);
  assertEquals(cs.getTypeConsumed(StorageType.SSD), 0);
  assertEquals(cs.getTypeConsumed(StorageType.DISK), 0);
}
 
Example #15
Source File: IgniteHadoopFileSystemAbstractSelfTest.java    From ignite with Apache License 2.0
/**
 * Test expected failures for 'get content summary' operation.
 *
 * @param fs File system to test.
 * @param path Path to evaluate content summary for.
 */
private void assertContentSummaryFails(final FileSystem fs, final Path path) {
    GridTestUtils.assertThrows(log, new Callable<ContentSummary>() {
        @Override public ContentSummary call() throws Exception {
            return fs.getContentSummary(path);
        }
    }, FileNotFoundException.class, null);
}
 
Example #16
Source File: Count.java    From hadoop with Apache License 2.0
@Override
protected void processOptions(LinkedList<String> args) {
  CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE,
      OPTION_QUOTA, OPTION_HUMAN, OPTION_HEADER);
  cf.parse(args);
  if (args.isEmpty()) { // default path is the current working directory
    args.add(".");
  }
  showQuotas = cf.getOpt(OPTION_QUOTA);
  humanReadable = cf.getOpt(OPTION_HUMAN);
  if (cf.getOpt(OPTION_HEADER)) {
    out.println(ContentSummary.getHeader(showQuotas) + "PATHNAME");
  }
}
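
The header option prints the column names before any results; ContentSummary.getHeader(showQuotas) returns the counter headers, and the quota columns are included only when showQuotas is true. A one-line sketch of the same call with an assumed flag value:

// Quota columns appear in the header only when showQuotas is true.
boolean showQuotas = true;  // assumed for illustration
System.out.println(ContentSummary.getHeader(showQuotas) + "PATHNAME");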
 
Example #17
Source File: TestChRootedFileSystem.java    From hadoop with Apache License 2.0
@Test
public void testGetContentSummary() throws IOException {
  // GetContentSummary of a dir
  fSys.mkdirs(new Path("/newDir/dirFoo"));
  ContentSummary cs = fSys.getContentSummary(new Path("/newDir/dirFoo"));
  Assert.assertEquals(-1L, cs.getQuota());
  Assert.assertEquals(-1L, cs.getSpaceQuota());
}
 
Example #18
Source File: FSDirectory.java    From RDFS with Apache License 2.0
ContentSummary getContentSummary(String src) throws IOException {
  String srcs = normalizePath(src);
  readLock();
  try {
    INode targetNode = rootDir.getNode(srcs);
    if (targetNode == null) {
      throw new FileNotFoundException("File does not exist: " + srcs);
    }
    else {
      return targetNode.computeContentSummary();
    }
  } finally {
    readUnlock();
  }
}
 
Example #19
Source File: TestHDFSConcat.java    From big-c with Apache License 2.0
@Test
public void testConcatWithQuotaIncrease() throws IOException {
  final short repl = 3;
  final int srcNum = 10;
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  final Path[] srcs = new Path[srcNum];
  final Path target = new Path(bar, "target");
  DFSTestUtil.createFile(dfs, target, blockSize, repl, 0L);

  final long dsQuota = blockSize * repl + blockSize * srcNum * REPL_FACTOR;
  dfs.setQuota(foo, Long.MAX_VALUE - 1, dsQuota);

  for (int i = 0; i < srcNum; i++) {
    srcs[i] = new Path(bar, "src" + i);
    DFSTestUtil.createFile(dfs, srcs[i], blockSize, REPL_FACTOR, 0L);
  }

  ContentSummary summary = dfs.getContentSummary(bar);
  Assert.assertEquals(11, summary.getFileCount());
  Assert.assertEquals(dsQuota, summary.getSpaceConsumed());

  try {
    dfs.concat(target, srcs);
    fail("QuotaExceededException expected");
  } catch (RemoteException e) {
    Assert.assertTrue(
        e.unwrapRemoteException() instanceof QuotaExceededException);
  }

  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  dfs.concat(target, srcs);
  summary = dfs.getContentSummary(bar);
  Assert.assertEquals(1, summary.getFileCount());
  Assert.assertEquals(blockSize * repl * (srcNum + 1),
      summary.getSpaceConsumed());
}
 
Example #20
Source File: TestHDFSConcat.java    From hadoop with Apache License 2.0
/**
 * make sure we update the quota correctly after concat
 */
@Test
public void testConcatWithQuotaDecrease() throws IOException {
  final short srcRepl = 3; // note this differs from REPL_FACTOR
  final int srcNum = 10;
  final Path foo = new Path("/foo");
  final Path[] srcs = new Path[srcNum];
  final Path target = new Path(foo, "target");
  DFSTestUtil.createFile(dfs, target, blockSize, REPL_FACTOR, 0L);

  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);

  for (int i = 0; i < srcNum; i++) {
    srcs[i] = new Path(foo, "src" + i);
    DFSTestUtil.createFile(dfs, srcs[i], blockSize * 2, srcRepl, 0L);
  }

  ContentSummary summary = dfs.getContentSummary(foo);
  Assert.assertEquals(11, summary.getFileCount());
  Assert.assertEquals(blockSize * REPL_FACTOR +
          blockSize * 2 * srcRepl * srcNum, summary.getSpaceConsumed());

  dfs.concat(target, srcs);
  summary = dfs.getContentSummary(foo);
  Assert.assertEquals(1, summary.getFileCount());
  Assert.assertEquals(
      blockSize * REPL_FACTOR + blockSize * 2 * REPL_FACTOR * srcNum,
      summary.getSpaceConsumed());
}
 
Example #21
Source File: TestFileTruncate.java    From big-c and hadoop with Apache License 2.0
@Test
public void testTruncate4Symlink() throws IOException {
  final int fileLength = 3 * BLOCK_SIZE;

  final Path parent = new Path("/test");
  fs.mkdirs(parent);
  final byte[] contents = AppendTestUtil.initBuffer(fileLength);
  final Path file = new Path(parent, "testTruncate4Symlink");
  writeContents(contents, fileLength, file);

  final Path link = new Path(parent, "link");
  fs.createSymlink(file, link, false);

  final int newLength = fileLength/3;
  boolean isReady = fs.truncate(link, newLength);

  assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fs.getFileStatus(file);
  assertThat(fileStatus.getLen(), is((long) newLength));

  ContentSummary cs = fs.getContentSummary(parent);
  assertEquals("Bad disk space usage",
      cs.getSpaceConsumed(), newLength * REPLICATION);
  // validate the file content
  checkFullFile(file, newLength, contents);

  fs.delete(parent, true);
}
 
Example #22
Source File: TestQuotaByStorageType.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateAppend() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);

  // set storage policy on directory "foo" to ONESSD
  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  // set quota by storage type on directory "foo"
  dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 4);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());

  // Create file of size 2 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);

  // Verify space consumed and remaining quota
  long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  // append several blocks
  int appendLen = BLOCKSIZE * 2;
  DFSTestUtil.appendFile(dfs, createdFile1, appendLen);
  file1Len += appendLen;

  ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
  assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
  assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
}
 
Example #23
Source File: TestFileTruncate.java    From hadoop with Apache License 2.0
/**
 * Truncate files of different sizes byte by byte.
 */
@Test
public void testBasicTruncate() throws IOException {
  int startingFileSize = 3 * BLOCK_SIZE;

  Path parent = new Path("/test");
  fs.mkdirs(parent);
  fs.setQuota(parent, 100, 1000);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  for (int fileLength = startingFileSize; fileLength > 0;
                                          fileLength -= BLOCK_SIZE - 1) {
    for (int toTruncate = 0; toTruncate <= fileLength; toTruncate++) {
      final Path p = new Path(parent, "testBasicTruncate" + fileLength);
      writeContents(contents, fileLength, p);

      int newLength = fileLength - toTruncate;
      boolean isReady = fs.truncate(p, newLength);
      LOG.info("fileLength=" + fileLength + ", newLength=" + newLength
          + ", toTruncate=" + toTruncate + ", isReady=" + isReady);

      assertEquals("File must be closed for zero truncate"
          + " or truncating at the block boundary",
          isReady, toTruncate == 0 || newLength % BLOCK_SIZE == 0);
      if (!isReady) {
        checkBlockRecovery(p);
      }

      ContentSummary cs = fs.getContentSummary(parent);
      assertEquals("Bad disk space usage",
          cs.getSpaceConsumed(), newLength * REPLICATION);
      // validate the file content
      checkFullFile(p, newLength, contents);
    }
  }
  fs.delete(parent, true);
}
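
The isReady flag reflects truncate's contract as exercised here: the call completes immediately only for a zero-byte truncate or a cut at a block boundary; otherwise the last block must go through recovery, so the test waits in checkBlockRecovery before verifying sizes and content.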
 
Example #24
Source File: TestCount.java    From big-c with Apache License 2.0
@BeforeClass
public static void setup() {
  conf = new Configuration();
  conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
  mockFs = mock(FileSystem.class);
  fileStat = mock(FileStatus.class);
  mockCs = mock(ContentSummary.class);
  when(fileStat.isFile()).thenReturn(true);
}
 
Example #25
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public ContentSummary getContentSummary(String path)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  GetContentSummaryRequestProto req = GetContentSummaryRequestProto
      .newBuilder()
      .setPath(path)
      .build();
  try {
    return PBHelper.convert(rpcProxy.getContentSummary(null, req)
        .getSummary());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #26
Source File: ClientNamenodeProtocolServerSideTranslatorPB.java    From big-c with Apache License 2.0
@Override
public GetContentSummaryResponseProto getContentSummary(
    RpcController controller, GetContentSummaryRequestProto req)
    throws ServiceException {
  try {
    ContentSummary result = server.getContentSummary(req.getPath());
    return GetContentSummaryResponseProto.newBuilder()
        .setSummary(PBHelper.convert(result)).build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
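
Examples #25 and #26 are the two halves of the same RPC: the client-side translator wraps the path in a GetContentSummaryRequestProto and converts the returned protobuf summary with PBHelper, while this server-side translator unpacks the request, computes the summary via the underlying server, and converts it back to protobuf form.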
 
Example #27
Source File: TestQuotaByStorageType.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateTruncate() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);

  // set storage policy on directory "foo" to ONESSD
  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  // set quota by storage type on directory "foo"
  dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 4);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());

  // Create file of size 2 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);

  // Verify SSD consumed before truncate
  long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  // Truncate file to 1 * BLOCKSIZE
  int newFile1Len = BLOCKSIZE * 1;
  dfs.truncate(createdFile1, newFile1Len);

  // Verify SSD consumed after truncate
  ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(newFile1Len, ssdConsumed);

  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), newFile1Len * REPLICATION);
  assertEquals(cs.getTypeConsumed(StorageType.SSD), newFile1Len);
  assertEquals(cs.getTypeConsumed(StorageType.DISK), newFile1Len * 2);
}