org.apache.hadoop.fs.FileContext Java Examples

The following examples show how to use org.apache.hadoop.fs.FileContext. Each example is taken from an open-source project; the source file and originating project are noted above each listing.
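For orientation, the sketch below shows the basic FileContext life cycle: obtain a handle for the configured default file system, create a directory, write and read a file, then clean up. It is a minimal illustration only; the class name and the /tmp path are invented for this sketch and do not come from any of the projects quoted below.

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class FileContextSketch {  // hypothetical class, for illustration only
  public static void main(String[] args) throws IOException {
    // Resolves the default file system from fs.defaultFS in the Configuration
    FileContext fc = FileContext.getFileContext(new Configuration());

    Path dir = new Path("/tmp/filecontext-demo");  // illustrative path
    fc.mkdir(dir, FsPermission.getDirDefault(), true);  // createParent = true

    // Write a small file
    Path file = new Path(dir, "hello.txt");
    try (FSDataOutputStream out = fc.create(file,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.createParent())) {
      out.writeUTF("hello, FileContext");
    }

    // Read it back
    try (FSDataInputStream in = fc.open(file)) {
      System.out.println(in.readUTF());
    }

    fc.delete(dir, true);  // recursive delete
  }
}

FileContext is the newer client-side API (the provider interface behind it is AbstractFileSystem). The examples below obtain instances in three ways: FileContext.getFileContext(conf) for the configured default file system, FileContext.getFileContext(uri, conf) for a specific file system, and FileContext.getLocalFSFileContext() for the local one.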
Example #1
Source File: Display.java    From big-c with Apache License 2.0
public AvroFileInputStream(FileStatus status) throws IOException {
  pos = 0;
  buffer = new byte[0];
  GenericDatumReader<Object> reader = new GenericDatumReader<Object>();
  // Open the Avro data file through an AvroFSInput backed by a FileContext
  FileContext fc = FileContext.getFileContext(new Configuration());
  fileReader =
    DataFileReader.openReader(new AvroFSInput(fc, status.getPath()), reader);
  // Re-encode each record as JSON, one record per line
  Schema schema = fileReader.getSchema();
  writer = new GenericDatumWriter<Object>(schema);
  output = new ByteArrayOutputStream();
  JsonGenerator generator =
    new JsonFactory().createJsonGenerator(output, JsonEncoding.UTF8);
  MinimalPrettyPrinter prettyPrinter = new MinimalPrettyPrinter();
  prettyPrinter.setRootValueSeparator(System.getProperty("line.separator"));
  generator.setPrettyPrinter(prettyPrinter);
  encoder = EncoderFactory.get().jsonEncoder(schema, generator);
}
 
Example #2
Source File: DataGenerator.java    From big-c with Apache License 2.0
/** Parse the command line arguments and initialize the data */
private int init(String[] args) {
  try { // initialize file system handle
    fc = FileContext.getFileContext(getConf());
  } catch (IOException ioe) {
    System.err.println("Can not initialize the file system: " + 
        ioe.getLocalizedMessage());
    return -1;
  }

  for (int i = 0; i < args.length; i++) { // parse command line
    if (args[i].equals("-root")) {
      root = new Path(args[++i]);
    } else if (args[i].equals("-inDir")) {
      inDir = new File(args[++i]);
    } else {
      System.err.println(USAGE);
      ToolRunner.printGenericCommandUsage(System.err);
      System.exit(-1);
    }
  }
  return 0;
}
 
Example #3
Source File: TestPermissionSymlinks.java    From hadoop with Apache License 2.0
private void doDeleteLinkParentNotWritable() throws Exception {
  // Try to delete where the symlink's parent dir is not writable
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws IOException {
        FileContext myfc = FileContext.getFileContext(conf);
        myfc.delete(link, false);
        return null;
      }
    });
    fail("Deleted symlink without write permissions on parent!");
  } catch (AccessControlException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }
}
 
Example #4
Source File: TestLinuxContainerExecutor.java    From hadoop with Apache License 2.0
@Test
public void testContainerLaunch() throws Exception {
  Assume.assumeTrue(shouldRun());
  String expectedRunAsUser =
      conf.get(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY,
        YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER);

  File touchFile = new File(workSpace, "touch-file");
  int ret = runAndBlock("touch", touchFile.getAbsolutePath());

  assertEquals(0, ret);
  FileStatus fileStatus =
      FileContext.getLocalFSFileContext().getFileStatus(
        new Path(touchFile.getAbsolutePath()));
  assertEquals(expectedRunAsUser, fileStatus.getOwner());
  cleanupAppFiles(expectedRunAsUser);
}
 
Example #5
Source File: MiniYARNClusterSplice.java    From spliceengine with GNU Affero General Public License v3.0
@Override
protected synchronized void serviceStop() throws Exception {
    if (resourceManagers[index] != null) {
        waitForAppMastersToFinish(5000);
        resourceManagers[index].stop();
    }

    if (Shell.WINDOWS) {
        // On Windows, clean up the short temporary symlink that was created to
        // work around path length limitation.
        String testWorkDirPath = testWorkDir.getAbsolutePath();
        try {
            FileContext.getLocalFSFileContext().delete(new Path(testWorkDirPath),
                                                       true);
        } catch (IOException e) {
            LOG.warn("could not cleanup symlink: " +
                         testWorkDir.getAbsolutePath());
        }
    }
    super.serviceStop();
}
 
Example #6
Source File: ContainerLocalizer.java    From big-c with Apache License 2.0
private static void initDirs(Configuration conf, String user, String appId,
    FileContext lfs, List<Path> localDirs) throws IOException {
  if (null == localDirs || 0 == localDirs.size()) {
    throw new IOException("Cannot initialize without local dirs");
  }
  String[] appsFileCacheDirs = new String[localDirs.size()];
  String[] usersFileCacheDirs = new String[localDirs.size()];
  for (int i = 0, n = localDirs.size(); i < n; ++i) {
    // $x/usercache/$user
    Path base = lfs.makeQualified(
        new Path(new Path(localDirs.get(i), USERCACHE), user));
    // $x/usercache/$user/filecache
    Path userFileCacheDir = new Path(base, FILECACHE);
    usersFileCacheDirs[i] = userFileCacheDir.toString();
    createDir(lfs, userFileCacheDir, FILECACHE_PERMS, false);
    // $x/usercache/$user/appcache/$appId
    Path appBase = new Path(base, new Path(APPCACHE, appId));
    // $x/usercache/$user/appcache/$appId/filecache
    Path appFileCacheDir = new Path(appBase, FILECACHE);
    appsFileCacheDirs[i] = appFileCacheDir.toString();
    createDir(lfs, appFileCacheDir, FILECACHE_PERMS, false);
  }
  conf.setStrings(String.format(APPCACHE_CTXT_FMT, appId), appsFileCacheDirs);
  conf.setStrings(String.format(USERCACHE_CTXT_FMT, user), usersFileCacheDirs);
}
 
Example #7
Source File: TestFSDownload.java    From hadoop with Apache License 2.0
static LocalResource createZipFile(FileContext files, Path p, int len,
    Random r, LocalResourceVisibility vis) throws IOException,
    URISyntaxException {
  byte[] bytes = new byte[len];
  r.nextBytes(bytes);

  File archiveFile = new File(p.toUri().getPath() + ".ZIP");
  archiveFile.createNewFile();
  ZipOutputStream out = new ZipOutputStream(
      new FileOutputStream(archiveFile));
  out.putNextEntry(new ZipEntry(p.getName()));
  out.write(bytes);
  out.closeEntry();
  out.close();

  LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
  ret.setResource(ConverterUtils.getYarnUrlFromPath(new Path(p.toString()
      + ".ZIP")));
  ret.setSize(len);
  ret.setType(LocalResourceType.ARCHIVE);
  ret.setVisibility(vis);
  ret.setTimestamp(files.getFileStatus(new Path(p.toString() + ".ZIP"))
      .getModificationTime());
  return ret;
}
 
Example #8
Source File: TestDistributedShell.java    From big-c with Apache License 2.0
@After
public void tearDown() throws IOException {
  if (yarnCluster != null) {
    try {
      yarnCluster.stop();
    } finally {
      yarnCluster = null;
    }
  }
  FileContext fsContext = FileContext.getLocalFSFileContext();
  fsContext.delete(
      new Path(conf.get("yarn.timeline-service.leveldb-timeline-store.path")),
      true);
}
 
Example #9
Source File: HistoryFileManager.java    From hadoop with Apache License 2.0
private void mkdir(FileContext fc, Path path, FsPermission fsp)
    throws IOException {
  if (!fc.util().exists(path)) {
    try {
      fc.mkdir(path, fsp, true);

      FileStatus fsStatus = fc.getFileStatus(path);
      LOG.info("Perms after creating " + fsStatus.getPermission().toShort()
          + ", Expected: " + fsp.toShort());
      if (fsStatus.getPermission().toShort() != fsp.toShort()) {
        LOG.info("Explicitly setting permissions to : " + fsp.toShort()
            + ", " + fsp);
        fc.setPermission(path, fsp);
      }
    } catch (FileAlreadyExistsException e) {
      LOG.info("Directory: [" + path + "] already exists.");
    }
  }
}
 
Example #10
Source File: HistoryFileManager.java    From big-c with Apache License 2.0
@VisibleForTesting
protected static List<FileStatus> scanDirectory(Path path, FileContext fc,
    PathFilter pathFilter) throws IOException {
  path = fc.makeQualified(path);
  List<FileStatus> jhStatusList = new ArrayList<FileStatus>();
  try {
    RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
    while (fileStatusIter.hasNext()) {
      FileStatus fileStatus = fileStatusIter.next();
      Path filePath = fileStatus.getPath();
      if (fileStatus.isFile() && pathFilter.accept(filePath)) {
        jhStatusList.add(fileStatus);
      }
    }
  } catch (FileNotFoundException fe) {
    LOG.error("Error while scanning directory " + path, fe);
  }
  return jhStatusList;
}
 
Example #11
Source File: TestPermissionSymlinks.java    From hadoop with Apache License 2.0
private void doReadTargetNotReadable() throws Exception {
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws IOException {
        FileContext myfc = FileContext.getFileContext(conf);
        myfc.open(link).read();
        return null;
      }
    });
    fail("Read link target even though target does not have"
        + " read permissions!");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }
}
 
Example #12
Source File: TestContainerLocalizer.java    From hadoop with Apache License 2.0
@Test
@SuppressWarnings("unchecked") // mocked generics
public void testContainerLocalizerClosesFilesystems() throws Exception {
  // verify filesystems are closed when localizer doesn't fail
  FileContext fs = FileContext.getLocalFSFileContext();
  spylfs = spy(fs.getDefaultFileSystem());
  ContainerLocalizer localizer = setupContainerLocalizerForTest();
  doNothing().when(localizer).localizeFiles(any(LocalizationProtocol.class),
      any(CompletionService.class), any(UserGroupInformation.class));
  verify(localizer, never()).closeFileSystems(
      any(UserGroupInformation.class));
  localizer.runLocalization(nmAddr);
  verify(localizer).closeFileSystems(any(UserGroupInformation.class));

  spylfs = spy(fs.getDefaultFileSystem());
  // verify filesystems are closed when localizer fails
  localizer = setupContainerLocalizerForTest();
  doThrow(new YarnRuntimeException("Forced Failure")).when(localizer).localizeFiles(
      any(LocalizationProtocol.class), any(CompletionService.class),
      any(UserGroupInformation.class));
  verify(localizer, never()).closeFileSystems(
      any(UserGroupInformation.class));
  localizer.runLocalization(nmAddr);
  verify(localizer).closeFileSystems(any(UserGroupInformation.class));
}
 
Example #13
Source File: AbstractApexPluginDispatcher.java    From attic-apex-core with Apache License 2.0
private Configuration readLaunchConfiguration() throws IOException
{
  Path appPath = new Path(appContext.getApplicationPath());
  Path configFilePath = new Path(appPath, LogicalPlan.LAUNCH_CONFIG_FILE_NAME);
  try {
    LOG.debug("Reading launch configuration file ");
    URI uri = appPath.toUri();
    Configuration config = new YarnConfiguration();
    fileContext = uri.getScheme() == null ? FileContext.getFileContext(config) : FileContext.getFileContext(uri, config);
    FSDataInputStream is = fileContext.open(configFilePath);
    config.addResource(is);
    LOG.debug("Read launch configuration");
    return config;
  } catch (FileNotFoundException ex) {
    LOG.warn("Configuration file not found {}", configFilePath);
    return new Configuration();
  }
}
 
Example #14
Source File: TestEncryptionZones.java    From big-c with Apache License 2.0
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, getKeyProviderURI());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  // Lower the batch size for testing
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
      2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(fs);
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  setProvider();
  // Create a test key
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
 
Example #15
Source File: ResourceLocalizationService.java    From big-c with Apache License 2.0
private void cleanUpLocalDir(FileContext lfs, DeletionService del,
    String localDir) {
  long currentTimeStamp = System.currentTimeMillis();
  renameLocalDir(lfs, localDir, ContainerLocalizer.USERCACHE,
    currentTimeStamp);
  renameLocalDir(lfs, localDir, ContainerLocalizer.FILECACHE,
    currentTimeStamp);
  renameLocalDir(lfs, localDir, ResourceLocalizationService.NM_PRIVATE_DIR,
    currentTimeStamp);
  try {
    deleteLocalDir(lfs, del, localDir);
  } catch (IOException e) {
    // Do nothing, just give the warning
    LOG.warn("Failed to delete localDir: " + localDir);
  }
}
 
Example #16
Source File: TestReservedRawPaths.java    From hadoop with Apache License 2.0
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  File testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
  );
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem());
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem()
      .getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
 
Example #17
Source File: AvroFileInputOperatorTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testApplication() throws IOException, Exception
{
  try {
    FileContext.getLocalFSFileContext().delete(new Path(new File(testMeta.dir).getAbsolutePath()), true);
    int cnt = 7;
    createAvroInput(cnt);
    writeAvroFile(new File(FILENAME));
    createAvroInput(cnt - 2);
    writeAvroFile(new File(OTHER_FILE));
    avroFileInput.setDirectory(testMeta.dir);

    LocalMode lma = LocalMode.newInstance();
    Configuration conf = new Configuration(false);

    AvroReaderApplication avroReaderApplication = new AvroReaderApplication();
    avroReaderApplication.setAvroFileInputOperator(avroFileInput);
    lma.prepareDAG(avroReaderApplication, conf);

    LocalMode.Controller lc = lma.getController();
    lc.run(10000); // runs for 10 seconds and quits
  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }
}
 
Example #18
Source File: BaseContainerManagerTest.java    From hadoop with Apache License 2.0
public BaseContainerManagerTest() throws UnsupportedFileSystemException {
  localFS = FileContext.getLocalFSFileContext();
  localDir =
      new File("target", this.getClass().getSimpleName() + "-localDir")
          .getAbsoluteFile();
  localLogDir =
      new File("target", this.getClass().getSimpleName() + "-localLogDir")
          .getAbsoluteFile();
  remoteLogDir =
    new File("target", this.getClass().getSimpleName() + "-remoteLogDir")
        .getAbsoluteFile();
  tmpDir = new File("target", this.getClass().getSimpleName() + "-tmpDir");
}
 
Example #19
Source File: ContainersLauncher.java    From big-c with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  try {
    //TODO Is this required?
    FileContext.getLocalFSFileContext(conf);
  } catch (UnsupportedFileSystemException e) {
    throw new YarnRuntimeException("Failed to start ContainersLauncher", e);
  }
  super.serviceInit(conf);
}
 
Example #20
Source File: DirectoryCollection.java    From hadoop with Apache License 2.0
private void createDir(FileContext localFs, Path dir, FsPermission perm)
    throws IOException {
  if (dir == null) {
    return;
  }
  try {
    localFs.getFileStatus(dir);
  } catch (FileNotFoundException e) {
    createDir(localFs, dir.getParent(), perm);
    localFs.mkdir(dir, perm, false);
    if (!perm.equals(perm.applyUMask(localFs.getUMask()))) {
      localFs.setPermission(dir, perm);
    }
  }
}
 
Example #21
Source File: TestViewFsWithXAttrs.java    From hadoop with Apache License 2.0
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
  cluster = new MiniDFSCluster.Builder(clusterConf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .numDataNodes(2)
      .build();
  cluster.waitClusterUp();

  fc = FileContext.getFileContext(cluster.getURI(0), clusterConf);
  fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
}
 
Example #22
Source File: TestNodeManagerResync.java    From big-c with Apache License 2.0
@Before
public void setup() throws UnsupportedFileSystemException {
  localFS = FileContext.getLocalFSFileContext();
  tmpDir.mkdirs();
  logsDir.mkdirs();
  remoteLogsDir.mkdirs();
  nmLocalDir.mkdirs();
  syncBarrier = new CyclicBarrier(2);
}
 
Example #23
Source File: DockerContainerExecutor.java    From hadoop with Apache License 2.0
public DockerContainerExecutor() {
  try {
    this.lfs = FileContext.getLocalFSFileContext();
    this.dockerImagePattern = Pattern.compile(DOCKER_IMAGE_PATTERN);
  } catch (UnsupportedFileSystemException e) {
    throw new RuntimeException(e);
  }
}
 
Example #24
Source File: AsyncFSStorageAgentTest.java    From attic-apex-core with Apache License 2.0
@Test
public void testDelete() throws IOException
{
  testLoad();
  testMeta.storageAgent.delete(1, 1);
  Path appPath = new Path(testMeta.applicationPath);
  FileContext fileContext = FileContext.getFileContext();
  Assert.assertTrue("operator 2 window 1", fileContext.util().exists(new Path(appPath + "/" + 2 + "/" + 1)));
  Assert.assertFalse("operator 1 window 1", fileContext.util().exists(new Path(appPath + "/" + 1 + "/" + 1)));
}
 
Example #25
Source File: TestFSDownload.java    From big-c with Apache License 2.0
private void verifyPermsRecursively(FileSystem fs,
    FileContext files, Path p,
    LocalResourceVisibility vis) throws IOException {
  FileStatus status = files.getFileStatus(p);
  if (status.isDirectory()) {
    if (vis == LocalResourceVisibility.PUBLIC) {
      Assert.assertTrue(status.getPermission().toShort() ==
        FSDownload.PUBLIC_DIR_PERMS.toShort());
    }
    else {
      Assert.assertTrue(status.getPermission().toShort() ==
        FSDownload.PRIVATE_DIR_PERMS.toShort());
    }
    if (!status.isSymlink()) {
      FileStatus[] statuses = fs.listStatus(p);
      for (FileStatus stat : statuses) {
        verifyPermsRecursively(fs, files, stat.getPath(), vis);
      }
    }
  }
  else {
    if (vis == LocalResourceVisibility.PUBLIC) {
      Assert.assertTrue(status.getPermission().toShort() ==
        FSDownload.PUBLIC_FILE_PERMS.toShort());
    }
    else {
      Assert.assertTrue(status.getPermission().toShort() ==
        FSDownload.PRIVATE_FILE_PERMS.toShort());
    }
  }      
}
 
Example #26
Source File: LoadGenerator.java    From big-c with Apache License 2.0
/** Main function called by tool runner.
 * It first initializes data by parsing the command line arguments.
 * It then calls the loadGenerator
 */
@Override
public int run(String[] args) throws Exception {
  int exitCode = parseArgs(false, args);
  if (exitCode != 0) {
    return exitCode;
  }
  System.out.println("Running LoadGenerator against fileSystem: " + 
  FileContext.getFileContext().getDefaultFileSystem().getUri());
  exitCode = generateLoadOnNN();
  printResults(System.out);
  return exitCode;
}
 
Example #27
Source File: TestJobHistoryEventHandler.java    From hadoop with Apache License 2.0
private String setupTestWorkDir() {
  File testWorkDir = new File("target", this.getClass().getCanonicalName());
  try {
    FileContext.getLocalFSFileContext().delete(
        new Path(testWorkDir.getAbsolutePath()), true);
    return testWorkDir.getAbsolutePath();
  } catch (Exception e) {
    LOG.warn("Could not cleanup", e);
    throw new YarnRuntimeException("could not cleanup test dir", e);
  }
}
 
Example #28
Source File: TestLogAggregationService.java    From big-c with Apache License 2.0
private int numOfLogsAvailable(LogAggregationService logAggregationService,
    ApplicationId appId, boolean sizeLimited, String lastLogFile)
    throws IOException {
  Path appLogDir = logAggregationService.getRemoteAppLogDir(appId, this.user);
  RemoteIterator<FileStatus> nodeFiles = null;
  try {
    Path qualifiedLogDir =
        FileContext.getFileContext(this.conf).makeQualified(appLogDir);
    nodeFiles =
        FileContext.getFileContext(qualifiedLogDir.toUri(), this.conf)
          .listStatus(appLogDir);
  } catch (FileNotFoundException fnf) {
    return -1;
  }
  int count = 0;
  while (nodeFiles.hasNext()) {
    FileStatus status = nodeFiles.next();
    String filename = status.getPath().getName();
    if (filename.contains(LogAggregationUtils.TMP_FILE_SUFFIX)
        || (lastLogFile != null && filename.contains(lastLogFile)
            && sizeLimited)) {
      return -1;
    }
    if (filename.contains(LogAggregationUtils
      .getNodeString(logAggregationService.getNodeId()))) {
      count++;
    }
  }
  return count;
}