Java Code Examples for org.apache.hadoop.fs.LocatedFileStatus#getPath()

The following examples show how to use org.apache.hadoop.fs.LocatedFileStatus#getPath(). Each example is drawn from an open source project; the source file and the project it comes from are noted above the example.
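Before the project examples, here is a minimal, self-contained sketch of the pattern most of them share: list a directory with FileSystem#listFiles and read each entry's path via LocatedFileStatus#getPath(). The class name and the default directory here are illustrative, not taken from any of the projects below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListPathsSketch {

    public static void main(String[] args) throws IOException {
        // Illustrative directory; pass a real path as the first argument.
        Path dir = new Path(args.length > 0 ? args[0] : "/tmp/data");

        FileSystem fs = FileSystem.get(new Configuration());

        // The second argument selects recursive traversal of subdirectories.
        RemoteIterator<LocatedFileStatus> files = fs.listFiles(dir, true);
        while (files.hasNext()) {
            LocatedFileStatus status = files.next();
            // getPath() returns the fully qualified Path of the listed entry.
            Path path = status.getPath();
            System.out.println(path + " (" + status.getLen() + " bytes)");
        }
    }
}

The boolean passed to listFiles controls whether subdirectories are walked; several of the examples below pass true for a recursive listing and false for a flat one.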
Example 1
Source File: ColumnarFilesReader.java    From kylin-on-parquet-v2 with Apache License 2.0
void checkPath() {
    try {
        RemoteIterator<LocatedFileStatus> files = fs.listFiles(folderPath, false);
        if (files == null) {
            throw new IllegalArgumentException("Invalid path " + folderPath);
        }
        while (files.hasNext()) {
            LocatedFileStatus fileStatus = files.next();
            Path path = fileStatus.getPath();
            String name = path.getName();

            if (name.endsWith(Constants.DATA_FILE_SUFFIX)) {
                dataFilePath = path;
            } else if (name.endsWith(Constants.META_FILE_SUFFIX)) {
                metaFilePath = path;
            } else {
                logger.warn("Contains invalid file {} in path {}", path, folderPath);
            }
        }
        if (dataFilePath == null || metaFilePath == null) {
            throw new IllegalArgumentException("Invalid path " + folderPath);
        }
    } catch (IOException e) {
        throw new RuntimeException("io error", e);
    }
}
 
Example 2
Source File: FileInputFormat.java    From RDFS with Apache License 2.0
private void verifyLocatedFileStatus(
    JobConf conf, List<LocatedFileStatus> stats)
    throws IOException {
  if (!conf.getBoolean("mapred.fileinputformat.verifysplits", true)) {
    return;
  }
  for (LocatedFileStatus stat: stats) {
    long fileLen = stat.getLen();
    long blockLenTotal = 0;
    for (BlockLocation loc: stat.getBlockLocations()) {
      blockLenTotal += loc.getLength();
    }
    if (blockLenTotal != fileLen) {
      throw new IOException("Error while getting located status, " +
        stat.getPath() + " has length " + fileLen + " but blocks total is " +
        blockLenTotal);
    }
  }
}
 
Example 3
Source File: SegmentHelper.java    From indexr with Apache License 2.0
public static void literalAllSegments(FileSystem fileSystem, Path dir, Consumer<LocatedFileStatus> consumer) throws IOException {
    RemoteIterator<LocatedFileStatus> files = fileSystem.listFiles(dir, true);
    while (files.hasNext()) {
        LocatedFileStatus fileStatus = files.next();
        if (!fileStatus.isFile()) {
            continue;
        }
        if (fileStatus.getLen() == 0) {
            continue;
        }

        Path path = fileStatus.getPath();
        if (checkSegmentByPath(path)) {
            consumer.accept(fileStatus);
        }
    }
}
 
Example 4
Source File: ColumnarFilesReader.java    From kylin with Apache License 2.0
void checkPath() {
    try {
        RemoteIterator<LocatedFileStatus> files = fs.listFiles(folderPath, false);
        if (files == null) {
            throw new IllegalArgumentException("Invalid path " + folderPath);
        }
        while (files.hasNext()) {
            LocatedFileStatus fileStatus = files.next();
            Path path = fileStatus.getPath();
            String name = path.getName();

            if (name.endsWith(Constants.DATA_FILE_SUFFIX)) {
                dataFilePath = path;
            } else if (name.endsWith(Constants.META_FILE_SUFFIX)) {
                metaFilePath = path;
            } else {
                logger.warn("Contains invalid file {} in path {}", path, folderPath);
            }
        }
        if (dataFilePath == null || metaFilePath == null) {
            throw new IllegalArgumentException("Invalid path " + folderPath);
        }
    } catch (IOException e) {
        throw new RuntimeException("io error", e);
    }
}
 
Example 5
Source File: IntegrationTestBigLinkedList.java    From hbase with Apache License 2.0
private static SortedSet<byte[]> readFileToSearch(final Configuration conf,
    final FileSystem fs, final LocatedFileStatus keyFileStatus) throws IOException,
    InterruptedException {
  SortedSet<byte []> result = new TreeSet<>(Bytes.BYTES_COMPARATOR);
  // Return entries that are flagged Counts.UNDEFINED in the value. Return the row. This is
  // what is missing.
  TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
  try (SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader rr =
      new SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader()) {
    InputSplit is =
      new FileSplit(keyFileStatus.getPath(), 0, keyFileStatus.getLen(), new String [] {});
    rr.initialize(is, context);
    while (rr.nextKeyValue()) {
      rr.getCurrentKey();
      BytesWritable bw = rr.getCurrentValue();
      if (Verify.VerifyReducer.whichType(bw.getBytes()) == Verify.Counts.UNDEFINED) {
        byte[] key = new byte[rr.getCurrentKey().getLength()];
        System.arraycopy(rr.getCurrentKey().getBytes(), 0, key, 0, rr.getCurrentKey()
            .getLength());
        result.add(key);
      }
    }
  }
  return result;
}
 
Example 6
Source File: DwcaArchiveBuilder.java    From occurrence with Apache License 2.0
/**
 * Appends the compressed files found within the directory to the zip stream as the named file
 */
private void appendPreCompressedFile(ModalZipOutputStream out, Path dir, String filename, String headerRow)
  throws IOException {
  RemoteIterator<LocatedFileStatus> files = sourceFs.listFiles(dir, false);
  List<InputStream> parts = Lists.newArrayList();

  // Add the header first, which must also be compressed
  ByteArrayOutputStream header = new ByteArrayOutputStream();
  D2Utils.compress(new ByteArrayInputStream(headerRow.getBytes()), header);
  parts.add(new ByteArrayInputStream(header.toByteArray()));

  // Locate the streams to the compressed content on HDFS
  while (files.hasNext()) {
    LocatedFileStatus fs = files.next();
    Path path = fs.getPath();
    if (path.toString().endsWith(D2Utils.FILE_EXTENSION)) {
      LOG.info("Deflated content to merge: {} ", path);
      parts.add(sourceFs.open(path));
    }
  }

  // create the Zip entry, and write the compressed bytes
  org.gbif.hadoop.compress.d2.zip.ZipEntry ze = new org.gbif.hadoop.compress.d2.zip.ZipEntry(filename);
  out.putNextEntry(ze, ModalZipOutputStream.MODE.PRE_DEFLATED);
  try (D2CombineInputStream in = new D2CombineInputStream(parts)) {
    ByteStreams.copy(in, out);
    in.close(); // important so counts are accurate
    ze.setSize(in.getUncompressedLength()); // important to set the sizes and CRC
    ze.setCompressedSize(in.getCompressedLength());
    ze.setCrc(in.getCrc32());
  } finally {
    out.closeEntry();
  }
}
 
Example 7
Source File: FileInputFormat.java    From RDFS with Apache License 2.0
/** 
 * Generate the list of files and make them into FileSplits.
 */ 
public List<InputSplit> getSplits(JobContext job
                                  ) throws IOException {
  long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
  long maxSize = getMaxSplitSize(job);

  // generate splits
  List<InputSplit> splits = new ArrayList<InputSplit>();
  for (LocatedFileStatus file: listLocatedStatus(job)) {
    Path path = file.getPath();
    long length = file.getLen();
    BlockLocation[] blkLocations = file.getBlockLocations();

    if ((length != 0) && isSplitable(job, path)) { 
      long blockSize = file.getBlockSize();
      long splitSize = computeSplitSize(blockSize, minSize, maxSize);

      long bytesRemaining = length;
      while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) {
        int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
        splits.add(new FileSplit(path, length-bytesRemaining, splitSize, 
                                 blkLocations[blkIndex].getHosts()));
        bytesRemaining -= splitSize;
      }
      
      if (bytesRemaining != 0) {
        splits.add(new FileSplit(path, length-bytesRemaining, bytesRemaining, 
                   blkLocations[blkLocations.length-1].getHosts()));
      }
    } else if (length != 0) {
      splits.add(new FileSplit(path, 0, length, blkLocations[0].getHosts()));
    } else { 
      //Create empty hosts array for zero length files
      splits.add(new FileSplit(path, 0, length, new String[0]));
    }
  }
  LOG.debug("Total # of splits: " + splits.size());
  return splits;
}
 
Example 8
Source File: FlumeHDFSSinkServiceTest.java    From searchanalytics-bigdata with MIT License
@Test
public void testProcessEvents() throws FileNotFoundException, IOException {
	int searchEventsCount = 101;
	List<Event> searchEvents = generateSearchAnalyticsDataService
			.getSearchEvents(searchEventsCount);

	flumeHDFSSinkService.processEvents(searchEvents);

	// list all files and check data.
	Path dirPath = new Path(hadoopClusterService.getHDFSUri()
			+ "/searchevents");
	// FileStatus[] dirStat = fs.listStatus(dirPath);
	// Path fList[] = FileUtil.stat2Paths(dirStat);

	DistributedFileSystem fs = hadoopClusterService.getFileSystem();
	RemoteIterator<LocatedFileStatus> files = fs.listFiles(dirPath, true);
	while (files.hasNext()) {
		LocatedFileStatus locatedFileStatus = files.next();
		System.out.println("Check:" + locatedFileStatus.getPath());
		if (locatedFileStatus.isFile()) {
			Path path = locatedFileStatus.getPath();
			if (path.getName().startsWith("searchevents")) {
				FSDataInputStream input = fs.open(path);
				BufferedReader reader = new BufferedReader(
						new InputStreamReader(input));
				String body = null;
				while ((body = reader.readLine()) != null) {
					System.out.println("body is:" + body);
				}
				reader.close();
				input.close();
			}
		}
	}
}
 
Example 9
Source File: AbstractSearchJUnit4SpringContextTests.java    From searchanalytics-bigdata with MIT License
protected int printAndCountHdfsFileDirData(String path, String filePrefix,
		boolean print, boolean count) throws IOException {
	int recordsCount = 0;
	DistributedFileSystem fs = hadoopClusterService.getFileSystem();
	RemoteIterator<LocatedFileStatus> files = fs.listFiles(new Path(path),
			true);
	while (files.hasNext()) {
		LocatedFileStatus locatedFileStatus = files.next();
		System.out.println("Check:" + locatedFileStatus.getPath());
		if (locatedFileStatus.isFile()) {
			Path filePath = locatedFileStatus.getPath();
			if (filePath.getName().startsWith(filePrefix)) {
				FSDataInputStream input = fs.open(filePath);
				BufferedReader reader = new BufferedReader(
						new InputStreamReader(input));
				String body = null;
				while ((body = reader.readLine()) != null) {
					if (print) {
						System.out.println("file is: " + filePath.getName() + "body is:" + body);
					}
					if (count) {
						recordsCount++;
					}
				}
				reader.close();
				input.close();
			}
		}
	}
	return recordsCount;
}
 
Example 10
Source File: CompleteSetupIntegrationTest.java    From searchanalytics-bigdata with MIT License
private void FlumehdfsSinkAndTestData(List<Event> searchEvents)
		throws EventDeliveryException, IOException, FileNotFoundException {

	flumeHDFSSinkService.processEvents(searchEvents);

	// list all files and check data.
	Path dirPath = new Path(hadoopClusterService.getHDFSUri()
			+ "/searchevents");
	// FileStatus[] dirStat = fs.listStatus(dirPath);
	// Path fList[] = FileUtil.stat2Paths(dirStat);

	DistributedFileSystem fs = hadoopClusterService.getFileSystem();
	RemoteIterator<LocatedFileStatus> files = fs.listFiles(dirPath, true);
	while (files.hasNext()) {
		LocatedFileStatus locatedFileStatus = files.next();
		System.out.println("Check:" + locatedFileStatus.getPath());
		if (locatedFileStatus.isFile()) {
			Path path = locatedFileStatus.getPath();
			if (path.getName().startsWith("searchevents")) {
				FSDataInputStream input = fs.open(path);
				BufferedReader reader = new BufferedReader(
						new InputStreamReader(input));
				String body = null;
				while ((body = reader.readLine()) != null) {
					System.out.println("body is:" + body);
				}
				reader.close();
				input.close();
			}
		}
	}
}
 
Example 11
Source File: ColumnarSplitInputFormat.java    From kylin with Apache License 2.0
private boolean isValidFragmentPath(FileSystem fs, Path path) throws IOException {
    RemoteIterator<LocatedFileStatus> files = fs.listFiles(path, false);
    if (files == null) {
        logger.warn("Invalid fragment path:{}, empty folder", path);
        return false;
    }
    boolean hasDataFile = false;
    boolean hasMetaFile = false;
    while (files.hasNext()) {
        LocatedFileStatus fileStatus = files.next();
        Path childPath = fileStatus.getPath();
        String name = childPath.getName();

        if (name.endsWith(Constants.DATA_FILE_SUFFIX)) {
            hasDataFile = true;
        } else if (name.endsWith(Constants.META_FILE_SUFFIX)) {
            hasMetaFile = true;
        } else {
            logger.warn("Contains invalid file {} in path {}", childPath, path);
        }
    }
    if (hasDataFile && hasMetaFile) {
        return true;
    } else {
        logger.warn("Invalid fragment path:{}, data file exists:{}, meta file exists:{}", path, hasDataFile,
                hasMetaFile);
        return false;
    }
}
 
Example 12
Source File: AbstractFlagConfig.java    From datawave with Apache License 2.0
protected Path getTestFile(FileSystem fs) throws IOException {
    createTestFiles(1, 1);
    Path file = null;
    for (RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(this.fmc.getBaseHDFSDir()), true); it.hasNext();) {
        LocatedFileStatus status = it.next();
        if (status.isFile()) {
            file = status.getPath();
            break;
        }
    }
    return file;
}
 
Example 13
Source File: ColumnarSplitInputFormat.java    From kylin-on-parquet-v2 with Apache License 2.0
private boolean isValidFragmentPath(FileSystem fs, Path path) throws IOException {
    RemoteIterator<LocatedFileStatus> files = fs.listFiles(path, false);
    if (files == null) {
        logger.warn("Invalid fragment path:{}, empty folder", path);
        return false;
    }
    boolean hasDataFile = false;
    boolean hasMetaFile = false;
    while (files.hasNext()) {
        LocatedFileStatus fileStatus = files.next();
        Path childPath = fileStatus.getPath();
        String name = childPath.getName();

        if (name.endsWith(Constants.DATA_FILE_SUFFIX)) {
            hasDataFile = true;
        } else if (name.endsWith(Constants.META_FILE_SUFFIX)) {
            hasMetaFile = true;
        } else {
            logger.warn("Contains invalid file {} in path {}", childPath, path);
        }
    }
    if (hasDataFile && hasMetaFile) {
        return true;
    } else {
        logger.warn("Invalid fragment path:{}, data file exists:{}, meta file exists:{}", path, hasDataFile,
                hasMetaFile);
        return false;
    }
}
 
Example 14
Source File: DumpHDFSData.java    From gemfirexd-oss with Apache License 2.0
public static void main(String[] args) throws IOException, InterruptedException, SQLException {
  if(args.length < 3 || args.length > 4) {
    System.err.println("Usage: DumpHDFSData namenode_url homedir tablename");
    System.err.println("  Dumps the RAW data for the table tablename into a CSV format for debugging purposes");
    System.exit(1);
  }
  
  
  String namenodeURL = args[0];
  String homeDir = args[1];
  String table = args[2];
  
  
  Configuration conf = new Configuration();
  conf.set("fs.default.name", namenodeURL);
  FileSystem fs = FileSystem.get(conf);
  
  PrintStream out = new PrintStream(table + ".csv");
  try {
    String fullTable = RowInputFormat.getFullyQualifiedTableName(table);
    String folder = HdfsRegionManager.getRegionFolder(Misc.getRegionPath(fullTable));
    RemoteIterator<LocatedFileStatus> fileItr = fs.listFiles(new Path(homeDir + "/" + folder), true);
    
    conf.set(RowInputFormat.HOME_DIR, homeDir);
    conf.set(RowInputFormat.INPUT_TABLE, table);
    
    boolean wroteHeader = false;
    TaskAttemptContextImpl context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
    
    while(fileItr.hasNext()) {
      LocatedFileStatus file = fileItr.next();
      Path path = file.getPath();
      if(!path.getName().endsWith("hop")) {
        continue;
      }
      CombineFileSplit split = new CombineFileSplit(new Path[] { path } , new long[] { file.getLen()});
      RowRecordReader reader = new RowRecordReader();
      reader.initialize(split, context);
      while(reader.nextKeyValue()) {
        Row row = reader.getCurrentValue();
        ResultSet rs = row.getRowAsResultSet();
        Type op = row.getEventType();
        long ts = row.getTimestamp();

        int numColumns = rs.getMetaData().getColumnCount();
        if(!wroteHeader) {
          out.print("timestamp,operation,path");
          for(int i =1; i <= numColumns; i++) {
            out.print(",");
            out.print(rs.getMetaData().getColumnName(i));
          }
          out.println();
          wroteHeader = true;
        }

        out.print(ts);
        out.print(",");
        out.print(op);
        out.print(",");
        out.print(path);
        for(int i =1; i <= numColumns; i++) {
          out.print(",");
          String s= rs.getString(i);
          if(s != null) {
            s = s.replaceAll("([,\n])", "\\\\1");
          } else {
            s = "NULL";
          }
          out.print(s);
        }
        out.println();
      }
    }
  
  } finally {
    out.close();
  }
}
 
Example 15
Source File: HiveMetadataUtils.java    From dremio-oss with Apache License 2.0
public static HiveReaderProto.FileSystemPartitionUpdateKey getFSBasedUpdateKey(String partitionDir, JobConf job,
                                                                               boolean isRecursive, boolean directoriesOnly,
                                                                               int partitionId) {
  final List<HiveReaderProto.FileSystemCachedEntity> cachedEntities = new ArrayList<>();
  final Path rootLocation = new Path(partitionDir);
  try {
    // TODO: DX-16001 - make async configurable for Hive.
    final HadoopFileSystemWrapper fs = new HadoopFileSystemWrapper(rootLocation, job);

    if (fs.exists(rootLocation)) {
      final FileStatus rootStatus = fs.getFileStatus(rootLocation);
      if (rootStatus.isDirectory()) {
        cachedEntities.add(HiveReaderProto.FileSystemCachedEntity.newBuilder()
          .setPath(EMPTY_STRING)
          .setLastModificationTime(rootStatus.getModificationTime())
          .setIsDir(true)
          .build());

        final RemoteIterator<LocatedFileStatus> statuses = isRecursive ? fs.listFiles(rootLocation, true) : fs.listFiles(rootLocation, false);
        while (statuses.hasNext()) {
          LocatedFileStatus fileStatus = statuses.next();
          final Path filePath = fileStatus.getPath();
          if (fileStatus.isDirectory()) {
            cachedEntities.add(HiveReaderProto.FileSystemCachedEntity.newBuilder()
              .setPath(PathUtils.relativePath(filePath, rootLocation))
              .setLastModificationTime(fileStatus.getModificationTime())
              .setIsDir(true)
              .build());
          } else if (fileStatus.isFile() && !directoriesOnly) {
            cachedEntities.add(HiveReaderProto.FileSystemCachedEntity.newBuilder()
              .setPath(PathUtils.relativePath(filePath, rootLocation))
              .setLastModificationTime(fileStatus.getModificationTime())
              .setIsDir(false)
              .build());
          }
        }
      } else {
        cachedEntities.add(HiveReaderProto.FileSystemCachedEntity.newBuilder()
          .setPath(EMPTY_STRING)
          .setLastModificationTime(rootStatus.getModificationTime())
          .setIsDir(false)
          .build());
      }
      return HiveReaderProto.FileSystemPartitionUpdateKey.newBuilder()
        .setPartitionId(partitionId)
        .setPartitionRootDir(fs.makeQualified(rootLocation).toString())
        .addAllCachedEntities(cachedEntities)
        .build();
    }
    return null;
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
 