Java Code Examples for org.apache.hadoop.hdfs.DistributedFileSystem#listStatus()

The following examples show how to use org.apache.hadoop.hdfs.DistributedFileSystem#listStatus(). You can go to the original project or source file by following the links above each example, or check out the related API usage on the sidebar.
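Before the project examples, a minimal self-contained sketch of the basic call may help. The namenode URI and the /data path below are hypothetical placeholders, not values taken from any example on this page.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ListStatusSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical namenode address; adjust to your cluster.
        URI uri = URI.create("hdfs://namenode:8020");
        DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(uri, conf);
        try {
            // listStatus returns the direct children of the directory (non-recursive).
            FileStatus[] entries = fs.listStatus(new Path("/data"));
            for (FileStatus entry : entries) {
                System.out.println((entry.isDirectory() ? "dir  " : "file ")
                        + entry.getPath() + " (" + entry.getLen() + " bytes)");
            }
        } finally {
            fs.close();
        }
    }
}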
Example 1
Source File: HDFSTool.java    From WIFIProbe with Apache License 2.0
public static void concat(String dir) throws IOException {
    String directory = NodeConfig.HDFS_PATH + dir;
    Configuration conf = new Configuration();
    DistributedFileSystem fs =
            (DistributedFileSystem) FileSystem.get(URI.create(directory), conf);
    try {
        FileStatus[] fileList = fs.listStatus(new Path(directory));
        if (fileList.length < 2) {
            return;
        }

        // Select the small files: those whose size, modulo the block size,
        // is under half a block.
        ArrayList<Path> srcs = new ArrayList<Path>(fileList.length);
        for (FileStatus fileStatus : fileList) {
            if (fileStatus.isFile()
                    && fileStatus.getLen() % fileStatus.getBlockSize()
                            < fileStatus.getBlockSize() / 2) {
                srcs.add(fileStatus.getPath());
            }
        }

        if (srcs.size() >= 2) {
            // Concatenate the remaining small files into the first one.
            Path target = srcs.get(0);
            Path[] sources = srcs.subList(1, srcs.size()).toArray(new Path[0]);
            fs.concat(target, sources);
            Logger.println("concat to : " + target.getName());
            Logger.println(Arrays.toString(sources));
        }
    } finally {
        fs.close();
    }
}
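Note that listStatus() only returns the immediate children of a directory. If a sweep like the one above needed to cover an entire subtree, the FileSystem#listFiles(Path, boolean) overload provides a recursive iterator instead. A minimal sketch, assuming fs is an already-open DistributedFileSystem and /data is a placeholder path:

RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path("/data"), true); // true = recursive
while (it.hasNext()) {
    LocatedFileStatus status = it.next();
    // LocatedFileStatus also carries block locations, unlike a plain FileStatus.
    System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
}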
 
Example 2
Source File: SnapshotTestHelper.java    From hadoop with Apache License 2.0
/**
 * Check the functionality of a snapshot.
 * 
 * @param hdfs DistributedFileSystem instance
 * @param snapshotRoot The root of the snapshot
 * @param snapshottedDir The snapshotted directory
 */
public static void checkSnapshotCreation(DistributedFileSystem hdfs,
    Path snapshotRoot, Path snapshottedDir) throws Exception {
  // Currently we only check if the snapshot was created successfully
  assertTrue(hdfs.exists(snapshotRoot));
  // Compare the snapshot with the current dir
  FileStatus[] currentFiles = hdfs.listStatus(snapshottedDir);
  FileStatus[] snapshotFiles = hdfs.listStatus(snapshotRoot);
  assertEquals("snapshottedDir=" + snapshottedDir
      + ", snapshotRoot=" + snapshotRoot,
      currentFiles.length, snapshotFiles.length);
}
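For context, this helper assumes the snapshot already exists. A minimal sketch of producing one with DistributedFileSystem before calling the helper; the directory and snapshot name here are hypothetical:

Path dir = new Path("/user/alice/data");             // hypothetical snapshottable directory
hdfs.allowSnapshot(dir);                             // mark the directory snapshottable (admin op)
Path snapshotRoot = hdfs.createSnapshot(dir, "s1");  // e.g. /user/alice/data/.snapshot/s1
checkSnapshotCreation(hdfs, snapshotRoot, dir);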
 
Example 3
Source File: IndexScrutinyToolIT.java    From phoenix with Apache License 2.0
/**
 * Tests that with the output to file option set, the scrutiny tool outputs invalid rows to file
 */
@Test public void testOutputInvalidRowsToFile() throws Exception {
    insertOneValid_OneBadVal_OneMissingTarget();

    String[]
            argValues =
            getArgValues(schemaName, dataTableName, indexTableName, 10L, SourceTable.DATA_TABLE_SOURCE, true, OutputFormat.FILE, null);
    runScrutiny(argValues);

    // check the output files
    Path outputPath = CsvBulkImportUtil.getOutputPath(new Path(outputDir), dataTableFullName);
    DistributedFileSystem fs = getUtility().getDFSCluster().getFileSystem();
    List<Path> paths = Lists.newArrayList();
    Path firstPart = null;
    for (FileStatus outputFile : fs.listStatus(outputPath)) {
        if (outputFile.getPath().getName().startsWith("part")) {
            if (firstPart == null) {
                firstPart = outputFile.getPath();
            } else {
                paths.add(outputFile.getPath());
            }
        }
    }
    // Salted tables write multiple part files; merge them all into the first one.
    if (dataTableDdl.contains("SALT_BUCKETS")) {
        fs.concat(firstPart, paths.toArray(new Path[0]));
    }
    Path outputFilePath = firstPart;
    assertTrue(fs.exists(outputFilePath));
    FSDataInputStream fsDataInputStream = fs.open(outputFilePath);
    BufferedReader reader = new BufferedReader(new InputStreamReader(fsDataInputStream));
    TreeSet<String> lines = Sets.newTreeSet();
    try {
        String line = null;
        while ((line = reader.readLine()) != null) {
            lines.add(line);
        }
    } finally {
        IOUtils.closeQuietly(reader);
        IOUtils.closeQuietly(fsDataInputStream);
    }
    Iterator<String> lineIterator = lines.iterator();
    assertEquals("[2, name-2, " + new Timestamp(testTime).toString() + ", 95123]\t[2, name-2, "
            + new Timestamp(testTime).toString() + ", 9999]", lineIterator.next());
    assertEquals("[3, name-3, " + new Timestamp(testTime).toString() + ", 95123]\tTarget row not found",
            lineIterator.next());

}
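The loop over fs.listStatus(outputPath) above selects the "part" files by name. The same selection can be expressed with the listStatus(Path, PathFilter) overload that FileSystem provides (and DistributedFileSystem inherits); a minimal sketch, reusing outputPath from the test:

FileStatus[] partFiles = fs.listStatus(outputPath, new PathFilter() {
    @Override
    public boolean accept(Path path) {
        // Keep only the MapReduce output files, e.g. part-m-00000.
        return path.getName().startsWith("part");
    }
});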