Java Code Examples for org.apache.hadoop.fs.FileSystem.isDirectory()

The following are Java code examples showing how to use isDirectory() of the org.apache.hadoop.fs.FileSystem class. You can vote up the examples you like; your votes help us surface more good examples.
Example 1
Project: MRNMF   File: MM1.java   Source Code and License Vote up 6 votes
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    // Pull matrix metadata (path, width, height, key prefix) from the job config.
    final Configuration jobConf = context.getConfiguration();
    final String matrixPath = jobConf.get("mpath");
    Bw = jobConf.getInt("mw", -1);
    Bh = jobConf.getInt("mh", -1);
    prefix = jobConf.get("prefix", "");

    final Path matrixLocation = new Path(matrixPath);
    final Configuration fsConf = new Configuration();
    // Disable HDFS FileSystem caching so each task gets its own instance.
    fsConf.setBoolean("fs.hdfs.impl.disable.cache", true);
    final FileSystem fs = FileSystem.get(fsConf);

    // A directory indicates the matrix was produced as MR job output;
    // otherwise it is stored as a single flat file.
    if (fs.isDirectory(matrixLocation)) {
        B = readMatrixFromOutput(matrixLocation, Bh, Bw);
    } else {
        B = new double[Bh][Bw];
        readMatrixFromFile(fs, matrixLocation, B);
    }

}
 
Example 2
Project: hdfs-shell   File: ContextCommands.java   Source Code and License Vote up 6 votes
@CliCommand(value = "cd", help = "Changes current dir")
public String cd(@CliOption(key = {""}, help = "cd [<path>]") String newDir) {
    // No argument behaves like a plain "cd": jump to the home directory.
    if (StringUtils.isEmpty(newDir)) {
        newDir = getHomeDir();
    }

    // Absolute paths are taken as-is; relative ones resolve against the cwd.
    final Path target = newDir.startsWith("/")
            ? new Path(newDir)
            : new Path(getCurrentDir(), newDir);
    try {
        final FileSystem fs = getFileSystem();
        if (!fs.exists(target) || !fs.isDirectory(target)) {
            return "-shell: cd: " + newDir + " No such file or directory";
        }
        currentDir = target.toUri().getPath();
    } catch (Exception e) {
        return "Change directory failed! " + e.getMessage();
    }
    return "";
}
 
Example 3
Project: dremio-oss   File: GlobalDictionaryBuilder.java   Source Code and License Vote up 6 votes
/**
 * Command-line entry point: builds (or locates) the global dictionaries for
 * the parquet table directory given as {@code args[0]} and prints the
 * contents of each column's dictionary to stdout.
 */
public static void main(String []args) {
  try (final BufferAllocator bufferAllocator = new RootAllocator(SabotConfig.getMaxDirectMemory())) {
    final Path tableDir  = new Path(args[0]);
    final FileSystem fs = tableDir.getFileSystem(new Configuration());
    if (fs.exists(tableDir) && fs.isDirectory(tableDir)) {
      // Map of parquet column -> dictionary file written for that column.
      Map<ColumnDescriptor, Path> dictionaryEncodedColumns = createGlobalDictionaries(fs, tableDir, bufferAllocator).getColumnsToDictionaryFiles();
      long version = getDictionaryVersion(fs, tableDir);
      Path dictionaryRootDir = getDictionaryVersionedRootPath(fs, tableDir, version);
      for (ColumnDescriptor columnDescriptor: dictionaryEncodedColumns.keySet()) {
        final VectorContainer data = readDictionary(fs, dictionaryRootDir, columnDescriptor, bufferAllocator);
        // BUGFIX: the original message never closed the '[' bracket.
        System.out.println("Dictionary for column [" + columnDescriptor.toString() + "] size " + data.getRecordCount());
        BatchPrinter.printBatch(data);
        data.clear(); // release the vectors backing this dictionary batch
      }
    }
  } catch (IOException ioe) {
    logger.error("Failed ", ioe);
  }
}
 
Example 4
Project: dremio-oss   File: ParquetFormatPlugin.java   Source Code and License Vote up 6 votes
/**
 * Check if any columns are dictionary encoded by looking up for .dict files.
 *
 * @param fs filesystem
 * @param selectionRoot root of table
 * @param batchSchema schema for this parquet table
 * @return the encoded-column descriptor, or null when no dictionary exists
 *         or the scan could not be completed
 */
public static DictionaryEncodedColumns scanForDictionaryEncodedColumns(FileSystem fs, String selectionRoot, BatchSchema batchSchema) {
  try {
    Path root = new Path(selectionRoot);
    // A file selection points at a single parquet file; dictionaries live
    // in its parent directory.
    if (!fs.isDirectory(root)) {
      root = root.getParent();
    }
    final long version = GlobalDictionaryBuilder.getDictionaryVersion(fs, root);
    if (version != -1) {
      final DictionaryEncodedColumns dictionaryEncodedColumns = new DictionaryEncodedColumns();
      root = GlobalDictionaryBuilder.getDictionaryVersionedRootPath(fs, root, version);
      // Collect every schema field that has a dictionary file on disk.
      final List<String> columns = Lists.newArrayList();
      for (Field field : batchSchema.getFields()) {
        if (GlobalDictionaryBuilder.getDictionaryFile(fs, root, field.getName()) != null) {
          columns.add(field.getName());
        }
      }
      if (!columns.isEmpty()) {
        dictionaryEncodedColumns.setVersion(version);
        dictionaryEncodedColumns.setRootPath(root.toString());
        dictionaryEncodedColumns.setColumnsList(columns);
        return dictionaryEncodedColumns;
      }
    }
  } catch (UnsupportedOperationException e) { // class path based filesystem doesn't support listing
    if (!ClassPathFileSystem.SCHEME.equals(fs.getUri().getScheme())) {
      throw e;
    }
  } catch (IOException ioe) {
    logger.warn(format("Failed to scan directory %s for global dictionary", selectionRoot), ioe);
  }
  return null;
}
 
Example 5
Project: ditb   File: IntegrationTestBigLinkedList.java   Source Code and License Vote up 6 votes
/**
 * Reads all key files from the configured searcher input directory into a
 * sorted set, skipping job-bookkeeping files such as "_SUCCESS".
 */
static SortedSet<byte []> readKeysToSearch(final Configuration conf)
throws IOException, InterruptedException {
  final Path keysInputDir = new Path(conf.get(SEARCHER_INPUTDIR_KEY));
  final FileSystem fs = FileSystem.get(conf);
  final SortedSet<byte []> result = new TreeSet<byte []>(Bytes.BYTES_COMPARATOR);
  if (!fs.exists(keysInputDir)) {
    throw new FileNotFoundException(keysInputDir.toString());
  }
  if (!fs.isDirectory(keysInputDir)) {
    // Single-file input is not implemented.
    throw new UnsupportedOperationException("TODO");
  }
  // Non-recursive listing: only direct children of the directory are read.
  final RemoteIterator<LocatedFileStatus> files = fs.listFiles(keysInputDir, false);
  while (files.hasNext()) {
    final LocatedFileStatus keyFileStatus = files.next();
    // Skip "_SUCCESS" and similar underscore-prefixed bookkeeping files.
    if (keyFileStatus.getPath().getName().startsWith("_")) {
      continue;
    }
    result.addAll(readFileToSearch(conf, fs, keyFileStatus));
  }
  return result;
}
 
Example 6
Project: MRNMF   File: MatrixUpdater.java   Source Code and License Vote up 5 votes
// NOTE(review): method name has a typo ("addInpuPath"); kept unchanged for
// caller compatibility.
public static void addInpuPath(Job job, Path path) throws IOException {
    FileSystem fs = path.getFileSystem(new Configuration());
    if (!fs.isDirectory(path)) {
        // Plain file: register it directly as job input.
        FileInputFormat.addInputPath(job, path);
        return;
    }
    // Directory: register only the "part-*" files of a previous job's output.
    for (Path child : FileUtil.stat2Paths(fs.listStatus(path))) {
        if (child.toString().contains("part")) {
            FileInputFormat.addInputPath(job, child);
        }
    }
}
 
Example 7
Project: hadoop-oss   File: AbstractFSContractTestBase.java   Source Code and License Vote up 5 votes
/**
 * Logs a listing of the source's parent directory at error level and
 * returns a listing of the destination side for inclusion in a failure
 * message.
 */
protected String generateAndLogErrorListing(Path src, Path dst) throws
                                                                IOException {
  final FileSystem fs = getFileSystem();
  getLog().error(
    "src dir " + ContractTestUtils.ls(fs, src.getParent()));
  String listing = ContractTestUtils.ls(fs, dst.getParent());
  if (fs.isDirectory(dst)) {
    // dst is itself a directory, so append its own contents as well
    listing = listing + "\n" + ContractTestUtils.ls(fs, dst);
  }
  return listing;
}
 
Example 8
Project: alluxio   File: HdfsAndAlluxioUtils_update.java   Source Code and License Vote up 5 votes
/**
 * Checks whether the given path in the file system is a directory.
 *
 * @param fileSystemInfo
 *            file system connection information
 * @param path
 *            path to test
 * @return true if the path is a directory; false otherwise — including
 *         when the check itself fails with an IOException
 */
public static boolean isDirectory(FileSystemInfo fileSystemInfo, String path) {
	FileSystem fs = getFileSystem(fileSystemInfo);
	Path uri = new Path(path);
	try {
		// Throws if the path does not exist (fail-fast existence check).
		pathNotExistCheck(path, fs, uri);
		return fs.isDirectory(uri);
	} catch (IOException e) {
		// NOTE(review): the error is only printed, not rethrown — the
		// caller cannot distinguish "not a directory" from "check failed".
		e.printStackTrace();
	} finally {
		// Always release the FileSystem handle obtained above.
		closeFileSystem(fs);
	}
	return false;
}
 
Example 9
Project: aliyun-maxcompute-data-collectors   File: AvroUtil.java   Source Code and License Vote up 5 votes
/**
 * Get the schema of AVRO files stored in a directory.
 *
 * @param path a data file, or a directory whose first non-hidden file is probed
 * @param conf Hadoop configuration used to resolve the FileSystem
 * @return the Avro schema, or null when the directory contains no data files
 * @throws IOException if the filesystem or the Avro file cannot be read
 */
public static Schema getAvroSchema(Path path, Configuration conf)
    throws IOException {
  FileSystem fs = path.getFileSystem(conf);
  Path fileToTest;
  if (fs.isDirectory(path)) {
    FileStatus[] fileStatuses = fs.listStatus(path, new PathFilter() {
      @Override
      public boolean accept(Path p) {
        // Ignore bookkeeping/hidden files such as "_SUCCESS" or ".part".
        String name = p.getName();
        return !name.startsWith("_") && !name.startsWith(".");
      }
    });
    if (fileStatuses.length == 0) {
      return null; // empty directory: no schema to report
    }
    fileToTest = fileStatuses[0].getPath();
  } else {
    fileToTest = path;
  }

  SeekableInput input = new FsInput(fileToTest, conf);
  DatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>();
  // BUGFIX: the original only closed the reader on the success path, leaking
  // the underlying input stream if getSchema() threw. try-with-resources
  // guarantees the reader (and the stream it wraps) is always closed.
  try (FileReader<GenericRecord> fileReader =
      DataFileReader.openReader(input, reader)) {
    return fileReader.getSchema();
  }
}
 
Example 10
Project: hadoop   File: AbstractFSContractTestBase.java   Source Code and License Vote up 5 votes
/**
 * Logs the source parent's directory listing at error level, then builds
 * and returns the destination-side listing for diagnostics.
 */
protected String generateAndLogErrorListing(Path src, Path dst) throws
                                                                IOException {
  FileSystem fs = getFileSystem();
  getLog().error(
    "src dir " + ContractTestUtils.ls(fs, src.getParent()));
  StringBuilder destListing = new StringBuilder(ContractTestUtils.ls(fs, dst.getParent()));
  if (fs.isDirectory(dst)) {
    //include the dir into the listing
    destListing.append('\n').append(ContractTestUtils.ls(fs, dst));
  }
  return destListing.toString();
}
 
Example 11
Project: dremio-oss   File: GlobalDictionaryBuilder.java   Source Code and License Vote up 5 votes
/**
 * Resolves the versioned dictionary root directory beneath a table directory.
 *
 * @return the directory path, or null when the version is -1 or the
 *         directory does not exist
 */
public static Path getDictionaryVersionedRootPath(FileSystem fs, Path tableDir, long version) throws IOException {
  final Path dictionaryRootDir = new Path(tableDir, dictionaryRootDirName(version));
  final boolean present = version != -1
      && fs.exists(dictionaryRootDir)
      && fs.isDirectory(dictionaryRootDir);
  return present ? dictionaryRootDir : null;
}
 
Example 12
Project: ditb   File: ExportSnapshot.java   Source Code and License Vote up 5 votes
/**
 * Set path ownership, optionally recursing into directory children first.
 * A null user or group leaves that attribute unchanged by the FileSystem.
 */
private void setOwner(final FileSystem fs, final Path path, final String user,
    final String group, final boolean recursive) throws IOException {
  if (user == null && group == null) {
    return; // nothing to change
  }
  if (recursive && fs.isDirectory(path)) {
    // Depth-first: fix the children before the directory itself.
    for (FileStatus child : fs.listStatus(path)) {
      setOwner(fs, child.getPath(), user, group, recursive);
    }
  }
  fs.setOwner(path, user, group);
}
 
Example 13
Project: ditb   File: ExportSnapshot.java   Source Code and License Vote up 5 votes
/**
 * Set path permission, optionally recursing into directory children first.
 * A non-positive mode is treated as "leave permissions alone".
 */
private void setPermission(final FileSystem fs, final Path path, final short filesMode,
    final boolean recursive) throws IOException {
  if (filesMode <= 0) {
    return; // non-positive mode: no-op
  }
  final FsPermission perm = new FsPermission(filesMode);
  if (recursive && fs.isDirectory(path)) {
    // Depth-first: apply to children before the directory itself.
    for (FileStatus child : fs.listStatus(path)) {
      setPermission(fs, child.getPath(), filesMode, recursive);
    }
  }
  fs.setPermission(path, perm);
}
 
Example 14
Project: rainbow   File: FileGenerator.java   Source Code and License Vote up 4 votes
/**
 * Generate a batch of files on HDFS, one block per file, filled with a
 * repeating 1 MB buffer.
 *
 * @param blockSize in bytes
 * @param blockCount number of files (one block each) to create
 * @param dataPath target directory; must not exist yet, it is created here
 * @param progressListener receives progress in [0, 1)
 * @return the FileStatus of every generated file
 * @throws IOException if the data path already exists or HDFS access fails
 */
public List<FileStatus> generateHDFSFile (final long blockSize, final long blockCount,
                                          String dataPath, ProgressListener progressListener) throws IOException
{
    Configuration conf = new Configuration();

    conf.setBoolean("dfs.support.append", true);
    FileSystem fs = FileSystem.get(URI.create("hdfs://" +
            ConfigFactory.Instance().getProperty("namenode.host") + ":" +
            ConfigFactory.Instance().getProperty("namenode.port")+ dataPath), conf);

    Path path = new Path(dataPath);
    // BUGFIX: the original condition was (exists || !isDirectory), which is
    // always true for a non-existent path (isDirectory returns false), so
    // the method could never proceed. We require the target to not exist;
    // it is created by mkdirs below.
    if (fs.exists(path))
    {
        throw new IOException("data path exists or is not a directory");
    }

    fs.mkdirs(new Path(dataPath));
    List<FileStatus> statuses = new ArrayList<FileStatus>();
    // Ensure the path ends with a separator so file names can be appended.
    if (dataPath.charAt(dataPath.length()-1) != '/' && dataPath.charAt(dataPath.length()-1) != '\\')
    {
        dataPath += "/";
    }

    // 1 MB buffer
    final int bufferSize = 1 * 1024 * 1024;
    byte[] buffer = new byte[bufferSize];
    buffer[0] = 1;
    buffer[1] = 2;
    buffer[2] = 3;

    // number of buffers to write for each block
    long n = blockSize / bufferSize;

    for (int i = 0; i < blockCount; ++i)
    {
        // one block per file; try-with-resources guarantees the stream is
        // closed even when a write fails mid-file
        Path filePath = new Path(dataPath + i);
        try (FSDataOutputStream out =
                fs.create(filePath, false, bufferSize, (short) 1, n * bufferSize))
        {
            for (int j = 0; j < n; ++j)
            {
                out.write(buffer);
            }
            out.flush();
        }
        statuses.add(fs.getFileStatus(filePath));
        progressListener.setPercentage(1.0 * i / blockCount);
    }
    return statuses;
}
 
Example 15
Project: ditb   File: TestMetaMigrationConvertingToPB.java   Source Code and License Vote up 4 votes
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Start up our mini cluster on top of an 0.92 root.dir that has data from
  // a 0.92 hbase run -- it has a table with 100 rows in it  -- and see if
  // we can migrate from 0.92
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.startMiniDFSCluster(1);
  Path testdir = TEST_UTIL.getDataTestDir("TestMetaMigrationConvertToPB");
  // Untar our test dir.
  File untar = untar(new File(testdir.toString()));
  // Now copy the untar up into hdfs so when we start hbase, we'll run from it.
  Configuration conf = TEST_UTIL.getConfiguration();
  FsShell shell = new FsShell(conf);
  FileSystem fs = FileSystem.get(conf);
  // find where hbase will root itself, so we can copy filesystem there
  Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
  if (!fs.isDirectory(hbaseRootDir.getParent())) {
    // mkdir at first
    fs.mkdirs(hbaseRootDir.getParent());
  }
  doFsCommand(shell,
    new String [] {"-put", untar.toURI().toString(), hbaseRootDir.toString()});

  // windows fix: tgz file has hbase:meta directory renamed as -META- since the original
  // is an illegal name under windows. So we rename it back.
  // See src/test/data//TestMetaMigrationConvertingToPB.README and
  // https://issues.apache.org/jira/browse/HBASE-6821
  doFsCommand(shell, new String [] {"-mv", new Path(hbaseRootDir, "-META-").toString(),
    new Path(hbaseRootDir, ".META.").toString()});
  // See whats in minihdfs.
  doFsCommand(shell, new String [] {"-lsr", "/"});

  //upgrade to namespace as well
  Configuration toolConf = TEST_UTIL.getConfiguration();
  // NOTE(review): the HBASE_DIR property is set on 'conf', not 'toolConf' —
  // presumably getConfiguration() returns the same instance both times, so
  // this works; confirm before refactoring.
  conf.set(HConstants.HBASE_DIR, TEST_UTIL.getDefaultRootDirPath().toString());
  ToolRunner.run(toolConf, new NamespaceUpgrade(), new String[]{"--upgrade"});

  TEST_UTIL.startMiniHBaseCluster(1, 1);
  // Assert we are running against the copied-up filesystem.  The copied-up
  // rootdir should have had a table named 'TestTable' in it.  Assert it
  // present.
  HTable t = new HTable(TEST_UTIL.getConfiguration(), TESTTABLE);
  ResultScanner scanner = t.getScanner(new Scan());
  int count = 0;
  while (scanner.next() != null) {
    count++;
  }
  // Assert that we find all 100 rows that are in the data we loaded.  If
  // so then we must have migrated it from 0.90 to 0.92.
  Assert.assertEquals(ROW_COUNT, count);
  scanner.close();
  t.close();
}
 
Example 16
Project: alluxio   File: HdfsAndAlluxioUtils_update.java   Source Code and License Vote up 2 votes
/**
 * Verifies that the given path in the distributed file system is a
 * directory; throws a RuntimeException otherwise.
 *
 * @param uri
 *            string form of the path, used in the exception message
 * @param fs
 *            FileSystem to query
 * @param path
 *            path to check
 * @throws IOException if the directory check itself fails
 */
private static void pathNotDirectoryCheck(String uri, FileSystem fs, Path path) throws IOException {
	if (!fs.isDirectory(path)) {
		throw new RuntimeException(NOT_DIR_EXECEPTION_MSG + uri);
	}
}