Java Code Examples for org.apache.hadoop.fs.FileSystem.isFile()

The following are Java code examples showing how to use isFile() of the org.apache.hadoop.fs.FileSystem class. You can vote up the examples you like. Your votes will be used in our system to get more good examples.
Example 1
Project: Transwarp-Sample-Code   File: HDFSSequenceFile.java   Source Code and License Vote up 8 votes
/**
 * Opens a SequenceFile writer on {@code dstPath}, appending to an existing
 * file when the cluster supports append, otherwise creating a new file.
 *
 * @param dstPath  destination file path
 * @param codeC    compression codec for the sequence file
 * @param compType compression type (e.g. BLOCK or RECORD)
 * @param conf     Hadoop configuration; "hdfs.append.support" enables append
 * @param hdfs     file system to write to
 * @throws IOException if the stream or writer cannot be opened
 */
protected void open(Path dstPath, CompressionCodec codeC,
    CompressionType compType, Configuration conf, FileSystem hdfs)
        throws IOException {
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      // Write through the raw local file system (skips checksum files).
      hdfs = ((LocalFileSystem) hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  // Append only when explicitly enabled AND the target already exists as a
  // regular file; otherwise create (the redundant "== true" was dropped).
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    outStream = hdfs.append(dstPath);
  } else {
    outStream = hdfs.create(dstPath);
  }
  writer = SequenceFile.createWriter(conf, outStream,
      serializer.getKeyClass(), serializer.getValueClass(), compType, codeC);

  // Must register so the superclass can watch for replication issues.
  registerCurrentStream(outStream, hdfs, dstPath);
}
 
Example 2
Project: flume-release-1.7.0   File: HDFSSequenceFile.java   Source Code and License Vote up 7 votes
/**
 * Opens a SequenceFile writer on {@code dstPath}, appending to an existing
 * file when append support is enabled, otherwise creating a new file.
 *
 * @param dstPath  destination file path
 * @param codeC    compression codec for the sequence file
 * @param compType compression type (e.g. BLOCK or RECORD)
 * @param conf     Hadoop configuration; "hdfs.append.support" enables append
 * @param hdfs     file system to write to
 * @throws IOException if the stream or writer cannot be opened
 */
protected void open(Path dstPath, CompressionCodec codeC,
    CompressionType compType, Configuration conf, FileSystem hdfs)
        throws IOException {
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      // Write through the raw local file system (skips checksum files).
      hdfs = ((LocalFileSystem) hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  // Append only when explicitly enabled AND the target already exists as a
  // regular file (redundant "== true" removed).
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    outStream = hdfs.append(dstPath);
  } else {
    outStream = hdfs.create(dstPath);
  }
  writer = SequenceFile.createWriter(conf, outStream,
      serializer.getKeyClass(), serializer.getValueClass(), compType, codeC);

  // Must register so the superclass can watch for replication issues.
  registerCurrentStream(outStream, hdfs, dstPath);
}
 
Example 3
Project: rainbow   File: ParquetMetadataStat.java   Source Code and License Vote up 6 votes
/**
 * Gathers Parquet metadata for every regular file directly under a
 * directory on HDFS.
 *
 * @param nameNode the hostname of the HDFS namenode
 * @param hdfsPort the port of the HDFS namenode, usually 9000 or 8020
 * @param dirPath  the path of the directory containing the parquet files,
 *                 beginning with /, e.g. /msra/column/order/parquet/
 * @throws IOException if the file system cannot be contacted
 * @throws MetadataException if no parquet files were collected
 */
public ParquetMetadataStat(String nameNode, int hdfsPort, String dirPath) throws IOException, MetadataException
{
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://" + nameNode + ":" + hdfsPort), conf);
    Path dir = new Path(dirPath);
    if (!fs.isFile(dir))
    {
        // Collect metadata for each direct child that is a regular file.
        for (FileStatus entry : fs.listStatus(dir))
        {
            // isDir() is used (not isDirectory()) for HDFS 1.x compatibility.
            if (!entry.isDir())
            {
                this.fileMetaDataList.add(new ParquetFileMetadata(conf, entry.getPath()));
            }
        }
    }
    if (this.fileMetaDataList.size() == 0)
    {
        throw new MetadataException("fileMetaDataList is empty, path is not a dir.");
    }
    // Schema is taken from the first file; presumably all files share it —
    // TODO(review): confirm that assumption holds for the data sets used.
    this.fields = this.fileMetaDataList.get(0).getFileMetaData().getSchema().getFields();
    this.columnCount = this.fileMetaDataList.get(0).getFileMetaData().getSchema().getFieldCount();
}
 
Example 4
Project: hadoop   File: SimpleCopyListing.java   Source Code and License Vote up 6 votes
/**
 * Computes the root path of a source entry relative to which its target
 * location is derived.
 *
 * @param sourceStatus status of the source path being listed
 * @param options      DistCp options (target path, sync/overwrite flags)
 * @return the source path itself or its parent, depending on the case
 * @throws IOException if the target file system cannot be queried
 */
private Path computeSourceRootPath(FileStatus sourceStatus,
                                   DistCpOptions options) throws IOException {

  final Path target = options.getTargetPath();
  final FileSystem targetFS = target.getFileSystem(getConf());
  final boolean targetPathExists = options.getTargetPathExists();
  final boolean singleSource = options.getSourcePaths().size() == 1;

  // Exactly one source and it is not a directory.
  if (singleSource && !sourceStatus.isDirectory()) {
    return (targetFS.isFile(target) || !targetPathExists)
        ? sourceStatus.getPath()
        : sourceStatus.getPath().getParent();
  }

  // Sync/overwrite modes (or a lone source with a missing target) root at
  // the directory itself rather than its parent.
  final boolean specialHandling = (singleSource && !targetPathExists)
      || options.shouldSyncFolder()
      || options.shouldOverwrite();

  return (specialHandling && sourceStatus.isDirectory())
      ? sourceStatus.getPath()
      : sourceStatus.getPath().getParent();
}
 
Example 5
Project: ditb   File: HBaseFsck.java   Source Code and License Vote up 6 votes
/**
 * Recursive "ls -r" for debugging purposes: prints the path and all of its
 * descendants through the error reporter. No-op unless debug logging is on.
 *
 * @param conf   configuration used to resolve the file system
 * @param p      path to list (may be null, in which case nothing happens)
 * @param errors reporter that receives each path string
 * @throws IOException if the file system cannot be queried
 */
public static void debugLsr(Configuration conf,
    Path p, ErrorReporter errors) throws IOException {
  if (!LOG.isDebugEnabled() || p == null) {
    return;
  }
  FileSystem fs = p.getFileSystem(conf);
  if (!fs.exists(p)) {
    return; // nothing to print
  }
  errors.print(p.toString());

  if (fs.isFile(p)) {
    return; // files have no children to recurse into
  }
  if (fs.getFileStatus(p).isDirectory()) {
    for (FileStatus child : fs.listStatus(p)) {
      debugLsr(conf, child.getPath(), errors);
    }
  }
}
 
Example 6
Project: hdfs_to_cos_tools   File: CommonHdfsUtils.java   Source Code and License Vote up 5 votes
/**
 * Checks whether the given path refers to a valid HDFS file, i.e. the path
 * is well-formed and names an existing regular file.
 *
 * @param hdfsFS file system to query
 * @param hdsfFilePath path of the file to check
 * @return true if the path names an existing regular file, false otherwise
 */
public static boolean isLegalHdfsFile(FileSystem hdfsFS, String hdsfFilePath) {
    try {
        return hdfsFS.isFile(new Path(hdsfFilePath));
    } catch (IllegalArgumentException | IOException e) {
        // Malformed path or I/O failure: either way the file is not usable.
        return false;
    }
}
 
Example 7
Project: alluxio   File: HdfsAndAlluxioUtils_update.java   Source Code and License Vote up 5 votes
/**
 * Tests whether the given path refers to a regular file.
 *
 * @param fileSystemInfo file system information used to obtain the FileSystem
 * @param path path of the file to test
 * @return true if the path is a regular file; false if an I/O error occurs
 */
public static boolean isFile(FileSystemInfo fileSystemInfo, String path) {
    FileSystem fs = getFileSystem(fileSystemInfo);
    Path target = new Path(path);
    boolean result = false;
    try {
        // Throws if the path does not exist.
        pathNotExistCheck(path, fs, target);
        result = fs.isFile(target);
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        closeFileSystem(fs);
    }
    return result;
}
 
Example 8
Project: flume-release-1.7.0   File: HDFSDataStream.java   Source Code and License Vote up 5 votes
/**
 * Opens {@code dstPath} for writing, appending when the cluster supports
 * append and the file already exists; otherwise creates a new file. Fails
 * if appending but the configured serializer cannot reopen a stream.
 *
 * @param conf    Hadoop configuration; "hdfs.append.support" enables append
 * @param dstPath destination file path
 * @param hdfs    file system to write to
 * @throws IOException if the stream cannot be opened, or if appending and
 *         the serializer does not support reopen
 */
protected void doOpen(Configuration conf, Path dstPath, FileSystem hdfs) throws IOException {
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      // Write through the raw local file system (skips checksum files).
      hdfs = ((LocalFileSystem) hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }

  boolean appending = false;
  // Redundant "== true" removed.
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    outStream = hdfs.append(dstPath);
    appending = true;
  } else {
    outStream = hdfs.create(dstPath);
  }

  serializer = EventSerializerFactory.getInstance(
      serializerType, serializerContext, outStream);
  if (appending && !serializer.supportsReopen()) {
    outStream.close();
    serializer = null;
    throw new IOException("serializer (" + serializerType +
        ") does not support append");
  }

  // must call superclass to check for replication issues
  registerCurrentStream(outStream, hdfs, dstPath);

  if (appending) {
    serializer.afterReopen();
  } else {
    serializer.afterCreate();
  }
}
 
Example 9
Project: Transwarp-Sample-Code   File: HDFSDataStream.java   Source Code and License Vote up 5 votes
/**
 * Opens {@code dstPath} for writing, appending when the cluster supports
 * append and the file already exists; otherwise creates a new file. Fails
 * if appending but the configured serializer cannot reopen a stream.
 *
 * @param conf    Hadoop configuration; "hdfs.append.support" enables append
 * @param dstPath destination file path
 * @param hdfs    file system to write to
 * @throws IOException if the stream cannot be opened, or if appending and
 *         the serializer does not support reopen
 */
protected void doOpen(Configuration conf,
  Path dstPath, FileSystem hdfs) throws
  IOException {
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      // Write through the raw local file system (skips checksum files).
      hdfs = ((LocalFileSystem) hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }

  boolean appending = false;
  // Redundant "== true" removed and the split method call rejoined.
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    outStream = hdfs.append(dstPath);
    appending = true;
  } else {
    outStream = hdfs.create(dstPath);
  }

  serializer = EventSerializerFactory.getInstance(
      serializerType, serializerContext, outStream);
  if (appending && !serializer.supportsReopen()) {
    outStream.close();
    serializer = null;
    throw new IOException("serializer (" + serializerType +
        ") does not support append");
  }

  // must call superclass to check for replication issues
  registerCurrentStream(outStream, hdfs, dstPath);

  if (appending) {
    serializer.afterReopen();
  } else {
    serializer.afterCreate();
  }
}
 
Example 10
Project: aliyun-maxcompute-data-collectors   File: FilePasswordLoader.java   Source Code and License Vote up 5 votes
/**
 * Verify that the given path leads to a file that we can read.
 *
 * @param fs associated FileSystem
 * @param path path to the password file
 * @throws IOException if the path is missing or names a directory
 */
protected void verifyPath(FileSystem fs, Path path) throws IOException {
  // The file must exist ...
  if (!fs.exists(path)) {
    throw new IOException("The provided password file " + path
      + " does not exist!");
  }
  // ... and must be a regular file, not a directory.
  if (!fs.isFile(path)) {
    throw new IOException("The provided password file " + path
      + " is a directory!");
  }
}
 
Example 11
Project: hadoop   File: WasbFsck.java   Source Code and License Vote up 5 votes
/**
 * Recursively check if a given path and its child paths have colons in their
 * names. Returns true if none of them has a colon or the path does not
 * exist, and false otherwise.
 *
 * @param fs file system to query
 * @param p  root of the subtree to check (null is treated as clean)
 * @return true if the whole subtree is colon-free or absent
 * @throws IOException if the file system cannot be queried
 */
private boolean recursiveCheckChildPathName(FileSystem fs, Path p)
    throws IOException {
  if (p == null) {
    return true;
  }
  if (!fs.exists(p)) {
    System.out.println("Path " + p + " does not exist!");
    return true;
  }

  if (fs.isFile(p)) {
    // Leaf: only this name needs checking.
    if (!containsColon(p)) {
      return true;
    }
    System.out.println("Warning: file " + p + " has a colon in its name.");
    return false;
  }

  // Directory: check its own name first, then all children; a single bad
  // name anywhere makes the whole result false.
  boolean clean = true;
  if (containsColon(p)) {
    System.out.println("Warning: directory " + p
        + " has a colon in its name.");
    clean = false;
  }
  for (FileStatus child : fs.listStatus(p)) {
    if (!recursiveCheckChildPathName(fs, child.getPath())) {
      clean = false;
    }
  }
  return clean;
}
 
Example 12
Project: hadoop   File: TestCopyMapper.java   Source Code and License Vote up 5 votes
/**
 * Sets the owner/group of every regular file in {@code pathList} and opens
 * its permissions to rwx for user, group, and other.
 *
 * @param user  new owner
 * @param group new group
 * @throws IOException if ownership or permissions cannot be changed
 */
private static void changeUserGroup(String user, String group)
        throws IOException {
  FileSystem fs = cluster.getFileSystem();
  FsPermission allRwx = new FsPermission(
          FsAction.ALL, FsAction.ALL, FsAction.ALL);
  for (Path p : pathList) {
    if (fs.isFile(p)) {
      fs.setOwner(p, user, group);
      fs.setPermission(p, allRwx);
    }
  }
}
 
Example 13
Project: ditb   File: HFileV1Detector.java   Source Code and License Vote up 5 votes
/**
 * Decides whether {@code path} looks like a table directory.
 *
 * @param fs   file system to query
 * @param path candidate path
 * @return true for directories with table info (old /table/.tableinfo
 *         layout) or paths ending in ".META."; false for regular files
 * @throws IOException if the file system cannot be queried
 */
private static boolean isTableDir(final FileSystem fs, final Path path) throws IOException {
  // A regular file can never be a table directory.
  if (fs.isFile(path)) {
    return false;
  }
  // hbase:meta has no .tableinfo, so it is matched by the ".META." suffix.
  return FSTableDescriptors.getTableInfoPath(fs, path) != null
      || FSTableDescriptors.getCurrentTableInfoStatus(fs, path, false) != null
      || path.toString().endsWith(".META.");
}
 
Example 14
Project: flume-release-1.7.0   File: HDFSCompressedDataStream.java   Source Code and License Vote up 4 votes
/**
 * Opens {@code filePath} for compressed writing, appending when the cluster
 * supports append and the file already exists; otherwise creates a new
 * file. Fails if appending but the serializer cannot reopen a stream.
 *
 * @param filePath destination file path
 * @param codec    compression codec wrapping the output stream
 * @param cType    compression type
 * @throws IOException if the stream cannot be opened, or if appending and
 *         the serializer does not support reopen
 */
@Override
public void open(String filePath, CompressionCodec codec,
    CompressionType cType) throws IOException {
  Configuration conf = new Configuration();
  Path dstPath = new Path(filePath);
  FileSystem hdfs = dstPath.getFileSystem(conf);
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      // Write through the raw local file system (skips checksum files).
      hdfs = ((LocalFileSystem) hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  boolean appending = false;
  // Redundant "== true" removed.
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    fsOut = hdfs.append(dstPath);
    appending = true;
  } else {
    fsOut = hdfs.create(dstPath);
  }
  if (compressor == null) {
    compressor = CodecPool.getCompressor(codec, conf);
  }
  cmpOut = codec.createOutputStream(fsOut, compressor);
  serializer = EventSerializerFactory.getInstance(serializerType,
      serializerContext, cmpOut);
  if (appending && !serializer.supportsReopen()) {
    cmpOut.close();
    serializer = null;
    throw new IOException("serializer (" + serializerType
        + ") does not support append");
  }

  // Must register so the superclass can watch for replication issues.
  registerCurrentStream(fsOut, hdfs, dstPath);

  if (appending) {
    serializer.afterReopen();
  } else {
    serializer.afterCreate();
  }
  isFinished = false;
}
 
Example 15
Project: Transwarp-Sample-Code   File: HDFSCompressedDataStream.java   Source Code and License Vote up 4 votes
/**
 * Opens {@code filePath} for compressed writing, appending when the cluster
 * supports append and the file already exists; otherwise creates a new
 * file. Fails if appending but the serializer cannot reopen a stream.
 *
 * @param filePath destination file path
 * @param codec    compression codec wrapping the output stream
 * @param cType    compression type
 * @throws IOException if the stream cannot be opened, or if appending and
 *         the serializer does not support reopen
 */
@Override
public void open(String filePath, CompressionCodec codec,
    CompressionType cType) throws IOException {
  Configuration conf = new Configuration();
  Path dstPath = new Path(filePath);
  FileSystem hdfs = dstPath.getFileSystem(conf);
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      // Write through the raw local file system (skips checksum files).
      hdfs = ((LocalFileSystem) hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system " +
          "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  boolean appending = false;
  // Redundant "== true" removed and the split method call rejoined.
  if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
    fsOut = hdfs.append(dstPath);
    appending = true;
  } else {
    fsOut = hdfs.create(dstPath);
  }
  if (compressor == null) {
    compressor = CodecPool.getCompressor(codec, conf);
  }
  cmpOut = codec.createOutputStream(fsOut, compressor);
  serializer = EventSerializerFactory.getInstance(serializerType,
      serializerContext, cmpOut);
  if (appending && !serializer.supportsReopen()) {
    cmpOut.close();
    serializer = null;
    throw new IOException("serializer (" + serializerType
        + ") does not support append");
  }

  // Must register so the superclass can watch for replication issues.
  registerCurrentStream(fsOut, hdfs, dstPath);

  if (appending) {
    serializer.afterReopen();
  } else {
    serializer.afterCreate();
  }
  isFinished = false;
}
 
Example 16
Project: ditb   File: LoadIncrementalHFiles.java   Source Code and License Vote up 4 votes
/**
 * Iterate over the bulkDir hfiles.
 * Skip reference, HFileLink, files starting with "_".
 * Check and skip non-valid hfiles by default, or skip this validation by setting
 * 'hbase.loadincremental.validate.hfile' to false.
 */
private static <TFamily> void visitBulkHFiles(final FileSystem fs, final Path bulkDir,
    final BulkHFileVisitor<TFamily> visitor, final boolean validateHFile) throws IOException {
  if (!fs.exists(bulkDir)) {
    throw new FileNotFoundException("Bulkload dir " + bulkDir + " not found");
  }

  final FileStatus[] familyDirStatuses = fs.listStatus(bulkDir);
  if (familyDirStatuses == null) {
    throw new FileNotFoundException("No families found in " + bulkDir);
  }

  for (FileStatus familyStat : familyDirStatuses) {
    if (!familyStat.isDirectory()) {
      LOG.warn("Skipping non-directory " + familyStat.getPath());
      continue;
    }
    final Path familyDir = familyStat.getPath();
    final TFamily family = visitor.bulkFamily(familyDir.getName().getBytes());

    for (FileStatus hfileStatus : fs.listStatus(familyDir)) {
      if (!fs.isFile(hfileStatus.getPath())) {
        LOG.warn("Skipping non-file " + hfileStatus);
        continue;
      }

      final Path hfile = hfileStatus.getPath();
      final String fileName = hfile.getName();
      // Skip "_", reference, HFileLink
      if (fileName.startsWith("_")) {
        continue;
      }
      if (StoreFileInfo.isReference(fileName)) {
        LOG.warn("Skipping reference " + fileName);
        continue;
      }
      if (HFileLink.isHFileLink(fileName)) {
        LOG.warn("Skipping HFileLink " + fileName);
        continue;
      }

      // Validate HFile Format if needed
      if (validateHFile) {
        try {
          if (!HFile.isHFileFormat(fs, hfile)) {
            LOG.warn("the file " + hfile + " doesn't seems to be an hfile. skipping");
            continue;
          }
        } catch (FileNotFoundException e) {
          LOG.warn("the file " + hfile + " was removed");
          continue;
        }
      }

      visitor.bulkHFile(family, hfileStatus);
    }
  }
}
 
Example 17
Project: ditb   File: HFileV1Detector.java   Source Code and License Vote up 4 votes
/**
 * Decides whether {@code path} looks like a region directory, i.e. a
 * directory containing a region-info file.
 *
 * @param fs   file system to query
 * @param path candidate path
 * @return true if the path is a directory holding a region-info file
 * @throws IOException if the file system cannot be queried
 */
private static boolean isRegionDir(final FileSystem fs, final Path path) throws IOException {
  // Regular files are never region directories.
  if (fs.isFile(path)) {
    return false;
  }
  return fs.exists(new Path(path, HRegionFileSystem.REGION_INFO_FILE));
}
 
Example 18
Project: alluxio   File: HdfsAndAlluxioUtils_update.java   Source Code and License Vote up 2 votes
/**
 * Ensures the given path refers to a regular file in the distributed file
 * system, throwing otherwise.
 *
 * @param uri  uri string included in the error message
 * @param fs   FileSystem to query
 * @param path path to check
 * @throws IOException if the file system cannot be queried
 */
private static void pathNotFileCheck(String uri, FileSystem fs, Path path) throws IOException {
	boolean isRegularFile = fs.isFile(path);
	if (!isRegularFile) {
		throw new RuntimeException(NOT_FILE_EXECEPTION_MSG + uri);
	}
}