Java Code Examples for org.apache.hadoop.hdfs.protocol.HdfsFileStatus#isDir()

The following examples show how to use org.apache.hadoop.hdfs.protocol.HdfsFileStatus#isDir(). You can vote up the examples you like or vote down the ones you don't like, and you can go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: NamenodeFsck.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Lazily create or locate the /lost+found directory used by fsck to move
 * corrupt files into. On success sets {@code lostFound} to the path and
 * {@code lfInitedOk} to true; on any failure sets {@code lfInitedOk} to
 * false and flags {@code internalError}.
 *
 * @param dfs client used to stat and, if needed, create /lost+found
 */
private void lostFoundInit(DFSClient dfs) {
  lfInited = true;
  try {
    String lfName = "/lost+found";
    
    final HdfsFileStatus lfStatus = dfs.getFileInfo(lfName);
    if (lfStatus == null) { // does not exist yet: create it
      lfInitedOk = dfs.mkdirs(lfName, null, true);
      lostFound = lfName;
    } else if (!lfStatus.isDir()) { // exists but not a directory
      LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
      lfInitedOk = false;
    }  else { // exists and is a directory
      lostFound = lfName;
      lfInitedOk = true;
    }
  }  catch (Exception e) {
    // Log through the class logger rather than printStackTrace() so the
    // failure (with its stack trace) lands in the service log, not stderr.
    LOG.warn("Cannot initialize /lost+found", e);
    lfInitedOk = false;
  }
  if (lostFound == null) {
    LOG.warn("Cannot initialize /lost+found .");
    lfInitedOk = false;
    internalError = true;
  }
}
 
Example 2
Source File: ListPathsServlet.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Serialize one file-system node as an XML element.
 * Directories are emitted as a {@code <directory>} tag and files as a
 * {@code <file>} tag; files additionally carry size, replication and
 * block-size attributes. Path, times, permission, owner and group are
 * written for both kinds.
 */
static void writeInfo(final Path fullpath, final HdfsFileStatus i,
    final XMLOutputter doc) throws IOException {
  final boolean isDirectory = i.isDir();
  final SimpleDateFormat dateFormat = df.get();

  doc.startTag(isDirectory ? "directory" : "file");
  doc.attribute("path", fullpath.toUri().getPath());
  doc.attribute("modified",
      dateFormat.format(new Date(i.getModificationTime())));
  doc.attribute("accesstime",
      dateFormat.format(new Date(i.getAccessTime())));
  if (!isDirectory) {
    // File-only attributes.
    doc.attribute("size", String.valueOf(i.getLen()));
    doc.attribute("replication", String.valueOf(i.getReplication()));
    doc.attribute("blocksize", String.valueOf(i.getBlockSize()));
  }
  // Unix-style permission string, e.g. "drwxr-xr-x" or "-rw-r--r--".
  doc.attribute("permission", (isDirectory ? "d" : "-") + i.getPermission());
  doc.attribute("owner", i.getOwner());
  doc.attribute("group", i.getGroup());
  doc.endTag();
}
 
Example 3
Source File: ListPathsServlet.java    From RDFS with Apache License 2.0 6 votes vote down vote up
/**
 * Serialize one file-system node as an XML element.
 * Directories are emitted as a {@code <directory>} tag and files as a
 * {@code <file>} tag; files additionally carry size, replication and
 * block-size attributes. Path, times, permission, owner and group are
 * written for both kinds.
 */
static void writeInfo(String parent, HdfsFileStatus i, XMLOutputter doc) throws IOException {
  final boolean isDirectory = i.isDir();
  final SimpleDateFormat dateFormat = df.get();

  doc.startTag(isDirectory ? "directory" : "file");
  // Resolve the entry's short name against its parent directory.
  doc.attribute("path", i.getFullPath(new Path(parent)).toUri().getPath());
  doc.attribute("modified",
      dateFormat.format(new Date(i.getModificationTime())));
  doc.attribute("accesstime",
      dateFormat.format(new Date(i.getAccessTime())));
  if (!isDirectory) {
    // File-only attributes.
    doc.attribute("size", String.valueOf(i.getLen()));
    doc.attribute("replication", String.valueOf(i.getReplication()));
    doc.attribute("blocksize", String.valueOf(i.getBlockSize()));
  }
  // Unix-style permission string, e.g. "drwxr-xr-x" or "-rw-r--r--".
  doc.attribute("permission", (isDirectory ? "d" : "-") + i.getPermission());
  doc.attribute("owner", i.getOwner());
  doc.attribute("group", i.getGroup());
  doc.endTag();
}
 
Example 4
Source File: TestDFSUpgradeFromImage.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Recursively trigger lease recovery on every file under {@code path}.
 * Files get {@code recoverLease} called directly; directories are paged
 * through with {@code listPaths} and each child is recursed into.
 *
 * @param dfs  client used to stat, list and recover
 * @param path root of the subtree to recover
 * @throws IOException if any namenode RPC fails
 */
static void recoverAllLeases(DFSClient dfs, 
    Path path) throws IOException {
  String pathStr = path.toString();
  HdfsFileStatus status = dfs.getFileInfo(pathStr);
  if (status == null) {
    // Path does not exist (getFileInfo returns null); nothing to recover.
    // Without this guard, status.isDir() below throws NullPointerException.
    return;
  }
  if (!status.isDir()) {
    dfs.recoverLease(pathStr);
    return;
  }
  // Page through the directory listing: EMPTY_NAME starts at the
  // beginning and getLastName() is the cursor for the next page.
  byte prev[] = HdfsFileStatus.EMPTY_NAME;
  DirectoryListing dirList;
  do {
    dirList = dfs.listPaths(pathStr, prev);
    HdfsFileStatus files[] = dirList.getPartialListing();
    for (HdfsFileStatus f : files) {
      recoverAllLeases(dfs, f.getFullPath(path));
    }
    prev = dirList.getLastName();
  } while (dirList.hasMore());
}
 
Example 5
Source File: TestDFSUpgradeFromImage.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Recursively trigger lease recovery on every file under {@code path}.
 * Files get {@code recoverLease} called directly; directories are paged
 * through with {@code listPaths} and each child is recursed into.
 *
 * @param dfs  client used to stat, list and recover
 * @param path root of the subtree to recover
 * @throws IOException if any namenode RPC fails
 */
static void recoverAllLeases(DFSClient dfs, 
    Path path) throws IOException {
  String pathStr = path.toString();
  HdfsFileStatus status = dfs.getFileInfo(pathStr);
  if (status == null) {
    // Path does not exist (getFileInfo returns null); nothing to recover.
    // Without this guard, status.isDir() below throws NullPointerException.
    return;
  }
  if (!status.isDir()) {
    dfs.recoverLease(pathStr);
    return;
  }
  // Page through the directory listing: EMPTY_NAME starts at the
  // beginning and getLastName() is the cursor for the next page.
  byte prev[] = HdfsFileStatus.EMPTY_NAME;
  DirectoryListing dirList;
  do {
    dirList = dfs.listPaths(pathStr, prev);
    HdfsFileStatus files[] = dirList.getPartialListing();
    for (HdfsFileStatus f : files) {
      recoverAllLeases(dfs, f.getFullPath(path));
    }
    prev = dirList.getLastName();
  } while (dirList.hasMore());
}
 
Example 6
Source File: Nfs3Utils.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Translate an HDFS file status into NFSv3 file attributes.
 * Symlink classification takes precedence over directory/regular file.
 * Directories report nlink = childCount + 2 ("." and "..") and a
 * synthesized size; files report nlink = 1 and their actual length.
 */
public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
    HdfsFileStatus fs, IdMappingServiceProvider iug) {
  /**
   * Some 32bit Linux client has problem with 64bit fileId: it seems the 32bit
   * client takes only the lower 32bit of the fileId and treats it as signed
   * int. When the 32th bit is 1, the client considers it invalid.
   */
  final NfsFileType fileType;
  if (fs.isSymlink()) {
    fileType = NfsFileType.NFSLNK;
  } else if (fs.isDir()) {
    fileType = NfsFileType.NFSDIR;
  } else {
    fileType = NfsFileType.NFSREG;
  }

  final boolean isDirType = (fileType == NfsFileType.NFSDIR);
  final int nlink = isDirType ? fs.getChildrenNum() + 2 : 1;
  final long size =
      isDirType ? getDirSize(fs.getChildrenNum()) : fs.getLen();

  return new Nfs3FileAttributes(fileType, nlink,
      fs.getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()),
      iug.getGidAllowingUnknown(fs.getGroup()), size, 0 /* fsid */,
      fs.getFileId(), fs.getModificationTime(), fs.getAccessTime(),
      new Nfs3FileAttributes.Specdata3());
}
 
Example 7
Source File: Nfs3Utils.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Translate an HDFS file status into NFSv3 file attributes.
 * Symlink classification takes precedence over directory/regular file.
 * Directories report nlink = childCount + 2 ("." and "..") and a
 * synthesized size; files report nlink = 1 and their actual length.
 */
public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
    HdfsFileStatus fs, IdMappingServiceProvider iug) {
  /**
   * Some 32bit Linux client has problem with 64bit fileId: it seems the 32bit
   * client takes only the lower 32bit of the fileId and treats it as signed
   * int. When the 32th bit is 1, the client considers it invalid.
   */
  final NfsFileType fileType;
  if (fs.isSymlink()) {
    fileType = NfsFileType.NFSLNK;
  } else if (fs.isDir()) {
    fileType = NfsFileType.NFSDIR;
  } else {
    fileType = NfsFileType.NFSREG;
  }

  final boolean isDirType = (fileType == NfsFileType.NFSDIR);
  final int nlink = isDirType ? fs.getChildrenNum() + 2 : 1;
  final long size =
      isDirType ? getDirSize(fs.getChildrenNum()) : fs.getLen();

  return new Nfs3FileAttributes(fileType, nlink,
      fs.getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()),
      iug.getGidAllowingUnknown(fs.getGroup()), size, 0 /* fsid */,
      fs.getFileId(), fs.getModificationTime(), fs.getAccessTime(),
      new Nfs3FileAttributes.Specdata3());
}
 
Example 8
Source File: NamenodeFsck.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Lazily create or locate the /lost+found directory used by fsck to move
 * corrupt files into. On success sets {@code lostFound} to the path and
 * {@code lfInitedOk} to true; on any failure sets {@code lfInitedOk} to
 * false and flags {@code internalError}.
 *
 * @param dfs client used to stat and, if needed, create /lost+found
 */
private void lostFoundInit(DFSClient dfs) {
  lfInited = true;
  try {
    String lfName = "/lost+found";
    
    final HdfsFileStatus lfStatus = dfs.getFileInfo(lfName);
    if (lfStatus == null) { // does not exist yet: create it
      lfInitedOk = dfs.mkdirs(lfName, null, true);
      lostFound = lfName;
    } else if (!lfStatus.isDir()) { // exists but not a directory
      LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
      lfInitedOk = false;
    }  else { // exists and is a directory
      lostFound = lfName;
      lfInitedOk = true;
    }
  }  catch (Exception e) {
    // Log through the class logger rather than printStackTrace() so the
    // failure (with its stack trace) lands in the service log, not stderr.
    LOG.warn("Cannot initialize /lost+found", e);
    lfInitedOk = false;
  }
  if (lostFound == null) {
    LOG.warn("Cannot initialize /lost+found .");
    lfInitedOk = false;
    internalError = true;
  }
}
 
Example 9
Source File: ListPathsServlet.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Serialize one file-system node as an XML element.
 * Directories are emitted as a {@code <directory>} tag and files as a
 * {@code <file>} tag; files additionally carry size, replication and
 * block-size attributes. Path, times, permission, owner and group are
 * written for both kinds.
 */
static void writeInfo(final Path fullpath, final HdfsFileStatus i,
    final XMLOutputter doc) throws IOException {
  final boolean isDirectory = i.isDir();
  final SimpleDateFormat dateFormat = df.get();

  doc.startTag(isDirectory ? "directory" : "file");
  doc.attribute("path", fullpath.toUri().getPath());
  doc.attribute("modified",
      dateFormat.format(new Date(i.getModificationTime())));
  doc.attribute("accesstime",
      dateFormat.format(new Date(i.getAccessTime())));
  if (!isDirectory) {
    // File-only attributes.
    doc.attribute("size", String.valueOf(i.getLen()));
    doc.attribute("replication", String.valueOf(i.getReplication()));
    doc.attribute("blocksize", String.valueOf(i.getBlockSize()));
  }
  // Unix-style permission string, e.g. "drwxr-xr-x" or "-rw-r--r--".
  doc.attribute("permission", (isDirectory ? "d" : "-") + i.getPermission());
  doc.attribute("owner", i.getOwner());
  doc.attribute("group", i.getGroup());
  doc.endTag();
}
 
Example 10
Source File: Nfs3Utils.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Build weak-cache-consistency (WCC) attributes for the file identified
 * by {@code fileIdPath}: size plus mtime/ctime.
 *
 * @return the WccAttr, or null when the file does not exist
 * @throws IOException if the status lookup fails
 */
public static WccAttr getWccAttr(DFSClient client, String fileIdPath)
    throws IOException {
  final HdfsFileStatus fstat = getFileStatus(client, fileIdPath);
  if (fstat == null) {
    return null;
  }

  // Directories report a synthesized size; files report actual length.
  final long size;
  if (fstat.isDir()) {
    size = getDirSize(fstat.getChildrenNum());
  } else {
    size = fstat.getLen();
  }
  // ctime is approximated with the HDFS modification time.
  final NfsTime mtime = new NfsTime(fstat.getModificationTime());
  final NfsTime ctime = new NfsTime(fstat.getModificationTime());
  return new WccAttr(size, mtime, ctime);
}
 
Example 11
Source File: DFSClient.java    From RDFS with Apache License 2.0 5 votes vote down vote up
/**
 * Convert an HdfsFileStatus and its block locations to a LocatedFileStatus.
 *
 * @param stat an HdfsFileStatus (may be null)
 * @param locs the file's block locations
 * @param src parent path in string representation
 * @return a LocatedFileStatus, or null when {@code stat} is null
 */
private static LocatedFileStatus toLocatedFileStatus(
    HdfsFileStatus stat, LocatedBlocks locs, String src) {
  if (stat == null) {
    return null;
  }
  // Resolve the entry's short name against its parent to get the full path.
  final Path fullPath = stat.getFullPath(new Path(src));
  return new LocatedFileStatus(stat.getLen(), stat.isDir(),
      stat.getReplication(), stat.getBlockSize(),
      stat.getModificationTime(), stat.getAccessTime(),
      stat.getPermission(), stat.getOwner(), stat.getGroup(),
      fullPath, DFSUtil.locatedBlocks2Locations(locs));
}
 
Example 12
Source File: DFSClient.java    From RDFS with Apache License 2.0 5 votes vote down vote up
/**
 * Convert an HdfsFileStatus to a FileStatus.
 *
 * @param stat an HdfsFileStatus (may be null)
 * @param src parent path in string representation
 * @return a FileStatus object, or null when {@code stat} is null
 */
private static FileStatus toFileStatus(HdfsFileStatus stat, String src) {
  if (stat == null) {
    return null;
  }
  // Resolve the entry's short name against its parent to get the full path.
  final Path fullPath = stat.getFullPath(new Path(src));
  return new FileStatus(stat.getLen(), stat.isDir(), stat.getReplication(),
      stat.getBlockSize(), stat.getModificationTime(), stat.getAccessTime(),
      stat.getPermission(), stat.getOwner(), stat.getGroup(), fullPath);
}
 
Example 13
Source File: TestStorageMover.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Walk the namespace depth-first from {@code status}, verifying every
 * regular file via {@code verifyFile}; symlinks are skipped.
 */
private void verifyRecursively(final Path parent,
    final HdfsFileStatus status) throws Exception {
  if (!status.isDir()) {
    if (!status.isSymlink()) { // a regular file
      verifyFile(parent, status, null);
    }
    return;
  }
  // Directory: resolve its full path ("/" when it is the root) and recurse.
  final Path fullPath = (parent == null)
      ? new Path("/") : status.getFullPath(parent);
  final DirectoryListing children = dfs.getClient().listPaths(
      fullPath.toString(), HdfsFileStatus.EMPTY_NAME, true);
  for (HdfsFileStatus child : children.getPartialListing()) {
    verifyRecursively(fullPath, child);
  }
}
 
Example 14
Source File: TestJsonUtil.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Convert an HdfsFileStatus into a generic FileStatus rooted at
 * {@code parent}; the symlink target is carried over when present.
 */
static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
  final Path symlink = f.isSymlink() ? new Path(f.getSymlink()) : null;
  final Path fullPath = new Path(f.getFullName(parent));
  return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
      f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
      f.getPermission(), f.getOwner(), f.getGroup(), symlink, fullPath);
}
 
Example 15
Source File: PBHelper.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Convert an HdfsFileStatus into its protobuf wire representation.
 * Returns null for null input. The symlink target, file-encryption info
 * and block locations are attached only when present on the status.
 */
public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
  if (fs == null) {
    return null;
  }
  // Directory classification wins over symlink here; plain file otherwise.
  final FileType fType;
  if (fs.isDir()) {
    fType = FileType.IS_DIR;
  } else if (fs.isSymlink()) {
    fType = FileType.IS_SYMLINK;
  } else {
    fType = FileType.IS_FILE;
  }

  HdfsFileStatusProto.Builder builder = HdfsFileStatusProto.newBuilder()
      .setLength(fs.getLen())
      .setFileType(fType)
      .setBlockReplication(fs.getReplication())
      .setBlocksize(fs.getBlockSize())
      .setModificationTime(fs.getModificationTime())
      .setAccessTime(fs.getAccessTime())
      .setPermission(PBHelper.convert(fs.getPermission()))
      .setOwner(fs.getOwner())
      .setGroup(fs.getGroup())
      .setFileId(fs.getFileId())
      .setChildrenNum(fs.getChildrenNum())
      .setPath(ByteString.copyFrom(fs.getLocalNameInBytes()))
      .setStoragePolicy(fs.getStoragePolicy());
  if (fs.isSymlink()) {
    builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
  }
  if (fs.getFileEncryptionInfo() != null) {
    builder.setFileEncryptionInfo(convert(fs.getFileEncryptionInfo()));
  }
  if (fs instanceof HdfsLocatedFileStatus) {
    // Located statuses may additionally carry block locations.
    final LocatedBlocks locations =
        ((HdfsLocatedFileStatus) fs).getBlockLocations();
    if (locations != null) {
      builder.setLocations(PBHelper.convert(locations));
    }
  }
  return builder.build();
}
 
Example 16
Source File: WebHdfsFileSystem.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Convert an HdfsFileStatus into a FileStatus whose path is fully
 * qualified against this filesystem's URI and working directory; the
 * symlink target is carried over when present.
 */
private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
  final Path symlink = f.isSymlink() ? new Path(f.getSymlink()) : null;
  final Path qualified =
      f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory());
  return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
      f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
      f.getPermission(), f.getOwner(), f.getGroup(), symlink, qualified);
}
 
Example 17
Source File: TestStorageMover.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Walk the namespace depth-first from {@code status}, verifying every
 * regular file via {@code verifyFile}; symlinks are skipped.
 */
private void verifyRecursively(final Path parent,
    final HdfsFileStatus status) throws Exception {
  if (!status.isDir()) {
    if (!status.isSymlink()) { // a regular file
      verifyFile(parent, status, null);
    }
    return;
  }
  // Directory: resolve its full path ("/" when it is the root) and recurse.
  final Path fullPath = (parent == null)
      ? new Path("/") : status.getFullPath(parent);
  final DirectoryListing children = dfs.getClient().listPaths(
      fullPath.toString(), HdfsFileStatus.EMPTY_NAME, true);
  for (HdfsFileStatus child : children.getPartialListing()) {
    verifyRecursively(fullPath, child);
  }
}
 
Example 18
Source File: TestJsonUtil.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Convert an HdfsFileStatus into a generic FileStatus rooted at
 * {@code parent}; the symlink target is carried over when present.
 */
static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
  final Path symlink = f.isSymlink() ? new Path(f.getSymlink()) : null;
  final Path fullPath = new Path(f.getFullName(parent));
  return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
      f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
      f.getPermission(), f.getOwner(), f.getGroup(), symlink, fullPath);
}
 
Example 19
Source File: WebHdfsFileSystem.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Convert an HdfsFileStatus into a FileStatus whose path is fully
 * qualified against this filesystem's URI and working directory; the
 * symlink target is carried over when present.
 */
private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
  final Path symlink = f.isSymlink() ? new Path(f.getSymlink()) : null;
  final Path qualified =
      f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory());
  return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
      f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
      f.getPermission(), f.getOwner(), f.getGroup(), symlink, qualified);
}
 
Example 20
Source File: Nfs3Utils.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Build weak-cache-consistency (WCC) attributes for the file identified
 * by {@code fileIdPath}: size plus mtime/ctime.
 *
 * @return the WccAttr, or null when the file does not exist
 * @throws IOException if the status lookup fails
 */
public static WccAttr getWccAttr(DFSClient client, String fileIdPath)
    throws IOException {
  final HdfsFileStatus fstat = getFileStatus(client, fileIdPath);
  if (fstat == null) {
    return null;
  }

  // Directories report a synthesized size; files report actual length.
  final long size;
  if (fstat.isDir()) {
    size = getDirSize(fstat.getChildrenNum());
  } else {
    size = fstat.getLen();
  }
  // ctime is approximated with the HDFS modification time.
  final NfsTime mtime = new NfsTime(fstat.getModificationTime());
  final NfsTime ctime = new NfsTime(fstat.getModificationTime());
  return new WccAttr(size, mtime, ctime);
}