org.apache.hadoop.fs.UnresolvedLinkException Java Examples

The following examples show how to use org.apache.hadoop.fs.UnresolvedLinkException, the exception Hadoop throws when a symbolic link is encountered in a path that the current file system instance cannot resolve itself. All examples below are taken from the Apache Hadoop project; the originating source file is listed above each one.
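Before the examples, a minimal sketch of where this exception surfaces. In the FileContext/AbstractFileSystem API, low-level calls declare UnresolvedLinkException so the FileContext layer can catch it, resolve the symlink, and retry. The class names are real Hadoop APIs, but the path is hypothetical and the snippet is illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;

public class UnresolvedLinkDemo {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    Path link = new Path("/user/demo/link");   // hypothetical symlink
    try {
      // getFileLinkStatus does not follow the final path component,
      // so a symlink is reported as such rather than resolved.
      System.out.println(fc.getFileLinkStatus(link).isSymlink());
    } catch (UnresolvedLinkException e) {
      // Surfaces from lower layers when a symlink in the path could not
      // be resolved by the file system instance handling the call.
      System.err.println("Unresolved symlink in " + link + ": " + e);
    }
  }
}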
Example #1
Source File: TestClientReportBadBlock.java    From hadoop with Apache License 2.0
/**
 * Have the DFS client read bytes starting from the specified position.
 */
private void dfsClientReadFileFromPosition(Path corruptedFile)
    throws UnresolvedLinkException, IOException {
  DFSInputStream in = dfs.dfs.open(corruptedFile.toUri().getPath());
  byte[] buf = new byte[buffersize];
  int startPosition = 2;
  int nRead = 0; // total number of bytes read
  try {
    do {
      nRead = in.read(startPosition, buf, 0, buf.length);
      startPosition += buf.length;
    } while (nRead > 0);
  } catch (BlockMissingException bme) {
    LOG.debug("DfsClientReadFile caught BlockMissingException.");
  }
}
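The loop above drives DFSInputStream directly; application code typically gets the same positional-read behavior through the public FSDataInputStream API. A hedged sketch, assuming an existing file (the path is made up):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

static void readFromPosition() throws Exception {
  FileSystem fs = FileSystem.get(new Configuration());
  // Positional read(position, buf, off, len) does not move the stream's
  // own offset, so it is safe to call from multiple threads.
  try (FSDataInputStream in = fs.open(new Path("/user/demo/file"))) {
    byte[] buf = new byte[4096];
    long pos = 2;                 // start two bytes in, as in the test above
    int nRead;
    while ((nRead = in.read(pos, buf, 0, buf.length)) > 0) {
      pos += nRead;               // advance by the bytes actually read
    }
  }
}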
 
Example #2
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public boolean mkdirs(String src, FsPermission masked, boolean createParent)
    throws AccessControlException, FileAlreadyExistsException,
    FileNotFoundException, NSQuotaExceededException,
    ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
    IOException {
  MkdirsRequestProto req = MkdirsRequestProto.newBuilder()
      .setSrc(src)
      .setMasked(PBHelper.convert(masked))
      .setCreateParent(createParent).build();

  try {
    return rpcProxy.mkdirs(null, req).getResult();
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
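For comparison, the client-facing path into this RPC is FileContext#mkdir. A small sketch with a hypothetical directory name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

static void makeDir() throws Exception {
  FileContext fc = FileContext.getFileContext(new Configuration());
  // createParent=true behaves like "mkdir -p"; the permission passed here
  // is masked with the umask before it reaches the translator above.
  fc.mkdir(new Path("/user/demo/newdir"), FsPermission.getDirDefault(), true);
}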
 
Example #3
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public DirectoryListing getListing(String src, byte[] startAfter,
    boolean needLocation) throws AccessControlException,
    FileNotFoundException, UnresolvedLinkException, IOException {
  GetListingRequestProto req = GetListingRequestProto.newBuilder()
      .setSrc(src)
      .setStartAfter(ByteString.copyFrom(startAfter))
      .setNeedLocation(needLocation).build();
  try {
    GetListingResponseProto result = rpcProxy.getListing(null, req);
    
    if (result.hasDirList()) {
      return PBHelper.convert(result.getDirList());
    }
    return null;
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #4
Source File: DFSClient.java    From hadoop with Apache License 2.0
/**
 * Get block location info about a file.
 *
 * getBlockLocations() returns a list of hostnames that store
 * data for a specific file region. It returns a set of hostnames
 * for every block within the indicated region.
 *
 * This function is very useful when writing code that considers
 * data placement when performing operations. For example, the
 * MapReduce system tries to schedule tasks on the same machines
 * as the data block each task processes.
 */
public BlockLocation[] getBlockLocations(String src, long start, 
      long length) throws IOException, UnresolvedLinkException {
  TraceScope scope = getPathTraceScope("getBlockLocations", src);
  try {
    LocatedBlocks blocks = getLocatedBlocks(src, start, length);
    BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
    HdfsBlockLocation[] hdfsLocations = new HdfsBlockLocation[locations.length];
    for (int i = 0; i < locations.length; i++) {
      hdfsLocations[i] = new HdfsBlockLocation(locations[i], blocks.get(i));
    }
    return hdfsLocations;
  } finally {
    scope.close();
  }
}
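Applications usually reach this through the public FileSystem#getFileBlockLocations rather than DFSClient. A hedged sketch that prints where each block of a (hypothetical) file lives:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

static void printBlockHosts() throws Exception {
  FileSystem fs = FileSystem.get(new Configuration());
  FileStatus stat = fs.getFileStatus(new Path("/user/demo/file"));
  // One BlockLocation per block overlapping the requested byte range.
  for (BlockLocation loc : fs.getFileBlockLocations(stat, 0, stat.getLen())) {
    System.out.println(loc.getOffset() + "+" + loc.getLength()
        + " on " + String.join(",", loc.getHosts()));
  }
}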
 
Example #5
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public void rename2(String src, String dst, Rename... options)
    throws AccessControlException, DSQuotaExceededException,
    FileAlreadyExistsException, FileNotFoundException,
    NSQuotaExceededException, ParentNotDirectoryException, SafeModeException,
    UnresolvedLinkException, IOException {
  boolean overwrite = false;
  if (options != null) {
    for (Rename option : options) {
      if (option == Rename.OVERWRITE) {
        overwrite = true;
      }
    }
  }
  Rename2RequestProto req = Rename2RequestProto.newBuilder().
      setSrc(src).
      setDst(dst).setOverwriteDest(overwrite).
      build();
  try {
    rpcProxy.rename2(null, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
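Client code reaches rename2 through FileContext#rename, passing Options.Rename.OVERWRITE to get the overwrite semantics translated above. A sketch with hypothetical paths:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;

static void renameWithOverwrite() throws Exception {
  FileContext fc = FileContext.getFileContext(new Configuration());
  // With OVERWRITE an existing destination is replaced; without it the
  // rename fails if the destination already exists.
  fc.rename(new Path("/user/demo/src"), new Path("/user/demo/dst"),
      Rename.OVERWRITE);
}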
 
Example #6
Source File: DFSClient.java    From hadoop with Apache License 2.0
/**
 * Same as {@link #create(String, FsPermission, EnumSet, short, long,
 * Progressable, int, ChecksumOpt)} except that the permission
 * is absolute (i.e. it has already been masked with the umask).
 */
public DFSOutputStream primitiveCreate(String src, 
                           FsPermission absPermission,
                           EnumSet<CreateFlag> flag,
                           boolean createParent,
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize,
                           ChecksumOpt checksumOpt)
    throws IOException, UnresolvedLinkException {
  checkOpen();
  CreateFlag.validate(flag);
  DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
  if (result == null) {
    DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
    result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
        flag, createParent, replication, blockSize, progress, buffersize,
        checksum, null);
  }
  beginFileLease(result.getFileId(), result);
  return result;
}
 
Example #7
Source File: ViewFs.java    From hadoop with Apache License 2.0
@Override
public FileStatus getFileStatus(final Path f) throws AccessControlException,
    FileNotFoundException, UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res = 
    fsState.resolve(getUriPath(f), true);

  // FileStatus#getPath is a fully qualified path relative to the root of the
  // target file system. We need to change it to a viewfs URI, relative to the
  // root of the mount table.

  // The implementors of RawLocalFileSystem were trying to be very smart:
  // they implement FileStatus#getOwner lazily -- the object returned is
  // really a RawLocalFileStatus that expects FileStatus#getPath to be
  // unchanged so that it can fetch the owner when needed. Hence we need to
  // interpose a new ViewFsFileStatus that works around this.

  FileStatus status = res.targetFileSystem.getFileStatus(res.remainingPath);
  return new ViewFsFileStatus(status, this.makeQualified(f));
}
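ViewFs resolves paths against a client-side mount table configured in the Configuration. A minimal, hedged mount-table sketch; the NameNode address and mount point are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

static void viewFsStatus() throws Exception {
  Configuration conf = new Configuration();
  // Link /data in the "default" mount table to a directory on one cluster.
  conf.set("fs.viewfs.mounttable.default.link./data",
      "hdfs://nn.example.com:8020/data");
  conf.set("fs.defaultFS", "viewfs:///");
  FileContext fc = FileContext.getFileContext(conf);
  // getFileStatus runs the resolution shown above; the returned status
  // carries the viewfs path, not the underlying hdfs:// path.
  FileStatus st = fc.getFileStatus(new Path("/data"));
  System.out.println(st.getPath());
}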
 
Example #8
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws AccessControlException,
    DSQuotaExceededException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  AppendRequestProto req = AppendRequestProto.newBuilder().setSrc(src)
      .setClientName(clientName).setFlag(PBHelper.convertCreateFlag(flag))
      .build();
  try {
    AppendResponseProto res = rpcProxy.append(null, req);
    LocatedBlock lastBlock = res.hasBlock() ? PBHelper
        .convert(res.getBlock()) : null;
    HdfsFileStatus stat = (res.hasStat()) ? PBHelper.convert(res.getStat())
        : null;
    return new LastBlockWithStatus(lastBlock, stat);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
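The corresponding public call is FileSystem#append, which only succeeds on file systems (and clusters) that support append. A sketch with a hypothetical path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

static void appendLine() throws Exception {
  FileSystem fs = FileSystem.get(new Configuration());
  // append() reopens the last block of an existing file for writing.
  try (FSDataOutputStream out = fs.append(new Path("/user/demo/log"))) {
    out.writeBytes("one more line\n");
  }
}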
 
Example #9
Source File: FSDirectory.java    From hadoop with Apache License 2.0
INode getINode4DotSnapshot(String src) throws UnresolvedLinkException {
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = this.getINode(dirPath);
  if (node != null && node.isDirectory()
      && node.asDirectory().isSnapshottable()) {
    return node;
  }
  return null;
}
 
Example #10
Source File: ViewFs.java    From hadoop with Apache License 2.0
@Override
public BlockLocation[] getFileBlockLocations(final Path f, final long start,
    final long len) throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res = 
    fsState.resolve(getUriPath(f), true);
  return
    res.targetFileSystem.getFileBlockLocations(res.remainingPath, start, len);
}
 
Example #11
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public void setPermission(String src, FsPermission permission)
    throws AccessControlException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  SetPermissionRequestProto req = SetPermissionRequestProto.newBuilder()
      .setSrc(src)
      .setPermission(PBHelper.convert(permission))
      .build();
  try {
    rpcProxy.setPermission(null, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #12
Source File: FSDirMkdirOp.java    From hadoop with Apache License 2.0
static void mkdirForEditLog(FSDirectory fsd, long inodeId, String src,
    PermissionStatus permissions, List<AclEntry> aclEntries, long timestamp)
    throws QuotaExceededException, UnresolvedLinkException, AclException,
    FileAlreadyExistsException {
  assert fsd.hasWriteLock();
  INodesInPath iip = fsd.getINodesInPath(src, false);
  final byte[] localName = iip.getLastLocalName();
  final INodesInPath existing = iip.getParentINodesInPath();
  Preconditions.checkState(existing.getLastINode() != null);
  unprotectedMkdir(fsd, inodeId, existing, localName, permissions, aclEntries,
      timestamp);
}
 
Example #13
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public boolean delete(String src, boolean recursive)
    throws AccessControlException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  DeleteRequestProto req = DeleteRequestProto.newBuilder()
      .setSrc(src)
      .setRecursive(recursive)
      .build();
  try {
    return rpcProxy.delete(null, req).getResult();
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #14
Source File: ViewFs.java    From hadoop with Apache License 2.0
@Override
public boolean truncate(final Path f, final long newLength)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res =
      fsState.resolve(getUriPath(f), true);
  return res.targetFileSystem.truncate(res.remainingPath, newLength);
}
 
Example #15
Source File: FSDirAttrOp.java    From hadoop with Apache License 2.0
static void unprotectedSetPermission(
    FSDirectory fsd, String src, FsPermission permissions)
    throws FileNotFoundException, UnresolvedLinkException,
           QuotaExceededException, SnapshotAccessControlException {
  assert fsd.hasWriteLock();
  final INodesInPath inodesInPath = fsd.getINodesInPath4Write(src, true);
  final INode inode = inodesInPath.getLastINode();
  if (inode == null) {
    throw new FileNotFoundException("File does not exist: " + src);
  }
  int snapshotId = inodesInPath.getLatestSnapshotId();
  inode.setPermission(permissions, snapshotId);
}
 
Example #16
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public HdfsFileStatus getFileLinkInfo(String src)
    throws AccessControlException, UnresolvedLinkException, IOException {
  GetFileLinkInfoRequestProto req = GetFileLinkInfoRequestProto.newBuilder()
      .setSrc(src).build();
  try {
    GetFileLinkInfoResponseProto result = rpcProxy.getFileLinkInfo(null, req);
    // Reuse the response already in hand rather than issuing a second RPC.
    return result.hasFs() ? PBHelper.convert(result.getFs()) : null;
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #17
Source File: ViewFs.java    From hadoop with Apache License 2.0
@Override
public void setPermission(final Path f, final FsPermission permission)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res = 
    fsState.resolve(getUriPath(f), true);
  res.targetFileSystem.setPermission(res.remainingPath, permission);
}
 
Example #18
Source File: FSDirectory.java    From hadoop with Apache License 2.0
/**
 * @return the INodesInPath of the components in src
 * @throws UnresolvedLinkException if a symlink can't be resolved
 * @throws SnapshotAccessControlException if the path is in a read-only snapshot
 */
INodesInPath getINodesInPath4Write(String src, boolean resolveLink)
        throws UnresolvedLinkException, SnapshotAccessControlException {
  final byte[][] components = INode.getPathComponents(src);
  INodesInPath inodesInPath = INodesInPath.resolve(rootDir, components,
      resolveLink);
  if (inodesInPath.isSnapshot()) {
    throw new SnapshotAccessControlException(
            "Modification on a read-only snapshot is disallowed");
  }
  return inodesInPath;
}
 
Example #19
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public ContentSummary getContentSummary(String path)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  GetContentSummaryRequestProto req = GetContentSummaryRequestProto
      .newBuilder()
      .setPath(path)
      .build();
  try {
    return PBHelper.convert(rpcProxy.getContentSummary(null, req)
        .getSummary());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
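The same summary is available to applications via FileSystem#getContentSummary. A brief sketch (the directory name is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

static void printSummary() throws Exception {
  FileSystem fs = FileSystem.get(new Configuration());
  ContentSummary cs = fs.getContentSummary(new Path("/user/demo"));
  // Counts are aggregated recursively over the whole subtree.
  System.out.println("files=" + cs.getFileCount()
      + " dirs=" + cs.getDirectoryCount()
      + " bytes=" + cs.getLength());
}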
 
Example #20
Source File: FSDirSymlinkOp.java    From hadoop with Apache License 2.0
static INodeSymlink unprotectedAddSymlink(FSDirectory fsd, INodesInPath iip,
    byte[] localName, long id, String target, long mtime, long atime,
    PermissionStatus perm)
    throws UnresolvedLinkException, QuotaExceededException {
  assert fsd.hasWriteLock();
  final INodeSymlink symlink = new INodeSymlink(id, null, perm, mtime, atime,
      target);
  symlink.setLocalName(localName);
  return fsd.addINode(iip, symlink) != null ? symlink : null;
}
 
Example #21
Source File: FSDirectory.java    From hadoop with Apache License 2.0
/**
 * Add the given filename to the fs.
 * @return the new INodesInPath instance that contains the new INode
 */
INodesInPath addFile(INodesInPath existing, String localName, PermissionStatus
    permissions, short replication, long preferredBlockSize,
    String clientName, String clientMachine)
  throws FileAlreadyExistsException, QuotaExceededException,
    UnresolvedLinkException, SnapshotAccessControlException, AclException {

  long modTime = now();
  INodeFile newNode = newINodeFile(allocateNewInodeId(), permissions, modTime,
      modTime, replication, preferredBlockSize);
  newNode.setLocalName(localName.getBytes(Charsets.UTF_8));
  newNode.toUnderConstruction(clientName, clientMachine);

  INodesInPath newiip;
  writeLock();
  try {
    newiip = addINode(existing, newNode);
  } finally {
    writeUnlock();
  }
  if (newiip == null) {
    NameNode.stateChangeLog.info("DIR* addFile: failed to add " +
        existing.getPath() + "/" + localName);
    return null;
  }

  if(NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("DIR* addFile: " + localName + " is added");
  }
  return newiip;
}
 
Example #22
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public void concat(String trg, String[] srcs) throws IOException,
    UnresolvedLinkException {
  ConcatRequestProto req = ConcatRequestProto.newBuilder().
      setTrg(trg).
      addAllSrcs(Arrays.asList(srcs)).build();
  try {
    rpcProxy.concat(null, req);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
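concat is an HDFS-specific operation exposed on DistributedFileSystem; historically the target and sources must live in the same directory and satisfy block-size constraints. A hedged sketch with hypothetical part files:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

static void concatParts() throws Exception {
  DistributedFileSystem dfs =
      (DistributedFileSystem) FileSystem.get(new Configuration());
  // Moves the blocks of the source files onto the end of the target file;
  // the sources disappear and no data is copied.
  dfs.concat(new Path("/user/demo/part-0"),
      new Path[] { new Path("/user/demo/part-1"),
                   new Path("/user/demo/part-2") });
}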
 
Example #23
Source File: FSDirectory.java    From hadoop with Apache License 2.0
boolean isInAnEZ(INodesInPath iip)
    throws UnresolvedLinkException, SnapshotAccessControlException {
  readLock();
  try {
    return ezManager.isInAnEZ(iip);
  } finally {
    readUnlock();
  }
}
 
Example #24
Source File: NamenodeFsck.java    From hadoop with Apache License 2.0
boolean hdfsPathExists(String path)
    throws AccessControlException, UnresolvedLinkException, IOException {
  try {
    HdfsFileStatus hfs = namenode.getRpcServer().getFileInfo(path);
    return (hfs != null);
  } catch (FileNotFoundException e) {
    return false;
  }
}
 
Example #25
Source File: FSDirectory.java    From hadoop with Apache License 2.0
/**
 * Truncate implementation used by FSEditLogLoader when replaying the edit
 * log. Unlike FSNamesystem.truncate, this will not schedule block recovery.
 */
void unprotectedTruncate(String src, String clientName, String clientMachine,
                         long newLength, long mtime, Block truncateBlock)
    throws UnresolvedLinkException, QuotaExceededException,
    SnapshotAccessControlException, IOException {
  INodesInPath iip = getINodesInPath(src, true);
  INodeFile file = iip.getLastINode().asFile();
  BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
  boolean onBlockBoundary =
      unprotectedTruncate(iip, newLength, collectedBlocks, mtime, null);

  if (!onBlockBoundary) {
    BlockInfoContiguous oldBlock = file.getLastBlock();
    Block tBlk = getFSNamesystem().prepareFileForTruncate(iip,
        clientName, clientMachine, file.computeFileSize() - newLength,
        truncateBlock);
    assert Block.matchingIdAndGenStamp(tBlk, truncateBlock) &&
        tBlk.getNumBytes() == truncateBlock.getNumBytes() :
        "Should be the same block.";
    if (oldBlock.getBlockId() != tBlk.getBlockId() &&
        !file.isBlockInLatestSnapshot(oldBlock)) {
      getBlockManager().removeBlockFromMap(oldBlock);
    }
  }
  }
  assert onBlockBoundary == (truncateBlock == null) :
    "truncateBlock is null iff on block boundary: " + truncateBlock;
  getFSNamesystem().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
}
 
Example #26
Source File: FSDirectory.java    From hadoop with Apache License 2.0
/**
 * Add the given child to the namespace.
 * @param existing the INodesInPath containing all the ancestral INodes
 * @param child the new INode to add
 * @return a new INodesInPath instance containing the new child INode. Null
 * if the adding fails.
 * @throws QuotaExceededException is thrown if it violates quota limit
 */
INodesInPath addINode(INodesInPath existing, INode child)
    throws QuotaExceededException, UnresolvedLinkException {
  cacheName(child);
  writeLock();
  try {
    return addLastINode(existing, child, true);
  } finally {
    writeUnlock();
  }
}
 
Example #27
Source File: FSDirectory.java    From hadoop with Apache License 2.0
/**
 * Update the namespace, storage space and type spaces consumed for all
 * directories up to the parent directory of the file represented by the path.
 *
 * @param iip the INodesInPath instance containing all the INodes for
 *            updating quota usage
 * @param nsDelta the delta change of the namespace
 * @param ssDelta the delta change of storage space consumed, without replication
 * @param replication the replication factor of the block consumption change
 * @throws QuotaExceededException if the new count violates any quota limit
 * @throws FileNotFoundException if the path does not exist
 */
void updateSpaceConsumed(INodesInPath iip, long nsDelta, long ssDelta,
    short replication)
    throws QuotaExceededException, FileNotFoundException,
    UnresolvedLinkException, SnapshotAccessControlException {
  writeLock();
  try {
    if (iip.getLastINode() == null) {
      throw new FileNotFoundException("Path not found: " + iip.getPath());
    }
    updateCount(iip, nsDelta, ssDelta, replication, true);
  } finally {
    writeUnlock();
  }
}
 
Example #28
Source File: FSDirectory.java    From hadoop with Apache License 2.0
/**
 * Check whether the path specifies a directory
 */
boolean isDir(String src) throws UnresolvedLinkException {
  src = normalizePath(src);
  readLock();
  try {
    INode node = getINode(src, false);
    return node != null && node.isDirectory();
  } finally {
    readUnlock();
  }
}
 
Example #29
Source File: ViewFs.java    From hadoop with Apache License 2.0
@Override
public boolean delete(final Path f, final boolean recursive)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res = 
    fsState.resolve(getUriPath(f), true);
  // If internal dir or target is a mount link (i.e. remainingPath is Slash)
  if (res.isInternalDir() || res.remainingPath == InodeTree.SlashPath) {
    throw new AccessControlException(
        "Cannot delete internal mount table directory: " + f);
  }
  return res.targetFileSystem.delete(res.remainingPath, recursive);
}
 
Example #30
Source File: FSDirStatAndListingOp.java    From hadoop with Apache License 2.0
/**
 * Currently we only support "ls /xxx/.snapshot", which returns all the
 * snapshots of a directory. The FsCommand Ls will first call getFileInfo to
 * make sure the file/directory exists (before the real getListing call).
 * Since we do not have a real INode for ".snapshot", we return an empty,
 * non-null HdfsFileStatus here.
 */
private static HdfsFileStatus getFileInfo4DotSnapshot(
    FSDirectory fsd, String src)
    throws UnresolvedLinkException {
  if (fsd.getINode4DotSnapshot(src) != null) {
    return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
        HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
        BlockStoragePolicySuite.ID_UNSPECIFIED);
  }
  return null;
}
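This synthetic status entry is what lets an ordinary listing of the ".snapshot" pseudo-directory work from the client side. A sketch, assuming the directory has already been made snapshottable (the path is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

static void listSnapshots() throws Exception {
  FileSystem fs = FileSystem.get(new Configuration());
  // Each entry corresponds to one snapshot of /user/demo/dir.
  for (FileStatus s : fs.listStatus(new Path("/user/demo/dir/.snapshot"))) {
    System.out.println(s.getPath().getName());
  }
}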