org.apache.hadoop.hdfs.protocol.HdfsFileStatus Java Examples

The following examples show how to use org.apache.hadoop.hdfs.protocol.HdfsFileStatus. Each example is taken from an open-source project; the source file and license are noted above the code.
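Before the project examples, a minimal usage sketch (illustrative only, not taken from any of the projects below): it opens a DFSClient against a hypothetical NameNode URI, asks the NameNode for an HdfsFileStatus via getFileInfo, and prints a few common fields. The URI and path are placeholder assumptions.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class HdfsFileStatusUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode address; use your cluster's fs.defaultFS value.
    DFSClient client = new DFSClient(new URI("hdfs://localhost:8020"), conf);
    try {
      // getFileInfo returns null when the path does not exist.
      HdfsFileStatus status = client.getFileInfo("/tmp/example.txt");
      if (status != null) {
        System.out.println("length = " + status.getLen());
        System.out.println("isDir  = " + status.isDir());
        System.out.println("owner  = " + status.getOwner());
        System.out.println("fileId = " + status.getFileId());
      }
    } finally {
      client.close();
    }
  }
}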
Example #1
Source File: DFSOutputStream.java    From hadoop with Apache License 2.0
/** Construct a new output stream for append. */
private DFSOutputStream(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  this(dfsClient, src, progress, stat, checksum);
  initialFileSize = stat.getLen(); // length of file when opened
  this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK);

  boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK);

  // The last partial block of the file has to be filled.
  if (!toNewBlock && lastBlock != null) {
    // indicate that we are appending to an existing block
    bytesCurBlock = lastBlock.getBlockSize();
    streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum);
  } else {
    computePacketChunkSize(dfsClient.getConf().writePacketSize,
        bytesPerChecksum);
    streamer = new DataStreamer(stat,
        lastBlock != null ? lastBlock.getBlock() : null);
  }
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
}
 
Example #2
Source File: TestRpcProgramNfs3.java    From hadoop with Apache License 2.0
private void createFileUsingNfs(String fileName, byte[] buffer)
    throws Exception {
  DFSTestUtil.createFile(hdfs, new Path(fileName), 0, (short) 1, 0);

  final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName);
  final long dirId = status.getFileId();
  final FileHandle handle = new FileHandle(dirId);

  final WRITE3Request writeReq = new WRITE3Request(handle, 0,
      buffer.length, WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
  final XDR xdr_req = new XDR();
  writeReq.serialize(xdr_req);

  final WRITE3Response response = nfsd.write(xdr_req.asReadOnlyWrap(),
      null, 1, securityHandler,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect response: ", null, response);
}
 
Example #3
Source File: TestRpcProgramNfs3.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testReaddirplus() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  READDIRPLUS3Request req = new READDIRPLUS3Request(handle, 0, 0, 3, 2);
  req.serialize(xdr_req);
  
  // Attempt by an unprivileged user should fail.
  READDIRPLUS3Response response1 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  READDIRPLUS3Response response2 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example #4
Source File: TestRpcProgramNfs3.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testGetattr() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  GETATTR3Request req = new GETATTR3Request(handle);
  req.serialize(xdr_req);
  
  // Attempt by an unprivileged user should fail.
  GETATTR3Response response1 = nfsd.getattr(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  GETATTR3Response response2 = nfsd.getattr(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example #5
Source File: TestRpcProgramNfs3.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testSymlink() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(),
      "bar");
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example #6
Source File: TestRpcProgramNfs3.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testSetattr() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  SetAttr3 symAttr = new SetAttr3(0, 1, 0, 0, null, null,
      EnumSet.of(SetAttrField.UID));
  SETATTR3Request req = new SETATTR3Request(handle, symAttr, false, null);
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  SETATTR3Response response1 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  SETATTR3Response response2 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example #7
Source File: NamenodeFsck.java    From big-c with Apache License 2.0
private void lostFoundInit(DFSClient dfs) {
  lfInited = true;
  try {
    String lfName = "/lost+found";
    
    final HdfsFileStatus lfStatus = dfs.getFileInfo(lfName);
    if (lfStatus == null) { // not exists
      lfInitedOk = dfs.mkdirs(lfName, null, true);
      lostFound = lfName;
    } else if (!lfStatus.isDir()) { // exists but not a directory
      LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
      lfInitedOk = false;
    } else { // exists and is a directory
      lostFound = lfName;
      lfInitedOk = true;
    }
  } catch (Exception e) {
    e.printStackTrace();
    lfInitedOk = false;
  }
  if (lostFound == null) {
    LOG.warn("Cannot initialize /lost+found .");
    lfInitedOk = false;
    internalError = true;
  }
}
 
Example #8
Source File: TestStorageMover.java    From hadoop with Apache License 2.0
private void verifyFile(final Path parent, final HdfsFileStatus status,
    final Byte expectedPolicyId) throws Exception {
  HdfsLocatedFileStatus fileStatus = (HdfsLocatedFileStatus) status;
  byte policyId = fileStatus.getStoragePolicy();
  BlockStoragePolicy policy = policies.getPolicy(policyId);
  if (expectedPolicyId != null) {
    Assert.assertEquals((byte)expectedPolicyId, policy.getId());
  }
  final List<StorageType> types = policy.chooseStorageTypes(
      status.getReplication());
  for(LocatedBlock lb : fileStatus.getBlockLocations().getLocatedBlocks()) {
    final Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types,
        lb.getStorageTypes());
    Assert.assertTrue(fileStatus.getFullName(parent.toString())
        + " with policy " + policy + " has non-empty overlap: " + diff
        + ", the corresponding block is " + lb.getBlock().getLocalBlock(),
        diff.removeOverlap(true));
  }
}
 
Example #9
Source File: FSDirectory.java    From RDFS with Apache License 2.0
/**
 * Get a listing of files given path 'src'
 *
 * This function is admittedly very inefficient right now.  We'll
 * make it better later.
 */
HdfsFileStatus[] getHdfsListing(String src) {
  String srcs = normalizePath(src);

  readLock();
  try {
    INode targetNode = rootDir.getNode(srcs);
    if (targetNode == null) {
      return null;
    }
    if (!targetNode.isDirectory()) {
      return new HdfsFileStatus[]{createHdfsFileStatus(
          HdfsFileStatus.EMPTY_NAME, targetNode)};
    }
    List<INode> contents = ((INodeDirectory) targetNode).getChildren();
    HdfsFileStatus[] listing = new HdfsFileStatus[contents.size()];
    int i = 0;
    for (INode cur : contents) {
      listing[i] = createHdfsFileStatus(cur.name, cur);
      i++;
    }
    return listing;
  } finally {
    readUnlock();
  }
}
 
Example #10
Source File: FSDirAclOp.java    From hadoop with Apache License 2.0
static HdfsFileStatus removeAcl(FSDirectory fsd, final String srcArg)
    throws IOException {
  String src = srcArg;
  checkAclsConfigFlag(fsd);
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  src = fsd.resolvePath(pc, src, pathComponents);
  INodesInPath iip;
  fsd.writeLock();
  try {
    iip = fsd.getINodesInPath4Write(src);
    fsd.checkOwner(pc, iip);
    unprotectedRemoveAcl(fsd, iip);
  } finally {
    fsd.writeUnlock();
  }
  fsd.getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST);
  return fsd.getAuditFileInfo(iip);
}
 
Example #11
Source File: TestRpcProgramNfs3.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testRead() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);

  READ3Request readReq = new READ3Request(handle, 0, 5);
  XDR xdr_req = new XDR();
  readReq.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  READ3Response response1 = nfsd.read(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  READ3Response response2 = nfsd.read(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example #12
Source File: DistributedFileSystem.java    From big-c with Apache License 2.0
@Override
@SuppressWarnings("unchecked")
public boolean hasNext() throws IOException {
  while (curStat == null && hasNextNoFilter()) {
    T next;
    HdfsFileStatus fileStat = thisListing.getPartialListing()[i++];
    if (needLocation) {
      next = (T)((HdfsLocatedFileStatus)fileStat)
          .makeQualifiedLocated(getUri(), p);
    } else {
      next = (T)fileStat.makeQualified(getUri(), p);
    }
    // apply filter if not null
    if (filter == null || filter.accept(next.getPath())) {
      curStat = next;
    }
  }
  return curStat != null;
}
 
Example #13
Source File: FSDirStatAndListingOp.java    From hadoop with Apache License 2.0
/** Get the file info for a specific file.
 * @param fsd FSDirectory
 * @param path The string representation of the path to the file
 * @param src INodesInPath resolved for the path
 * @param isRawPath true if a /.reserved/raw pathname was passed by the user
 * @param includeStoragePolicy whether to include storage policy
 * @return object containing information regarding the file
 *         or null if file not found
 */
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String path, INodesInPath src, boolean isRawPath,
    boolean includeStoragePolicy)
    throws IOException {
  fsd.readLock();
  try {
    final INode i = src.getLastINode();
    byte policyId = includeStoragePolicy && i != null && !i.isSymlink() ?
        i.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
    return i == null ? null : createFileStatus(
        fsd, path, HdfsFileStatus.EMPTY_NAME, i, policyId,
        src.getPathSnapshotId(), isRawPath, src);
  } finally {
    fsd.readUnlock();
  }
}
 
Example #14
Source File: ClientNamenodeProtocolServerSideTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public CreateResponseProto create(RpcController controller,
    CreateRequestProto req) throws ServiceException {
  try {
    HdfsFileStatus result = server.create(req.getSrc(),
        PBHelper.convert(req.getMasked()), req.getClientName(),
        PBHelper.convertCreateFlag(req.getCreateFlag()), req.getCreateParent(),
        (short) req.getReplication(), req.getBlockSize(),
        PBHelper.convertCryptoProtocolVersions(
            req.getCryptoProtocolVersionList()));

    if (result != null) {
      return CreateResponseProto.newBuilder().setFs(PBHelper.convert(result))
          .build();
    }
    return VOID_CREATE_RESPONSE;
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
 
Example #15
Source File: TestRpcProgramNfs3.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testFsstat() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  FSSTAT3Request req = new FSSTAT3Request(handle);
  req.serialize(xdr_req);
  
  // Attempt by an unprivileged user should fail.
  FSSTAT3Response response1 = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  FSSTAT3Response response2 = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example #16
Source File: TestRpcProgramNfs3.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testPathconf() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  PATHCONF3Request req = new PATHCONF3Request(handle);
  req.serialize(xdr_req);
  
  // Attempt by an unprivileged user should fail.
  PATHCONF3Response response1 = nfsd.pathconf(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  PATHCONF3Response response2 = nfsd.pathconf(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example #17
Source File: Nfs3Utils.java    From big-c with Apache License 2.0
public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
    HdfsFileStatus fs, IdMappingServiceProvider iug) {
  /**
   * Some 32-bit Linux clients have trouble with the 64-bit fileId: such a
   * client takes only the lower 32 bits of the fileId and treats them as a
   * signed int; when the 32nd bit is 1, the client considers the id invalid.
   */
  NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
  fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
  int nlink = (fileType == NfsFileType.NFSDIR) ? fs.getChildrenNum() + 2 : 1;
  long size = (fileType == NfsFileType.NFSDIR) ? getDirSize(fs
      .getChildrenNum()) : fs.getLen();
  return new Nfs3FileAttributes(fileType, nlink,
      fs.getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()),
      iug.getGidAllowingUnknown(fs.getGroup()), size, 0 /* fsid */,
      fs.getFileId(), fs.getModificationTime(), fs.getAccessTime(),
      new Nfs3FileAttributes.Specdata3());
}
 
Example #18
Source File: TestEditLog.java    From big-c with Apache License 2.0
/**
 * Test case for loading a very simple edit log from a format
 * prior to the inclusion of edit transaction IDs in the log.
 */
@Test
public void testPreTxidEditLogWithEdits() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();

    long numEdits = testLoad(HADOOP20_SOME_EDITS, namesystem);
    assertEquals(3, numEdits);
    // Sanity check the edit
    HdfsFileStatus fileInfo = namesystem.getFileInfo("/myfile", false);
    assertEquals("supergroup", fileInfo.getGroup());
    assertEquals(3, fileInfo.getReplication());
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example #19
Source File: FSDirStatAndListingOp.java    From hadoop with Apache License 2.0
/**
 * Get the file info for a specific file.
 *
 * @param fsd FSDirectory
 * @param srcArg The string representation of the path to the file
 * @param resolveLink whether to throw UnresolvedLinkException
 *        if src refers to a symlink
 *
 * @return object containing information regarding the file
 *         or null if file not found
 */
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String srcArg, boolean resolveLink)
    throws IOException {
  String src = srcArg;
  if (!DFSUtil.isValidName(src)) {
    throw new InvalidPathException("Invalid file name: " + src);
  }
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  src = fsd.resolvePath(pc, src, pathComponents);
  final INodesInPath iip = fsd.getINodesInPath(src, resolveLink);
  boolean isSuperUser = true;
  if (fsd.isPermissionEnabled()) {
    fsd.checkPermission(pc, iip, false, null, null, null, null, false);
    isSuperUser = pc.isSuperUser();
  }
  return getFileInfo(fsd, src, resolveLink,
      FSDirectory.isReservedRawName(srcArg), isSuperUser);
}
 
Example #20
Source File: TestNamenodeRetryCache.java    From hadoop with Apache License 2.0
/**
 * Test for create file
 */
@Test
public void testCreate() throws Exception {
  String src = "/testNamenodeRetryCache/testCreate/file";
  // Two retried calls succeed
  newCall();
  HdfsFileStatus status = nnRpc.create(src, perm, "holder",
    new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
    (short) 1, BlockSize, null);
  Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 1, BlockSize, null));
  Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 1, BlockSize, null));
  
  // A non-retried call fails
  newCall();
  try {
    nnRpc.create(src, perm, "holder",
        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
        (short) 1, BlockSize, null);
    Assert.fail("testCreate - expected exception is not thrown");
  } catch (IOException e) {
    // expected
  }
}
 
Example #21
Source File: TestLazyPersistFiles.java    From big-c with Apache License 2.0
@Test
public void testPolicyPersistenceInFsImage() throws IOException {
  startUpCluster(false, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, 0, true);
  // checkpoint
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  fs.saveNamespace();
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  cluster.restartNameNode(true);

  // Stat the file and check that the lazyPersist flag is returned back.
  HdfsFileStatus status = client.getFileInfo(path.toString());
  assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
}
 
Example #22
Source File: PBHelper.java    From big-c with Apache License 2.0
public static HdfsFileStatusProto[] convert(HdfsFileStatus[] fs) {
  if (fs == null) return null;
  final int len = fs.length;
  HdfsFileStatusProto[] result = new HdfsFileStatusProto[len];
  for (int i = 0; i < len; ++i) {
    result[i] = PBHelper.convert(fs[i]);
  }
  return result;
}
 
Example #23
Source File: FSDirAttrOp.java    From hadoop with Apache License 2.0
static HdfsFileStatus setTimes(
    FSDirectory fsd, String src, long mtime, long atime)
    throws IOException {
  if (!fsd.isAccessTimeSupported() && atime != -1) {
    throw new IOException(
        "Access time for hdfs is not configured. " +
            " Please set " + DFS_NAMENODE_ACCESSTIME_PRECISION_KEY
            + " configuration parameter.");
  }

  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);

  INodesInPath iip;
  fsd.writeLock();
  try {
    src = fsd.resolvePath(pc, src, pathComponents);
    iip = fsd.getINodesInPath4Write(src);
    // Write access is required to set access and modification times
    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.WRITE);
    }
    final INode inode = iip.getLastINode();
    if (inode == null) {
      throw new FileNotFoundException("File/Directory " + src +
                                          " does not exist.");
    }
    boolean changed = unprotectedSetTimes(fsd, inode, mtime, atime, true,
                                          iip.getLatestSnapshotId());
    if (changed) {
      fsd.getEditLog().logTimes(src, mtime, atime);
    }
  } finally {
    fsd.writeUnlock();
  }
  return fsd.getAuditFileInfo(iip);
}
 
Example #24
Source File: DFSClient.java    From big-c with Apache License 2.0
/**
 * Get the file info for a specific file or directory. If src
 * refers to a symlink then the FileStatus of the link is returned.
 * @param src path to a file or directory.
 * 
 * For description of exceptions thrown 
 * @see ClientProtocol#getFileLinkInfo(String)
 */
public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("getFileLinkInfo", src);
  try {
    return namenode.getFileLinkInfo(src);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}