Java Code Examples for org.apache.hadoop.hdfs.protocol.HdfsFileStatus#getFileId()

The following examples show how to use org.apache.hadoop.hdfs.protocol.HdfsFileStatus#getFileId(). Each example is taken from an open-source project; the original project and source file are noted above the code.
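Most of the examples follow the same pattern: fetch an HdfsFileStatus for a path, read its inode id with getFileId(), and wrap that id in an NFS FileHandle. Below is a minimal sketch of that pattern, assuming an already-initialized DFSClient; the class and helper names are illustrative and do not come from the examples themselves.

import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.nfs.nfs3.FileHandle;

// Hypothetical helper class, used only for illustration.
public class FileIdExample {

  // Build an NFS3 file handle for the file or directory at the given path.
  static FileHandle handleFor(DFSClient dfsClient, String path) throws IOException {
    // Ask the NameNode for the file's status; null means the path does not exist.
    HdfsFileStatus status = dfsClient.getFileInfo(path);
    if (status == null) {
      throw new IOException("No such path: " + path);
    }
    // The file id is the HDFS inode id; the NFS gateway uses it as the file handle.
    return new FileHandle(status.getFileId());
  }
}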
Example 1
Source File: TestRpcProgramNfs3.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testRead() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);

  READ3Request readReq = new READ3Request(handle, 0, 5);
  XDR xdr_req = new XDR();
  readReq.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  READ3Response response1 = nfsd.read(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  READ3Response response2 = nfsd.read(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example 2
Source File: TestRpcProgramNfs3.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testLookup() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  LOOKUP3Request lookupReq = new LOOKUP3Request(handle, "bar");
  XDR xdr_req = new XDR();
  lookupReq.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  LOOKUP3Response response1 = nfsd.lookup(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  LOOKUP3Response response2 = nfsd.lookup(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example 3
Source File: TestRpcProgramNfs3.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testAccess() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  ACCESS3Request req = new ACCESS3Request(handle);
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  ACCESS3Response response1 = nfsd.access(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  ACCESS3Response response2 = nfsd.access(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example 4
Source File: TestRpcProgramNfs3.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testMkdir() throws Exception {//FixME
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  MKDIR3Request req = new MKDIR3Request(handle, "fubar1", new SetAttr3());
  req.serialize(xdr_req);
  
  // Attempt to mkdir by an unprivileged user should fail.
  MKDIR3Response response1 = nfsd.mkdir(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  XDR xdr_req2 = new XDR();
  MKDIR3Request req2 = new MKDIR3Request(handle, "fubar2", new SetAttr3());
  req2.serialize(xdr_req2);
  
  // Attempt to mkdir by a privileged user should pass.
  MKDIR3Response response2 = nfsd.mkdir(xdr_req2.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example 5
Source File: TestRpcProgramNfs3.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testCreate() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  CREATE3Request req = new CREATE3Request(handle, "fubar",
      Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
  req.serialize(xdr_req);
  
  // Attempt by an unprivileged user should fail.
  CREATE3Response response1 = nfsd.create(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  CREATE3Response response2 = nfsd.create(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example 6
Source File: TestRpcProgramNfs3.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testReaddir() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  READDIR3Request req = new READDIR3Request(handle, 0, 0, 100);
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  READDIR3Response response1 = nfsd.readdir(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  READDIR3Response response2 = nfsd.readdir(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example 7
Source File: TestRpcProgramNfs3.java    From hadoop with Apache License 2.0
private byte[] getFileContentsUsingNfs(String fileName, int len)
    throws Exception {
  final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName);
  final long dirId = status.getFileId();
  final FileHandle handle = new FileHandle(dirId);

  final READ3Request readReq = new READ3Request(handle, 0, len);
  final XDR xdr_req = new XDR();
  readReq.serialize(xdr_req);

  final READ3Response response = nfsd.read(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code: ", Nfs3Status.NFS3_OK,
      response.getStatus());
  assertTrue("expected full read", response.isEof());
  return response.getData().array();
}
 
Example 8
Source File: TestRpcProgramNfs3.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testLookup() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  LOOKUP3Request lookupReq = new LOOKUP3Request(handle, "bar");
  XDR xdr_req = new XDR();
  lookupReq.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  LOOKUP3Response response1 = nfsd.lookup(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  LOOKUP3Response response2 = nfsd.lookup(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example 9
Source File: TestRpcProgramNfs3.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testSetattr() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  SetAttr3 symAttr = new SetAttr3(0, 1, 0, 0, null, null,
      EnumSet.of(SetAttrField.UID));
  SETATTR3Request req = new SETATTR3Request(handle, symAttr, false, null);
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  SETATTR3Response response1 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  SETATTR3Response response2 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example 10
Source File: TestRpcProgramNfs3.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testPathconf() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  PATHCONF3Request req = new PATHCONF3Request(handle);
  req.serialize(xdr_req);
  
  // Attempt by an unprivileged user should fail.
  PATHCONF3Response response1 = nfsd.pathconf(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  PATHCONF3Response response2 = nfsd.pathconf(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example 11
Source File: TestRpcProgramNfs3.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testMkdir() throws Exception {//FixME
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  MKDIR3Request req = new MKDIR3Request(handle, "fubar1", new SetAttr3());
  req.serialize(xdr_req);
  
  // Attempt to mkdir by an unprivileged user should fail.
  MKDIR3Response response1 = nfsd.mkdir(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  XDR xdr_req2 = new XDR();
  MKDIR3Request req2 = new MKDIR3Request(handle, "fubar2", new SetAttr3());
  req2.serialize(xdr_req2);
  
  // Attempt to mkdir by a privileged user should pass.
  MKDIR3Response response2 = nfsd.mkdir(xdr_req2.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example 12
Source File: TestRpcProgramNfs3.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testReaddirplus() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  READDIRPLUS3Request req = new READDIRPLUS3Request(handle, 0, 0, 3, 2);
  req.serialize(xdr_req);
  
  // Attempt by an unprivileged user should fail.
  READDIRPLUS3Response response1 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  READDIRPLUS3Response response2 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example 13
Source File: TestRpcProgramNfs3.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testGetattr() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  GETATTR3Request req = new GETATTR3Request(handle);
  req.serialize(xdr_req);
  
  // Attempt by an unprivileged user should fail.
  GETATTR3Response response1 = nfsd.getattr(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  GETATTR3Response response2 = nfsd.getattr(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example 14
Source File: TestRpcProgramNfs3.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testWrite() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);

  byte[] buffer = new byte[10];
  for (int i = 0; i < 10; i++) {
    buffer[i] = (byte) i;
  }

  WRITE3Request writeReq = new WRITE3Request(handle, 0, 10,
      WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
  XDR xdr_req = new XDR();
  writeReq.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  WRITE3Response response1 = nfsd.write(xdr_req.asReadOnlyWrap(),
      null, 1, securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  WRITE3Response response2 = nfsd.write(xdr_req.asReadOnlyWrap(),
      null, 1, securityHandler,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect response:", null, response2);
}
 
Example 15
Source File: TestRpcProgramNfs3.java    From hadoop with Apache License 2.0
@Test(timeout = 60000)
public void testReadlink() throws Exception {
  // Create a symlink first.
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(),
      "bar");
  req.serialize(xdr_req);
  
  SYMLINK3Response response = nfsd.symlink(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response.getStatus());

  // Now perform readlink operations.
  FileHandle handle2 = response.getObjFileHandle();
  XDR xdr_req2 = new XDR();
  READLINK3Request req2 = new READLINK3Request(handle2);
  req2.serialize(xdr_req2);

  // Attempt by an unprivileged user should fail.
  READLINK3Response response1 = nfsd.readlink(xdr_req2.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  READLINK3Response response2 = nfsd.readlink(xdr_req2.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Example 16
Source File: DFSOutputStream.java    From big-c with Apache License 2.0
private DFSOutputStream(DFSClient dfsClient, String src, Progressable progress,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  super(getChecksum4Compute(checksum, stat));
  this.dfsClient = dfsClient;
  this.src = src;
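  // Cache the inode id the NameNode reported in the HdfsFileStatus.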
  this.fileId = stat.getFileId();
  this.blockSize = stat.getBlockSize();
  this.blockReplication = stat.getReplication();
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
  this.progress = progress;
  this.cachingStrategy = new AtomicReference<CachingStrategy>(
      dfsClient.getDefaultWriteCachingStrategy());
  if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug(
        "Set non-null progress callback on DFSOutputStream " + src);
  }
  
  this.bytesPerChecksum = checksum.getBytesPerChecksum();
  if (bytesPerChecksum <= 0) {
    throw new HadoopIllegalArgumentException(
        "Invalid value: bytesPerChecksum = " + bytesPerChecksum + " <= 0");
  }
  if (blockSize % bytesPerChecksum != 0) {
    throw new HadoopIllegalArgumentException("Invalid values: "
        + DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY + " (=" + bytesPerChecksum
        + ") must divide block size (=" + blockSize + ").");
  }
  this.checksum4WriteBlock = checksum;

  this.dfsclientSlowLogThresholdMs =
    dfsClient.getConf().dfsclientSlowIoWarningThresholdMs;
  this.byteArrayManager = dfsClient.getClientContext().getByteArrayManager();
}
 
Example 17
Source File: TestRpcProgramNfs3.java    From big-c with Apache License 2.0
private void commit(String fileName, int len) throws Exception {
  final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName);
  final long dirId = status.getFileId();
  final FileHandle handle = new FileHandle(dirId);
  final XDR xdr_req = new XDR();
  final COMMIT3Request req = new COMMIT3Request(handle, 0, len);
  req.serialize(xdr_req);

  Channel ch = Mockito.mock(Channel.class);

  COMMIT3Response response2 = nfsd.commit(xdr_req.asReadOnlyWrap(),
      ch, 1, securityHandler,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect COMMIT3Response:", null, response2);
}
 
Example 18
Source File: TestWrites.java    From big-c with Apache License 2.0
@Test
public void testOOOWrites() throws IOException, InterruptedException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  RpcProgramNfs3 nfsd;
  final int bufSize = 32;
  final int numOOO = 3;
  SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
  String currentUser = System.getProperty("user.name");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(currentUser),
      "*");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserIpConfKey(currentUser),
      "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  // Use an ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

    DFSClient dfsClient = new DFSClient(NameNode.getAddress(config), config);
    HdfsFileStatus status = dfsClient.getFileInfo("/");
    FileHandle rootHandle = new FileHandle(status.getFileId());

    CREATE3Request createReq = new CREATE3Request(rootHandle,
        "out-of-order-write" + System.currentTimeMillis(),
        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    XDR createXdr = new XDR();
    createReq.serialize(createXdr);
    CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    FileHandle handle = createRsp.getObjHandle();

    byte[][] oooBuf = new byte[numOOO][bufSize];
    for (int i = 0; i < numOOO; i++) {
      Arrays.fill(oooBuf[i], (byte) i);
    }

    for (int i = 0; i < numOOO; i++) {
      final long offset = (numOOO - 1 - i) * bufSize;
      WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize,
          WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
      XDR writeXdr = new XDR();
      writeReq.serialize(writeXdr);
      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
          new InetSocketAddress("localhost", 1234));
    }

    waitWrite(nfsd, handle, 60000);
    READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
    XDR readXdr = new XDR();
    readReq.serialize(readXdr);
    READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", config.getInt(
            NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
            NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
    assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 19
Source File: TestWrites.java    From hadoop with Apache License 2.0
@Test
public void testOOOWrites() throws IOException, InterruptedException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  RpcProgramNfs3 nfsd;
  final int bufSize = 32;
  final int numOOO = 3;
  SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
  String currentUser = System.getProperty("user.name");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(currentUser),
      "*");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserIpConfKey(currentUser),
      "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  // Use an ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

    DFSClient dfsClient = new DFSClient(NameNode.getAddress(config), config);
    HdfsFileStatus status = dfsClient.getFileInfo("/");
    FileHandle rootHandle = new FileHandle(status.getFileId());

    CREATE3Request createReq = new CREATE3Request(rootHandle,
        "out-of-order-write" + System.currentTimeMillis(),
        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    XDR createXdr = new XDR();
    createReq.serialize(createXdr);
    CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    FileHandle handle = createRsp.getObjHandle();

    byte[][] oooBuf = new byte[numOOO][bufSize];
    for (int i = 0; i < numOOO; i++) {
      Arrays.fill(oooBuf[i], (byte) i);
    }

    for (int i = 0; i < numOOO; i++) {
      final long offset = (numOOO - 1 - i) * bufSize;
      WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize,
          WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
      XDR writeXdr = new XDR();
      writeReq.serialize(writeXdr);
      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
          new InetSocketAddress("localhost", 1234));
    }

    waitWrite(nfsd, handle, 60000);
    READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
    XDR readXdr = new XDR();
    readReq.serialize(readXdr);
    READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", config.getInt(
            NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
            NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
    assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 20
Source File: TestReaddir.java    From big-c with Apache License 2.0
@Test
// Test readdirplus
public void testReaddirPlus() throws IOException {
  // Get inodeId of /tmp
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  
  // Create related part of the XDR request
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  handle.serialize(xdr_req);
  xdr_req.writeLongAsHyper(0); // cookie
  xdr_req.writeLongAsHyper(0); // verifier
  xdr_req.writeInt(100); // dirCount
  xdr_req.writeInt(1000); // maxCount

  READDIRPLUS3Response responsePlus = nfsd.readdirplus(xdr_req
      .asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost",
      1234));
  List<EntryPlus3> direntPlus = responsePlus.getDirListPlus().getEntries();
  assertTrue(direntPlus.size() == 5); // including dot, dotdot

  // Test start listing from f2
  status = nn.getRpcServer().getFileInfo(testdir + "/f2");
  long f2Id = status.getFileId();

  // Create related part of the XDR request
  xdr_req = new XDR();
  handle = new FileHandle(dirId);
  handle.serialize(xdr_req);
  xdr_req.writeLongAsHyper(f2Id); // cookie
  xdr_req.writeLongAsHyper(0); // verifier
  xdr_req.writeInt(100); // dirCount
  xdr_req.writeInt(1000); // maxCount

  responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler,
      new InetSocketAddress("localhost", 1234));
  direntPlus = responsePlus.getDirListPlus().getEntries();
  assertTrue(direntPlus.size() == 1);
  EntryPlus3 entryPlus = direntPlus.get(0);
  assertTrue(entryPlus.getName().equals("f3"));

  // When the cookie entry is deleted, the listing starts over, not including dot and dotdot
  hdfs.delete(new Path(testdir + "/f2"), false);

  responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler,
      new InetSocketAddress("localhost", 1234));
  direntPlus = responsePlus.getDirListPlus().getEntries();
  assertTrue(direntPlus.size() == 2); // No dot, dotdot
}