Java Code Examples for org.apache.hadoop.hdfs.web.WebHdfsFileSystem

The following examples show how to use org.apache.hadoop.hdfs.web.WebHdfsFileSystem. These examples are extracted from open source projects; each example header names the project and source file it was taken from.
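Before the extracted examples, here is a minimal client-side sketch (not taken from any of the projects below) showing the usual way a WebHdfsFileSystem is obtained: through the standard FileSystem factory with a webhdfs:// URI. The host name, port, and file path are placeholders.

import java.io.InputStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.io.IOUtils;

public class WebHdfsReadExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The "webhdfs" scheme resolves to WebHdfsFileSystem; host and port are placeholders.
    URI uri = URI.create("webhdfs://namenode.example.com:9870");
    WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(uri, conf);
    try (InputStream in = fs.open(new Path("/tmp/example.txt"))) {
      // Copy the file contents to stdout; 4096 is the copy buffer size.
      IOUtils.copyBytes(in, System.out, 4096, false);
    } finally {
      fs.close();
    }
  }
}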
Example 1
Source Project: hadoop   Source File: NameNodeHttpServer.java    License: Apache License 2.0
private void initWebHdfs(Configuration conf) throws IOException {
  if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
    // set user pattern based on configuration file
    UserParam.setUserPattern(conf.get(
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

    // add authentication filter for webhdfs
    final String className = conf.get(
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
    final String name = className;

    final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
    Map<String, String> params = getAuthFilterParams(conf);
    HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
        params, new String[] { pathSpec });
    HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
        + ")");

    // add webhdfs packages
    httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
        .getPackage().getName() + ";" + Param.class.getPackage().getName(),
        pathSpec);
  }
}
 
Example 2
Source Project: hadoop   Source File: TestAuditLogs.java    License: Apache License 2.0
/** test that access via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfs() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short)0644));
  fs.setOwner(file, "root", null);

  setupAuditLogs();

  WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
  InputStream istream = webfs.open(file);
  int val = istream.read();
  istream.close();

  verifyAuditLogsRepeat(true, 3);
  assertTrue("failed to read from file", val >= 0);
}
 
Example 3
Source Project: hadoop   Source File: TestAuditLogs.java    License: Apache License 2.0
/** test that stat via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsStat() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short)0644));
  fs.setOwner(file, "root", null);

  setupAuditLogs();

  WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
  FileStatus st = webfs.getFileStatus(file);

  verifyAuditLogs(true);
  assertTrue("failed to stat file", st != null && st.isFile());
}
 
Example 4
Source Project: hadoop   Source File: TestAuditLogs.java    License: Apache License 2.0
/** test that denied access via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsDenied() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short)0600));
  fs.setOwner(file, "root", null);

  setupAuditLogs();
  try {
    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
    InputStream istream = webfs.open(file);
    int val = istream.read();
    fail("open+read must not succeed, got " + val);
  } catch(AccessControlException E) {
    System.out.println("got access denied, as expected.");
  }
  verifyAuditLogsRepeat(false, 2);
}
 
Example 5
Source Project: big-c   Source File: NameNodeHttpServer.java    License: Apache License 2.0
private void initWebHdfs(Configuration conf) throws IOException {
  if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
    // set user pattern based on configuration file
    UserParam.setUserPattern(conf.get(
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

    // add authentication filter for webhdfs
    final String className = conf.get(
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
    final String name = className;

    final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
    Map<String, String> params = getAuthFilterParams(conf);
    HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
        params, new String[] { pathSpec });
    HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
        + ")");

    // add webhdfs packages
    httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
        .getPackage().getName() + ";" + Param.class.getPackage().getName(),
        pathSpec);
  }
}
 
Example 6
Source Project: big-c   Source File: TestAuditLogs.java    License: Apache License 2.0
/** test that access via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfs() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short)0644));
  fs.setOwner(file, "root", null);

  setupAuditLogs();

  WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
  InputStream istream = webfs.open(file);
  int val = istream.read();
  istream.close();

  verifyAuditLogsRepeat(true, 3);
  assertTrue("failed to read from file", val >= 0);
}
 
Example 7
Source Project: big-c   Source File: TestAuditLogs.java    License: Apache License 2.0
/** test that stat via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsStat() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short)0644));
  fs.setOwner(file, "root", null);

  setupAuditLogs();

  WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
  FileStatus st = webfs.getFileStatus(file);

  verifyAuditLogs(true);
  assertTrue("failed to stat file", st != null && st.isFile());
}
 
Example 8
Source Project: big-c   Source File: TestAuditLogs.java    License: Apache License 2.0
/** test that denied access via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsDenied() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short)0600));
  fs.setOwner(file, "root", null);

  setupAuditLogs();
  try {
    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
    InputStream istream = webfs.open(file);
    int val = istream.read();
    fail("open+read must not succeed, got " + val);
  } catch(AccessControlException E) {
    System.out.println("got access denied, as expected.");
  }
  verifyAuditLogsRepeat(false, 2);
}
 
Example 9
Source Project: hadoop   Source File: TestHttpFSWithKerberos.java    License: Apache License 2.0
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDelegationTokenWithWebhdfsFileSystem() throws Exception {
  testDelegationTokenWithinDoAs(WebHdfsFileSystem.class, false);
}
 
Example 10
Source Project: hadoop   Source File: DFSUtil.java    License: Apache License 2.0
/**
 * Returns the HA NameNode WebHDFS addresses from the configuration, keyed by
 * nameservice ID and then by namenode ID.
 *
 * @return map of nameservice ID to (namenode ID to InetSocketAddress)
 */
public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
    Configuration conf, String scheme) {
  if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
    return getAddresses(conf, null,
        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
    return getAddresses(conf, null,
        DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
  } else {
    throw new IllegalArgumentException("Unsupported scheme: " + scheme);
  }
}
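As a hedged illustration (not part of the original source), the helper above could be consumed as sketched below: it resolves the HA NameNode HTTP addresses for the plain webhdfs scheme from an hdfs-site.xml on the classpath and prints them, with the nameservice and namenode IDs coming from that configuration.

import java.net.InetSocketAddress;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

public class PrintHaWebHdfsAddresses {
  public static void main(String[] args) {
    // Assumes an HA-configured hdfs-site.xml is on the classpath.
    Configuration conf = new Configuration();
    Map<String, Map<String, InetSocketAddress>> addrs =
        DFSUtil.getHaNnWebHdfsAddresses(conf, WebHdfsFileSystem.SCHEME);
    for (Map.Entry<String, Map<String, InetSocketAddress>> ns : addrs.entrySet()) {
      for (Map.Entry<String, InetSocketAddress> nn : ns.getValue().entrySet()) {
        // Print nameservice/namenode ID and the resolved HTTP address.
        System.out.println(ns.getKey() + "/" + nn.getKey() + " -> " + nn.getValue());
      }
    }
  }
}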
 
Example 11
Source Project: hadoop   Source File: NamenodeWebHdfsMethods.java    License: Apache License 2.0
private Token<? extends TokenIdentifier> generateDelegationToken(
    final NameNode namenode, final UserGroupInformation ugi,
    final String renewer) throws IOException {
  final Credentials c = DelegationTokenSecretManager.createCredentials(
      namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
  if (c == null) {
    return null;
  }
  final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
  Text kind = request.getScheme().equals("http") ? WebHdfsFileSystem.TOKEN_KIND
      : SWebHdfsFileSystem.TOKEN_KIND;
  t.setKind(kind);
  return t;
}
 
Example 12
Source Project: hadoop   Source File: TestSymlinkHdfs.java    License: Apache License 2.0
@BeforeClass
public static void beforeClassSetup() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.set(FsPermission.UMASK_LABEL, "000");
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 0);
  cluster = new MiniDFSCluster.Builder(conf).build();
  webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
  dfs = cluster.getFileSystem();
}
 
Example 13
Source Project: hadoop   Source File: TestHttpFSPorts.java    License: Apache License 2.0
@Test
public void testWebHdfsCustomDefaultPorts() throws IOException {
  URI uri = URI.create("webhdfs://localhost");
  WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(uri, conf);

  assertEquals(123, fs.getDefaultPort());
  assertEquals(uri, fs.getUri());
  assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
}
 
Example 14
Source Project: hadoop   Source File: TestHttpFSPorts.java    License: Apache License 2.0
@Test
public void testWebHdfsCustomUriPortWithCustomDefaultPorts() throws IOException {
  URI uri = URI.create("webhdfs://localhost:789");
  WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(uri, conf);

  assertEquals(123, fs.getDefaultPort());
  assertEquals(uri, fs.getUri());
  assertEquals("127.0.0.1:789", fs.getCanonicalServiceName());
}
 
Example 15
Source Project: hadoop   Source File: TestDelegationToken.java    License: Apache License 2.0
@SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final String uri = WebHdfsFileSystem.SCHEME  + "://"
      + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  //get file system as JobTracker
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      "JobTracker", new String[]{"user"});
  final WebHdfsFileSystem webhdfs = ugi.doAs(
      new PrivilegedExceptionAction<WebHdfsFileSystem>() {
    @Override
    public WebHdfsFileSystem run() throws Exception {
      return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
    }
  });

  { //test addDelegationTokens(..)
    Credentials creds = new Credentials();
    final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi, tokens[0]);
    final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length);
  }
}
 
Example 16
Source Project: hadoop   Source File: TestAuditLogs.java    License: Apache License 2.0
/** test that open via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsOpen() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short)0644));
  fs.setOwner(file, "root", null);

  setupAuditLogs();

  WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
  webfs.open(file);

  verifyAuditLogsCheckPattern(true, 3, webOpenPattern);
}
 
Example 17
Source Project: hadoop   Source File: TestDFSClientRetries.java    License: Apache License 2.0
private static FileSystem createFsWithDifferentUsername(
    final Configuration conf, final boolean isWebHDFS
    ) throws IOException, InterruptedException {
  final String username = UserGroupInformation.getCurrentUser(
      ).getShortUserName() + "_XXX";
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      username, new String[]{"supergroup"});

  return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME)
      : DFSTestUtil.getFileSystemAs(ugi, conf);
}
 
Example 18
Source Project: big-c   Source File: TestHttpFSWithKerberos.java    License: Apache License 2.0 5 votes vote down vote up
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDelegationTokenWithWebhdfsFileSystem() throws Exception {
  testDelegationTokenWithinDoAs(WebHdfsFileSystem.class, false);
}
 
Example 19
Source Project: big-c   Source File: DFSUtil.java    License: Apache License 2.0
/**
 * Returns the HA NameNode WebHDFS addresses from the configuration, keyed by
 * nameservice ID and then by namenode ID.
 *
 * @return map of nameservice ID to (namenode ID to InetSocketAddress)
 */
public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
    Configuration conf, String scheme) {
  if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
    return getAddresses(conf, null,
        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
    return getAddresses(conf, null,
        DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
  } else {
    throw new IllegalArgumentException("Unsupported scheme: " + scheme);
  }
}
 
Example 20
Source Project: big-c   Source File: NamenodeWebHdfsMethods.java    License: Apache License 2.0
private Token<? extends TokenIdentifier> generateDelegationToken(
    final NameNode namenode, final UserGroupInformation ugi,
    final String renewer) throws IOException {
  final Credentials c = DelegationTokenSecretManager.createCredentials(
      namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
  if (c == null) {
    return null;
  }
  final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
  Text kind = request.getScheme().equals("http") ? WebHdfsFileSystem.TOKEN_KIND
      : SWebHdfsFileSystem.TOKEN_KIND;
  t.setKind(kind);
  return t;
}
 
Example 21
Source Project: big-c   Source File: TestSymlinkHdfs.java    License: Apache License 2.0
@BeforeClass
public static void beforeClassSetup() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.set(FsPermission.UMASK_LABEL, "000");
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 0);
  cluster = new MiniDFSCluster.Builder(conf).build();
  webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
  dfs = cluster.getFileSystem();
}
 
Example 22
Source Project: big-c   Source File: TestHttpFSPorts.java    License: Apache License 2.0
@Test
public void testWebHdfsCustomDefaultPorts() throws IOException {
  URI uri = URI.create("webhdfs://localhost");
  WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(uri, conf);

  assertEquals(123, fs.getDefaultPort());
  assertEquals(uri, fs.getUri());
  assertEquals("127.0.0.1:123", fs.getCanonicalServiceName());
}
 
Example 23
Source Project: big-c   Source File: TestHttpFSPorts.java    License: Apache License 2.0
@Test
public void testWebHdfsCustomUriPortWithCustomDefaultPorts() throws IOException {
  URI uri = URI.create("webhdfs://localhost:789");
  WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(uri, conf);

  assertEquals(123, fs.getDefaultPort());
  assertEquals(uri, fs.getUri());
  assertEquals("127.0.0.1:789", fs.getCanonicalServiceName());
}
 
Example 24
Source Project: big-c   Source File: TestDelegationToken.java    License: Apache License 2.0
@SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final String uri = WebHdfsFileSystem.SCHEME  + "://"
      + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  //get file system as JobTracker
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      "JobTracker", new String[]{"user"});
  final WebHdfsFileSystem webhdfs = ugi.doAs(
      new PrivilegedExceptionAction<WebHdfsFileSystem>() {
    @Override
    public WebHdfsFileSystem run() throws Exception {
      return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
    }
  });

  { //test addDelegationTokens(..)
    Credentials creds = new Credentials();
    final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi, tokens[0]);
    final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length);
  }
}
 
Example 25
Source Project: big-c   Source File: TestAuditLogs.java    License: Apache License 2.0
/** test that open via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsOpen() throws Exception {
  final Path file = new Path(fnames[0]);

  fs.setPermission(file, new FsPermission((short)0644));
  fs.setOwner(file, "root", null);

  setupAuditLogs();

  WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
  webfs.open(file);

  verifyAuditLogsCheckPattern(true, 3, webOpenPattern);
}
 
Example 26
Source Project: big-c   Source File: TestDFSClientRetries.java    License: Apache License 2.0
private static FileSystem createFsWithDifferentUsername(
    final Configuration conf, final boolean isWebHDFS
    ) throws IOException, InterruptedException {
  final String username = UserGroupInformation.getCurrentUser(
      ).getShortUserName() + "_XXX";
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      username, new String[]{"supergroup"});

  return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME)
      : DFSTestUtil.getFileSystemAs(ugi, conf);
}
 
Example 27
Source Project: hadoop   Source File: TestHttpFSFWithWebhdfsFileSystem.java    License: Apache License 2.0
@Override
protected Class getFileSystemClass() {
  return WebHdfsFileSystem.class;
}
 
Example 28
Source Project: hadoop   Source File: DelegationTokenIdentifier.java    License: Apache License 2.0
@Override
public Text getKind() {
  return WebHdfsFileSystem.TOKEN_KIND;
}
 
Example 29
Source Project: hadoop   Source File: NamenodeWebHdfsMethods.java    License: Apache License 2.0
private URI redirectURI(final NameNode namenode,
    final UserGroupInformation ugi, final DelegationParam delegation,
    final UserParam username, final DoAsParam doAsUser,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, final String excludeDatanodes,
    final Param<?, ?>... parameters) throws URISyntaxException, IOException {
  final DatanodeInfo dn;
  try {
    dn = chooseDatanode(namenode, path, op, openOffset, blocksize,
        excludeDatanodes);
  } catch (InvalidTopologyException ite) {
    throw new IOException("Failed to find datanode, suggest to check cluster health.", ite);
  }

  final String delegationQuery;
  if (!UserGroupInformation.isSecurityEnabled()) {
    //security disabled
    delegationQuery = Param.toSortedString("&", doAsUser, username);
  } else if (delegation.getValue() != null) {
    //client has provided a token
    delegationQuery = "&" + delegation;
  } else {
    //generate a token
    final Token<? extends TokenIdentifier> t = generateDelegationToken(
        namenode, ugi, request.getUserPrincipal().getName());
    delegationQuery = "&" + new DelegationParam(t.encodeToUrlString());
  }
  final String query = op.toQueryString() + delegationQuery
      + "&" + new NamenodeAddressParam(namenode)
      + Param.toSortedString("&", parameters);
  final String uripath = WebHdfsFileSystem.PATH_PREFIX + path;

  final String scheme = request.getScheme();
  int port = "http".equals(scheme) ? dn.getInfoPort() : dn
      .getInfoSecurePort();
  final URI uri = new URI(scheme, null, dn.getHostName(), port, uripath,
      query, null);

  if (LOG.isTraceEnabled()) {
    LOG.trace("redirectURI=" + uri);
  }
  return uri;
}
 
Example 30
Source Project: hadoop   Source File: TestQuota.java    License: Apache License 2.0
/**
 * Violate a space quota using files of size < 1 block. Test that block
 * allocation conservatively assumes that for quota checking the entire
 * space of the block is used.
 */
@Test
public void testBlockAllocationAdjustsUsageConservatively()
    throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int BLOCK_SIZE = 6 * 1024;
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  DFSAdmin admin = new DFSAdmin(conf);

  final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  final String webhdfsuri = WebHdfsFileSystem.SCHEME + "://" + nnAddr;
  System.out.println("webhdfsuri=" + webhdfsuri);
  final FileSystem webhdfs = new Path(webhdfsuri).getFileSystem(conf);

  try {
    Path dir = new Path("/test");
    Path file1 = new Path("/test/test1");
    Path file2 = new Path("/test/test2");
    boolean exceededQuota = false;
    final int QUOTA_SIZE = 3 * BLOCK_SIZE; // total space usage including
                                           // replication
    final int FILE_SIZE = BLOCK_SIZE / 2;
    ContentSummary c;

    // Create the directory and set the quota
    assertTrue(fs.mkdirs(dir));
    runCommand(admin, false, "-setSpaceQuota", Integer.toString(QUOTA_SIZE),
        dir.toString());

    // Creating a file should use half the quota
    DFSTestUtil.createFile(fs, file1, FILE_SIZE, (short) 3, 1L);
    DFSTestUtil.waitReplication(fs, file1, (short) 3);
    c = fs.getContentSummary(dir);
    checkContentSummary(c, webhdfs.getContentSummary(dir));
    assertEquals("Quota is half consumed", QUOTA_SIZE / 2,
                 c.getSpaceConsumed());

    // We cannot create the 2nd file: even though the total space used by the
    // two files (2 * 3 * FILE_SIZE = 3 * BLOCK_SIZE) would fit within the
    // quota (3 * BLOCK_SIZE), block allocation adjusts the space used
    // conservatively (3 * BLOCK_SIZE, i.e. it assumes a full block is
    // written), which violates the quota because half of it is already
    // consumed by the first file.
    try {
      DFSTestUtil.createFile(fs, file2, FILE_SIZE, (short) 3, 1L);
    } catch (QuotaExceededException e) {
      exceededQuota = true;
    }
    assertTrue("Quota not exceeded", exceededQuota);
  } finally {
    cluster.shutdown();
  }
}