Java Code Examples for org.apache.hadoop.http.HttpServer2

The following examples show how to use org.apache.hadoop.http.HttpServer2. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: hadoop-ozone   Source File: HddsConfServlet.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
    throws ServletException, IOException {

  // Enforce instrumentation access control before exposing configuration.
  if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
      request, response)) {
    return;
  }

  // Content type follows the client's Accept header (XML or JSON).
  String format = parseAcceptHeader(request);
  if (FORMAT_XML.equals(format)) {
    response.setContentType("text/xml; charset=utf-8");
  } else if (FORMAT_JSON.equals(format)) {
    response.setContentType("application/json; charset=utf-8");
  }

  String name = request.getParameter("name");
  String cmd = request.getParameter(COMMAND);

  // try-with-resources closes the writer even if processCommand throws;
  // the original leaked the writer on that path.
  try (Writer out = response.getWriter()) {
    processCommand(cmd, format, request, response, out, name);
  }
}
 
Example 2
Source Project: hadoop   Source File: TestJobEndNotifier.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testNotificationOnLastRetryNormalShutdown() throws Exception {
  HttpServer2 server = startHttpServer();
  try {
    // Act like it is the second attempt. Default max attempts is 2.
    MRApp app = spy(new MRAppWithCustomContainerAllocator(
        2, 2, true, this.getClass().getName(), true, 2, true));
    doNothing().when(app).sysexit();
    JobConf conf = new JobConf();
    conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
        JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
    JobImpl job = (JobImpl)app.submit(conf);
    app.waitForInternalState(job, JobStateInternal.SUCCEEDED);
    // Unregistration succeeds: successfullyUnregistered is set.
    app.shutDownJob();
    Assert.assertTrue(app.isLastAMRetry());
    // On the last retry the notification must have been delivered exactly once.
    Assert.assertEquals(1, JobEndServlet.calledTimes);
    Assert.assertEquals("jobid=" + job.getID() + "&status=SUCCEEDED",
        JobEndServlet.requestUri.getQuery());
    Assert.assertEquals(JobState.SUCCEEDED.toString(),
        JobEndServlet.foundJobState);
  } finally {
    // Stop the embedded HTTP server even when an assertion fails, so the
    // port is released for subsequent tests (the original leaked it).
    server.stop();
  }
}
 
Example 3
Source Project: hadoop   Source File: TestJobEndNotifier.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testAbsentNotificationOnNotLastRetryUnregistrationFailure()
    throws Exception {
  HttpServer2 server = startHttpServer();
  try {
    MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
        this.getClass().getName(), true, 1, false));
    doNothing().when(app).sysexit();
    JobConf conf = new JobConf();
    conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
        JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
    JobImpl job = (JobImpl)app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    app.getContext().getEventHandler()
        .handle(new JobEvent(app.getJobId(), JobEventType.JOB_AM_REBOOT));
    app.waitForInternalState(job, JobStateInternal.REBOOT);
    // Now shutdown.
    // Unregistration fails: isLastAMRetry is recalculated; this is not
    // the last attempt.
    app.shutDownJob();
    // Not the last AM attempt, so the user should see the job as still
    // running and no end-notification should have been sent.
    app.waitForState(job, JobState.RUNNING);
    Assert.assertFalse(app.isLastAMRetry());
    Assert.assertEquals(0, JobEndServlet.calledTimes);
    Assert.assertNull(JobEndServlet.requestUri);
    Assert.assertNull(JobEndServlet.foundJobState);
  } finally {
    // Release the HTTP port even if an assertion fails (original leaked it).
    server.stop();
  }
}
 
Example 4
Source Project: hadoop   Source File: TestJobEndNotifier.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds and starts an ephemeral-port HttpServer2 hosting JobEndServlet,
 * and resets the servlet's static bookkeeping for a fresh test run.
 */
private static HttpServer2 startHttpServer() throws Exception {
  // The test webapp directory must exist before the server is built.
  File webappDir = new File(
      System.getProperty("build.webapps", "build/webapps"), "test");
  webappDir.mkdirs();

  HttpServer2 httpServer = new HttpServer2.Builder()
      .setName("test")
      .addEndpoint(URI.create("http://localhost:0"))
      .setFindPort(true)
      .build();
  httpServer.addServlet("jobend", "/jobend", JobEndServlet.class);
  httpServer.start();

  // Reset static counters and record the actual bound port.
  JobEndServlet.calledTimes = 0;
  JobEndServlet.requestUri = null;
  JobEndServlet.foundJobState = null;
  JobEndServlet.baseUrl =
      "http://localhost:" + httpServer.getConnectorAddress(0).getPort() + "/";
  return httpServer;
}
 
Example 5
Source Project: hadoop   Source File: TestJobEndNotifier.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Starts a test HttpServer2 on an ephemeral port with the delay/jobend/fail
 * servlets registered, and resets the servlets' per-test counters.
 */
public void setUp() throws Exception {
  // Ensure the webapp root used by HttpServer2 exists.
  new File(System.getProperty("build.webapps", "build/webapps"), "test")
      .mkdirs();

  server = new HttpServer2.Builder()
      .setName("test")
      .addEndpoint(URI.create("http://localhost:0"))
      .setFindPort(true)
      .build();
  server.addServlet("delay", "/delay", DelayServlet.class);
  server.addServlet("jobend", "/jobend", JobEndServlet.class);
  server.addServlet("fail", "/fail", FailServlet.class);
  server.start();

  // Record the actual bound port for the tests to hit.
  baseUrl = new URL(
      "http://localhost:" + server.getConnectorAddress(0).getPort() + "/");

  // Reset static counters in the test servlets.
  JobEndServlet.calledTimes = 0;
  JobEndServlet.requestUri = null;
  DelayServlet.calledTimes = 0;
  FailServlet.calledTimes = 0;
}
 
Example 6
Source Project: hadoop   Source File: Nfs3HttpServer.java    License: Apache License 2.0 6 votes vote down vote up
// Starts the NFS3 HTTP/HTTPS endpoints and records the bound port numbers.
void start() throws IOException {
  final InetSocketAddress httpAddr = getHttpAddress(conf);

  // The HTTPS bind address comes from the NFS configuration keys.
  final String httpsAddrString = conf.get(
      NfsConfigKeys.NFS_HTTPS_ADDRESS_KEY,
      NfsConfigKeys.NFS_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);

  // Shared NN/JN builder template, parameterized with the NFS principal
  // and keytab keys and the "nfs3" webapp name.
  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "nfs3",
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY,
      NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY);

  this.httpServer = builder.build();
  this.httpServer.start();
  
  // connIdx walks the connector list: the HTTP connector (when enabled)
  // occupies index 0, so it must be consumed before the HTTPS lookup.
  HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
  int connIdx = 0;
  if (policy.isHttpEnabled()) {
    infoPort = httpServer.getConnectorAddress(connIdx++).getPort();
  }

  if (policy.isHttpsEnabled()) {
    infoSecurePort = httpServer.getConnectorAddress(connIdx).getPort();
  }
}
 
Example 7
Source Project: hadoop   Source File: NameNodeHttpServer.java    License: Apache License 2.0 6 votes vote down vote up
// Registers WebHDFS on the NameNode HTTP server when it is enabled:
// configures the user-name pattern, installs the authentication filter on
// the WebHDFS path, and adds the Jersey resource packages for the REST API.
private void initWebHdfs(Configuration conf) throws IOException {
  if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
    // set user pattern based on configuration file
    UserParam.setUserPattern(conf.get(
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

    // add authentication filter for webhdfs
    final String className = conf.get(
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
    // The filter is registered under its own class name.
    final String name = className;

    // Filter applies to everything under the WebHDFS path prefix.
    final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
    Map<String, String> params = getAuthFilterParams(conf);
    HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
        params, new String[] { pathSpec });
    HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
        + ")");

    // add webhdfs packages
    httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
        .getPackage().getName() + ";" + Param.class.getPackage().getName(),
        pathSpec);
  }
}
 
Example 8
Source Project: hadoop   Source File: NameNodeHttpServer.java    License: Apache License 2.0 6 votes vote down vote up
// Registers all internal NameNode servlets. The trailing boolean on
// addInternalServlet marks endpoints that require authentication.
private static void setupServlets(HttpServer2 httpServer, Configuration conf) {
  httpServer.addInternalServlet("startupProgress",
      StartupProgressServlet.PATH_SPEC, StartupProgressServlet.class);
  // Delegation-token endpoints are registered with auth required (true).
  httpServer.addInternalServlet("getDelegationToken",
      GetDelegationTokenServlet.PATH_SPEC, 
      GetDelegationTokenServlet.class, true);
  httpServer.addInternalServlet("renewDelegationToken", 
      RenewDelegationTokenServlet.PATH_SPEC, 
      RenewDelegationTokenServlet.class, true);
  httpServer.addInternalServlet("cancelDelegationToken", 
      CancelDelegationTokenServlet.PATH_SPEC, 
      CancelDelegationTokenServlet.class, true);
  httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class,
      true);
  httpServer.addInternalServlet("imagetransfer", ImageServlet.PATH_SPEC,
      ImageServlet.class, true);
  // Browsing/read-only endpoints are registered without auth (false).
  httpServer.addInternalServlet("listPaths", "/listPaths/*",
      ListPathsServlet.class, false);
  httpServer.addInternalServlet("data", "/data/*",
      FileDataServlet.class, false);
  httpServer.addInternalServlet("checksum", "/fileChecksum/*",
      FileChecksumServlets.RedirectServlet.class, false);
  httpServer.addInternalServlet("contentSummary", "/contentSummary/*",
      ContentSummaryServlet.class, false);
}
 
Example 9
Source Project: hadoop   Source File: JournalNodeHttpServer.java    License: Apache License 2.0 6 votes vote down vote up
// Builds and starts the JournalNode HTTP server, exposing the authenticated
// /getJournal endpoint used to stream edit logs.
void start() throws IOException {
  final InetSocketAddress httpAddr = getAddress(conf);

  // HTTPS bind address from the JournalNode configuration keys.
  final String httpsAddrString = conf.get(
      DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);

  // Shared NN/JN builder template configures the SPNEGO principal/keytab.
  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "journal",
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY);

  httpServer = builder.build();
  // Context attributes let GetJournalEditServlet find the local
  // JournalNode instance and the active configuration.
  httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode);
  httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
  httpServer.addInternalServlet("getJournal", "/getJournal",
      GetJournalEditServlet.class, true);
  httpServer.start();
}
 
Example 10
Source Project: hadoop   Source File: TestTransferFsImage.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that getFileClient fails with SocketTimeoutException when the
 * image server does not respond within the configured read timeout.
 */
@Test(timeout = 5000)
public void testGetImageTimeout() throws Exception {
  HttpServer2 testServer = HttpServerFunctionalTest.createServer("hdfs");
  try {
    testServer.addServlet("ImageTransfer", ImageServlet.PATH_SPEC,
        TestImageTransferServlet.class);
    testServer.start();
    URL serverURL = HttpServerFunctionalTest.getServerURL(testServer);
    // Shorten the read timeout so the test fails fast.
    TransferFsImage.timeout = 2000;
    try {
      TransferFsImage.getFileClient(serverURL, "txid=1", null,
          null, false);
      fail("TransferImage Should fail with timeout");
    } catch (SocketTimeoutException e) {
      assertEquals("Read should timeout", "Read timed out", e.getMessage());
    }
  } finally {
    // testServer is assigned before the try block, so it can never be null
    // here; the original's dead null-check is removed.
    testServer.stop();
  }
}
 
Example 11
Source Project: big-c   Source File: TestJobEndNotifier.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testNotificationOnLastRetryNormalShutdown() throws Exception {
  HttpServer2 server = startHttpServer();
  try {
    // Act like it is the second attempt. Default max attempts is 2.
    MRApp app = spy(new MRAppWithCustomContainerAllocator(
        2, 2, true, this.getClass().getName(), true, 2, true));
    doNothing().when(app).sysexit();
    JobConf conf = new JobConf();
    conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
        JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
    JobImpl job = (JobImpl)app.submit(conf);
    app.waitForInternalState(job, JobStateInternal.SUCCEEDED);
    // Unregistration succeeds: successfullyUnregistered is set.
    app.shutDownJob();
    Assert.assertTrue(app.isLastAMRetry());
    // On the last retry the notification must have been delivered exactly once.
    Assert.assertEquals(1, JobEndServlet.calledTimes);
    Assert.assertEquals("jobid=" + job.getID() + "&status=SUCCEEDED",
        JobEndServlet.requestUri.getQuery());
    Assert.assertEquals(JobState.SUCCEEDED.toString(),
        JobEndServlet.foundJobState);
  } finally {
    // Stop the embedded HTTP server even when an assertion fails, so the
    // port is released for subsequent tests (the original leaked it).
    server.stop();
  }
}
 
Example 12
Source Project: big-c   Source File: TestJobEndNotifier.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testAbsentNotificationOnNotLastRetryUnregistrationFailure()
    throws Exception {
  HttpServer2 server = startHttpServer();
  try {
    MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
        this.getClass().getName(), true, 1, false));
    doNothing().when(app).sysexit();
    JobConf conf = new JobConf();
    conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
        JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
    JobImpl job = (JobImpl)app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    app.getContext().getEventHandler()
        .handle(new JobEvent(app.getJobId(), JobEventType.JOB_AM_REBOOT));
    app.waitForInternalState(job, JobStateInternal.REBOOT);
    // Now shutdown.
    // Unregistration fails: isLastAMRetry is recalculated; this is not
    // the last attempt.
    app.shutDownJob();
    // Not the last AM attempt, so the user should see the job as still
    // running and no end-notification should have been sent.
    app.waitForState(job, JobState.RUNNING);
    Assert.assertFalse(app.isLastAMRetry());
    Assert.assertEquals(0, JobEndServlet.calledTimes);
    Assert.assertNull(JobEndServlet.requestUri);
    Assert.assertNull(JobEndServlet.foundJobState);
  } finally {
    // Release the HTTP port even if an assertion fails (original leaked it).
    server.stop();
  }
}
 
Example 13
Source Project: big-c   Source File: TestJobEndNotifier.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds and starts an ephemeral-port HttpServer2 hosting JobEndServlet,
 * and resets the servlet's static bookkeeping for a fresh test run.
 */
private static HttpServer2 startHttpServer() throws Exception {
  // The test webapp directory must exist before the server is built.
  File webappDir = new File(
      System.getProperty("build.webapps", "build/webapps"), "test");
  webappDir.mkdirs();

  HttpServer2 httpServer = new HttpServer2.Builder()
      .setName("test")
      .addEndpoint(URI.create("http://localhost:0"))
      .setFindPort(true)
      .build();
  httpServer.addServlet("jobend", "/jobend", JobEndServlet.class);
  httpServer.start();

  // Reset static counters and record the actual bound port.
  JobEndServlet.calledTimes = 0;
  JobEndServlet.requestUri = null;
  JobEndServlet.foundJobState = null;
  JobEndServlet.baseUrl =
      "http://localhost:" + httpServer.getConnectorAddress(0).getPort() + "/";
  return httpServer;
}
 
Example 14
Source Project: big-c   Source File: TestJobEndNotifier.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Starts a test HttpServer2 on an ephemeral port with the delay/jobend/fail
 * servlets registered, and resets the servlets' per-test counters.
 */
public void setUp() throws Exception {
  // Ensure the webapp root used by HttpServer2 exists.
  new File(System.getProperty("build.webapps", "build/webapps"), "test")
      .mkdirs();

  server = new HttpServer2.Builder()
      .setName("test")
      .addEndpoint(URI.create("http://localhost:0"))
      .setFindPort(true)
      .build();
  server.addServlet("delay", "/delay", DelayServlet.class);
  server.addServlet("jobend", "/jobend", JobEndServlet.class);
  server.addServlet("fail", "/fail", FailServlet.class);
  server.start();

  // Record the actual bound port for the tests to hit.
  baseUrl = new URL(
      "http://localhost:" + server.getConnectorAddress(0).getPort() + "/");

  // Reset static counters in the test servlets.
  JobEndServlet.calledTimes = 0;
  JobEndServlet.requestUri = null;
  DelayServlet.calledTimes = 0;
  FailServlet.calledTimes = 0;
}
 
Example 15
Source Project: big-c   Source File: Nfs3HttpServer.java    License: Apache License 2.0 6 votes vote down vote up
// Starts the NFS3 HTTP/HTTPS endpoints and records the bound port numbers.
void start() throws IOException {
  final InetSocketAddress httpAddr = getHttpAddress(conf);

  // The HTTPS bind address comes from the NFS configuration keys.
  final String httpsAddrString = conf.get(
      NfsConfigKeys.NFS_HTTPS_ADDRESS_KEY,
      NfsConfigKeys.NFS_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);

  // Shared NN/JN builder template, parameterized with the NFS principal
  // and keytab keys and the "nfs3" webapp name.
  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "nfs3",
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY,
      NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY);

  this.httpServer = builder.build();
  this.httpServer.start();
  
  // connIdx walks the connector list: the HTTP connector (when enabled)
  // occupies index 0, so it must be consumed before the HTTPS lookup.
  HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
  int connIdx = 0;
  if (policy.isHttpEnabled()) {
    infoPort = httpServer.getConnectorAddress(connIdx++).getPort();
  }

  if (policy.isHttpsEnabled()) {
    infoSecurePort = httpServer.getConnectorAddress(connIdx).getPort();
  }
}
 
Example 16
Source Project: big-c   Source File: NameNodeHttpServer.java    License: Apache License 2.0 6 votes vote down vote up
// Registers WebHDFS on the NameNode HTTP server when it is enabled:
// configures the user-name pattern, installs the authentication filter on
// the WebHDFS path, and adds the Jersey resource packages for the REST API.
private void initWebHdfs(Configuration conf) throws IOException {
  if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
    // set user pattern based on configuration file
    UserParam.setUserPattern(conf.get(
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

    // add authentication filter for webhdfs
    final String className = conf.get(
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
    // The filter is registered under its own class name.
    final String name = className;

    // Filter applies to everything under the WebHDFS path prefix.
    final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
    Map<String, String> params = getAuthFilterParams(conf);
    HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
        params, new String[] { pathSpec });
    HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
        + ")");

    // add webhdfs packages
    httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
        .getPackage().getName() + ";" + Param.class.getPackage().getName(),
        pathSpec);
  }
}
 
Example 17
Source Project: big-c   Source File: NameNodeHttpServer.java    License: Apache License 2.0 6 votes vote down vote up
// Registers all internal NameNode servlets. The trailing boolean on
// addInternalServlet marks endpoints that require authentication.
private static void setupServlets(HttpServer2 httpServer, Configuration conf) {
  httpServer.addInternalServlet("startupProgress",
      StartupProgressServlet.PATH_SPEC, StartupProgressServlet.class);
  // Delegation-token endpoints are registered with auth required (true).
  httpServer.addInternalServlet("getDelegationToken",
      GetDelegationTokenServlet.PATH_SPEC, 
      GetDelegationTokenServlet.class, true);
  httpServer.addInternalServlet("renewDelegationToken", 
      RenewDelegationTokenServlet.PATH_SPEC, 
      RenewDelegationTokenServlet.class, true);
  httpServer.addInternalServlet("cancelDelegationToken", 
      CancelDelegationTokenServlet.PATH_SPEC, 
      CancelDelegationTokenServlet.class, true);
  httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class,
      true);
  httpServer.addInternalServlet("imagetransfer", ImageServlet.PATH_SPEC,
      ImageServlet.class, true);
  // Browsing/read-only endpoints are registered without auth (false).
  httpServer.addInternalServlet("listPaths", "/listPaths/*",
      ListPathsServlet.class, false);
  httpServer.addInternalServlet("data", "/data/*",
      FileDataServlet.class, false);
  httpServer.addInternalServlet("checksum", "/fileChecksum/*",
      FileChecksumServlets.RedirectServlet.class, false);
  httpServer.addInternalServlet("contentSummary", "/contentSummary/*",
      ContentSummaryServlet.class, false);
}
 
Example 18
Source Project: big-c   Source File: JournalNodeHttpServer.java    License: Apache License 2.0 6 votes vote down vote up
// Builds and starts the JournalNode HTTP server, exposing the authenticated
// /getJournal endpoint used to stream edit logs.
void start() throws IOException {
  final InetSocketAddress httpAddr = getAddress(conf);

  // HTTPS bind address from the JournalNode configuration keys.
  final String httpsAddrString = conf.get(
      DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);

  // Shared NN/JN builder template configures the SPNEGO principal/keytab.
  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "journal",
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY);

  httpServer = builder.build();
  // Context attributes let GetJournalEditServlet find the local
  // JournalNode instance and the active configuration.
  httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode);
  httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
  httpServer.addInternalServlet("getJournal", "/getJournal",
      GetJournalEditServlet.class, true);
  httpServer.start();
}
 
Example 19
Source Project: big-c   Source File: TestTransferFsImage.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that getFileClient fails with SocketTimeoutException when the
 * image server does not respond within the configured read timeout.
 */
@Test(timeout = 5000)
public void testGetImageTimeout() throws Exception {
  HttpServer2 testServer = HttpServerFunctionalTest.createServer("hdfs");
  try {
    testServer.addServlet("ImageTransfer", ImageServlet.PATH_SPEC,
        TestImageTransferServlet.class);
    testServer.start();
    URL serverURL = HttpServerFunctionalTest.getServerURL(testServer);
    // Shorten the read timeout so the test fails fast.
    TransferFsImage.timeout = 2000;
    try {
      TransferFsImage.getFileClient(serverURL, "txid=1", null,
          null, false);
      fail("TransferImage Should fail with timeout");
    } catch (SocketTimeoutException e) {
      assertEquals("Read should timeout", "Read timed out", e.getMessage());
    }
  } finally {
    // testServer is assigned before the try block, so it can never be null
    // here; the original's dead null-check is removed.
    testServer.stop();
  }
}
 
Example 20
Source Project: hadoop   Source File: WebAppProxy.java    License: Apache License 2.0 5 votes vote down vote up
@Override
protected void serviceStart() throws Exception {
  try {
    Configuration conf = getConfig();
    // Reuse the already-fetched conf instead of calling getConfig() twice
    // (the original passed a second getConfig() result to setConf).
    HttpServer2.Builder b = new HttpServer2.Builder()
        .setName("proxy")
        .addEndpoint(
            URI.create(WebAppUtils.getHttpSchemePrefix(conf) + bindAddress
                + ":" + port)).setFindPort(port == 0).setConf(conf)
        .setACL(acl);
    if (YarnConfiguration.useHttps(conf)) {
      WebAppUtils.loadSslConfiguration(b);
    }
    proxyServer = b.build();
    proxyServer.addServlet(ProxyUriUtils.PROXY_SERVLET_NAME,
        ProxyUriUtils.PROXY_PATH_SPEC, WebAppProxyServlet.class);
    // Attributes consumed by WebAppProxyServlet at request time.
    proxyServer.setAttribute(FETCHER_ATTRIBUTE, fetcher);
    proxyServer
        .setAttribute(IS_SECURITY_ENABLED_ATTRIBUTE, isSecurityEnabled);
    proxyServer.setAttribute(PROXY_HOST_ATTRIBUTE, proxyHost);
    proxyServer.start();
  } catch (IOException e) {
    LOG.error("Could not start proxy web server", e);
    throw e;
  }
  super.serviceStart();
}
 
Example 21
Source Project: hadoop   Source File: TestWebAppProxyServlet.java    License: Apache License 2.0 5 votes vote down vote up
@Override
protected void serviceStart() throws Exception {
  // Starts a standalone proxy server on an ephemeral port for the test.
  Configuration conf = getConfig();
  String bindAddress = conf.get(YarnConfiguration.PROXY_ADDRESS);
  // Keep only the host portion; the actual port is chosen by setFindPort.
  bindAddress = StringUtils.split(bindAddress, ':')[0];
  AccessControlList acl = new AccessControlList(
      conf.get(YarnConfiguration.YARN_ADMIN_ACL, 
      YarnConfiguration.DEFAULT_YARN_ADMIN_ACL));
  proxyServer = new HttpServer2.Builder()
      .setName("proxy")
      .addEndpoint(
          URI.create(WebAppUtils.getHttpSchemePrefix(conf) + bindAddress
              + ":0")).setFindPort(true)
      .setConf(conf)
      .setACL(acl)
      .build();
  proxyServer.addServlet(ProxyUriUtils.PROXY_SERVLET_NAME,
      ProxyUriUtils.PROXY_PATH_SPEC, WebAppProxyServlet.class);

  // Use the test fetcher so no real ResourceManager is contacted.
  appReportFetcher = new AppReportFetcherForTest(conf);
  proxyServer.setAttribute(FETCHER_ATTRIBUTE,
      appReportFetcher );
  proxyServer.setAttribute(IS_SECURITY_ENABLED_ATTRIBUTE, Boolean.TRUE);
  
  // Extract the host part of the configured proxy host:port pair.
  String proxy = WebAppUtils.getProxyHostAndPort(conf);
  String[] proxyParts = proxy.split(":");
  String proxyHost = proxyParts[0];
  
  proxyServer.setAttribute(PROXY_HOST_ATTRIBUTE, proxyHost);
  proxyServer.start();
  LOG.info("Proxy server is started at port {}",
      proxyServer.getConnectorAddress(0).getPort());
}
 
Example 22
Source Project: hadoop   Source File: TestJobEndNotifier.java    License: Apache License 2.0 5 votes vote down vote up
@Test
public void testNotificationOnLastRetryUnregistrationFailure()
    throws Exception {
  HttpServer2 server = startHttpServer();
  try {
    MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
        this.getClass().getName(), true, 2, false));
    // Currently, we will have isLastRetry always equals to false at the
    // beginning of MRAppMaster, except when the staging area exists or a
    // commit already started at the beginning.
    // Manually set isLastRetry to true; it should reset to false when the
    // unregister fails.
    app.isLastAMRetry = true;
    doNothing().when(app).sysexit();
    JobConf conf = new JobConf();
    conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
        JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
    JobImpl job = (JobImpl)app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    app.getContext().getEventHandler()
        .handle(new JobEvent(app.getJobId(), JobEventType.JOB_AM_REBOOT));
    app.waitForInternalState(job, JobStateInternal.REBOOT);
    // Now shutdown. User should see FAILED state.
    // Unregistration fails: isLastAMRetry is recalculated. The reboot stops
    // the service internally, so there is no need to shut down twice.
    app.waitForServiceToStop(10000);
    Assert.assertFalse(app.isLastAMRetry());
    // Since this is not the last retry, JobEndServlet is never called.
    Assert.assertEquals(0, JobEndServlet.calledTimes);
    Assert.assertNull(JobEndServlet.requestUri);
    Assert.assertNull(JobEndServlet.foundJobState);
  } finally {
    // Release the HTTP port even if an assertion fails (original leaked it).
    server.stop();
  }
}
 
Example 23
Source Project: hadoop   Source File: DFSUtil.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Applies the ssl-server configuration (client-auth flag, keystore and
 * truststore locations/passwords/types) to the given builder and returns it.
 */
public static HttpServer2.Builder loadSslConfToHttpServerBuilder(HttpServer2.Builder builder,
    Configuration sslConf) {
  // Apply each SSL setting group to the builder one step at a time,
  // in the same order the chained original evaluated them.
  boolean needsClientAuth = sslConf.getBoolean(
      DFS_CLIENT_HTTPS_NEED_AUTH_KEY, DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
  builder.needsClientAuth(needsClientAuth);
  builder.keyPassword(getPassword(sslConf, DFS_SERVER_HTTPS_KEYPASSWORD_KEY));
  builder.keyStore(
      sslConf.get("ssl.server.keystore.location"),
      getPassword(sslConf, DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY),
      sslConf.get("ssl.server.keystore.type", "jks"));
  builder.trustStore(
      sslConf.get("ssl.server.truststore.location"),
      getPassword(sslConf, DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY),
      sslConf.get("ssl.server.truststore.type", "jks"));
  return builder;
}
 
Example 24
Source Project: hadoop   Source File: NameNodeHttpServer.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Collects the parameters for the WebHDFS authentication filter: the
 * Kerberos principal (with _HOST resolved against the bind address), the
 * SPNEGO keytab, and the optional anonymous-allowed flag. Missing
 * security-critical settings are logged as errors when security is on.
 */
private Map<String, String> getAuthFilterParams(Configuration conf)
    throws IOException {
  Map<String, String> params = new HashMap<String, String>();

  // Kerberos principal: substitute _HOST with the NN bind host.
  String principalInConf = conf
      .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
  if (principalInConf != null && !principalInConf.isEmpty()) {
    params.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
        SecurityUtil.getServerPrincipal(principalInConf,
            bindAddress.getHostName()));
  } else if (UserGroupInformation.isSecurityEnabled()) {
    HttpServer2.LOG.error(
        "WebHDFS and security are enabled, but configuration property '" +
        DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY +
        "' is not set.");
  }

  // SPNEGO keytab, falling back to the NameNode keytab key.
  String httpKeytab = conf.get(DFSUtil.getSpnegoKeytabKey(conf,
      DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
  if (httpKeytab != null && !httpKeytab.isEmpty()) {
    params.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
        httpKeytab);
  } else if (UserGroupInformation.isSecurityEnabled()) {
    HttpServer2.LOG.error(
        "WebHDFS and security are enabled, but configuration property '" +
        DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY +
        "' is not set.");
  }

  // Optional: whether simple-auth anonymous access is permitted.
  String anonymousAllowed = conf
      .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_SIMPLE_ANONYMOUS_ALLOWED);
  if (anonymousAllowed != null && !anonymousAllowed.isEmpty()) {
    params.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_SIMPLE_ANONYMOUS_ALLOWED,
        anonymousAllowed);
  }
  return params;
}
 
Example 25
Source Project: hadoop   Source File: TestTransferFsImage.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that uploadImageFromStorage fails with SocketTimeoutException
 * when the image server does not respond within the configured timeout.
 */
@Test(timeout = 10000)
public void testImageUploadTimeout() throws Exception {
  Configuration conf = new HdfsConfiguration();
  NNStorage mockStorage = Mockito.mock(NNStorage.class);
  HttpServer2 testServer = HttpServerFunctionalTest.createServer("hdfs");
  try {
    testServer.addServlet("ImageTransfer", ImageServlet.PATH_SPEC,
        TestImageTransferServlet.class);
    testServer.start();
    URL serverURL = HttpServerFunctionalTest.getServerURL(testServer);
    // set the timeout here, otherwise it will take default.
    TransferFsImage.timeout = 2000;

    File tmpDir = new File(new FileSystemTestHelper().getTestRootDir());
    tmpDir.mkdirs();

    File mockImageFile = File.createTempFile("image", "", tmpDir);
    // try-with-resources closes the stream even if write() throws;
    // the original leaked the FileOutputStream on that path.
    try (FileOutputStream imageFile = new FileOutputStream(mockImageFile)) {
      imageFile.write("data".getBytes());
    }
    Mockito.when(
        mockStorage.findImageFile(Mockito.any(NameNodeFile.class),
            Mockito.anyLong())).thenReturn(mockImageFile);
    Mockito.when(mockStorage.toColonSeparatedString()).thenReturn(
        "storage:info:string");

    try {
      TransferFsImage.uploadImageFromStorage(serverURL, conf, mockStorage,
          NameNodeFile.IMAGE, 1L);
      fail("TransferImage Should fail with timeout");
    } catch (SocketTimeoutException e) {
      assertEquals("Upload should timeout", "Read timed out", e.getMessage());
    }
  } finally {
    testServer.stop();
  }
}
 
Example 26
Source Project: hadoop   Source File: SnapshotTestHelper.java    License: Apache License 2.0 5 votes vote down vote up
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  // Loggers addressed by class name (datanode internals).
  final String[] noisyLogNames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for (String logName : noisyLogNames) {
    GenericTestUtils.disableLog(LogFactory.getLog(logName));
  }

  // Loggers addressed by class object.
  final Class<?>[] noisyClasses = {
      UserGroupInformation.class,
      BlockManager.class,
      FSNamesystem.class,
      DirectoryScanner.class,
      MetricsSystemImpl.class,
  };
  for (Class<?> clazz : noisyClasses) {
    GenericTestUtils.disableLog(LogFactory.getLog(clazz));
  }

  // Loggers exposed as public fields.
  GenericTestUtils.disableLog(BlockScanner.LOG);
  GenericTestUtils.disableLog(HttpServer2.LOG);
  GenericTestUtils.disableLog(DataNode.LOG);
  GenericTestUtils.disableLog(BlockPoolSliceStorage.LOG);
  GenericTestUtils.disableLog(LeaseManager.LOG);
  GenericTestUtils.disableLog(NameNode.stateChangeLog);
  GenericTestUtils.disableLog(NameNode.blockStateChangeLog);
  GenericTestUtils.disableLog(DFSClient.LOG);
  GenericTestUtils.disableLog(Server.LOG);
}
 
Example 27
Source Project: hadoop   Source File: AuthenticationFilterInitializer.java    License: Apache License 2.0 5 votes vote down vote up
// Builds the AuthenticationFilter configuration: copies every property
// under the given prefix (with the prefix stripped), forces the cookie
// path to "/", and resolves _HOST in the Kerberos principal against the
// server's bind address.
public static Map<String, String> getFilterConfigMap(Configuration conf,
    String prefix) {
  Map<String, String> filterConfig = new HashMap<String, String>();

  //setting the cookie path to root '/' so it is used for all resources.
  filterConfig.put(AuthenticationFilter.COOKIE_PATH, "/");

  // Copy every property under the prefix, stripping the prefix from the key.
  for (Map.Entry<String, String> entry : conf) {
    String name = entry.getKey();
    if (name.startsWith(prefix)) {
      // NOTE(review): the value is re-read via conf.get(name) rather than
      // entry.getValue() — presumably so Configuration variable expansion
      // is applied; confirm before simplifying this to entry.getValue().
      String value = conf.get(name);
      name = name.substring(prefix.length());
      filterConfig.put(name, value);
    }
  }

  //Resolve _HOST into bind address
  String bindAddress = conf.get(HttpServer2.BIND_ADDRESS);
  String principal = filterConfig.get(KerberosAuthenticationHandler.PRINCIPAL);
  if (principal != null) {
    try {
      principal = SecurityUtil.getServerPrincipal(principal, bindAddress);
    }
    catch (IOException ex) {
      throw new RuntimeException("Could not resolve Kerberos principal name: " + ex.toString(), ex);
    }
    filterConfig.put(KerberosAuthenticationHandler.PRINCIPAL, principal);
  }
  return filterConfig;
}
 
Example 28
Source Project: hadoop   Source File: LogLevel.java    License: Apache License 2.0 5 votes vote down vote up
// Renders the "Log Level" admin page: shows the submitted logger's class
// and, when a level is provided, delegates to process(...) to change it.
// Admin access is required; unauthorized requests are rejected up front.
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {

  // Do the authorization
  if (!HttpServer2.hasAdministratorAccess(getServletContext(), request,
      response)) {
    return;
  }

  PrintWriter out = ServletUtil.initHTML(response, "Log Level");
  String logName = ServletUtil.getParameter(request, "log");
  String level = ServletUtil.getParameter(request, "level");

  if (logName != null) {
    out.println("<br /><hr /><h3>Results</h3>");
    out.println(MARKER
        + "Submitted Log Name: <b>" + logName + "</b><br />");

    Log log = LogFactory.getLog(logName);
    out.println(MARKER
        + "Log Class: <b>" + log.getClass().getName() +"</b><br />");
    if (level != null) {
      out.println(MARKER + "Submitted Level: <b>" + level + "</b><br />");
    }

    // Only the log4j and JDK14 logging backends support level changes here.
    if (log instanceof Log4JLogger) {
      process(((Log4JLogger)log).getLogger(), level, out);
    }
    else if (log instanceof Jdk14Logger) {
      process(((Jdk14Logger)log).getLogger(), level, out);
    }
    else {
      out.println("Sorry, " + log.getClass() + " not supported.<br />");
    }
  }

  out.println(FORMS);
  out.println(ServletUtil.HTML_TAIL);
}
 
Example 29
Source Project: hadoop   Source File: ConfServlet.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
    throws ServletException, IOException {

  // Enforce instrumentation access control before exposing configuration.
  if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
                                                 request, response)) {
    return;
  }

  // Default to XML when no explicit format parameter is supplied.
  String format = request.getParameter(FORMAT_PARAM);
  if (null == format) {
    format = FORMAT_XML;
  }

  if (FORMAT_XML.equals(format)) {
    response.setContentType("text/xml; charset=utf-8");
  } else if (FORMAT_JSON.equals(format)) {
    response.setContentType("application/json; charset=utf-8");
  }

  // try-with-resources closes the writer even if writeResponse throws an
  // unexpected exception; the original leaked the writer on that path.
  try (Writer out = response.getWriter()) {
    try {
      writeResponse(getConfFromContext(), out, format);
    } catch (BadFormatException bfe) {
      response.sendError(HttpServletResponse.SC_BAD_REQUEST, bfe.getMessage());
    }
  }
}
 
Example 30
Source Project: big-c   Source File: WebAppProxy.java    License: Apache License 2.0 5 votes vote down vote up
@Override
protected void serviceStart() throws Exception {
  try {
    Configuration conf = getConfig();
    // Reuse the already-fetched conf instead of calling getConfig() twice
    // (the original passed a second getConfig() result to setConf).
    HttpServer2.Builder b = new HttpServer2.Builder()
        .setName("proxy")
        .addEndpoint(
            URI.create(WebAppUtils.getHttpSchemePrefix(conf) + bindAddress
                + ":" + port)).setFindPort(port == 0).setConf(conf)
        .setACL(acl);
    if (YarnConfiguration.useHttps(conf)) {
      WebAppUtils.loadSslConfiguration(b);
    }
    proxyServer = b.build();
    proxyServer.addServlet(ProxyUriUtils.PROXY_SERVLET_NAME,
        ProxyUriUtils.PROXY_PATH_SPEC, WebAppProxyServlet.class);
    // Attributes consumed by WebAppProxyServlet at request time.
    proxyServer.setAttribute(FETCHER_ATTRIBUTE, fetcher);
    proxyServer
        .setAttribute(IS_SECURITY_ENABLED_ATTRIBUTE, isSecurityEnabled);
    proxyServer.setAttribute(PROXY_HOST_ATTRIBUTE, proxyHost);
    proxyServer.start();
  } catch (IOException e) {
    LOG.error("Could not start proxy web server", e);
    throw e;
  }
  super.serviceStart();
}