Java Code Examples for org.apache.hadoop.http.HttpServer#start()

The following examples show how to use org.apache.hadoop.http.HttpServer#start(). They are drawn from the open-source projects named above each example.
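Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share: construct the HttpServer, attach any attributes the servlets or JSP pages need, call start(), and then read back the actual port with getPort(). The class name and the "example.http.address" configuration key below are placeholders for illustration, not part of the Hadoop API.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.net.NetUtils;

public class InfoServerExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();

    // Resolve the bind address; "example.http.address" is a made-up key for this sketch.
    InetSocketAddress addr =
        NetUtils.createSocketAddr(conf.get("example.http.address", "0.0.0.0:0"));

    // Passing findPort == true (here: port == 0) lets the server search for a free port.
    HttpServer infoServer = new HttpServer("example", addr.getHostName(),
        addr.getPort(), addr.getPort() == 0, conf);

    // Attributes set here are visible to the server's servlets and JSP pages.
    infoServer.setAttribute("conf", conf);

    infoServer.start();

    // The bound port may be ephemeral, so read it back after start().
    System.out.println("Web server up at port " + infoServer.getPort());
  }
}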
Example 1
Source File: DynamicCloudsDaemon.java    From RDFS with Apache License 2.0
public void initializeServer() throws IOException {

    String serverAddr = conf.get(CLUSTER_BALANCER_ADDR, "localhost:9143");
    InetSocketAddress addr = NetUtils.createSocketAddr(serverAddr);
    clusterDaemonServer = RPC.getServer(this, addr.getHostName(),
            addr.getPort(), conf);
    clusterDaemonServer.start();

    // Http server
    String infoServerAddr = conf.get(CLUSTER_HTTP_BALANCER_ADDR,
            "localhost:50143");
    InetSocketAddress infoAddr = NetUtils.createSocketAddr(infoServerAddr);
    infoServer = new HttpServer("cb", infoAddr.getHostName(),
            infoAddr.getPort(), infoAddr.getPort() == 0, conf);
    infoServer.setAttribute("cluster.balancer", this);
    infoServer.start();
  }
 
Example 2
Source File: CoronaJobTracker.java    From RDFS with Apache License 2.0
private void startInfoServer() throws IOException {
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(
    java.net.InetAddress.getLocalHost().getCanonicalHostName(),
    0);
  String infoBindAddress = infoSocAddr.getHostName();
  int tmpInfoPort = infoSocAddr.getPort();
  infoServer = new HttpServer("jt", infoBindAddress, tmpInfoPort,
      tmpInfoPort == 0, conf);
  infoServer.setAttribute("job.tracker", this);
  infoServer.start();
  this.infoPort = this.infoServer.getPort();

  String hostname =
    java.net.InetAddress.getLocalHost().getCanonicalHostName();
  this.conf.set(
    "mapred.job.tracker.http.address", hostname + ":" + this.infoPort);
  this.conf.setInt("mapred.job.tracker.info.port", this.infoPort);
  this.conf.set("mapred.job.tracker.info.bindAddress", hostname);

  LOG.info("JobTracker webserver: " + this.infoPort);
}
 
Example 3
Source File: TaskTracker.java    From RDFS with Apache License 2.0
protected void initHttpServer(JobConf conf,
    boolean useNettyMapOutputs) throws IOException {

  String infoAddr =
    NetUtils.getServerAddress(conf,
                              "tasktracker.http.bindAddress",
                              "tasktracker.http.port",
                              "mapred.task.tracker.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  String httpBindAddress = infoSocAddr.getHostName();
  int httpPort = infoSocAddr.getPort();
  server = new HttpServer("task", httpBindAddress, httpPort,
      httpPort == 0, conf);
  workerThreads = conf.getInt("tasktracker.http.threads", 40);
  server.setThreads(1, workerThreads);
  // let the jsp pages get to the task tracker, config, and other relevant
  // objects
  FileSystem local = FileSystem.getLocal(conf);
  this.localDirAllocator = new LocalDirAllocator("mapred.local.dir");
  server.setAttribute("task.tracker", this);
  server.setAttribute("local.file.system", local);
  server.setAttribute("conf", conf);
  server.setAttribute("log", LOG);
  server.setAttribute("localDirAllocator", localDirAllocator);
  server.setAttribute("shuffleServerMetrics", shuffleServerMetrics);
  server.setAttribute(ReconfigurationServlet.
                      CONF_SERVLET_RECONFIGURABLE_PREFIX + "/ttconfchange",
                      TaskTracker.this);
  server.setAttribute("nettyMapOutputHttpPort", nettyMapOutputHttpPort);
  server.addInternalServlet("reconfiguration", "/ttconfchange",
                              ReconfigurationServlet.class);
  server.addInternalServlet(
    "mapOutput", "/mapOutput", MapOutputServlet.class);
  server.addInternalServlet("taskLog", "/tasklog", TaskLogServlet.class);
  server.start();
  this.httpPort = server.getPort();
  checkJettyPort();
}
 
Example 4
Source File: ClusterManager.java    From RDFS with Apache License 2.0
/**
 * Construct ClusterManager given {@link CoronaConf}
 *
 * @param conf the configuration for the ClusterManager
 * @throws IOException
 */
public ClusterManager(CoronaConf conf) throws IOException {
  this.conf = conf;
  initLegalTypes();

  metrics = new ClusterManagerMetrics(getTypes());

  sessionManager = new SessionManager(this);
  sessionManager.setConf(conf);

  sessionHistoryManager = new SessionHistoryManager();
  sessionHistoryManager.setConf(conf);

  HostsFileReader hostsReader =
      new HostsFileReader(conf.getHostsFile(), conf.getExcludesFile());
  nodeManager = new NodeManager(this, hostsReader);
  nodeManager.setConf(conf);

  sessionNotifier = new SessionNotifier(sessionManager, this, metrics);
  sessionNotifier.setConf(conf);

  scheduler = new Scheduler(nodeManager, sessionManager,
      sessionNotifier, getTypes(), metrics, conf);
  scheduler.start();
  metrics.registerUpdater(scheduler, sessionNotifier);

  InetSocketAddress infoSocAddr =
      NetUtils.createSocketAddr(conf.getClusterManagerHttpAddress());
  infoServer =
      new HttpServer("cm", infoSocAddr.getHostName(), infoSocAddr.getPort(),
                     infoSocAddr.getPort() == 0, conf);
  infoServer.setAttribute("cm", this);
  infoServer.start();

  startTime = clock.getTime();
  hostName = infoSocAddr.getHostName();
  safeMode = false;
}
 
Example 5
Source File: Standby.java    From RDFS with Apache License 2.0
/**
 * Initialize the webserver so that the primary namenode can fetch
 * transaction logs from standby via http.
 */
void initSecondary(Configuration conf) throws IOException {

  nameNodeAddr = avatarNode.getRemoteNamenodeAddress(conf);
  this.primaryNamenode =
      (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
          NamenodeProtocol.versionID, nameNodeAddr, conf);

  fsName = avatarNode.getRemoteNamenodeHttpName(conf);

  // Initialize other scheduling parameters from the configuration
  checkpointEnabled = conf.getBoolean("fs.checkpoint.enabled", false);
  checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
  checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);

  // initialize the webserver for uploading files.
  String infoAddr = 
    NetUtils.getServerAddress(conf,
                              "dfs.secondary.info.bindAddress",
                              "dfs.secondary.info.port",
                              "dfs.secondary.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  infoBindAddress = infoSocAddr.getHostName();
  int tmpInfoPort = infoSocAddr.getPort();
  infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
      tmpInfoPort == 0, conf);
  infoServer.setAttribute("name.system.image", fsImage);
  this.infoServer.setAttribute("name.conf", conf);
  infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
  infoServer.start();

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = infoServer.getPort();
  conf.set("dfs.secondary.http.address", infoBindAddress + ":" +infoPort);
  LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort);
  LOG.warn("Checkpoint Period   :" + checkpointPeriod + " secs " +
           "(" + checkpointPeriod/60 + " min)");
  LOG.warn("Log Size Trigger    :" + checkpointSize + " bytes " +
           "(" + checkpointSize/1024 + " KB)");
}
 
Example 6
Source File: TestLogLevel.java    From RDFS with Apache License 2.0
public void testDynamicLogLevel() throws Exception {
  String logName = TestLogLevel.class.getName();
  Log testlog = LogFactory.getLog(logName);

  //only test Log4JLogger
  if (testlog instanceof Log4JLogger) {
    Logger log = ((Log4JLogger)testlog).getLogger();
    log.debug("log.debug1");
    log.info("log.info1");
    log.error("log.error1");
    assertTrue(!Level.ERROR.equals(log.getEffectiveLevel()));

    HttpServer server = new HttpServer("..", "localhost", 22222, true);
    server.start();
    int port = server.getPort();

    //servlet
    URL url = new URL("http://localhost:" + port
        + "/logLevel?log=" + logName + "&level=" + Level.ERROR);
    out.println("*** Connecting to " + url);
    URLConnection connection = url.openConnection();
    connection.connect();

    BufferedReader in = new BufferedReader(new InputStreamReader(
        connection.getInputStream()));
    for(String line; (line = in.readLine()) != null; out.println(line));
    in.close();

    log.debug("log.debug2");
    log.info("log.info2");
    log.error("log.error2");
    assertTrue(Level.ERROR.equals(log.getEffectiveLevel()));

    //command line
    String[] args = {"-setlevel", "localhost:"+port, logName,""+Level.DEBUG};
    LogLevel.main(args);
    log.debug("log.debug3");
    log.info("log.info3");
    log.error("log.error3");
    assertTrue(Level.DEBUG.equals(log.getEffectiveLevel()));
  }
  else {
    out.println(testlog.getClass() + " not tested.");
  }
}
 
Example 7
Source File: ProxyJobTracker.java    From RDFS with Apache License 2.0
public ProxyJobTracker(CoronaConf conf) throws IOException {
  this.conf = conf;
  fs = FileSystem.get(conf);
  String infoAddr =
    conf.get("mapred.job.tracker.corona.proxyaddr", "0.0.0.0:0");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  String infoBindAddress = infoSocAddr.getHostName();
  int port = infoSocAddr.getPort();
  LOCALMACHINE = infoBindAddress;
  startTime = getClock().getTime();

  CoronaConf coronaConf = new CoronaConf(conf);
  InetSocketAddress rpcSockAddr = NetUtils.createSocketAddr(
    coronaConf.getProxyJobTrackerAddress());
  rpcServer = RPC.getServer(
    this,
    rpcSockAddr.getHostName(),
    rpcSockAddr.getPort(),
    conf.getInt("corona.proxy.job.tracker.handler.count", 10),
    false,
    conf);
  rpcServer.start();

  LOG.info("ProxyJobTracker RPC Server up at " +
    rpcServer.getListenerAddress());

  infoServer = new HttpServer("proxyjt", infoBindAddress, port,
                              port == 0, conf);
  infoServer.setAttribute("proxy.job.tracker", this);
  infoServer.setAttribute("conf", conf);
  infoServer.addServlet("proxy", "/proxy",
                        ProxyJobTrackerServlet.class);
  // initialize history parameters.
  JobConf jobConf = new JobConf(conf);
  boolean historyInitialized = JobHistory.init(
    this, jobConf, this.LOCALMACHINE, this.startTime);
  if (historyInitialized) {
    JobHistory.initDone(jobConf, fs);
    String historyLogDir =
        JobHistory.getCompletedJobHistoryLocation().toString();
    FileSystem historyFS = new Path(historyLogDir).getFileSystem(conf);
    infoServer.setAttribute("historyLogDir", historyLogDir);
    infoServer.setAttribute("fileSys", historyFS);
  }
  infoServer.start();
  LOCALPORT = infoServer.getPort();

  context = MetricsUtil.getContext("mapred");
  metricsRecord = MetricsUtil.createRecord(context, "proxyjobtracker");
  context.registerUpdater(this);

  expireUnusedFilesInCache = new ExpireUnusedFilesInCache(
    conf, getClock(), new Path(getSystemDir()), fs);

  sessionHistoryManager = new SessionHistoryManager();
  sessionHistoryManager.setConf(conf);

  try {
    String target = conf.getProxyJobTrackerThriftAddress();
    InetSocketAddress addr = NetUtils.createSocketAddr(target);
    LOG.info("Trying to start the Thrift Server at: " + target);
    ServerSocket serverSocket = new ServerSocket(addr.getPort());
    server = new TServerThread(
      TFactoryBasedThreadPoolServer.createNewServer(
        new CoronaProxyJobTrackerService.Processor(this),
        serverSocket,
        5000));
    server.start();
    LOG.info("Thrift server started on: " + target);
  } catch (IOException e) {
    LOG.info("Exception while starting the Thrift Server on CPJT: ", e);
  }
}
 
Example 8
Source File: SecondaryNameNode.java    From RDFS with Apache License 2.0
/**
 * Initialize SecondaryNameNode.
 */
private void initialize(Configuration conf) throws IOException {
  // initiate Java VM metrics
  JvmMetrics.init("SecondaryNameNode", conf.get("session.id"));
  
  // Create connection to the namenode.
  shouldRun = true;
  nameNodeAddr = NameNode.getAddress(conf);

  this.conf = conf;
  this.namenode =
      (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
          NamenodeProtocol.versionID, nameNodeAddr, conf);

  // initialize checkpoint directories
  fsName = getInfoServer();
  checkpointDirs = FSImage.getCheckpointDirs(conf,
                                "/tmp/hadoop/dfs/namesecondary");
  checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf, 
                                "/tmp/hadoop/dfs/namesecondary");    
  checkpointImage = new CheckpointStorage(conf);
  checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);

  // Initialize other scheduling parameters from the configuration
  checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
  checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);

  // initialize the webserver for uploading files.
  String infoAddr = 
    NetUtils.getServerAddress(conf, 
                              "dfs.secondary.info.bindAddress",
                              "dfs.secondary.info.port",
                              "dfs.secondary.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  infoBindAddress = infoSocAddr.getHostName();
  int tmpInfoPort = infoSocAddr.getPort();
  infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
      tmpInfoPort == 0, conf);
  infoServer.setAttribute("name.system.image", checkpointImage);
  this.infoServer.setAttribute("name.conf", conf);
  infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
  infoServer.start();

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = infoServer.getPort();
  conf.set("dfs.secondary.http.address", infoBindAddress + ":" +infoPort); 
  LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort);
  LOG.warn("Checkpoint Period   :" + checkpointPeriod + " secs " +
           "(" + checkpointPeriod/60 + " min)");
  LOG.warn("Log Size Trigger    :" + checkpointSize + " bytes " +
           "(" + checkpointSize/1024 + " KB)");
}
 
Example 9
Source File: TestLogLevel.java    From hadoop-gpu with Apache License 2.0
public void testDynamicLogLevel() throws Exception {
  String logName = TestLogLevel.class.getName();
  Log testlog = LogFactory.getLog(logName);

  //only test Log4JLogger
  if (testlog instanceof Log4JLogger) {
    Logger log = ((Log4JLogger)testlog).getLogger();
    log.debug("log.debug1");
    log.info("log.info1");
    log.error("log.error1");
    assertTrue(!Level.ERROR.equals(log.getEffectiveLevel()));

    HttpServer server = new HttpServer("..", "localhost", 22222, true);
    server.start();
    int port = server.getPort();

    //servlet
    URL url = new URL("http://localhost:" + port
        + "/logLevel?log=" + logName + "&level=" + Level.ERROR);
    out.println("*** Connecting to " + url);
    URLConnection connection = url.openConnection();
    connection.connect();

    BufferedReader in = new BufferedReader(new InputStreamReader(
        connection.getInputStream()));
    for(String line; (line = in.readLine()) != null; out.println(line));
    in.close();

    log.debug("log.debug2");
    log.info("log.info2");
    log.error("log.error2");
    assertTrue(Level.ERROR.equals(log.getEffectiveLevel()));

    //command line
    String[] args = {"-setlevel", "localhost:"+port, logName,""+Level.DEBUG};
    LogLevel.main(args);
    log.debug("log.debug3");
    log.info("log.info3");
    log.error("log.error3");
    assertTrue(Level.DEBUG.equals(log.getEffectiveLevel()));
  }
  else {
    out.println(testlog.getClass() + " not tested.");
  }
}
 
Example 10
Source File: SecondaryNameNode.java    From hadoop-gpu with Apache License 2.0
/**
 * Initialize SecondaryNameNode.
 */
private void initialize(Configuration conf) throws IOException {
  // initiate Java VM metrics
  JvmMetrics.init("SecondaryNameNode", conf.get("session.id"));
  
  // Create connection to the namenode.
  shouldRun = true;
  nameNodeAddr = NameNode.getAddress(conf);

  this.conf = conf;
  this.namenode =
      (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
          NamenodeProtocol.versionID, nameNodeAddr, conf);

  // initialize checkpoint directories
  fsName = getInfoServer();
  checkpointDirs = FSImage.getCheckpointDirs(conf,
                                "/tmp/hadoop/dfs/namesecondary");
  checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf, 
                                "/tmp/hadoop/dfs/namesecondary");    
  checkpointImage = new CheckpointStorage();
  checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);

  // Initialize other scheduling parameters from the configuration
  checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
  checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);

  // initialize the webserver for uploading files.
  String infoAddr = 
    NetUtils.getServerAddress(conf, 
                              "dfs.secondary.info.bindAddress",
                              "dfs.secondary.info.port",
                              "dfs.secondary.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  infoBindAddress = infoSocAddr.getHostName();
  int tmpInfoPort = infoSocAddr.getPort();
  infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
      tmpInfoPort == 0, conf);
  infoServer.setAttribute("name.system.image", checkpointImage);
  this.infoServer.setAttribute("name.conf", conf);
  infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
  infoServer.start();

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = infoServer.getPort();
  conf.set("dfs.secondary.http.address", infoBindAddress + ":" +infoPort); 
  LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort);
  LOG.warn("Checkpoint Period   :" + checkpointPeriod + " secs " +
           "(" + checkpointPeriod/60 + " min)");
  LOG.warn("Log Size Trigger    :" + checkpointSize + " bytes " +
           "(" + checkpointSize/1024 + " KB)");
}