Java Code Examples for org.apache.hadoop.ipc.RPC#getServer()

The following examples show how to use org.apache.hadoop.ipc.RPC#getServer(). Each snippet is taken from the project and source file named above it.
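Before the project examples, here is a minimal, self-contained sketch of the pattern they all follow: implement a protocol interface, hand the instance to RPC.getServer() together with a bind address, port, handler count, verbosity flag, and Configuration, then start the returned Server. The PingProtocol interface, the PingServer class, and the bind address, port, and handler count below are invented for illustration and do not appear in any of the projects; only RPC.getServer(), Server.start(), and Server.getListenerAddress() are the Hadoop API calls used in the examples that follow.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.VersionedProtocol;

// Hypothetical protocol, for illustration only; objects passed to
// RPC.getServer() are expected to implement VersionedProtocol.
interface PingProtocol extends VersionedProtocol {
  long VERSION = 1L;
  String ping(String message) throws IOException;
}

public class PingServer implements PingProtocol {
  @Override
  public String ping(String message) {
    return "pong: " + message;
  }

  @Override
  public long getProtocolVersion(String protocol, long clientVersion) {
    return VERSION;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Bind to an ephemeral port with five handler threads and no verbose call logging.
    Server server = RPC.getServer(new PingServer(), "0.0.0.0", 0, 5, false, conf);
    server.start();
    // When port 0 is requested, getListenerAddress() reports the port actually bound.
    InetSocketAddress boundAddr = server.getListenerAddress();
    System.out.println("RPC server listening at " + boundAddr);
  }
}

A client would typically reach such a server through RPC.getProxy() or RPC.waitForProxy() with the same protocol interface; the examples on this page cover only the server side.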
Example 1
Source File: DynamicCloudsDaemon.java    From RDFS with Apache License 2.0
public void initializeServer() throws IOException {

    String serverAddr = conf.get(CLUSTER_BALANCER_ADDR, "localhost:9143");
    InetSocketAddress addr = NetUtils.createSocketAddr(serverAddr);
    clusterDaemonServer = RPC.getServer(this, addr.getHostName(),
            addr.getPort(), conf);
    clusterDaemonServer.start();

    // HTTP info server
    String infoServerAddr = conf.get(CLUSTER_HTTP_BALANCER_ADDR,
            "localhost:50143");
    InetSocketAddress infoAddr = NetUtils.createSocketAddr(infoServerAddr);
    infoServer = new HttpServer("cb", infoAddr.getHostName(),
            infoAddr.getPort(), infoAddr.getPort() == 0, conf);
    infoServer.setAttribute("cluster.balancer", this);
    infoServer.start();
}
 
Example 2
Source File: CoronaTaskTracker.java    From RDFS with Apache License 2.0
private synchronized void initializeTaskActionServer() throws IOException {
  // Create Hadoop RPC to serve JobTrackers
  actionServerAddr = NetUtils.createSocketAddr(getLocalHostname(), 0);
  int handlerCount = fConf.getInt(CORONA_TASK_TRACKER_HANDLER_COUNT_KEY, 10);
  this.actionServer = RPC.getServer(
      this, actionServerAddr.getHostName(), 0, handlerCount, false, fConf);
  this.actionServer.start();
  actionServerAddr = actionServer.getListenerAddress();
  LOG.info("TaskActionServer up at " +
    actionServerAddr.getHostName() + ":" + actionServerAddr.getPort());
  jobTrackerReporters = new ConcurrentHashMap<JobID, JobTrackerReporter>();
  String dir = fConf.get(JobTracker.MAPRED_SYSTEM_DIR_KEY,
      JobTracker.DEFAULT_MAPRED_SYSTEM_DIR);
  if (dir == null) {
    throw new IOException("Failed to get system directory");
  }
  systemDirectory = new Path(dir);
  systemFS = systemDirectory.getFileSystem(fConf);
}
 
Example 3
Source File: LocalJobRunner.java    From RDFS with Apache License 2.0
public Job(JobID jobid, JobConf conf) throws IOException {
  this.doSequential =
    conf.getBoolean("mapred.localrunner.sequential", true);
  this.id = jobid;
  this.mapoutputFile = new MapOutputFile(jobid);
  this.mapoutputFile.setConf(conf);

  this.localFile = new JobConf(conf).getLocalPath(jobDir+id+".xml");
  this.localFs = FileSystem.getLocal(conf);
  persistConf(this.localFs, this.localFile, conf);

  this.job = new JobConf(localFile);
  profile = new JobProfile(job.getUser(), id, localFile.toString(), 
                           "http://localhost:8080/", job.getJobName());
  status = new JobStatus(id, 0.0f, 0.0f, JobStatus.RUNNING);

  jobs.put(id, this);

  numSlots = conf.getInt(LOCAL_RUNNER_SLOTS, DEFAULT_LOCAL_RUNNER_SLOTS);
  executor = Executors.newFixedThreadPool(numSlots);

  int handlerCount = numSlots;
  umbilicalServer =
    RPC.getServer(this, LOCALHOST, 0, handlerCount, false, conf);
  umbilicalServer.start();
  umbilicalPort = umbilicalServer.getListenerAddress().getPort();

  this.start();
}
 
Example 4
Source File: UtilizationCollector.java    From RDFS with Apache License 2.0
protected void initialize(Configuration conf)
  throws IOException {
  InetSocketAddress socAddr = UtilizationCollector.getAddress(conf);
  int handlerCount = conf.getInt(
          "mapred.resourceutilization.handler.count", 10);

  // create rpc server
  this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
                              handlerCount, false, conf);

  // The rpc-server port can be ephemeral... ensure we have the correct info
  this.serverAddress = this.server.getListenerAddress();
  LOG.info("Collector up at: " + this.serverAddress);

  // start RPC server
  this.server.start();

  // How long before TaskTracker reports expire
  timeLimit = conf.getLong("mapred.resourceutilization.timelimit",
                           DEFAULT_TIME_LIMIT);

  // How long after a job stops before it is considered finished
  stopTimeLimit = conf.getLong("mapred.resourceutilization.stoptimelimit",
                               DEFAULT_STOP_TIME_LIMIT);

  // How often do we aggregate the reports
  aggregatePeriod = conf.getLong(
          "mapred.resourceutilization.aggregateperiod",
          DEFAULT_AGGREGATE_SLEEP_TIME);

  // Start the daemon thread to aggregate the TaskTracker reports
  this.aggregateDaemon = new Daemon(new AggregateRun());
  this.aggregateDaemon.start();
}
 
Example 5
Source File: DataNode.java    From RDFS with Apache License 2.0
private void initIpcServer(Configuration conf) throws IOException {
  // init IPC server
  InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
      conf.get("dfs.datanode.ipc.address"));
  ipcServer = RPC.getServer(this, ipcAddr.getHostName(), ipcAddr.getPort(), 
      conf.getInt("dfs.datanode.handler.count", 3), false, conf);
  ipcServer.start();
}
 
Example 6
Source File: RaidNode.java    From RDFS with Apache License 2.0
private void initialize(Configuration conf) throws IOException,
		SAXException, InterruptedException, RaidConfigurationException,
		ClassNotFoundException, ParserConfigurationException {
	this.startTime = RaidNode.now();
	this.conf = conf;
	InetSocketAddress socAddr = RaidNode.getAddress(conf);
	int handlerCount = conf.getInt("fs.raidnode.handler.count", 10);
	// clean up temporary directory
	cleanUpTempDirectory(conf);

	// read in the configuration
	configMgr = new ConfigManager(conf);

	// create rpc server
	this.server = RPC.getServer(this, socAddr.getHostName(),
			socAddr.getPort(), handlerCount, false, conf);

	// The rpc-server port can be ephemeral... ensure we have the correct
	// info
	this.serverAddress = this.server.getListenerAddress();
	LOG.info("RaidNode up at: " + this.serverAddress);
	// Instantiate the metrics singleton.
	RaidNodeMetrics.getInstance(RaidNodeMetrics.DEFAULT_NAMESPACE_ID);

	this.server.start(); // start RPC server

	// Create a block integrity monitor and start its thread(s)
	this.blockIntegrityMonitor = BlockIntegrityMonitor
			.createBlockIntegrityMonitor(conf);

	boolean useBlockFixer = !conf.getBoolean(
			RAID_DISABLE_CORRUPT_BLOCK_FIXER_KEY, false);
	boolean useBlockCopier = !conf.getBoolean(
			RAID_DISABLE_DECOMMISSIONING_BLOCK_COPIER_KEY, false);
	boolean useCorruptFileCounter = !conf.getBoolean(
			RAID_DISABLE_CORRUPTFILE_COUNTER_KEY, false);

	Runnable fixer = blockIntegrityMonitor.getCorruptionMonitor();
	if (useBlockFixer && (fixer != null)) {
		this.blockFixerThread = new Daemon(fixer);
		this.blockFixerThread.setName("Block Fixer");
		this.blockFixerThread.start();
	}

	Runnable copier = blockIntegrityMonitor.getDecommissioningMonitor();
	if (useBlockCopier && (copier != null)) {
		this.blockCopierThread = new Daemon(copier);
		this.blockCopierThread.setName("Block Copier");
		this.blockCopierThread.start();
	}

	Runnable counter = blockIntegrityMonitor.getCorruptFileCounter();
	if (useCorruptFileCounter && counter != null) {
		this.corruptFileCounterThread = new Daemon(counter);
		this.corruptFileCounterThread.setName("Corrupt File Counter");
		this.corruptFileCounterThread.start();
	}

	// start the daemon thread to fire policies appropriately
	RaidNode.triggerMonitorSleepTime = conf.getLong(
			TRIGGER_MONITOR_SLEEP_TIME_KEY, SLEEP_TIME);
	this.triggerThread = new Daemon(new TriggerMonitor());
	this.triggerThread.setName("Trigger Thread");
	this.triggerThread.start();

	// start the thread that monitors and moves blocks
	this.placementMonitor = new PlacementMonitor(conf);
	this.placementMonitor.start();

	// start the thread that deletes obsolete parity files
	this.purgeMonitor = new PurgeMonitor(conf, placementMonitor);
	this.purgeThread = new Daemon(purgeMonitor);
	this.purgeThread.setName("Purge Thread");
	this.purgeThread.start();

	// start the thread that creates HAR files
	this.harThread = new Daemon(new HarMonitor());
	this.harThread.setName("HAR Thread");
	this.harThread.start();

	// start the thread that collects statistics
	this.statsCollector = new StatisticsCollector(this, configMgr, conf);
	this.statsCollectorThread = new Daemon(statsCollector);
	this.statsCollectorThread.setName("Stats Collector");
	this.statsCollectorThread.start();

	this.directoryTraversalShuffle = conf.getBoolean(
			RAID_DIRECTORYTRAVERSAL_SHUFFLE, true);
	this.directoryTraversalThreads = conf.getInt(
			RAID_DIRECTORYTRAVERSAL_THREADS, 4);

	startHttpServer();

	initialized = true;
}
 
Example 7
Source File: ProxyJobTracker.java    From RDFS with Apache License 2.0
public ProxyJobTracker(CoronaConf conf) throws IOException {
  this.conf = conf;
  fs = FileSystem.get(conf);
  String infoAddr =
    conf.get("mapred.job.tracker.corona.proxyaddr", "0.0.0.0:0");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  String infoBindAddress = infoSocAddr.getHostName();
  int port = infoSocAddr.getPort();
  LOCALMACHINE = infoBindAddress;
  startTime = getClock().getTime();

  CoronaConf coronaConf = new CoronaConf(conf);
  InetSocketAddress rpcSockAddr = NetUtils.createSocketAddr(
    coronaConf.getProxyJobTrackerAddress());
  rpcServer = RPC.getServer(
    this,
    rpcSockAddr.getHostName(),
    rpcSockAddr.getPort(),
    conf.getInt("corona.proxy.job.tracker.handler.count", 10),
    false,
    conf);
  rpcServer.start();

  LOG.info("ProxyJobTracker RPC Server up at " +
    rpcServer.getListenerAddress());

  infoServer = new HttpServer("proxyjt", infoBindAddress, port,
                              port == 0, conf);
  infoServer.setAttribute("proxy.job.tracker", this);
  infoServer.setAttribute("conf", conf);
  infoServer.addServlet("proxy", "/proxy",
                        ProxyJobTrackerServlet.class);
  // initialize history parameters.
  JobConf jobConf = new JobConf(conf);
  boolean historyInitialized = JobHistory.init(
    this, jobConf, this.LOCALMACHINE, this.startTime);
  if (historyInitialized) {
    JobHistory.initDone(jobConf, fs);
    String historyLogDir =
        JobHistory.getCompletedJobHistoryLocation().toString();
    FileSystem historyFS = new Path(historyLogDir).getFileSystem(conf);
    infoServer.setAttribute("historyLogDir", historyLogDir);
    infoServer.setAttribute("fileSys", historyFS);
  }
  infoServer.start();
  LOCALPORT = infoServer.getPort();

  context = MetricsUtil.getContext("mapred");
  metricsRecord = MetricsUtil.createRecord(context, "proxyjobtracker");
  context.registerUpdater(this);

  expireUnusedFilesInCache = new ExpireUnusedFilesInCache(
    conf, getClock(), new Path(getSystemDir()), fs);

  sessionHistoryManager = new SessionHistoryManager();
  sessionHistoryManager.setConf(conf);

  try {
    String target = conf.getProxyJobTrackerThriftAddress();
    InetSocketAddress addr = NetUtils.createSocketAddr(target);
    LOG.info("Trying to start the Thrift Server at: " + target);
    ServerSocket serverSocket = new ServerSocket(addr.getPort());
    server = new TServerThread(
      TFactoryBasedThreadPoolServer.createNewServer(
        new CoronaProxyJobTrackerService.Processor(this),
        serverSocket,
        5000));
    server.start();
    LOG.info("Thrift server started on: " + target);
  } catch (IOException e) {
    LOG.info("Exception while starting the Thrift Server on CPJT: ", e);
  }
}