Java Code Examples for org.apache.hadoop.util.JvmPauseMonitor#start()

The following examples show how to use org.apache.hadoop.util.JvmPauseMonitor#start(). Each example notes the source file, the project it comes from, and its license.
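Before looking at the project-specific examples, here is a minimal, self-contained sketch of the JvmPauseMonitor lifecycle. The API differs between Hadoop versions: in newer releases JvmPauseMonitor extends AbstractService and is configured through init(conf) before start() (the pattern in Examples 1 and 2), while older releases pass the Configuration to the constructor (the pattern in Examples 3 and 4). The sketch below assumes the newer init/start/stop API; the surrounding class name and the sleep placeholder are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.JvmPauseMonitor;

public class JvmPauseMonitorDemo {

  public static void main(String[] args) throws InterruptedException {
    Configuration conf = new Configuration();

    // Newer Hadoop: no-arg constructor, then init(conf), then start().
    JvmPauseMonitor pauseMonitor = new JvmPauseMonitor();
    pauseMonitor.init(conf);
    pauseMonitor.start(); // launches the background thread that watches for long JVM pauses

    try {
      // ... the service's real work would run here ...
      Thread.sleep(10_000L); // placeholder so the monitor has something to observe
    } finally {
      // Stop the monitor when the owning service shuts down.
      pauseMonitor.stop();
    }
  }
}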
Example 1
Source File: StorageContainerManager.java    From hadoop-ozone with Apache License 2.0
/**
 * Start service.
 */
public void start() throws IOException {
  if (LOG.isInfoEnabled()) {
    LOG.info(buildRpcServerStartMessage(
        "StorageContainerLocationProtocol RPC server",
        getClientRpcAddress()));
  }

  ms = HddsServerUtil
      .initializeMetrics(configuration, "StorageContainerManager");

  commandWatcherLeaseManager.start();
  getClientProtocolServer().start();

  if (LOG.isInfoEnabled()) {
    LOG.info(buildRpcServerStartMessage("ScmBlockLocationProtocol RPC " +
        "server", getBlockProtocolServer().getBlockRpcAddress()));
  }
  getBlockProtocolServer().start();

  if (LOG.isInfoEnabled()) {
    LOG.info(buildRpcServerStartMessage("ScmDatanodeProtocl RPC " +
        "server", getDatanodeProtocolServer().getDatanodeRpcAddress()));
  }
  getDatanodeProtocolServer().start();
  if (getSecurityProtocolServer() != null) {
    getSecurityProtocolServer().start();
  }

  httpServer.start();
  scmBlockManager.start();

  // Start jvm monitor
  jvmPauseMonitor = new JvmPauseMonitor();
  jvmPauseMonitor.init(configuration);
  jvmPauseMonitor.start();

  setStartTime();
}
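The monitor started above is owned by the service, so it is expected to be stopped when StorageContainerManager shuts down. That stop path is not part of this excerpt; a hedged sketch of what the counterpart typically looks like:

  // Hypothetical shutdown counterpart (not shown in the excerpt above):
  if (jvmPauseMonitor != null) {
    jvmPauseMonitor.stop();
  }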
 
Example 2
Source File: OzoneManager.java    From hadoop-ozone with Apache License 2.0
private void startJVMPauseMonitor() {
  // Start jvm monitor
  jvmPauseMonitor = new JvmPauseMonitor();
  jvmPauseMonitor.init(configuration);
  jvmPauseMonitor.start();
}
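This small helper wraps the same init-then-start sequence as Example 1; presumably OzoneManager invokes it from its own start-up path, although that call site is not included in this excerpt.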
 
Example 3
Source File: NameNode.java    From hadoop with Apache License 2.0
/**
 * Initialize name-node.
 * 
 * @param conf the configuration
 */
protected void initialize(Configuration conf) throws IOException {
  if (conf.get(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS) == null) {
    String intervals = conf.get(DFS_METRICS_PERCENTILES_INTERVALS_KEY);
    if (intervals != null) {
      conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,
        intervals);
    }
  }

  UserGroupInformation.setConfiguration(conf);
  loginAsNameNodeUser(conf);

  NameNode.initMetrics(conf, this.getRole());
  StartupProgressMetrics.register(startupProgress);

  if (NamenodeRole.NAMENODE == role) {
    startHttpServer(conf);
  }

  this.spanReceiverHost =
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);

  loadNamesystem(conf);

  rpcServer = createRpcServer(conf);
  if (clientNamenodeAddress == null) {
    // This is expected for MiniDFSCluster. Set it now using 
    // the RPC server's bind address.
    clientNamenodeAddress = 
        NetUtils.getHostPortString(rpcServer.getRpcAddress());
    LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
        + " this namenode/service.");
  }
  if (NamenodeRole.NAMENODE == role) {
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }
  
  pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();
  metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
  
  startCommonServices(conf);
}
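Unlike the Ozone examples above, this NameNode code uses the older API in which the Configuration is passed to the JvmPauseMonitor constructor, and it then registers the monitor with the NameNode's JvmMetrics via setPauseMonitor so that detected pauses are exposed through the JVM metrics source rather than only through log messages. Roughly the same wiring can be set up outside a NameNode; the following is a minimal sketch assuming the Hadoop 2.7-era APIs used in this example (the process name and session id values are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.util.JvmPauseMonitor;

public class PauseMonitorMetricsDemo {

  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Register a JVM metrics source with the default metrics system.
    MetricsSystem ms = DefaultMetricsSystem.initialize("ExampleService");
    JvmMetrics jvmMetrics = JvmMetrics.create("ExampleService", "example-session", ms);

    // Older Hadoop: the Configuration goes into the constructor.
    JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(conf);
    pauseMonitor.start();

    // Attach the monitor so pause counts show up alongside the other JVM metrics.
    jvmMetrics.setPauseMonitor(pauseMonitor);
  }
}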
 
Example 4
Source File: DataNode.java    From hadoop with Apache License 2.0
/**
 * This method starts the data node with the specified conf.
 * 
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, 
                   List<StorageLocation> dataDirs,
                   SecureResources resources
                   ) throws IOException {

  // settings global for all BPs in the Data Node
  this.secureResources = resources;
  synchronized (this) {
    this.dataDirs = dataDirs;
  }
  this.conf = conf;
  this.dnConf = new DNConf(conf);
  checkSecureConfig(dnConf, conf, resources);

  this.spanReceiverHost =
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);

  if (dnConf.maxLockedMemory > 0) {
    if (!NativeIO.POSIX.getCacheManipulator().verifyCanMlock()) {
      throw new RuntimeException(String.format(
          "Cannot start datanode because the configured max locked memory" +
          " size (%s) is greater than zero and native code is not available.",
          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
    }
    if (Path.WINDOWS) {
      NativeIO.Windows.extendWorkingSetSize(dnConf.maxLockedMemory);
    } else {
      long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
      if (dnConf.maxLockedMemory > ulimit) {
        throw new RuntimeException(String.format(
          "Cannot start datanode because the configured max locked memory" +
          " size (%s) of %d bytes is more than the datanode's available" +
          " RLIMIT_MEMLOCK ulimit of %d bytes.",
          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
          dnConf.maxLockedMemory,
          ulimit));
      }
    }
  }
  LOG.info("Starting DataNode with maxLockedMemory = " +
      dnConf.maxLockedMemory);

  storage = new DataStorage();
  
  // global DN settings
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);
  pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();

  // BlockPoolTokenSecretManager is required to create ipc server.
  this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();

  // Login is done by now. Set the DN user name.
  dnUserName = UserGroupInformation.getCurrentUser().getShortUserName();
  LOG.info("dnUserName = " + dnUserName);
  LOG.info("supergroup = " + supergroup);
  initIpcServer(conf);

  metrics = DataNodeMetrics.create(conf, getDisplayName());
  metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
  
  blockPoolManager = new BlockPoolManager(this);
  blockPoolManager.refreshNamenodes(conf);

  // Create the ReadaheadPool from the DataNode context so we can
  // exit without having to explicitly shutdown its thread pool.
  readaheadPool = ReadaheadPool.getInstance();
  saslClient = new SaslDataTransferClient(dnConf.conf, 
      dnConf.saslPropsResolver, dnConf.trustedChannelResolver);
  saslServer = new SaslDataTransferServer(dnConf, blockPoolTokenSecretManager);
}
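Note the ordering in this DataNode example: the pause monitor is created and started early, right after the info server, while DataNodeMetrics is created only later and the monitor is attached to it afterwards via setPauseMonitor. Starting the monitor before the metrics wiring appears to be harmless, since the monitor reports long pauses through its own log messages regardless of whether a metrics sink has been attached.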
 