org.apache.hadoop.util.JvmPauseMonitor Java Examples

The following examples show how to use org.apache.hadoop.util.JvmPauseMonitor. Each example comes from an open-source project; the source file and project are noted above each listing.
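JvmPauseMonitor detects long JVM pauses (usually GC-induced) by sleeping for a short, fixed interval in a loop and logging whenever the observed sleep time greatly exceeds the requested one. Note that the examples below use two API styles depending on the Hadoop version: older releases pass the Configuration to the constructor, while newer releases use a no-arg constructor followed by init(conf). A minimal standalone sketch of the newer style (the demo class and sleep are illustrative, not taken from the examples below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.JvmPauseMonitor;

public class PauseMonitorDemo {
  public static void main(String[] args) throws InterruptedException {
    Configuration conf = new Configuration();

    JvmPauseMonitor monitor = new JvmPauseMonitor();
    monitor.init(conf);   // newer API; older versions take conf in the constructor instead
    monitor.start();
    try {
      Thread.sleep(60_000);  // stand-in for real work; long pauses get logged meanwhile
    } finally {
      monitor.stop();        // always stop the monitor thread on shutdown
    }
  }
}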
Example #1
Source File: TestJvmMetrics.java    From hadoop with Apache License 2.0
@Test public void testPresence() {
  JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(new Configuration());
  JvmMetrics jvmMetrics = new JvmMetrics("test", "test");
  jvmMetrics.setPauseMonitor(pauseMonitor);
  MetricsRecordBuilder rb = getMetrics(jvmMetrics);
  MetricsCollector mc = rb.parent();

  verify(mc).addRecord(JvmMetrics);
  verify(rb).tag(ProcessName, "test");
  verify(rb).tag(SessionId, "test");
  // every metric declared in JvmMetricsInfo should be published with the expected type
  for (JvmMetricsInfo info : JvmMetricsInfo.values()) {
    if (info.name().startsWith("Mem"))
      verify(rb).addGauge(eq(info), anyFloat());
    else if (info.name().startsWith("Gc"))
      verify(rb).addCounter(eq(info), anyLong());
    else if (info.name().startsWith("Threads"))
      verify(rb).addGauge(eq(info), anyInt());
    else if (info.name().startsWith("Log"))
      verify(rb).addCounter(eq(info), anyLong());
  }
}
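The bare identifiers JvmMetrics, ProcessName, and SessionId above are enum constants pulled in through static imports, and verify, eq, and the any* matchers come from Mockito. The listing compiles only with imports along these lines (reconstructed from the Hadoop test, not shown in the snippet):

import static org.apache.hadoop.metrics2.impl.MsInfo.*;           // ProcessName, SessionId
import static org.apache.hadoop.metrics2.source.JvmMetricsInfo.*; // JvmMetrics record info
import static org.apache.hadoop.test.MetricsAsserts.*;            // getMetrics(...)
import static org.mockito.Mockito.*;                              // verify, eq, any* matchers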
 
Example #2
Source File: StorageContainerManager.java    From hadoop-ozone with Apache License 2.0
/**
 * Start service.
 */
public void start() throws IOException {
  if (LOG.isInfoEnabled()) {
    LOG.info(buildRpcServerStartMessage(
        "StorageContainerLocationProtocol RPC server",
        getClientRpcAddress()));
  }

  ms = HddsServerUtil
      .initializeMetrics(configuration, "StorageContainerManager");

  commandWatcherLeaseManager.start();
  getClientProtocolServer().start();

  if (LOG.isInfoEnabled()) {
    LOG.info(buildRpcServerStartMessage("ScmBlockLocationProtocol RPC " +
        "server", getBlockProtocolServer().getBlockRpcAddress()));
  }
  getBlockProtocolServer().start();

  if (LOG.isInfoEnabled()) {
    LOG.info(buildRpcServerStartMessage("ScmDatanodeProtocl RPC " +
        "server", getDatanodeProtocolServer().getDatanodeRpcAddress()));
  }
  getDatanodeProtocolServer().start();
  if (getSecurityProtocolServer() != null) {
    getSecurityProtocolServer().start();
  }

  httpServer.start();
  scmBlockManager.start();

  // Start jvm monitor
  jvmPauseMonitor = new JvmPauseMonitor();
  jvmPauseMonitor.init(configuration);
  jvmPauseMonitor.start();

  setStartTime();
}
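The pause thresholds the monitor logs against are configurable before init. In the Hadoop source the keys are jvm.pause.info-threshold.ms (default 1000) and jvm.pause.warn-threshold.ms (default 10000); a sketch of tightening them, assuming those key names match your Hadoop version:

Configuration conf = new Configuration();
// log at INFO for pauses over 0.5s and at WARN for pauses over 5s
// (key names taken from JvmPauseMonitor's constants; verify against your version)
conf.setLong("jvm.pause.info-threshold.ms", 500L);
conf.setLong("jvm.pause.warn-threshold.ms", 5000L);

JvmPauseMonitor monitor = new JvmPauseMonitor();
monitor.init(conf);
monitor.start();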
 
Example #3
Source File: OzoneManager.java    From hadoop-ozone with Apache License 2.0
private void startJVMPauseMonitor() {
  // Start jvm monitor
  jvmPauseMonitor = new JvmPauseMonitor();
  jvmPauseMonitor.init(configuration);
  jvmPauseMonitor.start();
}
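OzoneManager starts the monitor through a small helper; the matching shutdown path is not part of this listing, but a symmetric, hypothetical counterpart would look like:

private void stopJVMPauseMonitor() {
  // hypothetical helper mirroring startJVMPauseMonitor(); the real OzoneManager
  // stops the monitor during its own stop/shutdown sequence
  if (jvmPauseMonitor != null) {
    jvmPauseMonitor.stop();
  }
}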
 
Example #4
Source File: NameNode.java    From hadoop with Apache License 2.0
/**
 * Initialize name-node.
 * 
 * @param conf the configuration
 */
protected void initialize(Configuration conf) throws IOException {
  if (conf.get(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS) == null) {
    String intervals = conf.get(DFS_METRICS_PERCENTILES_INTERVALS_KEY);
    if (intervals != null) {
      conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,
        intervals);
    }
  }

  UserGroupInformation.setConfiguration(conf);
  loginAsNameNodeUser(conf);

  NameNode.initMetrics(conf, this.getRole());
  StartupProgressMetrics.register(startupProgress);

  if (NamenodeRole.NAMENODE == role) {
    startHttpServer(conf);
  }

  this.spanReceiverHost =
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);

  loadNamesystem(conf);

  rpcServer = createRpcServer(conf);
  if (clientNamenodeAddress == null) {
    // This is expected for MiniDFSCluster. Set it now using 
    // the RPC server's bind address.
    clientNamenodeAddress = 
        NetUtils.getHostPortString(rpcServer.getRpcAddress());
    LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
        + " this namenode/service.");
  }
  if (NamenodeRole.NAMENODE == role) {
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }
  
  pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();
  metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
  
  startCommonServices(conf);
}
 
Example #5
Source File: DataNode.java    From hadoop with Apache License 2.0
/**
 * This method starts the data node with the specified conf.
 * 
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, 
                   List<StorageLocation> dataDirs,
                   SecureResources resources
                   ) throws IOException {

  // settings global for all BPs in the Data Node
  this.secureResources = resources;
  synchronized (this) {
    this.dataDirs = dataDirs;
  }
  this.conf = conf;
  this.dnConf = new DNConf(conf);
  checkSecureConfig(dnConf, conf, resources);

  this.spanReceiverHost =
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);

  if (dnConf.maxLockedMemory > 0) {
    if (!NativeIO.POSIX.getCacheManipulator().verifyCanMlock()) {
      throw new RuntimeException(String.format(
          "Cannot start datanode because the configured max locked memory" +
          " size (%s) is greater than zero and native code is not available.",
          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
    }
    if (Path.WINDOWS) {
      NativeIO.Windows.extendWorkingSetSize(dnConf.maxLockedMemory);
    } else {
      long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
      if (dnConf.maxLockedMemory > ulimit) {
        throw new RuntimeException(String.format(
          "Cannot start datanode because the configured max locked memory" +
          " size (%s) of %d bytes is more than the datanode's available" +
          " RLIMIT_MEMLOCK ulimit of %d bytes.",
          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
          dnConf.maxLockedMemory,
          ulimit));
      }
    }
  }
  LOG.info("Starting DataNode with maxLockedMemory = " +
      dnConf.maxLockedMemory);

  storage = new DataStorage();
  
  // global DN settings
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);
  pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();

  // BlockPoolTokenSecretManager is required to create ipc server.
  this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();

  // Login is done by now. Set the DN user name.
  dnUserName = UserGroupInformation.getCurrentUser().getShortUserName();
  LOG.info("dnUserName = " + dnUserName);
  LOG.info("supergroup = " + supergroup);
  initIpcServer(conf);

  metrics = DataNodeMetrics.create(conf, getDisplayName());
  metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
  
  blockPoolManager = new BlockPoolManager(this);
  blockPoolManager.refreshNamenodes(conf);

  // Create the ReadaheadPool from the DataNode context so we can
  // exit without having to explicitly shutdown its thread pool.
  readaheadPool = ReadaheadPool.getInstance();
  saslClient = new SaslDataTransferClient(dnConf.conf, 
      dnConf.saslPropsResolver, dnConf.trustedChannelResolver);
  saslServer = new SaslDataTransferServer(dnConf, blockPoolTokenSecretManager);
}
 
Example #6
Source File: JvmMetrics.java    From hadoop with Apache License 2.0
public void setPauseMonitor(final JvmPauseMonitor pauseMonitor) {
  this.pauseMonitor = pauseMonitor;
}
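Setting the monitor is only half of the wiring: when the metrics system snapshots JvmMetrics, the source reads the pause counters off the monitor and publishes them next to the GC metrics. A sketch of that consumption side, modeled on the Hadoop source (getter names can vary by version; the Warn getter really is misspelled "Threadhold" in some releases):

if (pauseMonitor != null) {
  rb.addCounter(GcNumWarnThresholdExceeded,
      pauseMonitor.getNumGcWarnThreadholdExceeded());  // sic: Hadoop's own spelling
  rb.addCounter(GcNumInfoThresholdExceeded,
      pauseMonitor.getNumGcInfoThresholdExceeded());
  rb.addCounter(GcTotalExtraSleepTime,
      pauseMonitor.getTotalGcExtraSleepTime());
}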
 