Java Code Examples for org.apache.hadoop.metrics2.source.JvmMetrics

The following examples show how to use org.apache.hadoop.metrics2.source.JvmMetrics. These examples are extracted from open-source projects. You can vote up the examples you find useful or vote down those you don't, and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source Project: hadoop   Source File: ApplicationHistoryServer.java    License: Apache License 2.0 6 votes vote down vote up
@Override
protected void serviceInit(Configuration conf) throws Exception {
  // Initialization order matters: the timeline services are created first
  // because the generic history services below depend on them.
  // init timeline services first
  timelineStore = createTimelineStore(conf);
  addIfService(timelineStore);
  secretManagerService = createTimelineDelegationTokenSecretManagerService(conf);
  addService(secretManagerService);
  timelineDataManager = createTimelineDataManager(conf);
  addService(timelineDataManager);

  // init generic history service afterwards
  aclsManager = createApplicationACLsManager(conf);
  historyManager = createApplicationHistoryManager(conf);
  ahsClientService = createApplicationHistoryClientService(historyManager);
  addService(ahsClientService);
  // Cast registers historyManager with the composite-service lifecycle.
  addService((Service) historyManager);

  // Bring up the metrics system plus the JVM metrics source for this
  // daemon. NOTE(review): sessionId is null here — presumably no session
  // tag is wanted; confirm against JvmMetrics.initSingleton semantics.
  DefaultMetricsSystem.initialize("ApplicationHistoryServer");
  JvmMetrics.initSingleton("ApplicationHistoryServer", null);
  super.serviceInit(conf);
}
 
Example 2
Source Project: hadoop   Source File: Nfs3Metrics.java    License: Apache License 2.0 6 votes vote down vote up
public Nfs3Metrics(String name, String sessionId, int[] intervals,
    final JvmMetrics jvmMetrics) {
  // Record the source name, keep a handle on the shared JVM metrics
  // source, and tag every metric in this registry with the session id.
  this.name = name;
  this.jvmMetrics = jvmMetrics;
  registry.tag(SessionId, sessionId);

  // One quantile sampler per configured rollover interval, for each of the
  // three tracked NFS latencies.
  final int numIntervals = intervals.length;
  readNanosQuantiles = new MutableQuantiles[numIntervals];
  writeNanosQuantiles = new MutableQuantiles[numIntervals];
  commitNanosQuantiles = new MutableQuantiles[numIntervals];

  for (int idx = 0; idx < numIntervals; idx++) {
    final int window = intervals[idx];
    final String suffix = window + "s";
    readNanosQuantiles[idx] = registry.newQuantiles(
        "readProcessNanos" + suffix, "Read process in ns", "ops", "latency",
        window);
    writeNanosQuantiles[idx] = registry.newQuantiles(
        "writeProcessNanos" + suffix, "Write process in ns", "ops", "latency",
        window);
    commitNanosQuantiles[idx] = registry.newQuantiles(
        "commitProcessNanos" + suffix, "Commit process in ns", "ops",
        "latency", window);
  }
}
 
Example 3
Source Project: hadoop   Source File: NameNodeMetrics.java    License: Apache License 2.0 6 votes vote down vote up
NameNodeMetrics(String processName, String sessionId, int[] intervals,
    final JvmMetrics jvmMetrics) {
  // Keep the shared JVM metrics source and tag all registry metrics with
  // the process name and session id.
  this.jvmMetrics = jvmMetrics;
  registry.tag(ProcessName, processName).tag(SessionId, sessionId);

  // One quantile sampler per configured interval for each tracked latency.
  final int numIntervals = intervals.length;
  syncsQuantiles = new MutableQuantiles[numIntervals];
  blockReportQuantiles = new MutableQuantiles[numIntervals];
  cacheReportQuantiles = new MutableQuantiles[numIntervals];

  for (int idx = 0; idx < numIntervals; idx++) {
    final int window = intervals[idx];
    final String suffix = window + "s";
    syncsQuantiles[idx] = registry.newQuantiles(
        "syncs" + suffix, "Journal syncs", "ops", "latency", window);
    blockReportQuantiles[idx] = registry.newQuantiles(
        "blockReport" + suffix, "Block report", "ops", "latency", window);
    cacheReportQuantiles[idx] = registry.newQuantiles(
        "cacheReport" + suffix, "Cache report", "ops", "latency", window);
  }
}
 
Example 4
Source Project: hadoop   Source File: JournalNode.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Start listening for edits via RPC. Validates the local journal
 * directory, initializes metrics, performs the security login, and brings
 * up the HTTP and RPC servers in that order.
 */
public void start() throws IOException {
  // Guard against double-start.
  Preconditions.checkState(!isStarted(), "JN already running");
  
  validateAndCreateJournalDir(localDir);
  
  // Register the metrics system and the JVM metrics source for this
  // daemon, tagged with the configured session id (may be null).
  DefaultMetricsSystem.initialize("JournalNode");
  JvmMetrics.create("JournalNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  // Kerberos login (no-op when security is off) before opening servers.
  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
  
  registerJNMXBean();
  
  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();

  httpServerURI = httpServer.getServerURI().toString();

  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
}
 
Example 5
Source Project: big-c   Source File: ApplicationHistoryServer.java    License: Apache License 2.0 6 votes vote down vote up
@Override
protected void serviceInit(Configuration conf) throws Exception {
  // Initialization order matters: the timeline services are created first
  // because the generic history services below depend on them.
  // init timeline services first
  timelineStore = createTimelineStore(conf);
  addIfService(timelineStore);
  secretManagerService = createTimelineDelegationTokenSecretManagerService(conf);
  addService(secretManagerService);
  timelineDataManager = createTimelineDataManager(conf);
  addService(timelineDataManager);

  // init generic history service afterwards
  aclsManager = createApplicationACLsManager(conf);
  historyManager = createApplicationHistoryManager(conf);
  ahsClientService = createApplicationHistoryClientService(historyManager);
  addService(ahsClientService);
  // Cast registers historyManager with the composite-service lifecycle.
  addService((Service) historyManager);

  // Bring up the metrics system plus the JVM metrics source for this
  // daemon. NOTE(review): sessionId is null here — presumably no session
  // tag is wanted; confirm against JvmMetrics.initSingleton semantics.
  DefaultMetricsSystem.initialize("ApplicationHistoryServer");
  JvmMetrics.initSingleton("ApplicationHistoryServer", null);
  super.serviceInit(conf);
}
 
Example 6
Source Project: big-c   Source File: Nfs3Metrics.java    License: Apache License 2.0 6 votes vote down vote up
public Nfs3Metrics(String name, String sessionId, int[] intervals,
    final JvmMetrics jvmMetrics) {
  // Record the source name, keep a handle on the shared JVM metrics
  // source, and tag every metric in this registry with the session id.
  this.name = name;
  this.jvmMetrics = jvmMetrics;
  registry.tag(SessionId, sessionId);

  // One quantile sampler per configured rollover interval, for each of the
  // three tracked NFS latencies.
  final int numIntervals = intervals.length;
  readNanosQuantiles = new MutableQuantiles[numIntervals];
  writeNanosQuantiles = new MutableQuantiles[numIntervals];
  commitNanosQuantiles = new MutableQuantiles[numIntervals];

  for (int idx = 0; idx < numIntervals; idx++) {
    final int window = intervals[idx];
    final String suffix = window + "s";
    readNanosQuantiles[idx] = registry.newQuantiles(
        "readProcessNanos" + suffix, "Read process in ns", "ops", "latency",
        window);
    writeNanosQuantiles[idx] = registry.newQuantiles(
        "writeProcessNanos" + suffix, "Write process in ns", "ops", "latency",
        window);
    commitNanosQuantiles[idx] = registry.newQuantiles(
        "commitProcessNanos" + suffix, "Commit process in ns", "ops",
        "latency", window);
  }
}
 
Example 7
Source Project: big-c   Source File: NameNodeMetrics.java    License: Apache License 2.0 6 votes vote down vote up
NameNodeMetrics(String processName, String sessionId, int[] intervals,
    final JvmMetrics jvmMetrics) {
  // Keep the shared JVM metrics source and tag all registry metrics with
  // the process name and session id.
  this.jvmMetrics = jvmMetrics;
  registry.tag(ProcessName, processName).tag(SessionId, sessionId);

  // One quantile sampler per configured interval for each tracked latency.
  final int numIntervals = intervals.length;
  syncsQuantiles = new MutableQuantiles[numIntervals];
  blockReportQuantiles = new MutableQuantiles[numIntervals];
  cacheReportQuantiles = new MutableQuantiles[numIntervals];

  for (int idx = 0; idx < numIntervals; idx++) {
    final int window = intervals[idx];
    final String suffix = window + "s";
    syncsQuantiles[idx] = registry.newQuantiles(
        "syncs" + suffix, "Journal syncs", "ops", "latency", window);
    blockReportQuantiles[idx] = registry.newQuantiles(
        "blockReport" + suffix, "Block report", "ops", "latency", window);
    cacheReportQuantiles[idx] = registry.newQuantiles(
        "cacheReport" + suffix, "Cache report", "ops", "latency", window);
  }
}
 
Example 8
Source Project: big-c   Source File: JournalNode.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Start listening for edits via RPC. Validates the local journal
 * directory, initializes metrics, performs the security login, and brings
 * up the HTTP and RPC servers in that order.
 */
public void start() throws IOException {
  // Guard against double-start.
  Preconditions.checkState(!isStarted(), "JN already running");
  
  validateAndCreateJournalDir(localDir);
  
  // Register the metrics system and the JVM metrics source for this
  // daemon, tagged with the configured session id (may be null).
  DefaultMetricsSystem.initialize("JournalNode");
  JvmMetrics.create("JournalNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  // Kerberos login (no-op when security is off) before opening servers.
  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
  
  registerJNMXBean();
  
  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();

  httpServerURI = httpServer.getServerURI().toString();

  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
}
 
Example 9
Source Project: hadoop-ozone   Source File: HddsServerUtil.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Initialize hadoop metrics system for Ozone servers.
 * @param configuration OzoneConfiguration to use.
 * @param serverName    The logical name of the server components.
 * @return the initialized (singleton) metrics system.
 */
public static MetricsSystem initializeMetrics(
    OzoneConfiguration configuration, String serverName) {
  MetricsSystem metricsSystem = DefaultMetricsSystem.initialize(serverName);
  try {
    JvmMetrics.create(serverName,
        configuration.get(DFSConfigKeysLegacy.DFS_METRICS_SESSION_ID_KEY),
        DefaultMetricsSystem.instance());
  } catch (MetricsException e) {
    // JvmMetrics is a singleton source; a second create() throws. Log the
    // actual server name instead of the previously hard-coded "DataNode",
    // which was misleading for every other server type.
    LOG.info("Metrics source JvmMetrics already added to " + serverName + ".");
  }
  return metricsSystem;
}
 
Example 10
Source Project: ambari-metrics   Source File: AMSApplicationServer.java    License: Apache License 2.0 5 votes vote down vote up
@Override
protected void serviceStart() throws Exception {
  // Bring up the metrics system and the JVM metrics source before the web
  // app so metrics are available from the moment the server is reachable.
  // NOTE(review): sessionId is null — presumably no session tag is wanted.
  DefaultMetricsSystem.initialize("AmbariMetricsSystem");
  JvmMetrics.initSingleton("AmbariMetricsSystem", null);

  startWebApp();
  super.serviceStart();
}
 
Example 11
Source Project: XLearning   Source File: JobHistoryServer.java    License: Apache License 2.0 5 votes vote down vote up
@Override
protected void serviceStart() throws Exception {
  // Bring up the metrics system and JVM metrics source for this daemon.
  DefaultMetricsSystem.initialize("JobHistoryServer");
  JvmMetrics.initSingleton("JobHistoryServer", null);
  super.serviceStart();

  // Background daemon thread that runs the log-deletion monitor; daemon so
  // it never prevents JVM shutdown.
  deleteLogManager = new Thread(new deleteLogMonitor());
  deleteLogManager.setName("Log-delete-monitor");
  deleteLogManager.setDaemon(true);
  deleteLogManager.start();
}
 
Example 12
Source Project: hadoop   Source File: SharedCacheManager.java    License: Apache License 2.0 5 votes vote down vote up
@Override
protected void serviceInit(Configuration conf) throws Exception {

  // The persistent store backs every other SCM sub-service, so it is
  // created and registered first.
  this.store = createSCMStoreService(conf);
  addService(store);

  CleanerService cleaner = createCleanerService(store);
  addService(cleaner);

  SharedCacheUploaderService uploader =
      createNMCacheUploaderSCMProtocolService(store);
  addService(uploader);

  ClientProtocolService clientProtocol = createClientProtocolService(store);
  addService(clientProtocol);

  SCMAdminProtocolService adminProtocol = createSCMAdminProtocolService(cleaner);
  addService(adminProtocol);

  SCMWebServer webServer = createSCMWebServer(this);
  addService(webServer);

  // Bring up the metrics system and JVM metrics source for this daemon.
  DefaultMetricsSystem.initialize("SharedCacheManager");
  JvmMetrics.initSingleton("SharedCacheManager", null);

  super.serviceInit(conf);
}
 
Example 13
Source Project: hadoop   Source File: Nfs3Metrics.java    License: Apache License 2.0 5 votes vote down vote up
public static Nfs3Metrics create(Configuration conf, String gatewayName) {
  // Register JVM metrics under the gateway's name, tagged with the
  // configured session id, then register the NFS3 source itself.
  final String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  final MetricsSystem ms = DefaultMetricsSystem.instance();
  final JvmMetrics jm = JvmMetrics.create(gatewayName, sessionId, ms);

  // Percentile measurement is [50th,75th,90th,95th,99th] currently; the
  // rollover windows come from configuration.
  final int[] intervals =
      conf.getInts(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY);
  Nfs3Metrics metrics = new Nfs3Metrics(gatewayName, sessionId, intervals, jm);
  return ms.register(metrics);
}
 
Example 14
Source Project: hadoop   Source File: NameNodeMetrics.java    License: Apache License 2.0 5 votes vote down vote up
public static NameNodeMetrics create(Configuration conf, NamenodeRole r) {
  // Register JVM metrics under the NameNode's role name, tagged with the
  // configured session id, then register the NameNode source itself.
  final String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  final String processName = r.toString();
  final MetricsSystem ms = DefaultMetricsSystem.instance();
  final JvmMetrics jm = JvmMetrics.create(processName, sessionId, ms);

  // Percentile measurement is off by default (no intervals configured).
  final int[] intervals =
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
  NameNodeMetrics metrics =
      new NameNodeMetrics(processName, sessionId, intervals, jm);
  return ms.register(metrics);
}
 
Example 15
Source Project: hadoop   Source File: DataNodeMetrics.java    License: Apache License 2.0 5 votes vote down vote up
public static DataNodeMetrics create(Configuration conf, String dnName) {
  final String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  final MetricsSystem ms = DefaultMetricsSystem.instance();
  final JvmMetrics jm = JvmMetrics.create("DataNode", sessionId, ms);

  // Derive a per-datanode source name: replace ':' (illegal in metric
  // names) with '-', or use a randomized placeholder for an empty name.
  final String suffix;
  if (dnName.isEmpty()) {
    suffix = "UndefinedDataNodeName" + DFSUtil.getRandom().nextInt();
  } else {
    suffix = dnName.replace(':', '-');
  }
  final String name = "DataNodeActivity-" + suffix;

  // Percentile measurement is off by default (no intervals configured).
  final int[] intervals =
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);

  DataNodeMetrics metrics =
      new DataNodeMetrics(name, sessionId, intervals, jm);
  return ms.register(name, null, metrics);
}
 
Example 16
Source Project: nnproxy   Source File: ProxyMetrics.java    License: Apache License 2.0 5 votes vote down vote up
public static ProxyMetrics create(Configuration conf) {
    // Register JVM metrics for the proxy process, then the proxy source
    // itself, both tagged with the configured session id.
    final String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
    final String processName = "NNPROXY";
    final MetricsSystem ms = DefaultMetricsSystem.instance();
    final JvmMetrics jm = JvmMetrics.create(processName, sessionId, ms);

    ProxyMetrics metrics = new ProxyMetrics(processName, sessionId, jm);
    return ms.register(metrics);
}
 
Example 17
Source Project: big-c   Source File: SharedCacheManager.java    License: Apache License 2.0 5 votes vote down vote up
@Override
protected void serviceInit(Configuration conf) throws Exception {

  // The persistent store backs every other SCM sub-service, so it is
  // created and registered first.
  this.store = createSCMStoreService(conf);
  addService(store);

  CleanerService cleaner = createCleanerService(store);
  addService(cleaner);

  SharedCacheUploaderService uploader =
      createNMCacheUploaderSCMProtocolService(store);
  addService(uploader);

  ClientProtocolService clientProtocol = createClientProtocolService(store);
  addService(clientProtocol);

  SCMAdminProtocolService adminProtocol = createSCMAdminProtocolService(cleaner);
  addService(adminProtocol);

  SCMWebServer webServer = createSCMWebServer(this);
  addService(webServer);

  // Bring up the metrics system and JVM metrics source for this daemon.
  DefaultMetricsSystem.initialize("SharedCacheManager");
  JvmMetrics.initSingleton("SharedCacheManager", null);

  super.serviceInit(conf);
}
 
Example 18
Source Project: big-c   Source File: Nfs3Metrics.java    License: Apache License 2.0 5 votes vote down vote up
public static Nfs3Metrics create(Configuration conf, String gatewayName) {
  // Register JVM metrics under the gateway's name, tagged with the
  // configured session id, then register the NFS3 source itself.
  final String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  final MetricsSystem ms = DefaultMetricsSystem.instance();
  final JvmMetrics jm = JvmMetrics.create(gatewayName, sessionId, ms);

  // Percentile measurement is [50th,75th,90th,95th,99th] currently; the
  // rollover windows come from configuration.
  final int[] intervals =
      conf.getInts(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY);
  Nfs3Metrics metrics = new Nfs3Metrics(gatewayName, sessionId, intervals, jm);
  return ms.register(metrics);
}
 
Example 19
Source Project: big-c   Source File: NameNodeMetrics.java    License: Apache License 2.0 5 votes vote down vote up
public static NameNodeMetrics create(Configuration conf, NamenodeRole r) {
  // Register JVM metrics under the NameNode's role name, tagged with the
  // configured session id, then register the NameNode source itself.
  final String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  final String processName = r.toString();
  final MetricsSystem ms = DefaultMetricsSystem.instance();
  final JvmMetrics jm = JvmMetrics.create(processName, sessionId, ms);

  // Percentile measurement is off by default (no intervals configured).
  final int[] intervals =
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
  NameNodeMetrics metrics =
      new NameNodeMetrics(processName, sessionId, intervals, jm);
  return ms.register(metrics);
}
 
Example 20
Source Project: big-c   Source File: DataNodeMetrics.java    License: Apache License 2.0 5 votes vote down vote up
public static DataNodeMetrics create(Configuration conf, String dnName) {
  final String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  final MetricsSystem ms = DefaultMetricsSystem.instance();
  final JvmMetrics jm = JvmMetrics.create("DataNode", sessionId, ms);

  // Derive a per-datanode source name: replace ':' (illegal in metric
  // names) with '-', or use a randomized placeholder for an empty name.
  final String suffix;
  if (dnName.isEmpty()) {
    suffix = "UndefinedDataNodeName" + DFSUtil.getRandom().nextInt();
  } else {
    suffix = dnName.replace(':', '-');
  }
  final String name = "DataNodeActivity-" + suffix;

  // Percentile measurement is off by default (no intervals configured).
  final int[] intervals =
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);

  DataNodeMetrics metrics =
      new DataNodeMetrics(name, sessionId, intervals, jm);
  return ms.register(name, null, metrics);
}
 
Example 21
Source Project: hbase   Source File: BaseSourceImpl.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * One-shot initialization of the HBase metrics system and JVM metrics.
 * Synchronized plus the {@code inited} flag make repeated calls no-ops.
 *
 * @param name source name passed to {@code JvmMetrics.initSingleton}.
 */
synchronized void init(String name) {
  if (inited) {
    return;
  }

  // Flag is set before the work so a re-entrant call cannot re-run it.
  inited = true;
  DefaultMetricsSystem.initialize(HBASE_METRICS_SYSTEM_NAME);
  JvmMetrics.initSingleton(name, "");
  // initialize hbase-metrics module based metric system as well. GlobalMetricRegistriesSource
  // initialization depends on the metric system being already initialized, that is why we are
  // doing it here. Once BaseSourceSourceImpl is removed, we should do the initialization of
  // these elsewhere.
  GlobalMetricRegistriesAdapter.init();
}
 
Example 22
Source Project: hadoop   Source File: NodeManagerMetrics.java    License: Apache License 2.0 4 votes vote down vote up
static NodeManagerMetrics create(MetricsSystem ms) {
  // Register JVM metrics for the NodeManager process (no session id),
  // then register and return the NodeManager metrics source.
  JvmMetrics.create("NodeManager", null, ms);
  NodeManagerMetrics metrics = new NodeManagerMetrics();
  return ms.register(metrics);
}
 
Example 23
Source Project: hadoop   Source File: MRAppMetrics.java    License: Apache License 2.0 4 votes vote down vote up
public static MRAppMetrics create(MetricsSystem ms) {
  // Register the singleton JVM metrics source for the MR app master
  // (no session id), then register and return the MR app source.
  JvmMetrics.initSingleton("MRAppMaster", null);
  MRAppMetrics metrics = new MRAppMetrics();
  return ms.register(metrics);
}
 
Example 24
Source Project: hadoop   Source File: JobHistoryServer.java    License: Apache License 2.0 4 votes vote down vote up
@Override
protected void serviceStart() throws Exception {
  // Bring up the metrics system and the JVM metrics source for this
  // daemon before delegating to the parent service start.
  DefaultMetricsSystem.initialize("JobHistoryServer");
  JvmMetrics.initSingleton("JobHistoryServer", null);
  super.serviceStart();
}
 
Example 25
Source Project: hadoop   Source File: Nfs3Metrics.java    License: Apache License 2.0 4 votes vote down vote up
/** @return the JVM metrics source associated with this metrics object. */
public JvmMetrics getJvmMetrics() {
  return jvmMetrics;
}
 
Example 26
Source Project: hadoop   Source File: NameNodeMetrics.java    License: Apache License 2.0 4 votes vote down vote up
/** @return the JVM metrics source associated with this metrics object. */
public JvmMetrics getJvmMetrics() {
  return jvmMetrics;
}
 
Example 27
Source Project: hadoop   Source File: SecondaryNameNode.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Initialize SecondaryNameNode: security login, metrics, the proxy
 * connection to the active NameNode, checkpoint storage, the namesystem,
 * and finally the info HTTP(S) server. The order of these steps is
 * significant and must not be rearranged.
 */
private void initialize(final Configuration conf,
    CommandLineOpts commandLineOpts) throws IOException {
  final InetSocketAddress infoSocAddr = getHttpAddress(conf);
  final String infoBindAddress = infoSocAddr.getHostName();
  UserGroupInformation.setConfiguration(conf);
  if (UserGroupInformation.isSecurityEnabled()) {
    SecurityUtil.login(conf,
        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY, infoBindAddress);
  }
  // initiate Java VM metrics
  DefaultMetricsSystem.initialize("SecondaryNameNode");
  JvmMetrics.create("SecondaryNameNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  // Create connection to the namenode.
  shouldRun = true;
  nameNodeAddr = NameNode.getServiceAddress(conf, true);

  this.conf = conf;
  this.namenode = NameNodeProxies.createNonHAProxy(conf, nameNodeAddr, 
      NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
      true).getProxy();

  // initialize checkpoint directories; the /tmp paths are only defaults
  // used when nothing is configured.
  fsName = getInfoServer();
  checkpointDirs = FSImage.getCheckpointDirs(conf,
                                "/tmp/hadoop/dfs/namesecondary");
  checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf, 
                                "/tmp/hadoop/dfs/namesecondary");    
  checkpointImage = new CheckpointStorage(conf, checkpointDirs, checkpointEditsDirs);
  checkpointImage.recoverCreate(commandLineOpts.shouldFormat());
  checkpointImage.deleteTempEdits();
  
  namesystem = new FSNamesystem(conf, checkpointImage, true);

  // Disable quota checks
  namesystem.dir.disableQuotaChecks();

  // Initialize other scheduling parameters from the configuration
  checkpointConf = new CheckpointConf(conf);

  final InetSocketAddress httpAddr = infoSocAddr;

  final String httpsAddrString = conf.getTrimmed(
      DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
      DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);

  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "secondary",
      DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
      DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);

  nameNodeStatusBeanName = MBeans.register("SecondaryNameNode",
          "SecondaryNameNodeInfo", this);

  infoServer = builder.build();

  // Expose state to servlets via server attributes, then register the
  // image-transfer servlet and start the web server.
  infoServer.setAttribute("secondary.name.node", this);
  infoServer.setAttribute("name.system.image", checkpointImage);
  infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
  infoServer.addInternalServlet("imagetransfer", ImageServlet.PATH_SPEC,
      ImageServlet.class, true);
  infoServer.start();

  LOG.info("Web server init done");

  // Write the actual bound HTTP/HTTPS addresses back into conf so other
  // components observe the ports that were really allocated.
  HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
  int connIdx = 0;
  if (policy.isHttpEnabled()) {
    InetSocketAddress httpAddress = infoServer.getConnectorAddress(connIdx++);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
        NetUtils.getHostPortString(httpAddress));
  }

  if (policy.isHttpsEnabled()) {
    InetSocketAddress httpsAddress = infoServer.getConnectorAddress(connIdx);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
        NetUtils.getHostPortString(httpsAddress));
  }

  legacyOivImageDir = conf.get(
      DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY);

  LOG.info("Checkpoint Period   :" + checkpointConf.getPeriod() + " secs "
      + "(" + checkpointConf.getPeriod() / 60 + " min)");
  LOG.info("Log Size Trigger    :" + checkpointConf.getTxnCount() + " txns");
}
 
Example 28
Source Project: hadoop   Source File: DataNodeMetrics.java    License: Apache License 2.0 4 votes vote down vote up
public DataNodeMetrics(String name, String sessionId, int[] intervals,
    final JvmMetrics jvmMetrics) {
  // Record the source name, keep a handle on the shared JVM metrics
  // source, and tag every metric in this registry with the session id.
  this.name = name;
  this.jvmMetrics = jvmMetrics;
  registry.tag(SessionId, sessionId);

  // One quantile sampler per configured interval for each tracked latency.
  final int numIntervals = intervals.length;
  packetAckRoundTripTimeNanosQuantiles = new MutableQuantiles[numIntervals];
  flushNanosQuantiles = new MutableQuantiles[numIntervals];
  fsyncNanosQuantiles = new MutableQuantiles[numIntervals];
  sendDataPacketBlockedOnNetworkNanosQuantiles = new MutableQuantiles[numIntervals];
  sendDataPacketTransferNanosQuantiles = new MutableQuantiles[numIntervals];
  ramDiskBlocksEvictionWindowMsQuantiles = new MutableQuantiles[numIntervals];
  ramDiskBlocksLazyPersistWindowMsQuantiles = new MutableQuantiles[numIntervals];

  for (int idx = 0; idx < numIntervals; idx++) {
    final int window = intervals[idx];
    final String suffix = window + "s";
    packetAckRoundTripTimeNanosQuantiles[idx] = registry.newQuantiles(
        "packetAckRoundTripTimeNanos" + suffix,
        "Packet Ack RTT in ns", "ops", "latency", window);
    flushNanosQuantiles[idx] = registry.newQuantiles(
        "flushNanos" + suffix,
        "Disk flush latency in ns", "ops", "latency", window);
    fsyncNanosQuantiles[idx] = registry.newQuantiles(
        "fsyncNanos" + suffix, "Disk fsync latency in ns",
        "ops", "latency", window);
    sendDataPacketBlockedOnNetworkNanosQuantiles[idx] = registry.newQuantiles(
        "sendDataPacketBlockedOnNetworkNanos" + suffix,
        "Time blocked on network while sending a packet in ns",
        "ops", "latency", window);
    sendDataPacketTransferNanosQuantiles[idx] = registry.newQuantiles(
        "sendDataPacketTransferNanos" + suffix,
        "Time reading from disk and writing to network while sending " +
        "a packet in ns", "ops", "latency", window);
    ramDiskBlocksEvictionWindowMsQuantiles[idx] = registry.newQuantiles(
        "ramDiskBlocksEvictionWindows" + suffix,
        "Time between the RamDisk block write and eviction in ms",
        "ops", "latency", window);
    ramDiskBlocksLazyPersistWindowMsQuantiles[idx] = registry.newQuantiles(
        "ramDiskBlocksLazyPersistWindows" + suffix,
        "Time between the RamDisk block write and disk persist in ms",
        "ops", "latency", window);
  }
}
 
Example 29
Source Project: hadoop   Source File: DataNodeMetrics.java    License: Apache License 2.0 4 votes vote down vote up
/** @return the JVM metrics source associated with this metrics object. */
public JvmMetrics getJvmMetrics() {
  return jvmMetrics;
}
 
Example 30
Source Project: nnproxy   Source File: ProxyMetrics.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Builds the proxy metrics source: keeps a handle on the shared JVM
 * metrics source and tags every metric in this registry with the process
 * name and session id.
 */
ProxyMetrics(String processName, String sessionId, final JvmMetrics jvmMetrics) {
    this.jvmMetrics = jvmMetrics;
    registry.tag(ProcessName, processName).tag(SessionId, sessionId);
}