org.apache.hadoop.metrics2.source.JvmMetrics Java Examples

The following examples show how to use org.apache.hadoop.metrics2.source.JvmMetrics. Each example is taken from an open-source project; the source file, originating project, and license are noted above it.
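Most of the examples follow one of two patterns: registering a JvmMetrics source against an explicit MetricsSystem with JvmMetrics.create(...), or calling JvmMetrics.initSingleton(...) when a daemon only ever needs one process-wide instance. Below is a minimal sketch combining the calls used in the examples; the "MyDaemon" process name and the null session id are illustrative, not taken from any of the projects.

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;

public class JvmMetricsBootstrap {
  public static void main(String[] args) {
    // Initialize the default metrics system under a logical process name.
    MetricsSystem ms = DefaultMetricsSystem.initialize("MyDaemon");

    // Pattern 1: create and register a JvmMetrics source explicitly, keeping the
    // reference so other metrics classes can hand it out (see getJvmMetrics() below).
    JvmMetrics jvmMetrics = JvmMetrics.create("MyDaemon", null /* sessionId */, ms);

    // Pattern 2 (alternative): let JvmMetrics manage a process-wide singleton.
    // JvmMetrics.initSingleton("MyDaemon", null);
  }
}

When a session id is available from configuration, the examples pass conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY) instead of null.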
Example #1
Source File: Nfs3Metrics.java    From big-c with Apache License 2.0
public Nfs3Metrics(String name, String sessionId, int[] intervals,
    final JvmMetrics jvmMetrics) {
  this.name = name;
  this.jvmMetrics = jvmMetrics;
  registry.tag(SessionId, sessionId);

  final int len = intervals.length;
  readNanosQuantiles = new MutableQuantiles[len];
  writeNanosQuantiles = new MutableQuantiles[len];
  commitNanosQuantiles = new MutableQuantiles[len];

  for (int i = 0; i < len; i++) {
    int interval = intervals[i];
    readNanosQuantiles[i] = registry.newQuantiles("readProcessNanos"
        + interval + "s", "Read process in ns", "ops", "latency", interval);
    writeNanosQuantiles[i] = registry.newQuantiles("writeProcessNanos"
        + interval + "s", "Write process in ns", "ops", "latency", interval);
    commitNanosQuantiles[i] = registry.newQuantiles("commitProcessNanos"
        + interval + "s", "Commit process in ns", "ops", "latency", interval);
  }
}
 
Example #2
Source File: ApplicationHistoryServer.java    From big-c with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  // init timeline services first
  timelineStore = createTimelineStore(conf);
  addIfService(timelineStore);
  secretManagerService = createTimelineDelegationTokenSecretManagerService(conf);
  addService(secretManagerService);
  timelineDataManager = createTimelineDataManager(conf);
  addService(timelineDataManager);

  // init generic history service afterwards
  aclsManager = createApplicationACLsManager(conf);
  historyManager = createApplicationHistoryManager(conf);
  ahsClientService = createApplicationHistoryClientService(historyManager);
  addService(ahsClientService);
  addService((Service) historyManager);

  DefaultMetricsSystem.initialize("ApplicationHistoryServer");
  JvmMetrics.initSingleton("ApplicationHistoryServer", null);
  super.serviceInit(conf);
}
 
Example #3
Source File: NameNodeMetrics.java    From big-c with Apache License 2.0
NameNodeMetrics(String processName, String sessionId, int[] intervals,
    final JvmMetrics jvmMetrics) {
  this.jvmMetrics = jvmMetrics;
  registry.tag(ProcessName, processName).tag(SessionId, sessionId);
  
  final int len = intervals.length;
  syncsQuantiles = new MutableQuantiles[len];
  blockReportQuantiles = new MutableQuantiles[len];
  cacheReportQuantiles = new MutableQuantiles[len];
  
  for (int i = 0; i < len; i++) {
    int interval = intervals[i];
    syncsQuantiles[i] = registry.newQuantiles(
        "syncs" + interval + "s",
        "Journal syncs", "ops", "latency", interval);
    blockReportQuantiles[i] = registry.newQuantiles(
        "blockReport" + interval + "s", 
        "Block report", "ops", "latency", interval);
    cacheReportQuantiles[i] = registry.newQuantiles(
        "cacheReport" + interval + "s",
        "Cache report", "ops", "latency", interval);
  }
}
 
Example #4
Source File: ApplicationHistoryServer.java    From hadoop with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  // init timeline services first
  timelineStore = createTimelineStore(conf);
  addIfService(timelineStore);
  secretManagerService = createTimelineDelegationTokenSecretManagerService(conf);
  addService(secretManagerService);
  timelineDataManager = createTimelineDataManager(conf);
  addService(timelineDataManager);

  // init generic history service afterwards
  aclsManager = createApplicationACLsManager(conf);
  historyManager = createApplicationHistoryManager(conf);
  ahsClientService = createApplicationHistoryClientService(historyManager);
  addService(ahsClientService);
  addService((Service) historyManager);

  DefaultMetricsSystem.initialize("ApplicationHistoryServer");
  JvmMetrics.initSingleton("ApplicationHistoryServer", null);
  super.serviceInit(conf);
}
 
Example #5
Source File: JournalNode.java    From big-c with Apache License 2.0
/**
 * Start listening for edits via RPC.
 */
public void start() throws IOException {
  Preconditions.checkState(!isStarted(), "JN already running");
  
  validateAndCreateJournalDir(localDir);
  
  DefaultMetricsSystem.initialize("JournalNode");
  JvmMetrics.create("JournalNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
  
  registerJNMXBean();
  
  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();

  httpServerURI = httpServer.getServerURI().toString();

  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
}
 
Example #6
Source File: Nfs3Metrics.java    From hadoop with Apache License 2.0
public Nfs3Metrics(String name, String sessionId, int[] intervals,
    final JvmMetrics jvmMetrics) {
  this.name = name;
  this.jvmMetrics = jvmMetrics;
  registry.tag(SessionId, sessionId);

  final int len = intervals.length;
  readNanosQuantiles = new MutableQuantiles[len];
  writeNanosQuantiles = new MutableQuantiles[len];
  commitNanosQuantiles = new MutableQuantiles[len];

  for (int i = 0; i < len; i++) {
    int interval = intervals[i];
    readNanosQuantiles[i] = registry.newQuantiles("readProcessNanos"
        + interval + "s", "Read process in ns", "ops", "latency", interval);
    writeNanosQuantiles[i] = registry.newQuantiles("writeProcessNanos"
        + interval + "s", "Write process in ns", "ops", "latency", interval);
    commitNanosQuantiles[i] = registry.newQuantiles("commitProcessNanos"
        + interval + "s", "Commit process in ns", "ops", "latency", interval);
  }
}
 
Example #7
Source File: NameNodeMetrics.java    From hadoop with Apache License 2.0
NameNodeMetrics(String processName, String sessionId, int[] intervals,
    final JvmMetrics jvmMetrics) {
  this.jvmMetrics = jvmMetrics;
  registry.tag(ProcessName, processName).tag(SessionId, sessionId);
  
  final int len = intervals.length;
  syncsQuantiles = new MutableQuantiles[len];
  blockReportQuantiles = new MutableQuantiles[len];
  cacheReportQuantiles = new MutableQuantiles[len];
  
  for (int i = 0; i < len; i++) {
    int interval = intervals[i];
    syncsQuantiles[i] = registry.newQuantiles(
        "syncs" + interval + "s",
        "Journal syncs", "ops", "latency", interval);
    blockReportQuantiles[i] = registry.newQuantiles(
        "blockReport" + interval + "s", 
        "Block report", "ops", "latency", interval);
    cacheReportQuantiles[i] = registry.newQuantiles(
        "cacheReport" + interval + "s",
        "Cache report", "ops", "latency", interval);
  }
}
 
Example #8
Source File: JournalNode.java    From hadoop with Apache License 2.0
/**
 * Start listening for edits via RPC.
 */
public void start() throws IOException {
  Preconditions.checkState(!isStarted(), "JN already running");
  
  validateAndCreateJournalDir(localDir);
  
  DefaultMetricsSystem.initialize("JournalNode");
  JvmMetrics.create("JournalNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
  
  registerJNMXBean();
  
  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();

  httpServerURI = httpServer.getServerURI().toString();

  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
}
 
Example #9
Source File: DataNodeMetrics.java    From big-c with Apache License 2.0
public static DataNodeMetrics create(Configuration conf, String dnName) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create("DataNode", sessionId, ms);
  String name = "DataNodeActivity-"+ (dnName.isEmpty()
      ? "UndefinedDataNodeName"+ DFSUtil.getRandom().nextInt() 
          : dnName.replace(':', '-'));

  // Percentile measurement is off by default, by watching no intervals
  int[] intervals = 
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
  
  return ms.register(name, null, new DataNodeMetrics(name, sessionId,
      intervals, jm));
}
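The comment in this example notes that percentile measurement is off by default because no intervals are watched. A minimal sketch of enabling it is shown here, assuming the intervals key takes a comma-separated list of window lengths in seconds (matching the "s" suffix in the quantile names); the 60/300 values, class name, and DataNode name are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;

public class EnableDataNodePercentiles {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Illustrative values: request 60s and 300s percentile windows. With the key
    // unset, conf.getInts(...) returns an empty array and no MutableQuantiles are created.
    conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "60,300");
    DataNodeMetrics metrics = DataNodeMetrics.create(conf, "dn-host-1:9866");
  }
}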
 
Example #10
Source File: Nfs3Metrics.java    From big-c with Apache License 2.0
public static Nfs3Metrics create(Configuration conf, String gatewayName) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create(gatewayName, sessionId, ms);

  // Percentile measurement is [50th,75th,90th,95th,99th] currently 
  int[] intervals = conf
      .getInts(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY);
  return ms.register(new Nfs3Metrics(gatewayName, sessionId, intervals, jm));
}
 
Example #11
Source File: SharedCacheManager.java    From big-c with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {

  this.store = createSCMStoreService(conf);
  addService(store);

  CleanerService cs = createCleanerService(store);
  addService(cs);

  SharedCacheUploaderService nms =
      createNMCacheUploaderSCMProtocolService(store);
  addService(nms);

  ClientProtocolService cps = createClientProtocolService(store);
  addService(cps);

  SCMAdminProtocolService saps = createSCMAdminProtocolService(cs);
  addService(saps);

  SCMWebServer webUI = createSCMWebServer(this);
  addService(webUI);

  // init metrics
  DefaultMetricsSystem.initialize("SharedCacheManager");
  JvmMetrics.initSingleton("SharedCacheManager", null);

  super.serviceInit(conf);
}
 
Example #12
Source File: ProxyMetrics.java    From nnproxy with Apache License 2.0
public static ProxyMetrics create(Configuration conf) {
    String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
    String processName = "NNPROXY";
    MetricsSystem ms = DefaultMetricsSystem.instance();
    JvmMetrics jm = JvmMetrics.create(processName, sessionId, ms);

    return ms.register(new ProxyMetrics(processName, sessionId, jm));
}
 
Example #13
Source File: NameNodeMetrics.java    From big-c with Apache License 2.0
public static NameNodeMetrics create(Configuration conf, NamenodeRole r) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  String processName = r.toString();
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create(processName, sessionId, ms);
  
  // Percentile measurement is off by default, by watching no intervals
  int[] intervals = 
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
  return ms.register(new NameNodeMetrics(processName, sessionId,
      intervals, jm));
}
 
Example #14
Source File: DataNodeMetrics.java    From hadoop with Apache License 2.0
public static DataNodeMetrics create(Configuration conf, String dnName) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create("DataNode", sessionId, ms);
  String name = "DataNodeActivity-"+ (dnName.isEmpty()
      ? "UndefinedDataNodeName"+ DFSUtil.getRandom().nextInt() 
          : dnName.replace(':', '-'));

  // Percentile measurement is off by default, by watching no intervals
  int[] intervals = 
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
  
  return ms.register(name, null, new DataNodeMetrics(name, sessionId,
      intervals, jm));
}
 
Example #15
Source File: NameNodeMetrics.java    From hadoop with Apache License 2.0
public static NameNodeMetrics create(Configuration conf, NamenodeRole r) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  String processName = r.toString();
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create(processName, sessionId, ms);
  
  // Percentile measurement is off by default, by watching no intervals
  int[] intervals = 
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
  return ms.register(new NameNodeMetrics(processName, sessionId,
      intervals, jm));
}
 
Example #16
Source File: Nfs3Metrics.java    From hadoop with Apache License 2.0
public static Nfs3Metrics create(Configuration conf, String gatewayName) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create(gatewayName, sessionId, ms);

  // Percentile measurement is [50th,75th,90th,95th,99th] currently 
  int[] intervals = conf
      .getInts(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY);
  return ms.register(new Nfs3Metrics(gatewayName, sessionId, intervals, jm));
}
 
Example #17
Source File: BaseSourceImpl.java    From hbase with Apache License 2.0
synchronized void init(String name) {
  if (inited) {
    return;
  }

  inited = true;
  DefaultMetricsSystem.initialize(HBASE_METRICS_SYSTEM_NAME);
  JvmMetrics.initSingleton(name, "");
  // initialize hbase-metrics module based metric system as well. GlobalMetricRegistriesSource
  // initialization depends on the metric system being already initialized, that is why we are
  // doing it here. Once BaseSourceImpl is removed, we should do the initialization of
  // these elsewhere.
  GlobalMetricRegistriesAdapter.init();
}
 
Example #18
Source File: HddsServerUtil.java    From hadoop-ozone with Apache License 2.0
/**
 * Initialize hadoop metrics system for Ozone servers.
 * @param configuration OzoneConfiguration to use.
 * @param serverName    The logical name of the server components.
 */
public static MetricsSystem initializeMetrics(
    OzoneConfiguration configuration, String serverName) {
  MetricsSystem metricsSystem = DefaultMetricsSystem.initialize(serverName);
  try {
    JvmMetrics.create(serverName,
        configuration.get(DFSConfigKeysLegacy.DFS_METRICS_SESSION_ID_KEY),
        DefaultMetricsSystem.instance());
  } catch (MetricsException e) {
    LOG.info("Metrics source JvmMetrics already added to DataNode.");
  }
  return metricsSystem;
}
 
Example #19
Source File: SharedCacheManager.java    From hadoop with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {

  this.store = createSCMStoreService(conf);
  addService(store);

  CleanerService cs = createCleanerService(store);
  addService(cs);

  SharedCacheUploaderService nms =
      createNMCacheUploaderSCMProtocolService(store);
  addService(nms);

  ClientProtocolService cps = createClientProtocolService(store);
  addService(cps);

  SCMAdminProtocolService saps = createSCMAdminProtocolService(cs);
  addService(saps);

  SCMWebServer webUI = createSCMWebServer(this);
  addService(webUI);

  // init metrics
  DefaultMetricsSystem.initialize("SharedCacheManager");
  JvmMetrics.initSingleton("SharedCacheManager", null);

  super.serviceInit(conf);
}
 
Example #20
Source File: JobHistoryServer.java    From XLearning with Apache License 2.0
@Override
protected void serviceStart() throws Exception {
  DefaultMetricsSystem.initialize("JobHistoryServer");
  JvmMetrics.initSingleton("JobHistoryServer", null);
  super.serviceStart();

  deleteLogManager = new Thread(new deleteLogMonitor());
  deleteLogManager.setName("Log-delete-monitor");
  deleteLogManager.setDaemon(true);
  deleteLogManager.start();
}
 
Example #21
Source File: AMSApplicationServer.java    From ambari-metrics with Apache License 2.0
@Override
protected void serviceStart() throws Exception {
  DefaultMetricsSystem.initialize("AmbariMetricsSystem");
  JvmMetrics.initSingleton("AmbariMetricsSystem", null);

  startWebApp();
  super.serviceStart();
}
 
Example #22
Source File: DataNodeMetrics.java    From big-c with Apache License 2.0
public DataNodeMetrics(String name, String sessionId, int[] intervals,
    final JvmMetrics jvmMetrics) {
  this.name = name;
  this.jvmMetrics = jvmMetrics;    
  registry.tag(SessionId, sessionId);
  
  final int len = intervals.length;
  packetAckRoundTripTimeNanosQuantiles = new MutableQuantiles[len];
  flushNanosQuantiles = new MutableQuantiles[len];
  fsyncNanosQuantiles = new MutableQuantiles[len];
  sendDataPacketBlockedOnNetworkNanosQuantiles = new MutableQuantiles[len];
  sendDataPacketTransferNanosQuantiles = new MutableQuantiles[len];
  ramDiskBlocksEvictionWindowMsQuantiles = new MutableQuantiles[len];
  ramDiskBlocksLazyPersistWindowMsQuantiles = new MutableQuantiles[len];
  
  for (int i = 0; i < len; i++) {
    int interval = intervals[i];
    packetAckRoundTripTimeNanosQuantiles[i] = registry.newQuantiles(
        "packetAckRoundTripTimeNanos" + interval + "s",
        "Packet Ack RTT in ns", "ops", "latency", interval);
    flushNanosQuantiles[i] = registry.newQuantiles(
        "flushNanos" + interval + "s", 
        "Disk flush latency in ns", "ops", "latency", interval);
    fsyncNanosQuantiles[i] = registry.newQuantiles(
        "fsyncNanos" + interval + "s", "Disk fsync latency in ns", 
        "ops", "latency", interval);
    sendDataPacketBlockedOnNetworkNanosQuantiles[i] = registry.newQuantiles(
        "sendDataPacketBlockedOnNetworkNanos" + interval + "s", 
        "Time blocked on network while sending a packet in ns",
        "ops", "latency", interval);
    sendDataPacketTransferNanosQuantiles[i] = registry.newQuantiles(
        "sendDataPacketTransferNanos" + interval + "s", 
        "Time reading from disk and writing to network while sending " +
        "a packet in ns", "ops", "latency", interval);
    ramDiskBlocksEvictionWindowMsQuantiles[i] = registry.newQuantiles(
        "ramDiskBlocksEvictionWindows" + interval + "s",
        "Time between the RamDisk block write and eviction in ms",
        "ops", "latency", interval);
    ramDiskBlocksLazyPersistWindowMsQuantiles[i] = registry.newQuantiles(
        "ramDiskBlocksLazyPersistWindows" + interval + "s",
        "Time between the RamDisk block write and disk persist in ms",
        "ops", "latency", interval);
  }
}
 
Example #23
Source File: SecondaryNameNode.java    From big-c with Apache License 2.0
/**
 * Initialize SecondaryNameNode.
 */
private void initialize(final Configuration conf,
    CommandLineOpts commandLineOpts) throws IOException {
  final InetSocketAddress infoSocAddr = getHttpAddress(conf);
  final String infoBindAddress = infoSocAddr.getHostName();
  UserGroupInformation.setConfiguration(conf);
  if (UserGroupInformation.isSecurityEnabled()) {
    SecurityUtil.login(conf,
        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY, infoBindAddress);
  }
  // initiate Java VM metrics
  DefaultMetricsSystem.initialize("SecondaryNameNode");
  JvmMetrics.create("SecondaryNameNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  // Create connection to the namenode.
  shouldRun = true;
  nameNodeAddr = NameNode.getServiceAddress(conf, true);

  this.conf = conf;
  this.namenode = NameNodeProxies.createNonHAProxy(conf, nameNodeAddr, 
      NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
      true).getProxy();

  // initialize checkpoint directories
  fsName = getInfoServer();
  checkpointDirs = FSImage.getCheckpointDirs(conf,
                                "/tmp/hadoop/dfs/namesecondary");
  checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf, 
                                "/tmp/hadoop/dfs/namesecondary");    
  checkpointImage = new CheckpointStorage(conf, checkpointDirs, checkpointEditsDirs);
  checkpointImage.recoverCreate(commandLineOpts.shouldFormat());
  checkpointImage.deleteTempEdits();
  
  namesystem = new FSNamesystem(conf, checkpointImage, true);

  // Disable quota checks
  namesystem.dir.disableQuotaChecks();

  // Initialize other scheduling parameters from the configuration
  checkpointConf = new CheckpointConf(conf);

  final InetSocketAddress httpAddr = infoSocAddr;

  final String httpsAddrString = conf.getTrimmed(
      DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
      DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);

  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "secondary",
      DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
      DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);

  nameNodeStatusBeanName = MBeans.register("SecondaryNameNode",
          "SecondaryNameNodeInfo", this);

  infoServer = builder.build();

  infoServer.setAttribute("secondary.name.node", this);
  infoServer.setAttribute("name.system.image", checkpointImage);
  infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
  infoServer.addInternalServlet("imagetransfer", ImageServlet.PATH_SPEC,
      ImageServlet.class, true);
  infoServer.start();

  LOG.info("Web server init done");

  HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
  int connIdx = 0;
  if (policy.isHttpEnabled()) {
    InetSocketAddress httpAddress = infoServer.getConnectorAddress(connIdx++);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
        NetUtils.getHostPortString(httpAddress));
  }

  if (policy.isHttpsEnabled()) {
    InetSocketAddress httpsAddress = infoServer.getConnectorAddress(connIdx);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
        NetUtils.getHostPortString(httpsAddress));
  }

  legacyOivImageDir = conf.get(
      DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY);

  LOG.info("Checkpoint Period   :" + checkpointConf.getPeriod() + " secs "
      + "(" + checkpointConf.getPeriod() / 60 + " min)");
  LOG.info("Log Size Trigger    :" + checkpointConf.getTxnCount() + " txns");
}
 
Example #24
Source File: GlobalMetricRegistriesAdapter.java    From phoenix with Apache License 2.0
private GlobalMetricRegistriesAdapter() {
    DefaultMetricsSystem.initialize("Phoenix");
    JvmMetrics.initSingleton("Phoenix", "");
}
 
Example #25
Source File: NameNodeMetrics.java    From big-c with Apache License 2.0
public JvmMetrics getJvmMetrics() {
  return jvmMetrics;
}
 
Example #26
Source File: DataNodeMetrics.java    From big-c with Apache License 2.0
public JvmMetrics getJvmMetrics() {
  return jvmMetrics;
}
 
Example #27
Source File: Nfs3Metrics.java    From big-c with Apache License 2.0
public JvmMetrics getJvmMetrics() {
  return jvmMetrics;
}
 
Example #28
Source File: JobHistoryServer.java    From big-c with Apache License 2.0
@Override
protected void serviceStart() throws Exception {
  DefaultMetricsSystem.initialize("JobHistoryServer");
  JvmMetrics.initSingleton("JobHistoryServer", null);
  super.serviceStart();
}
 
Example #29
Source File: MRAppMetrics.java    From big-c with Apache License 2.0
public static MRAppMetrics create(MetricsSystem ms) {
  JvmMetrics.initSingleton("MRAppMaster", null);
  return ms.register(new MRAppMetrics());
}
 
Example #30
Source File: NodeManagerMetrics.java    From big-c with Apache License 2.0
static NodeManagerMetrics create(MetricsSystem ms) {
  JvmMetrics.create("NodeManager", null, ms);
  return ms.register(new NodeManagerMetrics());
}