Java Code Examples for org.apache.hadoop.metrics2.lib.DefaultMetricsSystem

The following examples show how to use org.apache.hadoop.metrics2.lib.DefaultMetricsSystem. They are extracted from open source projects; the source project, file, and license are noted above each example.
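Before the project examples, here is a minimal, self-contained sketch of the typical DefaultMetricsSystem lifecycle: initialize the process-wide metrics system, register an annotated metrics source, update a metric, and shut the system down. The class, metric, and prefix names below are illustrative only and are not taken from any of the projects listed; treat this as an orientation sketch rather than production code.

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

// Hypothetical example class; names are placeholders, not from the projects below.
@Metrics(about = "Example metrics source", context = "example")
public class ExampleMetrics {

  // Instantiated and injected by the metrics system when the source is registered.
  @Metric("Number of requests handled")
  MutableCounterLong requests;

  public void incrRequests() {
    requests.incr();
  }

  public static void main(String[] args) {
    // Initialize the singleton metrics system under an application prefix.
    MetricsSystem ms = DefaultMetricsSystem.initialize("ExampleApp");

    // Register this annotated source; register() returns the source instance.
    ExampleMetrics metrics = ms.register("ExampleMetrics",
        "Metrics for the example application", new ExampleMetrics());

    metrics.incrRequests();

    // Stop the metrics system on application shutdown.
    DefaultMetricsSystem.shutdown();
  }
}

The examples that follow show the same calls (initialize, instance, register, getSource, shutdown, setMiniClusterMode) as they appear in real projects.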
Example 1
Source Project: hadoop   Source File: ApplicationHistoryServer.java    License: Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  // init timeline services first
  timelineStore = createTimelineStore(conf);
  addIfService(timelineStore);
  secretManagerService = createTimelineDelegationTokenSecretManagerService(conf);
  addService(secretManagerService);
  timelineDataManager = createTimelineDataManager(conf);
  addService(timelineDataManager);

  // init generic history service afterwards
  aclsManager = createApplicationACLsManager(conf);
  historyManager = createApplicationHistoryManager(conf);
  ahsClientService = createApplicationHistoryClientService(historyManager);
  addService(ahsClientService);
  addService((Service) historyManager);

  DefaultMetricsSystem.initialize("ApplicationHistoryServer");
  JvmMetrics.initSingleton("ApplicationHistoryServer", null);
  super.serviceInit(conf);
}
 
Example 2
Source Project: hadoop   Source File: ResourceManager.java    License: Apache License 2.0
@Override
protected void serviceStop() throws Exception {

  DefaultMetricsSystem.shutdown();

  if (rmContext != null) {
    RMStateStore store = rmContext.getStateStore();
    try {
      store.close();
    } catch (Exception e) {
      LOG.error("Error closing store.", e);
    }
  }

  super.serviceStop();
}
 
Example 3
Source Project: hbase   Source File: JmxCacheBuster.java    License: Apache License 2.0
@Override
public void run() {
  if (LOG.isTraceEnabled()) {
    LOG.trace("Clearing JMX mbean cache.");
  }

  // This is pretty extreme but it's the best way that
  // I could find to get metrics to be removed.
  try {
    if (DefaultMetricsSystem.instance() != null) {
      DefaultMetricsSystem.instance().stop();
      // Sleep some time so that the rest of the hadoop metrics
      // system knows that things are done
      Thread.sleep(500);
      DefaultMetricsSystem.instance().start();
    }
  }  catch (Exception exception)  {
    LOG.debug("error clearing the jmx it appears the metrics system hasn't been started",
        exception);
  }
}
 
Example 4
Source Project: hadoop   Source File: FSQueueMetrics.java    License: Apache License 2.0
public static synchronized FSQueueMetrics forQueue(String queueName, Queue parent,
    boolean enableUserMetrics, Configuration conf) {
  MetricsSystem ms = DefaultMetricsSystem.instance();
  QueueMetrics metrics = queueMetrics.get(queueName);
  if (metrics == null) {
    metrics = new FSQueueMetrics(ms, queueName, parent, enableUserMetrics, conf)
        .tag(QUEUE_INFO, queueName);

    // Register with the MetricsSystem
    if (ms != null) {
      metrics = ms.register(
          sourceName(queueName).toString(),
          "Metrics for queue: " + queueName, metrics);
    }
    queueMetrics.put(queueName, metrics);
  }

  return (FSQueueMetrics) metrics;
}
 
Example 5
Source Project: big-c   Source File: TestRMHA.java    License: Apache License 2.0
@Before
public void setUp() throws Exception {
  configuration = new Configuration();
  UserGroupInformation.setConfiguration(configuration);
  configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  configuration.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + ","
      + RM2_NODE_ID);
  for (String confKey : YarnConfiguration
      .getServiceAddressConfKeys(configuration)) {
    configuration.set(HAUtil.addSuffix(confKey, RM1_NODE_ID), RM1_ADDRESS);
    configuration.set(HAUtil.addSuffix(confKey, RM2_NODE_ID), RM2_ADDRESS);
    configuration.set(HAUtil.addSuffix(confKey, RM3_NODE_ID), RM3_ADDRESS);
  }

  // Enable webapp to test web-services also
  configuration.setBoolean(MockRM.ENABLE_WEBAPP, true);
  configuration.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
  ClusterMetrics.destroy();
  QueueMetrics.clearQueueMetrics();
  DefaultMetricsSystem.shutdown();
}
 
Example 6
Source Project: big-c   Source File: FSQueueMetrics.java    License: Apache License 2.0
public static synchronized FSQueueMetrics forQueue(String queueName, Queue parent,
    boolean enableUserMetrics, Configuration conf) {
  MetricsSystem ms = DefaultMetricsSystem.instance();
  QueueMetrics metrics = queueMetrics.get(queueName);
  if (metrics == null) {
    metrics = new FSQueueMetrics(ms, queueName, parent, enableUserMetrics, conf)
        .tag(QUEUE_INFO, queueName);

    // Register with the MetricsSystem
    if (ms != null) {
      metrics = ms.register(
          sourceName(queueName).toString(),
          "Metrics for queue: " + queueName, metrics);
    }
    queueMetrics.put(queueName, metrics);
  }

  return (FSQueueMetrics) metrics;
}
 
Example 7
Source Project: hadoop   Source File: QueueMetrics.java    License: Apache License 2.0
public static synchronized QueueMetrics forQueue(String queueName, Queue parent,
    boolean enableUserMetrics, Configuration conf) {
  return forQueue(DefaultMetricsSystem.instance(), queueName, parent,
      enableUserMetrics, conf);
}
 
Example 8
Source Project: hadoop-ozone   Source File: TestContainerScrubberMetrics.java    License: Apache License 2.0
@Test
public void testContainerMetaDataScrubberMetricsUnregisters() {
  ContainerMetadataScanner subject =
      new ContainerMetadataScanner(conf, controller);
  String name = subject.getMetrics().getName();

  assertNotNull(DefaultMetricsSystem.instance().getSource(name));

  subject.shutdown();
  subject.run();

  assertNull(DefaultMetricsSystem.instance().getSource(name));
}
 
Example 9
Source Project: big-c   Source File: NodeManager.java    License: Apache License 2.0
@Override
protected void serviceStop() throws Exception {
  if (isStopping.getAndSet(true)) {
    return;
  }
  try {
    super.serviceStop();
    DefaultMetricsSystem.shutdown();
  } finally {
    // YARN-3641: a failure while stopping the NM's services shouldn't block
    // the release of the NMLevelDBStore.
    stopRecoveryStore();
  }
}
 
Example 10
Source Project: big-c   Source File: StartupProgressMetrics.java    License: Apache License 2.0
/**
 * Creates a new StartupProgressMetrics registered with the metrics system.
 * 
 * @param startupProgress StartupProgress to link
 */
public StartupProgressMetrics(StartupProgress startupProgress) {
  this.startupProgress = startupProgress;
  DefaultMetricsSystem.instance().register(
    STARTUP_PROGRESS_METRICS_INFO.name(),
    STARTUP_PROGRESS_METRICS_INFO.description(), this);
}
 
Example 11
Source Project: hadoop-ozone   Source File: SCMPipelineMetrics.java    License: Apache License 2.0
/**
 * Creates and returns the SCMPipelineMetrics instance.
 *
 * @return SCMPipelineMetrics
 */
public static synchronized SCMPipelineMetrics create() {
  if (instance != null) {
    return instance;
  }
  MetricsSystem ms = DefaultMetricsSystem.instance();
  instance = ms.register(SOURCE_NAME, "SCM PipelineManager Metrics",
      new SCMPipelineMetrics());
  return instance;
}
 
Example 12
Source Project: big-c   Source File: TestNodeStatusUpdater.java    License: Apache License 2.0
@After
public void tearDown() {
  this.registeredNodes.clear();
  heartBeatID = 0;
  ServiceOperations.stop(nm);
  assertionFailedInThread.set(false);
  DefaultMetricsSystem.shutdown();
}
 
Example 13
Source Project: big-c   Source File: TestMRAppMaster.java    License: Apache License 2.0
@BeforeClass
public static void setup() throws AccessControlException,
    FileNotFoundException, IllegalArgumentException, IOException {
  // Do not error out if metrics are initialized multiple times
  DefaultMetricsSystem.setMiniClusterMode(true);
  File dir = new File(stagingDir);
  stagingDir = dir.getAbsolutePath();
  localFS = FileContext.getLocalFSFileContext();
  localFS.delete(new Path(testDir.getAbsolutePath()), true);
  testDir.mkdir();
}
 
Example 14
Source Project: hadoop-ozone   Source File: RocksDBStoreMBean.java    License: Apache License 2.0
public static RocksDBStoreMBean create(Statistics statistics,
                                       String contextName) {

  RocksDBStoreMBean rocksDBStoreMBean = new RocksDBStoreMBean(
      statistics, contextName);
  MetricsSystem ms = DefaultMetricsSystem.instance();
  MetricsSource metricsSource = ms.getSource(rocksDBStoreMBean.contextName);
  if (metricsSource != null) {
    return (RocksDBStoreMBean)metricsSource;
  } else {
    return ms.register(rocksDBStoreMBean.contextName,
        "RocksDB Metrics",
        rocksDBStoreMBean);
  }
}
 
Example 15
Source Project: hadoop   Source File: RpcProgramNfs3.java    License: Apache License 2.0
public static RpcProgramNfs3 createRpcProgramNfs3(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  DefaultMetricsSystem.initialize("Nfs3");
  String displayName = DNS.getDefaultHost("default", "default")
      + config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
          NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT);
  metrics = Nfs3Metrics.create(config, displayName);
  return new RpcProgramNfs3(config, registrationSocket, allowInsecurePorts);
}
 
Example 16
Source Project: hadoop   Source File: TestFairScheduler.java    License: Apache License 2.0
@After
public void tearDown() {
  if (scheduler != null) {
    scheduler.stop();
    scheduler = null;
  }
  if (resourceManager != null) {
    resourceManager.stop();
    resourceManager = null;
  }
  QueueMetrics.clearQueueMetrics();
  DefaultMetricsSystem.shutdown();
}
 
Example 17
Source Project: hadoop-ozone   Source File: BaseHttpServer.java    License: Apache License 2.0
public void start() throws IOException {
  if (httpServer != null && isEnabled()) {
    httpServer.start();
    if (prometheusSupport) {
      DefaultMetricsSystem.instance()
          .register("prometheus", "Hadoop metrics prometheus exporter",
              prometheusMetricsSink);
    }
    updateConnectorAddress();
  }

}
 
Example 18
Source Project: hbase   Source File: BaseSourceImpl.java    License: Apache License 2.0
synchronized void init(String name) {
  if (inited) {
    return;
  }

  inited = true;
  DefaultMetricsSystem.initialize(HBASE_METRICS_SYSTEM_NAME);
  JvmMetrics.initSingleton(name, "");
  // Initialize the hbase-metrics module based metric system as well. GlobalMetricRegistriesSource
  // initialization depends on the metric system already being initialized, which is why we are
  // doing it here. Once BaseSourceImpl is removed, we should do the initialization of
  // these elsewhere.
  GlobalMetricRegistriesAdapter.init();
}
 
Example 19
Source Project: incubator-tez   Source File: TezChild.java    License: Apache License 2.0
private void shutdown() {
  executor.shutdownNow();
  if (taskReporter != null) {
    taskReporter.shutdown();
  }
  RPC.stopProxy(umbilical);
  DefaultMetricsSystem.shutdown();
  LogManager.shutdown();
}
 
Example 20
Source Project: hbase   Source File: TestBalancerStatusTagInJMXMetrics.java    License: Apache License 2.0
/**
 * Gets the balancer status tag from the Metrics registry
 */
public String getStatus() throws Exception {
  MetricsSource source =
      DefaultMetricsSystem.instance().getSource(MetricsBalancerSource.METRICS_JMX_CONTEXT);
  if (source instanceof MetricsBalancerSourceImpl) {
    MetricsTag status = ((MetricsBalancerSourceImpl) source).getMetricsRegistry()
        .getTag(MetricsBalancerSource.BALANCER_STATUS);
    return status.value();
  } else {
    LOG.warn("Balancer JMX Metrics not registered");
    throw new Exception("MetricsBalancer JMX Context not found");
  }
}
 
Example 21
Source Project: hadoop-ozone   Source File: MiniOzoneChaosCluster.java    License: Apache License 2.0
@Override
public MiniOzoneChaosCluster build() throws IOException {

  DefaultMetricsSystem.setMiniClusterMode(true);
  initializeConfiguration();
  if (numOfOMs > 1) {
    initOMRatisConf();
  }

  StorageContainerManager scm;
  List<OzoneManager> omList;
  try {
    scm = createSCM();
    scm.start();
    if (numOfOMs > 1) {
      omList = createOMService();
    } else {
      OzoneManager om = createOM();
      om.start();
      omList = Arrays.asList(om);
    }
  } catch (AuthenticationException ex) {
    throw new IOException("Unable to build MiniOzoneCluster. ", ex);
  }

  final List<HddsDatanodeService> hddsDatanodes = createHddsDatanodes(
      scm, null);

  MiniOzoneChaosCluster cluster =
      new MiniOzoneChaosCluster(conf, omList, scm, hddsDatanodes,
          omServiceId, clazzes);

  if (startDataNodes) {
    cluster.startHddsDatanodes();
  }
  return cluster;
}
 
Example 22
Source Project: big-c   Source File: ClientSCMMetrics.java    License: Apache License 2.0
static ClientSCMMetrics create() {
  MetricsSystem ms = DefaultMetricsSystem.instance();

  ClientSCMMetrics metrics = new ClientSCMMetrics();
  ms.register("clientRequests", null, metrics);
  return metrics;
}
 
Example 23
Source Project: hadoop-ozone   Source File: MiniOzoneClusterImpl.java    License: Apache License 2.0
@Override
public void shutdown() {
  try {
    LOG.info("Shutting down the Mini Ozone Cluster");
    File baseDir = new File(GenericTestUtils.getTempPath(
        MiniOzoneClusterImpl.class.getSimpleName() + "-" +
            scm.getClientProtocolServer().getScmInfo().getClusterId()));
    stop();
    FileUtils.deleteDirectory(baseDir);
    ContainerCache.getInstance(conf).shutdownCache();
    DefaultMetricsSystem.shutdown();
  } catch (IOException e) {
    LOG.error("Exception while shutting down the cluster.", e);
  }
}
 
Example 24
Source Project: hadoop-ozone   Source File: TestSecureOzoneCluster.java    License: Apache License 2.0
@Before
public void init() {
  try {
    conf = new OzoneConfiguration();
    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost");

    conf.setInt(OZONE_SCM_CLIENT_PORT_KEY,
        getPort(OZONE_SCM_CLIENT_PORT_DEFAULT, 100));
    conf.setInt(OZONE_SCM_DATANODE_PORT_KEY,
        getPort(OZONE_SCM_DATANODE_PORT_DEFAULT, 100));
    conf.setInt(OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
        getPort(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, 100));
    conf.setInt(OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
        getPort(OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT, 100));

    DefaultMetricsSystem.setMiniClusterMode(true);
    final String path = folder.newFolder().toString();
    Path metaDirPath = Paths.get(path, "om-meta");
    conf.set(OZONE_METADATA_DIRS, metaDirPath.toString());
    conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
    conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name());

    workDir = GenericTestUtils.getTestDir(getClass().getSimpleName());

    startMiniKdc();
    setSecureConfig();
    createCredentialsInKDC();
    generateKeyPair();
    // OzoneManager.setTestSecureOmFlag(true);
  } catch (Exception e) {
    LOG.error("Failed to initialize TestSecureOzoneCluster", e);
  }
}
 
Example 25
Source Project: hadoop   Source File: TestMRAppMaster.java    License: Apache License 2.0
@BeforeClass
public static void setup() throws AccessControlException,
    FileNotFoundException, IllegalArgumentException, IOException {
  // Do not error out if metrics are initialized multiple times
  DefaultMetricsSystem.setMiniClusterMode(true);
  File dir = new File(stagingDir);
  stagingDir = dir.getAbsolutePath();
  localFS = FileContext.getLocalFSFileContext();
  localFS.delete(new Path(testDir.getAbsolutePath()), true);
  testDir.mkdir();
}
 
Example 26
Source Project: hadoop   Source File: ApplicationHistoryServer.java    License: Apache License 2.0
@Override
protected void serviceStop() throws Exception {
  if (webApp != null) {
    webApp.stop();
  }

  DefaultMetricsSystem.shutdown();
  super.serviceStop();
}
 
Example 27
Source Project: hadoop-ozone   Source File: TestSecureContainerServer.java    License: Apache License 2.0
@BeforeClass
public static void setup() throws Exception {
  DefaultMetricsSystem.setMiniClusterMode(true);
  CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR);
  CONF.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
  CONF.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true);
  caClient = new CertificateClientTestImpl(CONF);
}
 
Example 28
Source Project: hadoop   Source File: FSOpDurations.java    License: Apache License 2.0
private FSOpDurations() {
  registry = new MetricsRegistry(RECORD_INFO);
  registry.tag(RECORD_INFO, "FSOpDurations");

  MetricsSystem ms = DefaultMetricsSystem.instance();
  if (ms != null) {
    ms.register(RECORD_INFO.name(), RECORD_INFO.description(), this);
  }
}
 
Example 29
Source Project: big-c   Source File: MBeans.java    License: Apache License 2.0
private static ObjectName getMBeanName(String serviceName, String nameName) {
  ObjectName name = null;
  String nameStr = "Hadoop:service="+ serviceName +",name="+ nameName;
  try {
    name = DefaultMetricsSystem.newMBeanName(nameStr);
  } catch (Exception e) {
    LOG.warn("Error creating MBean object name: "+ nameStr, e);
  }
  return name;
}
 
Example 30
Source Project: kylin-on-parquet-v2   Source File: StandaloneExample.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
    final MetricRegistry metrics = new MetricRegistry();

    final HadoopMetrics2Reporter metrics2Reporter = HadoopMetrics2Reporter.forRegistry(metrics).build(
            DefaultMetricsSystem.initialize("StandaloneTest"), // The application-level name
            "Test", // Component name
            "Test", // Component description
            "Test"); // Name for each metric record
    final ConsoleReporter consoleReporter = ConsoleReporter.forRegistry(metrics).build();

    MetricsSystem metrics2 = DefaultMetricsSystem.instance();
    // Writes to stdout without a filename configuration
    // Will be invoked every 10 seconds by default
    FileSink sink = new FileSink();
    metrics2.register("filesink", "filesink", sink);
    sink.init(new SubsetConfiguration(null, null) {
        public String getString(String key) {
            if (key.equals("filename")) {
                return null;
            }
            return super.getString(key);
        }
    });

    // How often should the dropwizard reporter be invoked
    metrics2Reporter.start(500, TimeUnit.MILLISECONDS);
    // How often the dropwizard metrics will be logged to the console
    consoleReporter.start(2, TimeUnit.SECONDS);

    generateMetrics(metrics, 5000, 25, TimeUnit.MILLISECONDS, metrics2Reporter, 10);
}