java.lang.management.ManagementFactory Scala Examples

The following examples show how to use java.lang.management.ManagementFactory. They are drawn from open-source Scala projects; the source file and project for each example are noted in its header.
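Before diving in, here is a minimal, self-contained sketch (not from any of the projects below) of the ManagementFactory entry points that recur throughout these examples: the platform MBean server and the typed MX beans for the runtime, memory, and thread subsystems.

import java.lang.management.ManagementFactory

object ManagementFactoryTour extends App {
  // The platform MBean server backs the register/query/getAttribute calls in the examples below
  val server = ManagementFactory.getPlatformMBeanServer

  val runtime = ManagementFactory.getRuntimeMXBean // uptime, JVM name ("pid@host")
  val memory  = ManagementFactory.getMemoryMXBean  // heap and non-heap usage
  val threads = ManagementFactory.getThreadMXBean  // thread counts and CPU time

  println(s"JVM ${runtime.getName} has been up for ${runtime.getUptime} ms")
  println(s"Heap used: ${memory.getHeapMemoryUsage.getUsed} bytes")
  println(s"Live threads: ${threads.getThreadCount}")
}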
Example 1
Source File: MetricsSystem.scala    From kyuubi   with Apache License 2.0
package yaooqinn.kyuubi.metrics

import java.io.Closeable
import java.lang.management.ManagementFactory
import java.util.concurrent.TimeUnit

import com.codahale.metrics._
import com.codahale.metrics.jvm._
import org.apache.kyuubi.Logging
import org.apache.spark.{KyuubiSparkUtil, SparkConf}
import org.apache.spark.KyuubiConf._

private[kyuubi] class MetricsSystem(conf: SparkConf) extends Logging {

  private val registry = new MetricRegistry

  registry.registerAll(new GarbageCollectorMetricSet)
  registry.registerAll(new MemoryUsageGaugeSet)
  registry.registerAll(new BufferPoolMetricSet(ManagementFactory.getPlatformMBeanServer))
  registry.registerAll(new ThreadStatesGaugeSet)
  registry.registerAll(new ClassLoadingGaugeSet)

  private val reportInterval = KyuubiSparkUtil.timeStringAsMs(conf.get(METRICS_REPORT_INTERVAL))

  private val reporter: Array[Closeable] =
    conf.get(METRICS_REPORTER).split(",").map(_.trim.toUpperCase).flatMap {
    case "CONSOLE" =>
      val reporter = ConsoleReporter.forRegistry(registry)
        .convertRatesTo(TimeUnit.SECONDS)
        .convertDurationsTo(TimeUnit.MILLISECONDS)
        .build()
      reporter.start(reportInterval, TimeUnit.MILLISECONDS)
      Some(reporter)
    case "JMX" =>
      val reporter = JmxReporter.forRegistry(registry)
        .convertRatesTo(TimeUnit.SECONDS)
        .convertDurationsTo(TimeUnit.MILLISECONDS)
        .build()
      reporter.start()
      Some(reporter)
    case "JSON" =>
      val reporter = new JsonFileReporter(conf, registry)
      reporter.start()
      Some(reporter)
    case other =>
      warn(s"$other as a metrics report is not support yet")
      None
  }

  def registerGauge[T](name: String, value: => T, default: T): Unit = {
    registry.register(MetricRegistry.name(name), new Gauge[T] {
      override def getValue: T = Option(value).getOrElse(default)
    })
  }

  def close(): Unit = {
    reporter.foreach(_.close())
  }

  val OPEN_CONNECTIONS: Counter = registry.counter(MetricRegistry.name("open_connections"))
  val OPEN_OPERATIONS: Counter = registry.counter(MetricRegistry.name("open_operations"))
  val TOTAL_CONNECTIONS: Counter = registry.counter(MetricRegistry.name("total_connections"))
  val RUNNING_QUERIES: Counter = registry.counter(MetricRegistry.name("running_queries"))
  val ERROR_QUERIES: Counter = registry.counter(MetricRegistry.name("error_queries"))
  val TOTAL_QUERIES: Counter = registry.counter(MetricRegistry.name("total_queries"))
}

object MetricsSystem {

  private var maybeSystem: Option[MetricsSystem] = None

  def init(conf: SparkConf): Option[MetricsSystem] = {
    if (conf.get(METRICS_ENABLE).toBoolean) {
      val system = new MetricsSystem(conf)
      maybeSystem = Some(system)
      maybeSystem
    } else {
      None
    }
  }

  def get: Option[MetricsSystem] = maybeSystem

  def close(): Unit = {
    maybeSystem.foreach(_.close())
    maybeSystem = None
  }
} 
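A hypothetical usage sketch for the class above, assuming a SparkConf named conf with METRICS_ENABLE set to true; it registers a heap gauge through the registerGauge helper and bumps one of the predefined counters.

MetricsSystem.init(conf).foreach { ms =>
  // value is a by-name parameter, so the gauge re-reads heap usage on every poll
  ms.registerGauge("heap_used",
    java.lang.management.ManagementFactory.getMemoryMXBean.getHeapMemoryUsage.getUsed, 0L)
  ms.OPEN_CONNECTIONS.inc()
}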
Example 2
Source File: MetricsExtension.scala    From service-container   with Apache License 2.0
package com.github.vonnagy.service.container.metrics

import java.lang.management.ManagementFactory

import akka.actor._
import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.jvm.{BufferPoolMetricSet, GarbageCollectorMetricSet, MemoryUsageGaugeSet, ThreadStatesGaugeSet}

class MetricsExtension(extendedSystem: ExtendedActorSystem) extends Extension {

  // Allow access to the extended system
  val system = extendedSystem
  // The application wide metrics registry.
  val metricRegistry = new MetricRegistry()

  // Register the Jvm metrics
  val srv = ManagementFactory.getPlatformMBeanServer
  metricRegistry.register("jvm.buffer-pool", new BufferPoolMetricSet(srv))
  metricRegistry.register("jvm.gc", new GarbageCollectorMetricSet)
  metricRegistry.register("jvm.memory", new MemoryUsageGaugeSet)
  metricRegistry.register("jvm.thread", new ThreadStatesGaugeSet)
}

object Metrics extends ExtensionId[MetricsExtension]
with ExtensionIdProvider {

  //The lookup method is required by ExtensionIdProvider,
  // so we return ourselves here, this allows us
  // to configure our extension to be loaded when
  // the ActorSystem starts up
  override def lookup = Metrics

  //This method will be called by Akka
  // to instantiate our Extension
  override def createExtension(system: ExtendedActorSystem) = new MetricsExtension(system)

  def apply()(implicit system: ActorSystem): MetricsExtension = system.registerExtension(this)

} 
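A usage sketch: accessing the extension through its ExtensionId materializes it (and the JVM metric sets it registers) on first use.

import akka.actor.ActorSystem

implicit val system: ActorSystem = ActorSystem("metrics-demo")
val registry = Metrics().metricRegistry // registers the extension on first access
registry.counter("requests").inc()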
Example 3
Source File: SoakMain.scala    From akka-kubernetes-tests   with Apache License 2.0
package akka.kubernetes.soak

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.http.scaladsl.Http
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.remote.RARP
import akka.stream.ActorMaterializer
import com.sun.management.OperatingSystemMXBean
import scala.concurrent.duration._
import akka.util.PrettyDuration._

object SoakMain extends App {

  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  val log = system.log

  import java.lang.management.ManagementFactory
  log.info("Java version: {}", sys.props("java.version"))
  log.info("Cores: " + Runtime.getRuntime.availableProcessors)
  log.info("Total Memory: " + Runtime.getRuntime.totalMemory / 1000000 + "Mb")
  log.info("Max Memory: " + Runtime.getRuntime.maxMemory / 1000000 + "Mb")
  log.info("Free Memory: " + Runtime.getRuntime.freeMemory / 1000000 + "Mb")

  val memorySize =
    ManagementFactory.getOperatingSystemMXBean.asInstanceOf[OperatingSystemMXBean].getTotalPhysicalMemorySize
  log.info("RAM: " + memorySize / 1000000 + "Mb")

  log.info("JAVA env vars: {}", sys.env.filterKeys(_.contains("JAVA")))
  log.info("JVM env vars: {}", sys.env.filterKeys(_.contains("JVM")))

  val management = AkkaManagement(system).start()
  val bootstrapStart = System.nanoTime()
  val bootstrap = ClusterBootstrap(system).start()

  val listeningOn = RARP(system).provider.getDefaultAddress.host.getOrElse("0.0.0.0")
  log.info("Listening on {}", listeningOn)

  Cluster(system).registerOnMemberUp({
    val joiningTime = (System.nanoTime() - bootstrapStart).nano
    system.actorOf(PingPong.serverProps(), "server")
    val client = system.actorOf(PingPong.clientProps(joiningTime), "client")
    val clusterStats = new StatsEndpoint(system, client)
    log.info("Cluster member is up! Starting tests and binding http server. Joining time: {}", joiningTime.pretty)
    Http().bindAndHandle(clusterStats.route, listeningOn, 8080)
  })

} 
Example 4
Source File: BootstrapInfo.scala    From cloudflow   with Apache License 2.0
package cloudflow.streamlets

import java.lang.management.ManagementFactory
import collection.JavaConverters._

import com.typesafe.config._

object BootstrapInfo {
  private def getGCInfo: List[(String, javax.management.ObjectName)] = {
    val gcMxBeans = ManagementFactory.getGarbageCollectorMXBeans()
    gcMxBeans.asScala.map(b ⇒ (b.getName, b.getObjectName)).toList
  }

  def box(str: String): String =
    if ((str == null) || (str.isEmpty)) ""
    else {
      val line = s"""+${"-" * 80}+"""
      s"$line\n$str\n$line"
    }

  private def prettyPrintConfig(c: Config): String =
    c.root
      .render(
        ConfigRenderOptions
          .concise()
          .setFormatted(true)
          .setJson(false)
      )

  private def getJVMRuntimeParameters: String = {
    val runtime = Runtime.getRuntime
    import runtime._

    s"""
     |Available processors    : $availableProcessors
     |Free Memory in the JVM  : $freeMemory
     |Max Memory JVM can use  : $maxMemory
     |Total Memory in the JVM : $totalMemory
    """.stripMargin
  }

  // TODO move this, this is Akka specific?
  def startRunnerMessage(blockingIODispatcherConfig: Config,
                         dispatcherConfig: Config,
                         deploymentConfig: Config,
                         streamletConfig: Config): String =
    s"""
      |\n${box("JVM Resources")}
      |${getJVMRuntimeParameters}
      |\n${box("Akka Deployment Config")}
      |\n${prettyPrintConfig(deploymentConfig)}
      |\n${box("Akka Default Blocking IO Dispatcher Config")}
      |\n${prettyPrintConfig(blockingIODispatcherConfig)}
      |\n${box("Akka Default Dispatcher Config")}
      |\n${prettyPrintConfig(dispatcherConfig)}
      |\n${box("Streamlet Config")}
      |\n${prettyPrintConfig(streamletConfig)}
      |\n${box("GC Type")}
      |\n${getGCInfo}
      """.stripMargin
} 
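getGCInfo above only captures bean names; for reference, the same garbage-collector MX beans also expose collection counts and cumulative times, as in this standalone sketch:

import java.lang.management.ManagementFactory
import scala.collection.JavaConverters._

ManagementFactory.getGarbageCollectorMXBeans.asScala.foreach { gc =>
  println(s"${gc.getName}: ${gc.getCollectionCount} collections, ${gc.getCollectionTime} ms total")
}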
Example 5
Source File: NoWellKnownActorsSpec.scala    From squbs   with Apache License 2.0
package org.squbs.actorregistry

import java.lang.management.ManagementFactory
import javax.management.ObjectName

import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FunSpecLike, Matchers}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.JMX.prefix
import org.squbs.unicomplex._

object NoWellKnownActorsSpec {

  val classPaths = Array(getClass.getClassLoader.getResource("classpaths/NoWellKnownActorsCube").getPath)

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = ActorRegistryNoWellKnownActorsSpec
       |  ${JMX.prefixConfig} = true
       |}
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()
}

class NoWellKnownActorsSpec extends TestKit(NoWellKnownActorsSpec.boot.actorSystem)
  with ImplicitSender with FunSpecLike with Matchers with BeforeAndAfterAll {

  import NoWellKnownActorsSpec._

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  describe("ActorRegistry") {
    it ("should initialize even if there is no well-known actor in the classpath") {
      awaitAssert {
        boot.started shouldBe true
        Unicomplex(system).uniActor ! SystemState
        expectMsg(Active)
      }
    }

    it ("should show well-known actor count as zero") {
      val o = new ObjectName(prefix(boot.actorSystem) + "org.squbs.unicomplex:type=ActorRegistry")
      val count = ManagementFactory.getPlatformMBeanServer.getAttribute(o, "Count").asInstanceOf[Int]
      count should be (0)
    }
  }
} 
Example 6
Source File: ActorRegistryBean.scala    From squbs   with Apache License 2.0
package org.squbs.actorregistry

import java.lang.management.ManagementFactory
import javax.management.MXBean

import akka.actor.{ActorContext, ActorRef}
import org.squbs.unicomplex.JMX._



private[actorregistry] object ActorRegistryBean {
  val Pattern  = "org.squbs.unicomplex:type=ActorRegistry,name="
  val Total = Pattern + "*"

  def objName(actor: ActorRef)(implicit context: ActorContext) = prefix + Pattern + actor.path.toString.split(s"${actor.path.root}user/").mkString("")
  def totalBeans(implicit context: ActorContext) = ManagementFactory.getPlatformMBeanServer.queryNames(prefix + Total, null)
}

@MXBean
private[actorregistry] trait ActorRegistryMXBean {
  def getPath : String
  def getActorMessageTypeList: java.util.List[String]
}

@MXBean
private[actorregistry] trait ActorRegistryConfigMXBean {
  def getCount : Int
  def getTimeout: Int
}

private[actorregistry] class ActorRegistryConfigBean(timeout: Int, implicit val context: ActorContext) extends ActorRegistryConfigMXBean {
  def getCount : Int = ActorRegistryBean.totalBeans.size
  def getTimeout: Int = timeout
} 
Example 7
Source File: ResolverRegistryJMXSpec.scala    From squbs   with Apache License 2.0
package org.squbs.resolver

import java.lang.management.ManagementFactory
import javax.management.{InstanceNotFoundException, ObjectName}
import javax.management.openmbean.CompositeData

import akka.actor.ActorSystem
import akka.testkit.TestKit
import org.scalatest.{FlatSpecLike, Matchers}

import scala.language.postfixOps

class ResolverRegistryJMXSpec extends TestKit(ActorSystem("ResolverRegistryJMXSpec"))
with FlatSpecLike with Matchers {

  private val oName = ObjectName.getInstance(s"org.squbs.configuration.${system.name}:type=ResolverRegistry")

  it should "not be registered at all when not accessed" in {
    an [InstanceNotFoundException] should be thrownBy {
      ManagementFactory.getPlatformMBeanServer.getAttribute(oName, "ResolverInfo").
        asInstanceOf[Array[CompositeData]]
    }
  }

  it should "not list any endpoint resolvers when not registered" in {
    ResolverRegistry(system)
    val resolvers = ManagementFactory.getPlatformMBeanServer.getAttribute(oName, "ResolverInfo").
      asInstanceOf[Array[CompositeData]]

    resolvers should have length 0
  }

  it should "list all resolvers with position" in {

    val dummyLocalhostResolver = new DummyLocalhostResolver
    val dummyServiceEndpointResolver = new DummyServiceResolver

    ResolverRegistry(system).register(dummyLocalhostResolver)
    ResolverRegistry(system).register(dummyServiceEndpointResolver)

    val resolvers = ManagementFactory.getPlatformMBeanServer.getAttribute(oName, "ResolverInfo").
      asInstanceOf[Array[CompositeData]]

    resolvers should have length 2
    resolvers(0).get("position") shouldBe 0
    resolvers(0).get("name") shouldEqual dummyServiceEndpointResolver.name
    resolvers(0).get("className") shouldEqual dummyServiceEndpointResolver.getClass.getName

    resolvers(1).get("position") shouldBe 1
    resolvers(1).get("name") shouldEqual dummyLocalhostResolver.name
    resolvers(1).get("className") shouldEqual dummyLocalhostResolver.getClass.getName
  }
} 
Example 8
Source File: CircuitBreakerStateSpec.scala    From squbs   with Apache License 2.0
package org.squbs.streams.circuitbreaker

import java.lang.management.ManagementFactory
import javax.management.ObjectName

import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.{FlatSpecLike, Matchers}
import org.squbs.streams.circuitbreaker.impl.AtomicCircuitBreakerState

import scala.language.postfixOps

class CircuitBreakerStateSpec extends TestKit(ActorSystem("CircuitBreakerStateSpec")) with FlatSpecLike with Matchers {

  implicit val scheduler = system.scheduler
  import system.dispatcher

  import scala.concurrent.duration._

  it should "use default exponential backoff settings" in {
    AtomicCircuitBreakerState(
      "params-with-default-exponential-backoff",
      1,
      50.milliseconds,
      20.milliseconds)

    assertJmxValue("params-with-default-exponential-backoff", "MaxFailures", 1)
    assertJmxValue("params-with-default-exponential-backoff", "CallTimeout", "50 milliseconds")
    assertJmxValue("params-with-default-exponential-backoff", "ResetTimeout", "20 milliseconds")
    assertJmxValue("params-with-default-exponential-backoff", "MaxResetTimeout", "36500 days")
    assertJmxValue("params-with-default-exponential-backoff", "ExponentialBackoffFactor", 1.0)
  }

  it should "create circuit breaker state with provided exponential backoff settings" in {
    AtomicCircuitBreakerState(
      "params-with-custom-exponential-backoff",
      1,
      50.milliseconds,
      20.milliseconds,
      2.minutes,
      16.0)
    assertJmxValue("params-with-custom-exponential-backoff", "MaxFailures", 1)
    assertJmxValue("params-with-custom-exponential-backoff", "CallTimeout", "50 milliseconds")
    assertJmxValue("params-with-custom-exponential-backoff", "ResetTimeout", "20 milliseconds")
    assertJmxValue("params-with-custom-exponential-backoff", "MaxResetTimeout", "2 minutes")
    assertJmxValue("params-with-custom-exponential-backoff", "ExponentialBackoffFactor", 16.0)
  }

  it should "create circuit breaker state from configuration" in {
    val config = ConfigFactory.parseString(
      """
        |max-failures = 1
        |call-timeout = 50 ms
        |reset-timeout = 20 ms
        |max-reset-timeout = 1 minute
        |exponential-backoff-factor = 16.0
      """.stripMargin)

    AtomicCircuitBreakerState("from-config", config)
    assertJmxValue("from-config", "MaxFailures", 1)
    assertJmxValue("from-config", "CallTimeout", "50 milliseconds")
    assertJmxValue("from-config", "ResetTimeout", "20 milliseconds")
    assertJmxValue("from-config", "MaxResetTimeout", "1 minute")
    assertJmxValue("from-config", "ExponentialBackoffFactor", 16.0)
  }

  it should "fallback to default values when configuration is empty" in {
    AtomicCircuitBreakerState("empty-config", ConfigFactory.empty())
    assertJmxValue("empty-config", "MaxFailures", 5)
    assertJmxValue("empty-config", "CallTimeout", "1 second")
    assertJmxValue("empty-config", "ResetTimeout", "5 seconds")
    assertJmxValue("empty-config", "MaxResetTimeout", "36500 days")
    assertJmxValue("empty-config", "ExponentialBackoffFactor", 1.0)
  }

  def assertJmxValue(name: String, key: String, expectedValue: Any) = {
    val oName = ObjectName.getInstance(
      s"org.squbs.configuration:type=squbs.circuitbreaker,name=${ObjectName.quote(name)}")
    val actualValue = ManagementFactory.getPlatformMBeanServer.getAttribute(oName, key)
    actualValue shouldEqual expectedValue
  }
} 
Example 9
Source File: EnvironmentResolverRegistryJMXSpec.scala    From squbs   with Apache License 2.0
package org.squbs.env

import java.lang.management.ManagementFactory
import javax.management.{InstanceNotFoundException, ObjectName}
import javax.management.openmbean.CompositeData

import akka.actor.ActorSystem
import akka.testkit.TestKit
import org.scalatest.{FlatSpecLike, Matchers}

import scala.language.postfixOps

class EnvironmentResolverRegistryJMXSpec extends TestKit(ActorSystem("EnvironmentResolverRegistryJMXSpec"))
with FlatSpecLike with Matchers {

  val oName = ObjectName.getInstance(s"org.squbs.configuration.${system.name}:type=EnvironmentResolverRegistry")

  it should "not be registered if not accessed at all" in {
    an [InstanceNotFoundException] should be thrownBy {
      ManagementFactory.getPlatformMBeanServer.getAttribute(oName, "EnvironmentResolverInfo").
        asInstanceOf[Array[CompositeData]]
    }
  }

  it should "not list any environment resolvers when not registered but accessed" in {
    EnvironmentResolverRegistry(system).resolve
    val resolvers = ManagementFactory.getPlatformMBeanServer.getAttribute(oName, "EnvironmentResolverInfo").
      asInstanceOf[Array[CompositeData]]

    resolvers should have length 0
  }

  it should "list all environment resolvers with position" in {
    EnvironmentResolverRegistry(system).register(DummyProdEnvironmentResolver)
    EnvironmentResolverRegistry(system).register(DummyQAEnvironmentResolver)

    val resolvers = ManagementFactory.getPlatformMBeanServer.getAttribute(oName, "EnvironmentResolverInfo").
      asInstanceOf[Array[CompositeData]]

    resolvers should have length 2
    resolvers(0).get("position") shouldBe 0
    resolvers(0).get("name") shouldEqual DummyQAEnvironmentResolver.name
    resolvers(0).get("className") shouldEqual DummyQAEnvironmentResolver.getClass.getName

    resolvers(1).get("position") shouldBe 1
    resolvers(1).get("name") shouldEqual DummyProdEnvironmentResolver.name
    resolvers(1).get("className") shouldEqual DummyProdEnvironmentResolver.getClass.getName
  }
} 
Example 10
Source File: Environment.scala    From squbs   with Apache License 2.0
package org.squbs.env

import java.beans.ConstructorProperties
import java.lang.management.ManagementFactory
import java.util
import javax.management.{ObjectName, MXBean}

import akka.actor._
import com.typesafe.scalalogging.LazyLogging

import scala.beans.BeanProperty

abstract class Environment {
  def name: String

  def lowercaseName: String = name.toLowerCase
}

case object Default extends Environment {
  override val name: String = "DEFAULT"

  val value: Environment = this
}

case class RawEnv(name: String) extends Environment

trait EnvironmentResolver {
  def name: String

  def resolve: Environment
}

class EnvironmentResolverRegistryExtension(system: ExtendedActorSystem) extends Extension with LazyLogging {
  private[env] var environmentResolvers = List.empty[EnvironmentResolver]

  def register(resolver: EnvironmentResolver): Unit = {
    environmentResolvers.find(_.name == resolver.name) match {
      case None => environmentResolvers = resolver :: environmentResolvers
      case Some(oldResolver) =>
        logger.warn("Env Resolver:" + oldResolver.name + " already registered, skipped!")
    }
  }

  def unregister(name: String): Unit = {
    val originalLength = environmentResolvers.length
    environmentResolvers = environmentResolvers.filterNot(_.name == name)
    if(environmentResolvers.length == originalLength)
      logger.warn("Env Resolver:" + name + " cannot be found, skipping unregister!")
  }

  def resolve: Environment = {
    val resolvedEnv = environmentResolvers.view.map(_.resolve).collectFirst {
      case env if env != Default => env
    } getOrElse Default

    logger.debug(s"The environment is: " + resolvedEnv.lowercaseName)
    resolvedEnv
  }
}

object EnvironmentResolverRegistry extends ExtensionId[EnvironmentResolverRegistryExtension] with ExtensionIdProvider {

  override def lookup() = EnvironmentResolverRegistry

  override def createExtension(system: ExtendedActorSystem): EnvironmentResolverRegistryExtension = {
    val mBeanServer = ManagementFactory.getPlatformMBeanServer
    val beanName = new ObjectName(s"org.squbs.configuration.${system.name}:type=EnvironmentResolverRegistry")
    if (!mBeanServer.isRegistered(beanName))
      mBeanServer.registerMBean(EnvironmentResolverRegistryMXBeanImpl(system), beanName)
    new EnvironmentResolverRegistryExtension(system)
  }

  override def get(system: ActorSystem): EnvironmentResolverRegistryExtension = super.get(system)
}

// $COVERAGE-OFF$
case class EnvironmentResolverInfo @ConstructorProperties(
  Array("position", "name", "className"))(@BeanProperty position: Int,
                                          @BeanProperty name: String,
                                          @BeanProperty className: String)

// $COVERAGE-ON$

@MXBean
trait EnvironmentResolverRegistryMXBean {
  def getEnvironmentResolverInfo: java.util.List[EnvironmentResolverInfo]
}

case class EnvironmentResolverRegistryMXBeanImpl(system: ActorSystem) extends EnvironmentResolverRegistryMXBean {

  override def getEnvironmentResolverInfo: util.List[EnvironmentResolverInfo] = {
    import scala.collection.JavaConverters._
    EnvironmentResolverRegistry(system).environmentResolvers.zipWithIndex.map { case(resolver, position) =>
      EnvironmentResolverInfo(position, resolver.name, resolver.getClass.getName)
    }.asJava
  }
} 
Example 11
Source File: MBeanUtil.scala    From squbs   with Apache License 2.0
package org.squbs.admin

import java.lang.management.ManagementFactory
import javax.management.{InstanceNotFoundException, ObjectName}
import javax.management.openmbean.{CompositeData, TabularData}

import com.fasterxml.jackson.core.util.{DefaultPrettyPrinter, DefaultIndenter}
import org.json4s.JsonAST._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._

import scala.collection.JavaConverters._
import scala.util.Try

object MBeanUtil {

  val server = ManagementFactory.getPlatformMBeanServer

  def allObjectNames: List[String] = {
    val beans = server.queryMBeans(null, null)
    beans.asScala.map { bean =>
      bean.getObjectName.toString
    } .toList.sorted
  }

  def asJSON(beanName: String, exclusions: Set[String] = Set.empty): Option[String] = {
    val objectName = new ObjectName(beanName)
    val c = new MBean2JSON(exclusions)
    try {
      val fields = server.getMBeanInfo(objectName).getAttributes.toList.map { _.getName}.sorted.collect {
        case name if !(exclusions contains name) => name -> c.eval {
          server.getAttribute(objectName, name)
        }
      }
      Option(prettyPrint(render(fields)))
    } catch {
      case e: InstanceNotFoundException => None
    }
  }

  private def prettyPrint(v: JValue) = {
    val printer = new DefaultPrettyPrinter().withArrayIndenter(new DefaultIndenter)
    mapper.writer(printer).writeValueAsString(v)
  }

  class MBean2JSON(exclusions: Set[String]) {

    private def toJValue(valueOption: Option[Any]): JValue = valueOption match {
      case Some(value) => value match {
        case table: TabularData =>
          val list = table.values.toArray
          optionJObject(list) getOrElse {
            toJArray(list)
          }
        case list: Array[_] => optionJObject(list) getOrElse {
          toJArray(list)
        }
        case c: CompositeData => toJObject(c)
        case v: java.lang.Double => v.doubleValue
        case v: java.lang.Float => v.floatValue
        case v: java.lang.Number => v.longValue
        case v: java.lang.Boolean => v.booleanValue
        case v => v.toString
      }
      case None => JNull
    }

    private def toJArray(list: Array[_]) = JArray(list.toList map { v => toJValue(Option(v)) })

    private def optionJObject(list: Array[_]) = {
      try {
        val kc = list.map {
          case c: CompositeData =>
            val keySet = c.getCompositeType.keySet
            if (keySet.size == 2 && keySet.contains("key") && keySet.contains("value"))
              c.get("key").asInstanceOf[String] -> c
            else throw new ClassCastException("CompositeData member is not a key/value pair")
          case _ => throw new ClassCastException("Non-CompositeData value")
        }
        val fields = kc.toList collect {
          case (k, c) if !(exclusions contains k) => k -> eval { c.get("value") }
        }
        Some(JObject(fields))
      } catch {
        case e: ClassCastException => None
      }
    }

    private def toJObject(c: CompositeData) =
      JObject(c.getCompositeType.keySet.asScala.toList.sorted collect {
        case key if !(exclusions contains key) => key -> eval { c.get(key)}
      })

    // Note: Try {...} .toOption can give you a Some(null), especially with Java APIs.
    // Need to flatMap with Option again to ensure Some(null) is None.
    def eval(fn: => Any) = toJValue(Try { fn } .toOption flatMap { v => Option(v) })
  }
} 
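A usage sketch against beans that are always present on the platform server:

// List every registered MBean by object name...
MBeanUtil.allObjectNames.foreach(println)
// ...or render one bean as pretty-printed JSON (None if it is not registered)
MBeanUtil.asJSON("java.lang:type=Memory").foreach(println)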
Example 12
Source File: ProtoBuffTest.scala    From c4proto   with Apache License 2.0
package ee.cone.c4actor

import java.lang.management.ManagementFactory
import java.util
import java.util.concurrent.{Callable, Executors}

import ee.cone.c4actor.AnyAdapter._
import ee.cone.c4actor.AnyOrigProtocol.N_AnyOrig
import ee.cone.c4actor.ProtoBuffTestProtocol.{D_TestOrig, D_TestOrigForDecode}
import ee.cone.c4di.{c4, c4app}
import ee.cone.c4proto._

import scala.collection.immutable
import scala.util.Random

trait ProtoBuffTestProtocolAppBase

@protocol("ProtoBuffTestProtocolApp") object ProtoBuffTestProtocol {

  @Id(0x1) case class D_TestOrig(
    @Id(0x2) srcId: String,
    @Id(0x3) list: List[String],
    @Id(0x4) byteStr: List[N_AnyOrig]
  )

  @Id(0x5) case class D_TestOrigForDecode(
    @Id(0x6) srcId: String,
    @Id(0x7) number: Long
  )

}

@c4app class SeqProtoBuffTestAppBase extends ProtoBuffTestApp
@c4app class ParProtoBuffTestAppBase extends ProtoBuffTestApp

trait ProtoBuffTestApp
  extends VMExecutionApp with ExecutableApp
    with BaseApp with ProtoApp
    with ProtoBuffTestProtocolApp
    with AnyOrigProtocolApp



class SerializationRunnable(pid: Int, testOrigs: Seq[D_TestOrigForDecode], qAdapterRegistry: QAdapterRegistry) extends Callable[Long] {

  def call(): Long = {
    TestCode.test(testOrigs, qAdapterRegistry)
  }
}

object TestCode {
  def test(testOrigs: Seq[D_TestOrigForDecode], qAdapterRegistry: QAdapterRegistry): Long = {
    val time = System.currentTimeMillis()
    val encoded: immutable.Seq[N_AnyOrig] = testOrigs.map(encode(qAdapterRegistry)(_))
    val testOrigsss: immutable.Seq[D_TestOrig] = encoded.zipWithIndex.map { case (a, b) => D_TestOrig(b.toString, a.toString.split(",").toList, List(a)) }
    val encoded2: immutable.Seq[N_AnyOrig] = testOrigsss.map(encode(qAdapterRegistry)(_))
    val decoded: immutable.Seq[D_TestOrig] = encoded2.map(decode[D_TestOrig](qAdapterRegistry))
    // assert (testOrigsss == decoded)
    val time2 = System.currentTimeMillis()
    time2 - time
  }
} 
Example 13
Source File: ProgressObserverImpl.scala    From c4proto   with Apache License 2.0
package ee.cone.c4actor

import java.lang.management.ManagementFactory
import java.nio.charset.StandardCharsets.UTF_8
import java.nio.file.{Files, Path, Paths}
import java.time.Instant
import java.util.UUID

import com.typesafe.scalalogging.LazyLogging
import ee.cone.c4actor.QProtocol.S_Firstborn
import ee.cone.c4actor.Types.{NextOffset, SrcId}
import ee.cone.c4assemble.Types.{Each, Values}
import ee.cone.c4assemble.{Single, c4assemble}
import ee.cone.c4di.c4

import scala.annotation.tailrec
import scala.concurrent.Future

@c4("ServerCompApp") final class ProgressObserverFactoryImpl(
  inner: TxObserver, config: ListConfig,
  execution: Execution, getToStart: DeferredSeq[Executable]
) extends ProgressObserverFactory {
  def create(endOffset: NextOffset): Observer[RichContext] = {
    val lateExObserver: Observer[RichContext]  = new LateExecutionObserver(execution,getToStart.value,inner.value)
    val readyObserver = Single.option(config.get("C4ROLLING")).fold(lateExObserver)(path=>
      new ReadyObserverImpl(lateExObserver, Paths.get(path), 0L)
    )
    new ProgressObserverImpl(readyObserver,endOffset)
  }
}

// states:
//   loading
//   loading ready
//   master
// trans:
//   loading -> loading
//   loading -> loading ready
//   loading ready -> loading ready
//   loading ready -> master

class ProgressObserverImpl(inner: Observer[RichContext], endOffset: NextOffset, until: Long=0) extends Observer[RichContext] with LazyLogging {
  def activate(rawWorld: RichContext): Observer[RichContext] =
    if (rawWorld.offset < endOffset) {
      val now = System.currentTimeMillis
      if(now < until) this else {
        logger.debug(s"loaded ${rawWorld.offset}/$endOffset")
        new ProgressObserverImpl(inner, endOffset, now+1000)
      }
    } else {
      logger.info(s"Stats OK -- loaded ALL/$endOffset -- uptime ${ManagementFactory.getRuntimeMXBean.getUptime}ms")
      inner.activate(rawWorld)
    }
}

class ReadyObserverImpl(inner: Observer[RichContext], path: Path, until: Long=0) extends Observer[RichContext] with LazyLogging {
  private def ignoreTheSamePath(path: Path): Unit = ()
  def activate(rawWorld: RichContext): Observer[RichContext] = {
    if(until == 0) ignoreTheSamePath(Files.write(path.resolve("c4is-ready"),Array.empty[Byte]))
    val now = System.currentTimeMillis
    if(now < until) this
    else if(Files.exists(path.resolve("c4is-master"))) {
      logger.info(s"becoming master")
      inner.activate(rawWorld)
    } else {
      logger.debug(s"ready/waiting")
      new ReadyObserverImpl(inner, path, now+1000)
    }
  }

}


@c4("ServerCompApp") final class LocalElectorDeath(config: ListConfig, execution: Execution) extends Executable with Early {
  def run(): Unit =
    for(path <- config.get("C4ELECTOR_PROC_PATH")) iteration(Paths.get(path))
  @tailrec private def iteration(path: Path): Unit = {
    if(Files.notExists(path)) execution.complete()
    Thread.sleep(1000)
    iteration(path)
  }
}

////

@c4("ServerCompApp") final class ServerExecutionFilter(inner: ExecutionFilter)
  extends ExecutionFilter(e=>inner.check(e) && e.isInstanceOf[Early])

class LateExecutionObserver(
  execution: Execution, toStart: Seq[Executable], inner: Observer[RichContext]
) extends Observer[RichContext] with LazyLogging {
  def activate(world: RichContext): Observer[RichContext] = {
    logger.info(s"tracking ${toStart.size} late services")
    toStart.filterNot(_.isInstanceOf[Early]).foreach(f => execution.fatal(Future(f.run())(_)))
    inner.activate(world)
  }
} 
Example 14
Source File: ResultTask.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.scheduler

import java.io._
import java.lang.management.ManagementFactory
import java.nio.ByteBuffer
import java.util.Properties

import org.apache.spark._
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD


private[spark] class ResultTask[T, U](
    stageId: Int,
    stageAttemptId: Int,
    taskBinary: Broadcast[Array[Byte]],
    partition: Partition,
    locs: Seq[TaskLocation],
    val outputId: Int,
    localProperties: Properties,
    serializedTaskMetrics: Array[Byte],
    jobId: Option[Int] = None,
    appId: Option[String] = None,
    appAttemptId: Option[String] = None)
  extends Task[U](stageId, stageAttemptId, partition.index, localProperties, serializedTaskMetrics,
    jobId, appId, appAttemptId)
  with Serializable {

  @transient private[this] val preferredLocs: Seq[TaskLocation] = {
    if (locs == null) Nil else locs.toSet.toSeq
  }

  override def runTask(context: TaskContext): U = {
    // Deserialize the RDD and the func using the broadcast variables.
    val threadMXBean = ManagementFactory.getThreadMXBean
    val deserializeStartTime = System.currentTimeMillis()
    val deserializeStartCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime
    } else 0L
    val ser = SparkEnv.get.closureSerializer.newInstance()
    val (rdd, func) = ser.deserialize[(RDD[T], (TaskContext, Iterator[T]) => U)](
      ByteBuffer.wrap(taskBinary.value), Thread.currentThread.getContextClassLoader)
    _executorDeserializeTime = System.currentTimeMillis() - deserializeStartTime
    _executorDeserializeCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime - deserializeStartCpuTime
    } else 0L

    func(context, rdd.iterator(partition, context))
  }

  // This is only callable on the driver side.
  override def preferredLocations: Seq[TaskLocation] = preferredLocs

  override def toString: String = "ResultTask(" + stageId + ", " + partitionId + ")"
} 
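The timing code above combines wall-clock and per-thread CPU measurements; the same pattern isolated as a standalone sketch (doWork is a hypothetical stand-in workload):

import java.lang.management.ManagementFactory

val threadMXBean = ManagementFactory.getThreadMXBean

def doWork(): Unit = { var s = 0L; var i = 0; while (i < 10000000) { s += i; i += 1 } } // stand-in workload

val startCpu  = if (threadMXBean.isCurrentThreadCpuTimeSupported) threadMXBean.getCurrentThreadCpuTime else 0L
val startWall = System.currentTimeMillis()
doWork()
// getCurrentThreadCpuTime is in nanoseconds and counts only this thread's on-CPU time,
// unlike the wall-clock delta, which also includes time spent blocked or descheduled
val cpuNanos   = if (threadMXBean.isCurrentThreadCpuTimeSupported) threadMXBean.getCurrentThreadCpuTime - startCpu else 0L
val wallMillis = System.currentTimeMillis() - startWall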
Example 15
Source File: HengHa.scala    From spark1.52   with Apache License 2.0
package org.apache.sparktest
import java.lang.management.ManagementFactory
import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Props

class Heng(ha: ActorRef) extends Actor {
  def receive = {
    case "start" => ha ! "heng"
    case "ha" =>
      println("哈")
      ha ! "heng"
    case _ => println("heng what?")
  }
}
class Ha extends Actor {
  def receive = {
    case "heng" =>
      println("哼")
      sender ! "ha"
    case _ => println("ha what?")
  }
}

object HengHa {
  def main(args: Array[String]): Unit = {
    // ActorSystem is a heavyweight object that creates 1..N threads, so use one ActorSystem per application
    val system = ActorSystem("HengHaSystem")
    // actorOf creates the Actor
    val ha = system.actorOf(Props[Ha], name = "ha")
    val heng = system.actorOf(Props(new Heng(ha)), name = "heng")

    //heng ! "start"    
    
    //ManagementFactory.getGarbageCollectorMXBeans.map(_.getCollectionTime).sum
  }
} 
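The commented-out line above hints at summing total GC time across all collectors; a compiling version needs the Java-to-Scala collection conversion:

import scala.collection.JavaConverters._
val totalGcTimeMs = ManagementFactory.getGarbageCollectorMXBeans.asScala.map(_.getCollectionTime).sum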
Example 16
Source File: ShuffleMapTask.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.scheduler

import java.lang.management.ManagementFactory
import java.nio.ByteBuffer
import java.util.Properties

import scala.language.existentials

import org.apache.spark._
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.shuffle.ShuffleWriter
import org.apache.spark.storage.BlockManagerId


  def this(partitionId: Int) {
    this(0, 0, null, new Partition { override def index: Int = 0 }, null, new Properties, null)
  }

  @transient private val preferredLocs: Seq[TaskLocation] = {
    if (locs == null) Nil else locs.toSet.toSeq
  }

  var rdd: RDD[_] = null
  var dep: ShuffleDependency[_, _, _] = null

  override def prepTask(): Unit = {
    // Deserialize the RDD using the broadcast variable.
    val threadMXBean = ManagementFactory.getThreadMXBean
    val deserializeStartTime = System.currentTimeMillis()
    val deserializeStartCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime
    } else 0L
    val ser = SparkEnv.get.closureSerializer.newInstance()
    val (_rdd, _dep) = ser.deserialize[(RDD[_], ShuffleDependency[_, _, _])](
      ByteBuffer.wrap(taskBinary.value), Thread.currentThread.getContextClassLoader)
    rdd = _rdd
    dep = _dep
    _executorDeserializeTime = System.currentTimeMillis() - deserializeStartTime
    _executorDeserializeCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime - deserializeStartCpuTime
    } else 0L
  }

  override def runTask(context: TaskContext): MapStatus = {
    if (dep == null || rdd == null) {
      prepTask()
    }

    var writer: ShuffleWriter[Any, Any] = null
    try {
      val manager = SparkEnv.get.shuffleManager
      writer = manager.getWriter[Any, Any](dep.shuffleHandle, partitionId, context)
      writer.write(rdd.iterator(partition, context).asInstanceOf[Iterator[_ <: Product2[Any, Any]]])
      val status = writer.stop(success = true).get
      FutureTaskNotifier.taskCompleted(status, partitionId, dep.shuffleId,
        dep.partitioner.numPartitions, nextStageLocs, metrics.shuffleWriteMetrics, false)
      status
    } catch {
      case e: Exception =>
        try {
          if (writer != null) {
            writer.stop(success = false)
          }
        } catch {
          case e: Exception =>
            log.debug("Could not stop writer", e)
        }
        throw e
    }
  }

  override def preferredLocations: Seq[TaskLocation] = preferredLocs

  override def toString: String = "ShuffleMapTask(%d, %d)".format(stageId, partitionId)
}

object ShuffleMapTask {

  def apply(
      stageId: Int,
      stageAttemptId: Int,
      partition: Partition,
      properties: Properties,
      internalAccumulatorsSer: Array[Byte],
      isFutureTask: Boolean,
      rdd: RDD[_],
      dep: ShuffleDependency[_, _, _],
      nextStageLocs: Option[Seq[BlockManagerId]]): ShuffleMapTask = {

    val smt = new ShuffleMapTask(stageId, stageAttemptId, null, partition, null,
      properties, internalAccumulatorsSer, isFutureTask, nextStageLocs)

    smt.rdd = rdd
    smt.dep = dep
    smt
  }
} 
Example 17
Source File: ResultTask.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.scheduler

import java.io._
import java.lang.management.ManagementFactory
import java.nio.ByteBuffer
import java.util.Properties

import org.apache.spark._
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.rdd.RDD


private[spark] class ResultTask[T, U](
    stageId: Int,
    stageAttemptId: Int,
    taskBinary: Broadcast[Array[Byte]],
    partition: Partition,
    locs: Seq[TaskLocation],
    val outputId: Int,
    localProperties: Properties,
    metrics: TaskMetrics,
    jobId: Option[Int] = None,
    appId: Option[String] = None,
    appAttemptId: Option[String] = None)
  extends Task[U](stageId, stageAttemptId, partition.index, metrics, localProperties, jobId,
    appId, appAttemptId)
  with Serializable {

  @transient private[this] val preferredLocs: Seq[TaskLocation] = {
    if (locs == null) Nil else locs.toSet.toSeq
  }

  override def runTask(context: TaskContext, user: String): U = {
    // Deserialize the RDD and the func using the broadcast variables.
    val threadMXBean = ManagementFactory.getThreadMXBean
    val deserializeStartTime = System.currentTimeMillis()
    val deserializeStartCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime
    } else 0L
    val ser = SparkEnv.get(user).closureSerializer.newInstance()
    val (rdd, func) = ser.deserialize[(RDD[T], (TaskContext, Iterator[T]) => U)](
      ByteBuffer.wrap(taskBinary.value), Thread.currentThread.getContextClassLoader)
    _executorDeserializeTime = System.currentTimeMillis() - deserializeStartTime
    _executorDeserializeCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime - deserializeStartCpuTime
    } else 0L

    func(context, rdd.iterator(partition, context))
  }

  // This is only callable on the driver side.
  override def preferredLocations: Seq[TaskLocation] = preferredLocs

  override def toString: String = "ResultTask(" + stageId + ", " + partitionId + ")"
} 
Example 18
Source File: package.scala    From CM-Well   with Apache License 2.0
package cmwell.util

import javax.management.ObjectName
import java.lang.management.ManagementFactory
import scala.language.implicitConversions


package object jmx {
  implicit def string2objectName(name: String): ObjectName = new ObjectName(name)

  def jmxRegister(ob: Object, obname: ObjectName): Unit = {
    val mServer = ManagementFactory.getPlatformMBeanServer

    if (!mServer.isRegistered(obname)) {
      mServer.registerMBean(ob, obname)
    }
  }
  def jmxRegister(ob: Object): Unit = {
    val objFullName = this.getClass.getCanonicalName
    val (packageName, className) = objFullName.lastIndexOf('.') match {
      case i if i > 0 => (objFullName.take(i), objFullName.takeRight(objFullName.length - (i + 1)))
      case _          => (objFullName, objFullName)
    }
    val oName = new ObjectName(s"$packageName:type=$className")
    jmxRegister(ob, oName)

  }

  def jmxUnRegister(obname: ObjectName) = ManagementFactory.getPlatformMBeanServer.unregisterMBean(obname)
} 
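A usage sketch: thanks to the string2objectName implicit, a plain String can be passed wherever an ObjectName is expected. DemoMBean and Demo are hypothetical names; note the standard MBean convention that the interface for class Demo must be named DemoMBean.

import cmwell.util.jmx._

trait DemoMBean { def getAnswer: Int }                   // hypothetical MBean interface
class Demo extends DemoMBean { def getAnswer: Int = 42 } // name matches the "<Class>MBean" convention

jmxRegister(new Demo, "com.example:type=Demo") // the String is implicitly converted to an ObjectName
jmxUnRegister("com.example:type=Demo")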
Example 19
Source File: CpuProfiler.scala    From graphcool-framework   with Apache License 2.0
package cool.graph.profiling

import java.lang.management.ManagementFactory

import com.sun.management.OperatingSystemMXBean
import cool.graph.metrics.MetricsManager

case class CpuProfiler(metricsManager: MetricsManager) {
  val mxBean = ManagementFactory.getOperatingSystemMXBean match {
    case x: OperatingSystemMXBean =>
      // docs for the bean available at https://docs.oracle.com/javase/8/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html#getSystemCpuLoad--
      Some(x)
    case _ =>
      println("com.sun.management.OperatingSystemMXBean is not available on this JVM. CPU Metrics are therefore not available.")
      None
  }

//  val processCpuLoad = metricsManager.defineGauge("processCpuLoadPercentage")
  val systemCpuLoad = metricsManager.defineGauge("systemCpuLoadPercentage")

  def profile(): Unit = {
    mxBean.foreach { mxBean =>
//      processCpuLoad.set(convertToPercent(mxBean.getProcessCpuLoad))
      systemCpuLoad.set(convertToPercent(mxBean.getSystemCpuLoad))
    }
  }

  def convertToPercent(double: Double): Long = (double * 100).toLong
} 
Example 20
Source File: PidLock.scala    From scaladex   with BSD 3-Clause "New" or "Revised" License
package ch.epfl.scala.index
package data
package util

import java.lang.management.ManagementFactory

import java.nio.file.{Files, Paths}
import java.nio.charset.StandardCharsets

object PidLock {
  def create(prefix: String): Unit = {
    val pid = ManagementFactory.getRuntimeMXBean().getName().split("@").head
    val pidFile = Paths.get(s"$prefix-PID")
    Files.write(pidFile, pid.getBytes(StandardCharsets.UTF_8))
    sys.addShutdownHook {
      Files.delete(pidFile)
    }

    ()
  }
} 
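Usage is a single call at startup; with prefix "server" this writes the current PID to a file named server-PID and deletes it again on JVM shutdown:

PidLock.create("server")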
Example 21
Source File: ScastieFileUtil.scala    From scastie   with Apache License 2.0
package com.olegych.scastie.util

import java.nio.file._
import java.lang.management.ManagementFactory
import java.nio.charset.StandardCharsets

object ScastieFileUtil {
  def slurp(src: Path): Option[String] = {
    if (Files.exists(src)) Some(Files.readAllLines(src).toArray.mkString("\n"))
    else None
  }

  def write(dst: Path, content: String, truncate: Boolean = false, append: Boolean = false): Unit = {
    if (!Files.exists(dst)) {
      Files.write(dst, content.getBytes, StandardOpenOption.CREATE_NEW)
      ()
    } else if (truncate) {
      Files.write(dst, content.getBytes, StandardOpenOption.TRUNCATE_EXISTING)
      ()
    } else if (append) {
      Files.write(dst, content.getBytes, StandardOpenOption.APPEND)
      ()
    }
  }

  def writeRunningPid(): String = {
    val pid = ManagementFactory.getRuntimeMXBean.getName.split("@").head
    val pidFile = Paths.get("RUNNING_PID")
    Files.write(pidFile, pid.getBytes(StandardCharsets.UTF_8))
    sys.addShutdownHook {
      Files.delete(pidFile)
    }
    pid
  }
} 
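Parsing the PID out of RuntimeMXBean.getName (formatted as "pid@host") is the portable pre-Java-9 idiom used here and in the PidLock example above; on Java 9 and later the same value is available directly:

val pid = ProcessHandle.current().pid() // Java 9+, no string parsing needed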
Example 22
Source File: ExecutionConfig.scala    From cuesheet   with Apache License 2.0
package com.kakao.cuesheet

import java.lang.management.ManagementFactory

import com.typesafe.config.{Config, ConfigFactory}

import scala.collection.JavaConversions._
import scala.util.Try

object ExecutionConfig {

  // load the Typesafe config, which contains all information about the spark & deployment environment
  // use System properties config.{resource,file,url} to change the configuration file to load
  // default is application.conf in the classpath root
  val conf: Config = ConfigFactory.load()
  val systemUser: String = Option(sys.props("user.name")).getOrElse("unknown")
  val sparkUser: String = Try(conf.getString("spark.user.name")).getOrElse(systemUser)
  val hadoopUser: String = Try(conf.getString("spark.hadoop.user.name")).getOrElse(sparkUser)

  // set hadoop user name
  sys.props("user.name") = hadoopUser
  sys.props("HADOOP_USER_NAME") = hadoopUser

  
  lazy val mode: DeployMode = {
    if (manager == LOCAL)
      CLIENT
    else
      config.get("spark.deploy.mode").map(_.toLowerCase) match {
        case Some("cluster") => CLUSTER
        case Some("client") => CLIENT
        case _ => CLUSTER
      }
  }

} 
Example 23
Source File: ResultTask.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.scheduler

import java.io._
import java.lang.management.ManagementFactory
import java.nio.ByteBuffer
import java.util.Properties

import org.apache.spark._
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.rdd.RDD


private[spark] class ResultTask[T, U](
    stageId: Int,
    stageAttemptId: Int,
    taskBinary: Broadcast[Array[Byte]],
    partition: Partition,
    locs: Seq[TaskLocation],
    val outputId: Int,
    localProperties: Properties,
    metrics: TaskMetrics,
    jobId: Option[Int] = None,
    appId: Option[String] = None,
    appAttemptId: Option[String] = None)
  extends Task[U](stageId, stageAttemptId, partition.index, metrics, localProperties, jobId,
    appId, appAttemptId)
  with Serializable {

  @transient private[this] val preferredLocs: Seq[TaskLocation] = {
    if (locs == null) Nil else locs.toSet.toSeq
  }

  override def runTask(context: TaskContext): U = {
    // Deserialize the RDD and the func using the broadcast variables.
    val threadMXBean = ManagementFactory.getThreadMXBean
    val deserializeStartTime = System.currentTimeMillis()
    val deserializeStartCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime
    } else 0L
    val ser = SparkEnv.get.closureSerializer.newInstance()
    val (rdd, func) = ser.deserialize[(RDD[T], (TaskContext, Iterator[T]) => U)](
      ByteBuffer.wrap(taskBinary.value), Thread.currentThread.getContextClassLoader)
    _executorDeserializeTime = System.currentTimeMillis() - deserializeStartTime
    _executorDeserializeCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime - deserializeStartCpuTime
    } else 0L

    func(context, rdd.iterator(partition, context))
  }

  // This is only callable on the driver side.
  override def preferredLocations: Seq[TaskLocation] = preferredLocs

  override def toString: String = "ResultTask(" + stageId + ", " + partitionId + ")"
} 
Example 24
Source File: JavaMetricsScreen.scala    From Pi-Akka-Cluster   with Apache License 2.0
package akka_oled

import java.lang.management.ManagementFactory
import java.text.DecimalFormat

import com.sun.management.OperatingSystemMXBean
import org.apache.commons.io.FileUtils

import scala.collection.mutable

trait JavaMetricsScreen {
   def getJavaMetrics(): Array[Array[String]] = {
      val bean = ManagementFactory.getPlatformMXBean(classOf[OperatingSystemMXBean])
      val formatter = new DecimalFormat("#0.00")
      val map = mutable.LinkedHashMap[String, String](
         "Max mem:" -> FileUtils.byteCountToDisplaySize( ManagementFactory.getMemoryMXBean.getHeapMemoryUsage.getMax),
         "Curr mem:" -> FileUtils.byteCountToDisplaySize(ManagementFactory.getMemoryMXBean.getHeapMemoryUsage.getUsed),
         "CPU:" -> (formatter.format(bean.getSystemCpuLoad) + "%"),
         "Threads:" -> ManagementFactory.getThreadMXBean.getThreadCount.toString,
         "Classes:" -> ManagementFactory.getClassLoadingMXBean.getLoadedClassCount.toString)
      map.toArray.map(x => Array(x._1, x._2))
   }
} 
Example 25
Source File: LoggerJMXTest.scala    From airframe   with Apache License 2.0
package wvlet.log
import java.lang.management.ManagementFactory

import javax.management.{Attribute, ObjectName}


class LoggerJMXTest extends Spec {
  override protected def beforeAll = {
    // Make sure the JMX mBean is registered so that it is loadable from the current class loader
    LogEnv.unregisterJMX
    LogEnv.registerJMX
  }

  override protected def afterAll: Unit = {
    LogEnv.unregisterJMX
  }

  def `be registered`: Unit = {
    // Initialize a logger
    val l = Logger.rootLogger

    val mbeanServer = ManagementFactory.getPlatformMBeanServer
    val name        = new ObjectName("wvlet.log:type=Logger")
    assert(mbeanServer.isRegistered(name))

    // Check the default log level
    assert(mbeanServer.getAttribute(name, "DefaultLogLevel").toString == l.getLogLevel.toString)

    val currentLogLevel = l.getLogLevel
    try {
      mbeanServer.setAttribute(name, new Attribute("DefaultLogLevel", "error"))
      assert(l.getLogLevel == LogLevel.ERROR)
    } finally {
      l.setLogLevel(currentLogLevel)
    }
  }

  def `support setting log levels through JMX`: Unit = {
    // Creating JMX proxy is a bit complicated, so just test LoggerJMX impl here
    val current = LoggerJMX.getLogLevel("wvlet.log")
    try {
      LoggerJMX.setLogLevel("wvlet.log", "WARN")
      assert(LoggerJMX.getLogLevel("wvlet.log") == "warn")
    } finally {
      LoggerJMX.setLogLevel("wvlet.log", current)
    }
  }
} 
Example 26
Source File: LogEnv.scala    From airframe   with Apache License 2.0
package wvlet.log
import java.io.PrintStream
import java.lang.management.ManagementFactory

import javax.management.{InstanceAlreadyExistsException, ObjectName}
import wvlet.log.LogFormatter.SourceCodeLogFormatter


  override def getLoggerName(cl: Class[_]): String = {
    var name = cl.getName

    if (name.endsWith("$")) {
      // Remove trailing $ of Scala Object name
      name = name.substring(0, name.length - 1)
    }

    // When the class is an anonymous trait
    if (name.contains("$anon$")) {
      import collection.JavaConverters._
      val interfaces = cl.getInterfaces
      if (interfaces != null && interfaces.length > 0) {
        // Use the first interface name instead of the anonymous name
        name = interfaces(0).getName
      }
    }
    name
  }
  override def scheduleLogLevelScan: Unit = {
    LogLevelScanner.scheduleLogLevelScan
  }
  override def stopScheduledLogLevelScan: Unit = {
    LogLevelScanner.stopScheduledLogLevelScan
  }
  override def scanLogLevels: Unit = {
    LogLevelScanner.scanLogLevels
  }
  override def scanLogLevels(loglevelFileCandidates: Seq[String]): Unit = {
    LogLevelScanner.scanLogLevels(loglevelFileCandidates)
  }

  private def onGraalVM: Boolean = {
    // https://www.graalvm.org/sdk/javadoc/index.html?constant-values.html
    val graalVMFlag = Option(System.getProperty("org.graalvm.nativeimage.kind"))
    graalVMFlag.map(p => p == "executable" || p == "shared").getOrElse(false)
  }

  private val mBeanName = new ObjectName("wvlet.log:type=Logger")

  // Register JMX entry upon start-up
  registerJMX

  override def registerJMX: Unit = {
    if (!onGraalVM) {
      // Register the log level configuration interface to JMX
      val mbeanServer = ManagementFactory.getPlatformMBeanServer
      if (!mbeanServer.isRegistered(mBeanName)) {
        try {
          mbeanServer.registerMBean(LoggerJMX, mBeanName)
        } catch {
          case e: InstanceAlreadyExistsException =>
          // this exception can happen as JMX entries can be initialized by different class loaders while running sbt
        }
      }
    }
  }

  override def unregisterJMX: Unit = {
    if (!onGraalVM) {
      val mbeanServer = ManagementFactory.getPlatformMBeanServer
      if (mbeanServer.isRegistered(mBeanName)) {
        mbeanServer.unregisterMBean(mBeanName)
      }
    }
  }
} 
Example 27
Source File: JMXAgent.scala    From airframe   with Apache License 2.0
package wvlet.airframe.jmx

import java.lang.management.ManagementFactory

import javax.management.remote.{JMXConnector, JMXConnectorFactory, JMXServiceURL}
import javax.management.{MBeanInfo, ObjectName}
import wvlet.log.LogSupport
import wvlet.log.io.IOUtil.withResource

case class HostAndPort(host: String, port: Int)

case class JMXConfig(registryPort: Option[Int] = None, rmiPort: Option[Int] = None)


object JMXAgent extends LogSupport {
  def start(registryPort: Int) = new JMXAgent(JMXConfig(registryPort = Some(registryPort)))

  lazy val defaultAgent: JMXAgent = new JMXAgent(new JMXConfig())
}

trait JMXMBeanServerService {
  protected lazy val mbeanServer = ManagementFactory.getPlatformMBeanServer
}

class JMXAgent(config: JMXConfig) extends JMXRegistry with JMXMBeanServerService with LogSupport {
  val serviceUrl: JMXServiceURL = {
    val url = JMXUtil.currentJMXRegistry match {
      case Some(jmxReg) =>
        info(s"JMX registry is already running at ${jmxReg}")
        if (config.registryPort.isDefined) {
          val expectedPort = config.registryPort.get
          if (expectedPort != jmxReg.port) {
            throw new IllegalStateException(
              s"JMX registry is already running using an unexpected port: ${jmxReg.port}. Expected port = ${expectedPort}"
            )
          }
        }
        s"service:jmx:rmi:///jndi/rmi://${jmxReg.host}:${jmxReg.port}/jmxrmi"
      case None =>
        JMXUtil.startAndGetAgentURL(config)
    }
    new JMXServiceURL(url)
  }

  def withConnector[U](f: JMXConnector => U): U = {
    withResource(JMXConnectorFactory.connect(serviceUrl)) { connector => f(connector) }
  }

  def getMBeanInfo(mbeanName: String): MBeanInfo = {
    withConnector { connector => connector.getMBeanServerConnection.getMBeanInfo(new ObjectName(mbeanName)) }
  }

  def getMBeanAttribute(mbeanName: String, attrName: String): Any = {
    withConnector { connector => connector.getMBeanServerConnection.getAttribute(new ObjectName(mbeanName), attrName) }
  }
} 
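A usage sketch, assuming port 8180 is free for the JMX RMI registry:

val agent = JMXAgent.start(registryPort = 8180)
println(agent.getMBeanInfo("java.lang:type=Runtime").getDescription)
val uptime = agent.getMBeanAttribute("java.lang:type=Runtime", "Uptime")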
Example 28
Source File: ConfigMXBean.scala    From openwhisk   with Apache License 2.0
package org.apache.openwhisk.common
import java.lang.management.ManagementFactory

import com.typesafe.config.{ConfigFactory, ConfigRenderOptions}
import javax.management.ObjectName

trait ConfigMXBean {

  def getConfig(path: String, originComment: Boolean): String
}

object ConfigMXBean extends ConfigMXBean {
  val name = new ObjectName("org.apache.openwhisk:name=config")
  private val renderOptions =
    ConfigRenderOptions.defaults().setComments(false).setOriginComments(true).setFormatted(true).setJson(false)

  override def getConfig(path: String, originComment: Boolean): String = {
    val config = ConfigFactory.load()
    val co = if (path == ".") config.root() else config.getConfig(path).root()
    co.render(renderOptions.setOriginComments(originComment))
  }

  def register(): Unit = {
    ManagementFactory.getPlatformMBeanServer.registerMBean(ConfigMXBean, name)
  }

  def unregister(): Unit = {
    ManagementFactory.getPlatformMBeanServer.unregisterMBean(name)
  }
} 
Example 29
Source File: ConfigMXBeanTests.scala    From openwhisk   with Apache License 2.0
package org.apache.openwhisk.common
import java.lang.management.ManagementFactory

import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{FlatSpec, Matchers}

@RunWith(classOf[JUnitRunner])
class ConfigMXBeanTests extends FlatSpec with Matchers {
  behavior of "ConfigMBean"

  it should "return config at path" in {
    ConfigMXBean.register()
    val config = ManagementFactory.getPlatformMBeanServer.invoke(
      ConfigMXBean.name,
      "getConfig",
      Array("whisk.spi", java.lang.Boolean.FALSE),
      Array("java.lang.String", "boolean"))
    config.asInstanceOf[String] should include("ArtifactStoreProvider")
    ConfigMXBean.unregister()
  }
} 
Example 30
Source File: ResultTask.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.scheduler

import java.io._
import java.lang.management.ManagementFactory
import java.nio.ByteBuffer
import java.util.Properties

import org.apache.spark._
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.rdd.RDD


private[spark] class ResultTask[T, U](
    stageId: Int,
    stageAttemptId: Int,
    taskBinary: Broadcast[Array[Byte]],
    partition: Partition,
    locs: Seq[TaskLocation],
    val outputId: Int,
    localProperties: Properties,
    serializedTaskMetrics: Array[Byte] =
      SparkEnv.get.closureSerializer.newInstance().serialize(TaskMetrics.registered).array(),
    isFutureTask: Boolean = false,
    depShuffleIds: Option[Seq[Seq[Int]]] = None,
    depShuffleNumMaps: Option[Seq[Int]] = None,
    jobId: Option[Int] = None,
    appId: Option[String] = None,
    appAttemptId: Option[String] = None)
  extends Task[U](stageId, stageAttemptId, partition.index,
    serializedTaskMetrics, localProperties, isFutureTask, depShuffleIds, depShuffleNumMaps,
    jobId, appId, appAttemptId)
  with Serializable {

  var rdd: RDD[T] = null
  var func: (TaskContext, Iterator[T]) => U = null

  @transient private[this] val preferredLocs: Seq[TaskLocation] = {
    if (locs == null) Nil else locs.toSet.toSeq
  }

  override def prepTask(): Unit = {
    // Deserialize the RDD and the func using the broadcast variables.
    val threadMXBean = ManagementFactory.getThreadMXBean
    val deserializeStartTime = System.currentTimeMillis()
    val deserializeStartCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime
    } else 0L
    val ser = SparkEnv.get.closureSerializer.newInstance()
    val (_rdd, _func) = ser.deserialize[(RDD[T], (TaskContext, Iterator[T]) => U)](
      ByteBuffer.wrap(taskBinary.value), Thread.currentThread.getContextClassLoader)
    rdd = _rdd
    func = _func
    _executorDeserializeTime = System.currentTimeMillis() - deserializeStartTime
    _executorDeserializeCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
      threadMXBean.getCurrentThreadCpuTime - deserializeStartCpuTime
    } else 0L
  }

  override def runTask(context: TaskContext): U = {
    // Deserialize the RDD and the func using the broadcast variables.
    if (func == null || rdd == null) {
      prepTask()
    }
    func(context, rdd.iterator(partition, context))
  }

  // This is only callable on the driver side.
  override def preferredLocations: Seq[TaskLocation] = preferredLocs

  override def toString: String = "ResultTask(" + stageId + ", " + partitionId + ")"
}

object ResultTask {

  def apply[T, U](
      stageId: Int,
      stageAttemptId: Int,
      partition: Partition,
      outputId: Int,
      localProperties: Properties,
      internalAccumulatorsSer: Array[Byte],
      isFutureTask: Boolean,
      rdd: RDD[T],
      func: (TaskContext, Iterator[T]) => U): ResultTask[T, U] = {
    val rt = new ResultTask[T, U](stageId, stageAttemptId, null, partition, Seq.empty, outputId,
      localProperties, internalAccumulatorsSer, isFutureTask)
    rt.rdd = rdd
    rt.func = func
    rt
  }

}