java.util.logging.Logger Scala Examples

The following examples show how to use java.util.logging.Logger. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.
Example 1
Source File: RemoraKafkaConsumerGroupService.scala    From remora   with MIT License 5 votes vote down vote up
package kafka.admin

import java.util.logging.Logger

import config.KafkaSettings
import kafka.admin.ConsumerGroupCommand.ConsumerGroupCommandOptions
import models.{GroupInfo, Node, PartitionAssignmentState}
import org.apache.kafka.clients.admin

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.{ExecutionContextExecutor, Future}

trait ConsumerGroupService {
  /** Lists the names of all consumer groups known to the cluster. */
  def list(): Future[List[String]]

  /** Describes the Kafka cluster via the admin client. */
  def describeCluster(): Future[admin.DescribeClusterResult]
  /** Describes one consumer group: state, partition assignments and per-topic lag. */
  def describeConsumerGroup(group: String): Future[GroupInfo]
}

class RemoraKafkaConsumerGroupService(kafkaSettings: KafkaSettings)
                                     (implicit executionContext: ExecutionContextExecutor) extends ConsumerGroupService
  with nl.grons.metrics.scala.DefaultInstrumented {

  // `RemoraKafkaConsumerGroupService.this.getClass` is just `getClass` here.
  private val logger = Logger.getLogger(getClass.getName)

  // One timer per public operation, exposed through the metrics registry.
  private val listTimer = metrics.timer("list-timer")
  private val describeTimer = metrics.timer("describe-timer")
  private val describeGroupTimer = metrics.timer("describe-group-timer")

  /** Builds a kafka-admin ConsumerGroupService, optionally scoped to one group. */
  private def createKafkaConsumerGroupService(groupId: Option[String] = None): ConsumerGroupCommand.ConsumerGroupService = {
    groupId match {
      case Some(g) => createKafkaConsumerGroupService(baseConfig() ++ Array("--group", g))
      case None => createKafkaConsumerGroupService(baseConfig())
    }
  }

  /**
   * Common CLI arguments for ConsumerGroupCommand: always the bootstrap
   * server, plus --command-config when one is configured.
   */
  private def baseConfig(): Array[String] = {
    // val (not var): the buffer is mutated in place, the reference never reassigned.
    val args = ArrayBuffer("--bootstrap-server", kafkaSettings.address)

    if (kafkaSettings.commandConfig.nonEmpty) {
      args ++= Array("--command-config", kafkaSettings.commandConfig)
    }

    args.toArray
  }

  /** Wraps the raw argument array in kafka-admin's command options. */
  def createKafkaConsumerGroupService(consumerGroupCommandArgs: Array[String]): ConsumerGroupCommand.ConsumerGroupService = {
    new ConsumerGroupCommand.ConsumerGroupService(new ConsumerGroupCommandOptions(consumerGroupCommandArgs))
  }

  /** Describes the cluster via the shared admin client, timed by describe-timer. */
  override def describeCluster(): Future[admin.DescribeClusterResult] = Future {
    describeTimer.time {
      kafkaSettings.adminClient.describeCluster()
    }
  }

  /** Lists all consumer groups; the underlying command service is always closed. */
  override def list(): Future[List[String]] = Future {
    listTimer.time {
      val groupService = createKafkaConsumerGroupService()
      try {
        groupService.listGroups()
      } finally {
        groupService.close()
      }
    }
  }

  /**
   * Describes one consumer group: collects offsets, converts each partition
   * assignment state into the local model, and aggregates lag per topic.
   */
  override def describeConsumerGroup(group: String): Future[GroupInfo] = Future {
    describeGroupTimer.time {
      val kafkaConsumerGroupService = createKafkaConsumerGroupService(Some(group))
      try {
        val (state, assignments) = kafkaConsumerGroupService.collectGroupOffsets()
        assignments match {
          case Some(partitionAssignmentStates) =>
            // Renamed from `assignments`, which shadowed the outer tuple binding.
            val mappedAssignments = Some(partitionAssignmentStates.map(a => PartitionAssignmentState(a.group,
              a.coordinator match {
                case Some(c) => Some(Node(Option(c.id), Option(c.idString), Option(c.host), Option(c.port), Option(c.rack)))
                case None => None
              },
              a.topic, a.partition, a.offset,
              a.lag, a.consumerId, a.host, a.clientId, a.logEndOffset)))

            // Sum lag per topic, treating a missing lag as 0.
            val lagPerTopic = Some(partitionAssignmentStates.filter(state => state.topic.isDefined)
              .groupBy(state => state.topic.get)
              .map { case (topic, partitions) => (topic, partitions.map(_.lag).map {
                case Some(lag) => lag.toLong
                case None => 0L
              }.sum)
              })

            GroupInfo(state, mappedAssignments, lagPerTopic)
          case None => GroupInfo(state)
        }
      } finally {
        kafkaConsumerGroupService.close()
      }
    }
  }
}
Example 2
Source File: HelloWorldServer.scala    From grpc-scala-sample   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package io.grpc.examples.helloworld

import java.util.logging.Logger

import io.grpc.{Server, ServerBuilder}
import io.grpc.examples.helloworld.helloworld.{GreeterGrpc, HelloRequest, HelloReply}

import scala.concurrent.{ExecutionContext, Future}


object HelloWorldServer {
  private val logger = Logger.getLogger(classOf[HelloWorldServer].getName)

  /** Entry point: starts the greeter server and blocks until it terminates. */
  def main(args: Array[String]): Unit = {
    val server = new HelloWorldServer(ExecutionContext.global)
    server.start()
    server.blockUntilShutdown()
  }

  // Port the gRPC server listens on.
  private val port = 50051
}

class HelloWorldServer(executionContext: ExecutionContext) { self =>
  // Option instead of a nullable var: None until start() is called.
  private[this] var server: Option[Server] = None

  /** Builds and starts the gRPC server, installing a JVM shutdown hook that stops it. */
  private def start(): Unit = {
    val started = ServerBuilder
      .forPort(HelloWorldServer.port)
      .addService(GreeterGrpc.bindService(new GreeterImpl, executionContext))
      .build
      .start
    server = Some(started)
    HelloWorldServer.logger.info("Server started, listening on " + HelloWorldServer.port)
    sys.addShutdownHook {
      // stderr: the logger may have been reset by its own JVM shutdown hook.
      System.err.println("*** shutting down gRPC server since JVM is shutting down")
      self.stop()
      System.err.println("*** server shut down")
    }
  }

  /** Initiates an orderly shutdown if the server was started. */
  private def stop(): Unit = server.foreach(_.shutdown())

  /** Blocks the calling thread until the server terminates (no-op if never started). */
  private def blockUntilShutdown(): Unit = server.foreach(_.awaitTermination())

  /** Greeter implementation: replies "Hello <name>" immediately. */
  private class GreeterImpl extends GreeterGrpc.Greeter {
    override def sayHello(req: HelloRequest) = {
      val reply = HelloReply(message = "Hello " + req.name)
      Future.successful(reply)
    }
  }

}
Example 3
Source File: HelloWorldClient.scala    From grpc-scala-sample   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package io.grpc.examples.helloworld

import java.util.concurrent.TimeUnit
import java.util.logging.{Level, Logger}

import io.grpc.examples.helloworld.helloworld.{HelloRequest, GreeterGrpc}
import io.grpc.examples.helloworld.helloworld.GreeterGrpc.GreeterBlockingStub
import io.grpc.{StatusRuntimeException, ManagedChannelBuilder, ManagedChannel}


  /**
   * Sends a single blocking SayHello RPC for `name` and logs the reply.
   * A failed RPC is logged at WARNING level rather than propagated.
   */
  def greet(name: String): Unit = {
    logger.info("Will try to greet " + name + " ...")
    val request = HelloRequest(name = name)
    try {
      val response = blockingStub.sayHello(request)
      logger.info("Greeting: " + response.message)
    }
    catch {
      case e: StatusRuntimeException =>
        // getStatus is passed as a log parameter, filled into the {0} placeholder.
        logger.log(Level.WARNING, "RPC failed: {0}", e.getStatus)
    }
  }
} 
Example 4
Source File: TestCapabilities.scala    From scala-js-env-selenium   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package org.scalajs.jsenv.selenium

import org.openqa.selenium.Capabilities
import org.openqa.selenium.firefox.{FirefoxOptions, FirefoxDriverLogLevel}
import org.openqa.selenium.chrome.ChromeOptions

import java.util.logging.{Logger, Level}

object TestCapabilities {
  // Keep Selenium's own logging quiet to avoid spamming test output.
  Logger.getLogger("org.openqa.selenium").setLevel(Level.WARNING)

  /** Browser name from SJS_TEST_BROWSER, defaulting to "firefox". */
  def nameFromEnv: String = sys.env.getOrElse("SJS_TEST_BROWSER", "firefox")

  /** Headless capabilities for the browser selected via the environment. */
  def fromEnv: Capabilities = {
    val browser = nameFromEnv
    browser match {
      case "firefox" =>
        new FirefoxOptions().setHeadless(true).setLogLevel(FirefoxDriverLogLevel.ERROR)
      case "chrome" =>
        new ChromeOptions().setHeadless(true)
      case other =>
        throw new IllegalArgumentException(s"Unknown browser $other")
    }
  }
}
Example 5
Source File: RouteGuideRestGateway.scala    From grpcexample   with MIT License 5 votes vote down vote up
package io.grpc.routeguide

import java.util.logging.Logger

import grpcgateway.server.GrpcGatewayServerBuilder
import io.grpc.ManagedChannelBuilder

import scala.concurrent.ExecutionContext
import scala.sys.ShutdownHookThread

class RouteGuideRestGateway(port: Int, grpcHost: String, grpcPort: Int)(implicit ec: ExecutionContext) {
  // Fixed: logger was named after classOf[RouteGuideServer] (copy/paste slip).
  private val logger: Logger = Logger.getLogger(classOf[RouteGuideRestGateway].getName)

  // Plaintext channel to the backing gRPC server the gateway proxies to.
  private val channel = ManagedChannelBuilder
    .forAddress(grpcHost, grpcPort)
    .usePlaintext(true)
    .build()

  // REST gateway listening on `port`, translating HTTP calls onto the channel.
  private val gateway = GrpcGatewayServerBuilder
    .forPort(port)
    .addService(new RouteGuideHandler(channel))
    .build()

  // Kept so blockUntilShutdown() can join the hook installed by start().
  private var shutdownHook: Option[ShutdownHookThread] = None

  /** Starts the gateway and registers a JVM shutdown hook that stops it. */
  def start(): Unit = {
    gateway.start()
    logger.info(s"GRPC Gateway started, listening on $port")
    shutdownHook = Option(
      sys.addShutdownHook {
        // Use stderr here since the logger may have been reset by its JVM shutdown hook.
        System.err.println("*** shutting down gRPC gateway since JVM is shutting down")
        stop()
        System.err.println("*** gRPC Gateway shut down")
      }
    )
  }

  /** Initiates gateway shutdown. */
  def stop(): Unit = gateway.shutdown()

  /** Blocks until the shutdown hook (if one was installed) has finished. */
  def blockUntilShutdown(): Unit = shutdownHook.foreach(_.join())
}

/** Launches the REST gateway on port 8981, proxying to gRPC on localhost:8980. */
object RouteGuideRestGateway extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  // Expects GRPC server to be running on localhost:8980
  val gateway = new RouteGuideRestGateway(8981, "localhost", 8980)

  gateway.start()
  gateway.blockUntilShutdown()
}
Example 6
Source File: RouteGuideAkkaStreamService.scala    From grpcexample   with MIT License 5 votes vote down vote up
package io.grpc.routeguide

import java.util.concurrent.TimeUnit.NANOSECONDS
import java.util.logging.Logger

import akka.NotUsed
import akka.stream.scaladsl.Flow
import concurrency.AtomicRef

class RouteGuideAkkaStreamService(features: Seq[Feature]) extends RouteGuideGrpcAkkaStream.RouteGuide {

  val logger: Logger = Logger.getLogger(classOf[RouteGuideAkkaStreamService].getName)

  // Notes posted so far, keyed by location; updated atomically.
  private val routeNotes: AtomicRef[Map[Point, Seq[RouteNote]]] = new AtomicRef(Map.empty)

  /** Bidirectional chat: record each incoming note, then emit every note at its location. */
  override def routeChat: Flow[RouteNote, RouteNote, NotUsed] =
    Flow[RouteNote].mapConcat { incoming =>
      addNote(incoming)
      getNotes(incoming.getLocation).to[collection.immutable.Iterable]
    }

  /** Feature at the given point, or a nameless feature when nothing matches. */
  private def findFeature(point: Point): Feature =
    features
      .find(f => f.getLocation.latitude == point.latitude && f.getLocation.longitude == point.longitude)
      .getOrElse(new Feature(location = Some(point)))

  /** Notes recorded for `point`, empty when nothing was posted there. */
  private def getNotes(point: Point): Seq[RouteNote] =
    routeNotes.get.getOrElse(point, Seq.empty)

  /** Atomically appends `note` to the list stored under its location. */
  private def addNote(note: RouteNote): Unit = {
    routeNotes.updateAndGet { current =>
      val appended = current.getOrElse(note.getLocation, Seq.empty) :+ note
      current + (note.getLocation -> appended)
    }
  }
}
Example 7
Source File: RouteGuidePersistence.scala    From grpcexample   with MIT License 5 votes vote down vote up
package io.grpc.routeguide

import java.net.URL
import java.util.logging.Logger

import com.trueaccord.scalapb.json.JsonFormat

import scala.io.Source

object RouteGuidePersistence {
  val logger: Logger = Logger.getLogger(getClass.getName)

  // Bundled default feature database.
  val defaultFeatureFile: URL = getClass.getClassLoader.getResource("route_guide.json")

  /**
   * Reads the JSON feature database at `file` and returns its features.
   * Both the stream and the Source are always closed, even on parse failure.
   */
  def parseFeatures(file: URL): Seq[Feature] = {
    logger.info(s"Loading features from ${file.getPath}")
    // The try/finally is now an expression: no mutable `features` var needed,
    // and close() gets explicit parens (side-effecting call).
    val input = file.openStream()
    val features =
      try {
        val source = Source.fromInputStream(input)
        try JsonFormat.fromJsonString[PersistedFeatures](source.getLines().mkString("\n")).features
        finally source.close()
      } finally input.close()
    logger.info(s"Loaded ${features.size} features")
    features
  }

}
Example 8
Source File: RouteGuideServer.scala    From grpcexample   with MIT License 5 votes vote down vote up
package io.grpc.routeguide

import java.util.logging.Logger

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import io.grpc.{Server, ServerBuilder}

class RouteGuideServer(server: Server) {

  val logger: Logger = Logger.getLogger(classOf[RouteGuideServer].getName)

  /** Starts the wrapped gRPC server and installs a JVM hook that stops it on exit. */
  def start(): Unit = {
    server.start()
    logger.info(s"Server started, listening on ${server.getPort}")
    sys.addShutdownHook {
      // Use stderr here since the logger may have been reset by its JVM shutdown hook.
      System.err.println("*** shutting down gRPC server since JVM is shutting down")
      stop()
      System.err.println("*** server shut down")
    }
    ()
  }

  /** Initiates an orderly shutdown. */
  def stop(): Unit = {
    server.shutdown()
    ()
  }

  /** Blocks the calling thread until the server has terminated. */
  def blockUntilShutdown(): Unit = server.awaitTermination()
}

/** Launcher: RouteGuide over plain scala.concurrent Futures, on port 8980. */
object RouteGuideServer extends App {
  // Feature database loaded from the bundled JSON resource.
  val features = RouteGuidePersistence.parseFeatures(
    Thread.currentThread.getContextClassLoader.getResource("route_guide.json")
  )

  val server = new RouteGuideServer(
    ServerBuilder
      .forPort(8980)
      .addService(
        RouteGuideGrpc.bindService(
          new RouteGuideService(features),
          scala.concurrent.ExecutionContext.global
        )
      )
      .build()
  )
  server.start()
  server.blockUntilShutdown()
}

/** Launcher: RouteGuide backed by the Monix implementation, on port 8980. */
object RouteGuideMonixServer extends App {
  // Feature database loaded from the bundled JSON resource.
  val features = RouteGuidePersistence.parseFeatures(
    Thread.currentThread.getContextClassLoader.getResource("route_guide.json")
  )
  val server = new RouteGuideServer(
    ServerBuilder
      .forPort(8980)
      .addService(
        RouteGuideGrpcMonix.bindService(
          new RouteGuideMonixService(features),
          monix.execution.Scheduler.global
        )
      )
      .build()
  )
  server.start()
  server.blockUntilShutdown()
}

/** Launcher: RouteGuide backed by the Akka Streams implementation, on port 8980. */
object RouteGuideAkkaStreamServer extends App {
  // Feature database loaded from the bundled JSON resource.
  val features = RouteGuidePersistence.parseFeatures(
    Thread.currentThread.getContextClassLoader.getResource("route_guide.json")
  )
  val system = ActorSystem("RouteGuideAkkaStreamServer")
  implicit val materializer = ActorMaterializer.create(system)

  val server = new RouteGuideServer(
    ServerBuilder
      .forPort(8980)
      .addService(
        RouteGuideGrpcAkkaStream.bindService(
          new RouteGuideAkkaStreamService(features)
        )
      )
      .build()
  )
  server.start()
  server.blockUntilShutdown()
  // Only reached after the gRPC server terminates; tears down the actor system.
  system.terminate()
}
Example 9
Source File: RouteGuideMonixService.scala    From grpcexample   with MIT License 5 votes vote down vote up
package io.grpc.routeguide

import java.util.concurrent.TimeUnit.NANOSECONDS
import java.util.logging.Logger

import concurrency.AtomicRef
import monix.eval.Task
import monix.reactive.Observable

class RouteGuideMonixService(features: Seq[Feature]) extends RouteGuideGrpcMonix.RouteGuide {

  val logger: Logger = Logger.getLogger(classOf[RouteGuideMonixService].getName)

  // Notes posted so far, keyed by location; updated atomically.
  private val routeNotes: AtomicRef[Map[Point, Seq[RouteNote]]] = new AtomicRef(Map.empty)

  /** Bidirectional chat: record each incoming note, then emit every note at its location. */
  override def routeChat(notes: Observable[RouteNote]): Observable[RouteNote] =
    notes.flatMap { incoming =>
      addNote(incoming)
      Observable.fromIterable(getNotes(incoming.getLocation))
    }

  /** Feature at the given point, or a nameless feature when nothing matches. */
  private def findFeature(point: Point): Feature =
    features
      .find(f => f.getLocation.latitude == point.latitude && f.getLocation.longitude == point.longitude)
      .getOrElse(new Feature(location = Some(point)))

  /** Notes recorded for `point`, empty when nothing was posted there. */
  private def getNotes(point: Point): Seq[RouteNote] =
    routeNotes.get.getOrElse(point, Seq.empty)

  /** Atomically appends `note` to the list stored under its location. */
  private def addNote(note: RouteNote): Unit = {
    routeNotes.updateAndGet { current =>
      val appended = current.getOrElse(note.getLocation, Seq.empty) :+ note
      current + (note.getLocation -> appended)
    }
  }
}
Example 10
Source File: KsqlDriver.scala    From ksql-jdbc-driver   with Apache License 2.0 5 votes vote down vote up
package com.github.mmolimar.ksql.jdbc

import java.sql.{Connection, Driver, DriverPropertyInfo}
import java.util.Properties
import java.util.logging.Logger

import com.github.mmolimar.ksql.jdbc.Exceptions._

import scala.util.matching.Regex

object KsqlDriver {

  val ksqlName = "ksqlDB"
  val ksqlPrefix = "jdbc:ksql://"

  val driverName = "ksqlDB JDBC driver"
  val driverMajorVersion = 1
  val driverMinorVersion = 2
  val driverVersion = s"$driverMajorVersion.$driverMinorVersion"

  // JDBC spec level implemented by the driver.
  val jdbcMajorVersion = 4
  val jdbcMinorVersion = 1

  // ksqlDB server version this driver targets.
  val ksqlMajorVersion = 5
  val ksqlMinorVersion = 4
  val ksqlMicroVersion = 0
  val ksqlVersion = s"$ksqlMajorVersion.$ksqlMinorVersion.$ksqlMicroVersion"

  // Optional "user:password@" section — capture groups: (whole)(user)(password).
  private val ksqlUserPassRegex = "((.+):(.+)@){0,1}"
  // Host and a 1-5 digit port — capture groups: (host)(port).
  private val ksqlServerRegex = "([A-Za-z0-9._%+-]+):([0-9]{1,5})"
  // Optional "?k=v&k=v..." query — capture groups: (whole)(props)(last "&k=v").
  private val ksqlPropsRegex = "(\\?([A-Za-z0-9._-]+=[A-Za-z0-9._-]+(&[A-Za-z0-9._-]+=[A-Za-z0-9._-]+)*)){0,1}"

  // Anchored at the end with \z; the prefix anchors the start.
  val urlRegex: Regex = s"$ksqlPrefix$ksqlUserPassRegex$ksqlServerRegex$ksqlPropsRegex\\z".r

  /**
   * Parses a `jdbc:ksql://[user:password@]host:port[?k=v&...]` URL.
   * The extractor binds the 8 capture groups above in order; user, password and
   * props may be null (absent), hence the Option(...) wrapping.
   * Throws InvalidUrl when the URL does not match `urlRegex`.
   */
  def parseUrl(url: String): KsqlConnectionValues = url match {
    case urlRegex(_, username, password, ksqlServer, port, _, props, _) =>
      KsqlConnectionValues(
        ksqlServer,
        port.toInt,
        Option(username),
        Option(password),
        // "a=1&b=2" -> Map("a" -> "1", "b" -> "2"); absent query -> empty map.
        Option(props).map(_.split("&").map(_.split("=")).map(p => p(0) -> p(1)).toMap).getOrElse(Map.empty)
      )
    case _ => throw InvalidUrl(url)
  }
}

class KsqlDriver extends Driver {

  /** True for any non-null URL carrying the `jdbc:ksql://` prefix. */
  override def acceptsURL(url: String): Boolean =
    Option(url).fold(false)(_.startsWith(KsqlDriver.ksqlPrefix))

  // This driver is not fully JDBC compliant.
  override def jdbcCompliant: Boolean = false

  // No driver-specific connection properties are advertised.
  override def getPropertyInfo(url: String, info: Properties): scala.Array[DriverPropertyInfo] = scala.Array.empty

  override def getMinorVersion: Int = KsqlDriver.driverMinorVersion

  override def getMajorVersion: Int = KsqlDriver.driverMajorVersion

  // java.util.logging is not used by this driver.
  override def getParentLogger: Logger = throw NotSupported("getParentLogger")

  /** Opens and validates a connection; rejects URLs this driver does not accept. */
  override def connect(url: String, properties: Properties): Connection = {
    if (!acceptsURL(url)) throw InvalidUrl(url)

    val values = KsqlDriver.parseUrl(url)
    val connection = buildConnection(values, properties)
    connection.validate()
    connection
  }

  // Package-visible factory for connection construction.
  private[jdbc] def buildConnection(values: KsqlConnectionValues, properties: Properties): KsqlConnection =
    new KsqlConnection(values, properties)
}
Example 11
Source File: DynamoDBEmbeddedSpecSupport.scala    From reactive-aws-clients   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.reactive.aws.dynamodb

import java.io.File
import java.util.logging.{ Level, Logger }

import com.amazonaws.auth.{ AWSStaticCredentialsProvider, BasicAWSCredentials }
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.Regions
import com.amazonaws.services.dynamodbv2.local.server.{
  DynamoDBProxyServer,
  LocalDynamoDBRequestHandler,
  LocalDynamoDBServerHandler
}
import com.amazonaws.services.dynamodbv2.{ AmazonDynamoDB, AmazonDynamoDBClientBuilder }
import com.github.j5ik2o.reactive.aws.test.RandomPortSupport
import org.scalatest.{ BeforeAndAfterAll, Suite }
import org.seasar.util.io.ResourceUtil

import scala.concurrent.duration._

@SuppressWarnings(Array("org.wartremover.warts.Null", "org.wartremover.warts.Var", "org.wartremover.warts.While"))
trait DynamoDBEmbeddedSpecSupport extends BeforeAndAfterAll with RandomPortSupport { this: Suite =>

  // Polling interval while waiting for DynamoDB Local to come up.
  protected val waitIntervalForDynamoDBLocal: FiniteDuration = 500 milliseconds

  // sqlite4java native libraries are expected next to the build output.
  protected def sqlite4javaLibraryPath: File = new File(ResourceUtil.getBuildDir(getClass), "/../../../native-libs")

  protected val region: Regions = Regions.AP_NORTHEAST_1

  // Placeholder credentials for the local endpoint.
  protected lazy val accessKeyId: String = "x"

  protected lazy val secretAccessKey: String = "x"

  // A fresh server port is chosen per suite.
  protected lazy val dynamoDBPort: Int = temporaryServerPort()

  protected lazy val dynamoDBEndpoint: String = s"http://127.0.0.1:$dynamoDBPort"

  /** In-memory DynamoDB Local proxy; constructed lazily, started via startDynamoDBLocal(). */
  protected lazy val dynamoDBProxyServer: DynamoDBProxyServer = {
    System.setProperty("sqlite4java.library.path", sqlite4javaLibraryPath.toString)
    val inMemory = true
    // scalastyle:off
    val dbPath     = null
    val sharedDb   = false
    val corsParams = null
    // scalastyle:on
    val requestHandler = new LocalDynamoDBRequestHandler(0, inMemory, dbPath, sharedDb, false)
    new DynamoDBProxyServer(dynamoDBPort, new LocalDynamoDBServerHandler(requestHandler, corsParams))
  }

  /** Client pointed at the local endpoint with static credentials. */
  protected lazy val dynamoDBClient: AmazonDynamoDB = {
    val credentials = new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKeyId, secretAccessKey))
    AmazonDynamoDBClientBuilder
      .standard()
      .withCredentials(credentials)
      .withEndpointConfiguration(new EndpointConfiguration(dynamoDBEndpoint, region.getName))
      .build()
  }

  /** Polls listTables until the local server answers, sleeping between attempts. */
  protected def waitDynamoDBLocal(): Unit = {
    var ready = false
    while (!ready) {
      try {
        dynamoDBClient.listTables()
        ready = true
      } catch {
        case _: Exception =>
          Thread.sleep(waitIntervalForDynamoDBLocal.toMillis)
      }
    }
  }

  protected def startDynamoDBLocal(): Unit = {
    // sqlite4java logging is noisy; silence it for test runs.
    Logger.getLogger("com.almworks.sqlite4java").setLevel(Level.OFF)
    dynamoDBProxyServer.start()
  }

  protected def shutdownDynamoDBLocal(): Unit = {
    dynamoDBProxyServer.stop()
  }

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    startDynamoDBLocal()
    waitDynamoDBLocal()
  }

  override protected def afterAll(): Unit = {
    shutdownDynamoDBLocal()
    super.afterAll()
  }

}
Example 12
Source File: AmDecoder.scala    From Argus-SAF   with Apache License 2.0 5 votes vote down vote up
package org.argus.amandroid.core.decompile

import java.io.File

import org.argus.jawa.core.util._
import java.util.logging.Logger
import java.util.logging.LogManager

import brut.androlib.ApkDecoder
import brut.androlib.err.CantFindFrameworkResException
import org.argus.amandroid.core.util.ApkFileUtil

object AmDecoder {
  final private val TITLE = "AmDecoder"

  /**
   * Decompiles an apk's resources with apktool into an output folder and
   * returns that folder's URI. Decode errors (other than interruption) are
   * reported on stderr and the output URI is still returned.
   */
  def decode(sourcePathUri: FileResourceUri, outputUri: FileResourceUri, forceDelete: Boolean = true, createFolder: Boolean = true, srcFolder: String): FileResourceUri = {
    // Quiet mode: strip all handlers from the root logger and reset JUL.
    val rootLogger = Logger.getLogger("")
    rootLogger.getHandlers.foreach(rootLogger.removeHandler)
    LogManager.getLogManager.reset()

    val apkFile = FileUtil.toFile(sourcePathUri)
    val outputDir =
      if (createFolder) FileUtil.toFile(ApkFileUtil.getOutputUri(sourcePathUri, outputUri))
      else FileUtil.toFile(outputUri)
    // Skip decoding entirely when output exists and the caller did not force it.
    if (new File(outputDir, srcFolder).exists() && !forceDelete) return FileUtil.toUri(outputDir)
    try {
      val decoder = new ApkDecoder
      decoder.setDecodeSources(0x0000) // DECODE_SOURCES_NONE = 0x0000
      decoder.setApkFile(apkFile)
      decoder.setOutDir(outputDir)
      decoder.setForceDelete(true)
      decoder.decode()
    } catch {
      case ie: InterruptedException => throw ie
      case fe: CantFindFrameworkResException =>
        System.err.println(TITLE + ": Can't find framework resources for package of id: " + fe.getPkgId + ". You must install proper framework files, see apk-tool website for more info.")
      case e: Exception =>
        System.err.println(TITLE + ": " + e.getMessage + ". See apk-tool website for more info.")
    }
    FileUtil.toUri(outputDir)
  }
}
Example 13
Source File: HelloWorldTopicMDB.scala    From jboss-wildfly-test   with Apache License 2.0 5 votes vote down vote up
package mdb

import java.util.logging.Logger
import javax.jms.{ MessageListener, Message, TextMessage }
import javax.ejb.{ MessageDriven, ActivationConfigProperty }

@MessageDriven(name = "HelloWorldTopicMDB", activationConfig = Array(
  new ActivationConfigProperty(propertyName = "destination", propertyValue = "HELLOWORLDMDBTopic"),
  new ActivationConfigProperty(propertyName = "destinationType", propertyValue = "javax.jms.Topic"),
  new ActivationConfigProperty(propertyName = "acknowledgeMode", propertyValue = "Auto-acknowledge")
))
class HelloWorldTopicMDB extends MessageListener {
  // Logger named after the runtime class.
  val logger = Logger.getLogger(this.getClass.getName)

  /** JMS callback: logs text messages; warns on any other message type. */
  override def onMessage(message: Message): Unit = {
    message match {
      case msg: TextMessage ⇒
        // Fixed: the message previously said "queue", but this MDB consumes a topic.
        logger.info("Received Message from topic: " + msg.getText)
      case msg ⇒
        logger.warning("Message of wrong type: " + msg.getClass.getName)
    }
  }
}
Example 14
Source File: TestBase.scala    From open-korean-text   with Apache License 2.0 5 votes vote down vote up
package org.openkoreantext.processor

import java.util.logging.{Level, Logger}

import org.junit.runner.RunWith
import org.openkoreantext.processor.util.KoreanDictionaryProvider._
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner

object TestBase {

  /** Wall-clock duration of one chunk's parse, with the chunk for reporting. */
  case class ParseTime(time: Long, chunk: String)

  /** Executes `block` once and returns its wall-clock duration in milliseconds. */
  def time[R](block: => R): Long = {
    val t0 = System.currentTimeMillis()
    block
    val t1 = System.currentTimeMillis()
    t1 - t0
  }

  /**
   * Runs `f` over every chunk in the tab-separated example resource and asserts
   * that each output equals the expected parse recorded in the file; timing
   * statistics are written to `log`. Explicit `: Unit =` replaces the
   * deprecated procedure syntax.
   */
  def assertExamples(exampleFiles: String, log: Logger, f: (String => String)): Unit = {
    assert({
      val input = readFileByLineFromResources(exampleFiles)

      // Fold: collect per-chunk timings and AND together "did this chunk match".
      // Renamed from `hasErrors`, which was misleading — the flag is true when
      // every parse matched.
      val (parseTimes, allMatched) = input.foldLeft((List[ParseTime](), true)) {
        case ((acc: List[ParseTime], matchedSoFar: Boolean), line: String) =>
          val s = line.split("\t")
          val (chunk, parse) = (s(0), if (s.length == 2) s(1) else "")

          val oldTokens = parse
          val t0 = System.currentTimeMillis()
          val newTokens = f(chunk)
          val t1 = System.currentTimeMillis()

          val oldParseMatches = oldTokens == newTokens

          if (!oldParseMatches) {
            System.err.println("Example set match error: %s \n - EXPECTED: %s\n - ACTUAL  : %s".format(
              chunk, oldTokens, newTokens))
          }

          (ParseTime(t1 - t0, chunk) :: acc, matchedSoFar && oldParseMatches)
      }

      val averageTime = parseTimes.map(_.time).sum.toDouble / parseTimes.size
      val maxItem = parseTimes.maxBy(_.time)

      log.log(Level.INFO, ("Parsed %d chunks. \n" +
          "       Total time: %d ms \n" +
          "       Average time: %.2f ms \n" +
          "       Max time: %d ms, %s").format(
            parseTimes.size,
            parseTimes.map(_.time).sum,
            averageTime,
            maxItem.time,
            maxItem.chunk
          ))
      allMatched
    }, "Some parses did not match the example set.")
  }
}

// Base suite so each test class runs under JUnit via the ScalaTest runner.
@RunWith(classOf[JUnitRunner])
abstract class TestBase extends FunSuite 
Example 15
Source File: BatchTokenizeTweets.scala    From open-korean-text   with Apache License 2.0 5 votes vote down vote up
package org.openkoreantext.processor.qa

import java.util.logging.{Level, Logger}

import org.openkoreantext.processor.OpenKoreanTextProcessor
import org.openkoreantext.processor.tokenizer.KoreanTokenizer.KoreanToken
import org.openkoreantext.processor.util.KoreanPos

import scala.io.Source


object BatchTokenizeTweets {

  /** Wall-clock duration of one line's tokenization, with the line itself. */
  case class ParseTime(time: Long, chunk: String)

  private val LOG = Logger.getLogger(getClass.getSimpleName)
  private val VERBOSE = true
  private val NON_NOUNS = Set(KoreanPos.Adjective, KoreanPos.Adverb, KoreanPos.Verb)

  /**
   * Tokenizes each tweet (one per line) from the file named by args(0) and
   * logs timing statistics. The first line's time includes dictionary loading
   * and is reported separately. Explicit `: Unit =` replaces the deprecated
   * procedure syntax.
   */
  def main(args: Array[String]): Unit = {
    if (args.length != 1) {
      println("The first arg should be an input file of Korean tweets.")
      return
    }
    val parseTimesAll = Source.fromFile(args(0)).getLines().foldLeft(List[ParseTime]()) {
      case (l: List[ParseTime], line: String) =>
        val t0 = System.currentTimeMillis()
        val parsed = OpenKoreanTextProcessor.tokenize(line)
        val t1 = System.currentTimeMillis()

        if (VERBOSE) {
          println(parsed.map(t => t.text + "/" + t.pos).mkString(" "))
        }
        ParseTime(t1 - t0, line.trim) :: l
    }

    // The fold prepends, so .last is the FIRST line processed (includes loading).
    val loadingTime = parseTimesAll.last

    LOG.log(Level.INFO, "The first one \"%s\" took %d ms including the loading time.".format(loadingTime.chunk, loadingTime.time))

    val parseTimes = parseTimesAll.init

    val averageTweetLength = parseTimes.map(_.chunk.length).sum.toDouble / parseTimes.size

    val averageTime = parseTimes.map(_.time).sum.toDouble / parseTimes.size
    val maxItem = parseTimes.maxBy(_.time)

    LOG.log(Level.INFO, ("Parsed %d items. \n" +
        "       Total time: %d s \n" +
        "       Average tweet length: %.2f chars \n" +
        "       Average time per tweet: %.2f ms \n" +
        "       Max time: %d ms, %s\n" +
        "       Parsed: %s"
        ).format(
          parseTimes.size,
          parseTimes.map(_.time).sum / 1000,
          averageTweetLength,
          averageTime,
          maxItem.time,
          maxItem.chunk,
          // Reuse the helper instead of duplicating its body inline.
          parseToString(OpenKoreanTextProcessor.tokenize(maxItem.chunk))
        ))
  }

  /** Renders tokens as "text pos" pairs, marking unknown tokens with "*". */
  private def parseToString(parsed: Seq[KoreanToken]): String = {
    parsed.map {
      case t if t.unknown => t.text.toString + t.pos + "*"
      case t => t.text + t.pos.toString
    }.mkString(" ")
  }
}
Example 16
Source File: BatchGetUnknownNouns.scala    From open-korean-text   with Apache License 2.0 5 votes vote down vote up
package org.openkoreantext.processor.qa

import java.util.logging.Logger

import org.openkoreantext.processor.OpenKoreanTextProcessor
import org.openkoreantext.processor.tokenizer.KoreanChunker._
import org.openkoreantext.processor.tokenizer.KoreanTokenizer._
import org.openkoreantext.processor.util.KoreanPos

import scala.io.Source


object BatchGetUnknownNouns {
  private val LOG = Logger.getLogger(getClass.getSimpleName)
  private val VERBOSE = true

  /** A Korean chunk containing unknown tokens, paired with its source tweet. */
  case class ChunkWithTweet(chunk: String, tweet: String)

  /**
   * Reads tweets (one per line) from the file named by args(0) and prints each
   * Korean chunk that tokenizes with an unknown token, together with the tweet
   * and its full tokenization. Explicit `: Unit =` replaces the deprecated
   * procedure syntax.
   */
  def main(args: Array[String]): Unit = {
    if (args.length != 1) {
      println("The first arg should be an input file path of Korean tweets.")
      return
    }
    val chunksWithUnknowns = Source.fromFile(args(0)).getLines().foldLeft(List[ChunkWithTweet]()) {
      // Only consider lines longer than 5 chars after trimming.
      case (acc: List[ChunkWithTweet], line: String) if line.trim.length > 5 =>
        chunk(line).flatMap {
          case t: KoreanToken if t.pos == KoreanPos.Korean && tokenize(t.text).exists(_.unknown) =>
            Some(ChunkWithTweet(t.text, line.trim))
          case _ => None // was `case t: KoreanToken => None` — the binding was unused
        }.toList ::: acc
      case (acc: List[ChunkWithTweet], line: String) => acc
    }.toSet

    chunksWithUnknowns.toSeq.sortBy(_.chunk).foreach {
      chunkWithTweet: ChunkWithTweet =>
        println(chunkWithTweet.tweet)
        println(OpenKoreanTextProcessor
            .tokenize(chunkWithTweet.tweet)
            .mkString(" "))

        println(chunkWithTweet.chunk + ": " +
            tokenize(chunkWithTweet.chunk).mkString(" "))
        println()
    }

  }
}
Example 17
Source File: RpcChannelImpl.scala    From finagle-protobuf   with Apache License 2.0 5 votes vote down vote up
package com.twitter.finagle.protobuf.rpc.impl

import java.net.InetSocketAddress
import com.google.protobuf.Descriptors.MethodDescriptor
import com.google.protobuf.RpcCallback
import com.google.protobuf.Message
import com.google.protobuf.RpcChannel
import com.google.protobuf.RpcController
import com.google.protobuf.Service
import java.util.logging.Logger
import com.twitter.util.Duration
import com.twitter.util.FuturePool
import com.twitter.finagle.builder.ClientBuilder
import java.util.concurrent.ExecutorService
import com.twitter.finagle.protobuf.rpc.RpcControllerWithOnFailureCallback
import com.twitter.finagle.protobuf.rpc.channel.ProtoBufCodec
import com.twitter.finagle.ChannelClosedException
import com.twitter.finagle.protobuf.rpc.Util
import com.twitter.finagle.protobuf.rpc.ExceptionResponseHandler

/**
 * Protobuf `RpcChannel` backed by a finagle client.
 *
 * Requests are sent as (methodName, message) pairs; responses are dispatched to the
 * caller's `RpcCallback` on the supplied executor. Failed calls are reported through
 * the controller, which is expected to be a `RpcControllerWithOnFailureCallback`.
 */
class RpcChannelImpl(cb: ClientBuilder[(String, Message), (String, Message), Any, Any, Any], s: Service, handler: ExceptionResponseHandler[Message], executorService: ExecutorService) extends RpcChannel {

  private val log = Logger.getLogger(getClass.toString)

  // Pool used to run response handling off the finagle event thread.
  private val futurePool = FuturePool(executorService)

  // Finagle client built with the protobuf codec for the target service.
  private val client: com.twitter.finagle.Service[(String, Message), (String, Message)] = cb
    .codec(new ProtoBufCodec(s))
    .unsafeBuild()

  def callMethod(m: MethodDescriptor, controller: RpcController,
                 request: Message, responsePrototype: Message,
                 done: RpcCallback[Message]): Unit = {
    // retries is a workaround for ChannelClosedException raised when servers shut down.
    val retries = 3

    callMethod(m, controller, request, responsePrototype, done, retries)
  }

  /**
   * Sends the request, retrying up to `retries` times on ChannelClosedException;
   * any other failure (or exhausted retries) is reported via the controller.
   */
  def callMethod(m: MethodDescriptor, controller: RpcController,
                 request: Message, responsePrototype: Message,
                 done: RpcCallback[Message], retries: Int): Unit = {

    Util.log("Request", m.getName(), request)
    val req = (m.getName(), request)

    client(req) onSuccess { result =>
      Util.log("Response", m.getName(), result._2)
      // Hand the response off to the executor pool so the callback never runs
      // on the finagle I/O thread.
      futurePool {
        handle(done, controller, result._2)
      }
    } onFailure { e =>
      log.warning(s"#callMethod# Failed. ${e.getMessage}")
      e match {
        // Channel closed (e.g. server shutting down): retry while budget remains.
        case _: ChannelClosedException if retries > 1 =>
          log.warning("#callMethod# Retrying.")
          callMethod(m, controller, request, responsePrototype, done, retries - 1)
        case _ =>
          fail(controller, e)
      }
    }
  }

  /**
   * Routes a response: errors recognised by the handler are surfaced as failures
   * on the controller, everything else goes to the success callback.
   */
  def handle(done: RpcCallback[Message], controller: RpcController, m: Message): Unit = {
    if (handler.canHandle(m)) {
      controller.asInstanceOf[RpcControllerWithOnFailureCallback].setFailed(handler.handle(m))
    } else {
      done.run(m)
    }
  }

  /** Releases the underlying finagle client. */
  def release(): Unit = {
    client.close()
  }

  // Single place for the controller downcast used by the failure paths.
  private def fail(controller: RpcController, e: Throwable): Unit =
    controller.asInstanceOf[RpcControllerWithOnFailureCallback].setFailed(e)
}
Example 18
Source File: RpcServerImpl.scala — from the finagle-protobuf project (Apache License 2.0), rated 5 votes.
package com.twitter.finagle.protobuf.rpc.impl

import com.twitter.finagle.protobuf.rpc.channel.ProtoBufCodec
import com.twitter.finagle.protobuf.rpc.{RpcServer, Util}
import com.twitter.util._
import com.twitter.util.Duration
import com.twitter.util.FuturePool
import com.twitter.finagle.builder.{Server, ServerBuilder, ServerConfig}
import java.net.InetSocketAddress
import java.util.logging.Logger
import scala.None
import java.util.concurrent.Executors
import java.util.concurrent.ExecutorService
import com.google.common.base.Preconditions
import com.twitter.finagle.protobuf.rpc.ServiceExceptionHandler
import com.google.protobuf.DynamicMessage
import com.google.protobuf.DynamicMessage.Builder
import com.google.protobuf._
import com.google.protobuf.Descriptors._
import com.twitter.util.Promise

/**
 * Finagle-backed protobuf RPC server: binds the given service on `port`,
 * dispatching incoming (methodName, message) requests through a ServiceDispatcher
 * that executes on the caller-provided executor.
 */
class RpcServerImpl(sb: ServerBuilder[(String, Message), (String, Message), Any, Any, Any], port: Int, service: Service, handler: ServiceExceptionHandler[Message], executorService: ExecutorService) extends RpcServer {

  private val logger = Logger.getLogger(getClass.toString)

  // Fail fast on missing collaborators before any resources are bound.
  Preconditions.checkNotNull(executorService)
  Preconditions.checkNotNull(handler)

  // Pool on which service method invocations are executed.
  private val dispatchPool = new ExecutorServiceFuturePool(executorService)

  // Build and start the server immediately as part of construction.
  private val server: Server =
    ServerBuilder.safeBuild(
      ServiceDispatcher(service, handler, dispatchPool),
      sb.codec(new ProtoBufCodec(service))
        .name(getClass().getName())
        .bindTo(new InetSocketAddress(port)))

  /** Drains and closes the server, waiting at most `d`. */
  def close(d: Duration) = server.close(d)
}

/**
 * Finagle service that dispatches (methodName, message) requests to a protobuf
 * service implementation on the given future pool, answering with a Promise
 * completed from the service's RpcCallback.
 */
class ServiceDispatcher(service: com.google.protobuf.Service, handler: ServiceExceptionHandler[Message], futurePool: FuturePool) extends com.twitter.finagle.Service[(String, Message), (String, Message)] {

  private val log = Logger.getLogger(getClass.toString)

  def apply(request: (String, Message)) = {

    val methodName = request._1
    val reqMessage = request._2

    Util.log("Request", methodName, reqMessage)
    val m = service.getDescriptorForType().findMethodByName(methodName)
    if (m == null) {
      // The codec already resolved this name, so a miss here is a programmer error.
      throw new java.lang.AssertionError("Should never happen, we already decoded " + methodName)
    }

    val promise = new Promise[(String, Message)]()

    // dispatch to the service method
    val task = () => {
      import scala.util.control.NonFatal
      try {
        service.callMethod(m, null, reqMessage, new RpcCallback[Message]() {

          def run(msg: Message) = {
            Util.log("Response", methodName, msg)
            promise.setValue((methodName, msg))
          }

        })
      } catch {
        // NonFatal so OutOfMemoryError and friends still propagate.
        case NonFatal(e) =>
          log.warning(s"#apply# Exception: ${e.getMessage}")
          e match {
            case re: RuntimeException if handler.canHandle(re) =>
              promise.setValue((methodName, handler.handle(re, constructEmptyResponseMessage(m))))
            case _ =>
              // BUG FIX: previously an unhandled exception left the promise
              // incomplete forever, hanging the client; fail it explicitly.
              promise.setException(e)
          }
      }
    }
    futurePool(task())
    promise
  }

  /** Builds an empty message of the method's declared output type. */
  def constructEmptyResponseMessage(m: MethodDescriptor): Message = {
    val outputType = m.getOutputType()
    DynamicMessage.newBuilder(outputType).build()
  }
}

object ServiceDispatcher {
  // Factory so call sites can omit `new`.
  def apply(service: com.google.protobuf.Service, handler: ServiceExceptionHandler[Message], futurePool: FuturePool): ServiceDispatcher = {
    new ServiceDispatcher(service, handler, futurePool)
  }
}
Example 19
Source File: PDCacheInvalidateListener.scala — from the tispark project (Apache License 2.0), rated 5 votes.
package com.pingcap.tispark.listener

import java.util.logging.Logger

import com.pingcap.tispark.accumulator.CacheInvalidateAccumulator
import com.pingcap.tispark.handler.CacheInvalidateEventHandler
import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd}

/**
 * Spark listener that, at the end of each job, drains cache-invalidation events
 * accumulated by executors and forwards them to the PD cache handler on the driver.
 */
class PDCacheInvalidateListener(
    accumulator: CacheInvalidateAccumulator,
    handler: CacheInvalidateEventHandler)
    extends SparkListener {
  private final val logger: Logger = Logger.getLogger(getClass.getName)

  override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
    val hasPendingWork = accumulator != null && !accumulator.isZero && handler != null
    if (hasPendingWork) {
      synchronized {
        // Re-check under the lock: another job-end may have drained the accumulator.
        if (!accumulator.isZero) {
          val pendingEvents = accumulator.value
          logger.info(
            s"Receiving ${pendingEvents.size} cache invalidation request(s) from job ${jobEnd.jobId} at driver. " +
              s"This indicates that there's exception(s) thrown in executor node when communicating with " +
              s"TiKV, checkout executors' log for more information.")
          pendingEvents.foreach(handler.handle)
        }
      }
    }
  }
}
Example 20
Source File: ParquetLogMute.scala — from the eel-sdk project (Apache License 2.0), rated 5 votes.
package io.eels.component.parquet.util

import java.util.logging.Logger

import scala.util.Try

object ParquetLogMute {

  /**
   * Silences Parquet's java.util.logging output: detaches all handlers from the
   * Parquet package loggers (old and new package names), stops them delegating to
   * parents, and strips handlers from the root logger. Every step is best-effort.
   */
  def apply(): Unit = {
    // Touch the Parquet Log classes first so their static initialisers (which
    // install handlers) run before we remove anything. Missing classes are fine.
    Seq("org.apache.parquet.Log", "parquet.Log").foreach { className =>
      Try(Class.forName(className))
    }

    Seq("org.apache.parquet", "parquet").foreach { packageName =>
      Try {
        val packageLogger = Logger.getLogger(packageName)
        packageLogger.getHandlers.foreach(packageLogger.removeHandler)
        packageLogger.setUseParentHandlers(false)
      }
    }

    // Finally detach every handler from the root logger.
    val rootLogger = Logger.getLogger("")
    rootLogger.getHandlers.foreach(rootLogger.removeHandler)
  }
}
Example 21
Source File: DatabaseInitializer.scala — from the reliable-http-client project (Apache License 2.0), rated 5 votes.
package rhttpc.transport.amqpjdbc.slick.helpers

import java.io.PrintWriter
import java.sql.Connection
import java.util.logging.Logger
import javax.sql.DataSource

import com.typesafe.config.Config
import org.flywaydb.core.Flyway
import slick.jdbc.JdbcBackend

import scala.concurrent.ExecutionContext

/** Runs Flyway migrations against the wrapped Slick database. */
class DatabaseInitializer(db: JdbcBackend.Database) {

  /**
   * Ensures the schema is migrated, then returns the database.
   * NOTE(review): the implicit ExecutionContext is currently unused here —
   * presumably kept for source compatibility; confirm before removing.
   */
  def initDatabase()(implicit executionContext: ExecutionContext) = {
    migrateIfNeeded(db)
    db
  }

  // Configures Flyway over the shared-connection DataSource and applies pending
  // migrations; baselineOnMigrate lets it adopt a pre-existing non-empty schema.
  private def migrateIfNeeded(database: JdbcBackend.Database) = {
    val flyway = Flyway.configure
      .dataSource(new DatabaseDataSource(database))
      .baselineOnMigrate(true)
      .load
    flyway.migrate
  }
}

object DatabaseInitializer {
  // Factory: builds the Slick database from the "db" section of the given config.
  def apply(config: Config) = {
    val db = JdbcBackend.Database.forConfig("db", config)
    new DatabaseInitializer(db)
  }
}

// Adapts a Slick database to javax.sql.DataSource by handing out one shared
// connection. NOTE(review): every caller receives the SAME connection, and the
// credentialed overload ignores its arguments — presumably intentional for this
// migration/test helper; confirm before reusing elsewhere.
class DatabaseDataSource(db: JdbcBackend.Database) extends DataSource {
  // Single session connection, created eagerly and never closed by this class.
  private val conn = db.createSession().conn

  override def getConnection: Connection = conn
  // Username/password are ignored; the shared connection is returned regardless.
  override def getConnection(username: String, password: String): Connection = conn
  override def unwrap[T](iface: Class[T]): T = conn.unwrap(iface)
  override def isWrapperFor(iface: Class[_]): Boolean = conn.isWrapperFor(iface)

  // Remaining DataSource operations are unsupported: ??? throws NotImplementedError.
  override def setLogWriter(out: PrintWriter): Unit = ???
  override def getLoginTimeout: Int = ???
  override def setLoginTimeout(seconds: Int): Unit = ???
  override def getParentLogger: Logger = ???
  override def getLogWriter: PrintWriter = ???
}
Example 22
Source File: SlickJdbcMigration.scala — from the reliable-http-client project (Apache License 2.0), rated 5 votes.
package rhttpc.transport.amqpjdbc.slick

import java.io.PrintWriter
import java.lang.reflect.{InvocationHandler, Method, Proxy}
import java.sql.Connection
import java.util.logging.Logger

import javax.sql.DataSource
import org.flywaydb.core.api.migration.{BaseJavaMigration, Context}
import slick.jdbc.JdbcProfile

import scala.concurrent.Await
import scala.concurrent.duration._

/**
 * Base for Flyway Java migrations written as Slick actions. Subclasses provide
 * the profile and the DBIO to run; `migrate` executes it on Flyway's own
 * connection (wrapped so Slick cannot close it out from under Flyway).
 */
trait SlickJdbcMigration extends BaseJavaMigration {

  protected val profile: JdbcProfile

  import profile.api._

  /** The Slick action(s) making up this migration step. */
  def migrateActions: DBIOAction[Any, NoStream, _ <: Effect]

  override final def migrate(context: Context): Unit = {
    val database = Database.forDataSource(new AlwaysUsingSameConnectionDataSource(context.getConnection), None)
    try {
      // `10.minutes` instead of `10 minute`: avoids postfix-operator syntax,
      // which requires scala.language.postfixOps.
      Await.result(database.run(migrateActions), 10.minutes)
    } finally {
      // FIX: close the Database so its async executor (thread pool) is released;
      // the wrapped DataSource suppresses close() on Flyway's connection itself.
      database.close()
    }
  }

}

/**
 * DataSource that always hands out the same underlying JDBC connection, wrapped
 * in a dynamic proxy that turns close() into a no-op so pooling-unaware callers
 * (e.g. Slick) cannot close a connection owned by someone else (e.g. Flyway).
 */
class AlwaysUsingSameConnectionDataSource(conn: Connection) extends DataSource {
  // Proxy forwarding every Connection method to `conn` except close().
  private val notClosingConnection = Proxy.newProxyInstance(
    ClassLoader.getSystemClassLoader,
    Array[Class[_]](classOf[Connection]),
    SuppressCloseHandler
  ).asInstanceOf[Connection]

  object SuppressCloseHandler extends InvocationHandler {
    override def invoke(proxy: AnyRef, method: Method, args: Array[AnyRef]): AnyRef = {
      if (method.getName != "close") {
        try {
          method.invoke(conn, args : _*)
        } catch {
          // FIX: Method.invoke wraps exceptions from the target in
          // InvocationTargetException; unwrap so callers catch the original
          // SQLException instead of a reflection wrapper.
          case e: java.lang.reflect.InvocationTargetException => throw e.getCause
        }
      } else {
        null // swallow close(): the connection's owner will close it
      }
    }
  }

  override def getConnection: Connection = notClosingConnection
  // Credentials are ignored; the one wrapped connection is returned regardless.
  override def getConnection(username: String, password: String): Connection = notClosingConnection
  override def unwrap[T](iface: Class[T]): T = conn.unwrap(iface)
  override def isWrapperFor(iface: Class[_]): Boolean = conn.isWrapperFor(iface)

  // Remaining DataSource operations are unsupported: ??? throws NotImplementedError.
  override def setLogWriter(out: PrintWriter): Unit = ???
  override def getLoginTimeout: Int = ???
  override def setLoginTimeout(seconds: Int): Unit = ???
  override def getParentLogger: Logger = ???
  override def getLogWriter: PrintWriter = ???
}
Example 23
Source File: HelloWorldQueueMDB.scala — from the jboss-wildfly-test project (Apache License 2.0), rated 5 votes.
package mdb

import java.util.logging.Logger
import javax.jms.{ MessageListener, Message, TextMessage }
import javax.ejb.{ MessageDriven, ActivationConfigProperty }

/**
 * Message-driven bean consuming from the HELLOWORLDMDBQueue JMS queue with
 * auto-acknowledge; logs text messages at INFO and anything else at WARNING.
 */
@MessageDriven(name = "HelloWorldQueueMDB", activationConfig = Array(
  new ActivationConfigProperty(propertyName = "destination", propertyValue = "HELLOWORLDMDBQueue"),
  new ActivationConfigProperty(propertyName = "destinationType", propertyValue = "javax.jms.Queue"),
  new ActivationConfigProperty(propertyName = "acknowledgeMode", propertyValue = "Auto-acknowledge")
))
class HelloWorldQueueMDB extends MessageListener {
  // Logger named after the concrete class.
  val logger = Logger.getLogger(this.getClass.getName)

  override def onMessage(message: Message): Unit = {
    message match {
      // ASCII `=>` instead of Unicode `⇒` (deprecated since Scala 2.13),
      // and string interpolation instead of concatenation.
      case msg: TextMessage =>
        logger.info(s"Received Message from queue: ${msg.getText}")
      case msg =>
        logger.warning(s"Message of wrong type: ${msg.getClass.getName}")
    }
  }
}