java.util.concurrent.Executor Scala Examples

The following examples show how to use java.util.concurrent.Executor from Scala. Each example is taken from an open-source project; the source file, project, and license are listed above the code, and you can follow them back to the original source.
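Most of the examples below follow one of two patterns: adapting a java.util.concurrent.Executor into a scala.concurrent.ExecutionContext so that Futures can run on it, or passing an Executor to a Java callback API and completing a Promise from the callback. As a minimal sketch of the first pattern (the object name and pool size here are illustrative, not taken from any of the projects below):

import java.util.concurrent.{Executor, Executors}

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}

object ExecutorQuickStart extends App {
  // newFixedThreadPool returns an ExecutorService, which is a java.util.concurrent.Executor.
  val pool = Executors.newFixedThreadPool(2)
  val executor: Executor = pool

  // Any Executor can back a Scala ExecutionContext.
  implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(executor)

  // Futures created with this implicit ExecutionContext run on the wrapped pool.
  val f: Future[String] = Future(s"ran on ${Thread.currentThread().getName}")
  f.foreach(println)

  Await.ready(f, 5.seconds)
  pool.shutdown() // the pool's threads are non-daemon, so release them to let the JVM exit
}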
Example 1
Source File: FutureConversions.scala    From quill   with Apache License 2.0
package io.getquill.context.cassandra.util

import java.util.concurrent.Executor

import com.google.common.util.concurrent.ListenableFuture

import scala.concurrent.{ ExecutionContext, Future, Promise }
import scala.util.Try

object FutureConversions {

  implicit class ListenableFutureConverter[A](val lf: ListenableFuture[A]) extends AnyVal {
    def asScala(implicit ec: ExecutionContext): Future[A] = {
      val promise = Promise[A]
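      // Complete the promise from a listener once the ListenableFuture finishes;
      // the ExecutionContext is cast to an Executor to run that listener.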
      lf.addListener(new Runnable {
        def run(): Unit = {
          promise.complete(Try(lf.get()))
          ()
        }
      }, ec.asInstanceOf[Executor])
      promise.future
    }

    def asScalaWithDefaultGlobal: Future[A] = {
      import scala.concurrent.ExecutionContext.Implicits.global
      asScala(global)
    }
  }

} 
Example 2
Source File: ExecutorExtension.scala    From kafka-connect-common   with Apache License 2.0
package com.datamountaineer.streamreactor.connect.concurrent

import java.util.concurrent.Executor

import scala.concurrent.{Future, Promise}

object ExecutorExtension {

  implicit class RunnableWrapper(val executor: Executor) extends AnyVal {
    def submit[T](thunk: => T): Future[T] = {
      val promise = Promise[T]()
      executor.execute(new Runnable {
        override def run(): Unit = {
          try {
            val t = thunk
            promise.success(t)
          } catch {
            case t: Throwable => promise.failure(t)
          }
        }
      })
      promise.future
    }
  }

} 
Example 3
Source File: NotifierSpec.scala    From cave   with MIT License
package worker

import java.util.concurrent.Executor

import akka.actor._
import akka.testkit.TestKit
import com.cave.metrics.data.{AlertJsonData, Check}
import org.scalatest.{BeforeAndAfterAll, WordSpecLike}
import worker.web.{BadStatus, NotificationSender}

import scala.concurrent.Future

object NotifierSpec extends AlertJsonData {

  object FakeNotificationSender extends NotificationSender {
    def send(check: Check)(implicit exec: Executor): Future[Boolean] =
      if (check.schedule.alert.description == AlertDescription) Future.successful(true)
      else if (check.schedule.alert.description == AlertFiveDescription) Future.successful(false)
      else Future.failed(BadStatus(401))

    def shutdown(): Unit = { }
  }

  def fakeNotifier(n: Check): Props = Props(new Notifier(n) {
    override def client = FakeNotificationSender
  })
}

class NotifierSpec extends TestKit(ActorSystem()) with WordSpecLike with BeforeAndAfterAll {

  import worker.NotifierSpec._

  override def afterAll() = {
    system.shutdown()
  }

  "A notifier" must {
    "send Done(true) when successful" in {
      val notifier = system.actorOf(Props(new StepParent(fakeNotifier(InsufficientOrders), testActor)), "successful")

      expectMsg(Notifier.Done(result = true))
      watch(notifier)
      expectTerminated(notifier)
    }

    "send Done(false) when unsuccessful" in {
      val notifier = system.actorOf(Props(new StepParent(fakeNotifier(InsufficientOrdersFive), testActor)), "unsuccessful")

      expectMsg(Notifier.Done(result = false))
      watch(notifier)
      expectTerminated(notifier)
    }

    "properly finish in case of error" in {
      val notifier = system.actorOf(Props(new StepParent(fakeNotifier(OrdersLessThanPredicted), testActor)), "error")

      watch(notifier)
      expectTerminated(notifier)
    }
  }
} 
Example 4
Source File: Checker.scala    From cave   with MIT License
package worker

import java.util.concurrent.Executor

import akka.actor.{Actor, ActorLogging, Status}
import akka.pattern.pipe
import com.cave.metrics.data._
import com.cave.metrics.data.evaluator.{CheckEvaluator, DataFetcher}
import init.Init

import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try

object Checker {
  type Result = Try[Boolean]

  case class Done(alarm: Result)
  case class Aborted(reason: String)
}

class Checker(check: Check) extends Actor with ActorLogging {

  implicit val exec = context.dispatcher.asInstanceOf[Executor with ExecutionContext]
  val evaluator = new CheckEvaluator(check)
  def fetcher = new DataFetcher(Init.influxClientFactory)

  this run check pipeTo self

  def receive = {
    case alarm: Checker.Result =>
      context.parent ! Checker.Done(alarm)
      stop()

    case x: Status.Failure =>
      context.parent ! Checker.Aborted(x.cause.getMessage)
      stop()
  }

  def stop(): Unit = {
    context stop self
  }

  private[worker] def run(check: Check)(implicit ec: ExecutionContext): Future[Try[Boolean]] = {
    val result = evaluator.evaluate(fetcher)
    result map { v =>
      log.warning("Result of evaluation: " + v)
    }
    result
  }
} 
Example 5
Source File: NotificationSender.scala    From cave   with MIT License
package worker.web

import java.util.concurrent.Executor

import com.cave.metrics.data.Check
import com.ning.http.client.AsyncHttpClient
import org.jboss.netty.handler.codec.http.HttpHeaders.Names._
import play.api.libs.json.{JsValue, Json}
import worker.converter.ConverterFactory

import scala.concurrent._

trait NotificationSender {
  def send(notification: Check)(implicit exec: Executor): Future[Boolean]
  def shutdown(): Unit
}

case class BadStatus(status: Int) extends RuntimeException

class AsyncNotificationSender(converterFactory: ConverterFactory) extends NotificationSender {

  private val client = new AsyncHttpClient

  override def send(notification: Check)(implicit exec: Executor): Future[Boolean] = {

    def sendJson(url: String, json: JsValue): Future[Boolean] = {
      val f = client.preparePost(url)
        .addHeader(CONTENT_TYPE, "application/json")
        .setBody(Json.stringify(json))
        .execute()

      val p = Promise[Boolean]()

      f.addListener(new Runnable {

        override def run(): Unit = {
          val response = f.get
          if (response.getStatusCode < 400) p.success(true)
          else p.failure(BadStatus(response.getStatusCode))
        }
      }, exec)
      p.future
    }

    val url = notification.schedule.notificationUrl
    converterFactory.getConverter(url).convert(notification) match {
      case Some(json) =>
        println("JSON: " + json)
        sendJson(url, json)
      case None =>
        println("Failed to convert notification to JSON. Entity was " + notification)
        Future.successful(false)
    }
  }

  override def shutdown(): Unit = client.close()
} 
Example 6
Source File: Scheduler.scala    From cave   with MIT License
package actors

import java.util.concurrent.{Executor, TimeUnit}

import akka.actor.{Actor, ActorLogging}
import akka.pattern.ask
import akka.util.Timeout
import com.cave.metrics.data.evaluator.AlertParser
import com.cave.metrics.data.{Check, Schedule}
import init.{AwsWrapper, Init}
import org.joda.time.format.ISODateTimeFormat
import org.joda.time.{Minutes, LocalTime, DateTime, DateTimeZone}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object Scheduler {
  object DoWork
  object Die
  case class NotificationUrlChange(newUrl: String)
}
class Scheduler(schedule: Schedule, awsWrapper: AwsWrapper) extends Actor with ActorLogging with AlertParser {

  private[actors] def leader = Init.leader
  var notificationUrl: String = schedule.notificationUrl
  implicit val timeout = Timeout(2, TimeUnit.SECONDS)

  val (waitTime, period) = getSchedule(schedule.alert.period)

  val Formatter = ISODateTimeFormat.dateTimeNoMillis()

  implicit val executor = context.dispatcher.asInstanceOf[Executor with ExecutionContext]
  private val queueCheckSchedule = context.system.scheduler.schedule(waitTime, period, self, Scheduler.DoWork)

  override def receive = {
    case Scheduler.DoWork =>
      leader ? Leadership.IsLeader onComplete {
        case scala.util.Success(imLeader: Boolean) =>
          if (imLeader) {
            awsWrapper.sendMessage(Check(Schedule(schedule.orgName, schedule.teamName, schedule.clusterName, notificationUrl, schedule.alert), now()))
          }

        case scala.util.Success(e) =>
          log.error("Unexpected result returned by the leader actor: " + e)

        case scala.util.Failure(t) =>
          log.error("Failed to query the leader actor, error was " + t)
      }


    case Scheduler.NotificationUrlChange(url) =>
      log.debug(s"Updating the notification URL, from $notificationUrl to $url.")
      notificationUrl = url

    case Scheduler.Die =>
      context stop self
  }

  override def postStop(): Unit = queueCheckSchedule.cancel()

  
  private[actors] def getSchedule(alertPeriod: String): (FiniteDuration, FiniteDuration) =
    parseAll(duration, alertPeriod) match {
      case Success(p, _) => (0.minutes, p)

      case NoSuccess(_, message) =>
        parseAll(daily, alertPeriod) match {
          case Success(time, _) => (getWait(nowLocal(), time), 1.day)

          case NoSuccess(_, message2) =>
            sys.error(s"Unexpected alert period $alertPeriod. Not a duration ($message) and not a daily scheduler ($message2).")
        }
    }

  private[actors] def getWait(now: LocalTime, until: LocalTime): FiniteDuration = {
    val wait = Minutes.minutesBetween(now, until).getMinutes
    val minutes = if (wait < 0) 1440 + wait else wait
    minutes.minutes
  }
} 
Example 7
Source File: HttpUtils.scala    From reactive-programming   with Apache License 2.0
package com.github.dnvriend

import java.util.concurrent.Executor

import com.ning.http.client.AsyncHttpClient
import org.jsoup.Jsoup
import org.jsoup.nodes.Document
import org.jsoup.select.Elements
import scala.collection.JavaConverters._
import scala.concurrent.{ Promise, ExecutionContext, Future }
import scala.util.Try

object HttpClient {
  def apply(): AsyncHttpClient = new AsyncHttpClient

  implicit class HttpClientToScala(client: AsyncHttpClient) {
    def get(url: String)(implicit ec: Executor): Future[String] = {
      val f = client.prepareGet(url).execute
      val p = Promise[String]()
      f.addListener(new Runnable {
        override def run(): Unit = {
          val response = f.get
          if (response.getStatusCode < 400)
            p.success(response.getResponseBodyExcerpt(131072))
          else p.failure(new RuntimeException(s"BadStatus: ${response.getStatusCode}"))
        }
      }, ec)
      p.future
    }
  }
}

object HttpUtils {
  implicit class FindLinksFuture(self: Future[String])(implicit ec: ExecutionContext) {
    def links: Future[Option[Iterator[String]]] =
      self.map(body ⇒ findLinks(body))
  }

  def findLinks(body: String): Option[Iterator[String]] =
    Try(Jsoup.parse(body)).map { (document: Document) ⇒
      val links: Elements = document.select("a[href]")
      for (link ← links.iterator().asScala; if link.absUrl("href").startsWith("http://")) yield link.absUrl("href")
    }.toOption
} 
Example 8
Source File: ExecutionContextScheduler.scala    From reactor-scala-extensions   with Apache License 2.0
package reactor.core.scala.scheduler

import java.util.concurrent.Executor

import reactor.core.Disposable
import reactor.core.scheduler.Scheduler.Worker
import reactor.core.scheduler.{Scheduler, Schedulers}

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, ExecutionContextExecutorService}

class ExecutionContextScheduler private(val scheduler: Scheduler) extends Scheduler {
  override def schedule(task: Runnable): Disposable = scheduler.schedule(task)

  override def createWorker(): Worker = scheduler.createWorker()
}


object ExecutionContextScheduler {
  def apply(executionContext: ExecutionContext): ExecutionContextScheduler = {
    executionContext match {
      case eces: ExecutionContextExecutorService => new ExecutionContextScheduler(Schedulers.fromExecutorService(eces))
      case ece: ExecutionContextExecutor => new ExecutionContextScheduler(Schedulers.fromExecutor(ece))
      case _ => new ExecutionContextScheduler(Schedulers.fromExecutor(new Executor {
        override def execute(command: Runnable): Unit = executionContext.execute(command)
      }))
    }
  }
} 
Example 9
Source File: Session.scala    From aecor   with MIT License
package akka.persistence.cassandra
import java.util.concurrent.Executor

import cats.data.Kleisli
import cats.effect.{ Async, ContextShift }
import com.datastax.driver.core.{ ResultSet, TypeCodec, Session => DatastaxSession }

import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal

trait Session[F[_]] {
  def execute(query: String): F[ResultSet]
  def registerCodec[A](codec: TypeCodec[A]): F[Unit]
}

object Session {
  type Init[F[_]] = Kleisli[F, Session[F], Unit]
  def Init[F[_]](f: Session[F] => F[Unit]): Init[F] = Kleisli(f)
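  // An Executor that runs each task on the calling thread, so the driver's
  // completion listener fires without a thread hop.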
  private val immediateExecutor = new Executor {
    override def execute(command: Runnable): Unit =
      command.run()
  }

  private val immediateExecutionContext = ExecutionContext.fromExecutor(immediateExecutor)

  def apply[F[_]](datastaxSession: DatastaxSession)(implicit F: Async[F],
                                                    contextShift: ContextShift[F]): Session[F] =
    new Session[F] {
      final override def execute(query: String): F[ResultSet] =
        contextShift.evalOn(immediateExecutionContext) {
          F.async { cb =>
            val future = datastaxSession.executeAsync(query)
            val runnable = new Runnable {
              override def run(): Unit =
                try {
                  cb(Right(future.get()))
                } catch {
                  case NonFatal(e) =>
                    cb(Left(e))
                }
            }
            future.addListener(runnable, immediateExecutor)
          }
        }
      override def registerCodec[A](codec: TypeCodec[A]): F[Unit] =
        F.delay {
          datastaxSession.getCluster.getConfiguration.getCodecRegistry.register(codec)
          ()
        }
    }
} 
Example 10
Source File: Writer.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.example.querydb

//#writer
import java.lang.{ Long => JLong }

import akka.actor.ActorRef

import com.datastax.driver.core._
import com.rbmhtechnology.eventuate.EventsourcedWriter

import scala.concurrent.Future
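// Note: the enclosing Writer class definition is elided in this excerpt; only its readSuccess override appears below.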


  override def readSuccess(result: Long): Option[Long] =
    Some(result + 1L)
}

object Writer {
  import java.util.concurrent.Executor

  import com.google.common.util.concurrent.ListenableFuture

  import scala.concurrent.{ ExecutionContext, Promise }
  import scala.language.implicitConversions
  import scala.util.Try

  implicit class ListenableFutureConverter[A](lf: ListenableFuture[A])(implicit executionContext: ExecutionContext) {

    def toFuture: Future[A] = {
      val promise = Promise[A]
      lf.addListener(new Runnable {
        def run() = promise.complete(Try(lf.get()))
      }, executionContext.asInstanceOf[Executor])
      promise.future
    }
  }
}
//# 
Example 11
Source File: ExecutorProxy.scala    From ncdbg   with BSD 3-Clause "New" or "Revised" License
package com.programmaticallyspeaking.ncd.infra

import java.lang.reflect.{InvocationHandler, InvocationTargetException, Method}
import java.util.concurrent.Executor
import java.util.concurrent.atomic.AtomicInteger

import org.slf4s.Logging

import scala.concurrent.duration._
import scala.concurrent.{Await, Future, Promise, TimeoutException}
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}

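// Wraps an object in a dynamic proxy that executes every method call on the given Executor.
// Methods declared to return a Future complete asynchronously; all other methods block the
// caller (up to 30 seconds) until the result is available.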
class ExecutorProxy(executor: Executor) {
  import scala.collection.JavaConverters._

  def createFor[A <: AnyRef : ClassTag](instance: A): A = {
    val clazz = implicitly[ClassTag[A]].runtimeClass
    java.lang.reflect.Proxy.newProxyInstance(clazz.getClassLoader, Array(clazz), new Handler(instance)).asInstanceOf[A]
  }

  class Handler(instance: AnyRef) extends InvocationHandler with Logging {
    import scala.concurrent.ExecutionContext.Implicits._
    private val className = instance.getClass.getName

    private val idGen = new AtomicInteger(0)
    private var awaitingCalls = Map[Int, String]()

    override def invoke(proxy: scala.Any, method: Method, args: Array[AnyRef]): AnyRef = {
      val resultPromise = Promise[AnyRef]()

      val before = System.nanoTime()

      val id = idGen.getAndIncrement()
      val argss = Option(args).getOrElse(Array.empty)
      val desc = s"$method(${argss.mkString(", ")})[$id]"
      log.trace(s"Waiting to execute: $desc")

      // Snapshot of waiting calls prior to submitting to the executor
      val waitingCallsAtEntry = awaitingCalls

      executor.execute(() => {
        log.trace(s"Execute: $id")
        Try(method.invoke(instance, args: _*)) match {
          case Success(f: Future[_]) => resultPromise.completeWith(f.asInstanceOf[Future[AnyRef]])
          case Success(result) => resultPromise.success(result)
          case Failure(t: InvocationTargetException) => resultPromise.failure(t.getCause)
          case Failure(t) => resultPromise.failure(t)
        }
      })

      resultPromise.future.onComplete { _ =>
        val methodName = method.getName
        val millis = (System.nanoTime() - before).nanos.toMillis
        log.trace(s"Elapsed time for $className.$methodName = $millis ms")
      }

      if (classOf[Future[_]].isAssignableFrom(method.getReturnType)) resultPromise.future
      else {
        // Update with this call
        awaitingCalls += (id -> desc)
        //TODO: Configurable timeout
        try Await.result(resultPromise.future, 30.seconds) catch {
          case _: TimeoutException =>
            val other = waitingCallsAtEntry.values
            val sb = new StringBuilder(s"Timed out waiting for '$desc' to complete. Calls at entry: ${other.mkString("'", "', '", "'")}. Stack:\n")
            appendStackTraces(sb)
            log.debug(sb.toString())
            throw new TimeoutException(s"Timed out waiting for '$desc' to complete.")
        } finally {
          // Done with this call
          awaitingCalls -= id
          log.trace(s"Done: $id")
        }
      }
    }

    private def appendStackTraces(sb: StringBuilder): Unit = {
      Thread.getAllStackTraces.asScala.foreach { tup =>
        sb.append("\n> THREAD ").append(tup._1.getName).append("\n")
        tup._2.foreach(ste => sb.append("  ").append(ste).append("\n"))
      }
    }
  }
} 
Example 12
Source File: CaffeineImplForAsyncCache.scala    From graphcool-framework   with Apache License 2.0
package cool.graph.cache

import java.util.concurrent.{CompletableFuture, Executor}
import java.util.function.BiFunction

import com.github.benmanes.caffeine.cache.{AsyncCacheLoader, Caffeine, AsyncLoadingCache => AsyncCaffeineCache, Cache => CaffeineCache}
import scala.compat.java8.FunctionConverters._
import scala.compat.java8.FutureConverters._

import scala.concurrent.{ExecutionContext, Future}

object CaffeineImplForAsyncCache {
  def lfu[K, V >: Null](initialCapacity: Int, maxCapacity: Int)(implicit ec: ExecutionContext): AsyncCache[K, V] = {
    val caffeineCache = Caffeine
      .newBuilder()
      .initialCapacity(initialCapacity)
      .maximumSize(maxCapacity)
      .asInstanceOf[Caffeine[K, V]]
      .buildAsync[K, V](dummyLoader[K, V])
    CaffeineImplForAsyncCache(caffeineCache)
  }

  //LfuCache requires a loader function on creation - this will not be used.
  private def dummyLoader[K, V] = new AsyncCacheLoader[K, V] {
    def asyncLoad(k: K, e: Executor) =
      Future.failed[V](new RuntimeException("Dummy loader should not be used by LfuCache")).toJava.toCompletableFuture
  }
}

case class CaffeineImplForAsyncCache[K, V >: Null](underlying: AsyncCaffeineCache[K, V])(implicit ec: ExecutionContext) extends AsyncCache[K, V] {

  override def get(key: K): Future[Option[V]] = {
    val cacheEntry = underlying.getIfPresent(key)
    if (cacheEntry != null) {
      cacheEntry.toScala.map(Some(_))
    } else {
      Future.successful(None)
    }
  }

  override def put(key: K, value: Future[Option[V]]): Unit = {
    val asCompletableNullableFuture = value.map(_.orNull).toJava.toCompletableFuture
    underlying.put(key, asCompletableNullableFuture)
  }

  override def remove(key: K): Unit = underlying.synchronous().invalidate(key)

  override def getOrUpdate(key: K, fn: () => Future[V]): Future[V] = {
    val javaFn = toCaffeineMappingFunction[K, V](fn)
    underlying.get(key, javaFn).toScala
  }

  override def getOrUpdateOpt(key: K, fn: () => Future[Option[V]]): Future[Option[V]] = {
    val nullable: () => Future[V] = () => fn().map(_.orNull)
    val javaFn                    = toCaffeineMappingFunction[K, V](nullable)
    val cacheEntry                = underlying.get(key, javaFn)
    if (cacheEntry != null) {
      cacheEntry.toScala.map(Option(_))
    } else {
      Future.successful(None)
    }
  }

  private def toCaffeineMappingFunction[K, V](genValue: () ⇒ Future[V]): BiFunction[K, Executor, CompletableFuture[V]] = {
    asJavaBiFunction[K, Executor, CompletableFuture[V]]((_, _) ⇒ genValue().toJava.toCompletableFuture)
  }
} 
Example 13
Source File: DefaultThreadPool.scala    From scrapy4s   with GNU Lesser General Public License v3.0
package com.scrapy4s.thread

import java.util.concurrent.{BlockingQueue, CountDownLatch, Executor, TimeUnit}
import java.util.concurrent.atomic.AtomicBoolean

import com.scrapy4s.exception.QueueTimeOutException
import org.slf4j.LoggerFactory


class DefaultThreadPool(
                name: String,
                threadCount: Int,
                queue: BlockingQueue[Runnable]
                ) extends ThreadPool {

  val logger = LoggerFactory.getLogger(classOf[DefaultThreadPool])
  val startFlag = new AtomicBoolean(false)
  val countDownLatch = new CountDownLatch(threadCount)
  init()

  private def init(): Unit = {
    if (startFlag.compareAndSet(false, true)) {
      (1 to threadCount).foreach(i => {
        val thread = new Thread(() => {task()})
        thread.setName(s"pool-$name-$i")
        thread.start()
      })
    } else {
      throw new Exception("The thread pool has already been started")
    }
  }



  def task() = {
    try {
      while (startFlag.get()) {
        try {
          val runnable = queue.poll(1, TimeUnit.SECONDS)
          if (runnable == null) {
            throw new QueueTimeOutException()
          }
          runnable.run()
        } catch {
          case _: QueueTimeOutException =>
          case e: Exception =>
            logger.error("thread pool exception", e)
        }
      }
    } finally {
      countDownLatch.countDown()
    }
  }

  override def shutdown() = {
    startFlag.compareAndSet(true, false)
  }

  override def waitForStop() = {
    countDownLatch.await()
  }

  override def waitForStop(timeout: Long, unit: TimeUnit): Boolean = {
    countDownLatch.await(timeout, unit)
  }

  override def execute(command: Runnable) = {
    if (command == null) throw new NullPointerException()
    queue.put(command)
  }
} 
Example 14
Source File: AccessTokenCallCredentials.scala    From grpc-scala-microservice-kit   with Apache License 2.0
package mu.node.echod.grpc

import java.util.concurrent.Executor

import io.grpc.{Attributes, CallCredentials, Metadata, MethodDescriptor}

class AccessTokenCallCredentials(accessToken: String) extends CallCredentials {

  override def applyRequestMetadata(method: MethodDescriptor[_, _],
                                    attributes: Attributes,
                                    appExecutor: Executor,
                                    applier: CallCredentials.MetadataApplier): Unit = {
    appExecutor.execute(new Runnable {
      override def run(): Unit = {
        val headers = new Metadata()
        val authorizationHeaderKey =
          Metadata.Key.of("Authorization", Metadata.ASCII_STRING_MARSHALLER)
        headers.put(authorizationHeaderKey, "Bearer " + accessToken)
        applier.apply(headers)
      }
    })
  }
} 
Example 15
Source File: DI_05_LifecycleHooks.scala    From airframe   with Apache License 2.0
package wvlet.airframe.examples.di

import java.util.concurrent.{Executor, ExecutorService, Executors}

import wvlet.log.LogSupport


object DI_05_LifecycleHooks extends App {
  import wvlet.airframe._

  trait MyApp extends LogSupport {
    private val threadManager = bind[ExecutorService] { Executors.newCachedThreadPool() }
      .onStart { x => info(f"Started a thread manager: ${x.hashCode()}%x") }
      .onShutdown { x =>
        info(f"Shutting down the thread manager: ${x.hashCode()}%x")
        x.shutdown()
      }
  }

  val d = newDesign

  d.build[MyApp] { app =>
    // Thread manager will start here
  }
  // Thread manager will be shutdown here.
} 
Example 16
Source File: CacheAsyncConnection.scala    From play-ws   with Apache License 2.0
package play.api.libs.ws.ahc.cache

import java.util.concurrent.Callable
import java.util.concurrent.CompletableFuture
import java.util.concurrent.Executor
import java.util.concurrent.TimeUnit
import java.util.function.BiConsumer

import play.shaded.ahc.org.asynchttpclient.AsyncHandler
import play.shaded.ahc.org.asynchttpclient.ListenableFuture
import play.shaded.ahc.org.asynchttpclient.Request
import org.slf4j.LoggerFactory
import play.shaded.ahc.org.asynchttpclient.handler.ProgressAsyncHandler


class CacheFuture[T](handler: AsyncHandler[T]) extends ListenableFuture[T] {

  private var innerFuture: java.util.concurrent.CompletableFuture[T] = _

  def setInnerFuture(future: java.util.concurrent.CompletableFuture[T]) = {
    innerFuture = future
  }

  override def isDone: Boolean = innerFuture.isDone

  override def done(): Unit = {}

  override def touch(): Unit = {}

  override def abort(t: Throwable): Unit = {
    innerFuture.completeExceptionally(t)
  }

  override def isCancelled: Boolean = {
    innerFuture.isCancelled
  }

  override def get(): T = {
    get(1000L, java.util.concurrent.TimeUnit.MILLISECONDS)
  }

  override def get(timeout: Long, unit: TimeUnit): T = {
    innerFuture.get(timeout, unit)
  }

  override def cancel(mayInterruptIfRunning: Boolean): Boolean = {
    innerFuture.cancel(mayInterruptIfRunning)
  }

  override def toString: String = {
    s"CacheFuture"
  }

  override def toCompletableFuture: CompletableFuture[T] = innerFuture

  override def addListener(listener: Runnable, executor: Executor): ListenableFuture[T] = {
    innerFuture.whenCompleteAsync(
      new BiConsumer[T, Throwable]() {
        override def accept(t: T, u: Throwable): Unit = listener.run()
      },
      executor
    )
    this
  }
} 
Example 17
Source File: HttpApp.scala    From darwin   with Apache License 2.0
package it.agilelab.darwin.server.rest

import java.util.concurrent.Executor

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.RouteConcatenation
import akka.stream.ActorMaterializer
import com.typesafe.config.Config
import it.agilelab.darwin.common.Logging

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor}

class HttpApp(config: Config, services: Service*)
             (implicit system: ActorSystem, materializer: ActorMaterializer) extends Logging {
  def run(): Unit = {
    val interface = config.getString("interface")
    val port = config.getInt("port")


    val route = RouteConcatenation.concat(services.map(_.route): _*)

    log.info("Starting http server on {}:{}", interface, port)
    val eventuallyBinding = Http().bindAndHandle(route, interface, port)
    val binding = Await.result(eventuallyBinding, Duration.Inf)
    log.info("Started http server on {}:{}", interface, port)

    val shutdownThread = new Thread(new Runnable {
      override def run(): Unit = {
        implicit val ec: ExecutionContext = newSameThreadExecutor
        log.info("Received shutdown hook")

        val termination = for {
          _ <- binding.unbind()
          terminated <- system.terminate()
        } yield terminated

        Await.ready(termination, Duration.Inf)
        log.info("Shutdown")
      }
    })

    shutdownThread.setName("shutdown")

    Runtime.getRuntime.addShutdownHook(shutdownThread)

    log.info("registered shutdown hook")
  }


  private def newSameThreadExecutor: ExecutionContextExecutor = ExecutionContext.fromExecutor(new Executor {
    override def execute(command: Runnable): Unit = command.run()
  })
}

object HttpApp {
  def apply(config:Config, services: Service*)(implicit system: ActorSystem, materializer: ActorMaterializer): HttpApp =
    new HttpApp(config, services: _*)
}