scala.concurrent.Await Scala Examples

The following examples show how to use scala.concurrent.Await. Each example is drawn from an open-source project; the source file, project, and license are noted above each listing.
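For orientation, here is a minimal, self-contained sketch (not taken from any project below; names and timeouts are illustrative) of the two blocking calls the examples rely on: Await.result, which returns the Future's value or rethrows its failure, and Await.ready, which only waits for completion.

import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

object AwaitSketch extends App {
  val f: Future[Int] = Future { 21 * 2 }

  // Await.result blocks for at most the given timeout and returns the value;
  // it throws a TimeoutException on timeout, or rethrows the Future's failure.
  val n: Int = Await.result(f, 5.seconds)
  println(s"result: $n")

  // Await.ready blocks until the Future is completed (successfully or not)
  // and returns the Future itself rather than its value.
  Await.ready(f, 5.seconds)
}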
Example 1
Source File: TimeLimitedFutureSpec.scala    From gfc-concurrent   with Apache License 2.0
package com.gilt.gfc.concurrent

import java.util.concurrent.TimeoutException
import scala.concurrent.{ Future, Await }
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import org.scalatest.{WordSpec, Matchers}

class TimeLimitedFutureSpec extends WordSpec with Matchers {
  import TimeLimitedFutureSpec._

  "RichFuture" when {
    import ScalaFutures._

    "waiting for a result to happen" should {
      "return the completed original Future if it completes before the given timeout" in {
        val now = System.currentTimeMillis
        val future: Future[String] = (Future { Thread.sleep(1000); "Here I am" }).withTimeout(Duration(5, "seconds"))
        val msg: String = Await.result(future, Duration(10, "seconds"))
        val elapsed = (System.currentTimeMillis - now)
        msg should equal ("Here I am")
        elapsed should be (2000L +- 1000L)
      }

      "return the failure of the original Future if it fails before the given timeout" in {
        val now = System.currentTimeMillis
        val future = (Future { Thread.sleep(1000); throw new NullPointerException("That hurts!") }).withTimeout(Duration(5, "seconds"))
        a [NullPointerException] should be thrownBy { Await.result(future, Duration(10, "seconds")) }
        val elapsed = (System.currentTimeMillis - now)
        elapsed should be (2000L +- 1000L)
      }

      "return the timeout of the original Future if it had one and it went off and was shorter than the given one" in {
        val now = System.currentTimeMillis
        val timingOutEarlier = Timeouts.timeout(Duration(1, "seconds"))
        val future = timingOutEarlier.withTimeout(Duration(5, "seconds"))
        a [TimeoutException] should be thrownBy { Await.result(future, Duration(10, "seconds")) }
        val elapsed: Long = (System.currentTimeMillis - now)
        elapsed should be >= 500L
        elapsed should be <= 4000L
      }

      "return the timeout if the original Future does not timeout of its own" in {
        val now = System.currentTimeMillis
        val timingOutLater = Timeouts.timeout(Duration(3, "seconds"))
        val future = timingOutLater.withTimeout(Duration(1, "seconds"))
        a [TimeoutException] should be thrownBy  { Await.result(future, Duration(10, "seconds")) }
        val elapsed: Long = (System.currentTimeMillis - now)
        elapsed should be >= 1000L
        elapsed should be <= 2500L
      }
    }

    // an example of how it could be used
    "used in our most common use case" should {
      "fit nicely" in {
        val call: Future[String] = svcCall(1000).withTimeout(Duration(5000, "milliseconds")).recover {
          case _: TimeoutException => "recover.timeout"
          case other => s"recover.${other.getMessage}"
        }
        Await.result(call, Duration(10, "seconds")) should be ("data-1000")

        val call2: Future[String] = svcCall(5000).withTimeout(Duration(1000, "milliseconds")).recover {
          case _: TimeoutException => "recover.timeout"
          case other => s"recover.${other.getMessage}"
        }
        Await.result(call2, Duration(10, "seconds")) should be ("recover.timeout")
      }
    }
  }
}

object TimeLimitedFutureSpec {
  def svcCall(latency: Long): Future[String] = Future { Thread.sleep(latency); s"data-${latency}" }
} 
Example 2
Source File: KVStore.scala    From Freasy-Monad   with MIT License
package examples.cats

import cats.Id
import cats.free.Free
import freasymonad.cats.free

import scala.collection.mutable
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

@free trait KVStore {
  type KVStoreF[A] = Free[GrammarADT, A]
  sealed trait GrammarADT[A]

  def put[T](key: String, value: T): KVStoreF[Unit]
  def get[T](key: String): KVStoreF[Option[T]]
  def delete(key: String): KVStoreF[Unit]

  def update[T](key: String, f: T => T): KVStoreF[Unit] =
    for {
      vMaybe <- get[T](key)
      _      <- vMaybe.map(v => put[T](key, f(v))).getOrElse(Free.pure(()))
    } yield ()
}

object Main extends App {
  import KVStore.ops._

  def program: KVStoreF[Option[Int]] =
    for {
      _ <- put("wild-cats", 2)
      _ <- update[Int]("wild-cats", _ + 12)
      _ <- put("tame-cats", 5)
      n <- get[Int]("wild-cats")
      _ <- delete("tame-cats")
    } yield n

  val idInterpreter = new KVStore.Interp[Id] {
    val kvs = mutable.Map.empty[String, Any]
    def get[T](key: String): Id[Option[T]] = {
      println(s"get($key)")
      kvs.get(key).map(_.asInstanceOf[T])
    }
    def put[T](key: String, value: T): Id[Unit] = {
      println(s"put($key, $value)")
      kvs(key) = value
    }
    def delete(key: String): Id[Unit] = {
      println(s"delete($key)")
      kvs.remove(key)
    }
  }
  val resId: Id[Option[Int]] = idInterpreter.run(program)

  import cats.implicits.catsStdInstancesForFuture
  import scala.concurrent.ExecutionContext.Implicits.global

  val futureInterpreter = new KVStore.Interp[Future] {
    val kvs = mutable.Map.empty[String, Any]
    def get[T](key: String): Future[Option[T]] = Future {
      println(s"get($key)")
      kvs.get(key).map(_.asInstanceOf[T])
    }
    def put[T](key: String, value: T): Future[Unit] = Future {
      println(s"put($key, $value)")
      kvs(key) = value
    }
    def delete(key: String): Future[Unit] = Future {
      println(s"delete($key)")
      kvs.remove(key)
    }
  }
  val resFuture: Future[Option[Int]] = futureInterpreter.run(program)
  Await.ready(resFuture, Duration.Inf)
} 
Example 3
Source File: DistShellAppMasterSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.examples.distributedshell

import scala.concurrent.Await
import scala.concurrent.duration.Duration

import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestProbe}
import org.scalatest.{BeforeAndAfter, Matchers, WordSpec}

import org.apache.gearpump.cluster.AppMasterToMaster.{GetAllWorkers, RegisterAppMaster, RequestResource}
import org.apache.gearpump.cluster.AppMasterToWorker.LaunchExecutor
import org.apache.gearpump.cluster.MasterToAppMaster.{AppMasterRegistered, ResourceAllocated, WorkerList}
import org.apache.gearpump.cluster._
import org.apache.gearpump.cluster.appmaster.{AppMasterRuntimeEnvironment, ApplicationRuntimeInfo}
import org.apache.gearpump.cluster.scheduler.{Relaxation, Resource, ResourceAllocation, ResourceRequest}
import org.apache.gearpump.cluster.worker.WorkerId
import org.apache.gearpump.util.ActorSystemBooter.RegisterActorSystem
import org.apache.gearpump.util.ActorUtil

class DistShellAppMasterSpec extends WordSpec with Matchers with BeforeAndAfter {
  implicit val system = ActorSystem("AppMasterSpec", TestUtil.DEFAULT_CONFIG)
  val mockMaster = TestProbe()(system)
  val mockWorker1 = TestProbe()(system)
  val masterProxy = mockMaster.ref
  val appId = 0
  val userName = "test"
  val masterExecutorId = 0
  val workerList = List(WorkerId(1, 0L), WorkerId(2, 0L), WorkerId(3, 0L))
  val resource = Resource(1)
  val appJar = None
  val appDescription = AppDescription("app0", classOf[DistShellAppMaster].getName, UserConfig.empty)

  "DistributedShell AppMaster" should {
    "launch one ShellTask on each worker" in {
      val appMasterInfo = ApplicationRuntimeInfo(appId, appName = appId.toString)
      val appMasterContext = AppMasterContext(appId, userName, resource, null, appJar, masterProxy)
      TestActorRef[DistShellAppMaster](
        AppMasterRuntimeEnvironment.props(List(masterProxy.path), appDescription,
          appMasterContext))
      mockMaster.expectMsgType[RegisterAppMaster]
      mockMaster.reply(AppMasterRegistered(appId))
      // The DistributedShell AppMaster asks the Master for the worker list.
      mockMaster.expectMsg(GetAllWorkers)
      mockMaster.reply(WorkerList(workerList))
      // After the worker list is ready, the DistributedShell AppMaster requests resources on each worker
      workerList.foreach { workerId =>
        mockMaster.expectMsg(RequestResource(appId, ResourceRequest(Resource(1), workerId,
          relaxation = Relaxation.SPECIFICWORKER)))
      }
      mockMaster.reply(ResourceAllocated(
        Array(ResourceAllocation(resource, mockWorker1.ref, WorkerId(1, 0L)))))
      mockWorker1.expectMsgClass(classOf[LaunchExecutor])
      mockWorker1.reply(RegisterActorSystem(ActorUtil.getSystemAddress(system).toString))
    }
  }

  after {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
} 
Example 4
Source File: WindowAverageProcessorSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.examples.state.processor

import java.time.Instant

import scala.concurrent.Await
import scala.concurrent.duration._

import akka.actor.ActorSystem
import akka.testkit.TestProbe
import com.twitter.algebird.AveragedValue
import org.mockito.Mockito._
import org.scalacheck.Gen
import org.scalatest.prop.PropertyChecks
import org.scalatest.{Matchers, PropSpec}

import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.MockUtil
import org.apache.gearpump.streaming.state.impl.{InMemoryCheckpointStoreFactory, PersistentStateConfig, WindowConfig}
import org.apache.gearpump.streaming.task.UpdateCheckpointClock
import org.apache.gearpump.streaming.transaction.api.CheckpointStoreFactory

class WindowAverageProcessorSpec extends PropSpec with PropertyChecks with Matchers {
  property("WindowAverageProcessor should update state") {

    implicit val system = ActorSystem("test")
    val longGen = Gen.chooseNum[Long](1, 1000)
    forAll(longGen, longGen) {
      (data: Long, num: Long) =>
        val taskContext = MockUtil.mockTaskContext

        val windowSize = num
        val windowStep = num

        val conf = UserConfig.empty
          .withBoolean(PersistentStateConfig.STATE_CHECKPOINT_ENABLE, true)
          .withLong(PersistentStateConfig.STATE_CHECKPOINT_INTERVAL_MS, num)
          .withValue[CheckpointStoreFactory](PersistentStateConfig.STATE_CHECKPOINT_STORE_FACTORY,
            new InMemoryCheckpointStoreFactory)
          .withValue(WindowConfig.NAME, WindowConfig(windowSize, windowStep))

        val windowAverage = new WindowAverageProcessor(taskContext, conf)

        val appMaster = TestProbe()(system)
        when(taskContext.appMaster).thenReturn(appMaster.ref)

        windowAverage.onStart(Instant.EPOCH)
        appMaster.expectMsg(UpdateCheckpointClock(taskContext.taskId, 0L))

        for (i <- 0L until num) {
          windowAverage.onNext(Message("" + data, i))
          windowAverage.getState.get shouldBe Some(AveragedValue(i + 1, data))
        }

        // Time to checkpoint
        windowAverage.onWatermarkProgress(Instant.ofEpochMilli(num))
        appMaster.expectMsg(UpdateCheckpointClock(taskContext.taskId, num))
    }

    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
} 
Example 5
Source File: CountProcessorSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.examples.state.processor

import java.time.Instant

import scala.concurrent.Await
import scala.concurrent.duration._

import akka.actor.ActorSystem
import akka.testkit.TestProbe
import org.mockito.Mockito._
import org.scalacheck.Gen
import org.scalatest.prop.PropertyChecks
import org.scalatest.{Matchers, PropSpec}

import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.MockUtil
import org.apache.gearpump.streaming.state.impl.{InMemoryCheckpointStoreFactory, PersistentStateConfig}
import org.apache.gearpump.streaming.task.UpdateCheckpointClock
import org.apache.gearpump.streaming.transaction.api.CheckpointStoreFactory

class CountProcessorSpec extends PropSpec with PropertyChecks with Matchers {

  property("CountProcessor should update state") {

    val taskContext = MockUtil.mockTaskContext

    implicit val system = ActorSystem("test")

    val longGen = Gen.chooseNum[Long](1, 1000)
    forAll(longGen) {
      (num: Long) =>

        val conf = UserConfig.empty
          .withBoolean(PersistentStateConfig.STATE_CHECKPOINT_ENABLE, true)
          .withLong(PersistentStateConfig.STATE_CHECKPOINT_INTERVAL_MS, num)
          .withValue[CheckpointStoreFactory](PersistentStateConfig.STATE_CHECKPOINT_STORE_FACTORY,
            new InMemoryCheckpointStoreFactory)

        val count = new CountProcessor(taskContext, conf)

        val appMaster = TestProbe()(system)
        when(taskContext.appMaster).thenReturn(appMaster.ref)

        count.onStart(Instant.EPOCH)
        appMaster.expectMsg(UpdateCheckpointClock(taskContext.taskId, 0L))

        for (i <- 0L to num) {
          count.onNext(Message("", i))
          count.getState.get shouldBe Some(i + 1)
        }

        // Time to checkpoint
        count.onWatermarkProgress(Instant.ofEpochMilli(num))
        // Only the state before checkpoint time is checkpointed
        appMaster.expectMsg(UpdateCheckpointClock(taskContext.taskId, num))
    }

    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
} 
Example 6
Source File: NumberGeneratorProcessorSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.examples.state.processor

import java.time.Instant

import org.apache.gearpump.streaming.source.Watermark

import scala.concurrent.Await
import scala.concurrent.duration.Duration

import akka.actor.ActorSystem
import akka.testkit.TestProbe
import org.mockito.Mockito._
import org.mockito.{Matchers => MockitoMatchers}
import org.scalatest.{Matchers, WordSpec}

import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.MockUtil

class NumberGeneratorProcessorSpec extends WordSpec with Matchers {
  "NumberGeneratorProcessor" should {
    "send random numbers" in {

      val taskContext = MockUtil.mockTaskContext

      implicit val system = ActorSystem("test")

      val mockTaskActor = TestProbe()

      // Mock self ActorRef
      when(taskContext.self).thenReturn(mockTaskActor.ref)

      val conf = UserConfig.empty
      val genNum = new NumberGeneratorProcessor(taskContext, conf)
      genNum.onStart(Instant.EPOCH)
      mockTaskActor.expectMsgType[Watermark]

      genNum.onNext(Message("next"))
      verify(taskContext).output(MockitoMatchers.any[Message])

      system.terminate()
      Await.result(system.whenTerminated, Duration.Inf)
    }
  }
} 
Example 7
Source File: SplitSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.examples.wordcount

import java.time.Instant

import akka.actor.ActorSystem
import org.apache.gearpump.Message
import akka.testkit.TestProbe
import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.streaming.MockUtil
import org.mockito.Mockito._
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class SplitSpec extends WordSpec with Matchers {

  "Split" should {
    "split the text and deliver to next task" in {

      val taskContext = MockUtil.mockTaskContext

      implicit val system: ActorSystem = ActorSystem("test", TestUtil.DEFAULT_CONFIG)

      val mockTaskActor = TestProbe()

      when(taskContext.self).thenReturn(mockTaskActor.ref)

      val split = new Split
      split.open(taskContext, Instant.now())
      split.read() shouldBe a[Message]
      split.close()
      split.getWatermark
      system.terminate()
      Await.result(system.whenTerminated, Duration.Inf)
    }
  }
} 
Example 8
Source File: DistServiceAppMasterSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.experiments.distributeservice

import scala.concurrent.Await
import scala.concurrent.duration._

import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestProbe}
import org.scalatest.{BeforeAndAfter, Matchers, WordSpec}

import org.apache.gearpump.cluster.AppMasterToMaster.{GetAllWorkers, RegisterAppMaster, RequestResource}
import org.apache.gearpump.cluster.AppMasterToWorker.LaunchExecutor
import org.apache.gearpump.cluster.MasterToAppMaster.{AppMasterRegistered, ResourceAllocated, WorkerList}
import org.apache.gearpump.cluster.appmaster.AppMasterRuntimeEnvironment
import org.apache.gearpump.cluster.scheduler.{Relaxation, Resource, ResourceAllocation, ResourceRequest}
import org.apache.gearpump.cluster.worker.WorkerId
import org.apache.gearpump.cluster.{AppDescription, AppMasterContext, TestUtil, UserConfig}
import org.apache.gearpump.experiments.distributeservice.DistServiceAppMaster.{FileContainer, GetFileContainer}
import org.apache.gearpump.util.ActorSystemBooter.RegisterActorSystem
import org.apache.gearpump.util.ActorUtil

class DistServiceAppMasterSpec extends WordSpec with Matchers with BeforeAndAfter {
  implicit val system = ActorSystem("AppMasterSpec", TestUtil.DEFAULT_CONFIG)
  val mockMaster = TestProbe()(system)
  val mockWorker1 = TestProbe()(system)
  val client = TestProbe()(system)
  val masterProxy = mockMaster.ref
  val appId = 0
  val userName = "test"
  val masterExecutorId = 0
  val workerList = List(WorkerId(1, 0L), WorkerId(2, 0L), WorkerId(3, 0L))
  val resource = Resource(1)
  val appJar = None
  val appDescription = AppDescription("app0", classOf[DistServiceAppMaster].getName,
    UserConfig.empty)

  "DistService AppMaster" should {
    "responsable for service distributing" in {
      val appMasterContext = AppMasterContext(appId, userName, resource, null, appJar, masterProxy)
      TestActorRef[DistServiceAppMaster](
        AppMasterRuntimeEnvironment.props(List(masterProxy.path), appDescription,
          appMasterContext))
      val registerAppMaster = mockMaster.receiveOne(15.seconds)
      assert(registerAppMaster.isInstanceOf[RegisterAppMaster])

      val appMaster = registerAppMaster.asInstanceOf[RegisterAppMaster].appMaster
      mockMaster.reply(AppMasterRegistered(appId))
      // The DistService AppMaster asks the Master for the worker list
      mockMaster.expectMsg(GetAllWorkers)
      mockMaster.reply(WorkerList(workerList))
      // After the worker list is ready, the DistService AppMaster requests resources on each worker
      workerList.foreach { workerId =>
        mockMaster.expectMsg(RequestResource(appId, ResourceRequest(Resource(1), workerId,
          relaxation = Relaxation.SPECIFICWORKER)))
      }
      mockMaster.reply(ResourceAllocated(Array(ResourceAllocation(resource, mockWorker1.ref,
        WorkerId(1, 0L)))))
      mockWorker1.expectMsgClass(classOf[LaunchExecutor])
      mockWorker1.reply(RegisterActorSystem(ActorUtil.getSystemAddress(system).toString))

      appMaster.tell(GetFileContainer, client.ref)
      client.expectMsgClass(15.seconds, classOf[FileContainer])
    }
  }

  after {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
} 
Example 9
Source File: RestServices.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.services

import scala.concurrent.Await
import scala.concurrent.duration._

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.{Route, _}
import akka.stream.ActorMaterializer
import akka.util.Timeout
import org.apache.commons.lang.exception.ExceptionUtils

import org.apache.gearpump.jarstore.JarStoreClient
import org.apache.gearpump.util.{Constants, LogUtil}
// NOTE: This cannot be removed!!!
import org.apache.gearpump.services.util.UpickleUtil._


class RestServices(master: ActorRef, mat: ActorMaterializer, system: ActorSystem)
  extends RouteService {

  private val LOG = LogUtil.getLogger(getClass)

  implicit val timeout = Constants.FUTURE_TIMEOUT

  private val config = system.settings.config

  private val jarStoreClient = new JarStoreClient(config, system)

  private val securityEnabled = config.getBoolean(
    Constants.GEARPUMP_UI_SECURITY_AUTHENTICATION_ENABLED)

  private val supervisorPath = system.settings.config.getString(
    Constants.GEARPUMP_SERVICE_SUPERVISOR_PATH)

  private val myExceptionHandler: ExceptionHandler = ExceptionHandler {
    case ex: Throwable => {
      extractUri { uri =>
        LOG.error(s"Request to $uri could not be handled normally", ex)
        complete(InternalServerError, ExceptionUtils.getStackTrace(ex))
      }
    }
  }

  // Make sure staticRoute is the final one, as it will try to look up the resource in the
  // local path if there is no match in previous routes
  private val static = new StaticService(system, supervisorPath).route

  def supervisor: ActorRef = {
    if (supervisorPath == null || supervisorPath.isEmpty()) {
      null
    } else {
      val actorRef = system.actorSelection(supervisorPath).resolveOne()
      Await.result(actorRef, new Timeout(Duration.create(5, "seconds")).duration)
    }
  }

  override def route: Route = {
    if (securityEnabled) {
      val security = new SecurityService(services, system)
      handleExceptions(myExceptionHandler) {
        security.route ~ static
      }
    } else {
      handleExceptions(myExceptionHandler) {
        services.route ~ static
      }
    }
  }

  private def services: RouteService = {

    val admin = new AdminService(system)
    val masterService = new MasterService(master, jarStoreClient, system)
    val worker = new WorkerService(master, system)
    val app = new AppMasterService(master, jarStoreClient, system)
    val sup = new SupervisorService(master, supervisor, system)

    new RouteService {
      override def route: Route = {
        admin.route ~ sup.route ~ masterService.route ~ worker.route ~ app.route
      }
    }
  }
} 
Example 10
Source File: MockOAuth2Server.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.services.security.oauth2

import scala.concurrent.{Await, Future}

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink

import org.apache.gearpump.util.Util
// NOTE: This cannot be removed!!
import org.apache.gearpump.services.util.UpickleUtil._


class MockOAuth2Server(
    actorSystem: ActorSystem,
    var requestHandler: HttpRequest => HttpResponse) {

  implicit val system: ActorSystem = actorSystem
  implicit val materializer = ActorMaterializer()
  implicit val ec = system.dispatcher

  private var _port: Int = 0
  private var bindingFuture: Future[ServerBinding] = null

  def port: Int = _port

  def start(): Unit = {
    _port = Util.findFreePort().get

    val serverSource = Http().bind(interface = "127.0.0.1", port = _port)
    bindingFuture = {
      serverSource.to(Sink.foreach { connection =>
        connection handleWithSyncHandler requestHandler
      }).run()
    }
  }

  def stop(): Unit = {
    import scala.concurrent.duration._
    Await.result(bindingFuture.map(_.unbind()), 120.seconds)
  }
} 
Example 11
Source File: AdminServiceSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.services

import scala.concurrent.Await
import scala.concurrent.duration._

import akka.actor.ActorSystem
import akka.http.scaladsl.testkit.{RouteTestTimeout, ScalatestRouteTest}
import com.typesafe.config.Config
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import org.apache.gearpump.cluster.TestUtil

// NOTE: This cannot be removed!!!
import org.apache.gearpump.services.util.UpickleUtil._

class AdminServiceSpec
  extends FlatSpec with ScalatestRouteTest with Matchers with BeforeAndAfterAll {

  override def testConfig: Config = TestUtil.DEFAULT_CONFIG

  implicit def actorSystem: ActorSystem = system

  it should "shutdown the ActorSystem when receiving terminate" in {
    val route = new AdminService(actorSystem).route
    implicit val customTimeout = RouteTestTimeout(15.seconds)
    (Post(s"/terminate") ~> route) ~> check {
      assert(status.intValue() == 404)
    }

    Await.result(actorSystem.whenTerminated, 20.seconds)

    // terminate should terminate current actor system
    assert(actorSystem.whenTerminated.isCompleted)
  }
} 
Example 12
Source File: Test6.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.akkastream.example

import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.scaladsl.Sink
import akka.stream.{ActorMaterializer, ActorMaterializerSettings}
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.akkastream.scaladsl.GearSource
import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption}
import org.apache.gearpump.streaming.dsl.scalaapi.CollectionDataSource
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._

import org.apache.gearpump.akkastream.scaladsl.Implicits._

object Test6 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override val options: Array[(String, CLIOption[Any])] = Array(
    "gearpump" -> CLIOption[Boolean]("<boolean>", required = false, defaultValue = Some(false))
  )

  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val config = parse(args)
    implicit val system = ActorSystem("Test6", akkaConf)
    implicit val materializer: ActorMaterializer = config.getBoolean("gearpump") match {
      case true =>
        GearpumpMaterializer()
      case false =>
        ActorMaterializer(
          ActorMaterializerSettings(system).withAutoFusing(false)
        )
    }
    val echo = system.actorOf(Props(Echo()))
    val sink = Sink.actorRef(echo, "COMPLETE")
    val sourceData = new CollectionDataSource(
      List(
        "this is a good start",
        "this is a good time",
        "time to start",
        "congratulations",
        "green plant",
        "blue sky")
    )
    val source = GearSource.from[String](sourceData)
    source.mapConcat({line =>
      line.split(" ").toList
    }).groupBy2(x => x)
      .map(word => (word, 1))
      .reduce({(a, b) =>
        (a._1, a._2 + b._2)
      })
      .log("word-count")
      .runWith(sink)

    Await.result(system.whenTerminated, 60.minutes)
  }

  case class Echo() extends Actor {
    def receive: Receive = {
      case any: AnyRef =>
        println("Confirm received: " + any)
    }
  }
  // scalastyle:on println
} 
Example 13
Source File: Test.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.akkastream.example

import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.scaladsl.{Sink, Source}
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.akkastream.graph.GraphPartitioner
import org.apache.gearpump.cluster.main.ArgumentsParser
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._

object Test extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override def main(akkaConf: Config, args: Array[String]): Unit = {
    implicit val system = ActorSystem("Test", akkaConf)
    implicit val materializer = GearpumpMaterializer(GraphPartitioner.AllRemoteStrategy)

    val echo = system.actorOf(Props(new Echo()))
    val sink = Sink.actorRef(echo, "COMPLETE")

    Source(
      List("red hat", "yellow sweater", "blue jack", "red apple", "green plant", "blue sky")
    ).filter(_.startsWith("red")).fold("Items:") {(a, b) =>
      a + "|" + b
    }.map("I want to order item: " + _).runWith(sink)

    Await.result(system.whenTerminated, 60.minutes)
  }

  class Echo extends Actor {
    def receive: Receive = {
      case any: AnyRef =>
        println("Confirm received: " + any)
    }
  }
  // scalastyle:on println
} 
Example 14
Source File: Test16.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.akkastream.example

import akka.actor.ActorSystem
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.akkastream.scaladsl.{GearSink, GearSource}
import org.apache.gearpump.cluster.main.ArgumentsParser
import org.apache.gearpump.streaming.dsl.scalaapi.{CollectionDataSource, LoggerSink}
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._


object Test16 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override def main(akkaConf: Config, args: Array[String]): Unit = {
    implicit val system = ActorSystem("Test16", akkaConf)
    implicit val materializer = GearpumpMaterializer()

    val sink = GearSink.to(new LoggerSink[String])
    val sourceData = new CollectionDataSource(
      List("red hat", "yellow sweater", "blue jack", "red apple", "green plant", "blue sky"))
    val source = GearSource.from[String](sourceData)
    source.filter(_.startsWith("red")).map("I want to order item: " + _).runWith(sink)

    Await.result(system.whenTerminated, 60.minutes)
  }
  // scalastyle:on println
} 
Example 15
Source File: Test12.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.akkastream.example

import akka.stream.{ClosedShape, UniformFanInShape}
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.cluster.main.ArgumentsParser
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.{Await, Future}
 

object Test12 extends AkkaApp with ArgumentsParser{
  // scalastyle:off println
  override def main(akkaConf: Config, args: Array[String]): Unit = {
    import akka.actor.ActorSystem
    import akka.stream.scaladsl._

    import scala.concurrent.duration._

    implicit val system = ActorSystem("Test12", akkaConfig)
    // implicit val materializer = ActorMaterializer(
    //   ActorMaterializerSettings(system).withAutoFusing(false)
    //   )
    implicit val materializer = GearpumpMaterializer()
    implicit val ec = system.dispatcher

    val pickMaxOfThree = GraphDSL.create() { implicit b =>
      import GraphDSL.Implicits._

      val zip1 = b.add(ZipWith[Int, Int, Int](math.max))
      val zip2 = b.add(ZipWith[Int, Int, Int](math.max))

      zip1.out ~> zip2.in0

      UniformFanInShape(zip2.out, zip1.in0, zip1.in1, zip2.in1)
    }

    val resultSink = Sink.head[Int]

    val g = RunnableGraph.fromGraph(GraphDSL.create(resultSink) { implicit b =>
      sink =>
        import GraphDSL.Implicits._

        // Importing the partial shape will return its shape (inlets & outlets)
        val pm3 = b.add(pickMaxOfThree)

        Source.single(1) ~> pm3.in(0)
        Source.single(2) ~> pm3.in(1)
        Source.single(3) ~> pm3.in(2)

        pm3.out ~> sink.in

        ClosedShape
    })

    val max: Future[Int] = g.run()
    max.map(x => println(s"maximum of three numbers : $x"))

    Await.result(system.whenTerminated, 60.minutes)
  }
  // scalastyle:on println
} 
Example 16
Source File: Test15.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.akkastream.example

import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl.{Balance, Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source}
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption}
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._


object Test15 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override val options: Array[(String, CLIOption[Any])] = Array(
    "gearpump" -> CLIOption[Boolean]("<boolean>", required = false, defaultValue = Some(false))
  )

  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val config = parse(args)
    implicit val system = ActorSystem("Test15", akkaConf)
    implicit val materializer: ActorMaterializer = config.getBoolean("gearpump") match {
      case true =>
        GearpumpMaterializer()
      case false =>
        ActorMaterializer(
          ActorMaterializerSettings(system).withAutoFusing(false)
        )
    }
    import akka.stream.scaladsl.GraphDSL.Implicits._
    RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
      val A = builder.add(Source.single(0)).out
      val B = builder.add(Broadcast[Int](2))
      val C = builder.add(Merge[Int](2).named("C"))
      val D = builder.add(Flow[Int].map(_ + 1).named("D"))
      val E = builder.add(Balance[Int](2).named("E"))
      val F = builder.add(Merge[Int](2).named("F"))
      val G = builder.add(Sink.foreach(println).named("G")).in

      C <~ F
      A ~> B ~> C ~> F
      B ~> D ~> E ~> F
      E ~> G

      ClosedShape
    }).run()

    Await.result(system.whenTerminated, 60.minutes)
  }
  // scalastyle:on println
} 
Example 17
Source File: Test7.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.akkastream.example

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Broadcast, Merge, Sink, Source}
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.cluster.main.ArgumentsParser
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._

object Test7 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override def main(akkaConf: Config, args: Array[String]): Unit = {
    implicit val system = ActorSystem("Test7", akkaConf)
    implicit val materializer = GearpumpMaterializer()
    implicit val ec = system.dispatcher
 
    val sourceA = Source(List(1))
    val sourceB = Source(List(2))
    val mergedSource = Source.combine(sourceA, sourceB)(Merge(_))

    val sinkA = Sink.foreach[Int](x => println(s"In SinkA : $x"))
    val sinkB = Sink.foreach[Int](x => println(s"In SinkB : $x"))
    val sink = Sink.combine(sinkA, sinkB)(Broadcast[Int](_))
    mergedSource.runWith(sink)

    Await.result(system.whenTerminated, 60.minutes)
  }
  // scalastyle:on println
} 
Example 18
Source File: Test5.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.akkastream.example

import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.ClosedShape
import akka.stream.scaladsl._
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.cluster.main.ArgumentsParser
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._


object Test5 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override def main(akkaConf: Config, args: Array[String]): Unit = {
    implicit val system = ActorSystem("Test5", akkaConf)
    implicit val materializer = GearpumpMaterializer()

    val echo = system.actorOf(Props(new Echo()))
    val source = Source(List(("male", "24"), ("female", "23")))
    val sink = Sink.actorRef(echo, "COMPLETE")

    RunnableGraph.fromGraph(
      GraphDSL.create() { implicit b =>
        import GraphDSL.Implicits._
        val unzip = b.add(Unzip[String, String]())
        val sink1 = Sink.actorRef(echo, "COMPLETE")
        val sink2 = Sink.actorRef(echo, "COMPLETE")
        source ~> unzip.in
        unzip.out0 ~> sink1
        unzip.out1 ~> sink2
        ClosedShape
      }
    ).run()

    Await.result(system.whenTerminated, 60.minutes)
  }

  class Echo extends Actor {
    def receive: Receive = {
      case any: AnyRef =>
        println("Confirm received: " + any)
    }
  }
  // scalastyle:on println
} 
Example 19
Source File: Test10.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.akkastream.example

import akka.NotUsed
import akka.stream.{ClosedShape, ThrottleMode}
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.cluster.main.ArgumentsParser
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._
 

object Test10 extends AkkaApp with ArgumentsParser {

  // scalastyle:off println
  override def main(akkaConf: Config, args: Array[String]): Unit = {
    import akka.actor.ActorSystem
    import akka.stream.scaladsl._

    implicit val system = ActorSystem("Test10", akkaConfig)
    implicit val materializer = GearpumpMaterializer()
    implicit val ec = system.dispatcher

    // conflate - folds fast upstream elements into a summary value while the
    // downstream is backpressured (here, slowed by the throttler on the other zip input)
    def stream(x: String) = Stream.continually(x)

    val sourceA = Source(stream("A"))
    val sourceB = Source(stream("B"))

    val throttler: Flow[String, String, NotUsed] =
      Flow[String].throttle(1, 1.second, 1, ThrottleMode.Shaping)
    val conflateFlow: Flow[String, String, NotUsed] =
      Flow[String].conflate((acc: String, x: String) => s"$acc::$x")

    val printFlow: Flow[(String, String), String, NotUsed] =
      Flow[(String, String)].map {
        x =>
          println(s" lengths are : ${x._1.length} and ${x._2.length}  ;  ${x._1} zip ${x._2}")
          x.toString
      }

    val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit b =>
      import GraphDSL.Implicits._

      val zipping = b.add(Zip[String, String]())

      sourceA ~> throttler ~> zipping.in0
      sourceB ~> conflateFlow ~> zipping.in1

      zipping.out ~> printFlow ~> Sink.ignore

      ClosedShape
    })

    graph.run()

    Await.result(system.whenTerminated, 60.minutes)
  }
  // scalastyle:on println
} 
Example 20
Source File: Test4.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.akkastream.example

import akka.actor.ActorSystem
import akka.stream.scaladsl.Source
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.akkastream.scaladsl.GearSink
import org.apache.gearpump.cluster.main.ArgumentsParser
import org.apache.gearpump.streaming.dsl.scalaapi.LoggerSink
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._


object Test4 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override def main(akkaConf: Config, args: Array[String]): Unit = {
    implicit val system = ActorSystem("Test4", akkaConf)
    implicit val materializer = GearpumpMaterializer()

    Source(
      List("red hat", "yellow sweater", "blue jack", "red apple", "green plant", "blue sky")
    ).filter(_.startsWith("red")).
      map("I want to order item: " + _).
      runWith(GearSink.to(new LoggerSink[String]))

    Await.result(system.whenTerminated, 60.minutes)
  }
  // scalastyle:on println
} 
Example 21
Source File: Test11.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.akkastream.example

import akka.NotUsed
import akka.stream.ClosedShape
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.cluster.main.ArgumentsParser
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._
 

object Test11 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override def main(akkaConf: Config, args: Array[String]): Unit = {
    import akka.actor.ActorSystem
    import akka.stream.scaladsl._

    implicit val system = ActorSystem("Test11", akkaConfig)
    implicit val materializer = GearpumpMaterializer()
    // implicit val materializer =
    //   ActorMaterializer(ActorMaterializerSettings(system).withAutoFusing(false))
    implicit val ec = system.dispatcher

    val g = RunnableGraph.fromGraph(GraphDSL.create() {
      implicit builder: GraphDSL.Builder[NotUsed] =>

      import GraphDSL.Implicits._
      val in = Source(1 to 10)
      val output: (Any) => Unit = any => {
        val s = s"**** $any"
        println(s)
      }
      val out = Sink.foreach(output)

      val broadcast = builder.add(Broadcast[Int](2))
      val merge = builder.add(Merge[Int](2))

      val f1, f2, f3, f4 = Flow[Int].map(_ + 10)

      in ~> f1 ~> broadcast ~> f2 ~> merge ~> f3 ~> out
      broadcast ~> f4 ~> merge

      ClosedShape
    })

    g.run()

    Await.result(system.whenTerminated, 60.minutes)
  }
  // scalastyle:on println
} 
Example 22
Source File: Test2.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.akkastream.example

import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.scaladsl._
import akka.stream.{ActorMaterializer, ClosedShape}
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.akkastream.scaladsl.{GearSink, GearSource}
import org.apache.gearpump.cluster.main.ArgumentsParser
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._


object Test2 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val config = parse(args)
    implicit val system = ActorSystem("Test2", akkaConf)
    val gearpumpMaterializer = GearpumpMaterializer()

    val echo = system.actorOf(Props(new Echo()))
    val source = GearSource.bridge[String, String]
    val sink = GearSink.bridge[String, String]

    val flow = Flow[String].filter(_.startsWith("red")).map("I want to order item: " + _)
    val (entry, exit) = flow.runWith(source, sink)(gearpumpMaterializer)

    val actorMaterializer = ActorMaterializer()

    val externalSource = Source(
      List("red hat", "yellow sweater", "blue jack", "red apple", "green plant", "blue sky")
    )
    val externalSink = Sink.actorRef(echo, "COMPLETE")

    RunnableGraph.fromGraph(
      GraphDSL.create() { implicit b =>
        import GraphDSL.Implicits._
        externalSource ~> Sink.fromSubscriber(entry)
        Source.fromPublisher(exit) ~> externalSink
        ClosedShape
      }
    ).run()(actorMaterializer)

    Await.result(system.whenTerminated, 60.minutes)
  }

  class Echo extends Actor {
    def receive: Receive = {
      case any: AnyRef =>
        println("Confirm received: " + any)
    }
  }
  // scalastyle:on println
} 
Example 23
Source File: Test3.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.akkastream.example

import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.{ActorMaterializer, ActorMaterializerSettings}
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.akkastream.scaladsl.GearSource
import akka.stream.scaladsl.Sink
import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption}
import org.apache.gearpump.streaming.dsl.scalaapi.CollectionDataSource
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._


object Test3 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override val options: Array[(String, CLIOption[Any])] = Array(
    "gearpump" -> CLIOption[Boolean]("<boolean>", required = false, defaultValue = Some(false))
  )

  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val config = parse(args)
    implicit val system = ActorSystem("Test3", akkaConf)
    implicit val materializer: ActorMaterializer = config.getBoolean("gearpump") match {
      case true =>
        GearpumpMaterializer()
      case false =>
        ActorMaterializer(
          ActorMaterializerSettings(system).withAutoFusing(false)
        )
    }
    val echo = system.actorOf(Props(new Echo()))
    val sink = Sink.actorRef(echo, "COMPLETE")
    val sourceData = new CollectionDataSource(
      List("red hat", "yellow sweater", "blue jack", "red apple", "green plant", "blue sky"))
    val source = GearSource.from[String](sourceData)
    source.filter(_.startsWith("red")).map("I want to order item: " + _).runWith(sink)

    Await.result(system.whenTerminated, 60.minutes)
  }

  class Echo extends Actor {
    def receive: Receive = {
      case any: AnyRef =>
        println("Confirm received: " + any)
    }
  }
  // scalastyle:on println
} 
Example 24
Source File: Test9.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.akkastream.example

import akka.NotUsed
import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.{ActorMaterializer, ActorMaterializerSettings, ClosedShape}
import akka.stream.scaladsl._
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption}
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._
 

object Test9 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override val options: Array[(String, CLIOption[Any])] = Array(
    "gearpump" -> CLIOption[Boolean]("<boolean>", required = false, defaultValue = Some(false))
  )

  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val config = parse(args)
    implicit val system = ActorSystem("Test9", akkaConf)
    implicit val materializer: ActorMaterializer = config.getBoolean("gearpump") match {
      case true =>
        GearpumpMaterializer()
      case false =>
        ActorMaterializer(
          ActorMaterializerSettings(system).withAutoFusing(false)
        )
    }
    implicit val ec = system.dispatcher

    val sinkActor = system.actorOf(Props(new SinkActor()))
    val source = Source((1 to 5))
    val sink = Sink.actorRef(sinkActor, "COMPLETE")
    val flowA: Flow[Int, Int, NotUsed] = Flow[Int].map {
      x => println(s"processing broadcasted element : $x in flowA"); x
    }
    val flowB: Flow[Int, Int, NotUsed] = Flow[Int].map {
      x => println(s"processing broadcasted element : $x in flowB"); x
    }

    val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit b =>
      import GraphDSL.Implicits._
      val broadcast = b.add(Broadcast[Int](2))
      val merge = b.add(Merge[Int](2))
      source ~> broadcast
      broadcast ~> flowA ~> merge
      broadcast ~> flowB ~> merge
      merge ~> sink
      ClosedShape
    })

    graph.run()

    Await.result(system.whenTerminated, 60.minutes)
  }

  class SinkActor extends Actor {
    def receive: Receive = {
      case any: AnyRef =>
        println("Confirm received: " + any)
    }
  }
  // scalastyle:on println
} 
Example 25
Source File: Test8.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.akkastream.example

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, ActorMaterializerSettings}
import akka.stream.scaladsl._
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption}
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
 

object Test8 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override val options: Array[(String, CLIOption[Any])] = Array(
    "gearpump" -> CLIOption[Boolean]("<boolean>", required = false, defaultValue = Some(false))
  )

  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val config = parse(args)
    implicit val system = ActorSystem("Test8", akkaConf)
    implicit val materializer: ActorMaterializer = config.getBoolean("gearpump") match {
      case true =>
        GearpumpMaterializer()
      case false =>
        ActorMaterializer(
          ActorMaterializerSettings(system).withAutoFusing(false)
        )
    }
    implicit val ec = system.dispatcher

    // Source gives 1 to 100 elements
    val source: Source[Int, NotUsed] = Source(Stream.from(1).take(100))
    val sink: Sink[Int, Future[Int]] = Sink.fold[Int, Int](0)(_ + _)

    val result: Future[Int] = source.runWith(sink)
    result.map(sum => {
      println(s"Sum of stream elements => $sum")
    })

    Await.result(system.whenTerminated, 60.minutes)
  }
  // scalastyle:on println
} 
Example 26
Source File: UIServiceSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.experiments.yarn.appmaster

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.TestProbe
import com.typesafe.config.ConfigFactory
import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.experiments.yarn.appmaster.UIServiceSpec.{Info, MockUI}
import org.apache.gearpump.transport.HostPort
import org.apache.gearpump.util.Constants
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class UIServiceSpec extends FlatSpec with Matchers with BeforeAndAfterAll {
  implicit var system: ActorSystem = null

  override def beforeAll(): Unit = {
    system = ActorSystem(getClass.getSimpleName, TestUtil.DEFAULT_CONFIG)
  }

  override def afterAll(): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }

  it should "start UI server correctly" in {
    val probe = TestProbe()
    val masters = List(
      HostPort("127.0.0.1", 3000),
      HostPort("127.0.0.1", 3001),
      HostPort("127.0.0.1", 3002)
    )
    val host = "local"
    val port = 8091

    val ui = system.actorOf(Props(new MockUI(masters, host, port, probe.ref)))

    probe.expectMsgPF() {
      case info: Info => {
        assert(info.masterHost == "127.0.0.1")
        assert(info.masterPort == 3000)
        val conf = ConfigFactory.parseFile(new java.io.File(info.configFile))
        assert(conf.getString(Constants.GEARPUMP_SERVICE_HOST) == host)
        assert(conf.getString(Constants.GEARPUMP_SERVICE_HTTP) == "8091")
        assert(conf.getString(Constants.NETTY_TCP_HOSTNAME) == host)
      }
    }

    system.stop(ui)
  }
}

object UIServiceSpec {

  case class Info(supervisor: String, masterHost: String, masterPort: Int, configFile: String)

  class MockUI(masters: List[HostPort], host: String, port: Int, probe: ActorRef)
    extends UIService(masters, host, port) {

    override def launch(
        supervisor: String, masterHost: String, masterPort: Int, configFile: String): Unit = {
      probe ! Info(supervisor, masterHost, masterPort, configFile)
    }
  }
} 
Example 27
Source File: StreamAppSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.dsl.scalaapi

import akka.actor.ActorSystem
import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.cluster.client.ClientContext
import org.apache.gearpump.streaming.dsl.scalaapi
import org.apache.gearpump.streaming.partitioner.PartitionerDescription
import org.apache.gearpump.streaming.source.DataSourceTask
import org.apache.gearpump.streaming.{ProcessorDescription, StreamApplication}
import org.apache.gearpump.util.Graph
import org.mockito.Mockito.when
import org.scalatest._
import org.scalatest.mock.MockitoSugar

import scala.concurrent.Await
import scala.concurrent.duration.Duration
class StreamAppSpec extends FlatSpec with Matchers with BeforeAndAfterAll with MockitoSugar {

  implicit var system: ActorSystem = _

  override def beforeAll(): Unit = {
    system = ActorSystem("test", TestUtil.DEFAULT_CONFIG)
  }

  override def afterAll(): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }

  it should "be able to generate multiple new streams" in {
    val context: ClientContext = mock[ClientContext]
    when(context.system).thenReturn(system)

    val dsl = StreamApp("dsl", context)
    dsl.source(List("A"), 2, "A") shouldBe a [scalaapi.Stream[_]]
    dsl.source(List("B"), 3, "B") shouldBe a [scalaapi.Stream[_]]

    val application = dsl.plan()
    application shouldBe a [StreamApplication]
    application.name shouldBe "dsl"
    val dag = application.userConfig
      .getValue[Graph[ProcessorDescription, PartitionerDescription]](StreamApplication.DAG).get
    dag.getVertices.size shouldBe 2
    dag.getVertices.foreach { processor =>
      processor.taskClass shouldBe classOf[DataSourceTask[_, _]].getName
      if (processor.description == "A") {
        processor.parallelism shouldBe 2
      } else if (processor.description == "B") {
        processor.parallelism shouldBe 3
      } else {
        fail(s"undefined source ${processor.description}")
      }
    }
  }
} 
Example 28
Source File: JarSchedulerSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.appmaster

import akka.actor.ActorSystem
import org.apache.gearpump.cluster.scheduler.{Resource, ResourceRequest}
import org.apache.gearpump.cluster.worker.WorkerId
import org.apache.gearpump.cluster.{AppJar, TestUtil}
import org.apache.gearpump.jarstore.FilePath
import org.apache.gearpump.streaming.partitioner.{HashPartitioner, Partitioner}
import org.apache.gearpump.streaming.appmaster.TaskSchedulerSpec.{TestTask1, TestTask2}
import org.apache.gearpump.streaming.task.TaskId
import org.apache.gearpump.streaming.{DAG, ProcessorDescription, _}
import org.apache.gearpump.util.Graph
import org.apache.gearpump.util.Graph._
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.{Await, Future}

class JarSchedulerSpec extends WordSpec with Matchers {
  val mockJar1 = AppJar("jar1", FilePath("path"))
  val mockJar2 = AppJar("jar2", FilePath("path"))
  val task1 = ProcessorDescription(id = 0, taskClass = classOf[TestTask1].getName, parallelism = 1,
    jar = mockJar1)
  val task2 = ProcessorDescription(id = 1, taskClass = classOf[TestTask2].getName, parallelism = 1,
    jar = mockJar1)
  val task3 = ProcessorDescription(id = 2, taskClass = classOf[TestTask2].getName, parallelism = 2,
    jar = mockJar2)
  val dag = DAG(Graph(task1 ~ Partitioner[HashPartitioner] ~> task2))

  import scala.concurrent.duration._

  "JarScheduler" should {
    "schedule tasks depends on app jar" in {
      val system = ActorSystem("JarSchedulerSpec")
      implicit val dispatcher = system.dispatcher
      val manager = new JarScheduler(0, "APP", TestUtil.DEFAULT_CONFIG, system)
      manager.setDag(dag, Future {
        0L
      })
      val requests = Array(ResourceRequest(Resource(2), WorkerId.unspecified))
      val result = Await.result(manager.getResourceRequestDetails(), 15.seconds)
      assert(result.length == 1)
      assert(result.head.jar == mockJar1)
      assert(result.head.requests.deep == requests.deep)

      val tasks = Await.result(manager.scheduleTask(mockJar1, WorkerId(0, 0L), 0,
        Resource(2)), 15.seconds)
      assert(tasks.contains(TaskId(0, 0)))
      assert(tasks.contains(TaskId(1, 0)))

      val newDag = replaceDAG(dag, 1, task3, 1)

      manager.setDag(newDag, Future {
        0
      })
      val requestDetails = Await.result(manager.getResourceRequestDetails().
        map(_.sortBy(_.jar.name)), 15.seconds)
      assert(requestDetails.length == 2)
      assert(requestDetails.last.jar == mockJar2)
      assert(requestDetails.last.requests.deep == requests.deep)

      system.terminate()
      Await.result(system.whenTerminated, Duration.Inf)
    }
  }

  def replaceDAG(
      dag: DAG, oldProcessorId: ProcessorId, newProcessor: ProcessorDescription, newVersion: Int)
    : DAG = {
    val oldProcessorLife = LifeTime(dag.processors(oldProcessorId).life.birth,
      newProcessor.life.birth)
    val newProcessorMap = dag.processors ++
      Map(oldProcessorId -> dag.processors(oldProcessorId).copy(life = oldProcessorLife),
        newProcessor.id -> newProcessor)
    val newGraph = dag.graph.subGraph(oldProcessorId).
      replaceVertex(oldProcessorId, newProcessor.id).addGraph(dag.graph)
    new DAG(newVersion, newProcessorMap, newGraph)
  }
} 
Example 29
Source File: DagManagerSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.appmaster

import akka.actor.{ActorSystem, Props}
import akka.testkit.TestProbe
import org.apache.gearpump.cluster.{TestUtil, UserConfig}
import org.apache.gearpump.streaming.partitioner.{HashPartitioner, Partitioner}
import org.apache.gearpump.streaming.appmaster.DagManager.{DAGOperationFailed, DAGOperationSuccess, GetLatestDAG, GetTaskLaunchData, LatestDAG, NewDAGDeployed, ReplaceProcessor, TaskLaunchData, WatchChange}
import org.apache.gearpump.streaming.task.{Subscriber, TaskActor}
import org.apache.gearpump.streaming._
import org.apache.gearpump.util.Graph
import org.apache.gearpump.util.Graph._
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class DagManagerSpec extends WordSpecLike with Matchers with BeforeAndAfterAll {

  val hash = Partitioner[HashPartitioner]
  val task1 = ProcessorDescription(id = 1, taskClass = classOf[TaskActor].getName, parallelism = 1)
  val task2 = ProcessorDescription(id = 2, taskClass = classOf[TaskActor].getName, parallelism = 1)
  val graph = Graph(task1 ~ hash ~> task2)
  val dag = DAG(graph)
  implicit var system: ActorSystem = null
  val appId = 0
  lazy val userConfig = UserConfig.empty.withValue(StreamApplication.DAG, graph)

  "DagManager" should {
    import org.apache.gearpump.streaming.appmaster.ClockServiceSpec.Store
    "maintain the dags properly" in {
      val store = new Store

      val dagManager = system.actorOf(Props(new DagManager(appId, userConfig, store, Some(dag))))
      val client = TestProbe()
      client.send(dagManager, GetLatestDAG)
      client.expectMsg(LatestDAG(dag))

      client.send(dagManager, GetTaskLaunchData(dag.version, task1.id, null))
      val task1LaunchData = TaskLaunchData(task1, Subscriber.of(task1.id, dag))
      client.expectMsg(task1LaunchData)

      val task2LaunchData = TaskLaunchData(task2, Subscriber.of(task2.id, dag))
      client.send(dagManager, GetTaskLaunchData(dag.version, task2.id, null))
      client.expectMsg(task2LaunchData)

      val watcher = TestProbe()
      client.send(dagManager, WatchChange(watcher.ref))
      val task3 = task2.copy(id = 3, life = LifeTime(100, Long.MaxValue))

      client.send(dagManager, ReplaceProcessor(task2.id, task3, inheritConf = false))
      client.expectMsg(DAGOperationSuccess)

      client.send(dagManager, GetLatestDAG)
      val newDag = client.expectMsgPF() {
        case LatestDAG(latestDag) => latestDag
      }
      assert(newDag.processors.contains(task3.id))
      watcher.expectMsgType[LatestDAG]

      val task4 = task3.copy(id = 4)
      client.send(dagManager, ReplaceProcessor(task3.id, task4, inheritConf = false))
      client.expectMsgType[DAGOperationFailed]

      client.send(dagManager, NewDAGDeployed(newDag.version))
      client.send(dagManager, ReplaceProcessor(task3.id, task4, inheritConf = false))
      client.expectMsg(DAGOperationSuccess)
    }

    "retrieve last stored dag properly" in {
      val store = new Store
      val newGraph = Graph(task1 ~ hash ~> task2)
      val newDag = DAG(newGraph)
      val dagManager = system.actorOf(Props(new DagManager(appId, userConfig, store, Some(newDag))))
      dagManager ! NewDAGDeployed(0)
      val client = TestProbe()
      client.send(dagManager, GetLatestDAG)
      client.expectMsgType[LatestDAG].dag shouldBe newDag
    }
  }

  override def afterAll(): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }

  override def beforeAll(): Unit = {
    this.system = ActorSystem("DagManagerSpec", TestUtil.DEFAULT_CONFIG)
  }
} 
Example 30
Source File: InMemoryAppStoreOnMasterSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.storage

import scala.concurrent.Await
import scala.concurrent.duration._

import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpec}

import org.apache.gearpump.cluster.{MasterHarness, MiniCluster}
import org.apache.gearpump.streaming.StreamingTestUtil
import org.apache.gearpump.util.Constants

class InMemoryAppStoreOnMasterSpec extends WordSpec with Matchers with BeforeAndAfterAll {
  implicit val timeout = Constants.FUTURE_TIMEOUT
  implicit val dispatcher = MasterHarness.cachedPool

  "InMemoryAppStoreOnMaster" should {
    "save and return the data properly" in {
      val appId = 0
      val miniCluster = new MiniCluster
      val master = miniCluster.mockMaster
      StreamingTestUtil.startAppMaster(miniCluster, appId)
      val store = new InMemoryAppStoreOnMaster(appId, master)

      // Give the remote store a moment to become ready; the test relies on
      // this fixed delay rather than an explicit readiness signal.
      Thread.sleep(500)

      store.put("String_type", "this is a string")
      store.put("Int_type", 1024)
      store.put("Tuple2_type", ("element1", 1024))

      val future1 = store.get("String_type").map { value =>
        value.asInstanceOf[String] should be("this is a string")
      }
      val future2 = store.get("Int_type").map { value => value.asInstanceOf[Int] should be(1024) }
      val future3 = store.get("Tuple2_type").map { value =>
        value.asInstanceOf[(String, Int)] should be(("element1", 1024))
      }
      val future4 = store.get("key").map { value => value.asInstanceOf[Object] should be(null) }
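      // Force each assertion future to complete before the test returns.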
      Await.result(future1, 15.seconds)
      Await.result(future2, 15.seconds)
      Await.result(future3, 15.seconds)
      Await.result(future4, 15.seconds)
      miniCluster.shutDown()
    }
  }
} 
Example 31
Source File: ExecutorSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.executor

import scala.concurrent.Await
import scala.concurrent.duration.Duration

import akka.actor.{ActorSystem, Props}
import akka.testkit.TestProbe
import org.mockito.Matchers._
import org.mockito.Mockito.{times, _}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import org.apache.gearpump.cluster.appmaster.WorkerInfo
import org.apache.gearpump.cluster.scheduler.Resource
import org.apache.gearpump.cluster.worker.WorkerId
import org.apache.gearpump.cluster.{ExecutorContext, TestUtil, UserConfig}
import org.apache.gearpump.streaming.AppMasterToExecutor._
import org.apache.gearpump.streaming.ExecutorToAppMaster.RegisterTask
import org.apache.gearpump.streaming.appmaster.TaskRegistry.TaskLocations
import org.apache.gearpump.streaming.executor.TaskLauncherSpec.MockTask
import org.apache.gearpump.streaming.task.{Subscriber, TaskId}
import org.apache.gearpump.streaming.{LifeTime, ProcessorDescription}
import org.apache.gearpump.transport.HostPort

class ExecutorSpec extends FlatSpec with Matchers with BeforeAndAfterAll {
  val appId = 0
  val executorId = 0
  val workerId = WorkerId(0, 0L)
  var appMaster: TestProbe = null
  implicit var system: ActorSystem = null
  val userConf = UserConfig.empty

  override def beforeAll(): Unit = {
    system = ActorSystem("TaskLauncherSpec", TestUtil.DEFAULT_CONFIG)
    appMaster = TestProbe()
  }

  override def afterAll(): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }

  it should "call launcher to launch task" in {
    val worker = TestProbe()
    val workerInfo = WorkerInfo(workerId, worker.ref)
    val executorContext = ExecutorContext(executorId, workerInfo, appId, "app",
      appMaster.ref, Resource(2))
    val taskLauncher = mock(classOf[ITaskLauncher])
    val executor = system.actorOf(Props(new Executor(executorContext, userConf, taskLauncher)))
    val processor = ProcessorDescription(id = 0, taskClass = classOf[MockTask].getName,
      parallelism = 2)
    val taskIds = List(TaskId(0, 0), TaskId(0, 1))
    val launchTasks = LaunchTasks(taskIds, dagVersion = 0, processor, List.empty[Subscriber])

    val task = TestProbe()
    when(taskLauncher.launch(any(), any(), any(), any(), any()))
      .thenReturn(taskIds.map((_, task.ref)).toMap)

    val client = TestProbe()
    client.send(executor, launchTasks)
    client.expectMsg(TasksLaunched)

    verify(taskLauncher, times(1)).launch(any(), any(), any(), any(), any())

    executor ! RegisterTask(TaskId(0, 0), executorId, HostPort("localhost:80"))
    executor ! RegisterTask(TaskId(0, 1), executorId, HostPort("localhost:80"))

    executor ! TaskRegistered(TaskId(0, 0), 0, 0)

    task.expectMsgType[TaskRegistered]

    executor ! TaskRegistered(TaskId(0, 1), 0, 0)

    task.expectMsgType[TaskRegistered]

    executor ! TaskLocationsReady(TaskLocations(Map.empty), dagVersion = 0)
    executor ! StartAllTasks(dagVersion = 0)

    task.expectMsgType[StartTask]
    task.expectMsgType[StartTask]

    val changeTasks = ChangeTasks(taskIds, dagVersion = 1, life = LifeTime(0, Long.MaxValue),
      List.empty[Subscriber])

    client.send(executor, changeTasks)
    client.expectMsgType[TasksChanged]

    executor ! TaskLocationsReady(TaskLocations(Map.empty), 1)
    executor ! StartAllTasks(dagVersion = 1)

    task.expectMsgType[ChangeTask]
    task.expectMsgType[ChangeTask]
  }
} 
Example 32
Source File: TaskLauncherSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.executor

import scala.concurrent.Await
import scala.concurrent.duration.Duration

import akka.actor.{Actor, ActorSystem}
import akka.testkit.TestProbe
import org.scalatest._

import org.apache.gearpump.cluster.{TestUtil, UserConfig}
import org.apache.gearpump.serializer.SerializationFramework
import org.apache.gearpump.streaming.ProcessorDescription
import org.apache.gearpump.streaming.executor.TaskLauncher.TaskArgument
import org.apache.gearpump.streaming.executor.TaskLauncherSpec.{MockTask, MockTaskActor}
import org.apache.gearpump.streaming.task.{Task, TaskContext, TaskContextData, TaskId, TaskWrapper}

class TaskLauncherSpec extends FlatSpec with Matchers with BeforeAndAfterAll {
  val appId = 0
  val executorId = 0
  var appMaster: TestProbe = null
  implicit var system: ActorSystem = null
  val userConf = UserConfig.empty

  override def beforeAll(): Unit = {
    system = ActorSystem("TaskLauncherSpec", TestUtil.DEFAULT_CONFIG)
    appMaster = TestProbe()
  }

  override def afterAll(): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }

  it should "able to launch tasks" in {
    val launcher = new TaskLauncher(appId, "app", executorId, appMaster.ref,
      userConf, classOf[MockTaskActor])
    val taskIds = List(TaskId(0, 0), TaskId(0, 1))
    val processor = ProcessorDescription(id = 0, taskClass = classOf[MockTask].getName,
      parallelism = 2)
    val argument = TaskArgument(0, processor, null)

    val tasks = launcher.launch(taskIds, argument, system, null,
      "gearpump.shared-thread-pool-dispatcher")
    tasks.keys.toSet shouldBe taskIds.toSet
  }
}

object TaskLauncherSpec {
  class MockTaskActor(
      val taskId: TaskId,
      val taskContextData : TaskContextData,
      userConf : UserConfig,
      val task: TaskWrapper,
      serializer: SerializationFramework) extends Actor {
    def receive: Receive = null
  }

  class MockTask(taskContext: TaskContext, userConf: UserConfig)
    extends Task(taskContext, userConf) {
  }
} 
Example 33
Source File: Worker.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.cluster.main

import akka.actor.{ActorSystem, Props}
import org.apache.gearpump.cluster.ClusterConfig
import org.apache.gearpump.cluster.master.MasterProxy
import org.apache.gearpump.cluster.worker.{Worker => WorkerActor}
import org.apache.gearpump.transport.HostPort
import org.apache.gearpump.util.Constants._
import org.apache.gearpump.util.LogUtil.ProcessType
import org.apache.gearpump.util.{AkkaApp, LogUtil}
import org.slf4j.Logger

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration.Duration


object Worker extends AkkaApp with ArgumentsParser {
  protected override def akkaConfig = ClusterConfig.worker()

  override val description = "Start a worker daemon"

  var LOG: Logger = LogUtil.getLogger(getClass)

  private def uuid = java.util.UUID.randomUUID.toString

  def main(akkaConf: Config, args: Array[String]): Unit = {
    val id = uuid

    this.LOG = {
      LogUtil.loadConfiguration(akkaConf, ProcessType.WORKER)
      // Delay creation of LOG instance to avoid creating an empty log file as we
      // reset the log file name here
      LogUtil.getLogger(getClass)
    }

    val system = ActorSystem(id, akkaConf)

    val masterAddress = akkaConf.getStringList(GEARPUMP_CLUSTER_MASTERS).asScala.map { address =>
      val hostAndPort = address.split(":")
      HostPort(hostAndPort(0), hostAndPort(1).toInt)
    }

    LOG.info(s"Trying to connect to masters " + masterAddress.mkString(",") + "...")
    val masterProxy = system.actorOf(MasterProxy.props(masterAddress), s"masterproxy${system.name}")

    system.actorOf(Props(classOf[WorkerActor], masterProxy),
      classOf[WorkerActor].getSimpleName + id)

    // Block the main thread; the worker daemon runs until its actor system
    // is terminated externally.
    Await.result(system.whenTerminated, Duration.Inf)
  }
} 
Example 34
Source File: Local.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.cluster.main

import akka.actor.{ActorSystem, Props}
import com.typesafe.config.ConfigValueFactory
import org.apache.gearpump.cluster.ClusterConfig
import org.apache.gearpump.cluster.master.{Master => MasterActor}
import org.apache.gearpump.cluster.worker.{Worker => WorkerActor}
import org.apache.gearpump.util.Constants._
import org.apache.gearpump.util.LogUtil.ProcessType
import org.apache.gearpump.util.{ActorUtil, Constants, LogUtil, MasterClientCommand, Util}
import org.slf4j.Logger

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration.Duration

object Local extends MasterClientCommand with ArgumentsParser {
  override def akkaConfig: Config = ClusterConfig.master()

  var LOG: Logger = LogUtil.getLogger(getClass)

  override val options: Array[(String, CLIOption[Any])] =
    Array("sameprocess" -> CLIOption[Boolean]("", required = false, defaultValue = Some(false)),
      "workernum" -> CLIOption[Int]("<how many workers to start>", required = false,
        defaultValue = Some(2)))

  override val description = "Start a local cluster"

  def main(akkaConf: Config, args: Array[String]): Unit = {

    this.LOG = {
      LogUtil.loadConfiguration(akkaConf, ProcessType.LOCAL)
      LogUtil.getLogger(getClass)
    }

    val config = parse(args)
    if (null != config) {
      local(config.getInt("workernum"), config.getBoolean("sameprocess"), akkaConf)
    }
  }

  def local(workerCount: Int, sameProcess: Boolean, akkaConf: Config): Unit = {
    if (sameProcess) {
      LOG.info("Starting local in same process")
      System.setProperty("LOCAL", "true")
    }
    val masters = akkaConf.getStringList(Constants.GEARPUMP_CLUSTER_MASTERS)
      .asScala.flatMap(Util.parseHostList)
    val local = akkaConf.getString(Constants.GEARPUMP_HOSTNAME)

    // Local mode expects exactly one master, bound to the local host.
    if (masters.size != 1 || masters.head.host != local) {
      LOG.error(s"The ${Constants.GEARPUMP_CLUSTER_MASTERS} setting does not " +
        s"match ${Constants.GEARPUMP_HOSTNAME}")
    } else {

      val hostPort = masters.head
      implicit val system = ActorSystem(MASTER, akkaConf.
        withValue("akka.remote.netty.tcp.port", ConfigValueFactory.fromAnyRef(hostPort.port))
      )

      val master = system.actorOf(Props[MasterActor], MASTER)
      val masterPath = ActorUtil.getSystemAddress(system).toString + s"/user/$MASTER"

      0.until(workerCount).foreach { id =>
        system.actorOf(Props(classOf[WorkerActor], master), classOf[WorkerActor].getSimpleName + id)
      }

      Await.result(system.whenTerminated, Duration.Inf)
    }
  }
} 
Example 35
Source File: EmbeddedCluster.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.cluster.embedded

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import akka.actor.{ActorRef, ActorSystem, Props}
import com.typesafe.config.{Config, ConfigValueFactory}
import org.apache.gearpump.cluster.ClusterConfig
import org.apache.gearpump.cluster.master.{Master => MasterActor}
import org.apache.gearpump.cluster.worker.{Worker => WorkerActor}
import org.apache.gearpump.util.Constants.{GEARPUMP_CLUSTER_EXECUTOR_WORKER_SHARE_SAME_PROCESS, GEARPUMP_CLUSTER_MASTERS, GEARPUMP_METRIC_ENABLED, MASTER}
import org.apache.gearpump.util.{LogUtil, Util}


class EmbeddedCluster(inputConfig: Config) {
  private val LOG = LogUtil.getLogger(getClass)
  private val workerCount: Int = 1
  private val port = Util.findFreePort().get
  private[embedded] val config: Config = getConfig(inputConfig, port)
  private[embedded] val system: ActorSystem = ActorSystem(MASTER, config)
  private[embedded] val master: ActorRef = system.actorOf(Props[MasterActor], MASTER)

  0.until(workerCount).foreach { id =>
    system.actorOf(Props(classOf[WorkerActor], master), classOf[WorkerActor].getSimpleName + id)
  }

  LOG.info("=================================")
  LOG.info("Local Cluster is started at: ")
  LOG.info(s"                 127.0.0.1:$port")
  LOG.info(s"To see UI, run command: services -master 127.0.0.1:$port")

  private def getConfig(inputConfig: Config, port: Int): Config = {
    val config = inputConfig.
      withValue("akka.remote.netty.tcp.port", ConfigValueFactory.fromAnyRef(port)).
      withValue(GEARPUMP_CLUSTER_MASTERS,
        ConfigValueFactory.fromIterable(List(s"127.0.0.1:$port").asJava)).
      withValue(GEARPUMP_CLUSTER_EXECUTOR_WORKER_SHARE_SAME_PROCESS,
        ConfigValueFactory.fromAnyRef(true)).
      withValue(GEARPUMP_METRIC_ENABLED, ConfigValueFactory.fromAnyRef(true)).
      withValue("akka.actor.provider",
        ConfigValueFactory.fromAnyRef("akka.cluster.ClusterActorRefProvider"))
    config
  }

  def stop(): Unit = {
    system.stop(master)
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
}

object EmbeddedCluster {
  def apply(): EmbeddedCluster = {
    new EmbeddedCluster(ClusterConfig.master())
  }
} 
Example 36
Source File: JarStoreClient.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.jarstore

import java.io.File
import java.util.concurrent.TimeUnit
import scala.collection.JavaConverters._
import scala.concurrent.duration.Duration
import scala.concurrent.Await

import akka.pattern.ask
import akka.actor.{ActorSystem, ActorRef}
import com.typesafe.config.Config
import org.apache.gearpump.cluster.master.MasterProxy
import org.apache.gearpump.util.{Util, Constants, LogUtil}
import org.slf4j.Logger

import org.apache.gearpump.cluster.ClientToMaster.{GetJarStoreServer, JarStoreServerAddress}
import scala.concurrent.{Future, ExecutionContext}

class JarStoreClient(config: Config, system: ActorSystem) {
  private def LOG: Logger = LogUtil.getLogger(getClass)
  private implicit val timeout = Constants.FUTURE_TIMEOUT
  private implicit def dispatcher: ExecutionContext = system.dispatcher

  private val master: ActorRef = {
    val masters = config.getStringList(Constants.GEARPUMP_CLUSTER_MASTERS)
      .asScala.flatMap(Util.parseHostList)
    system.actorOf(MasterProxy.props(masters), s"masterproxy${Util.randInt()}")
  }

  private lazy val client = (master ? GetJarStoreServer).asInstanceOf[Future[JarStoreServerAddress]]
    .map { address =>
      val client = new FileServer.Client(system, address.url)
      client
    }

  
  def copyFromLocal(localFile: File): FilePath = {
    val future = client.flatMap(_.upload(localFile))
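    // Expose a synchronous API to callers: block until the upload finishes,
    // up to 60 seconds.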
    Await.result(future, Duration(60, TimeUnit.SECONDS))
  }
} 
Example 37
Source File: ConfigFileBasedAuthenticatorSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.security

import scala.concurrent.Await
import scala.concurrent.duration._

import akka.actor.ActorSystem
import org.scalatest.{FlatSpec, Matchers}

import org.apache.gearpump.cluster.TestUtil

class ConfigFileBasedAuthenticatorSpec extends FlatSpec with Matchers {
  it should "authenticate correctly" in {
    val config = TestUtil.UI_CONFIG
    implicit val system = ActorSystem("ConfigFileBasedAuthenticatorSpec", config)
    implicit val ec = system.dispatcher
    val timeout = 30.seconds

    val authenticator = new ConfigFileBasedAuthenticator(config)
    val guest = Await.result(authenticator.authenticate("guest", "guest", ec), timeout)
    val admin = Await.result(authenticator.authenticate("admin", "admin", ec), timeout)

    val nonexist = Await.result(authenticator.authenticate("nonexist", "nonexist", ec), timeout)

    val failedGuest = Await.result(authenticator.authenticate("guest", "wrong", ec), timeout)
    val failedAdmin = Await.result(authenticator.authenticate("admin", "wrong", ec), timeout)

    assert(guest == Authenticator.Guest)
    assert(admin == Authenticator.Admin)
    assert(nonexist == Authenticator.UnAuthenticated)
    assert(failedGuest == Authenticator.UnAuthenticated)
    assert(failedAdmin == Authenticator.UnAuthenticated)

    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
} 
Example 38
Source File: NettySpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.transport

import java.util.concurrent.TimeUnit

import scala.concurrent.Await
import scala.concurrent.duration._

import akka.actor.{ActorRef, ActorSystem}
import akka.testkit.TestProbe
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FlatSpec, Matchers}

import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.transport.MockTransportSerializer.NettyMessage
import org.apache.gearpump.transport.netty.{TaskMessage, Context}
import org.apache.gearpump.util.Util

class NettySpec extends FlatSpec with Matchers with MockitoSugar {

  "Netty Transport" should "send and receive message correctly " in {
    val conf = TestUtil.DEFAULT_CONFIG
    val system = ActorSystem("transport", conf)
    val context = new Context(system, conf)
    val serverActor = TestProbe()(system)

    val port = Util.findFreePort()

    import system.dispatcher
    system.scheduler.scheduleOnce(Duration(1, TimeUnit.SECONDS)) {
      context.bind("server", new ActorLookupById {
        override def lookupLocalActor(id: Long): Option[ActorRef] = Some(serverActor.ref)
      }, false, port.get)
    }
    val client = context.connect(HostPort("127.0.0.1", port.get))

    val data = NettyMessage(0)
    val msg = new TaskMessage(0, 1, 2, data)
    client ! msg
    serverActor.expectMsg(15.seconds, data)

    context.close()
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
} 
Example 39
Source File: SerializerSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.serializer

import akka.actor.{ActorSystem, ExtendedActorSystem}

import com.esotericsoftware.kryo.io.{Input, Output}
import com.esotericsoftware.kryo.{Kryo, Serializer => KryoSerializer}
import com.typesafe.config.{ConfigFactory, ConfigValueFactory}

import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.serializer.SerializerSpec._

import org.scalatest.mock.MockitoSugar
import org.scalatest.{FlatSpec, Matchers}

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration.Duration


class SerializerSpec extends FlatSpec with Matchers with MockitoSugar {
  val config = ConfigFactory.empty.withValue("gearpump.serializers",
    ConfigValueFactory.fromAnyRef(Map(classOf[ClassA].getName -> classOf[ClassASerializer].getName,
      classOf[ClassB].getName -> classOf[ClassBSerializer].getName).asJava))

  "GearpumpSerialization" should "register custom serializers" in {
    val serialization = new GearpumpSerialization(config)
    val kryo = new Kryo
    serialization.customize(kryo)

    val forB = kryo.getRegistration(classOf[ClassB])
    assert(forB.getSerializer.isInstanceOf[ClassBSerializer])

    val forA = kryo.getRegistration(classOf[ClassA])
    assert(forA.getSerializer.isInstanceOf[ClassASerializer])
  }

  "FastKryoSerializer" should "serialize correctly" in {
    val myConfig = config.withFallback(TestUtil.DEFAULT_CONFIG.withoutPath("gearpump.serializers"))
    val system = ActorSystem("my", myConfig)

    val serializer = new FastKryoSerializer(system.asInstanceOf[ExtendedActorSystem])

    val bytes = serializer.serialize(new ClassA)
    val anotherA = serializer.deserialize(bytes)

    assert(anotherA.isInstanceOf[ClassA])
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
}

object SerializerSpec {

  class ClassA {}

  class ClassASerializer extends KryoSerializer[ClassA] {
    override def write(kryo: Kryo, output: Output, `object`: ClassA): Unit = {
      output.writeString(classOf[ClassA].getName)
    }

    override def read(kryo: Kryo, input: Input, `type`: Class[ClassA]): ClassA = {
      val className = input.readString()
      Class.forName(className).newInstance().asInstanceOf[ClassA]
    }
  }

  class ClassB {}

  class ClassBSerializer extends KryoSerializer[ClassB] {
    override def write(kryo: Kryo, output: Output, `object`: ClassB): Unit = {}

    override def read(kryo: Kryo, input: Input, `type`: Class[ClassB]): ClassB = {
      null
    }
  }
} 
Example 40
Source File: MasterWatcherSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.cluster.main

import akka.actor.{ActorSystem, Props}
import akka.testkit.TestProbe
import com.typesafe.config.Config
import org.apache.gearpump.cluster.TestUtil
import org.scalatest.{FlatSpec, Matchers}

import scala.concurrent.Await
import scala.concurrent.duration._

class MasterWatcherSpec extends FlatSpec with Matchers {
  def config: Config = TestUtil.MASTER_CONFIG

  "MasterWatcher" should "kill itself when can not get a quorum" in {
    val system = ActorSystem("ForMasterWatcher", config)

    val actorWatcher = TestProbe()(system)

    val masterWatcher = system.actorOf(Props(classOf[MasterWatcher], "watcher"))
    actorWatcher watch masterWatcher
    actorWatcher.expectTerminated(masterWatcher, 5.seconds)
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
} 
Example 41
Source File: ExecutorSystemLauncherSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.cluster.appmaster

import org.apache.gearpump.cluster.worker.WorkerId

import scala.concurrent.Await
import scala.concurrent.duration._

import akka.actor.{ActorSystem, Props}
import akka.testkit.TestProbe
import com.typesafe.config.ConfigValueFactory
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import org.apache.gearpump.cluster.AppMasterToWorker.LaunchExecutor
import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.cluster.WorkerToAppMaster.ExecutorLaunchRejected
import org.apache.gearpump.cluster.appmaster.ExecutorSystemLauncher._
import org.apache.gearpump.cluster.appmaster.ExecutorSystemScheduler.Session
import org.apache.gearpump.cluster.scheduler.Resource
import org.apache.gearpump.util.ActorSystemBooter.{ActorSystemRegistered, RegisterActorSystem}
import org.apache.gearpump.util.Constants

class ExecutorSystemLauncherSpec extends FlatSpec with Matchers with BeforeAndAfterAll {
  implicit var system: ActorSystem = null
  val workerId: WorkerId = WorkerId(0, 0L)
  val appId = 0
  val executorId = 0
  val url = "akka.tcp://[email protected]:3000"
  val session = Session(null, null)
  val launchExecutorSystemTimeout = 3000
  val activeConfig = TestUtil.DEFAULT_CONFIG.
    withValue(Constants.GEARPUMP_START_EXECUTOR_SYSTEM_TIMEOUT_MS,
      ConfigValueFactory.fromAnyRef(launchExecutorSystemTimeout))

  override def beforeAll(): Unit = {
    system = ActorSystem("test", activeConfig)
  }

  override def afterAll(): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }

  it should "report success when worker launch the system successfully" in {
    val worker = TestProbe()
    val client = TestProbe()

    val launcher = system.actorOf(Props(new ExecutorSystemLauncher(appId, session)))
    client.watch(launcher)
    client.send(launcher, LaunchExecutorSystem(WorkerInfo(workerId, worker.ref), 0, Resource(1)))

    worker.expectMsgType[LaunchExecutor]
    worker.reply(RegisterActorSystem(url))

    worker.expectMsgType[ActorSystemRegistered]

    client.expectMsgType[LaunchExecutorSystemSuccess]
    client.expectTerminated(launcher)
  }

  it should "report failure when worker refuse to launch the system explicitly" in {
    val worker = TestProbe()
    val client = TestProbe()

    val resource = Resource(4)

    val launcher = system.actorOf(Props(new ExecutorSystemLauncher(appId, session)))
    client.watch(launcher)
    client.send(launcher, LaunchExecutorSystem(WorkerInfo(workerId, worker.ref), 0, resource))

    worker.expectMsgType[LaunchExecutor]
    worker.reply(ExecutorLaunchRejected())

    client.expectMsg(LaunchExecutorSystemRejected(resource, null, session))
    client.expectTerminated(launcher)
  }

  it should "report timeout when trying to start a executor system on worker, " +
    "and worker doesn't response" in {
    val client = TestProbe()
    val worker = TestProbe()
    val launcher = system.actorOf(Props(new ExecutorSystemLauncher(appId, session)))
    client.send(launcher, LaunchExecutorSystem(WorkerInfo(workerId, worker.ref), 0, Resource(1)))
    client.watch(launcher)
    val waitFor = launchExecutorSystemTimeout + 10000
    client.expectMsgType[LaunchExecutorSystemTimeout](waitFor.milliseconds)
    client.expectTerminated(launcher, waitFor.milliseconds)
  }
} 
Example 42
Source File: MasterConnectionKeeperSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.cluster.appmaster

import scala.concurrent.Await
import scala.concurrent.duration._

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.TestProbe
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import org.apache.gearpump.cluster.AppMasterToMaster.RegisterAppMaster
import org.apache.gearpump.cluster.MasterToAppMaster.AppMasterRegistered
import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.cluster.appmaster.MasterConnectionKeeper.MasterConnectionStatus.{MasterConnected, _}
import org.apache.gearpump.cluster.appmaster.MasterConnectionKeeperSpec.ConnectionKeeperTestEnv
import org.apache.gearpump.cluster.master.MasterProxy.WatchMaster

class MasterConnectionKeeperSpec extends FlatSpec with Matchers with BeforeAndAfterAll {

  implicit var system: ActorSystem = null
  val appId = 0
  val register = RegisterAppMaster(appId, null, null)

  override def beforeAll(): Unit = {
    system = ActorSystem("test", TestUtil.DEFAULT_CONFIG)
  }

  override def afterAll(): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }

  private def startMasterConnectionKeeper: ConnectionKeeperTestEnv = {
    val statusChangeSubscriber = TestProbe()
    val master = TestProbe()

    val keeper = system.actorOf(Props(
      new MasterConnectionKeeper(register, master.ref, statusChangeSubscriber.ref)))
    statusChangeSubscriber.watch(keeper)

    master.expectMsgType[WatchMaster]

    // Master is alive and responds to RegisterAppMaster
    master.expectMsgType[RegisterAppMaster]
    master.reply(AppMasterRegistered(appId))

    // Notify listener that master is alive
    statusChangeSubscriber.expectMsg(MasterConnected)
    ConnectionKeeperTestEnv(master, keeper, statusChangeSubscriber)
  }

  it should "start correctly and notify listener that master is alive" in {
    startMasterConnectionKeeper
  }

  it should "re-register the appmaster when master is restarted" in {
    import org.apache.gearpump.cluster.master.MasterProxy.MasterRestarted
    val ConnectionKeeperTestEnv(master, keeper, masterChangeListener) = startMasterConnectionKeeper

    // Master is restarted
    master.send(keeper, MasterRestarted)
    master.expectMsgType[RegisterAppMaster]
    master.reply(AppMasterRegistered(appId))
    masterChangeListener.expectMsg(MasterConnected)

    // Recovery from Master restart is transparent to listener
    masterChangeListener.expectNoMsg()
  }

  it should "notify listener and then shutdown itself when master is dead" in {
    val ConnectionKeeperTestEnv(master, keeper, masterChangeListener) = startMasterConnectionKeeper

    // Master is dead
    master.send(keeper, MasterStopped)

    // Keeper should tell the listener that the master is stopped before shutting itself down
    masterChangeListener.expectMsg(MasterStopped)
    masterChangeListener.expectTerminated(keeper)
  }

  it should "mark the master as dead when timeout" in {
    val statusChangeSubscriber = TestProbe()
    val master = TestProbe()

    // MasterConnectionKeeper registers itself with the master by sending RegisterAppMaster
    val keeper = system.actorOf(Props(new MasterConnectionKeeper(register,
      master.ref, statusChangeSubscriber.ref)))

    // Master doesn't reply to the keeper.
    statusChangeSubscriber.watch(keeper)

    // On timeout, the keeper notifies the listener and then stops itself
    statusChangeSubscriber.expectMsg(60.seconds, MasterStopped)
    statusChangeSubscriber.expectTerminated(keeper, 60.seconds)
  }
}

object MasterConnectionKeeperSpec {
  case class ConnectionKeeperTestEnv(
      master: TestProbe, keeper: ActorRef, masterChangeListener: TestProbe)
} 
Example 43
Source File: RunningApplicationSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.cluster.client

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.testkit.TestProbe
import akka.util.Timeout
import org.apache.gearpump.cluster.ClientToMaster.{ResolveAppId, ShutdownApplication}
import org.apache.gearpump.cluster.MasterToClient.{ResolveAppIdResult, ShutdownApplicationResult}
import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.cluster.client.RunningApplicationSpec.{MockAskAppMasterRequest, MockAskAppMasterResponse}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success}
import scala.concurrent.ExecutionContext.Implicits.global

class RunningApplicationSpec extends FlatSpec with Matchers with BeforeAndAfterAll {
  implicit var system: ActorSystem = _

  override def beforeAll(): Unit = {
    system = ActorSystem("test", TestUtil.DEFAULT_CONFIG)
  }

  override def afterAll(): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }

  "RunningApplication" should "be able to shutdown application" in {
    val errorMsg = "mock exception"
    val master = TestProbe()
    val timeout = Timeout(90, TimeUnit.SECONDS)
    val application = new RunningApplication(1, master.ref, timeout)
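    // shutDown() blocks on a reply from the master, so run it on another
    // thread and let the TestProbe below answer the request.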
    Future {
      application.shutDown()
    }
    master.expectMsg(ShutdownApplication(1))
    master.reply(ShutdownApplicationResult(Success(1)))

    val result = Future {
      intercept[Exception] {
        application.shutDown()
      }
    }
    master.expectMsg(ShutdownApplication(1))
    master.reply(ShutdownApplicationResult(Failure(new Exception(errorMsg))))
    val exception = Await.result(result, Duration.Inf)
    assert(exception.getMessage.equals(errorMsg))
  }

  "RunningApplication" should "be able to ask appmaster" in {
    val master = TestProbe()
    val appMaster = TestProbe()
    val appId = 1
    val timeout = Timeout(90, TimeUnit.SECONDS)
    val request = MockAskAppMasterRequest("request")
    val application = new RunningApplication(appId, master.ref, timeout)
    val future = application.askAppMaster[MockAskAppMasterResponse](request)
    master.expectMsg(ResolveAppId(appId))
    master.reply(ResolveAppIdResult(Success(appMaster.ref)))
    appMaster.expectMsg(MockAskAppMasterRequest("request"))
    appMaster.reply(MockAskAppMasterResponse("response"))
    val result = Await.result(future, Duration.Inf)
    assert(result.res.equals("response"))

    // ResolveAppId should not be called multiple times
    val future2 = application.askAppMaster[MockAskAppMasterResponse](request)
    appMaster.expectMsg(MockAskAppMasterRequest("request"))
    appMaster.reply(MockAskAppMasterResponse("response"))
    val result2 = Await.result(future2, Duration.Inf)
    assert(result2.res.equals("response"))
  }
}

object RunningApplicationSpec {
  case class MockAskAppMasterRequest(req: String)

  case class MockAskAppMasterResponse(res: String)
} 
Example 44
Source File: MiniCluster.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.cluster

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.pattern.ask
import akka.testkit.TestActorRef
import com.typesafe.config.ConfigValueFactory
import org.apache.gearpump.cluster.AppMasterToMaster.GetAllWorkers
import org.apache.gearpump.cluster.MasterToAppMaster.WorkerList
import org.apache.gearpump.cluster.master.Master
import org.apache.gearpump.cluster.worker.Worker
import org.apache.gearpump.util.Constants

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

class MiniCluster {
  private val mockMasterIP = "127.0.0.1"

  implicit val system = ActorSystem("system", TestUtil.MASTER_CONFIG.
    withValue(Constants.NETTY_TCP_HOSTNAME, ConfigValueFactory.fromAnyRef(mockMasterIP)))

  val (mockMaster, worker) = {
    val master = system.actorOf(Props(classOf[Master]), "master")
    val worker = system.actorOf(Props(classOf[Worker], master), "worker")

    // Wait until the worker registers itself with the master
    waitUntilWorkerIsRegistered(master)
    (master, worker)
  }

  def launchActor(props: Props): TestActorRef[Actor] = {
    TestActorRef(props)
  }

  private def waitUntilWorkerIsRegistered(master: ActorRef): Unit = {
    // Busy-waits; isWorkerRegistered itself blocks for up to 15 seconds per poll.
    while (!isWorkerRegistered(master)) {}
  }

  private def isWorkerRegistered(master: ActorRef): Boolean = {
    import scala.concurrent.duration._
    implicit val dispatcher = system.dispatcher

    implicit val futureTimeout = Constants.FUTURE_TIMEOUT

    val workerListFuture = (master ? GetAllWorkers).asInstanceOf[Future[WorkerList]]

    // Waits until the worker is registered.
    val workers = Await.result[WorkerList](workerListFuture, 15.seconds)
    workers.workers.size > 0
  }

  def shutDown(): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
} 
Example 45
Source File: ConfigsSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.util

import java.io.File
import scala.concurrent.Await
import scala.concurrent.duration.Duration

import akka.actor.ActorSystem
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FlatSpec, Matchers}

import org.apache.gearpump.cluster.{ClusterConfig, ClusterConfigSource, UserConfig}

class ConfigsSpec extends FlatSpec with Matchers with MockitoSugar {
  "Typesafe Cluster Configs" should "follow the override rules" in {

    val conf =
      """
      gearpump {
        gear = "gearpump"
      }

      gearpump-master {
        conf = "master"
      }
      gearpump-worker {
        conf = "worker"
      }
      conf = "base"
      """

    val file = File.createTempFile("test", ".conf")
    FileUtils.write(file, conf)

    val raw = ClusterConfig.load(ClusterConfigSource(file.toString))

    assert(raw.master.getString("conf") == "master", "master > base")
    assert(raw.worker.getString("conf") == "worker", "worker > base")
    assert(raw.default.getString("conf") == "base", "application > base")

    file.delete()
  }

  "ClusterConfigSource" should "return empty for non-exist files" in {
    val source = ClusterConfigSource("non-exist")
    var config = source.getConfig
    assert(config.isEmpty)

    val nullCheck = ClusterConfigSource(null)
    config = nullCheck.getConfig
    assert(config.isEmpty)
  }

  "User Config" should "work" in {

    implicit val system = ActorSystem("forSerialization")

    val map = Map[String, String]("key1" -> "1", "key2" -> "value2")

    val user = new UserConfig(map)
      .withLong("key3", 2L)
      .withBoolean("key4", value = true)
      .withFloat("key5", 3.14F)
      .withDouble("key6", 2.718)

    assert(user.getInt("key1").get == 1)
    assert(user.getString("key1").get == "1")
    assert(user.getLong("key3").get == 2L)
    assert(user.getBoolean("key4").get == true)
    assert(user.getFloat("key5").get == 3.14F)
    assert(user.getDouble("key6").get == 2.718)

    val data = new ConfigsSpec.Data(3)
    assert(data == user.withValue("data", data).getValue[ConfigsSpec.Data]("data").get)
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
}

object ConfigsSpec {
  case class Data(value: Int)
} 
Example 46
Source File: ActorSystemBooterSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.util

import scala.concurrent.Await
import scala.concurrent.duration.Duration

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.TestProbe
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FlatSpec, Matchers}

import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.util.ActorSystemBooter.{ActorCreated, RegisterActorSystem, _}
import org.apache.gearpump.util.ActorSystemBooterSpec._

class ActorSystemBooterSpec extends FlatSpec with Matchers with MockitoSugar {

  "ActorSystemBooter" should "report its address back" in {
    val boot = bootSystem()
    boot.prob.expectMsgType[RegisterActorSystem]
    boot.shutdown()
  }

  "ActorSystemBooter" should "terminate itself when parent actor dies" in {
    val boot = bootSystem()
    boot.prob.expectMsgType[RegisterActorSystem]

    val dummy = boot.host.actorOf(Props(classOf[Dummy]), "dummy")
    boot.prob.reply(ActorSystemRegistered(boot.prob.ref))
    boot.prob.reply(BindLifeCycle(dummy))
    boot.host.stop(dummy)
    val terminated = retry(5)(boot.bootedSystem.whenTerminated.isCompleted)
    assert(terminated)
    boot.shutdown()
  }

  "ActorSystemBooter" should "create new actor" in {
    val boot = bootSystem()
    boot.prob.expectMsgType[RegisterActorSystem]
    boot.prob.reply(ActorSystemRegistered(boot.prob.ref))
    boot.prob.reply(CreateActor(Props(classOf[AcceptThreeArguments], 1, 2, 3), "three"))
    boot.prob.expectMsgType[ActorCreated]

    boot.prob.reply(CreateActor(Props(classOf[AcceptZeroArguments]), "zero"))
    boot.prob.expectMsgType[ActorCreated]

    boot.shutdown()
  }

  private def bootSystem(): Boot = {
    val booter = ActorSystemBooter(TestUtil.DEFAULT_CONFIG)

    val system = ActorSystem("reportback", TestUtil.DEFAULT_CONFIG)

    val receiver = TestProbe()(system)
    val address = ActorUtil.getFullPath(system, receiver.ref.path)

    val bootSystem = booter.boot("booter", address)

    Boot(system, receiver, bootSystem)
  }

  case class Boot(host: ActorSystem, prob: TestProbe, bootedSystem: ActorSystem) {
    def shutdown(): Unit = {
      host.terminate()
      bootedSystem.terminate()
      Await.result(host.whenTerminated, Duration.Inf)
      Await.result(bootedSystem.whenTerminated, Duration.Inf)
    }
  }

  def retry(seconds: Int)(fn: => Boolean): Boolean = {
    val result = fn
    // Stop once the condition holds or the retry budget is exhausted,
    // so the recursion is bounded.
    if (result || seconds <= 1) {
      result
    } else {
      Thread.sleep(1000)
      retry(seconds - 1)(fn)
    }
  }
}

object ActorSystemBooterSpec {
  class Dummy extends Actor {
    def receive: Receive = {
      case _ =>
    }
  }

  class AcceptZeroArguments extends Actor {
    def receive: Receive = {
      case _ =>
    }
  }

  class AcceptThreeArguments(a: Int, b: Int, c: Int) extends Actor {
    def receive: Receive = {
      case _ =>
    }
  }
} 
Example 47
Source File: MongoFeederSource.scala    From gatling-mongodb-protocol   with MIT License
package com.ringcentral.gatling.mongo.feeder

import com.ringcentral.gatling.mongo.MongoUtils
import io.gatling.core.feeder.Record
import play.api.libs.iteratee._
import play.api.libs.json.{JsObject, Json}
import reactivemongo.api.{QueryOpts, ReadPreference}
import reactivemongo.play.iteratees.cursorProducer
import reactivemongo.play.json.ImplicitBSONHandlers._
import reactivemongo.play.json.collection.JSONCollection

import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

object MongoFeederSource {

  val defaultPostProcessor: JsObject => Map[String, Any] = o => o.fields.toMap

  def apply(url: String, collectionName: String, query: String, limit: Int, batchSize: Int, connectionTimeout: FiniteDuration, receiveTimeout: FiniteDuration, postProcessor: JsObject => Map[String, Any]): Vector[Record[Any]] = {
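    // Gatling feeders are built synchronously at simulation start-up, so
    // block until the whole result set is fetched (bounded by receiveTimeout).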
    Await.result(run(url, collectionName, query, limit, batchSize, connectionTimeout, postProcessor), receiveTimeout)
  }


  private def run(url: String, collectionName: String, query: String, limit: Int, batchSize: Int, connectionTimeout: FiniteDuration, postProcessor: (JsObject) => Map[String, Any]): Future[Vector[Record[Any]]] = {
    val document = Json.parse(query).as[JsObject]
    val collection: JSONCollection = MongoUtils.connectToDB(url, connectionTimeout).collection[JSONCollection](collectionName)
    val resultSet: Enumerator[Map[String, Any]] = collection.find(document).options(QueryOpts().batchSize(batchSize)).cursor[JsObject](ReadPreference.primary).enumerator(limit).map(postProcessor)

    resultSet.run(Iteratee.fold(Vector.empty[Record[Any]]) { (acc, next) => acc :+ next })
  }
} 
Example 48
Source File: MongoUtils.scala    From gatling-mongodb-protocol   with MIT License
package com.ringcentral.gatling.mongo

import reactivemongo.api.MongoConnection.{ParsedURI, URIParsingException}
import reactivemongo.api.{DefaultDB, MongoConnection, MongoDriver}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, Future}
import scala.util.{Failure, Success, Try}

// fixme remove global context
import scala.concurrent.ExecutionContext.Implicits.global

object MongoUtils {

  private val defaultPort: Int = 27017
  private lazy val mongoDriver = new MongoDriver()

  private def establishConnection(uri: ParsedURI, dbName: String, connectionTimeout: FiniteDuration): DefaultDB = {
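    // Block until the connection is established so callers receive a usable
    // DefaultDB, or a TimeoutException after connectionTimeout.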
    Await.result(establishConnection(uri, dbName), connectionTimeout)
  }

  private def establishConnection(uri: ParsedURI, dbName: String): Future[DefaultDB] =
    Try(mongoDriver.connection(uri).database(dbName)) match {
      case Success(db) => db
      case Failure(err) =>
        throw new IllegalStateException(
          s"Can't connect to database ${printHosts(uri.hosts)}: ${err.getMessage}", err)
    }

  private def printHosts(hosts: List[(String, Int)]): String = hosts.map { case (host, port) => s"$host:$port" }.mkString(", ")

  def connectToDB(uri: ParsedURI, connectionTimeout: FiniteDuration): DefaultDB =
    uri.db match {
      case Some(dbName) => establishConnection(uri, dbName, connectionTimeout)
      case None => throw new IllegalStateException(s"No database name in URI '$uri'.")
    }

  def connectToDB(uri: String, connectionTimeout: FiniteDuration): DefaultDB =  connectToDB(parseUri(uri), connectionTimeout)

  def parseHosts(hosts: Seq[String]): Seq[(String, Int)] = hosts.map { hostAndPort =>
    hostAndPort.split(':').toList match {
      case host :: port :: Nil =>
        host -> Try(port.toInt).filter(p => p > 0 && p < 65536)
          .getOrElse(throw new URIParsingException(s"Could not parse hosts '$hosts' from URI: invalid port '$port'"))
      case host :: Nil =>
        host -> defaultPort
      case _ => throw new URIParsingException(s"Could not parse hosts from URI: invalid definition '$hosts'")
    }
  }

  def parseUri(uri: String): ParsedURI = {
    MongoConnection.parseURI(uri) match {
      case Success(parsedUri) => parsedUri
      case Failure(err) => throw new IllegalStateException(s"Can't parse database URI: $err")
    }
  }
} 
Example 49
Source File: ShellClient.scala    From incubator-toree   with Apache License 2.0
package org.apache.toree.kernel.protocol.v5.client.socket

import akka.actor.Actor
import akka.util.Timeout
import org.apache.toree.communication.ZMQMessage
import org.apache.toree.communication.security.SecurityActorType
import org.apache.toree.kernel.protocol.v5.client.{ActorLoader, Utilities}
import org.apache.toree.kernel.protocol.v5.{KernelMessage, UUID}
import Utilities._
import org.apache.toree.kernel.protocol.v5.client.execution.{DeferredExecution, DeferredExecutionManager}
import org.apache.toree.kernel.protocol.v5.content.ExecuteReply

import org.apache.toree.utils.LogLike
import scala.concurrent.Await
import scala.concurrent.duration._
import akka.pattern.ask


class ShellClient(
  socketFactory: SocketFactory,
  actorLoader: ActorLoader,
  signatureEnabled: Boolean
) extends Actor with LogLike {
  logger.debug("Created shell client actor")
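  // Effectively unbounded ask timeout for this client.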
  implicit val timeout = Timeout(21474835.seconds)

  val socket = socketFactory.ShellClient(context.system, self)

  def receiveExecuteReply(parentId:String, kernelMessage: KernelMessage): Unit = {
    val deOption: Option[DeferredExecution] = DeferredExecutionManager.get(parentId)
    deOption match {
      case None =>
        logger.warn(s"No deferred execution for parent id ${parentId}")
      case Some(de) =>
        Utilities.parseAndHandle(kernelMessage.contentString,
          ExecuteReply.executeReplyReads, (er: ExecuteReply) => de.resolveReply(er))
    }
  }

  override def receive: Receive = {
    // from shell
    case message: ZMQMessage =>
      logger.debug("Received shell kernel message.")
      val kernelMessage: KernelMessage = message

      // TODO: Validate incoming message signature

      logger.trace(s"Kernel message is ${kernelMessage}")
      receiveExecuteReply(message.parentHeader.msg_id,kernelMessage)

    // from handler
    case message: KernelMessage =>
      logger.trace(s"Sending kernel message ${message}")
      val signatureManager =
        actorLoader.load(SecurityActorType.SignatureManager)

      import scala.concurrent.ExecutionContext.Implicits.global
      val messageWithSignature = if (signatureEnabled) {
        val signatureMessage = signatureManager ? message
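        // Block briefly for the signed message from the signature manager.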
        Await.result(signatureMessage, 100.milliseconds)
          .asInstanceOf[KernelMessage]
      } else message

      val zMQMessage: ZMQMessage = messageWithSignature

      socket ! zMQMessage
  }
} 
Example 50
Source File: StdinClient.scala    From incubator-toree   with Apache License 2.0
package org.apache.toree.kernel.protocol.v5.client.socket

import akka.actor.Actor
import org.apache.toree.communication.ZMQMessage
import org.apache.toree.communication.security.SecurityActorType
import org.apache.toree.kernel.protocol.v5.client.ActorLoader
import org.apache.toree.kernel.protocol.v5.{HeaderBuilder, KMBuilder, KernelMessage}
import org.apache.toree.kernel.protocol.v5.content.{InputReply, InputRequest}
import org.apache.toree.utils.LogLike
import org.apache.toree.kernel.protocol.v5.client.Utilities._
import play.api.libs.json.Json

import StdinClient._
import akka.pattern.ask

import scala.concurrent.duration._
import scala.concurrent.Await

object StdinClient {
  case class ResponseFunctionMessage(responseFunction: ResponseFunction)
  type ResponseFunction = (String, Boolean) => String
  val EmptyResponseFunction: ResponseFunction = (_, _) => ""
}


// NOTE: reconstructed class header. The declaration was lost from this
// listing; the parameters and fields below are inferred from the body
// (socketFactory, actorLoader, signatureEnabled, socket) and may differ
// from the original Toree source. getSessionId is presumably supplied by
// the imported client Utilities.
class StdinClient(
  socketFactory: SocketFactory,
  actorLoader: ActorLoader,
  signatureEnabled: Boolean
) extends Actor with LogLike {
  logger.debug("Created stdin client actor")

  private val socket = socketFactory.StdinClient(context.system, self)

  private var responseFunc: ResponseFunction = EmptyResponseFunction

  override def receive: Receive = {
    case responseFunctionMessage: ResponseFunctionMessage =>
      logger.debug("Updating response function")
      this.responseFunc = responseFunctionMessage.responseFunction

    case message: ZMQMessage =>
      logger.debug("Received stdin kernel message")
      val kernelMessage: KernelMessage = message
      val messageType = kernelMessage.header.msg_type

      if (messageType == InputRequest.toTypeString) {
        logger.debug("Message is an input request")

        val inputRequest =
          Json.parse(kernelMessage.contentString).as[InputRequest]
        val value = responseFunc(inputRequest.prompt, inputRequest.password)
        val inputReply = InputReply(value)

        val newKernelMessage = KMBuilder()
          .withParent(kernelMessage)
          .withHeader(HeaderBuilder.empty.copy(
            msg_type = InputReply.toTypeString,
            session = getSessionId
          ))
          .withContentString(inputReply)
          .build

        import scala.concurrent.ExecutionContext.Implicits.global
        val messageWithSignature = if (signatureEnabled) {
          val signatureManager =
            actorLoader.load(SecurityActorType.SignatureManager)
          val signatureMessage = signatureManager ? newKernelMessage
          Await.result(signatureMessage, 100.milliseconds)
            .asInstanceOf[KernelMessage]
        } else newKernelMessage

        val zmqMessage: ZMQMessage = messageWithSignature

        socket ! zmqMessage
      } else {
        logger.debug(s"Unknown message of type $messageType")
      }
  }
} 
Example 51
Source File: KernelInputStream.scala    From incubator-toree   with Apache License 2.0
package org.apache.toree.kernel.protocol.v5.stream

import java.io.InputStream
import java.nio.charset.Charset

import akka.pattern.ask
import org.apache.toree.kernel.protocol.v5.content.InputRequest
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import org.apache.toree.kernel.protocol.v5.kernel.Utilities.timeout
import org.apache.toree.kernel.protocol.v5.{KMBuilder, MessageType}

import scala.collection.mutable.ListBuffer
import scala.concurrent.{Await, Future}

import KernelInputStream._

object KernelInputStream {
  val DefaultPrompt = ""
  val DefaultPassword = false
}


// NOTE: reconstructed class header. The declaration was lost from this
// listing; the parameters and fields below are inferred from the body
// (actorLoader, kmBuilder, prompt, password, internalBytes, EncodingType)
// and may differ from the original Toree source.
class KernelInputStream(
  actorLoader: ActorLoader,
  kmBuilder: KMBuilder,
  prompt: String = DefaultPrompt,
  password: Boolean = DefaultPassword
) extends InputStream {
  private val EncodingType = Charset.forName("UTF-8")

  @volatile private var internalBytes: ListBuffer[Byte] = ListBuffer()

  override def read(): Int = {
    if (!this.hasByte) this.requestBytes()

    this.nextByte()
  }

  private def hasByte: Boolean = internalBytes.nonEmpty

  private def nextByte(): Int = {
    val byte = internalBytes.head

    internalBytes = internalBytes.tail

    byte
  }

  private def requestBytes(): Unit = {
    val inputRequest = InputRequest(prompt, password)
    // NOTE: Assuming already provided parent header and correct ids
    val kernelMessage = kmBuilder
      .withHeader(MessageType.Outgoing.InputRequest)
      .withContentString(inputRequest)
      .build

    // NOTE: The same handler is being used in both request and reply
    val responseFuture: Future[String] =
      (actorLoader.load(MessageType.Incoming.InputReply) ? kernelMessage)
      .mapTo[String]

    // Block until we get a response
    import scala.concurrent.duration._
    internalBytes ++=
      Await.result(responseFuture, Duration.Inf).getBytes(EncodingType)
  }
} 
Example 52
Source File: BasketQuerySpec.scala    From 006877   with MIT License
package aia.persistence

import scala.concurrent.duration._
import scala.concurrent.Future
import scala.concurrent.Await
import akka.NotUsed
import akka.actor._
import akka.testkit._

import akka.stream._
import akka.stream.scaladsl._
import akka.persistence.query._
import akka.persistence.query.journal.leveldb.scaladsl._

class BasketQuerySpec extends PersistenceSpec(ActorSystem("test")) {

  val shopperId = 3L
  val macbookPro = Item("Apple Macbook Pro", 1, BigDecimal(2499.99))
  val macPro = Item("Apple Mac Pro", 1, BigDecimal(10499.99))
  val displays = Item("4K Display", 3, BigDecimal(2499.99))
  val appleMouse = Item("Apple Mouse", 1, BigDecimal(99.99))
  val appleKeyboard = Item("Apple Keyboard", 1, BigDecimal(79.99))
  val dWave = Item("D-Wave One", 1, BigDecimal(14999999.99))

  "Querying the journal for a basket" should {
    "return all basket events currently stored" in {
      val basket = system.actorOf(Basket.props, Basket.name(shopperId))
      basket ! Basket.Add(macbookPro, shopperId)
      basket ! Basket.Add(displays, shopperId)
      basket ! Basket.GetItems(shopperId)
      expectMsg(Items(macbookPro, displays))
      killActors(basket)

      implicit val mat = ActorMaterializer()(system)
      val queries = 
        PersistenceQuery(system).readJournalFor[LeveldbReadJournal](
          LeveldbReadJournal.Identifier
        )
      
      val src: Source[EventEnvelope, NotUsed] =
        queries.currentEventsByPersistenceId(
          Basket.name(shopperId), 0L, Long.MaxValue)
 
      val events: Source[Basket.Event, NotUsed] = 
        src.map(_.event.asInstanceOf[Basket.Event]) 

      val res: Future[Seq[Basket.Event]] = events.runWith(Sink.seq)
     
      Await.result(res, 10 seconds) should equal(
        Vector(
          Basket.Added(macbookPro),
          Basket.Added(displays)
        )
      )
    }
  }
} 
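
The test blocks on the Future materialized by Sink.seq. A minimal sketch of that drain-and-await step in isolation, assuming the same Akka Streams era as the example (ActorMaterializer):

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import scala.concurrent.Await
import scala.concurrent.duration._

object SinkSeqSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val mat = ActorMaterializer()
  // Sink.seq materializes a Future[Seq[Int]] that a test can block on.
  val collected = Source(1 to 4).runWith(Sink.seq)
  println(Await.result(collected, 3.seconds)) // Vector(1, 2, 3, 4)
  system.terminate()
}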
Example 53
Source File: GetTicketInfoSpec.scala    From 006877   with MIT License 5 votes vote down vote up
package com.goticks

import org.scalatest.MustMatchers
import org.scalatest.WordSpec
import scala.concurrent.{Future, Await}

class GetTicketInfoSpec extends WordSpec with MustMatchers {

  object TicketInfoService extends TicketInfoService with MockWebServiceCalls
  import TicketInfoService._
  import scala.concurrent.duration._

  "getTicketInfo" must {
    "return a complete ticket info when all futures are successful" in {
      val ticketInfo = Await.result(getTicketInfo("1234", Location(1d,2d)), 10.seconds)

      ticketInfo.event.isEmpty must be(false)
      ticketInfo.event.foreach( event=> event.name must be("Quasimoto"))
      ticketInfo.travelAdvice.isEmpty must be(false)
      ticketInfo.suggestions.map(_.name) must be (Seq("Madlib", "OhNo", "Flying Lotus"))
    }
    "return an incomplete ticket info when getEvent fails" in {
      val ticketInfo = Await.result(getTicketInfo("4321", Location(1d,2d)), 10.seconds)

      ticketInfo.event.isEmpty must be(true)
      ticketInfo.travelAdvice.isEmpty must be(true)
      ticketInfo.suggestions.isEmpty must be (true)
    }
  }
}

trait MockWebServiceCalls extends WebServiceCalls {
  import com.github.nscala_time.time.Imports._
  import scala.concurrent.ExecutionContext.Implicits.global

  def getEvent(ticketNr: String, location: Location): Future[TicketInfo] = {
      Future {
        if(ticketNr == "1234") {
          TicketInfo(ticketNr, location, event = Some(Event("Quasimoto", Location(4.324218908d,53.12311144d), new DateTime(2013,10,1,22,30))))
        } else throw new Exception("crap")
      }
  }

  def callWeatherXService(ticketInfo: TicketInfo): Future[Option[Weather]] = {
    Future { Some(Weather(30, false)) }
  }

  def callWeatherYService(ticketInfo: TicketInfo): Future[Option[Weather]] = {
    Future { Some(Weather(30, false)) }
  }

  def callTrafficService(origin: Location, destination: Location, time: DateTime): Future[Option[RouteByCar]] = {
    Future {
      Some(RouteByCar("route1", time - (35.minutes), origin, destination, 30.minutes, 5.minutes))
    }
  }

  def callPublicTransportService(origin: Location, destination: Location, time: DateTime): Future[Option[PublicTransportAdvice]] = {
    Future {
      Some(PublicTransportAdvice("public transport route 1", time - (20.minutes), origin, destination, 20.minutes))
    }
  }

  def callSimilarArtistsService(event: Event): Future[Seq[Artist]] = {
    Future {
      Seq(Artist("Madlib", "madlib.com/calendar"), Artist("OhNo", "ohno.com/calendar"), Artist("Flying Lotus", "fly.lo/calendar"))
    }
  }

  def callArtistCalendarService(artist: Artist, nearLocation: Location): Future[Event] = {
    Future {
      Event(artist.name,Location(1d,1d), DateTime.now)
    }
  }
} 
Example 54
Source File: EchoActorTest.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.testdriven

import akka.testkit.{ TestKit, ImplicitSender }
import akka.actor.{ Props, Actor, ActorSystem }
import org.scalatest.WordSpecLike

import akka.util.Timeout
import scala.concurrent.Await
import scala.util.{ Success, Failure }

import scala.language.postfixOps


class EchoActorTest extends TestKit(ActorSystem("testsystem"))
  with WordSpecLike
  with ImplicitSender
  with StopSystemAfterAll {


  "An EchoActor" must {
    "Reply with the same message it receives" in {

      import akka.pattern.ask
      import scala.concurrent.duration._
      implicit val timeout = Timeout(3 seconds)
      implicit val ec = system.dispatcher
      val echo = system.actorOf(Props[EchoActor], "echo1")
      val future = echo.ask("some message")
      future.onComplete {
        case Failure(_)   => // handle failure
        case Success(msg) => // handle success
      }

      Await.ready(future, timeout.duration)
    }

    "Reply with the same message it receives without ask" in {
      val echo = system.actorOf(Props[EchoActor], "echo2")
      echo ! "some message"
      expectMsg("some message")

    }

  }
}


class EchoActor extends Actor {
  def receive = {
    case msg =>
      sender() ! msg
  }
} 
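
Note that this test uses Await.ready rather than Await.result: it only waits for completion and never rethrows. A small sketch of the difference (the failing future is illustrative):

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

object ReadyVsResultSketch extends App {
  val failed = Future[Int] { throw new RuntimeException("boom") }
  // Await.ready waits for completion and returns the future itself;
  // the failure stays wrapped in its value.
  Await.ready(failed, 1.second)
  println(failed.value) // Some(Failure(java.lang.RuntimeException: boom))
  // Await.result(failed, 1.second) would rethrow the RuntimeException.
}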
Example 55
Source File: BasketSpec.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.next

import akka.actor._
import akka.testkit._

class BasketSpec extends PersistenceSpec(ActorSystem("test"))
    with PersistenceCleanup {

  val shopperId = 5L
  val macbookPro = Item("Apple Macbook Pro", 1, BigDecimal(2499.99))
  val displays = Item("4K Display", 3, BigDecimal(2499.99))

  "The basket" should {
    "return the items" in {
      val basket = system.actorOf(Basket.props, Basket.name(shopperId))
      basket ! Basket.Add(macbookPro, shopperId)
      basket ! Basket.Add(displays, shopperId)

      basket ! Basket.GetItems(shopperId)
      //basket ! Basket.GetItems
      expectMsg(Items(macbookPro, displays))
      killActors(basket)
    }

    "return the items in a typesafe way" in {
      import akka.typed._
      import akka.typed.scaladsl.AskPattern._
      import scala.concurrent.Future
      import scala.concurrent.duration._
      import scala.concurrent.Await
      
      implicit val timeout = akka.util.Timeout(1 second)

      val macbookPro =
        TypedBasket.Item("Apple Macbook Pro", 1, BigDecimal(2499.99))
      val displays =
        TypedBasket.Item("4K Display", 3, BigDecimal(2499.99))

      // TypedBasket.basketBehavior was changed into a method, so it now has
      // to be invoked with (). To use the val-based TypedBasket.basketBehavior
      // that relies on the old, deprecated DSL, drop the () from
      // basketBehavior().
      val sys: ActorSystem[TypedBasket.Command] =
        ActorSystem("typed-basket", TypedBasket.basketBehavior())
      sys ! TypedBasket.Add(macbookPro, shopperId)
      sys ! TypedBasket.Add(displays, shopperId)

      implicit def scheduler = sys.scheduler
      val items: Future[TypedBasket.Items] =
        sys ? (TypedBasket.GetItems(shopperId, _))

      val res = Await.result(items, 10 seconds)
      res should equal(TypedBasket.Items(Vector(macbookPro, displays)))
      //sys ? Basket.GetItems
      sys.terminate()
    }

  }
} 
Example 56
Source File: LogstashWriter.scala    From scribe   with MIT License 5 votes vote down vote up
package scribe.logstash

import io.circe.Json
import io.youi.client.HttpClient
import io.youi.http.content.Content
import io.youi.http.HttpResponse
import io.youi.net._
import profig.JsonUtil
import scribe.{LogRecord, MDC}
import scribe.writer.Writer
import perfolation._
import scribe.output.{EmptyOutput, LogOutput}

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scribe.Execution.global

case class LogstashWriter(url: URL,
                          service: String,
                          additionalFields: Map[String, String] = Map.empty,
                          asynchronous: Boolean = true) extends Writer {
  private lazy val client = HttpClient.url(url).post

  override def write[M](record: LogRecord[M], output: LogOutput): Unit = {
    val future = log(record)
    if (!asynchronous) {
      Await.result(future, 10.seconds)
    }
  }

  def log[M](record: LogRecord[M]): Future[HttpResponse] = {
    val l = record.timeStamp
    val timestamp = p"${l.t.F}T${l.t.T}.${l.t.L}${l.t.z}"
    val r = LogstashRecord(
      message = record.message.plainText,
      service = service,
      level = record.level.name,
      value = record.value,
      throwable = record.throwable.map(LogRecord.throwable2LogOutput(EmptyOutput, _).plainText),
      fileName = record.fileName,
      className = record.className,
      methodName = record.methodName,
      line = record.line,
      thread = record.thread.getName,
      `@timestamp` = timestamp,
      mdc = MDC.map.map {
        case (key, function) => key -> function()
      }
    )

    val jsonObj = JsonUtil.toJson(r).asObject.get
    val jsonWithFields = additionalFields.foldLeft(jsonObj) { (obj, field) =>
      obj.add(field._1, Json.fromString(field._2))
    }
    val json = Json.fromJsonObject(jsonWithFields).noSpaces

    val content = Content.string(json, ContentType.`application/json`)
    client.content(content).send()
  }
} 
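
The writer only blocks when asynchronous = false. A stripped-down sketch of that switch, with hypothetical names, to show the design choice:

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

// Hypothetical illustration of the synchronous/asynchronous switch above.
class BlockingSwitch(asynchronous: Boolean) {
  def write(send: () => Future[Unit]): Unit = {
    val future = send()
    // Fire-and-forget when asynchronous; otherwise block until the
    // remote write is acknowledged (or the timeout elapses).
    if (!asynchronous) Await.result(future, 10.seconds)
  }
}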
Example 57
Source File: FutureTracingSpec.scala    From scribe   with MIT License 5 votes vote down vote up
package spec

import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

import scala.concurrent.duration.Duration
import scala.concurrent.Await

class FutureTracingSpec extends AnyWordSpec with Matchers {
  "Future tracing" when {
    "using scribe implicits" should {
      "future trace back" in {
        val exception = intercept[RuntimeException](Await.result(FutureTesting.position(), Duration.Inf))
        val trace = exception.getStackTrace
        trace(0).getFileName should be("FutureTesting.scala")
        trace(0).getLineNumber should be(27)

        trace(1).getFileName should be("FutureTesting.scala")
        trace(1).getMethodName should be("three")
        trace(1).getLineNumber should be(26)

        trace(2).getFileName should be("FutureTesting.scala")
        trace(2).getMethodName should be("two")
        trace(2).getLineNumber should be(20)

        trace(3).getFileName should be("FutureTesting.scala")
        trace(3).getMethodName should be("one")
        trace(3).getLineNumber should be(14)

        trace(4).getFileName should be("FutureTesting.scala")
        trace(4).getMethodName should be("position")
        trace(4).getLineNumber should be(9)
      }
      "async trace back" in {
        val exception = intercept[RuntimeException](Await.result(AsyncTesting.position(), Duration.Inf))
        val trace = exception.getStackTrace

        var i = 0

        trace(i).getFileName should be("AsyncTesting.scala")
        trace(i).getLineNumber should be(34)
        i += 1

        trace(i).getFileName should be("AsyncTesting.scala")
        trace(i).getMethodName should be("three")
        trace(i).getLineNumber should be(32)
        i += 1

        if (trace(i).getMethodName == "three") {
          trace(i).getFileName should be("AsyncTesting.scala")
          trace(i).getMethodName should be("three")
          trace(i).getLineNumber should be(33)
          i += 1
        }

        trace(i).getFileName should be("AsyncTesting.scala")
        trace(i).getMethodName should be("two")
        trace(i).getLineNumber should be(25)
        i += 1

        trace(i).getFileName should be("AsyncTesting.scala")
        trace(i).getMethodName should be("one")
        trace(i).getLineNumber should be(17)
        i += 1

        trace(i).getFileName should be("AsyncTesting.scala")
        trace(i).getMethodName should be("position")
        trace(i).getLineNumber should be(10)
      }
    }
  }
} 
Example 58
Source File: ScalaClientTestUtils.scala    From incubator-livy   with Apache License 2.0 5 votes vote down vote up
package org.apache.livy.scalaapi

import java.util.Random
import java.util.concurrent.{CountDownLatch, TimeUnit}

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

import org.scalatest.FunSuite

import org.apache.livy.LivyBaseUnitTestSuite

object ScalaClientTestUtils extends FunSuite with LivyBaseUnitTestSuite {

  val Timeout = 40

  def helloJob(context: ScalaJobContext): String = "hello"

  def throwExceptionJob(context: ScalaJobContext): Unit = throw new CustomTestFailureException

  def simpleSparkJob(context: ScalaJobContext): Long = {
    val r = new Random
    val count = 5
    val partitions = Math.min(r.nextInt(10) + 1, count)
    val buffer = new ArrayBuffer[Int]()
    for (a <- 1 to count) {
      buffer += r.nextInt()
    }
    context.sc.parallelize(buffer, partitions).count()
  }

  def assertAwait(lock: CountDownLatch): Unit = {
    assert(lock.await(Timeout, TimeUnit.SECONDS))
  }

  def assertTestPassed[T](future: Future[T], expectedValue: T): Unit = {
    val result = Await.result(future, Timeout second)
    assert(result === expectedValue)
  }
} 
Example 59
Source File: BaseSessionSpec.scala    From incubator-livy   with Apache License 2.0 5 votes vote down vote up
package org.apache.livy.repl

import java.util.Properties
import java.util.concurrent.atomic.AtomicInteger

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps

import org.apache.spark.SparkConf
import org.json4s._
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.concurrent.Eventually._

import org.apache.livy.LivyBaseUnitTestSuite
import org.apache.livy.rsc.RSCConf
import org.apache.livy.rsc.driver.{Statement, StatementState}
import org.apache.livy.sessions._

abstract class BaseSessionSpec(kind: Kind)
    extends FlatSpec with Matchers with LivyBaseUnitTestSuite {

  implicit val formats = DefaultFormats

  private val rscConf = new RSCConf(new Properties()).set(RSCConf.Entry.SESSION_KIND, kind.toString)

  private val sparkConf = new SparkConf()

  protected def execute(session: Session)(code: String): Statement = {
    val id = session.execute(code)
    eventually(timeout(30 seconds), interval(100 millis)) {
      val s = session.statements(id)
      s.state.get() shouldBe StatementState.Available
      s
    }
  }

  protected def withSession(testCode: Session => Any): Unit = {
    val stateChangedCalled = new AtomicInteger()
    val session =
      new Session(rscConf, sparkConf, None, { _ => stateChangedCalled.incrementAndGet() })
    try {
      // Session's constructor should fire an initial state change event.
      stateChangedCalled.intValue() shouldBe 1
      Await.ready(session.start(), 30 seconds)
      assert(session.state === SessionState.Idle)
      // There should be at least 1 state change event fired when session transits to idle.
      stateChangedCalled.intValue() should (be > 1)
      testCode(session)
    } finally {
      session.close()
    }
  }

  it should "start in the starting or idle state" in {
    val session = new Session(rscConf, sparkConf)
    val future = session.start()
    try {
      Await.ready(future, 60 seconds)
      session.state should (equal (SessionState.Starting) or equal (SessionState.Idle))
    } finally {
      session.close()
    }
  }

  it should "eventually become the idle state" in withSession { session =>
    session.state should equal (SessionState.Idle)
  }

} 
Example 60
Source File: Implicits.scala    From openlaw-core   with Apache License 2.0 5 votes vote down vote up
package org.adridadou.openlaw.result

import cats.implicits._
import cats.data.NonEmptyList

import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration
import scala.language.implicitConversions
import scala.util.{Try, Failure => TFailure, Success => TSuccess}

object Implicits {

  implicit class RichNonEmptyList[T](val nel: NonEmptyList[T]) extends AnyVal {
    def mkString: String = mkString(", ")
    def mkString(sep: String): String = nel.toList.mkString(sep)
  }

  implicit class RichTry[T](val t: Try[T]) extends AnyVal {
    def toResult: Result[T] = t match {
      case TSuccess(v)            => Success(v)
      case TFailure(e: Exception) => Failure(e)

      // don't try to handle Error instances
      case TFailure(t) => throw t
    }
  }

  implicit class RichEither[T](val either: Either[String, T]) extends AnyVal {
    def toResult: Result[T] = either.left.map(FailureMessage(_))
  }

  implicit class RichFuture[T](val future: Future[T]) extends AnyVal {
    def getResult(timeout: Duration): Result[T] =
      attempt(Await.result(future, timeout))
  }

  implicit class RichResult[T](val result: Result[T]) extends AnyVal {
    def addCause(cause: Failure[T]): ResultNel[T] = result match {
      case Success(_)     => cause.toResultNel
      case Left(original) => FailureNel(original, cause.value)
    }
    def addFailure[U >: T](cause: FailureCause): ResultNel[U] = result match {
      case s @ Success(_) => s.toResultNel
      case Left(original) => FailureNel(cause, original)
    }
    def addMessageToFailure[U >: T](message: String): ResultNel[U] =
      result match {
        case s @ Success(_) => s.toResultNel
        case Left(original) => FailureNel(FailureMessage(message), original)
      }
    def convert(pf: PartialFunction[Exception, Exception]): Result[T] =
      result.left.map {
        case FailureException(e, _) if pf.isDefinedAt(e) =>
          FailureException(pf(e))
        case f => f
      }
    def recoverMerge(f: FailureCause => T): T =
      result.fold(failure => f(failure), success => success)
    def recoverWith(pf: PartialFunction[FailureCause, Result[T]]): Result[T] =
      result.leftFlatMap { error =>
        if (pf.isDefinedAt(error)) {
          pf(error)
        } else {
          result
        }
      }
    def toResultNel: ResultNel[T] = result.toValidatedNel
    def toFuture: Future[T] = result match {
      case Success(value) => Future.successful(value)
      case Failure(e, _)  => Future.failed(e)
    }
    def getOrThrow(): T = result.valueOr(_.throwException())
  }

  implicit class RichOption[T](val option: Option[T]) extends AnyVal {
    def toResult(message: String): Result[T] =
      option.map(x => Success(x)).getOrElse(Failure(message))
  }

  implicit class RichResultNel[T](val result: ResultNel[T]) extends AnyVal {
    def toUnit: ResultNel[Unit] = result.map(_ => ())
    def toResult: Result[T] = result.toEither.leftMap {
      case NonEmptyList(x, Seq()) => x
      case nel                    => FailureException(MultipleCauseException(nel))
    }
  }

  implicit def exception2Result[A](e: Exception): Result[A] = Failure[A](e)
  implicit def unitResultConversion[T](wrapped: Result[T]): Result[Unit] =
    wrapped.map(_ => ())
  implicit def failureCause2Exception[T](wrapped: FailureCause): Exception =
    wrapped.e
} 
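
getResult folds a blocking Await into the library's Result type via attempt, which is openlaw-core's own combinator. The same idea expressed with plain scala.util.Try, as a sketch:

import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration
import scala.util.Try

object SafeAwaitSketch {
  // Any exception, including Await's TimeoutException, lands in Failure.
  def getResult[T](future: Future[T], timeout: Duration): Try[T] =
    Try(Await.result(future, timeout))
}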
Example 61
Source File: GrpcAkkaStreamsServerCalls.scala    From grpcakkastream   with MIT License 5 votes vote down vote up
package grpc.akkastreams

import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Sink, Source}
import io.grpc.ServerCallHandler
import io.grpc.stub.{CallStreamObserver, ServerCalls, StreamObserver}

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success}

object GrpcAkkaStreamsServerCalls {

  def unaryCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] = ServerCalls.asyncUnaryCall(
    new ServerCalls.UnaryMethod[I, O] {
      override def invoke(request: I, responseObserver: StreamObserver[O]) =
        Source
          .single(request)
          .via(service)
          .runForeach(responseObserver.onNext)
          .onComplete {
            case Success(_) => responseObserver.onCompleted()
            case Failure(t) => responseObserver.onError(t)
          }(mat.executionContext)
    }
  )

  def serverStreamingCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] =
    ServerCalls.asyncServerStreamingCall(
      new ServerCalls.ServerStreamingMethod[I, O] {
        override def invoke(request: I, responseObserver: StreamObserver[O]) =
          Source
            .single(request)
            .via(service)
            .runWith(Sink.fromGraph(new GrpcSinkStage[O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            )))
      }
    )

  def clientStreamingCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] = ServerCalls.asyncClientStreamingCall(
    new ServerCalls.ClientStreamingMethod[I, O] {
      override def invoke(responseObserver: StreamObserver[O]): StreamObserver[I] =
      // blocks until the GraphStage is fully initialized
        Await.result(
          Source
            .fromGraph(new GrpcSourceStage[I, O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))
            .via(service)
            .to(Sink.fromGraph(new GrpcSinkStage[O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))).run(),
          Duration.Inf
        )
    }
  )

  def bidiStreamingCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] = ServerCalls.asyncBidiStreamingCall(
    new ServerCalls.BidiStreamingMethod[I, O] {
      override def invoke(responseObserver: StreamObserver[O]): StreamObserver[I] =
      // blocks until the GraphStage is fully initialized
        Await.result(
          Source
            .fromGraph(new GrpcSourceStage[I, O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))
            .via(service)
            .to(Sink.fromGraph(new GrpcSinkStage[O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))).run(),
          Duration.Inf
        )
    }
  )
} 
Example 62
Source File: UnMarshalling.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.basic.routing

import akka.actor.ActorSystem
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.{HttpMethods, HttpRequest, HttpResponse, MessageEntity}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.{ActorMaterializer, Materializer}
import akka.util.ByteString

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import spray.json._


object UnMarshalling {

  def main(args: Array[String]) {

    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat:Materializer = ActorMaterializer()

    //type FromStringUnmarshaller[T] = Unmarshaller[String, T]
    val intFuture = Unmarshal("42").to[Int]
    val int = Await.result(intFuture, 1.second)
    println("int unmarshalling "+int)

    //type FromStringUnmarshaller[T] = Unmarshaller[String, T]
    val boolFuture = Unmarshal("off").to[Boolean]
    val bool = Await.result(boolFuture, 1.second)
    println("off unmarshalling "+bool)

    //type ToEntityMarshaller[T] = Marshaller[T, MessageEntity]
    val string = "Yeah"
    val entityFuture = Marshal(string).to[MessageEntity]
    val entity = Await.result(entityFuture, 1.second) // don't block in non-test code!
    println(entity)

    //type ToResponseMarshaller[T] = Marshaller[T, HttpResponse]
    val errorMsg = "Not found, pal!"
    val responseFuture = Marshal(404 -> errorMsg).to[HttpResponse]
    val response = Await.result(responseFuture, 1.second)
    println(response)


    //type FromEntityUnmarshaller[T] = Unmarshaller[HttpEntity, T]
    val jsonByteString = ByteString("""{"name":"Hello"}""")
    val httpRequest = HttpRequest(HttpMethods.POST, entity = jsonByteString)
    val jsonDataUnmarshalledFuture = Unmarshal(httpRequest).to[String]
    val jsonDataUnmarshalled = Await.result(jsonDataUnmarshalledFuture, 1.second)
    println(jsonDataUnmarshalled)

    sys.terminate()

  }

} 
Example 63
Source File: ConnectionLevel.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.basic.client

import akka.actor.ActorSystem
import akka.http.javadsl.settings.ClientConnectionSettings
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._


object ConnectionLevel {

  def main(args: Array[String]) {
    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat = ActorMaterializer()

    val connectionFlow = Http().outgoingConnection("localhost", 8090)

    val responseFuture =
      Source.single(HttpRequest(uri = "/welcome"))
        .via(connectionFlow)
        .runWith(Sink.head)

    val response = Await.result(responseFuture, 10 seconds)
    // Block until the body has been printed; terminating the system right
    // away could otherwise race with the still-running stream.
    Await.result(response.entity.dataBytes.map(_.utf8String).runForeach(println), 10 seconds)
    sys.terminate()
  }

} 
Example 64
Source File: CountDownLatch.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter10

import akka.Done
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}

import scala.concurrent.{Await, Future, Promise}
import scala.concurrent.duration._

object CountDownLatch {
  case object CountDown

  def apply(count:Int)(implicit actorSystem: ActorSystem) = {
    val promise = Promise[Done]()
    val props = Props(classOf[CountDownLatchActor], count, promise)
    val countDownLatchActor = actorSystem.actorOf(props, "countDownLatchActor")
    new CountDownLatch(countDownLatchActor, promise)
  }
}

class CountDownLatch(private val actor: ActorRef, private val promise: Promise[Done]) {
  import CountDownLatch._

  def countDown() = actor ! CountDown
  def await() : Unit = Await.result(promise.future, 10 minutes)
  val result : Future[Done] = promise.future
}


class CountDownLatchActor(count: Int, promise: Promise[Done]) extends Actor with ActorLogging {
  import CountDownLatch._

  var remaining = count

  def receive = {
    case CountDown if remaining - 1  == 0 =>
      log.info("Counting down")
      promise.success(Done)
      log.info("Gate opened")
      context.stop(self)
    case CountDown =>
      log.info("Counting down")
      remaining -= 1
  }
} 
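
An illustrative use of this latch (names as defined above): the promise completes on the final countDown, after which await() returns immediately.

import akka.actor.ActorSystem

object CountDownLatchUsage extends App {
  implicit val system = ActorSystem("latchDemo")
  val latch = CountDownLatch(2)
  latch.countDown()
  latch.countDown() // the second count-down completes the promise
  latch.await()     // Await.result returns as soon as the gate is open
  println("gate opened")
  system.terminate()
}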
Example 65
Source File: FibonacciActor.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter1

import akka.actor.{Props, ActorSystem, Actor}
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.Await
import scala.concurrent.duration._

class FibonacciActor extends Actor {
  override def receive: Receive = {
    case num: Int =>
      val fibonacciNumber = fib(num)
      sender ! fibonacciNumber
  }

  def fib(n: Int): Int = n match {
    case 0 | 1 => n
    case _ => fib(n - 1) + fib(n - 2)
  }
}

object FibonacciActorApp extends App {

  implicit val timeout = Timeout(10 seconds)
  val actorSystem = ActorSystem("HelloAkka")

  val actor = actorSystem.actorOf(Props[FibonacciActor])


  // asking for result from actor
  val future = (actor ? 10).mapTo[Int]
  val fibonacciNumber = Await.result(future, 10 seconds)
  println(fibonacciNumber)
} 
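
The ask (?) returns a Future[Any]; mapTo recovers the expected type before the caller blocks. A self-contained sketch of the same pattern with a hypothetical actor:

import akka.actor.{Actor, ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.Await
import scala.concurrent.duration._

class Doubler extends Actor {
  def receive = { case n: Int => sender() ! n * 2 }
}

object AskSketch extends App {
  implicit val timeout: Timeout = Timeout(3.seconds)
  val system = ActorSystem("askSketch")
  val doubler = system.actorOf(Props[Doubler], "doubler")
  // Without mapTo the result type is Any; Await.result then blocks up to 3s.
  println(Await.result((doubler ? 21).mapTo[Int], 3.seconds)) // 42
  system.terminate()
}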
Example 66
Source File: DistributedDataApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter7

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.cluster.ddata.Replicator.{ReadFrom, ReadMajority}
import akka.pattern.ask
import akka.util.Timeout
import com.packt.chapter7.SubscriptionManager._

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Random

object DistributedDataApplication extends App {
  val actorSystem = ActorSystem("ClusterSystem")

  Cluster(actorSystem).registerOnMemberUp {
    val subscriptionManager = actorSystem.actorOf(SubscriptionManager.props)

    val subscription = Subscription(Random.nextInt(3), Cluster(actorSystem).selfUniqueAddress.toString, System.currentTimeMillis())
    subscriptionManager ! AddSubscription(subscription)

    //Let's simulate some time has passed. Never use Thread.sleep in production!
    Thread.sleep(10000)

    implicit val timeout = Timeout(5 seconds)

    val readMajority = ReadMajority(timeout = 5.seconds)
    val readFrom = ReadFrom(n = 2, timeout = 5.second)

    Await.result(subscriptionManager ? GetSubscriptions(readMajority), 5 seconds) match {
      case GetSubscriptionsSuccess(subscriptions) =>
        println(s"The current set of subscriptions is $subscriptions")
      case GetSubscriptionsFailure =>
        println(s"Subscription manager was not able to get subscriptions successfully.")
    }

    subscriptionManager ! RemoveSubscription(subscription)

    Await.result(subscriptionManager ? GetSubscriptions(readFrom), 5 seconds) match {
      case GetSubscriptionsSuccess(subscriptions) =>
        println(s"The current set of subscriptions is $subscriptions")
      case GetSubscriptionsFailure =>
        println(s"Subscription manager was not able to get subscriptions successfully.")
    }
  }
} 
Example 67
Source File: HelloWorldServiceSpec.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.example.helloworld.impl

import com.lightbend.lagom.scaladsl.server.LocalServiceLocator
import com.lightbend.lagom.scaladsl.testkit.ServiceTest
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AsyncWordSpec
import com.example.helloworld.api._
import org.scalatest.concurrent.Eventually

import scala.concurrent.Await
import scala.concurrent.duration._

class HelloWorldServiceSpec
    extends AsyncWordSpec
    with Matchers
    with BeforeAndAfterAll
    with Eventually {

  private val server = ServiceTest.startServer(
    ServiceTest.defaultSetup
      .withCassandra()
  ) { ctx =>
    new HelloWorldApplication(ctx) with LocalServiceLocator
  }

  val client: HelloWorldService =
    server.serviceClient.implement[HelloWorldService]

  override protected def afterAll(): Unit = server.stop()

  "Hello World service" should {

    "say hello" in {
      client.hello("Alice").invoke().map { answer =>
        answer should ===("""Hello, Alice!
            |Started reports: default-projected-message
            |Stopped reports: default-projected-message
            |""".stripMargin)
      }
    }

    "allow responding with a custom message" in {
      for {
        _ <- client.useGreeting("Bob", "Hi").invoke()
        answer <- client.hello("Bob").invoke()
      } yield {
        answer should ===("""Hi, Bob!
              |Started reports: default-projected-message
              |Stopped reports: default-projected-message
              |""".stripMargin)
      }

      implicit val patienceConfig: PatienceConfig = PatienceConfig(timeout = 25.seconds, interval = 300.millis)
      eventually{
        client.hello("Bob").invoke().map(_ should ===(
            """Hi, Bob!
              |Started reports: Hi
              |Stopped reports: default-projected-message
              |""".stripMargin
          )
        )
      }

    }
  }
} 
Example 68
Source File: LagomDevModeConsoleHelper.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.devmode

import java.io.Closeable
import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit

import com.lightbend.lagom.devmode.Reloader.DevServerBinding
import play.dev.filewatch.LoggerProxy

import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.duration._


class ConsoleHelper(colors: Colors) {
  def printStartScreen(log: LoggerProxy, services: Seq[ServiceBindingInfo]): Unit = {
    services.foreach {
      case ServiceBindingInfo(name, bindings) =>
        bindings.foreach(b => log.info(s"Service $name listening for ${b.protocol} on ${b.address}:${b.port}"))
    }
    log.info(
      colors.green(
        s"(Service${if (services.size > 1) "s" else ""} started, press enter to stop and go back to the console...)"
      )
    )
  }

  def blockUntilExit() = {
    // blocks until user presses enter
    System.in.read()
  }

  def shutdownAsynchronously(log: LoggerProxy, services: Seq[Closeable], infrastructureServices: Seq[Closeable]) = {
    // shut down all running services
    log.info("Stopping services")

    val n = java.lang.Runtime.getRuntime.availableProcessors
    log.debug("nb proc : " + n)
    //creating a dedicated execution context
    // with a fixed number of thread (indexed on number of cpu)
    implicit val ecn = ExecutionContext.fromExecutorService(
      Executors.newFixedThreadPool(n)
    )

    try {
      //Stop services in asynchronous manner
      val closing = Future.traverse(services)(serv =>
        Future {
          serv.close()
        }
      )
      closing.onComplete(_ => log.info("All services are stopped"))
      Await.result(closing, 60.seconds)

      println()
      // and finally shut down any other possibly running embedded server
      Await.result(
        Future.traverse(infrastructureServices)(serv =>
          Future {
            serv.close()
          }
        ),
        60.seconds
      )
    } finally {
      // and the last part concern the closing of execution context that has been created above
      ecn.shutdown()
      ecn.awaitTermination(60, TimeUnit.SECONDS)
    }
  }
}

class Colors(logNoFormat: String) {
  import scala.Console._

  val isANSISupported: Boolean = {
    Option(System.getProperty(logNoFormat))
      .map(_ != "true")
      .orElse {
        Option(System.getProperty("os.name"))
          .map(_.toLowerCase(java.util.Locale.ENGLISH))
          .filter(_.contains("windows"))
          .map(_ => false)
      }
      .getOrElse(true)
  }

  private def color(code: String, str: String) = if (isANSISupported) code + str + RESET else str

  def red(str: String): String     = color(RED, str)
  def blue(str: String): String    = color(BLUE, str)
  def cyan(str: String): String    = color(CYAN, str)
  def green(str: String): String   = color(GREEN, str)
  def magenta(str: String): String = color(MAGENTA, str)
  def white(str: String): String   = color(WHITE, str)
  def black(str: String): String   = color(BLACK, str)
  def yellow(str: String): String  = color(YELLOW, str)
} 
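
The shutdown path fans out close() calls with Future.traverse and then blocks once on the aggregate. Condensed into a sketch:

import java.io.Closeable
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._

object ShutdownSketch {
  // One blocking point over the aggregated future instead of a blocking
  // close() per service.
  def closeAll(services: Seq[Closeable])(implicit ec: ExecutionContext): Unit = {
    val closing = Future.traverse(services)(s => Future(s.close()))
    Await.result(closing, 60.seconds)
  }
}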
Example 69
Source File: PostSpec.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package docs.home.scaladsl.persistence

//#unit-test
import scala.concurrent.Await
import scala.concurrent.duration._
import akka.Done
import akka.actor.ActorSystem
import com.lightbend.lagom.scaladsl.persistence.PersistentEntity.InvalidCommandException
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.lightbend.lagom.scaladsl.testkit.PersistentEntityTestDriver
import com.typesafe.config.ConfigFactory
import org.scalactic.TypeCheckedTripleEquals
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

class PostSpec extends AnyWordSpecLike with Matchers with BeforeAndAfterAll with TypeCheckedTripleEquals {
  val system = ActorSystem("PostSpec", JsonSerializerRegistry.actorSystemSetupFor(BlogPostSerializerRegistry))

  override def afterAll(): Unit = {
    Await.ready(system.terminate, 10.seconds)
  }

  "Blog Post entity" must {
    "handle AddPost" in {
      val driver  = new PersistentEntityTestDriver(system, new Post, "post-1")
      val content = PostContent("Title", "Body")
      val outcome = driver.run(AddPost(content))
      outcome.events should ===(List(PostAdded("post-1", content)))
      outcome.state.published should ===(false)
      outcome.state.content should ===(Some(content))
      outcome.replies should ===(List(AddPostDone("post-1")))
      outcome.issues should be(Nil)
    }

    "validate title" in {
      val driver  = new PersistentEntityTestDriver(system, new Post, "post-1")
      val outcome = driver.run(AddPost(PostContent("", "Body")))
      outcome.replies.head.getClass should be(classOf[InvalidCommandException])
      outcome.events.size should ===(0)
      outcome.issues should be(Nil)
    }

    "handle ChangeBody" in {
      val driver = new PersistentEntityTestDriver(system, new Post, "post-1")
      driver.run(AddPost(PostContent("Title", "Body")))

      val outcome = driver.run(ChangeBody("New body 1"), ChangeBody("New body 2"))
      outcome.events should ===(List(BodyChanged("post-1", "New body 1"), BodyChanged("post-1", "New body 2")))
      outcome.state.published should ===(false)
      outcome.state.content.get.body should ===("New body 2")
      outcome.replies should ===(List(Done, Done))
      outcome.issues should be(Nil)
    }
  }
}
//#unit-test 
Example 70
Source File: JpaSessionImplSpec.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.javadsl.persistence.jpa

import javax.persistence.EntityManager
import javax.persistence.EntityTransaction
import javax.persistence.Persistence

import com.google.common.collect.ImmutableMap
import com.lightbend.lagom.javadsl.persistence.jpa.TestJpaEntity
import org.scalatest.matchers.BePropertyMatchResult
import org.scalatest.matchers.BePropertyMatcher

import scala.compat.java8.FunctionConverters._
import scala.compat.java8.FutureConverters._
import scala.concurrent.duration.DurationInt
import scala.concurrent.Await
import scala.concurrent.Future

class JpaSessionImplSpec extends JpaPersistenceSpec {
  private val open = BePropertyMatcher[EntityManager] { entityManager =>
    BePropertyMatchResult(entityManager.isOpen, "open")
  }

  private val active = BePropertyMatcher[EntityTransaction] { entityTransaction =>
    BePropertyMatchResult(entityTransaction.isActive, "active")
  }

  // Convenience for converting between Scala and Java 8
  private def withTransaction[T](block: EntityManager => T): Future[T] =
    jpa.withTransaction(block.asJava).toScala

  "JpaSessionImpl" must {
    "provide an open EntityManager and close it when the block completes" in {
      val entityManager = Await.result(withTransaction { entityManager =>
        entityManager shouldBe open
        entityManager
      }, 65.seconds)
      entityManager should not be null
      entityManager should not be open
    }

    "provide an active EntityTransaction and complete it when the block completes" in {
      val entityTransaction = Await.result(withTransaction { entityManager =>
        val transaction = entityManager.getTransaction
        transaction shouldBe active
        transaction
      }, 10.seconds)
      entityTransaction should not be null
      entityTransaction should not be active
    }

    "support saving and reading entities" in {
      Persistence.generateSchema("default", ImmutableMap.of("hibernate.hbm2ddl.auto", "update"))
      val entity = new TestJpaEntity("1", "test saving and reading entities")
      entity.getId shouldBe null

      Await.ready(withTransaction(_.persist(entity)), 10.seconds)

      // Note that the retrieval runs in a new transaction
      val retrievedEntity = Await.result(
        withTransaction {
          _.createQuery("SELECT test FROM TestJpaEntity test WHERE parentId = :parentId", classOf[TestJpaEntity])
            .setParameter("parentId", "1")
            .getSingleResult
        },
        10.seconds
      )
      retrievedEntity.getId should not be null
      retrievedEntity.getParentId should equal("1")
      retrievedEntity.getElement should equal("test saving and reading entities")
    }
  }
} 
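
withTransaction bridges a Java CompletionStage into a Scala Future via scala-java8-compat before the test blocks on it. A minimal sketch of that conversion:

import java.util.concurrent.CompletableFuture
import scala.compat.java8.FutureConverters._
import scala.concurrent.Await
import scala.concurrent.duration._

object ToScalaSketch extends App {
  val javaStage = CompletableFuture.completedFuture("done")
  // toScala adapts the CompletionStage so Await (and other Scala
  // combinators) can be used on it.
  println(Await.result(javaStage.toScala, 1.second)) // done
}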
Example 71
Source File: ServiceSupport.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.it

import java.util.Collections
import java.util.function.{ Function => JFunction }

import akka.stream.Materializer
import akka.stream.scaladsl.Source
import org.scalatest.Inside
import play.api.Application
import play.api.Configuration
import play.api.Environment
import play.inject.guice.GuiceApplicationBuilder

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.reflect.ClassTag
import akka.japi.function.Procedure
import com.google.inject.Binder
import com.google.inject.Module
import com.google.inject.TypeLiteral
import com.lightbend.lagom.javadsl.testkit.ServiceTest
import com.lightbend.lagom.javadsl.testkit.ServiceTest.TestServer
import play.api.routing.Router
import java.util

import com.lightbend.lagom.internal.testkit.EmptyAdditionalRoutersModule
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

sealed trait HttpBackend {
  final val provider: String = s"play.core.server.${codeName}ServerProvider"
  val codeName: String
}

case object AkkaHttp extends HttpBackend {
  val codeName = "AkkaHttp"
}

case object Netty extends HttpBackend {
  val codeName = "Netty"
}

trait ServiceSupport extends AnyWordSpecLike with Matchers with Inside {
  def withServer(
      configureBuilder: GuiceApplicationBuilder => GuiceApplicationBuilder
  )(block: Application => Unit)(implicit httpBackend: HttpBackend): Unit = {
    val jConfigureBuilder = new JFunction[GuiceApplicationBuilder, GuiceApplicationBuilder] {
      override def apply(b: GuiceApplicationBuilder): GuiceApplicationBuilder = {
        configureBuilder(b)
          .overrides(EmptyAdditionalRoutersModule)
          .configure("play.server.provider", httpBackend.provider)
      }
    }
    val jBlock = new Procedure[TestServer] {
      override def apply(server: TestServer): Unit = {
        block(server.app.asScala())
      }
    }
    val setup = ServiceTest.defaultSetup.configureBuilder(jConfigureBuilder).withCluster(false)
    ServiceTest.withServer(setup, jBlock)
  }

  def withClient[T: ClassTag](
      configureBuilder: GuiceApplicationBuilder => GuiceApplicationBuilder
  )(block: Application => T => Unit)(implicit httpBackend: HttpBackend): Unit = {
    withServer(configureBuilder) { application =>
      val client = application.injector.instanceOf[T]
      block(application)(client)
    }
  }

  implicit def materializer(implicit app: Application): Materializer = app.materializer

  def consume[T](source: Source[T, _])(implicit mat: Materializer): List[T] = {
    Await.result(source.runFold(List.empty[T])((list, t) => t :: list), 10.seconds).reverse
  }
} 
Example 72
Source File: LagomClientFactorySpec.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.javadsl.client.integration

import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Props
import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.BeforeAndAfterEach

import scala.concurrent.duration._
import scala.concurrent.Await
import akka.pattern._
import akka.stream.SystemMaterializer
import akka.util.Timeout
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

class LagomClientFactorySpec extends AnyFlatSpec with Matchers with BeforeAndAfterEach with ScalaFutures {
  private var system: ActorSystem = _
  private var echoActor: ActorRef = _
  implicit val timeout            = Timeout(5.seconds)

  
  "LagomClientFactory" should "when using a unmanaged actor system, shoudl not terminate it upon closing" in {
    // check that actor system is operational
    (echoActor ? "hey").mapTo[String].futureValue shouldBe "hey"

    LagomClientFactory
    // create a factory by passing the existing ActorSystem
      .create(
        "test",
        this.getClass.getClassLoader,
        system,
        SystemMaterializer(system).materializer
      )
      // closing the factory should not close the existing ActorSystem
      .close()

    // check that actor system is still operational
    (echoActor ? "hey").mapTo[String].futureValue shouldBe "hey"
  }

  protected override def beforeEach(): Unit = {
    system = ActorSystem("test", ConfigFactory.load())
    echoActor = system.actorOf(Props(new EchoActor), "echo")
  }

  class EchoActor extends Actor {
    override def receive: Receive = {
      case s: String => sender() ! s
    }
  }
  protected override def afterEach(): Unit = {
    Await.ready(system.terminate(), 5.seconds)
  }
} 
Example 73
Source File: ServiceLocatorHolderSpec.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.persistence.cassandra

import akka.actor.ActorSystem
import akka.testkit.TestKit
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.Await
import scala.concurrent.duration._
import org.scalatest.matchers.must.Matchers
import org.scalatest.wordspec.AnyWordSpec

class ServiceLocatorHolderSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll {
  val system = ActorSystem("test")

  protected override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(actorSystem = system, verifySystemShutdown = true)
  }

  "ServiceLocatorHolder" should {
    "timeout when no service locator is found" in {
      val eventually = ServiceLocatorHolder(system).serviceLocatorEventually
      assertThrows[NoServiceLocatorException](
        Await.result(eventually, ServiceLocatorHolder.TIMEOUT + 2.seconds)
      )
    }
  }
} 
Example 74
Source File: ServiceLocatorSessionProviderSpec.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.persistence.cassandra

import java.net.InetSocketAddress
import java.net.URI

import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.Await
import scala.concurrent.Future
import org.scalatest.matchers.must.Matchers
import org.scalatest.wordspec.AnyWordSpec

class ServiceLocatorSessionProviderSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll {
  val system         = ActorSystem("test")
  val config: Config = ConfigFactory.load()
  val uri            = new URI("http://localhost:8080")

  protected override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(actorSystem = system, verifySystemShutdown = true)
  }

  val locator = new ServiceLocatorAdapter {
    override def locateAll(name: String): Future[List[URI]] = {
      name match {
        case "existing" => Future.successful(List(uri))
        case "absent"   => Future.successful(Nil)
      }
    }
  }

  val providerConfig: Config = config.getConfig("lagom.persistence.read-side.cassandra")
  val provider               = new ServiceLocatorSessionProvider(system, providerConfig)
  ServiceLocatorHolder(system).setServiceLocator(locator)

  "ServiceLocatorSessionProvider" should {
    "Get the address when the contact points exist" in {
      val future = provider.lookupContactPoints("existing")

      Await.result(future, 3.seconds) mustBe Seq(new InetSocketAddress(uri.getHost, uri.getPort))
    }

    "Fail the future when the contact points do not exist" in {
      val future = provider.lookupContactPoints("absent")

      intercept[NoContactPointsException] {
        Await.result(future, 3.seconds)
      }
    }
  }
} 
Example 75
Source File: JdbcPersistenceSpec.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.javadsl.persistence.jdbc

import akka.actor.ActorSystem
import akka.cluster.Cluster
import com.lightbend.lagom.internal.javadsl.persistence.jdbc._
import com.lightbend.lagom.internal.persistence.ReadSideConfig
import com.lightbend.lagom.internal.persistence.jdbc.SlickDbTestProvider
import com.lightbend.lagom.internal.persistence.testkit.AwaitPersistenceInit.awaitPersistenceInit
import com.lightbend.lagom.persistence.ActorSystemSpec
import com.lightbend.lagom.persistence.PersistenceSpec
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import play.api.Configuration
import play.api.Environment

import scala.concurrent.Await
import scala.concurrent.duration._

abstract class JdbcPersistenceSpec private (actorSystemFactory: () => ActorSystem)
    extends ActorSystemSpec(actorSystemFactory) {
  def this(testName: String, config: Config) = {
    this(() => ActorSystem(testName, config.withFallback(Configuration.load(Environment.simple()).underlying)))
  }

  def this(config: Config) = this(PersistenceSpec.testNameFromCallStack(classOf[JdbcPersistenceSpec]), config)

  def this() = this(ConfigFactory.empty())

  import system.dispatcher

  protected lazy val slick = new SlickProvider(system, coordinatedShutdown)

  protected lazy val offsetStore =
    new JavadslJdbcOffsetStore(
      slick,
      system,
      new OffsetTableConfiguration(
        system.settings.config,
        ReadSideConfig()
      ),
      ReadSideConfig()
    )
  protected lazy val jdbcReadSide: JdbcReadSide = new JdbcReadSideImpl(slick, offsetStore)

  override def beforeAll(): Unit = {
    super.beforeAll()

    // Join ourselves - needed because we're using cluster singleton to create tables
    val cluster = Cluster(system)
    cluster.join(cluster.selfAddress)

    // Trigger database to be loaded and registered to JNDI
    SlickDbTestProvider.buildAndBindSlickDb(system.name, coordinatedShutdown)

    // Trigger tables to be created
    Await.ready(slick.ensureTablesCreated(), 20.seconds)

    awaitPersistenceInit(system)
  }
} 
Example 76
Source File: JdbcClusteredPersistentEntitySpec.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.scaladsl.persistence.jdbc

import akka.actor.ActorSystem
import akka.actor.CoordinatedShutdown
import akka.stream.Materializer
import akka.stream.SystemMaterializer
import com.lightbend.lagom.scaladsl.persistence.TestEntity.Evt
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntityConfig
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntitySpec
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor
import com.lightbend.lagom.scaladsl.persistence.TestEntitySerializerRegistry
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntitySpec.Ports
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.h2.tools.Server
import play.api.Configuration
import play.api.Environment
import play.api.db.HikariCPComponents
import play.api.inject.ApplicationLifecycle
import play.api.inject.DefaultApplicationLifecycle

import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Future

object JdbcClusteredPersistentEntityConfig extends AbstractClusteredPersistentEntityConfig {

  override def specPorts: Ports.SpecPorts = Ports.jdbcSpecPorts

  override def additionalCommonConfig: Config = ConfigFactory.parseString(
    s"""
      db.default.driver=org.h2.Driver
      db.default.url="jdbc:h2:tcp://localhost:${specPorts.database}/mem:JdbcClusteredPersistentEntitySpec"
    """
  )
}

class JdbcClusteredPersistentEntitySpecMultiJvmNode1 extends JdbcClusteredPersistentEntitySpec
class JdbcClusteredPersistentEntitySpecMultiJvmNode2 extends JdbcClusteredPersistentEntitySpec
class JdbcClusteredPersistentEntitySpecMultiJvmNode3 extends JdbcClusteredPersistentEntitySpec

class JdbcClusteredPersistentEntitySpec
    extends AbstractClusteredPersistentEntitySpec(JdbcClusteredPersistentEntityConfig) {
  import JdbcClusteredPersistentEntityConfig._

  var h2: Server = _

  protected override def atStartup(): Unit = {
    runOn(node1) {
      h2 = Server.createTcpServer("-tcpPort", specPorts.database.toString, "-ifNotExists").start()
    }

    enterBarrier("h2-started")
    super.atStartup()
  }

  protected override def afterTermination(): Unit = {
    super.afterTermination()
    Await.ready(defaultApplicationLifecycle.stop(), shutdownTimeout)
    Option(h2).foreach(_.stop())
  }

  lazy val defaultApplicationLifecycle = new DefaultApplicationLifecycle

  override lazy val components: JdbcPersistenceComponents =
    new JdbcPersistenceComponents with HikariCPComponents {
      override def actorSystem: ActorSystem                 = JdbcClusteredPersistentEntitySpec.this.system
      override def executionContext: ExecutionContext       = system.dispatcher
      override def coordinatedShutdown: CoordinatedShutdown = CoordinatedShutdown(actorSystem)

      override lazy val materializer: Materializer                 = SystemMaterializer(actorSystem).materializer
      override lazy val configuration: Configuration               = Configuration(system.settings.config)
      override def environment: Environment                        = JdbcClusteredPersistentEntityConfig.environment
      override lazy val applicationLifecycle: ApplicationLifecycle = defaultApplicationLifecycle
      override def jsonSerializerRegistry: JsonSerializerRegistry  = TestEntitySerializerRegistry
    }

  lazy val jdbcTestEntityReadSide: JdbcTestEntityReadSide =
    new JdbcTestEntityReadSide(components.jdbcSession)

  protected override def getAppendCount(id: String): Future[Long] =
    jdbcTestEntityReadSide.getAppendCount(id)

  protected override def readSideProcessor: () => ReadSideProcessor[Evt] = { () =>
    new JdbcTestEntityReadSide.TestEntityReadSideProcessor(components.jdbcReadSide)
  }
} 
Example 77
Source File: SlickClusteredPersistentEntitySpec.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.scaladsl.persistence.slick

import akka.actor.ActorSystem
import akka.actor.CoordinatedShutdown
import akka.stream.Materializer
import akka.stream.SystemMaterializer
import com.lightbend.lagom.scaladsl.persistence.TestEntity.Evt
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntityConfig
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntitySpec
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor
import com.lightbend.lagom.scaladsl.persistence.TestEntitySerializerRegistry
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntitySpec.Ports
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.h2.tools.Server
import play.api.Configuration
import play.api.Environment
import play.api.db.HikariCPComponents
import play.api.inject.ApplicationLifecycle
import play.api.inject.DefaultApplicationLifecycle

import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Future

object SlickClusteredPersistentEntityConfig extends AbstractClusteredPersistentEntityConfig {

  override def specPorts: Ports.SpecPorts = Ports.slickSpecPorts

  override def additionalCommonConfig: Config = ConfigFactory.parseString(
    s"""
      db.default.driver=org.h2.Driver
      db.default.url="jdbc:h2:tcp://localhost:${specPorts.database}/mem:JdbcClusteredPersistentEntitySpec"
    """
  )
}

class SlickClusteredPersistentEntitySpecMultiJvmNode1 extends SlickClusteredPersistentEntitySpec
class SlickClusteredPersistentEntitySpecMultiJvmNode2 extends SlickClusteredPersistentEntitySpec
class SlickClusteredPersistentEntitySpecMultiJvmNode3 extends SlickClusteredPersistentEntitySpec

class SlickClusteredPersistentEntitySpec
    extends AbstractClusteredPersistentEntitySpec(SlickClusteredPersistentEntityConfig) {
  import SlickClusteredPersistentEntityConfig._

  var h2: Server = _

  protected override def atStartup(): Unit = {
    runOn(node1) {
      h2 = Server.createTcpServer("-tcpPort", specPorts.database.toString, "-ifNotExists").start()
    }
    enterBarrier("h2-started")
    super.atStartup()
  }

  protected override def afterTermination(): Unit = {
    super.afterTermination()
    Await.ready(defaultApplicationLifecycle.stop(), shutdownTimeout)
    Option(h2).foreach(_.stop())
  }

  lazy val defaultApplicationLifecycle = new DefaultApplicationLifecycle

  override lazy val components: SlickPersistenceComponents =
    new SlickPersistenceComponents with HikariCPComponents {
      override def actorSystem: ActorSystem                 = SlickClusteredPersistentEntitySpec.this.system
      override def executionContext: ExecutionContext       = system.dispatcher
      override def coordinatedShutdown: CoordinatedShutdown = CoordinatedShutdown(actorSystem)

      override lazy val materializer: Materializer                 = SystemMaterializer(actorSystem).materializer
      override lazy val configuration: Configuration               = Configuration(system.settings.config)
      override def environment: Environment                        = SlickClusteredPersistentEntityConfig.environment
      override lazy val applicationLifecycle: ApplicationLifecycle = defaultApplicationLifecycle
      override def jsonSerializerRegistry: JsonSerializerRegistry  = TestEntitySerializerRegistry
    }

  lazy val jdbcTestEntityReadSide: SlickTestEntityReadSide =
    new SlickTestEntityReadSide(
      components.db,
      components.profile
    )(components.executionContext)

  protected override def getAppendCount(id: String): Future[Long] =
    jdbcTestEntityReadSide.getAppendCount(id)

  protected override def readSideProcessor: () => ReadSideProcessor[Evt] = { () =>
    new SlickTestEntityReadSide.TestEntityReadSideProcessor(
      components.slickReadSide,
      components.db,
      components.profile
    )(components.executionContext)
  }
} 
Example 78
Source File: SlickPersistenceSpec.scala    From lagom   with Apache License 2.0 5 votes
package com.lightbend.lagom.scaladsl.persistence.slick

import akka.actor.setup.ActorSystemSetup
import akka.actor.ActorSystem
import akka.actor.BootstrapSetup
import akka.cluster.Cluster
import com.lightbend.lagom.internal.persistence.ReadSideConfig
import com.lightbend.lagom.internal.persistence.jdbc.SlickDbTestProvider
import com.lightbend.lagom.internal.persistence.jdbc.SlickOffsetStore
import com.lightbend.lagom.internal.persistence.jdbc.SlickProvider
import com.lightbend.lagom.internal.persistence.testkit.AwaitPersistenceInit.awaitPersistenceInit
import com.lightbend.lagom.internal.scaladsl.persistence.jdbc.OffsetTableConfiguration
import com.lightbend.lagom.internal.scaladsl.persistence.slick.SlickReadSideImpl
import com.lightbend.lagom.persistence.ActorSystemSpec
import com.lightbend.lagom.persistence.PersistenceSpec
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import play.api.Configuration
import play.api.Environment

import scala.concurrent.Await
import scala.concurrent.duration._

abstract class SlickPersistenceSpec private (actorSystemFactory: () => ActorSystem)
    extends ActorSystemSpec(actorSystemFactory) {
  def this(testName: String, config: Config, registry: JsonSerializerRegistry) =
    this(
      () =>
        ActorSystem(
          testName,
          ActorSystemSetup(
            BootstrapSetup(
              config.withFallback(Configuration.load(Environment.simple()).underlying)
            ),
            JsonSerializerRegistry.serializationSetupFor(registry)
          )
        )
    )

  def this(config: Config, registry: JsonSerializerRegistry) =
    this(PersistenceSpec.testNameFromCallStack(classOf[SlickPersistenceSpec]), config, registry)

  def this(registry: JsonSerializerRegistry) =
    this(ConfigFactory.empty(), registry)

  import system.dispatcher

  protected lazy val slick = new SlickProvider(system, coordinatedShutdown)
  protected lazy val slickReadSide: SlickReadSide = {
    val offsetStore =
      new SlickOffsetStore(
        system,
        slick,
        new OffsetTableConfiguration(system.settings.config, ReadSideConfig())
      )
    new SlickReadSideImpl(slick, offsetStore)
  }

  override def beforeAll(): Unit = {
    super.beforeAll()

    // Join ourselves - needed because we're using cluster singleton to create tables
    val cluster = Cluster(system)
    cluster.join(cluster.selfAddress)

    // Trigger database to be loaded and registered to JNDI
    SlickDbTestProvider.buildAndBindSlickDb(system.name, coordinatedShutdown)

    // Trigger tables to be created
    Await.ready(slick.ensureTablesCreated(), 20.seconds)

    awaitPersistenceInit(system)
  }
} 
Example 79
Source File: JdbcPersistenceSpec.scala    From lagom   with Apache License 2.0 5 votes
package com.lightbend.lagom.scaladsl.persistence.jdbc

import akka.actor.setup.ActorSystemSetup
import akka.actor.ActorSystem
import akka.actor.BootstrapSetup
import akka.cluster.Cluster
import com.lightbend.lagom.internal.persistence.ReadSideConfig
import com.lightbend.lagom.internal.persistence.jdbc.SlickDbTestProvider
import com.lightbend.lagom.internal.persistence.jdbc.SlickOffsetStore
import com.lightbend.lagom.internal.persistence.jdbc.SlickProvider
import com.lightbend.lagom.internal.persistence.testkit.AwaitPersistenceInit.awaitPersistenceInit
import com.lightbend.lagom.internal.scaladsl.persistence.jdbc.JdbcReadSideImpl
import com.lightbend.lagom.internal.scaladsl.persistence.jdbc.OffsetTableConfiguration
import com.lightbend.lagom.persistence.ActorSystemSpec
import com.lightbend.lagom.persistence.PersistenceSpec
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import play.api.Configuration
import play.api.Environment

import scala.concurrent.Await
import scala.concurrent.duration._

abstract class JdbcPersistenceSpec private (actorSystemFactory: () => ActorSystem)
    extends ActorSystemSpec(actorSystemFactory) {
  def this(testName: String, config: Config, registry: JsonSerializerRegistry) =
    this(
      () =>
        ActorSystem(
          testName,
          ActorSystemSetup(
            BootstrapSetup(
              config.withFallback(Configuration.load(Environment.simple()).underlying)
            ),
            JsonSerializerRegistry.serializationSetupFor(registry)
          )
        )
    )

  def this(config: Config, registry: JsonSerializerRegistry) =
    this(PersistenceSpec.testNameFromCallStack(classOf[JdbcPersistenceSpec]), config, registry)

  def this(registry: JsonSerializerRegistry) = this(ConfigFactory.empty(), registry)

  import system.dispatcher

  protected lazy val slick = new SlickProvider(system, coordinatedShutdown)
  protected lazy val jdbcReadSide: JdbcReadSide = new JdbcReadSideImpl(
    slick,
    new SlickOffsetStore(
      system,
      slick,
      new OffsetTableConfiguration(system.settings.config, ReadSideConfig())
    )
  )

  override def beforeAll(): Unit = {
    super.beforeAll()

    // Join ourselves - needed because we're using cluster singleton to create tables
    val cluster = Cluster(system)
    cluster.join(cluster.selfAddress)

    // Trigger database to be loaded and registered to JNDI
    SlickDbTestProvider.buildAndBindSlickDb(system.name, coordinatedShutdown)

    // Trigger tables to be created
    Await.ready(slick.ensureTablesCreated(), 20.seconds)

    awaitPersistenceInit(system)
  }
} 
Example 80
Source File: SlickOffsetStoreSpec.scala    From lagom   with Apache License 2.0 5 votes
package com.lightbend.lagom.internal.persistence.jdbc

import akka.cluster.Cluster
import akka.pattern.AskTimeoutException
import com.lightbend.lagom.persistence.ActorSystemSpec
import play.api.Configuration
import play.api.Environment
import slick.jdbc.meta.MTable

import scala.concurrent.Await
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.duration._

class SlickOffsetStoreSpec extends ActorSystemSpec(Configuration.load(Environment.simple()).underlying) {
  import system.dispatcher

  private lazy val slick = new SlickProvider(system, coordinatedShutdown)

  private lazy val offsetStore = new SlickOffsetStore(
    system,
    slick,
    TestOffsetStoreConfiguration()
  )

  protected override def beforeAll(): Unit = {
    super.beforeAll()
    // Trigger database to be loaded and registered to JNDI
    SlickDbTestProvider.buildAndBindSlickDb(system.name, coordinatedShutdown)
  }

  "SlickOffsetStoreSpec" when {
    "auto-creating tables is enabled" should {
      // Regression test for https://github.com/lagom/lagom/issues/1336
      "allow prepare to be retried after a failure" in {
        val exception = Await.result(offsetStore.prepare("test_read_side", "TestTag").failed, 10.seconds)
        exception shouldBe an[AskTimeoutException]

        // Join ourselves - needed because we're using cluster singleton to create tables
        val cluster = Cluster(system)
        cluster.join(cluster.selfAddress)

        Await.result(offsetStore.prepare("test_read_side", "TestTag"), 20.seconds)

        val tables = Await.result(slick.db.run(MTable.getTables("test_read_side_offsets_tbl")), 5.seconds)
        (tables should have).length(1)
      }

      
      "creates the read-side offset table when preparing" in pending
      "allows prepare to be called multiple times" in pending
      "uses configured column names" in pending
      "returns an offset DAO with the last stored offset" in pending
    }

    "auto-creating tables is disabled" in pending
  }

  private case class TestOffsetStoreConfiguration(
      tableName: String = "test_read_side_offsets_tbl",
      schemaName: Option[String] = None,
      idColumnName: String = "test_read_side_id_col",
      tagColumnName: String = "test_tag_col",
      sequenceOffsetColumnName: String = "test_sequence_offset_col",
      timeUuidOffsetColumnName: String = "test_time_uuid_offset_col",
      minBackoff: FiniteDuration = 1.second,
      maxBackoff: FiniteDuration = 1.second,
      randomBackoffFactor: Double = 0,
      globalPrepareTimeout: FiniteDuration = 5.seconds,
      role: Option[String] = None
  ) extends SlickOffsetStoreConfiguration
} 
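The detail worth lifting from this spec is Await.result(future.failed, timeout): awaiting the failed projection yields the exception itself instead of throwing it, which is what lets the test assert on the failure before retrying. A minimal, self-contained sketch of the idiom (all names hypothetical):

import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

object FailedProjectionDemo extends App {
  val broken: Future[Int] = Future(throw new IllegalStateException("not prepared"))
  // `failed` inverts the future: it succeeds with the Throwable when the
  // original fails, so Await.result returns the exception instead of throwing.
  val error: Throwable = Await.result(broken.failed, 5.seconds)
  println(error.getMessage) // "not prepared"
}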
Example 81
Source File: ExecutorServiceWrapper.scala    From monix-nio   with Apache License 2.0 5 votes
package monix.nio.internal

import java.util
import java.util.concurrent.{ AbstractExecutorService, ExecutorService, TimeUnit }

import monix.execution.schedulers.{ ReferenceScheduler, SchedulerService }
import monix.execution.{ Cancelable, ExecutionModel, Scheduler }

import scala.collection.JavaConverters._
import scala.concurrent.duration.Duration
import scala.concurrent.{ Await, ExecutionContextExecutorService }

// The enclosing declaration (and the class that uses the remaining imports)
// was lost when this example was extracted; a wrapper object is reconstructed
// here (an assumption) so the snippet below is syntactically balanced.
object ExecutorServiceWrapper {
  private val currentThread: Scheduler =
    new ReferenceScheduler {
      import monix.execution.Scheduler.global
      def execute(r: Runnable): Unit = r.run()
      def reportFailure(t: Throwable): Unit = throw t
      def scheduleOnce(initialDelay: Long, unit: TimeUnit, r: Runnable): Cancelable =
        global.scheduleOnce(initialDelay, unit, r)
      def executionModel: ExecutionModel =
        ExecutionModel.Default
    }
} 
Example 82
Source File: UdpIntegrationSpec.scala    From monix-nio   with Apache License 2.0 5 votes
package monix.nio.udp

import java.net.InetSocketAddress

import minitest.SimpleTestSuite
import monix.eval.Task
import monix.execution.Ack
import monix.execution.Ack.{ Continue, Stop }
import monix.reactive.Observable

import scala.concurrent.duration._
import scala.concurrent.{ Await, Promise }

object UdpIntegrationSpec extends SimpleTestSuite {
  implicit val ctx = monix.execution.Scheduler.Implicits.global

  test("send and receive UDP packets successfully") {
    val data = Array.fill(8)("monix")

    val writes = (ch: TaskDatagramChannel, to: InetSocketAddress) => Observable
      .fromIterable(data)
      .mapEval(data => ch.send(Packet(data.getBytes, to)))

    val readsPromise = Promise[String]()
    val recv = new StringBuilder("")
    val reads = (ch: TaskDatagramChannel, maxSize: Int) => Observable
      .repeatEval(ch.receive(maxSize, 2.seconds))
      .mapEval(t => t)
      .map { packet =>
        packet.foreach(p => recv.append(new String(p.data)))
        packet
      }
      .guaranteeCase(_ => Task(readsPromise.success(recv.mkString)))
      .subscribe(_.fold[Ack](Stop)(_ => Continue))

    val program = for {
      ch <- bind("localhost", 2115).map { ch =>
        reads(ch, 64)
        ch
      }
      sent <- writes(ch, new InetSocketAddress("localhost", 2115)).sumL
      received <- Task.fromFuture(readsPromise.future)
      _ <- ch.close()
    } yield sent == 40 && received == data.mkString("")

    val result = Await.result(program.runToFuture, 10.seconds)
    assert(result)
  }
} 
Example 83
Source File: AsyncSocketChannelSpec.scala    From monix-nio   with Apache License 2.0 5 votes
package monix.nio.tcp

import java.net.InetSocketAddress
import java.nio.ByteBuffer

import minitest.SimpleTestSuite

import scala.concurrent.Await
import scala.concurrent.duration._

object AsyncSocketChannelSpec extends SimpleTestSuite {

  test("simple connect and write test") {
    implicit val ctx = monix.execution.Scheduler.Implicits.global

    val asyncSocketChannel = AsyncSocketChannel()
    val connectF = asyncSocketChannel.connect(new InetSocketAddress("google.com", 80))

    val data = "Hello world!".getBytes("UTF-8")
    val bytes = ByteBuffer.wrap(data)
    val writeF = connectF
      .flatMap(_ => asyncSocketChannel.write(bytes, Some(4.seconds)))
      .map { result =>
        asyncSocketChannel.stopWriting()
        asyncSocketChannel.close()
        result
      }

    assertEquals(Await.result(writeF, 5.seconds), data.length)
  }

  test("simple connect and read test") {
    implicit val ctx = monix.execution.Scheduler.Implicits.global

    val asyncSocketChannel = AsyncSocketChannel()
    val connectF = asyncSocketChannel.connect(new InetSocketAddress("google.com", 80))

    val buff = ByteBuffer.allocate(0)
    val readF = connectF
      .flatMap(_ => asyncSocketChannel.read(buff, Some(4.seconds)))
      .map { _ =>
        asyncSocketChannel.stopReading()
        asyncSocketChannel.close()
        0
      }

    assertEquals(Await.result(readF, 5.seconds), 0)
  }
} 
Example 84
Source File: WatchServiceTest.scala    From monix-nio   with Apache License 2.0 5 votes
package monix.nio.file

import java.io.File
import java.nio.file.{ Paths, WatchEvent }

import minitest.SimpleTestSuite
import monix.eval.Task
import monix.execution.Ack.{ Continue, Stop }

import scala.concurrent.duration._
import scala.concurrent.{ Await, Promise }
object WatchServiceTest extends SimpleTestSuite {
  implicit val ctx = monix.execution.Scheduler.Implicits.global

  test("file event captured") {
    val path = Paths.get(System.getProperty("java.io.tmpdir"))

    val watchP = Promise[Boolean]()
    val watchT = Task.evalAsync {
      watchAsync(path).timeoutOnSlowUpstream(10.seconds).subscribe(
        (events: Array[WatchEvent[_]]) => {
          val captured = events.find(e => s"${e.kind().name()} - ${e.context().toString}".contains("monix"))
          if (captured.isDefined) {
            watchP.success(true)
            Stop
          } else {
            Continue
          }
        },
        err => watchP.failure(err),
        () => watchP.success(true))
    }
    val fileT = Task.evalAsync {
      val temp = File.createTempFile("monix", ".tmp", path.toFile)
      Thread.sleep(2000)
      temp.delete()
    }

    watchT.runToFuture
    fileT.runToFuture

    val result = Await.result(watchP.future, 20.seconds)
    assert(result)
  }

} 
Example 85
Source File: IntegrationTest.scala    From monix-nio   with Apache License 2.0 5 votes
package monix.nio.file

import java.nio.file.{ Files, Paths, StandardOpenOption }
import java.util

import minitest.SimpleTestSuite
import monix.execution.Callback
import monix.nio.file

import scala.concurrent.duration._
import scala.concurrent.{ Await, Promise }
import scala.util.control.NonFatal

object IntegrationTest extends SimpleTestSuite {
  test("same file generated") {
    implicit val ctx = monix.execution.Scheduler.Implicits.global

    val from = Paths.get(this.getClass.getResource("/testFiles/file.txt").toURI)
    val to = Paths.get("src/test/resources/out.txt")
    val consumer = file.writeAsync(to)
    val p = Promise[Boolean]()
    val callback = new Callback[Throwable, Long] {
      override def onSuccess(value: Long): Unit = p.success(true)
      override def onError(ex: Throwable): Unit = p.failure(ex)
    }

    readAsync(from, 3)
      .consumeWith(consumer)
      .runAsync(callback)

    val result = Await.result(p.future, 3.second)
    assert(result)

    val f1 = Files.readAllBytes(from)
    val f2 = Files.readAllBytes(to)
    Files.delete(to) // clean
    assert(util.Arrays.equals(f1, f2))
  }

  test("add data to existing file") {
    implicit val ctx = monix.execution.Scheduler.Implicits.global

    val from = Paths.get(this.getClass.getResource("/testFiles/file.txt").toURI)
    val to = Paths.get("src/test/resources/existing.txt")
    val strSeq = Seq("A", "\u0024", "\u00A2", "\u20AC", new String(Array(0xF0, 0x90, 0x8D, 0x88).map(_.toByte), "UTF-8"), "B")

    try {
      Files.write(to, strSeq.flatMap(_.getBytes).toArray, StandardOpenOption.WRITE, StandardOpenOption.CREATE, StandardOpenOption.APPEND)
    } catch {
      case NonFatal(e) => fail(s"got error: $e")
    }
    val consumer = file.appendAsync(to, Files.size(to))
    val p = Promise[Boolean]()
    val callback = new Callback[Throwable, Long] {
      override def onSuccess(value: Long): Unit = p.success(true)
      override def onError(ex: Throwable): Unit = p.failure(ex)
    }

    readAsync(from, 3)
      .consumeWith(consumer)
      .runAsync(callback)

    val result = Await.result(p.future, 3.second)
    assert(result)

    val f1 = Files.readAllBytes(from)
    val f2 = Files.readAllBytes(to)
    Files.delete(to) // clean

    val all1: Seq[Byte] = strSeq.flatMap(_.getBytes) ++ f1.toSeq
    assertEquals(all1, f2.toSeq)
  }
} 
Example 86
Source File: CodecTest.scala    From monix-nio   with Apache License 2.0 5 votes
package monix.nio.file

import java.nio.file.{ Files, Paths }
import java.util

import minitest.SimpleTestSuite
import monix.eval.Task
import monix.execution.Callback
import monix.execution.Scheduler.Implicits.{ global => ctx }
import monix.nio.file
import monix.nio.text.UTF8Codec.{ utf8Decode, utf8Encode }
import monix.reactive.Observable

import scala.concurrent.duration._
import scala.concurrent.{ Await, Promise }

object CodecTest extends SimpleTestSuite {
  test("decode file utf8") {
    val from = Paths.get(this.getClass.getResource("/testFiles/specialChars.txt").toURI)

    val p = Promise[Seq[Byte]]()
    val callback = new Callback[Throwable, List[Array[Byte]]] {
      override def onSuccess(value: List[Array[Byte]]): Unit = p.success(value.flatten)
      override def onError(ex: Throwable): Unit = p.failure(ex)
    }

    readAsync(from, 3)
      .pipeThrough(utf8Decode)
      .pipeThrough(utf8Encode)
      .toListL
      .runAsync(callback)
    val result = Await.result(p.future, 3.second)
    val f1 = Files.readAllBytes(from)
    val f2 = result
    assert(util.Arrays.equals(f1, f2.toArray))
  }

  test("decode special chars") {
    val strSeq = Seq("A", "\u0024", "\u00A2", "\u20AC", new String(Array(0xF0, 0x90, 0x8D, 0x88).map(_.toByte), "UTF-8"), "B")

    for (grouping <- 1 to 12) {
      val obsSeq =
        Observable
          .fromIterator(Task(strSeq.flatMap(_.getBytes).grouped(grouping).map(_.toArray)))
          .pipeThrough(utf8Decode)

      val p = Promise[Boolean]()
      val callback = new Callback[Throwable, List[String]] {
        override def onSuccess(value: List[String]): Unit = {
          p.success(value.mkString == strSeq.mkString)
        }

        override def onError(ex: Throwable): Unit = p.failure(ex)
      }
      obsSeq.toListL.runAsync(callback)
      val result = Await.result(p.future, 3.second)
      assert(result)
    }
  }

  test("copy file utf8") {
    val from = Paths.get(this.getClass.getResource("/testFiles/specialChars.txt").toURI)
    val to = Paths.get("src/test/resources/res.txt")
    val consumer = file.writeAsync(to)
    val p = Promise[Long]()
    val callback = new Callback[Throwable, Long] {
      override def onSuccess(value: Long): Unit = p.success(value)
      override def onError(ex: Throwable): Unit = p.failure(ex)
    }

    readAsync(from, 3)
      .pipeThrough(utf8Decode)
      .map { str =>
        //Console.println(str)
        str
      }
      .pipeThrough(utf8Encode)
      .consumeWith(consumer)
      .runAsync(callback)
    val result = Await.result(p.future, 3.second)
    val f1 = Files.readAllBytes(from)
    val f2 = result
    Files.delete(to)
    assertEquals(f1.size, f2)
  }
} 
Example 87
Source File: PollBot.scala    From telegram   with Apache License 2.0 5 votes
import java.util.concurrent.TimeUnit

import cats.instances.future._
import cats.syntax.functor._
import com.bot4s.telegram.Implicits._
import com.bot4s.telegram.api.declarative.{Callbacks, Commands}
import com.bot4s.telegram.future.Polling
import com.bot4s.telegram.methods._
import com.bot4s.telegram.models._

import scala.concurrent.{Await, Future}
import scala.util.Failure


class PollBot(token: String) extends ExampleBot(token)
  with Polling
  with Commands[Future]
  with Callbacks[Future] {

  var pollMsgId = 0

  onCommand("poll") { implicit msg =>
    val f = request(SendPoll(ChatId(msg.chat.id), "Pick A or B", Array("A", "B")))
    f.onComplete {
      case Failure(e) => println("Error " + e)
      case _ =>
    }
    for {
      poll <- f
    } yield {
      println("Poll sent")
      pollMsgId = poll.messageId
    }
  }

  onCommand("stop") { implicit msg =>
    request(StopPoll(ChatId(msg.chat.id), pollMsgId)).void
  }
} 
Example 88
Source File: TestUtils.scala    From telegram   with Apache License 2.0 5 votes
package com.bot4s.telegram.api

import com.bot4s.telegram.models._

import scala.concurrent.duration._
import scala.concurrent.Await
import scala.concurrent.Future

trait TestUtils {
  def textMessage(text: String): Message =
    Message(0, chat = Chat(0, ChatType.Private), date = 0, text = Some(text))

  def user(name: String): User = User(0, false, name)

  def inlineQuery(query: String): InlineQuery = {
    InlineQuery("0", from = user("Pepe"), query = query, offset = "")
  }

  implicit class FutureOps[A](f: Future[A]) {
    def get: A = Await.result(f, 10.seconds)
  }
} 
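A short usage sketch for the FutureOps helper above (the object name is hypothetical); `get` simply delegates to Await.result with the fixed 10-second timeout:

import com.bot4s.telegram.api.TestUtils
import com.bot4s.telegram.models.User

object TestUtilsUsage extends TestUtils {
  import scala.concurrent.Future

  // Blocks for up to 10 seconds via the implicit FutureOps.get defined above.
  val sender: User = Future.successful(user("Pepe")).get
}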
Example 89
Source File: Demo.scala    From chordial   with BSD 3-Clause "New" or "Revised" License 5 votes
package com.tristanpenman.chordial.demo

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.TextMessage
import akka.stream.scaladsl._
import akka.stream.{ActorAttributes, ActorMaterializer, OverflowStrategy, Supervision}
import akka.util.Timeout
import com.tristanpenman.chordial.core.Event
import com.tristanpenman.chordial.core.Event._

import scala.concurrent.Await
import scala.concurrent.duration._

object Demo extends App {
  implicit val system = ActorSystem("chordial-demo")
  implicit val mat = ActorMaterializer()
  implicit val ec = system.dispatcher

  implicit val timeout: Timeout = 3.seconds

  // Generate IDs ranging from 0 to 63 (inclusive) so that when visualising the network,
  // each node represents a ~5.625 degree arc on the ring
  private val keyspaceBits = 6

  // Create an actor that is responsible for creating and terminating nodes, while ensuring
  // that nodes are assigned unique IDs in the Chord key-space
  private val governor =
    system.actorOf(Governor.props(keyspaceBits), "Governor")

  // Create an actor that will log events published by nodes
  private val eventWriter = system.actorOf(EventWriter.props, "EventWriter")

  // Subscribe the EventWriter actor to events published by nodes
  system.eventStream.subscribe(eventWriter, classOf[Event])

  val (listener, eventsSource) =
    Source
      .actorRef[Event](Int.MaxValue, OverflowStrategy.fail)
      .map {
        case FingerReset(nodeId: Long, index: Int) =>
          s"""{ "type": "FingerReset", "nodeId": $nodeId, "index": $index }"""
        case FingerUpdated(nodeId: Long, index: Int, fingerId: Long) =>
          s"""{ "type": "FingerUpdated", "nodeId": $nodeId, "index": $index, "fingerId": $fingerId }"""
        case NodeCreated(nodeId, successorId) =>
          s"""{ "type": "NodeCreated", "nodeId": $nodeId, "successorId": $successorId }"""
        case NodeShuttingDown(nodeId) =>
          s"""{ "type": "NodeDeleted", "nodeId": $nodeId }"""
        case PredecessorReset(nodeId) =>
          s"""{ "type": "PredecessorReset", "nodeId": $nodeId }"""
        case PredecessorUpdated(nodeId, predecessorId) =>
          s"""{ "type": "PredecessorUpdated", "nodeId": $nodeId, "predecessorId": $predecessorId }"""
        case SuccessorListUpdated(nodeId, primarySuccessorId, _) =>
          s"""{ "type": "SuccessorUpdated", "nodeId": $nodeId, "successorId": $primarySuccessorId }"""
      }
      .map(TextMessage(_))
      .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))
      .toMat(BroadcastHub.sink[TextMessage](bufferSize = 16))(Keep.both)
      .run()

  system.eventStream.subscribe(listener, classOf[Event])

  Http().bindAndHandle(WebSocketWorker(governor, eventsSource), "0.0.0.0", 4567)

  Await.result(system.whenTerminated, Duration.Inf)
} 
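The final line is the standard way to keep a demo's main thread alive for the lifetime of the actor system. Reduced to its essentials (a sketch, with a hypothetical system name):

import akka.actor.ActorSystem
import scala.concurrent.Await
import scala.concurrent.duration.Duration

object KeepAlive extends App {
  val system = ActorSystem("keep-alive")
  // Without this, the main thread would fall off the end of App and the JVM
  // could exit while actors are still doing work.
  Await.result(system.whenTerminated, Duration.Inf)
}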
Example 90
Source File: AppRFMClient.scala    From frees-rpc-workshop   with Apache License 2.0 5 votes
package scalaexchange
package app

import cats.effect.IO
import freestyle.rpc.protocol.Empty
import freestyle.asyncCatsEffect.implicits._
import freestyle.rpc.client.implicits._
import monix.reactive.Observable

import scala.concurrent.Await
import scala.concurrent.duration._
import scalaexchange.services.protocol._

object AppRFMClient extends Implicits {

  def main(args: Array[String]): Unit = {

    implicit val rfmClient: RFMAnalysisService.Client[IO] =
      RFMAnalysisService.client[IO](channel)

    val (segments: IO[SegmentList], stream: Observable[UserEvent], ack: IO[Ack]) =
      (
        rfmClient.segments(Empty),
        rfmClient.userEvents(Empty),
        rfmClient.orderStream(ordersStreamObs)
      )

    println(s"Segments: \n${segments.unsafeRunSync().list.mkString("\n")}\n")
    println(s"Client Streaming: \n${ack.unsafeRunSync()}\n")
    Await.ready(
      stream
        .map { u =>
          println(u)
          u
        }
        .completedL
        .runAsync,
      Duration.Inf)
  }

  private[this] def ordersStreamObs: Observable[Order] = {
    val orderList: List[Order] = (1 to 1000).map { customerId =>
      import com.fortysevendeg.scalacheck.datetime.GenDateTime
      import org.joda.time.{DateTime, Period}
      import org.scalacheck._
      import com.fortysevendeg.scalacheck.datetime.instances.joda.jodaForPeriod

      (for {
        date    <- GenDateTime.genDateTimeWithinRange(DateTime.parse("2017-12-01"), Period.days(22))
        orderId <- Gen.uuid
        total   <- Gen.choose[Int](5, 200)
      } yield
        Order(
          customerId,
          CustomerData(date.toString, orderId.toString, total)
        )).sample.get
    }.toList

    Observable.fromIterable(orderList)
  }

} 
Example 91
Source File: TestUtils.scala    From scala-play-realworld-example-app   with MIT License 5 votes
package commons_test.test_helpers

import commons.services.ActionRunner
import slick.dbio.DBIO

import scala.concurrent.duration.{Duration, DurationInt}
import scala.concurrent.{Await, Future}

object TestUtils {

  val config: Map[String, String] = Map(
    "play.evolutions.enabled" -> "true",
    "play.evolutions.autoApply" -> "true",
    "slick.dbs.default.profile" -> "slick.jdbc.H2Profile$",
    "slick.dbs.default.db.driver" -> "org.h2.Driver",
    "slick.dbs.default.db.url" -> "jdbc:h2:mem:play;DATABASE_TO_UPPER=false",
    "slick.dbs.default.db.user" -> "user",
    "slick.dbs.default.db.password" -> ""
  )

  def runAndAwaitResult[T](action: DBIO[T])(implicit actionRunner: ActionRunner,
                                            duration: Duration = new DurationInt(1).minute): T = {
    val future: Future[T] = actionRunner.runTransactionally(action)
    Await.result(future, duration)
  }
} 
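A hypothetical call site for runAndAwaitResult; the ActionRunner would come from the application under test, and the timeout falls back to the one-minute default:

object TestUtilsUsageExample {
  import commons.services.ActionRunner
  import slick.dbio.DBIO

  // The implicit duration parameter keeps its default of one minute.
  def firstUserId(implicit actionRunner: ActionRunner): Long =
    TestUtils.runAndAwaitResult(DBIO.successful(1L))
}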
Example 92
Source File: BlockTransferService.scala    From SparkCore   with Apache License 2.0 5 votes
package org.apache.spark.network

import java.io.Closeable
import java.nio.ByteBuffer

import scala.concurrent.{Promise, Await, Future}
import scala.concurrent.duration.Duration

import org.apache.spark.Logging
import org.apache.spark.network.buffer.{NioManagedBuffer, ManagedBuffer}
import org.apache.spark.network.shuffle.{ShuffleClient, BlockFetchingListener}
import org.apache.spark.storage.{BlockManagerId, BlockId, StorageLevel}

private[spark]
abstract class BlockTransferService extends ShuffleClient with Closeable with Logging {

  
  def uploadBlockSync(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel): Unit = {
    Await.result(uploadBlock(hostname, port, execId, blockId, blockData, level), Duration.Inf)
  }
} 
Example 93
Source File: NettyBlockTransferSecuritySuite.scala    From SparkCore   with Apache License 2.0 5 votes
package org.apache.spark.network.netty

import java.nio._
import java.util.concurrent.TimeUnit

import scala.concurrent.duration._
import scala.concurrent.{Await, Promise}
import scala.util.{Failure, Success, Try}

import org.apache.commons.io.IOUtils
import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer}
import org.apache.spark.network.shuffle.BlockFetchingListener
import org.apache.spark.network.{BlockDataManager, BlockTransferService}
import org.apache.spark.storage.{BlockId, ShuffleBlockId}
import org.apache.spark.{SecurityManager, SparkConf}
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSuite, ShouldMatchers}

class NettyBlockTransferSecuritySuite extends FunSuite with MockitoSugar with ShouldMatchers {
  test("security default off") {
    val conf = new SparkConf()
      .set("spark.app.id", "app-id")
    testConnection(conf, conf) match {
      case Success(_) => // expected
      case Failure(t) => fail(t)
    }
  }

  test("security on same password") {
    val conf = new SparkConf()
      .set("spark.authenticate", "true")
      .set("spark.authenticate.secret", "good")
      .set("spark.app.id", "app-id")
    testConnection(conf, conf) match {
      case Success(_) => // expected
      case Failure(t) => fail(t)
    }
  }

  test("security on mismatch password") {
    val conf0 = new SparkConf()
      .set("spark.authenticate", "true")
      .set("spark.authenticate.secret", "good")
      .set("spark.app.id", "app-id")
    val conf1 = conf0.clone.set("spark.authenticate.secret", "bad")
    testConnection(conf0, conf1) match {
      case Success(_) => fail("Should have failed")
      case Failure(t) => t.getMessage should include ("Mismatched response")
    }
  }

  test("security mismatch auth off on server") {
    val conf0 = new SparkConf()
      .set("spark.authenticate", "true")
      .set("spark.authenticate.secret", "good")
      .set("spark.app.id", "app-id")
    val conf1 = conf0.clone.set("spark.authenticate", "false")
    testConnection(conf0, conf1) match {
      case Success(_) => fail("Should have failed")
      case Failure(t) => // any error may occur; the server will interpret the SASL token as RPC
    }
  }

  test("security mismatch auth off on client") {
    val conf0 = new SparkConf()
      .set("spark.authenticate", "false")
      .set("spark.authenticate.secret", "good")
      .set("spark.app.id", "app-id")
    val conf1 = conf0.clone.set("spark.authenticate", "true")
    testConnection(conf0, conf1) match {
      case Success(_) => fail("Should have failed")
      case Failure(t) => t.getMessage should include ("Expected SaslMessage")
    }
  }

  
  private def fetchBlock(
      self: BlockTransferService,
      from: BlockTransferService,
      execId: String,
      blockId: BlockId): Try[ManagedBuffer] = {

    val promise = Promise[ManagedBuffer]()

    self.fetchBlocks(from.hostName, from.port, execId, Array(blockId.toString),
      new BlockFetchingListener {
        override def onBlockFetchFailure(blockId: String, exception: Throwable): Unit = {
          promise.failure(exception)
        }

        override def onBlockFetchSuccess(blockId: String, data: ManagedBuffer): Unit = {
          promise.success(data.retain())
        }
      })

    Await.ready(promise.future, FiniteDuration(1000, TimeUnit.MILLISECONDS))
    promise.future.value.get
  }
} 
Example 94
Source File: FutureActionSuite.scala    From SparkCore   with Apache License 2.0 5 votes
package org.apache.spark

import scala.concurrent.Await
import scala.concurrent.duration.Duration

import org.scalatest.{BeforeAndAfter, FunSuite, Matchers}


class FutureActionSuite extends FunSuite with BeforeAndAfter with Matchers with LocalSparkContext {

  before {
    sc = new SparkContext("local", "FutureActionSuite")
  }

  test("simple async action") {
    val rdd = sc.parallelize(1 to 10, 2)
    val job = rdd.countAsync()
    val res = Await.result(job, Duration.Inf)
    res should be (10)
    job.jobIds.size should be (1)
  }

  test("complex async action") {
    val rdd = sc.parallelize(1 to 15, 3)
    val job = rdd.takeAsync(10)
    val res = Await.result(job, Duration.Inf)
    res should be (1 to 10)
    job.jobIds.size should be (2)
  }

} 
Example 95
Source File: LoadTest.scala    From ws_to_kafka   with MIT License 5 votes
package com.pkinsky

import java.util.concurrent.atomic.AtomicInteger

import akka.http.scaladsl.model.ws.{InvalidUpgradeResponse, WebsocketUpgradeResponse, WebsocketRequest, TextMessage}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.Uri
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Keep, Sink, RunnableGraph, Source}
import play.api.libs.json.Json

import scala.concurrent.{Future, Await}
import scala.concurrent.duration._
import scala.language.postfixOps

object LoadTest extends App with AppContext {
  val clients = 256
  val eventsPerClient = 256

  val eventsSent = new AtomicInteger(0)

  def testData(clientId: String): Source[Event, Unit] =
    Source.unfoldInf(1) { n =>
      val event = Event(s"msg number $n", clientId, System.currentTimeMillis())
      (n + 1, event)
    }.take(eventsPerClient).throttle(1, 100 millis, 1, ThrottleMode.Shaping)

  def wsClient(clientId: String): RunnableGraph[Future[WebsocketUpgradeResponse]] =
    testData(clientId).map(e => TextMessage.Strict(Json.toJson(e).toString))
      .map { x => eventsSent.incrementAndGet(); x }
      .viaMat(Http().websocketClientFlow(WebsocketRequest(Uri(s"ws://localhost:$port/ws"))))(Keep.right).to(Sink.ignore)

  //set up websocket connections
  (1 to clients).foreach { id =>
    wsClient(s"client $id").run()
  }

  //watch kafka for messages sent via websocket
  val kafkaConsumerGraph: RunnableGraph[Future[Seq[Event]]] =
    kafka.consume[Event](eventTopic, "group_new")
      .take(clients * eventsPerClient).takeWithin(2 minutes)
      .toMat(Sink.seq)(Keep.right)

  val res = Await.result(kafkaConsumerGraph.run, 5 minutes)
  println(s"sent ${eventsSent.get()} events total")
  println(s"res size: ${res.length}")
} 
Example 96
Source File: FutureResultSupport.scala    From sangria   with Apache License 2.0 5 votes
package sangria.util

import sangria.execution.{ErrorWithResolver, QueryAnalysisError}
import sangria.marshalling.ResultMarshallerForType

import language.postfixOps
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

trait FutureResultSupport {
  implicit class FutureResult[T](f: Future[T]) {
    def await = Await.result(f, 10 seconds)
    def await(duration: Duration) = Await.result(f, duration)

    def awaitAndRecoverQueryAnalysis(implicit m: ResultMarshallerForType[T]): T = Await.result(recoverQueryAnalysis, 10 seconds)

    def recoverQueryAnalysis(implicit m: ResultMarshallerForType[T]): Future[T] = f.recover {
      case analysisError: QueryAnalysisError => analysisError.resolveError(m.marshaller).asInstanceOf[T]
    }

    def awaitAndRecoverQueryAnalysisScala(implicit ev: T =:= Any) = Await.result(recoverQueryAnalysisScala, 10 seconds)

    def recoverQueryAnalysisScala(implicit ev: T =:= Any) = f.recover {
      case analysisError: ErrorWithResolver => analysisError.resolveError
    }
  }


  object sync {
    val executionContext = ExecutionContext.fromExecutor(new java.util.concurrent.Executor {
      def execute(command: Runnable) = command.run()
    })
  }
} 
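A minimal sketch of the trait in use (object name hypothetical); `await` is Await.result with a default 10-second timeout, and the overload takes an explicit duration:

object FutureResultSupportUsage extends FutureResultSupport {
  import scala.concurrent.Future
  import scala.concurrent.duration._

  val quick: Int = Future.successful(21 * 2).await          // default 10 seconds
  val patient: Int = Future.successful(42).await(1.minute)  // explicit timeout
}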
Example 97
Source File: Redis4CatsFunSuite.scala    From redis4cats   with Apache License 2.0 5 votes
package dev.profunktor.redis4cats

import cats.effect._
import cats.implicits._
import dev.profunktor.redis4cats.connection._
import dev.profunktor.redis4cats.data.RedisCodec
import dev.profunktor.redis4cats.effect.Log.NoOp._
import munit.FunSuite
import scala.concurrent.{ Await, ExecutionContext, Future }
import scala.concurrent.duration.Duration

abstract class Redis4CatsFunSuite(isCluster: Boolean) extends FunSuite {

  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val timer: Timer[IO]     = IO.timer(ExecutionContext.global)
  implicit val clock: Clock[IO]     = timer.clock

  val flushAllFixture = new Fixture[Unit]("FLUSHALL") {
    def apply(): Unit = ()

    override def afterEach(context: AfterEach): Unit =
      Await.result(flushAll(), Duration.Inf)
  }

  override def munitFixtures = List(flushAllFixture)

  override def munitFlakyOK: Boolean = true

  private val stringCodec = RedisCodec.Utf8

  def withAbstractRedis[A, K, V](f: RedisCommands[IO, K, V] => IO[A])(codec: RedisCodec[K, V]): Future[Unit] =
    Redis[IO].simple("redis://localhost", codec).use(f).as(assert(true)).unsafeToFuture()

  def withRedis[A](f: RedisCommands[IO, String, String] => IO[A]): Future[Unit] =
    withAbstractRedis[A, String, String](f)(stringCodec)

  private def flushAll(): Future[Unit] =
    if (isCluster) withRedisCluster(_.flushAll)
    else withRedis(_.flushAll)

  // --- Cluster ---

  lazy val redisUri = List(
    "redis://localhost:30001",
    "redis://localhost:30002",
    "redis://localhost:30003"
  ).traverse(RedisURI.make[IO](_))

  private def mkRedisCluster[K, V](codec: RedisCodec[K, V]): Resource[IO, RedisCommands[IO, K, V]] =
    for {
      uris <- Resource.liftF(redisUri)
      client <- RedisClusterClient[IO](uris: _*)
      cluster <- Redis[IO].fromClusterClient(client, codec)
    } yield cluster

  def withAbstractRedisCluster[A, K, V](
      f: RedisCommands[IO, K, V] => IO[A]
  )(codec: RedisCodec[K, V]): Future[Unit] =
    mkRedisCluster(codec).use(f).as(assert(true)).unsafeToFuture()

  def withRedisCluster[A](f: RedisCommands[IO, String, String] => IO[A]): Future[Unit] =
    withAbstractRedisCluster[A, String, String](f)(stringCodec)

} 
Example 98
Source File: rabenchmarks.scala    From reactive-async   with BSD 2-Clause "Simplified" License 5 votes
package com.phaller.rasync
package bench

import com.phaller.rasync.cell.CellCompleter
import com.phaller.rasync.lattice.lattices.{ NaturalNumberKey, NaturalNumberLattice }
import com.phaller.rasync.pool.HandlerPool
import lattice.Lattice
import org.scalameter.api._
import org.scalameter.picklers.noPickler._

import scala.concurrent.{ Await, Promise }
import scala.concurrent.duration._

object ReactiveAsyncBenchmarks extends PerformanceTest.Microbenchmark {
  
  performance of "Cells" in {
    measure method "create and putFinal" in {
      using(size) config (
        exec.benchRuns -> 9) in {
          r =>
            {
              implicit val pool = new HandlerPool(NaturalNumberKey, nrOfThreads)
              for (i <- 1 to r) {
                pool.execute(() => {
                  val cellCompleter = CellCompleter[Int, Null]()
                  cellCompleter.putFinal(1)
                })
              }
              waitUntilQuiescent(pool)
            }
        }
    }
  }

  performance of "Cells" in {
    measure method "putNext" in {
      using(Gen.unit(s"$nrOfCells cells")) config (
        exec.benchRuns -> 9) in {
          _ =>
            implicit val pool = new HandlerPool(NaturalNumberKey, nrOfThreads)
            val cellCompleter = CellCompleter[Int, Null]()
            for (i <- 1 to nrOfCells) pool.execute(() => cellCompleter.putNext(i))
            waitUntilQuiescent(pool)
        }
    }
  }

  def waitUntilQuiescent(pool: HandlerPool[_, _]): Unit = {
    val p = Promise[Boolean]
    pool.onQuiescent { () =>
      p.success(true)
    }
    Await.ready(p.future, 30.seconds)
  }
} 
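waitUntilQuiescent is an instance of a more general idiom: bridge a callback-style API to a blocking wait by completing a Promise from the callback. A generic sketch of that shape (names hypothetical):

object CallbackBridge {
  import scala.concurrent.{Await, Promise}
  import scala.concurrent.duration.FiniteDuration

  def blockOnCallback[A](register: (A => Unit) => Unit, timeout: FiniteDuration): A = {
    val p = Promise[A]()
    register(a => p.trySuccess(a))  // the callback completes the promise
    Await.result(p.future, timeout) // the caller blocks until then
  }
}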
Example 99
Source File: PoolSuite.scala    From reactive-async   with BSD 2-Clause "Simplified" License 5 votes
package com.phaller.rasync
package test

import java.util.concurrent.{ ConcurrentHashMap, CountDownLatch }

import com.phaller.rasync.cell.{ Cell, CellCompleter }
import org.scalatest.FunSuite

import scala.concurrent.{ Await, Promise }
import scala.concurrent.duration._
import com.phaller.rasync.lattice.Updater
import com.phaller.rasync.pool.HandlerPool
import com.phaller.rasync.test.lattice.{ IntUpdater, StringIntKey }

class PoolSuite extends FunSuite {
  test("onQuiescent") {
    val pool = HandlerPool[Int]

    var i = 0
    while (i < 10000) {
      val p1 = Promise[Boolean]()
      val p2 = Promise[Boolean]()
      pool.execute { () => { p1.success(true) }: Unit }
      pool.onQuiescent { () => p2.success(true) }
      try {
        Await.result(p2.future, 1.seconds)
      } catch {
        case t: Throwable =>
          assert(false, s"failure after $i iterations")
      }
      i += 1
    }

    pool.shutdown()
  }

  test("register cells concurrently") {
    implicit val stringIntUpdater: Updater[Int] = new IntUpdater

    implicit val pool = new HandlerPool[Int, Null](new StringIntKey("s"))
    val regCells = new ConcurrentHashMap[Cell[Int, Null], Cell[Int, Null]]()
    for (_ <- 1 to 1000) {
      pool.execute(() => {
        val completer = CellCompleter[Int, Null]()
        completer.cell.trigger()
        regCells.put(completer.cell, completer.cell)
        ()
      })
    }
    val fut = pool.quiescentResolveCell // set all (registered) cells to 1 via key.fallback
    Await.ready(fut, 5.seconds)

    regCells.values().removeIf(_.getResult() != 0)
    assert(regCells.size === 0)
  }

  test("register cells concurrently 2") {
    implicit val stringIntUpdater: Updater[Int] = new IntUpdater

    implicit val pool = new HandlerPool[Int, Null](new StringIntKey("s"))
    val regCells = new ConcurrentHashMap[Cell[Int, Null], Cell[Int, Null]]()
    for (_ <- 1 to 1000) {
      pool.execute(() => {
        val completer = CellCompleter[Int, Null]()
        regCells.put(completer.cell, completer.cell)
        ()
      })
    }
    val fut = pool.quiescentResolveCell // set all (registered) cells to 1 via key.fallback
    Await.ready(fut, 5.seconds)

    assert(regCells.size === 1000)
  }

  test("handler pool quiescence") {
    implicit val pool = new HandlerPool[Int, Null]
    val latch = new CountDownLatch(1)
    val latch2 = new CountDownLatch(1)
    pool.execute { () => latch.await() }
    pool.onQuiescent { () => latch2.countDown() }
    latch.countDown()

    latch2.await()
    assert(true)

    pool.onQuiescenceShutdown()
  }

} 
Example 100
Source File: ParallelIteratorExecutor.scala    From codepropertygraph   with Apache License 2.0 5 votes
package io.shiftleft.passes

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

class ParallelIteratorExecutor[T](iterator: Iterator[T]) {
  def map[D](func: T => D): Iterator[D] = {
    val futures = Future.traverse(iterator) { element =>
      Future {
        func(element)
      }
    }
    Await.result(futures, Duration.Inf)
  }
} 
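For illustration, a hypothetical use of the class above; Future.traverse preserves input order, so the results come back in sequence even though the function runs in parallel:

object ParallelIteratorExecutorUsage extends App {
  import io.shiftleft.passes.ParallelIteratorExecutor

  val doubled = new ParallelIteratorExecutor(Iterator(1, 2, 3, 4)).map(_ * 2)
  println(doubled.toList) // List(2, 4, 6, 8)
}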
Example 101
Source File: DistributedCountRDD.scala    From carbondata   with Apache License 2.0 5 votes
package org.apache.carbondata.indexserver

import java.util.concurrent.Executors

import scala.collection.JavaConverters._
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor, Future}
import scala.concurrent.duration.Duration

import org.apache.hadoop.mapred.TaskAttemptID
import org.apache.hadoop.mapreduce.{InputSplit, TaskType}
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.spark.{Partition, SparkEnv, TaskContext}
import org.apache.spark.sql.SparkSession

import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.cache.CacheProvider
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.index.{IndexInputFormat, IndexStoreManager}
import org.apache.carbondata.core.index.dev.expr.IndexInputSplitWrapper
import org.apache.carbondata.core.util.{CarbonProperties, CarbonThreadFactory}
import org.apache.carbondata.spark.rdd.CarbonRDD


class DistributedCountRDD(@transient ss: SparkSession, indexInputFormat: IndexInputFormat)
  extends CarbonRDD[(String, String)](ss, Nil) {

  @transient private val LOGGER = LogServiceFactory.getLogService(classOf[DistributedPruneRDD]
    .getName)

  override protected def getPreferredLocations(split: Partition): Seq[String] = {
    if (split.asInstanceOf[IndexRDDPartition].getLocations != null) {
      split.asInstanceOf[IndexRDDPartition].getLocations.toSeq
    } else {
      Seq()
    }
  }

  override def internalCompute(split: Partition,
      context: TaskContext): Iterator[(String, String)] = {
    val attemptId = new TaskAttemptID(DistributedRDDUtils.generateTrackerId,
      id, TaskType.MAP, split.index, 0)
    val attemptContext = new TaskAttemptContextImpl(FileFactory.getConfiguration, attemptId)
    val inputSplits = split.asInstanceOf[IndexRDDPartition].inputSplit
    val numOfThreads = CarbonProperties.getInstance().getNumOfThreadsForExecutorPruning
    val service = Executors
      .newFixedThreadPool(numOfThreads, new CarbonThreadFactory("IndexPruningPool", true))
    implicit val ec: ExecutionContextExecutor = ExecutionContext
      .fromExecutor(service)
    if (indexInputFormat.ifAsyncCall()) {
      // to clear cache of invalid segments during pre-priming in index server
      IndexStoreManager.getInstance().clearInvalidSegments(indexInputFormat.getCarbonTable,
        indexInputFormat.getInvalidSegments)
    }
    val futures = if (inputSplits.length <= numOfThreads) {
      inputSplits.map {
        split => generateFuture(Seq(split))
      }
    } else {
      DistributedRDDUtils.groupSplits(inputSplits, numOfThreads).map {
        splits => generateFuture(splits)
      }
    }
    // scalastyle:off awaitresult
    val results = Await.result(Future.sequence(futures), Duration.Inf).flatten
    // scalastyle:on awaitresult
    val executorIP = s"${ SparkEnv.get.blockManager.blockManagerId.host }_${
      SparkEnv.get.blockManager.blockManagerId.executorId
    }"
    val cacheSize = if (CacheProvider.getInstance().getCarbonCache != null) {
      CacheProvider.getInstance().getCarbonCache.getCurrentSize
    } else {
      0L
    }
    Iterator((executorIP + "_" + cacheSize.toString, results.map(_._2.toLong).sum.toString))
  }

  override protected def internalGetPartitions: Array[Partition] = {
    new DistributedPruneRDD(ss, indexInputFormat).partitions
  }

  private def generateFuture(split: Seq[InputSplit])
    (implicit executionContext: ExecutionContext) = {
    Future {
      val segments = split.map { inputSplit =>
        val distributable = inputSplit.asInstanceOf[IndexInputSplitWrapper]
        distributable.getDistributable.getSegment
          .setReadCommittedScope(indexInputFormat.getReadCommittedScope)
        distributable.getDistributable.getSegment
      }
      val defaultIndex = IndexStoreManager.getInstance
        .getIndex(indexInputFormat.getCarbonTable, split.head
          .asInstanceOf[IndexInputSplitWrapper].getDistributable.getIndexSchema)
      defaultIndex.getBlockRowCount(defaultIndex, segments.toList.asJava, indexInputFormat
        .getPartitions).asScala
    }
  }

} 
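The compute method above builds its own ExecutionContext from a fixed thread pool instead of the global one, then awaits the grouped futures. The skeleton of that pattern, shrunk to a runnable sketch (names hypothetical):

object FixedPoolAwait extends App {
  import java.util.concurrent.Executors
  import scala.concurrent.{Await, ExecutionContext, Future}
  import scala.concurrent.duration.Duration

  val service = Executors.newFixedThreadPool(4)
  implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(service)

  val work = Future.traverse((1 to 8).toList)(i => Future(i * i))
  println(Await.result(work, Duration.Inf).sum) // 204

  service.shutdown() // unlike the global EC, a dedicated pool must be shut down
}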
Example 102
Source File: TestSegmentReadingForMultiThreading.scala    From carbondata   with Apache License 2.0 5 votes
package org.apache.carbondata.spark.testsuite.segmentreading

import java.util.concurrent.TimeUnit

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

import org.apache.spark.sql.{CarbonUtils, Row}
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll



class TestSegmentReadingForMultiThreading extends QueryTest with BeforeAndAfterAll {

  override def beforeAll: Unit = {
    sql("DROP TABLE IF EXISTS carbon_table_MulTI_THread")
    sql(
      "CREATE TABLE carbon_table_MulTI_THread (empno int, empname String, designation String, doj " +
      "Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname " +
      "String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance " +
      "int,utilization int,salary int) STORED AS carbondata")
    sql(
      s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO TABLE carbon_table_MulTI_THread " +
      "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
    sql(
      s"LOAD DATA LOCAL INPATH '$resourcesPath/data1.csv' INTO TABLE carbon_table_MulTI_THread " +
      "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
    sql(
      s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO TABLE carbon_table_MulTI_THread " +
      "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
    sql(
      s"LOAD DATA LOCAL INPATH '$resourcesPath/data1.csv' INTO TABLE carbon_table_MulTI_THread " +
      "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
  }

  test("test multithreading for segment reading") {


    CarbonUtils.threadSet("carbon.input.segments.default.carbon_table_MulTI_THread", "1,2,3")
    val df = sql("select count(empno) from carbon_table_MulTI_THread")
    checkAnswer(df, Seq(Row(30)))

    val four = Future {
      CarbonUtils.threadSet("carbon.input.segments.default.carbon_table_MulTI_THread", "1,3")
      val df = sql("select count(empno) from carbon_table_MulTI_THread")
      checkAnswer(df, Seq(Row(20)))
    }

    val three = Future {
      CarbonUtils.threadSet("carbon.input.segments.default.carbon_table_MulTI_THread", "0,1,2")
      val df = sql("select count(empno) from carbon_table_MulTI_THread")
      checkAnswer(df, Seq(Row(30)))
    }


    val one = Future {
      CarbonUtils.threadSet("carbon.input.segments.default.carbon_table_MulTI_THread", "0,2")
      val df = sql("select count(empno) from carbon_table_MulTI_THread")
      checkAnswer(df, Seq(Row(20)))
    }

    val two = Future {
      CarbonUtils.threadSet("carbon.input.segments.default.carbon_table_MulTI_THread", "1")
      val df = sql("select count(empno) from carbon_table_MulTI_THread")
      checkAnswer(df, Seq(Row(10)))
    }
    Await.result(Future.sequence(Seq(one, two, three, four)), Duration(300, TimeUnit.SECONDS))
  }

  override def afterAll: Unit = {
    sql("DROP TABLE IF EXISTS carbon_table_MulTI_THread")
    CarbonUtils.threadUnset("carbon.input.segments.default.carbon_table_MulTI_THread")
  }
} 
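The closing Await is what makes this test deterministic: Future.sequence turns the four independent futures into one, so a single bounded wait covers them all and fails fast if any member fails. The same shape in isolation (a sketch):

object AwaitAllDemo extends App {
  import java.util.concurrent.TimeUnit
  import scala.concurrent.{Await, Future}
  import scala.concurrent.ExecutionContext.Implicits.global
  import scala.concurrent.duration.Duration

  val tasks = Seq(Future(1), Future(2), Future(3))
  // One Await covers all three futures.
  val all: Seq[Int] = Await.result(Future.sequence(tasks), Duration(30, TimeUnit.SECONDS))
  println(all.sum) // 6
}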
Example 103
Source File: InfluxAkkaClient.scala    From chronicler   with Apache License 2.0 5 votes
package com.github.fsanaulla.chronicler.akka.shared

import akka.actor.ActorSystem
import akka.http.scaladsl.{Http, HttpExt, HttpsConnectionContext}

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

abstract class InfluxAkkaClient(
    terminateActorSystem: Boolean,
    httpsContext: Option[HttpsConnectionContext]
  )(implicit system: ActorSystem,
    ec: ExecutionContext) { self: AutoCloseable =>

  private[akka] implicit val http: HttpExt = Http()

  private[akka] val (ctx, schema) =
    httpsContext
      .map(_ -> "https")
      .getOrElse(http.defaultClientHttpsContext -> "http")

  def close(): Unit =
    Await.ready(closeAsync(), Duration.Inf)

  def closeAsync(): Future[Unit] = {
    for {
      _ <- http.shutdownAllConnectionPools()
      _ <- if (terminateActorSystem) system.terminate().map(_ => {}) else Future.successful({})
    } yield {}
  }
} 
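The close/closeAsync split is a common pattern when an asynchronous client has to satisfy a synchronous AutoCloseable contract. A minimal sketch of the same shape (names hypothetical):

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.Duration

final class AsyncResource(implicit ec: ExecutionContext) extends AutoCloseable {
  def closeAsync(): Future[Unit] =
    Future(()) // release pools / terminate systems here
  override def close(): Unit =
    Await.ready(closeAsync(), Duration.Inf) // blocking facade over the async path
}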
Example 104
Source File: CouchbasePersistenceModule.scala    From akka-persistence-couchbase   with Apache License 2.0 5 votes
package com.lightbend.lagom.javadsl.persistence.couchbase

import java.net.URI

import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.alpakka.couchbase.javadsl.CouchbaseSession
import akka.stream.alpakka.couchbase.CouchbaseSessionSettings
import com.google.inject.Provider
import com.lightbend.lagom.internal.javadsl.persistence.couchbase.{
  CouchbasePersistentEntityRegistry,
  CouchbaseReadSideImpl,
  JavadslCouchbaseOffsetStore
}
import com.lightbend.lagom.internal.persistence.couchbase.{
  CouchbaseConfigValidator,
  CouchbaseOffsetStore,
  ServiceLocatorAdapter,
  ServiceLocatorHolder
}
import com.lightbend.lagom.javadsl.api.ServiceLocator
import com.lightbend.lagom.javadsl.persistence.PersistentEntityRegistry
import com.lightbend.lagom.spi.persistence.OffsetStore
import com.typesafe.config.Config
import javax.inject.Inject
import play.api.inject.{Binding, Injector, Module}
import play.api.{Configuration, Environment}

import scala.compat.java8.FutureConverters._
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.Try


class CouchbasePersistenceModule extends Module {
  override def bindings(environment: Environment, configuration: Configuration): Seq[Binding[_]] = Seq(
    bind[CouchbasePersistenceModule.InitServiceLocatorHolder].toSelf.eagerly(),
    bind[PersistentEntityRegistry].to[CouchbasePersistentEntityRegistry],
    bind[CouchbaseSession].toProvider[CouchbaseProvider],
    bind[CouchbaseReadSide].to[CouchbaseReadSideImpl],
    //TODO: add other modules similar to Cassandra
    //    bind[CassandraReadSideSettings].toSelf,
    bind[CouchbaseOffsetStore].to(bind[JavadslCouchbaseOffsetStore]),
    bind[OffsetStore].to(bind[CouchbaseOffsetStore])
  )
}

private[lagom] class CouchbaseProvider @Inject() (system: ActorSystem, cfg: Config) extends Provider[CouchbaseSession] {
  private val log = Logging(system, classOf[CouchbaseProvider])

  CouchbaseConfigValidator.validateBucket("lagom.persistence.read-side.couchbase", cfg, log)

  private val readSideCouchbaseConfig: Config =
    cfg.getConfig("lagom.persistence.read-side.couchbase")

  private val sessionSettings = CouchbaseSessionSettings(
    readSideCouchbaseConfig.getConfig("connection")
  )

  private val bucket = readSideCouchbaseConfig.getString("bucket")

  // FIXME is there a way to have async component creation in lagom instead of letting every component know that the thing is async?
  // if not we should pass Future[CouchbaseSession] around and let the use sites mix in AsyncCouchbaseSession - but if we use
  // that from Lagom it needs to be made public API
  // FIXME this should be the Java API of CouchbaseSession, when there is one
  lazy val couchbase: CouchbaseSession =
    Await.result(CouchbaseSession.create(sessionSettings, bucket, system.dispatcher).toScala, 30.seconds)

  override def get(): CouchbaseSession = couchbase
}

private[lagom] object CouchbasePersistenceModule {
  class InitServiceLocatorHolder @Inject() (system: ActorSystem, injector: Injector) {
    def init(): Unit =
      Try(injector.instanceOf[ServiceLocator]).foreach { locator =>
        ServiceLocatorHolder(system).setServiceLocator(new ServiceLocatorAdapter {
          override def locateAll(name: String): Future[List[URI]] = {
            import system.dispatcher

            import scala.collection.JavaConverters._
            import scala.compat.java8.FutureConverters._
            locator.locateAll(name).toScala.map(_.asScala.toList)
          }
        })
      }
  }
} 
Example 105
Source File: CouchbasePersistenceComponents.scala    From akka-persistence-couchbase   with Apache License 2.0 5 votes
package com.lightbend.lagom.scaladsl.persistence.couchbase

import akka.event.Logging
import akka.stream.alpakka.couchbase.{CouchbaseSessionRegistry, CouchbaseSessionSettings}
import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession
import com.lightbend.lagom.internal.persistence.couchbase.{CouchbaseConfigValidator, CouchbaseOffsetStore}
import com.lightbend.lagom.internal.scaladsl.persistence.couchbase.{
  CouchbasePersistentEntityRegistry,
  CouchbaseReadSideImpl,
  ScaladslCouchbaseOffsetStore
}
import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.lightbend.lagom.scaladsl.persistence.{
  PersistenceComponents,
  PersistentEntityRegistry,
  ReadSidePersistenceComponents,
  WriteSidePersistenceComponents
}
import com.lightbend.lagom.spi.persistence.OffsetStore
import com.typesafe.config.Config

import scala.concurrent.duration._
import scala.concurrent.Await


trait ReadSideCouchbasePersistenceComponents extends ReadSidePersistenceComponents {
  private val log = Logging(actorSystem, classOf[ReadSideCouchbasePersistenceComponents])

  CouchbaseConfigValidator.validateBucket("lagom.persistence.read-side.couchbase", configuration.underlying, log)

  private val readSideCouchbaseConfig: Config =
    configuration.underlying.getConfig("lagom.persistence.read-side.couchbase")

  private val sessionSettings = CouchbaseSessionSettings(
    readSideCouchbaseConfig.getConfig("connection")
  )

  private val bucket = readSideCouchbaseConfig.getString("bucket")

  // FIXME is there a way to have async component creation in lagom instead of letting every component know that the thing is async?
  // if not we should pass Future[CouchbaseSession] around and let the use sites mix in AsyncCouchbaseSession - but if we use
  // that from Lagom it needs to be made public API
  lazy val couchbase: CouchbaseSession =
    Await.result(CouchbaseSessionRegistry(actorSystem).sessionFor(sessionSettings, bucket), 30.seconds)

  private[lagom] lazy val couchbaseOffsetStore: CouchbaseOffsetStore =
    new ScaladslCouchbaseOffsetStore(actorSystem, couchbase, readSideConfig)

  lazy val offsetStore: OffsetStore = couchbaseOffsetStore

  lazy val couchbaseReadSide: CouchbaseReadSide =
    new CouchbaseReadSideImpl(actorSystem, couchbase, couchbaseOffsetStore)
} 
Example 106
Source File: CouchbaseConfigValidatorSpec.scala    From akka-persistence-couchbase   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.persistence.couchbase

import akka.actor.ActorSystem
import akka.event.Logging
import akka.testkit.EventFilter
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, MustMatchers, WordSpec}

import scala.concurrent.duration._
import scala.concurrent.Await

class MyException extends RuntimeException("MyException")

class CouchbaseConfigValidatorSpec extends WordSpec with MustMatchers with BeforeAndAfterAll {
  val akkaTestLogging = ConfigFactory.parseString("akka.loggers = [akka.testkit.TestEventListener]")
  implicit val system = ActorSystem("test", akkaTestLogging)
  val log = Logging(system, classOf[CouchbaseConfigValidatorSpec])

  override def afterAll =
    Await.result(system.terminate(), Duration.Inf)

  "CouchbaseConfigValidator" should {
    "detect when bucket is not set" in {
      val config = ConfigFactory.parseString("""some.config.setting = 1""".stripMargin)
      EventFilter
        .error("Configuration for [test.bucket] must be set in application.conf ", occurrences = 1)
        .intercept {
          CouchbaseConfigValidator.validateBucket("test", config, log)
        }
    }
    "detect when bucket is set to null" in {
      val config = ConfigFactory.parseString("""testpath1.bucket = null""".stripMargin)
      EventFilter
        .error("Configuration for [testpath1.bucket] must be set in application.conf ", occurrences = 1)
        .intercept {
          CouchbaseConfigValidator.validateBucket("testpath1", config, log)
        }
    }
    "pass when bucket is specified" in {
      val config = ConfigFactory.parseString("""sample.path.bucket = bucketname""".stripMargin)
      // validateBucket should log nothing here; the only expected error is the explicit one below
      EventFilter.error(occurrences = 1).intercept {
        CouchbaseConfigValidator.validateBucket("sample.path", config, log)
        log.error("another error")
      }
    }
  }
} 
Example 107
Source File: CouchbaseClusterConnection.scala    From akka-persistence-couchbase   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.couchbase

import java.util.concurrent.TimeUnit

import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession
import com.couchbase.client.java.{Bucket, Cluster, CouchbaseCluster}
import com.couchbase.client.java.query.N1qlQuery

import scala.concurrent.Await
import scala.util.Try
import scala.concurrent.duration._

object CouchbaseClusterConnection {
  def connect(): CouchbaseClusterConnection = connect("admin", "admin1", "akka")

  def connect(username: String, password: String, bucketName: String): CouchbaseClusterConnection = {
    val cluster = CouchbaseCluster.create()
    cluster.authenticate(username, password) // needs to be admin

    val bucket = cluster.openBucket(bucketName)

    new CouchbaseClusterConnection(cluster, bucket)
  }
}

final class CouchbaseClusterConnection(val cluster: Cluster, bucket: Bucket) {
  val couchbaseSession: CouchbaseSession = CouchbaseSession(bucket)

  def cleanUp(): CouchbaseClusterConnection = {
    val bucketName = bucket.name()
    bucket.bucketManager().createN1qlPrimaryIndex(true, false)
    val result = bucket.query(N1qlQuery.simple(s"delete from $bucketName"), 5, TimeUnit.MINUTES)
    assert(result.finalSuccess(), s"Failed to clean out bucket $bucketName")
    bucket.bucketManager().dropN1qlPrimaryIndex(true)

    this
  }

  def close(): Unit = {
    Try(Await.result(couchbaseSession.close(), 30.seconds))
    Try(cluster.disconnect())
  }
} 
Example 108
Source File: PlotServer.scala    From DynaML   with Apache License 2.0 5 votes vote down vote up
package io.github.mandar2812.dynaml.graphics.charts.repl

import unfiltered.request._
import unfiltered.response._

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Promise}


class PlotServer extends UnfilteredWebApp[UnfilteredWebApp.Arguments]  {
  // this is fulfilled by the plot command, to allow a browser to wait for plot to reload
  var p = Promise[Unit]()

  private class WebApp extends unfiltered.filter.Plan {
    def intent = {
      // handle jsonp
      case req @ GET(Path(Seg("check" :: Nil)) & Params(params)) =>
        implicit val responder = req
        val str = """[]"""
        val response = params.get("callback") match {
          case Some(v) =>
            val callbackName = v.head
            s"$callbackName($str)"
          case _ => str
        }
        // block for plot command to fulfill promise, and release this result to trigger browser reload
        Await.result(p.future, Duration.Inf)
        JsonContent ~> ResponseString(response)
      case _ => Pass
    }
  }

  def parseArgs(args: Array[String]) = {
    val parsed = new UnfilteredWebApp.Arguments{}
    parsed.parse(args)
    parsed
  }

  def setup(parsed: UnfilteredWebApp.Arguments): unfiltered.filter.Plan = {
    new WebApp
  }

  def htmlRoot: String = "/"
} 
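The Await here implements a long poll: each /check request parks on Await.result(p.future, Duration.Inf) until the plot command completes the promise, which releases all waiting requests at once. A self-contained sketch of that promise handshake (the sleep merely simulates the plot command doing work):

import scala.concurrent.{Await, Promise}
import scala.concurrent.duration.Duration

object PromiseHandshake extends App {
  val p = Promise[Unit]()

  val waiter = new Thread(new Runnable {
    def run(): Unit = {
      Await.result(p.future, Duration.Inf) // parks until the promise is completed
      println("released")
    }
  })
  waiter.start()

  Thread.sleep(500) // simulate the plot command doing its work
  p.success(())     // completing the promise releases the waiter
  waiter.join()
}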
Example 109
Source File: AkkaBlockingConnection.scala    From scredis   with Apache License 2.0 5 votes vote down vote up
package scredis.io

import java.util.concurrent.locks.ReentrantLock

import akka.actor._
import scredis.exceptions._
import scredis.protocol._
import scredis.util.UniqueNameGenerator

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try


abstract class AkkaBlockingConnection(
  system: ActorSystem,
  host: String,
  port: Int,
  passwordOpt: Option[String],
  database: Int,
  nameOpt: Option[String],
  decodersCount: Int,
  connectTimeout: FiniteDuration,
  maxWriteBatchSize: Int,
  tcpSendBufferSizeHint: Int,
  tcpReceiveBufferSizeHint: Int,
  akkaListenerDispatcherPath: String,
  akkaIODispatcherPath: String,
  akkaDecoderDispatcherPath: String,
  failCommandOnConnecting:Boolean
) extends AbstractAkkaConnection(
  system = system,
  host = host,
  port = port,
  passwordOpt = passwordOpt,
  database = database,
  nameOpt = nameOpt,
  decodersCount = decodersCount,
  receiveTimeoutOpt = None,
  connectTimeout = connectTimeout,
  maxWriteBatchSize = maxWriteBatchSize,
  tcpSendBufferSizeHint = tcpSendBufferSizeHint,
  tcpReceiveBufferSizeHint = tcpReceiveBufferSizeHint,
  akkaListenerDispatcherPath = akkaListenerDispatcherPath,
  akkaIODispatcherPath = akkaIODispatcherPath,
  akkaDecoderDispatcherPath = akkaDecoderDispatcherPath
) with BlockingConnection {
  
  private val lock = new ReentrantLock()
  
  protected val listenerActor = system.actorOf(
    Props(
      classOf[ListenerActor],
      host,
      port,
      passwordOpt,
      database,
      nameOpt,
      decodersCount,
      receiveTimeoutOpt,
      connectTimeout,
      maxWriteBatchSize,
      tcpSendBufferSizeHint,
      tcpReceiveBufferSizeHint,
      akkaIODispatcherPath,
      akkaDecoderDispatcherPath,
      failCommandOnConnecting
    ).withDispatcher(akkaListenerDispatcherPath),
    UniqueNameGenerator.getUniqueName(s"${nameOpt.getOrElse(s"$host-$port")}-listener-actor")
  )
  
  private def withLock[A](f: => A): A = {
    if (lock.tryLock) {
      try {
        f
      } finally {
        lock.unlock()
      }
    } else {
      throw RedisIOException("Trying to send request on a blocked connection")
    }
  }
  
  override protected[scredis] def sendBlocking[A](request: Request[A])(
    implicit timeout: Duration
  ): Try[A] = withLock {
    logger.debug(s"Sending blocking request: $request")
    updateState(request)
    val future = Protocol.send(request, listenerActor)
    Try(Await.result(future, timeout))
  }
  
} 
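Note how sendBlocking wraps the wait in Try, folding both failure modes into one value: Await.result throws a TimeoutException when the timeout expires, and rethrows the future's own exception when it completed with a failure. A tiny demonstration of both cases:

import scala.concurrent.{Await, Future, Promise}
import scala.concurrent.duration._
import scala.util.Try

object TryAwait extends App {
  // A future that never completes: the wait times out.
  println(Try(Await.result(Promise[Int]().future, 100.millis)))
  // => Failure(java.util.concurrent.TimeoutException)

  // A future that has already failed: Await rethrows and Try captures it.
  println(Try(Await.result(Future.failed[Int](new IllegalStateException("boom")), 1.second)))
  // => Failure(java.lang.IllegalStateException: boom)
}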
Example 110
Source File: ClientBenchmark.scala    From scredis   with Apache License 2.0 5 votes vote down vote up
package scredis

import org.scalameter.api._
import org.scalameter.picklers.Implicits._
import akka.actor.ActorSystem
import org.scalameter.execution.SeparateJvmsExecutor

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
object ClientBenchmark extends Bench[Double] {
  
  private var system: ActorSystem = _
  private var client: Client = _

  // NOTE: the benchmark's input axis and the remaining scalameter wiring
  // (executor, reporter, persistor) were elided from this listing. A plausible
  // `sizes` definition is reconstructed below so that `using(sizes)` compiles;
  // the exact axis name and values are an assumption.
  private val sizes: Gen[Int] = Gen.range("numberOfRequests")(10000, 30000, 10000)

  performance of "Client" in {
    measure method "PING" in {
      using(sizes) config (
        exec.maxWarmupRuns -> 3,
        exec.benchRuns -> 3,
        exec.independentSamples -> 3
      ) setUp { _ =>
        system = ActorSystem()
        client = Client()(system)
      } tearDown { _ =>
        Await.result(client.quit(), 2.seconds)
        Await.result(system.terminate(), 10.seconds)
        client = null
        system = null
      } in { i =>
        implicit val ec = system.dispatcher
        val future = Future.traverse(1 to i) { _ =>
          client.ping()
        }
        Await.result(future, 30.seconds)
      }
    }
    
    measure method "GET" in {
      using(sizes) config (
        exec.maxWarmupRuns -> 3,
        exec.benchRuns -> 3,
        exec.independentSamples -> 3
      ) setUp { _ =>
        system = ActorSystem()
        client = Client()(system)
        Await.result(client.set("foo", "bar"), 2.seconds)
      } tearDown { _ =>
        Await.result(client.del("foo"), 2.seconds)
        Await.result(client.quit(), 2.seconds)
        Await.result(system.terminate(), 10.seconds)
        client = null
        system = null
      } in { i =>
        implicit val ec = system.dispatcher
        val future = Future.traverse(1 to i) { _ =>
          client.get("foo")
        }
        Await.result(future, 30.seconds)
      }
    }
    
    measure method "SET" in {
      using(sizes) config (
        exec.maxWarmupRuns -> 3,
        exec.benchRuns -> 3,
        exec.independentSamples -> 3
      ) setUp { _ =>
        system = ActorSystem()
        client = Client()(system)
      } tearDown { _ =>
        Await.result(client.del("foo"), 2.seconds)
        Await.result(client.quit(), 2.seconds)
        Await.result(system.terminate(), 10.seconds)
        client = null
        system = null
      } in { i =>
        implicit val ec = system.dispatcher
        val future = Future.traverse(1 to i) { _ =>
          client.set("foo", "bar")
        }
        Await.result(future, 30.seconds)
      }
    }
  }
} 
Example 111
Source File: KVStore.scala    From Freasy-Monad   with MIT License 5 votes vote down vote up
package examples.scalaz

import scalaz._
import scalaz.Id.Id
import freasymonad.scalaz._

import scala.collection.mutable
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

@free trait KVStore {
  type KVStoreF[A] = Free[GrammarADT, A]
  sealed trait GrammarADT[A]

  def put[T](key: String, value: T): KVStoreF[Unit]
  def get[T](key: String): KVStoreF[Option[T]]
  def delete(key: String): KVStoreF[Unit]

  def update[T](key: String, f: T => T): KVStoreF[Unit] =
    for {
      vMaybe <- get[T](key)
      _      <- vMaybe.map(v => put[T](key, f(v))).getOrElse(Free.pure(()))
    } yield ()
}

object Main extends App {
  import KVStore.ops._

  def program: KVStoreF[Option[Int]] =
    for {
      _ <- put("wild-cats", 2)
      _ <- update[Int]("wild-cats", _ + 12)
      _ <- put("tame-cats", 5)
      n <- get[Int]("wild-cats")
      _ <- delete("tame-cats")
    } yield n

  val idInterpreter = new KVStore.Interp[Id] {
    val kvs = mutable.Map.empty[String, Any]
    def get[T](key: String): Id[Option[T]] = {
      println(s"get($key)")
      kvs.get(key).map(_.asInstanceOf[T])
    }
    def put[T](key: String, value: T): Id[Unit] = {
      println(s"put($key, $value)")
      kvs(key) = value
    }
    def delete(key: String): Id[Unit] = {
      println(s"delete($key)")
      kvs.remove(key)
    }
  }
  val resId: Id[Option[Int]] = idInterpreter.run(program)

  import scalaz.std.scalaFuture.futureInstance
  import scala.concurrent.ExecutionContext.Implicits.global

  val futureInterpreter = new KVStore.Interp[Future] {
    val kvs = mutable.Map.empty[String, Any]
    def get[T](key: String): Future[Option[T]] = Future {
      println(s"get($key)")
      kvs.get(key).map(_.asInstanceOf[T])
    }
    def put[T](key: String, value: T): Future[Unit] = Future {
      println(s"put($key, $value)")
      kvs(key) = value
    }
    def delete(key: String): Future[Unit] = Future {
      println(s"delete($key)")
      kvs.remove(key)
    }
  }
  val resFuture: Future[Option[Int]] = futureInterpreter.run(program)
  Await.ready(resFuture, Duration.Inf)
} 
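The program deliberately ends with Await.ready rather than Await.result: ready only waits for completion and returns the future itself, so a failed interpreter run would not throw at the await site. The difference in one short example:

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

object ReadyVsResult extends App {
  val failed = Future.failed[Int](new RuntimeException("boom"))

  // Await.ready returns the completed future; inspect the outcome via .value.
  val done = Await.ready(failed, 1.second)
  println(done.value) // Some(Failure(java.lang.RuntimeException: boom))

  // Await.result(failed, 1.second) would instead rethrow the RuntimeException.
}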
Example 112
Source File: PubSubSinkIT.scala    From akka-cloudpubsub   with Apache License 2.0 5 votes vote down vote up
package com.qubit.pubsub.akka

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Keep, Sink}
import akka.stream.testkit.scaladsl.TestSource
import akka.stream.{ActorMaterializer, Attributes, Graph, SinkShape}
import com.google.common.base.Charsets
import com.qubit.pubsub.PubSubIntegrationTest
import com.qubit.pubsub.akka.attributes.{
  PubSubClientAttribute,
  PubSubStageBufferSizeAttribute
}
import com.qubit.pubsub.client.PubSubMessage
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try

class PubSubSinkIT
    extends FunSuite
    with Matchers
    with BeforeAndAfterAll
    with PubSubIntegrationTest {

  implicit val actorSystem = ActorSystem("pubsub-stream-test")
  implicit val materializer = ActorMaterializer()

  override def testName = "pubsubsink"

  override def beforeAll(): Unit = {
    Await.ready(client.createTopic(testTopic), timeout)
    Await
      .ready(client.createSubscription(testSubscription, testTopic), timeout)
  }

  override def afterAll(): Unit = {
    actorSystem.terminate()
    Await.ready(client.deleteSubscription(testSubscription), timeout)
    Await.ready(client.deleteTopic(testTopic), timeout)
  }

  test("PubSubSink success") {
    val sinkGraph: Graph[SinkShape[PubSubMessage], NotUsed] =
      new PubSubSink(testTopic, 1.second)
    val sinkAttributes = Attributes(
      List(PubSubClientAttribute(client), PubSubStageBufferSizeAttribute(30)))
    val pubsubSink = Sink.fromGraph(sinkGraph).withAttributes(sinkAttributes)

    val (pub, _) = TestSource
      .probe[Array[Byte]]
      .map(PubSubMessage(_))
      .toMat(pubsubSink)(Keep.both)
      .run()

    Range(0, 100)
      .map(i => s"xxx$i".getBytes(Charsets.UTF_8))
      .foreach(pub.sendNext)
    pub.sendComplete()

    // wait for buffers to flush
    Try(Thread.sleep(1000))

    val output = Await.result(client.pull(testSubscription, 100), timeout)
    client.ack(testSubscription, output.map(m => m.ackId))

    output should not be (null)
    output should have size (100)
    output
      .map(m => new String(m.payload.payload, Charsets.UTF_8))
      .forall(_.startsWith("xxx")) should be(true)
  }
} 
Example 113
Source File: PubSubSourceIT.scala    From akka-cloudpubsub   with Apache License 2.0 5 votes vote down vote up
package com.qubit.pubsub.akka

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.Source
import akka.stream.testkit.scaladsl.TestSink
import akka.stream.{ActorMaterializer, Attributes, Graph, SourceShape}
import com.google.common.base.Charsets
import com.qubit.pubsub.PubSubIntegrationTest
import com.qubit.pubsub.akka.attributes.{
  PubSubClientAttribute,
  PubSubStageBufferSizeAttribute
}
import com.qubit.pubsub.client.PubSubMessage
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}

import scala.concurrent.Await
import scala.concurrent.duration._

class PubSubSourceIT
    extends FunSuite
    with Matchers
    with BeforeAndAfterAll
    with PubSubIntegrationTest {

  implicit val actorSystem = ActorSystem("pubsub-stream-test")
  implicit val materializer = ActorMaterializer()

  override def testName = "pubsubsource"

  override def beforeAll(): Unit = {
    Await.ready(client.createTopic(testTopic), timeout)
    Await
      .ready(client.createSubscription(testSubscription, testTopic), timeout)
  }

  override def afterAll(): Unit = {
    actorSystem.terminate()
    Await.ready(client.deleteSubscription(testSubscription), timeout)
    Await.ready(client.deleteTopic(testTopic), timeout)
  }

  test("PubSubSource success") {
    val data = Range(0, 100)
      .map(i => s"msg$i".getBytes(Charsets.UTF_8))
      .map(PubSubMessage(_))
    Await.ready(client.publish(testTopic, data), timeout)

    val sourceGraph: Graph[SourceShape[PubSubMessage], NotUsed] =
      new PubSubSource(testSubscription, 1.millisecond)
    val sourceAttributes = Attributes(
      List(PubSubClientAttribute(client), PubSubStageBufferSizeAttribute(30)))
    val pubsubSource =
      Source.fromGraph(sourceGraph).withAttributes(sourceAttributes)

    val msgList = pubsubSource
      .runWith(TestSink.probe[PubSubMessage])
      .request(100)
      .expectNextN(100)

    msgList should not be (null)
    msgList should have size (100)
    msgList
      .map(m => new String(m.payload, Charsets.UTF_8))
      .forall(_.startsWith("msg")) should be(true)
  }
} 
Example 114
Source File: Example1.scala    From tepkin   with Apache License 2.0 5 votes vote down vote up
package net.fehmicansaglam.tepkin.examples

import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import akka.util.Timeout
import net.fehmicansaglam.bson.BsonDocument
import net.fehmicansaglam.bson.BsonDsl._
import net.fehmicansaglam.tepkin.MongoClient

import scala.collection.immutable.Iterable
import scala.concurrent.Await
import scala.concurrent.duration._

object Example1 extends App {
  val begin = System.currentTimeMillis()

  // Connect to Mongo client
  val client = MongoClient("mongodb://localhost")

  // Use client's execution context for async operations

  import client.{context, ec}

  // Obtain reference to database "tepkin" using client
  val db = client("tepkin")

  // Obtain reference to the collection "collection1" using database
  val collection1 = db("collection1")

  // Obtain reference to the collection "collection2" using database
  val collection2 = db("collection2")

  implicit val timeout: Timeout = 30.seconds
  implicit val mat = ActorMaterializer()

  // Batch document source
  def documents(n: Int): Source[List[BsonDocument], akka.NotUsed] = Source {
    Iterable.tabulate(n) { _ =>
      (1 to 1000).map(i => $document("name" := s"fehmi$i")).toList
    }
  }

  // Insert 3M documents and then read them all.
  val futureResult = for {
    delete1 <- collection1.drop()
    delete2 <- collection2.drop()
    insert1 <- collection1.insertFromSource(documents(1000)).runForeach(_ => ())
    insert2 <- collection2.insertFromSource(documents(2000)).runForeach(_ => ())
    source1 = collection1.find(BsonDocument.empty, batchMultiplier = 10000)
    source2 = collection2.find(BsonDocument.empty, batchMultiplier = 10000)
    fold1 = source1.runFold(0) { (total, documents) =>
      total + documents.size
    }
    fold2 = source2.runFold(0) { (total, documents) =>
      total + documents.size
    }
    result1 <- fold1
    result2 <- fold2
  } yield (result1, result2)

  val result = Await.result(futureResult, 90.seconds)

  println(s"collection1: ${result._1}")
  println(s"collection2: ${result._2}")
  println(s"Elapsed: ${System.currentTimeMillis() - begin}ms")

  // Drop created collections
  Await.ready(collection1.drop(), 10.seconds)
  Await.ready(collection2.drop(), 10.seconds)

  client.shutdown()
} 
Example 115
Source File: Example2.scala    From tepkin   with Apache License 2.0 5 votes vote down vote up
package net.fehmicansaglam.pide.examples

import java.util.UUID

import akka.util.Timeout
import net.fehmicansaglam.bson.BsonDocument
import net.fehmicansaglam.bson.BsonDsl._
import net.fehmicansaglam.pide.{Dao, Entity, StringPide}
import net.fehmicansaglam.tepkin.{MongoClient, MongoCollection}

import scala.concurrent.Await
import scala.concurrent.duration._


object Example2 extends App {

  val client = MongoClient("mongodb://localhost")
  val db = client("tepkin")

  case class Person(id: String,
                    name: String,
                    surname: String,
                    age: Int) extends Entity[String]

  object PersonDao extends Dao[String, Person] {
    override val collection: MongoCollection = db("person")
  }

  implicit object PersonPide extends StringPide[Person] {

    override def read(document: BsonDocument): Person = {
      Person(
        id = document.getAs[String]("_id").get,
        name = document.getAs[String]("name").get,
        surname = document.getAs[String]("surname").get,
        age = document.getAs[Int]("age").get
      )
    }

    override def write(person: Person): BsonDocument = {
      ("_id" := person.id) ~
        ("name" := person.name) ~
        ("surname" := person.surname) ~
        ("age" := person.age)
    }
  }

  val person1 = Person(UUID.randomUUID().toString, "name1", "surname1", 16)
  val person2 = Person(UUID.randomUUID().toString, "name2", "surname2", 32)

  import client.ec

  implicit val timeout: Timeout = 5.seconds

  val result = for {
    insert1 <- PersonDao.insert(person1)
    insert2 <- PersonDao.insert(person2)
    drop <- PersonDao.collection.drop()
  } yield drop

  Await.ready(result, 30.seconds)

  client.shutdown()
} 
Example 116
Source File: Example1.scala    From tepkin   with Apache License 2.0 5 votes vote down vote up
package net.fehmicansaglam.pide.examples

import akka.util.Timeout
import net.fehmicansaglam.bson.BsonDocument
import net.fehmicansaglam.bson.BsonDsl._
import net.fehmicansaglam.bson.Implicits._
import net.fehmicansaglam.bson.element.BsonObjectId
import net.fehmicansaglam.pide.{Dao, Entity, ObjectIdPide}
import net.fehmicansaglam.tepkin.{MongoClient, MongoCollection}

import scala.concurrent.Await
import scala.concurrent.duration._


object Example1 extends App {

  val client = MongoClient("mongodb://localhost")
  val db = client("tepkin")

  case class Person(id: ObjectId,
                    name: String,
                    surname: String,
                    age: Int) extends Entity[ObjectId]

  object PersonDao extends Dao[ObjectId, Person] {
    override val collection: MongoCollection = db("person")
  }

  implicit object PersonPide extends ObjectIdPide[Person] {
    override def read(document: BsonDocument): Person = {
      Person(
        id = document.get[ObjectId]("_id").get,
        name = document.getAs[String]("name").get,
        surname = document.getAs[String]("surname").get,
        age = document.getAs[Int]("age").get
      )
    }

    override def write(person: Person): BsonDocument = {
      ("_id" := person.id) ~
        ("name" := person.name) ~
        ("surname" := person.surname) ~
        ("age" := person.age)
    }
  }

  val person1 = Person(BsonObjectId.generate, "name1", "surname1", 16)
  val person2 = Person(BsonObjectId.generate, "name2", "surname2", 32)

  import client.ec

  implicit val timeout: Timeout = 5.seconds

  val result = for {
    insert1 <- PersonDao.insert(person1)
    insert2 <- PersonDao.insert(person2)
    drop <- PersonDao.collection.drop()
  } yield drop

  Await.ready(result, 30.seconds)

  client.shutdown()
} 
Example 117
Source File: S3BrainTest.scala    From sumobot   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.sumobot.brain

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.testkit.TestKit
import akka.util.Timeout
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider}
import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.sumologic.sumobot.brain.Brain.ValueRetrieved
import com.sumologic.sumobot.core.aws.AWSAccounts
import com.sumologic.sumobot.test.annotated.SumoBotTestKit
import org.scalatest.{BeforeAndAfterAll, Matchers}

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Random

class S3BrainTest
    extends SumoBotTestKit(ActorSystem("S3SingleObjectBrainTest"))
    with BeforeAndAfterAll
    with Matchers {

  lazy val credsOption = AWSAccounts.load(system.settings.config).values.headOption

  val bucketPrefix = "sumobot-s3-brain"

  // The tests here only run if there are valid AWS credentials in the configuration. Otherwise,
  // they're skipped.
  credsOption foreach {
    creds =>
      cleanupBuckets(creds)

      val bucket = bucketPrefix + randomString(5)

      "S3 brain" should {
        "persist the contents across reloads" in {
          implicit val timeout = Timeout(5.seconds)
          val s3Key = randomString(16)
          val firstBrain = system.actorOf(S3Brain.props(creds, bucket, s3Key))
          firstBrain ! Brain.Store("hello", "world")

          // Ask for the value back; waiting on the reply ensures the Store was processed.
          val firstRetrieval = firstBrain ? Brain.Retrieve("hello")
          val firstResult = Await.result(firstRetrieval, 5.seconds)
          firstResult match {
            case ValueRetrieved(k, v) =>
              k should be("hello")
              v should be("world")
            case wrongResult => fail(s"Did not get what we expected: $wrongResult")
          }

          // Since we wrote to S3, the 2nd brain should now have the value.
          val secondBrain = system.actorOf(S3Brain.props(creds, bucket, s3Key))
          val secondRetrieval = secondBrain ? Brain.Retrieve("hello")
          val secondResult = Await.result(secondRetrieval, 5.seconds)
          secondResult match {
            case ValueRetrieved(k, v) =>
              k should be("hello")
              v should be("world")
            case wrongResult => fail(s"Did not get what we expected: $wrongResult")
          }
        }
      }
  }

  private def randomString(length: Int): String = {
    val alphabet = ('a' to 'z').mkString + ('0' to '9').mkString
    (1 to length).
        map(_ => Random.nextInt(alphabet.length)).
        map(alphabet.charAt).mkString
  }

  override def afterAll() {
    TestKit.shutdownActorSystem(system)
    credsOption.foreach(cleanupBuckets)
  }

  def cleanupBuckets(creds: AWSCredentials): Unit = {
    val s3 = AmazonS3ClientBuilder.standard().withCredentials(new AWSStaticCredentialsProvider(creds)).build()
    s3.listBuckets().asScala.filter(_.getName.startsWith(bucketPrefix)).foreach {
      bucket =>
        println(s"Deleting S3 bucket ${bucket.getName}")
        val objects = s3.listObjects(bucket.getName).getObjectSummaries.asScala.map(_.getKey)
        objects.foreach {
          obj =>
            s3.deleteObject(bucket.getName, obj)
        }
        s3.deleteBucket(bucket.getName)
    }
  }
} 
Example 118
Source File: MergeByCommitCallbackTest.scala    From monix-kafka   with Apache License 2.0 5 votes vote down vote up
package monix.kafka

import monix.eval.Task
import monix.kafka.config.AutoOffsetReset
import monix.reactive.Observable
import org.apache.kafka.clients.producer.ProducerRecord
import org.scalatest.{FunSuite, Matchers}

import scala.concurrent.duration._
import scala.concurrent.Await
import monix.execution.Scheduler.Implicits.global
import org.apache.kafka.clients.consumer.OffsetCommitCallback
import org.apache.kafka.common.TopicPartition
import org.scalacheck.Gen
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks

class MergeByCommitCallbackTest extends FunSuite with KafkaTestKit with ScalaCheckDrivenPropertyChecks with Matchers {

  val commitCallbacks: List[Commit] = List.fill(4)(new Commit {
    override def commitBatchSync(batch: Map[TopicPartition, Long]): Task[Unit] = Task.unit

    override def commitBatchAsync(batch: Map[TopicPartition, Long], callback: OffsetCommitCallback): Task[Unit] =
      Task.unit
  })

  val committableOffsetsGen: Gen[CommittableOffset] = for {
    partition <- Gen.posNum[Int]
    offset <- Gen.posNum[Long]
    commit <- Gen.oneOf(commitCallbacks)
  } yield CommittableOffset(new TopicPartition("topic", partition), offset, commit)

  test("merge by commit callback works") {
    forAll(Gen.nonEmptyListOf(committableOffsetsGen)) { offsets =>
      val partitions = offsets.map(_.topicPartition)
      val received: List[CommittableOffsetBatch] = CommittableOffsetBatch.mergeByCommitCallback(offsets)

      received.foreach { batch => partitions should contain allElementsOf batch.offsets.keys }

      received.size should be <= 4
    }
  }

  test("merge by commit callback for multiple consumers") {
    withRunningKafka {
      val count = 10000
      val topicName = "monix-kafka-merge-by-commit"

      val producerCfg = KafkaProducerConfig.default.copy(
        bootstrapServers = List("127.0.0.1:6001"),
        clientId = "monix-kafka-1-0-producer-test"
      )

      val producer = KafkaProducerSink[String, String](producerCfg, io)

      val pushT = Observable
        .range(0, count)
        .map(msg => new ProducerRecord(topicName, "obs", msg.toString))
        .bufferIntrospective(1024)
        .consumeWith(producer)

      val listT = Observable
        .range(0, 4)
        .mergeMap(i => createConsumer(i.toInt, topicName).take(500))
        .bufferTumbling(2000)
        .map(CommittableOffsetBatch.mergeByCommitCallback)
        .map { offsetBatches => assert(offsetBatches.length == 4) }
        .completedL

      Await.result(Task.parZip2(listT, pushT).runToFuture, 60.seconds)
    }
  }

  private def createConsumer(i: Int, topicName: String): Observable[CommittableOffset] = {
    val cfg = KafkaConsumerConfig.default.copy(
      bootstrapServers = List("127.0.0.1:6001"),
      groupId = s"kafka-tests-$i",
      autoOffsetReset = AutoOffsetReset.Earliest
    )

    KafkaConsumerObservable
      .manualCommit[String, String](cfg, List(topicName))
      .executeOn(io)
      .map(_.committableOffset)
  }
} 
Example 119
Source File: MonixKafkaTopicRegexTest.scala    From monix-kafka   with Apache License 2.0 5 votes vote down vote up
package monix.kafka

import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import monix.kafka.config.AutoOffsetReset
import monix.reactive.Observable
import org.apache.kafka.clients.producer.ProducerRecord
import org.scalatest.FunSuite

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration._

class MonixKafkaTopicRegexTest extends FunSuite with KafkaTestKit {
  val topicsRegex = "monix-kafka-tests-.*".r
  val topicMatchingRegex = "monix-kafka-tests-anything"

  val producerCfg = KafkaProducerConfig.default.copy(
    bootstrapServers = List("127.0.0.1:6001"),
    clientId = "monix-kafka-1-0-producer-test"
  )

  val consumerCfg = KafkaConsumerConfig.default.copy(
    bootstrapServers = List("127.0.0.1:6001"),
    groupId = "kafka-tests",
    clientId = "monix-kafka-1-0-consumer-test",
    autoOffsetReset = AutoOffsetReset.Earliest
  )

  test("publish one message when subscribed to topics regex") {

    withRunningKafka {
      val producer = KafkaProducer[String, String](producerCfg, io)
      val consumerTask = KafkaConsumerObservable.createConsumer[String, String](consumerCfg, topicsRegex).executeOn(io)
      val consumer = Await.result(consumerTask.runToFuture, 60.seconds)

      try {
        // Publishing one message
        val send = producer.send(topicMatchingRegex, "my-message")
        Await.result(send.runToFuture, 30.seconds)

        val records = consumer.poll(10.seconds.toMillis).asScala.map(_.value()).toList
        assert(records === List("my-message"))
      } finally {
        Await.result(producer.close().runToFuture, Duration.Inf)
        consumer.close()
      }
    }
  }

  test("listen for one message when subscribed to topics regex") {

    withRunningKafka {
      val producer = KafkaProducer[String, String](producerCfg, io)
      val consumer = KafkaConsumerObservable[String, String](consumerCfg, topicsRegex).executeOn(io)
      try {
        // Publishing one message
        val send = producer.send(topicMatchingRegex, "test-message")
        Await.result(send.runToFuture, 30.seconds)

        val first = consumer.take(1).map(_.value()).firstL
        val result = Await.result(first.runToFuture, 30.seconds)
        assert(result === "test-message")
      } finally {
        Await.result(producer.close().runToFuture, Duration.Inf)
      }
    }
  }

  test("full producer/consumer test when subscribed to topics regex") {
    withRunningKafka {
      val count = 10000

      val producer = KafkaProducerSink[String, String](producerCfg, io)
      val consumer = KafkaConsumerObservable[String, String](consumerCfg, topicsRegex).executeOn(io).take(count)

      val pushT = Observable
        .range(0, count)
        .map(msg => new ProducerRecord(topicMatchingRegex, "obs", msg.toString))
        .bufferIntrospective(1024)
        .consumeWith(producer)

      val listT = consumer
        .map(_.value())
        .toListL

      val (result, _) = Await.result(Task.parZip2(listT, pushT).runToFuture, 60.seconds)
      assert(result.map(_.toInt).sum === (0 until count).sum)
    }
  }
} 
Example 124
Source File: BintrayExt.scala    From reactive-cli   with Apache License 2.0 5 votes vote down vote up
package bintray

import dispatch._, Defaults._
import sbt.{ File, Logger }
import scala.concurrent.Await
import scala.concurrent.duration._

object RpmBuildTarget {
  def normalizeVersion(version: String): String =
    version.replaceAll("-", ".")
}


object BintrayExt {
  def publishDeb(file: File, distributions: Seq[String], components: String, architecture: String, version: String, bintrayCredentialsFile: File, log: Logger): Unit = {
    val urlString =
      s"https://api.bintray.com/content/lightbend/deb/reactive-cli/$version/${file.getName}"

    val request = withAuth(Bintray.ensuredCredentials(bintrayCredentialsFile, log))(
      url(urlString)
        .addHeader("X-Bintray-Debian-Distribution", distributions.mkString(","))
        .addHeader("X-Bintray-Debian-Component", components)
        .addHeader("X-Bintray-Debian-Architecture", architecture) <<< file)

    log.info(s"Uploading ${file.getName} to $urlString")

    val response = Await.result(Http(request), Duration.Inf)

    val responseText = s"[${response.getStatusCode} ${response.getStatusText}] ${response.getResponseBody}"

    if (response.getStatusCode >= 200 && response.getStatusCode <= 299)
      log.info(responseText)
    else
      sys.error(responseText)
  }

  def publishRpm(file: File, version: String, bintrayCredentialsFile: File, log: Logger): Unit = {
    val urlString =
      s"https://api.bintray.com/content/lightbend/rpm/reactive-cli/${RpmBuildTarget.normalizeVersion(version)}/${file.getName}"

    val request =
      withAuth(Bintray.ensuredCredentials(bintrayCredentialsFile, log))(url(urlString) <<< file)

    log.info(s"Uploading ${file.getName} to $urlString")

    val response = Await.result(Http(request), Duration.Inf)

    val responseText = s"[${response.getStatusCode} ${response.getStatusText}] ${response.getResponseBody}"

    if (response.getStatusCode >= 200 && response.getStatusCode <= 299)
      log.info(responseText)
    else
      sys.error(responseText)
  }

  def publishTarGz(file: File, version: String, bintrayCredentialsFile: File, log: Logger): Unit = {
    val urlString =
      s"https://api.bintray.com/content/lightbend/generic/reactive-cli/$version/${file.getName}"

    val request =
      withAuth(Bintray.ensuredCredentials(bintrayCredentialsFile, log))(url(urlString) <<< file)

    log.info(s"Uploading ${file.getName} to $urlString")

    val response = Await.result(Http(request), Duration.Inf)

    val responseText = s"[${response.getStatusCode} ${response.getStatusText}] ${response.getResponseBody}"

    if (response.getStatusCode >= 200 && response.getStatusCode <= 299)
      log.info(responseText)
    else
      sys.error(responseText)
  }

  private def withAuth(credentials: Option[BintrayCredentials])(request: Req) =
    credentials.fold(request)(c => request.as_!(c.user, c.password))
} 
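All three uploads block with Duration.Inf, which is reasonable for a publish task that must finish before the build continues, but it also means a hung connection stalls sbt indefinitely. If a bound is preferred, the wait can be wrapped with a finite timeout and Try; a sketch of such a helper (awaitOrFail is hypothetical, not part of the plugin):

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

object AwaitHelpers {
  // Bound a blocking wait and turn any failure or timeout into a build error.
  def awaitOrFail[T](f: Future[T], limit: FiniteDuration, what: String): T =
    Try(Await.result(f, limit)) match {
      case Success(value) => value
      case Failure(e)     => sys.error(s"$what did not complete within $limit: ${e.getMessage}")
    }
}

The publish methods could then call, for example, awaitOrFail(Http(request), 5.minutes, s"upload of ${file.getName}") in place of Await.result(Http(request), Duration.Inf).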
Example 125
Source File: RetrySpec.scala    From seahorse-workflow-executor   with Apache License 2.0 5 votes vote down vote up
package io.deepsense.commons.utils

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.language.postfixOps

import akka.actor.ActorSystem
import akka.util.Timeout
import org.scalatest.{Matchers, WordSpec}

import io.deepsense.commons.utils.RetryActor.{RetriableException, RetryLimitReachedException}

class RetrySpec extends WordSpec with Matchers {

  val uutName = classOf[Retry[_]].getSimpleName.filterNot(_ == '$')

  trait Setup {
    def generateUUT[T](retryLimitCount: Int)(toDo: => Future[T]): Retry[T] = new {
      override val workDescription = Some("test work")

      override val actorSystem: ActorSystem = ActorSystem()

      override val retryInterval = 1 nano

      override val retryLimit = retryLimitCount

      override val timeout = Timeout(1 minute)

    } with Retry[T] {
      override def work: Future[T] = toDo
    }
  }

  s"A $uutName" should {
    "complete its work" when {
      "no exceptions are thrown" in {
        new Setup {
          val uut = generateUUT(0) {
            Future.successful(2 * 3 + 8)
          }

          Await.result(
            uut.tryWork, Duration.Inf) shouldBe 14
        }
      }

      "only retriable exceptions are thrown and retry limit is not reached" in {
        new Setup {
          var count = 3
          val uut = generateUUT(3) {
            if (count > 0) {
              count -= 1
              Future.failed(RetriableException(s"Thrown because count is ${count + 1}", None))
            } else {
              Future.successful("success")
            }
          }

          Await.result(
            uut.tryWork, Duration.Inf
          ) shouldBe "success"

          count shouldBe 0
        }
      }
    }

    "fail" when {
      "retry limit is reached" in {
        new Setup {
          val uut = generateUUT(10) {
            Future.failed(RetriableException(s"This will never succeed, yet we keep trying", None))
          }

          a [RetryLimitReachedException] shouldBe thrownBy (Await.result(uut.tryWork, Duration.Inf))

        }
      }

      "unexpected exception is thrown" in {
        var count = 1
        new Setup {
          val uut = generateUUT(10) {
            if (count == 0) {
              Future.failed(new RuntimeException("Thrown because counter reached zero"))
            } else {
              count -= 1
              Future.failed(RetriableException(s"Thrown because counter was ${count + 1}", None))
            }
          }

          a [RuntimeException] shouldBe thrownBy (Await.result(uut.tryWork, Duration.Inf))
          count shouldBe 0
        }
      }
    }
  }
} 
Example 126
Source File: PythonNotebook.scala    From seahorse-workflow-executor   with Apache License 2.0 5 votes vote down vote up
package io.deepsense.deeplang.doperations

import java.io.ByteArrayInputStream

import io.deepsense.commons.utils.Version
import io.deepsense.deeplang.DOperation.Id
import io.deepsense.deeplang.ExecutionContext
import io.deepsense.deeplang.doperables.dataframe.DataFrame
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.reflect.runtime.{universe => ru}
import scala.util.Failure

import io.deepsense.commons.rest.client.NotebookRestClient

case class PythonNotebook()
  extends Notebook {

  override val id: Id = "e76ca616-0322-47a5-b390-70c9668265dd"
  override val name: String = "Python Notebook"
  override val description: String = "Creates a Python notebook with access to the DataFrame"

  override val since: Version = Version(1, 0, 0)
  override val notebookType: String = "python"

  override protected def execute(dataFrame: DataFrame)(context: ExecutionContext): Unit = {
    context.dataFrameStorage.setInputDataFrame(0, dataFrame.sparkDataFrame)
    headlessExecution(context)
  }

} 
Example 127
Source File: CustomCodeEntryPoint.scala    From seahorse-workflow-executor   with Apache License 2.0 5 votes vote down vote up
package io.deepsense.workflowexecutor.customcode

import java.util.concurrent.TimeoutException
import java.util.concurrent.atomic.AtomicReference

import scala.annotation.tailrec
import scala.concurrent.duration._
import scala.concurrent.{Await, Promise}

import org.apache.spark.api.java.JavaSparkContext
import org.apache.spark.sql.DataFrame
import org.apache.spark.{SparkConf, SparkContext}

import io.deepsense.commons.utils.Logging
import io.deepsense.deeplang._
import io.deepsense.sparkutils.SparkSQLSession


class CustomCodeEntryPoint(
    val sparkContext: SparkContext,
    val sparkSQLSession: SparkSQLSession,
    val dataFrameStorage: DataFrameStorage,
    val operationExecutionDispatcher: OperationExecutionDispatcher)
  extends Logging {
  import io.deepsense.workflowexecutor.customcode.CustomCodeEntryPoint._
  def getSparkContext: JavaSparkContext = sparkContext

  def getSparkSQLSession: SparkSQLSession = sparkSQLSession

  def getNewSparkSQLSession: SparkSQLSession = sparkSQLSession.newSession()

  def getSparkConf: SparkConf = sparkContext.getConf

  private val codeExecutor: AtomicReference[Promise[CustomCodeExecutor]] =
    new AtomicReference(Promise())

  private val pythonPort: AtomicReference[Promise[Int]] =
    new AtomicReference(Promise())

  def getCodeExecutor(timeout: Duration): CustomCodeExecutor =
    getFromPromise(codeExecutor.get, timeout)

  def getPythonPort(timeout: Duration): Int =
    getFromPromise(pythonPort.get, timeout)

  def registerCodeExecutor(newCodeExecutor: CustomCodeExecutor): Unit =
    replacePromise(codeExecutor, newCodeExecutor)

  def registerCallbackServerPort(newPort: Int): Unit =
    replacePromise(pythonPort, newPort)

  def retrieveInputDataFrame(workflowId: String, nodeId: String, portNumber: Int): DataFrame =
    dataFrameStorage.getInputDataFrame(workflowId, nodeId, portNumber).get

  def retrieveOutputDataFrame(workflowId: String, nodeId: String, portNumber: Int): DataFrame =
    dataFrameStorage.getOutputDataFrame(workflowId, nodeId, portNumber).get

  def registerOutputDataFrame(
      workflowId: String, nodeId: String, portNumber: Int, dataFrame: DataFrame): Unit =
    dataFrameStorage.setOutputDataFrame(workflowId, nodeId, portNumber, dataFrame)

  def executionCompleted(workflowId: String, nodeId: String): Unit =
    operationExecutionDispatcher.executionEnded(workflowId, nodeId, Right(()))

  def executionFailed(workflowId: String, nodeId: String, error: String): Unit =
    operationExecutionDispatcher.executionEnded(workflowId, nodeId, Left(error))
}

object CustomCodeEntryPoint {
  private case class PromiseReplacedException() extends Exception

  @tailrec
  private def getFromPromise[T](promise: => Promise[T], timeout: Duration): T = {
    try {
      Await.result(promise.future, timeout)
    } catch {
      case e: TimeoutException => throw e
      case e: PromiseReplacedException => getFromPromise(promise, timeout)
    }
  }

  private def replacePromise[T](promise: AtomicReference[Promise[T]], newValue: T): Unit = {
    val oldPromise = promise.getAndSet(Promise.successful(newValue))
    try {
      oldPromise.failure(new PromiseReplacedException)
    } catch {
      // The old promise will already have been completed on every swap except
      // the first, so the IllegalStateException is expected. We still try to
      // fail it, since someone might be blocked waiting on it.
      case e: IllegalStateException => ()
    }
  }

  case class CustomCodeEntryPointConfig(
    pyExecutorSetupTimeout: Duration = 5.seconds)
} 
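Worth noting in the example above: readers blocked in getFromPromise are woken by failing the old promise with PromiseReplacedException, and the @tailrec retry then waits on the freshly installed promise. The following standalone sketch (not from the project; all names here are invented for illustration) shows the same swap-and-retry mechanics in isolation:

import java.util.concurrent.atomic.AtomicReference

import scala.annotation.tailrec
import scala.concurrent.duration._
import scala.concurrent.{Await, Promise}

object PromiseSwapSketch extends App {
  private case class Replaced() extends Exception

  // A single slot holding the latest promise; writers swap it, readers retry.
  private val slot = new AtomicReference(Promise[Int]())

  @tailrec
  private def read(timeout: Duration): Int =
    try {
      Await.result(slot.get.future, timeout)
    } catch {
      case _: Replaced => read(timeout) // slot was swapped; wait on the new promise
    }

  private def write(value: Int): Unit = {
    val old = slot.getAndSet(Promise.successful(value))
    // Wake anyone blocked on the previous promise. It is already completed
    // on every swap after the first, hence the swallowed exception.
    try old.failure(Replaced()) catch { case _: IllegalStateException => () }
  }

  new Thread(() => { Thread.sleep(100); write(42) }).start()
  println(read(5.seconds)) // prints 42 once write() has run
}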
Example 128
Source File: BlockingBrain.scala    From sumobot   with Apache License 2.0
package com.sumologic.sumobot.brain

import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import com.sumologic.sumobot.brain.Brain._

import scala.concurrent.Await
import scala.concurrent.duration._

class BlockingBrain(brain: ActorRef) {

  def retrieve(key: String): Option[String] = {
    implicit val timeout = Timeout(2.seconds)
    Await.result(brain ? Retrieve(key), 2.seconds) match {
      case ValueRetrieved(_, value) => Some(value)
      case ValueMissing(_) => None
    }
  }

  def listValues(prefix: String = ""): Map[String, String] = {
    implicit val timeout = Timeout(2.seconds)
    Await.result(brain ? ListValues(prefix), 2.seconds) match {
      case ValueMap(map) => map
    }
  }

  def store(key: String, value: String): Unit = {
    brain ! Store(key, value)
  }

  def remove(key: String): Unit = {
    brain ! Remove(key)
  }
} 
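A hypothetical call site for BlockingBrain, assuming the project's InMemoryBrain actor (any ActorRef speaking the Brain protocol would do); the blocking wrapper is handy in tests or at a REPL:

package com.sumologic.sumobot.brain

import akka.actor.{ActorSystem, Props}

object BlockingBrainDemo extends App {
  val system = ActorSystem("brain-demo")
  val brain = new BlockingBrain(system.actorOf(Props[InMemoryBrain]))

  brain.store("greeting", "hello")
  println(brain.retrieve("greeting")) // expected: Some(hello)
  brain.remove("greeting")
  println(brain.retrieve("greeting")) // expected: None

  system.terminate()
}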
Example 129
Source File: CalculatorSpec.scala    From Scalaprof   with GNU General Public License v2.0
package actors

import org.scalatest._
import akka.testkit.TestActorRef
import scala.concurrent.duration._
import scala.concurrent.Await
import akka.pattern.ask
import scala.util._
import scala.io.Source
import scala.concurrent._
import com.typesafe.config.{ ConfigFactory, Config }
import akka.actor.{ Actor, ActorSystem, Props, ActorRef }
import akka.util.Timeout
import java.net.URL
import org.scalatest.concurrent._
import org.scalatest.time._
import edu.neu.coe.scala.numerics.Rational
import models._


class CalculatorSpec extends FlatSpec with Matchers with Futures with ScalaFutures with Inside {
  implicit val system = ActorSystem("CountWords")  
  import play.api.libs.concurrent.Execution.Implicits.defaultContext
  implicit val timeout: Timeout = Timeout(10 seconds)

  "Rational Calculator" should "yield empty list for /" in {
      val lookup: String=>Option[Rational] = RationalMill.constants.get _
      val conv: String=>Try[Rational] = RationalMill.valueOf _
      val parser = new ExpressionParser[Rational](conv,lookup)
      val mill: Mill[Rational] = RationalMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xsf = (taf ? View).mapTo[Seq[Rational]]
      val nf = xsf map { case xs => xs.size }
      whenReady(nf, timeout(Span(6, Seconds))) { case 0 => }
  }
  it should "yield 1 for 1" in {
      val lookup: String=>Option[Rational] = RationalMill.constants.get _
      val conv: String=>Try[Rational] = RationalMill.valueOf _
      val parser = new ExpressionParser[Rational](conv,lookup)
      val mill: Mill[Rational] = RationalMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xtf = (taf ? "1").mapTo[Try[Rational]]
      whenReady(xtf, timeout(Span(6, Seconds))) { case Success(Rational(1,1)) => }
  }
  it should "yield 1 when given floating point problem" in {
      val lookup: String=>Option[Rational] = RationalMill.constants.get _
      val conv: String=>Try[Rational] = RationalMill.valueOf _
      val parser = new ExpressionParser[Rational](conv,lookup)
      val mill: Mill[Rational] = RationalMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xtf = (taf ? "0.2 0.1 + 10 * 3 /").mapTo[Try[Rational]]
      whenReady(xtf, timeout(Span(6, Seconds))) { case Success(Rational(1,1)) => }
  }
  "Double Calculator" should "yield empty list for /" in {
      val lookup: String=>Option[Double] = DoubleMill.constants.get _
      val conv: String=>Try[Double] = DoubleMill.valueOf _
      val parser = new ExpressionParser[Double](conv,lookup)
      val mill: Mill[Double] = DoubleMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xsf = (taf ? View).mapTo[Seq[Double]]
      val nf = xsf map { case xs => xs.size }
      whenReady(nf, timeout(Span(6, Seconds))) { case 0 => }
  }
  
  // This test suffers from a very peculiar bug which might even be a bug
  // in the Scala compiler. Kudos to you if you can fix it!!
  ignore should "yield 1 for 1" in {
      val lookup: String=>Option[Double] = DoubleMill.constants.get _
      val conv: String=>Try[Double] = DoubleMill.valueOf _
      val parser = new ExpressionParser[Double](conv,lookup)
      val mill: Mill[Double] = DoubleMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xtf = (taf ? "1").mapTo[Try[Double]]
      whenReady(xtf, timeout(Span(6, Seconds))) { case Success(1.0) => }
  }
} 
Example 130
Source File: PluginRegistryTest.scala    From sumobot   with Apache License 2.0
package com.sumologic.sumobot.core

import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.testkit.TestKit
import akka.util.Timeout
import com.sumologic.sumobot.core.PluginRegistry.{Plugin, PluginList, RequestPluginList}
import com.sumologic.sumobot.plugins.BotPlugin.{PluginAdded, PluginRemoved}
import com.sumologic.sumobot.plugins.help.Help
import com.sumologic.sumobot.test.annotated.SumoBotTestKit
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.Await
import scala.concurrent.duration._

class PluginRegistryTest
  extends SumoBotTestKit(ActorSystem("PluginRegistryTest"))
  with BeforeAndAfterAll {

  "PluginRegistry" should {
    "maintain a list of all registered plugins" in {

      implicit val timeout = Timeout(1.second)
      val reg = system.actorOf(Props[PluginRegistry])
      def checkList(func: Seq[Plugin] => Unit) = {
        Await.result(reg ? RequestPluginList, 1.second) match {
          case PluginList(list) => func(list)
          case other => fail(s"Got $other instead.")
        }
      }

      val fakePlugin = system.actorOf(Props[Help])

      checkList(_.isEmpty should be(true))
      reg ! PluginAdded(fakePlugin, "hah")
      checkList(_.size should be(1))
      reg ! PluginRemoved(fakePlugin)
      checkList(_.isEmpty should be(true))
    }
  }

  override protected def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
  }
} 
Example 131
package highperfscala.concurrency.future

import java.util.concurrent.TimeUnit

import org.openjdk.jmh.annotations.Mode.Throughput
import org.openjdk.jmh.annotations._

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

@BenchmarkMode(Array(Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
@Warmup(iterations = 3, time = 5, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 30, time = 10, timeUnit = TimeUnit.SECONDS)
@Fork(value = 1, warmups = 1, jvmArgs = Array("-Xms1G", "-Xmx1G"))
class TransformFutureBenchmarks {

  import TransformFutureBenchmarks._

  @Benchmark
  def manyTransforms(state: TransformFutureState): Int = {
    import scala.concurrent.ExecutionContext.Implicits._
    val init = Future(0)
    val res = (1 until state.operations).foldLeft(init)((f, _) => f.map(_ + 1))
    Await.result(res, Duration("5 minutes"))
  }

  @Benchmark
  def oneTransform(state: TransformFutureState): Int = {
    import scala.concurrent.ExecutionContext.Implicits._
    val res = Future {
      (1 until state.operations).foldLeft(0)((acc, _) => acc + 1)
    }
    Await.result(res, Duration("5 minutes"))
  }

}

object TransformFutureBenchmarks {

  @State(Scope.Benchmark)
  class TransformFutureState {

    @Param(Array("5", "10"))
    var operations: Int = 0

  }

} 
Example 132
Source File: Backtesting.scala    From Scala-High-Performance-Programming   with MIT License
package highperfscala.concurrency.backtesting

import java.util.concurrent.TimeUnit

import org.joda.time.{DateTime, Interval}

import scala.concurrent.{Await, Future}

object Backtesting {

  sealed trait Strategy

  case class PnL(value: BigDecimal) extends AnyVal
  case class BacktestPerformanceSummary(pnl: PnL)

  case class Ticker(value: String) extends AnyVal

  def backtest(
    strategy: Strategy,
    ticker: Ticker,
    testInterval: Interval): BacktestPerformanceSummary = ???

  sealed trait VectorBasedReturnSeriesFrame

  def loadReturns(testInterval: Interval): VectorBasedReturnSeriesFrame = ???

  case object Dave1 extends Strategy
  case object Dave2 extends Strategy

  object Serial {
    def lastMonths(months: Int): Interval =
      new Interval(new DateTime().minusMonths(months), new DateTime())
    backtest(Dave1, Ticker("AAPL"), lastMonths(3))
    backtest(Dave1, Ticker("GOOG"), lastMonths(3))
    backtest(Dave2, Ticker("AAPL"), lastMonths(3))
    backtest(Dave2, Ticker("GOOG"), lastMonths(2))
  }

  object ForComprehension {
    def lastMonths(months: Int): Interval =
      new Interval(new DateTime().minusMonths(months), new DateTime())

    implicit val ec = scala.concurrent.ExecutionContext.Implicits.global
    val summariesF = for {
      firstDaveAapl <- Future(backtest(Dave1, Ticker("AAPL"), lastMonths(3)))
      firstDaveGoog <- Future(backtest(Dave1, Ticker("GOOG"), lastMonths(3)))
      secondDaveAapl <- Future(backtest(Dave2, Ticker("AAPL"), lastMonths(3)))
      secondDaveGoog <- Future(backtest(Dave2, Ticker("GOOG"), lastMonths(2)))
    } yield (firstDaveAapl, firstDaveGoog, secondDaveAapl, secondDaveGoog)

    Await.result(summariesF, scala.concurrent.duration.Duration(1, TimeUnit.SECONDS))

    Future(1).flatMap(f1 => Future(2).flatMap(f2 => Future(3).map(f3 => (f1, f2, f3))))
  }

  object Concurrency {
    def lastMonths(months: Int): Interval =
      new Interval(new DateTime().minusMonths(months), new DateTime())

    implicit val ec = scala.concurrent.ExecutionContext.Implicits.global
    val firstDaveAaplF = Future(backtest(Dave1, Ticker("AAPL"), lastMonths(3)))
    val firstDaveGoogF = Future(backtest(Dave1, Ticker("GOOG"), lastMonths(3)))
    val secondDaveAaplF = Future(backtest(Dave2, Ticker("AAPL"), lastMonths(3)))
    val secondDaveGoogF = Future(backtest(Dave2, Ticker("GOOG"), lastMonths(2)))
    val z = for {
      firstDaveAapl <- firstDaveAaplF
      firstDaveGoog <- firstDaveGoogF
      secondDaveAapl <- secondDaveAaplF
      secondDaveGoog <- secondDaveGoogF
    } yield (firstDaveAapl, firstDaveGoog, secondDaveAapl, secondDaveGoog)

  }

} 
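The contrast between the ForComprehension and Concurrency objects above is when the futures are created: a for-comprehension desugars into flatMap, so futures constructed inside it start one after another, while futures constructed beforehand run in parallel. A self-contained sketch (invented for illustration; timings are approximate):

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

object SequentialVsConcurrent extends App {
  def work(): Int = { Thread.sleep(500); 1 }

  def timed(block: => Unit): Long = {
    val start = System.currentTimeMillis()
    block
    System.currentTimeMillis() - start
  }

  // Futures created inside the for-comprehension start one after another (~1s).
  val sequentialMs = timed {
    Await.result(
      for { a <- Future(work()); b <- Future(work()) } yield a + b,
      5.seconds)
  }

  // Futures created up front run concurrently (~0.5s).
  val concurrentMs = timed {
    val fa = Future(work())
    val fb = Future(work())
    Await.result(for { a <- fa; b <- fb } yield a + b, 5.seconds)
  }

  println(s"sequential: ${sequentialMs}ms, concurrent: ${concurrentMs}ms")
}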
Example 133
Source File: TaskFutureBenchmarks.scala    From Scala-High-Performance-Programming   with MIT License
package highperfscala.concurrency.task

import java.util.concurrent.{ExecutorService, Executors, TimeUnit}

import org.openjdk.jmh.annotations.Mode.Throughput
import org.openjdk.jmh.annotations._

import scala.concurrent.{ExecutionContext, Future, Await}
import scala.concurrent.duration.Duration
import scalaz.concurrent.Task

@BenchmarkMode(Array(Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
@Warmup(iterations = 3, time = 5, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 30, time = 10, timeUnit = TimeUnit.SECONDS)
@Fork(value = 1, warmups = 1, jvmArgs = Array("-Xms1G", "-Xmx1G"))
class TaskFutureBenchmarks {

  import TaskFutureBenchmarks._

  @Benchmark
  def mapWithFuture(state: TaskFutureState): Int = {
    implicit val ec = state.context
    val init = Future(0)
    val res = (1 until state.operations).foldLeft(init)((f, _) => f.map(_ + 1))
    Await.result(res, Duration("5 minutes"))
  }

  @Benchmark
  def mapWithTask(state: TaskFutureState): Int = {
    val init = Task(0)(state.es)
    val res = (1 until state.operations).foldLeft(init)((t, _) => t.map(_ + 1))
    res.unsafePerformSync
  }

  @Benchmark
  def flatMapWithFuture(state: TaskFutureState): Int = {
    implicit val ec = state.context
    val init = Future(0)
    val res = (1 until state.operations).foldLeft(init)((f, _) =>
      f.flatMap(i => Future(i + 1)))
    Await.result(res, Duration("5 minutes"))
  }

  @Benchmark
  def flatMapWithTask(state: TaskFutureState): Int = {
    val init = Task(0)(state.es)
    val res = (1 until state.operations).foldLeft(init)((t, _) =>
      t.flatMap(i => Task(i + 1)(state.es)))
    res.unsafePerformSync
  }

}

object TaskFutureBenchmarks {

  @State(Scope.Benchmark)
  class TaskFutureState {

    @Param(Array("5", "10", "100"))
    var operations: Int = 0

    var es: ExecutorService = _
    var context: ExecutionContext = _

    @Setup(Level.Trial)
    def setup(): Unit = {
      es = Executors.newFixedThreadPool(20)
      context = ExecutionContext.fromExecutor(es)
    }

    @TearDown(Level.Trial)
    def tearDown(): Unit = {
      es.shutdownNow()
    }
  }

} 
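Besides raw throughput, the benchmark touches a semantic difference: Future is eager (its body is scheduled at construction), while scalaz Task is lazy (nothing runs until unsafePerformSync). A minimal sketch of that difference, assuming scalaz 7.2 as used above:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scalaz.concurrent.Task

object LazyVsEager extends App {
  Future(println("Future body runs as soon as it is constructed"))
  val task = Task(println("Task body runs only when performed"))
  Thread.sleep(100)       // give the Future's thread time to print
  task.unsafePerformSync  // only now does the Task body run
}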
Example 134
package highperfscala.concurrency.blocking

import java.util.concurrent.{ExecutorService, Executors, TimeUnit}

import highperfscala.concurrency.blocking.BlockingExample.{ClientId, Order, Ticker}
import org.openjdk.jmh.annotations.Mode.Throughput
import org.openjdk.jmh.annotations._

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

@BenchmarkMode(Array(Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
@Warmup(iterations = 3, time = 5, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 30, time = 10, timeUnit = TimeUnit.SECONDS)
@Fork(value = 1, warmups = 1, jvmArgs = Array("-Xms1G", "-Xmx1G"))
class BlockingFutureBenchmarks {

  import BlockingFutureBenchmarks._

  @Benchmark
  def withDefaultContext(state: BlockingFutureState): List[List[Order]] = {
    val futures = (1 until state.operations).map{_ =>
      BlockingExample.JdbcOrderRepository.findBuyOrders(
        state.clientId, state.ticker
      )(state.defaultC)
    }

    implicit val ex = state.defaultC
    Await.result(
      Future.sequence(futures).map(_.toList),
      Duration("5 minutes")
    )
  }

  @Benchmark
  def withDedicatedContext(state: BlockingFutureState): List[List[Order]] = {
    val futures = (1 until state.operations).map{_ =>
      BlockingExample.JdbcOrderRepository.findBuyOrders(
        state.clientId, state.ticker
      )(state.dedicatedC)
    }

    implicit val ex = state.defaultC  // we use CPU-bound context for computations below
    Await.result(
      Future.sequence(futures).map(_.toList),
      Duration("5 minutes")
    )
  }

}

object BlockingFutureBenchmarks {

  @State(Scope.Benchmark)
  class BlockingFutureState {

    @Param(Array("10", "1000"))
    var operations: Int = 0

    val clientId = ClientId(12345)
    val ticker = Ticker("FOO")

    var defaultC: ExecutionContext = _
    var dedicatedC: ExecutionContext = _
    var es: ExecutorService = _

    @Setup(Level.Trial)
    def setup(): Unit = {
      defaultC = scala.concurrent.ExecutionContext.global
      es = {
        val i = Runtime.getRuntime.availableProcessors * 20
        Executors.newFixedThreadPool(i)
      }
      dedicatedC = ExecutionContext.fromExecutorService(es)
    }

    @TearDown(Level.Trial)
    def tearDown(): Unit = {
      es.shutdownNow()
    }

  }

} 
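The pattern under measurement: blocking calls go to a dedicated, oversized pool so the CPU-sized default context is not starved by threads stuck in I/O. A minimal sketch of the wiring, with the repository method invented for illustration:

import java.util.concurrent.Executors

import scala.concurrent.{ExecutionContext, Future}

object DedicatedBlockingPool {
  // Oversized pool: its threads mostly sit in blocking I/O, not on CPU.
  private val blockingEc: ExecutionContext =
    ExecutionContext.fromExecutorService(
      Executors.newFixedThreadPool(Runtime.getRuntime.availableProcessors * 20))

  // Blocking work is confined to the dedicated pool...
  def findOrders(clientId: Int): Future[List[String]] =
    Future {
      Thread.sleep(50) // stand-in for a blocking JDBC query
      List(s"order-for-$clientId")
    }(blockingEc)

  // ...while transformations stay on the CPU-bound default context.
  def orderCount(clientId: Int): Future[Int] =
    findOrders(clientId).map(_.size)(ExecutionContext.global)
}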
Example 135
Source File: ExperimentVariantEventMongoServiceTest.scala    From izanami   with Apache License 2.0
package specs.mongo.abtesting

import domains.abtesting.events.impl.ExperimentVariantEventMongoService
import domains.abtesting.AbstractExperimentServiceTest
import domains.abtesting.events.ExperimentVariantEventService
import env.{DbDomainConfig, DbDomainConfigDetails, Mongo}
import org.scalactic.source.Position
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll}
import play.api.Configuration
import play.modules.reactivemongo.DefaultReactiveMongoApi
import reactivemongo.api.MongoConnection
import test.FakeApplicationLifecycle

import scala.concurrent.duration.DurationLong
import scala.concurrent.Await
import scala.util.Random

class ExperimentVariantEventMongoServiceTest
    extends AbstractExperimentServiceTest("Mongo")
    with BeforeAndAfter
    with BeforeAndAfterAll {

  import zio.interop.catz._

  val mongoApi = new DefaultReactiveMongoApi(
    Await.result(MongoConnection.fromString("mongodb://localhost:27017"), 5.seconds),
    s"dbtest-${Random.nextInt(50)}",
    false,
    Configuration.empty,
    new FakeApplicationLifecycle()
  )

  override def dataStore(name: String): ExperimentVariantEventService.Service = ExperimentVariantEventMongoService(
    DbDomainConfig(Mongo, DbDomainConfigDetails(name, None), None),
    mongoApi
  )

  override protected def before(fun: => Any)(implicit pos: Position): Unit = {
    super.before(fun)
    deleteAllData
  }

  override protected def afterAll(): Unit = {
    super.afterAll()

    deleteAllData
  }

  private def deleteAllData =
    Await.result(mongoApi.database.flatMap { _.drop() }, 30.seconds)

} 
Example 136
Source File: MongoJsonDataStoreTest.scala    From izanami   with Apache License 2.0
package specs.mongo.store

import env.{DbDomainConfig, DbDomainConfigDetails, Mongo}
import org.scalactic.source.Position
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll}
import play.api.Configuration
import play.modules.reactivemongo.DefaultReactiveMongoApi
import reactivemongo.api.MongoConnection
import store.AbstractJsonDataStoreTest
import test.FakeApplicationLifecycle

import scala.concurrent.duration.DurationLong
import scala.concurrent.Await
import scala.util.Random
import store.mongo.MongoJsonDataStore

class MongoJsonDataStoreTest extends AbstractJsonDataStoreTest("Mongo") with BeforeAndAfter with BeforeAndAfterAll {

  val mongoApi = new DefaultReactiveMongoApi(
    Await.result(MongoConnection.fromString("mongodb://localhost:27017"), 5.seconds),
    s"dbtest-${Random.nextInt(50)}",
    false,
    Configuration.empty,
    new FakeApplicationLifecycle()
  )

  override def dataStore(name: String): MongoJsonDataStore = MongoJsonDataStore(
    mongoApi,
    DbDomainConfig(Mongo, DbDomainConfigDetails(name, None), None)
  )

  override protected def before(fun: => Any)(implicit pos: Position): Unit = {
    super.before(fun)
    deleteAllData
  }

  override protected def afterAll(): Unit = {
    super.afterAll()

    deleteAllData
  }

  private def deleteAllData =
    Await.result(mongoApi.database.flatMap { _.drop() }, 30.seconds)

} 
Example 137
Source File: ExperimentVariantEventLevelDBServiceTest.scala    From izanami   with Apache License 2.0
package specs.leveldb.abtesting

import java.io.File

import domains.abtesting.events.impl.ExperimentVariantEventLevelDBService
import domains.abtesting.AbstractExperimentServiceTest
import domains.abtesting.events.ExperimentVariantEventService
import env.{DbDomainConfig, DbDomainConfigDetails, LevelDbConfig}
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll}
import test.FakeApplicationLifecycle

import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, Future}
import scala.util.Random

class ExperimentVariantEventLevelDBServiceTest
    extends AbstractExperimentServiceTest("LevelDb")
    with BeforeAndAfter
    with BeforeAndAfterAll {

  private val lifecycle: FakeApplicationLifecycle = new FakeApplicationLifecycle()

  override def dataStore(name: String): ExperimentVariantEventService.Service =
    ExperimentVariantEventLevelDBService(s"./target/leveldb-test/data-${Random.nextInt(1000)}")

  override protected def afterAll(): Unit = {
    super.afterAll()

    Await.result(Future.traverse(lifecycle.hooks) {
      _.apply()
    }, 5.seconds)

    import scala.reflect.io.Directory
    val directory = new Directory(new File("./target/leveldb-test/"))
    directory.deleteRecursively()

  }

} 
Example 138
Source File: LevelDBJsonDataStoreTest.scala    From izanami   with Apache License 2.0
package specs.leveldb.store

import java.io.File

import env.{DbDomainConfig, DbDomainConfigDetails, InMemory, LevelDbConfig}
import org.scalatest.BeforeAndAfterAll
import store.AbstractJsonDataStoreTest
import test.FakeApplicationLifecycle

import scala.concurrent.{Await, Future}
import scala.concurrent.duration.DurationInt
import scala.util.Random
import store.leveldb._
import store.datastore.JsonDataStore

class LevelDBJsonDataStoreTest extends AbstractJsonDataStoreTest("LevelDb") with BeforeAndAfterAll {

  private val lifecycle: FakeApplicationLifecycle = new FakeApplicationLifecycle()

  override def dataStore(name: String): JsonDataStore.Service =
    LevelDBJsonDataStore(s"./target/leveldb-storetest/data-${Random.nextInt(1000)}")

  override protected def afterAll(): Unit = {
    super.afterAll()

    Await.result(Future.traverse(lifecycle.hooks) {
      _.apply()
    }, 5.seconds)

    import scala.reflect.io.Directory
    val directory = new Directory(new File("./target/leveldb-test/"))
    directory.deleteRecursively()

  }
} 
Example 139
Source File: FutureAwaitWithFailFastFn.scala    From kafka-connect-common   with Apache License 2.0
package com.datamountaineer.streamreactor.connect.concurrent

import java.util.concurrent.{ExecutorService, TimeUnit}

import com.typesafe.scalalogging.StrictLogging

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future, Promise}
import scala.util.Failure

object FutureAwaitWithFailFastFn extends StrictLogging {

  def apply(executorService: ExecutorService, futures: Seq[Future[Unit]], duration: Duration): Unit = {
    //make sure we ask the executor to shutdown to ensure the process exits
    executorService.shutdown()

    val promise = Promise[Boolean]()

    //stop on the first failure
    futures.foreach { f =>
      f.failed.foreach { case t =>
        if (promise.tryFailure(t)) {
          executorService.shutdownNow()
        }
      }
    }

    val fut = Future.sequence(futures)
    fut.foreach { case t =>
      if (promise.trySuccess(true)) {
        val failed = executorService.shutdownNow()
        if (failed.size() > 0) {
          logger.error(s"${failed.size()} task have failed.")
        }
      }
    }

    Await.ready(promise.future, duration).value match {
      case Some(Failure(t)) =>
        executorService.awaitTermination(1, TimeUnit.MINUTES)
        //throw the underlying error
        throw t

      case _ =>
        executorService.awaitTermination(1, TimeUnit.MINUTES)
    }
  }

  def apply[T](executorService: ExecutorService, futures: Seq[Future[T]], duration: Duration = 1.hours): Seq[T] = {
    //make sure we ask the executor to shutdown to ensure the process exits
    executorService.shutdown()

    val promise = Promise[Boolean]()

    //stop on the first failure
    futures.foreach { f =>
      f.failed.foreach { case t =>
        if (promise.tryFailure(t)) {
          executorService.shutdownNow()
        }
      }
    }

    val fut = Future.sequence(futures)
    fut.foreach { case t =>
      if (promise.trySuccess(true)) {
        val failed = executorService.shutdownNow()
        if (failed.size() > 0) {
          logger.error(s"${failed.size()} task have failed.")
        }
      }
    }

    Await.ready(promise.future, duration).value match {
      case Some(Failure(t)) =>
        executorService.awaitTermination(1, TimeUnit.MINUTES)
        //throw the underlying error
        throw t

      case _ =>
        executorService.awaitTermination(1, TimeUnit.MINUTES)
        //return the result from each of the futures
        Await.result(Future.sequence(futures), 1.minute)
    }
  }
} 
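A hypothetical call site for the fail-fast helper above. The futures should be scheduled on the executor that is passed in, since the helper shuts it down as part of its contract:

import java.util.concurrent.Executors

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

import com.datamountaineer.streamreactor.connect.concurrent.FutureAwaitWithFailFastFn

object FailFastDemo extends App {
  val es = Executors.newFixedThreadPool(4)
  implicit val ec: ExecutionContext = ExecutionContext.fromExecutorService(es)

  val work = Seq(
    Future { Thread.sleep(100); 1 },
    Future { Thread.sleep(200); 2 }
  )

  // Blocks until every future finishes, or rethrows the first failure
  // after shutting the pool down.
  val results = FutureAwaitWithFailFastFn(es, work, 10.seconds)
  println(results) // List(1, 2)
}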
Example 140
Source File: HelloWorldAkkaServer.scala    From tapir   with Apache License 2.0
package sttp.tapir.examples

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Route
import sttp.tapir._
import sttp.tapir.server.akkahttp._

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import sttp.client._

object HelloWorldAkkaServer extends App {
  // the endpoint: single fixed path input ("hello"), single query parameter
  // corresponds to: GET /hello?name=...
  val helloWorld: Endpoint[String, Unit, String, Nothing] =
    endpoint.get.in("hello").in(query[String]("name")).out(stringBody)

  // converting an endpoint to a route (providing server-side logic); extension method comes from imported packages
  val helloWorldRoute: Route = helloWorld.toRoute(name => Future.successful(Right(s"Hello, $name!")))

  // starting the server
  implicit val actorSystem: ActorSystem = ActorSystem()
  import actorSystem.dispatcher

  val bindAndCheck = Http().bindAndHandle(helloWorldRoute, "localhost", 8080).map { _ =>
    // testing
    implicit val backend: SttpBackend[Identity, Nothing, NothingT] = HttpURLConnectionBackend()
    val result: String = basicRequest.response(asStringAlways).get(uri"http://localhost:8080/hello?name=Frodo").send().body
    println("Got result: " + result)

    assert(result == "Hello, Frodo!")
  }

  Await.result(bindAndCheck.transformWith { r => actorSystem.terminate().transform(_ => r) }, 1.minute)
} 
Example 141
Source File: TapsExample.scala    From scio   with Apache License 2.0
// Example: Use Futures and Taps to wait for resources
package com.spotify.scio.examples.extra

import com.spotify.scio._
import com.spotify.scio.io.Taps

import scala.concurrent.Await
import scala.concurrent.duration.Duration

// Set system properties to choose the appropriate taps algorithm.
// Set -Dtaps.algorithm=polling to wait for the resources to become available
// Set -Dtaps.algorithm=immediate to fail immediately if a resource is not available
object TapsExample {
  def main(cmdlineArgs: Array[String]): Unit = {
    import scala.concurrent.ExecutionContext.Implicits.global
    val taps = Taps() // entry point to acquire taps

    // extract Tap[T]s from two Future[Tap[T]]s
    val tap = for {
      t1 <- taps.textFile("kinglear.txt")
      t2 <- taps.textFile("macbeth.txt")
    } yield {
      // execution logic when both taps are available
      val (sc, args) = ContextAndArgs(cmdlineArgs)
      val out = (t1.open(sc) ++ t2.open(sc))
        .flatMap(_.split("[^a-zA-Z']+").filter(_.nonEmpty))
        .countByValue
        .map(kv => kv._1 + "\t" + kv._2)
        .materialize
      sc.run().waitUntilDone().tap(out)
    }

    val result = Await.ready(tap, Duration.Inf).value.take(10).toList

    println(result)
  }
} 
Example 142
Source File: AvroTapIT.scala    From scio   with Apache License 2.0
package com.spotify.scio.avro.types

import com.spotify.scio.avro.AvroTaps
import com.spotify.scio.io.Taps
import org.apache.avro.Schema.Parser
import org.apache.beam.sdk.io.FileSystems
import org.apache.beam.sdk.options.PipelineOptionsFactory
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

import scala.concurrent.Await
import scala.concurrent.duration.Duration

// scio-test/it:runMain PopulateTestData to re-populate data for integration tests
final class AvroTapIT extends AnyFlatSpec with Matchers {
  private val schema = new Parser().parse("""{
                                                    |  "type" : "record",
                                                    |  "name" : "Root",
                                                    |  "fields" : [ {
                                                    |    "name" : "word",
                                                    |    "type" : [ "string", "null" ]
                                                    |  }, {
                                                    |    "name" : "word_count",
                                                    |    "type" : [ "long", "null" ]
                                                    |  }, {
                                                    |    "name" : "corpus",
                                                    |    "type" : [ "string", "null" ]
                                                    |  }, {
                                                    |    "name" : "corpus_date",
                                                    |    "type" : [ "long", "null" ]
                                                    |  } ]
                                                    |}""".stripMargin)

  it should "read avro file" in {
    FileSystems.setDefaultPipelineOptions(PipelineOptionsFactory.create)

    val tap = AvroTaps(Taps()).avroFile(
      "gs://data-integration-test-eu/avro-integration-test/folder-a/folder-b/shakespeare.avro",
      schema = schema
    )
    val result = Await.result(tap, Duration.Inf)

    result.value.hasNext shouldBe true
  }
} 
Example 143
Source File: ScalaFutureHandlers.scala    From scio   with Apache License 2.0
package com.spotify.scio.transforms

import java.lang
import java.util.function.{Function => JFunction}

import scala.jdk.CollectionConverters._
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.Duration


trait ScalaFutureHandlers[T] extends FutureHandlers.Base[Future[T], T] {
  @transient
  implicit private lazy val immediateExecutionContext: ExecutionContext = new ExecutionContext {
    override def execute(runnable: Runnable): Unit = runnable.run()
    override def reportFailure(cause: Throwable): Unit =
      ExecutionContext.defaultReporter(cause)
  }

  override def waitForFutures(futures: lang.Iterable[Future[T]]): Unit = {
    Await.ready(Future.sequence(futures.asScala), Duration.Inf)
    ()
  }

  override def addCallback(
    future: Future[T],
    onSuccess: JFunction[T, Void],
    onFailure: JFunction[Throwable, Void]
  ): Future[T] =
    future.transform(r => { onSuccess(r); r }, t => { onFailure(t); t })
} 
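The calling-thread ExecutionContext above is deliberate: the callbacks registered in addCallback merely invoke user-supplied functions, so running them synchronously on the completing thread avoids a thread hop per future. The same trick in isolation (a sketch, not project code):

import scala.concurrent.{ExecutionContext, Future}

object CallingThreadEc extends App {
  implicit val immediate: ExecutionContext = new ExecutionContext {
    // Run each task synchronously on whichever thread submits it.
    override def execute(runnable: Runnable): Unit = runnable.run()
    override def reportFailure(cause: Throwable): Unit =
      ExecutionContext.defaultReporter(cause)
  }

  // Both the map and the println run inline, with no thread hop.
  Future(21).map(_ * 2).foreach(println) // prints 42
}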
Example 144
Source File: TapsTest.scala    From scio   with Apache License 2.0
package com.spotify.scio.io

import java.io.File
import java.nio.file.{Files, Path}
import java.util.UUID

import com.spotify.scio.CoreSysProps
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

import scala.concurrent.Await
import scala.concurrent.duration._

class TapsTest extends AnyFlatSpec with Matchers {
  val data = Seq("a", "b", "c")

  private def tmpFile: Path =
    new File(new File(CoreSysProps.TmpDir.value), "taps-test-" + UUID.randomUUID()).toPath

  private def writeText(p: Path, data: Seq[String]): Unit = {
    val writer = Files.newBufferedWriter(p)
    data.foreach { s =>
      writer.write(s)
      writer.newLine()
    }
    writer.close()
  }

  "ImmediateTap" should "work with text file" in {
    TapsSysProps.Algorithm.value = "immediate"
    val f = tmpFile
    writeText(f, data)
    val future = Taps().textFile(f.toString)
    future.isCompleted shouldBe true
    future.value.get.isSuccess shouldBe true
    Await.result(future, Duration.Inf).value.toSeq shouldBe data
    Files.delete(f)
  }

  it should "fail missing text file" in {
    TapsSysProps.Algorithm.value = "immediate"
    val f = tmpFile
    val future = Taps().textFile(f.toString)
    future.isCompleted shouldBe true
    future.value.get.isSuccess shouldBe false
  }

  "PollingTap" should "work with text file" in {
    TapsSysProps.Algorithm.value = "polling"
    TapsSysProps.PollingInitialInterval.value = "1000"
    TapsSysProps.PollingMaximumAttempts.value = "1"
    val f = tmpFile
    val future = Taps().textFile(f.toString)
    future.isCompleted shouldBe false
    writeText(f, data)

    val result = Await.result(future, 10.seconds)
    result.value.toSeq shouldBe data

    Files.delete(f)
  }

  it should "fail missing text file" in {
    TapsSysProps.Algorithm.value = "polling"
    TapsSysProps.PollingInitialInterval.value = "1000"
    TapsSysProps.PollingMaximumAttempts.value = "1"
    val f = tmpFile
    val future = Taps().textFile(f.toString)
    future.isCompleted shouldBe false

    Await.ready(future, 10.seconds)
  }
} 
Example 145
Source File: RequestDSL.scala    From twitter4s   with Apache License 2.0
package com.danielasfregola.twitter4s.helpers

import java.text.SimpleDateFormat
import java.util.Locale

import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.RawHeader
import akka.testkit.TestProbe
import com.danielasfregola.twitter4s.entities.RateLimit
import org.specs2.specification.AfterEach

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

abstract class RequestDSL extends TestActorSystem with FixturesSupport with AfterEach {

  def after = system.terminate

  private val timeout = 10 seconds

  val headers = List(RawHeader("x-rate-limit-limit", "15"),
                     RawHeader("x-rate-limit-remaining", "14"),
                     RawHeader("x-rate-limit-reset", "1445181993"))

  val rateLimit = {
    val dateFormatter = new SimpleDateFormat("EEE MMM dd HH:mm:ss ZZZZ yyyy", Locale.ENGLISH)
    val resetDate = dateFormatter.parse("Sun Oct 18 15:26:33 +0000 2015").toInstant
    new RateLimit(limit = 15, remaining = 14, reset = resetDate)
  }

  protected val transport = TestProbe()

  def when[T](future: Future[T]): RequestMatcher[T] = new RequestMatcher(future)

  class RequestMatcher[T](future: Future[T]) {
    protected def responder = new Responder(future)

    def expectRequest(req: HttpRequest): Responder[T] = {
      transport.expectMsg(timeout, req)
      responder
    }

    def expectRequest(fn: HttpRequest => Unit) = {
      transport.expectMsgPF(timeout) {
        case req: HttpRequest => fn(req)
      }
      responder
    }
  }

  class Responder[T](future: Future[T]) {
    def respondWith(response: HttpResponse): Await[T] = {
      transport.reply(response)
      new Await(future)
    }

    def respondWith(resourcePath: String): Await[T] =
      respondWith(HttpResponse(StatusCodes.OK, entity = HttpEntity(MediaTypes.`application/json`, load(resourcePath))))

    def respondWithRated(resourcePath: String): Await[T] =
      respondWith(
        HttpResponse(StatusCodes.OK,
                     headers = headers,
                     entity = HttpEntity(MediaTypes.`application/json`, load(resourcePath))))

    def respondWithOk: Await[Unit] = {
      val response =
        HttpResponse(StatusCodes.OK, entity = HttpEntity(MediaTypes.`application/json`, """{"code": "OK"}"""))
      transport.reply(response)
      new Await(Future.successful((): Unit))
    }
  }

  class Await[T](future: Future[T]) {
    private[helpers] val underlyingFuture = future

    def await(implicit duration: FiniteDuration = 20 seconds) =
      Await.result(future, duration)
  }

  implicit def awaitToReqMatcher[T](await: Await[T]) =
    new RequestMatcher(await.underlyingFuture)

} 
Example 146
Source File: FutureAndPromise.scala    From scala-tutorials   with MIT License
package com.baeldung.scala.concurrency

import java.math.BigInteger
import java.net.URL
import java.security.MessageDigest

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future, Promise}
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

object ScalaAndPromise {
  def sampleFuture(): Future[Int] = Future {
    println("Long running computation started.")
    val result = {
      Thread.sleep(5)
      5
    }
    println("Our computation, finally finished.")
    result
  }

  type Name = String
  type Email = String
  type Password = String
  type Avatar = URL

  case class User(name: Name, email: Email, password: Password, avatar: Avatar)

  def exist(email: Email): Future[Boolean] = Future {
    Thread.sleep(100) // Call to the database takes time
    true
  }

  def md5hash(str: String): String =
    new BigInteger(1,
      MessageDigest
        .getInstance("MD5")
        .digest(str.getBytes)
    ).toString(16)

  def avatar(email: Email): Future[Avatar] = Future {
    Thread.sleep(200) // Call to the database takes time
    new Avatar("http://avatar.example.com/user/23k520f23f4.png")
  }

  def createUser(name: Name, email: Email, password: Password): Future[User] =
    for {
      _ <- exist(email)
      avatar <- avatar(email)
      hashedPassword = md5hash(password)
    } yield User(name, email, hashedPassword, avatar)

  def runByPromise[T](block: => T)(implicit ec: ExecutionContext): Future[T] = {
    val p = Promise[T]()
    ec.execute { () =>
      try {
        p.success(block)
      } catch {
        case NonFatal(e) => p.failure(e)
      }
    }
    p.future
  }
}

object FutureAndPromiseApp extends App {

  import ScalaAndPromise._

  // Access to the value of Future by passing callback to the onComplete
  val userFuture: Future[User] = createUser("John", "[email protected]", "secret")
  userFuture.onComplete {
    case Success(user) =>
      println(s"User created: $user")
    case Failure(exception) =>
      println(s"Creating user failed due to the exception: $exception")
  }

  // Access to the value of Future by applying the result function on the Future value
  val user: User = Await.result(userFuture, Duration.Inf)

  // Forcing the Future value to be complete
  val completedFuture: Future[User] = Await.ready(userFuture, Duration.Inf)
  completedFuture.value.get match {
    case Success(user) =>
      println(s"User created: $user")
    case Failure(exception) =>
      println(s"Creating user failed due to the exception: $exception")
  }

} 
Example 147
Source File: PartialServerLogicAkka.scala    From tapir   with Apache License 2.0
package sttp.tapir.examples

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import sttp.client._
import sttp.tapir._
import sttp.tapir.server.PartialServerEndpoint
import sttp.tapir.server.akkahttp._

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

object PartialServerLogicAkka extends App {
  implicit val actorSystem: ActorSystem = ActorSystem()
  import actorSystem.dispatcher

  // authentication logic
  case class User(name: String)
  val AuthenticationErrorCode = 1001
  def auth(token: String): Future[Either[Int, User]] =
    Future {
      if (token == "secret") Right(User("Spock"))
      else Left(AuthenticationErrorCode)
    }

  // 1st approach: define a base endpoint, which has the authentication logic built-in
  val secureEndpoint: PartialServerEndpoint[User, Unit, Int, Unit, Nothing, Future] = endpoint
    .in(header[String]("X-AUTH-TOKEN"))
    .errorOut(plainBody[Int])
    .serverLogicForCurrent(auth)

  // extend the base endpoint to define (potentially multiple) proper endpoints, define the rest of the server logic
  val secureHelloWorld1WithLogic = secureEndpoint.get
    .in("hello1")
    .in(query[String]("salutation"))
    .out(stringBody)
    .serverLogic { case (user, salutation) => Future(Right(s"$salutation, ${user.name}!")) }

  // ---

  // 2nd approach: define the endpoint entirely first
  val secureHelloWorld2: Endpoint[(String, String), Int, String, Nothing] = endpoint
    .in(header[String]("X-AUTH-TOKEN"))
    .errorOut(plainBody[Int])
    .get
    .in("hello2")
    .in(query[String]("salutation"))
    .out(stringBody)

  // then, provide the server logic in parts
  val secureHelloWorld2WithLogic = secureHelloWorld2
    .serverLogicPart(auth)
    .andThen { case (user, salutation) => Future(Right(s"$salutation, ${user.name}!")) }

  // ---

  // interpreting as routes
  val helloWorld1Route: Route = secureHelloWorld1WithLogic.toRoute
  val helloWorld2Route: Route = secureHelloWorld2WithLogic.toRoute

  // starting the server
  val bindAndCheck = Http().bindAndHandle(helloWorld1Route ~ helloWorld2Route, "localhost", 8080).map { _ =>
    // testing
    implicit val backend: SttpBackend[Identity, Nothing, NothingT] = HttpURLConnectionBackend()

    def testWith(path: String, salutation: String, token: String): String = {
      val result: String = basicRequest
        .response(asStringAlways)
        .get(uri"http://localhost:8080/$path?salutation=$salutation")
        .header("X-AUTH-TOKEN", token)
        .send()
        .body

      println(s"For path: $path, salutation: $salutation, token: $token, got result: $result")
      result
    }

    assert(testWith("hello1", "Hello", "secret") == "Hello, Spock!")
    assert(testWith("hello2", "Hello", "secret") == "Hello, Spock!")
    assert(testWith("hello1", "Cześć", "secret") == "Cześć, Spock!")
    assert(testWith("hello2", "Cześć", "secret") == "Cześć, Spock!")
    assert(testWith("hello1", "Hello", "1234") == AuthenticationErrorCode.toString)
    assert(testWith("hello2", "Hello", "1234") == AuthenticationErrorCode.toString)
  }

  Await.result(bindAndCheck.transformWith { r => actorSystem.terminate().transform(_ => r) }, 1.minute)
} 
Example 148
Source File: MultipleEndpointsDocumentationAkkaServer.scala    From tapir   with Apache License 2.0
package sttp.tapir.examples

import java.util.concurrent.atomic.AtomicReference

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import com.github.ghik.silencer.silent
import io.circe.generic.auto._
import sttp.tapir._
import sttp.tapir.docs.openapi._
import sttp.tapir.json.circe._
import sttp.tapir.openapi.OpenAPI
import sttp.tapir.openapi.circe.yaml._
import sttp.tapir.server.akkahttp._
import sttp.tapir.swagger.akkahttp.SwaggerAkka

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

object MultipleEndpointsDocumentationAkkaServer extends App {
  // endpoint descriptions
  case class Author(name: String)
  case class Book(title: String, year: Int, author: Author)

  val booksListing: Endpoint[Unit, Unit, Vector[Book], Nothing] = endpoint.get
    .in("books")
    .in("list" / "all")
    .out(jsonBody[Vector[Book]])

  val addBook: Endpoint[Book, Unit, Unit, Nothing] = endpoint.post
    .in("books")
    .in("add")
    .in(
      jsonBody[Book]
        .description("The book to add")
        .example(Book("Pride and Prejudice", 1813, Author("Jane Austen")))
    )

  // server-side logic
  val books = new AtomicReference(
    Vector(
      Book("The Sorrows of Young Werther", 1774, Author("Johann Wolfgang von Goethe")),
      Book("Iliad", -8000, Author("Homer")),
      Book("Nad Niemnem", 1888, Author("Eliza Orzeszkowa")),
      Book("The Colour of Magic", 1983, Author("Terry Pratchett")),
      Book("The Art of Computer Programming", 1968, Author("Donald Knuth")),
      Book("Pharaoh", 1897, Author("Boleslaw Prus"))
    )
  )

  val booksListingRoute = booksListing.toRoute(_ => Future.successful(Right(books.get())))
  @silent("discarded")
  val addBookRoute = addBook.toRoute(book => Future.successful(Right(books.getAndUpdate(books => books :+ book))))

  // generating the documentation in yml; extension methods come from imported packages
  val openApiDocs: OpenAPI = List(booksListing, addBook).toOpenAPI("The tapir library", "1.0.0")
  val openApiYml: String = openApiDocs.toYaml

  // starting the server
  implicit val actorSystem: ActorSystem = ActorSystem()
  import actorSystem.dispatcher

  val routes = {
    import akka.http.scaladsl.server.Directives._
    booksListingRoute ~ addBookRoute ~ new SwaggerAkka(openApiYml).routes
  }

  val bindAndCheck = Http().bindAndHandle(routes, "localhost", 8080).map { _ =>
    // testing
    println("Go to: http://localhost:8080/docs")
    println("Press any key to exit ...")
    scala.io.StdIn.readLine()
  }

  // cleanup
  Await.result(bindAndCheck.transformWith { r => actorSystem.terminate().transform(_ => r) }, 1.minute)
} 
Example 149
Source File: MultipleServerEndpointsAkkaServer.scala    From tapir   with Apache License 2.0
package sttp.tapir.examples

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Route
import sttp.client._
import sttp.tapir._
import sttp.tapir.server.akkahttp._

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

object MultipleServerEndpointsAkkaServer extends App {
  // endpoint descriptions, together with the server logic
  val endpoint1 = endpoint.get.in("endpoint1").out(stringBody).serverLogic[Future] { _ => Future.successful(Right("ok1")) }
  val endpoint2 =
    endpoint.get.in("endpoint2").in(path[String]).out(stringBody).serverLogic[Future] { path => Future.successful(Right(s"ok2: $path")) }

  // converting the endpoints to a (single) route
  val route: Route = List(endpoint1, endpoint2).toRoute

  // starting the server
  implicit val actorSystem: ActorSystem = ActorSystem()
  import actorSystem.dispatcher

  val bindAndCheck = Http().bindAndHandle(route, "localhost", 8080).map { _ =>
    // testing
    implicit val backend: SttpBackend[Identity, Nothing, NothingT] = HttpURLConnectionBackend()

    val result1: String = basicRequest.response(asStringAlways).get(uri"http://localhost:8080/endpoint1").send().body
    println("Got result (1): " + result1)
    assert(result1 == "ok1")

    val result2: String = basicRequest.response(asStringAlways).get(uri"http://localhost:8080/endpoint2/apple").send().body
    println("Got result (2): " + result2)
    assert(result2 == "ok2: apple")
  }

  Await.result(bindAndCheck.transformWith { r => actorSystem.terminate().transform(_ => r) }, 1.minute)
} 
Example 150
Source File: StreamingAkkaServer.scala    From tapir   with Apache License 2.0
package sttp.tapir.examples

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Route
import akka.stream.scaladsl.Source
import akka.util.ByteString
import sttp.client._
import sttp.tapir._
import sttp.tapir.server.akkahttp._

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

object StreamingAkkaServer extends App {
  // The endpoint: corresponds to GET /receive.
  // We need to provide both the schema of the value (for documentation), as well as the format (media type) of the
  // body. Here, the schema is a `string` and the media type is `text/plain`.
  val streamingEndpoint: Endpoint[Unit, Unit, Source[ByteString, Any], Source[ByteString, Any]] =
    endpoint.get.in("receive").out(streamBody[Source[ByteString, Any]](schemaFor[String], CodecFormat.TextPlain()))

  // converting an endpoint to a route (providing server-side logic); extension method comes from imported packages
  val testStream: Source[ByteString, Any] = Source.repeat("Hello!").take(10).map(s => ByteString(s))
  val streamingRoute: Route = streamingEndpoint.toRoute(_ => Future.successful(Right(testStream)))

  // starting the server
  implicit val actorSystem: ActorSystem = ActorSystem()
  import actorSystem.dispatcher

  val bindAndCheck = Http().bindAndHandle(streamingRoute, "localhost", 8080).map { _ =>
    // testing
    implicit val backend: SttpBackend[Identity, Nothing, NothingT] = HttpURLConnectionBackend()
    val result: String = basicRequest.response(asStringAlways).get(uri"http://localhost:8080/receive").send().body
    println("Got result: " + result)

    assert(result == "Hello!" * 10)
  }

  Await.result(bindAndCheck.transformWith { r => actorSystem.terminate().transform(_ => r) }, 1.minute)
} 
Example 151
Source File: StatsActorSpec.scala    From coral   with Apache License 2.0
package io.coral.actors.transform

import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestActorRef, TestKit}
import akka.util.Timeout
import io.coral.actors.CoralActorFactory
import io.coral.api.DefaultModule
import org.json4s.JsonAST.JValue
import org.json4s.JsonDSL._
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import scala.concurrent.Await
import scala.concurrent.duration._

class StatsActorSpec(_system: ActorSystem)
	extends TestKit(_system)
	with ImplicitSender
	with WordSpecLike
	with Matchers
	with BeforeAndAfterAll {
	def this() = this(ActorSystem("StatsActorSpec"))

	override def afterAll(): Unit = {
		TestKit.shutdownActorSystem(system)
	}

	implicit val timeout = Timeout(100.millis)
	implicit val injector = new DefaultModule(system.settings.config)

	def createStatsActor: StatsActor = {
		val createJson = parse( """{ "type": "stats", "params": { "field": "val" } }""")
			.asInstanceOf[JObject]
		val props = CoralActorFactory.getProps(createJson).get
		val actorRef = TestActorRef[StatsActor](props)
		actorRef.underlyingActor
	}

	val expectedInitialState = Map(
		("count", render(0L)),
		("avg", render(JNull)),
		("sd", render(JNull)),
		("min", render(JNull)),
		("max", render(JNull))
	)

	"StatsActor" should {
		"have a field corresponding to the json definition" in {
			val actor = createStatsActor
			actor.field should be("val")
		}

		"supply it's state" in {
			val actor = createStatsActor
			actor.state should be(expectedInitialState)
		}

		"accept a value as trigger" in {
			val actor = createStatsActor
			val triggerJson = parse( """{ "bla": 1.0, "val": 2.7 }""").asInstanceOf[JObject]
			actor.trigger(triggerJson)
			actor.state should be(
				Map(
					("count", render(1L)),
					("avg", render(2.7)),
					("sd", render(0.0)),
					("min", render(2.7)),
					("max", render(2.7))
				))
		}

		"have timer reset statistics" in {
			val actor = createStatsActor
			val triggerJson = parse( """{ "val": 2.7 }""").asInstanceOf[JObject]
			actor.trigger(triggerJson)
			actor.state should be(
				Map(
					("count", render(1L)),
					("avg", render(2.7)),
					("sd", render(0.0)),
					("min", render(2.7)),
					("max", render(2.7))
				))
			val future = actor.timer
			val json = Await.result(future, timeout.duration).get
			json should be(JNothing)
			actor.state should be(expectedInitialState)
		}
	}
} 
Example 152
Source File: ErrorOutputsAkkaServer.scala    From tapir   with Apache License 2.0
package sttp.tapir.examples

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Route
import sttp.client._
import sttp.tapir._
import sttp.tapir.server.akkahttp._
import sttp.tapir.json.circe._
import io.circe.generic.auto._

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

object ErrorOutputsAkkaServer extends App {
  // the endpoint description
  case class Result(result: Int)

  val errorOrJson: Endpoint[Int, String, Result, Nothing] =
    endpoint.get
      .in(query[Int]("amount"))
      .out(jsonBody[Result])
      .errorOut(stringBody)

  // converting an endpoint to a route
  val errorOrJsonRoute: Route = errorOrJson.toRoute {
    case x if x < 0 => Future.successful(Left("Invalid parameter, smaller than 0!"))
    case x          => Future.successful(Right(Result(x * 2)))
  }

  // starting the server
  implicit val actorSystem: ActorSystem = ActorSystem()
  import actorSystem.dispatcher

  val bindAndCheck = Http().bindAndHandle(errorOrJsonRoute, "localhost", 8080).map { _ =>
    // testing
    implicit val backend: SttpBackend[Identity, Nothing, NothingT] = HttpURLConnectionBackend()

    val result1: Either[String, String] = basicRequest.get(uri"http://localhost:8080?amount=-5").send().body
    println("Got result (1): " + result1)
    assert(result1 == Left("Invalid parameter, smaller than 0!"))

    val result2: Either[String, String] = basicRequest.get(uri"http://localhost:8080?amount=21").send().body
    println("Got result (2): " + result2)
    assert(result2 == Right("""{"result":42}"""))
  }

  Await.result(bindAndCheck.transformWith { r => actorSystem.terminate().transform(_ => r) }, 1.minute)
} 
Example 153
Source File: PlayServerTests.scala    From tapir   with Apache License 2.0
package sttp.tapir.server.play

import akka.actor.ActorSystem
import cats.data.NonEmptyList
import cats.effect.{IO, Resource}
import play.api.Mode
import play.api.mvc.{Handler, RequestHeader}
import play.api.routing.Router
import play.api.routing.Router.Routes
import play.core.server.{DefaultAkkaHttpServerComponents, ServerConfig}
import sttp.tapir.Endpoint
import sttp.tapir.server.tests.ServerTests
import sttp.tapir.server.{DecodeFailureHandler, ServerDefaults, ServerEndpoint}
import sttp.tapir.tests.{Port, PortCounter}

import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{Await, Future}
import scala.reflect.ClassTag

class PlayServerTests extends ServerTests[Future, Nothing, Router.Routes] {
  override def multipleValueHeaderSupport: Boolean = false
  override def multipartInlineHeaderSupport: Boolean = false
  override def streamingSupport: Boolean = false

  private implicit val actorSystem: ActorSystem = ActorSystem()

  override protected def afterAll(): Unit = {
    Await.result(actorSystem.terminate(), 5.seconds)
    super.afterAll()
  }

  override def pureResult[T](t: T): Future[T] = Future.successful(t)

  override def suspendResult[T](t: => T): Future[T] = Future(t)

  override def route[I, E, O](
      e: ServerEndpoint[I, E, O, Nothing, Future],
      decodeFailureHandler: Option[DecodeFailureHandler]
  ): Routes = {
    implicit val serverOptions: PlayServerOptions =
      PlayServerOptions.default.copy(decodeFailureHandler = decodeFailureHandler.getOrElse(ServerDefaults.decodeFailureHandler))
    e.toRoute
  }

  override def routeRecoverErrors[I, E <: Throwable, O](e: Endpoint[I, E, O, Nothing], fn: I => Future[O])(implicit
      eClassTag: ClassTag[E]
  ): Routes = {
    e.toRouteRecoverErrors(fn)
  }

  override def server(routes: NonEmptyList[Routes], port: Port): Resource[IO, Unit] = {
    val components = new DefaultAkkaHttpServerComponents {
      override lazy val serverConfig: ServerConfig = ServerConfig(port = Some(port), address = "127.0.0.1", mode = Mode.Test)
      override def router: Router =
        Router.from(
          routes.reduce((a: Routes, b: Routes) => {
            val handler: PartialFunction[RequestHeader, Handler] = {
              case request => a.applyOrElse(request, b)
            }

            handler
          })
        )
    }
    val bind = IO {
      components.server
    }
    Resource.make(bind)(s => IO(s.stop())).map(_ => ())
  }

  override val portCounter: PortCounter = new PortCounter(38000)
} 
Example 154
Source File: AkkaHttpServerTests.scala    From tapir   with Apache License 2.0
package sttp.tapir.server.akkahttp

import cats.implicits._
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.Directives
import akka.http.scaladsl.server.Directives._
import cats.data.NonEmptyList
import cats.effect.{IO, Resource}
import sttp.client._
import com.typesafe.scalalogging.StrictLogging
import sttp.tapir.{Endpoint, endpoint, stringBody}
import sttp.tapir.server.tests.ServerTests
import sttp.tapir._
import sttp.tapir.server.{DecodeFailureHandler, ServerDefaults, ServerEndpoint}
import sttp.tapir.tests.{Port, PortCounter}

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.reflect.ClassTag

class AkkaHttpServerTests extends ServerTests[Future, AkkaStream, Route] with StrictLogging {
  private implicit var actorSystem: ActorSystem = _

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    actorSystem = ActorSystem()
  }

  override protected def afterAll(): Unit = {
    Await.result(actorSystem.terminate(), 5.seconds)
    super.afterAll()
  }

  override def route[I, E, O](
      e: ServerEndpoint[I, E, O, AkkaStream, Future],
      decodeFailureHandler: Option[DecodeFailureHandler] = None
  ): Route = {
    implicit val serverOptions: AkkaHttpServerOptions = AkkaHttpServerOptions.default.copy(
      decodeFailureHandler = decodeFailureHandler.getOrElse(ServerDefaults.decodeFailureHandler)
    )
    e.toRoute
  }

  override def routeRecoverErrors[I, E <: Throwable, O](e: Endpoint[I, E, O, AkkaStream], fn: I => Future[O])(implicit
      eClassTag: ClassTag[E]
  ): Route = {
    e.toRouteRecoverErrors(fn)
  }

  override def server(routes: NonEmptyList[Route], port: Port): Resource[IO, Unit] = {
    val bind = IO.fromFuture(IO(Http().bindAndHandle(routes.toList.reduce(_ ~ _), "localhost", port)))
    Resource.make(bind)(binding => IO.fromFuture(IO(binding.unbind())).void).void
  }

  override def pureResult[T](t: T): Future[T] = Future.successful(t)
  override def suspendResult[T](t: => T): Future[T] = {
    import scala.concurrent.ExecutionContext.Implicits.global
    Future { t }
  }

  override lazy val portCounter: PortCounter = new PortCounter(57000)

  if (testNameFilter.isEmpty) {
    test("endpoint nested in a path directive") {
      val e = endpoint.get.in("test" and "directive").out(stringBody).serverLogic(_ => pureResult("ok".asRight[Unit]))
      val port = portCounter.next()
      val route = Directives.pathPrefix("api")(e.toRoute)
      server(NonEmptyList.of(route), port).use { _ =>
        basicRequest.get(uri"http://localhost:$port/api/test/directive").send().map(_.body shouldBe Right("ok"))
      }.unsafeRunSync
    }
  }
} 
Example 155
Source File: MultiGroupedTransformDC.scala    From spark-flow   with Apache License 2.0 5 votes
package com.bloomberg.sparkflow.dc

import com.bloomberg.sparkflow.serialization.Hashing
import org.apache.spark.sql.{Dataset, Encoder, KeyValueGroupedDataset, SparkSession}

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
import scala.reflect.ClassTag
import scala.concurrent.ExecutionContext.Implicits.global



class MultiGroupedTransformDC[K, V, U, T: ClassTag]
(left: KeyValueGroupedDC[K, V],
 right: KeyValueGroupedDC[K, U],
 f: (KeyValueGroupedDataset[K, V], KeyValueGroupedDataset[K, U]) => Dataset[T])
(implicit tEncoder: Encoder[T]) extends DC[T](tEncoder, Seq(left, right)) {

  override def computeDataset(spark: SparkSession) = {
    val leftFuture = Future{left.get(spark)}
    val rightFuture = Future{right.get(spark)}
    val ld = Await.result(leftFuture, Duration.Inf)
    val rd = Await.result(rightFuture, Duration.Inf)
    val dataset = f(ld, rd)
    dataset
  }

  override def computeSignature() = {
    Hashing.hashString(left.getSignature + right.getSignature + Hashing.hashClass(f))
  }

} 
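The two Await.result(..., Duration.Inf) calls above are a simple fork/join: both sides are started eagerly as Futures so they compute in parallel, and the driver then blocks for each result. Stripped of the Spark types, the pattern looks like this (a sketch; a finite bound is usually safer outside of batch jobs):

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

def forkJoin[A, B](fa: => A, fb: => B): (A, B) = {
  val left  = Future(fa) // both computations start running immediately
  val right = Future(fb)
  (Await.result(left, Duration.Inf), Await.result(right, Duration.Inf))
}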
Example 156
Source File: MultiInputDC.scala    From spark-flow   with Apache License 2.0 5 votes
package com.bloomberg.sparkflow.dc

import com.bloomberg.sparkflow.serialization.Hashing
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Encoder, SparkSession}

import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{Await, Future}


class MultiInputDC[T, U, V](encoder: Encoder[T], left: DC[U], right: DC[V],
                            f: (RDD[U], RDD[V]) => RDD[T])
  extends DC[T](encoder, Seq(left, right)) {

  override def computeSignature() = {
    Hashing.hashString(left.getSignature + right.getSignature + Hashing.hashClass(f))
  }

  override def computeDataset(spark: SparkSession) = {
    val leftFuture = Future{left.getRDD(spark)}
    val rightFuture = Future{right.getRDD(spark)}
    val leftRDD = Await.result(leftFuture, Duration.Inf)
    val rightRDD = Await.result(rightFuture, Duration.Inf)
    val rdd = f(leftRDD, rightRDD)
    spark.createDataset(rdd)
  }


} 
Example 157
Source File: FutureCodec.scala    From aws-lambda-scala   with MIT License 5 votes
package io.github.mkotsur.aws.codecs

import java.io.ByteArrayOutputStream
import java.nio.charset.Charset

import io.circe.Encoder
import io.github.mkotsur.aws.handler.CanEncode
import io.github.mkotsur.aws.proxy.ProxyResponse
import io.circe.generic.auto._
import io.circe.syntax._
import cats.syntax.either.catsSyntaxEither

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{Failure, Success, Try}

private[aws] trait FutureCodec {
  implicit def canEncodeFuture[I](implicit canEncode: Encoder[I]): CanEncode[Future[I]] =
    CanEncode.instance[Future[I]]((os, responseEither, ctx) => {
      (for {
        response     <- responseEither.toTry
        futureResult <- Try(Await.result(response, ctx.getRemainingTimeInMillis millis))
        json         <- Try(canEncode(futureResult).noSpaces.getBytes)
        _            <- Try(os.write(json))
      } yield {
        ()
      }) match {
        case Success(v) => Right(v)
        case Failure(e) => Left(e)
      }
    })

  implicit def canEncodeProxyResponse[T](implicit canEncode: CanEncode[T]): CanEncode[ProxyResponse[T]] = CanEncode.instance[ProxyResponse[T]](
    (output, proxyResponseEither, ctx) => {

      def writeBody(bodyOption: Option[T]): Either[Throwable, Option[String]] =
        bodyOption match {
          case None => Right(None)
          case Some(body) =>
            val os     = new ByteArrayOutputStream()
            val result = canEncode.writeStream(os, Right(body), ctx)
            os.close()
            result.map(_ => Some(os.toString()))
        }

      val proxyResponseOrError = for {
        proxyResponse <- proxyResponseEither
        bodyOption    <- writeBody(proxyResponse.body)
      } yield
        ProxyResponse[String](
          proxyResponse.statusCode,
          proxyResponse.headers,
          bodyOption
        )

      val response = proxyResponseOrError match {
        case Right(proxyResponse) =>
          proxyResponse
        case Left(e) =>
          ProxyResponse[String](
            500,
            Some(Map("Content-Type" -> s"text/plain; charset=${Charset.defaultCharset().name()}")),
            Some(e.getMessage)
          )
      }

      output.write(response.asJson.noSpaces.getBytes)

      Right(())
    }
  )
} 
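The pivotal line in canEncodeFuture is Try(Await.result(response, ctx.getRemainingTimeInMillis millis)): it collapses the asynchronous handler result into a synchronous Try within the Lambda's remaining time budget. Reduced to its essentials (the budget parameter is an assumption standing in for ctx.getRemainingTimeInMillis):

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.Try

// Sketch: fold a Future into an Either, capturing the TimeoutException
// (or the Future's own failure) if it cannot deliver within `budget`.
def toEither[A](f: Future[A], budget: FiniteDuration): Either[Throwable, A] =
  Try(Await.result(f, budget)).toEither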
Example 158
Source File: FutureTrySpec.scala    From scala-common   with Apache License 2.0 5 votes
import com.softwaremill.futuretry._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.prop.TableDrivenPropertyChecks
import org.scalatest.matchers.must.Matchers

import scala.concurrent.duration.Duration
import scala.concurrent.{Future, Await, Promise}
import scala.util.{Failure, Success, Try}

class FutureTrySpec extends AnyFlatSpec with Matchers with TableDrivenPropertyChecks with ScalaFutures {

  import scala.concurrent.ExecutionContext.Implicits.global

  "tried" must "convert a successful result into a Success" in {
    val p = Promise[String]
    p.complete(Try("a"))

    val transformedFuture = p.future.tried

    transformedFuture.futureValue must be(Success("a"))
  }

  it must "convert an exceptional result into a Failure" in {
    val p = Promise[String]
    val exception = new RuntimeException("blah")
    p.complete(Try(throw exception))

    val transformedFuture = p.future.tried

    transformedFuture.futureValue must be(Failure(exception))
  }

  "transform" must "correctly transform between all Try variants in" in {
    val exception = new RuntimeException("bloh")

    val scenarios = Table[Try[String], Try[String] => Try[String], Try[String]] (
      ("original value", "transform", "expected output"),
      (Success("a"), identity[Try[String]], Success("a")),
      (Failure(exception), (x: Try[String]) => x match { case Failure(e) => Success(e.toString); case _ => ??? }, Success(exception.toString)),
      (Success("a"), (x: Try[String]) => x match { case Success(_) => Failure(exception); case _ => ??? }, Failure(exception)),
      (Failure(exception), identity[Try[String]], Failure(exception))
    )

    forAll(scenarios) {
      (orgValue, f, output) =>
        {
          val p = Promise[String]
          p.complete(orgValue)

          val transformedFuture = p.future.transformTry(f)

          transformedFuture.tried.futureValue must be(output)
        }
    }
  }

} 
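The tried and transformTry extensions exercised here come from softwaremill's futuretry. Since Scala 2.12 the first one can also be expressed directly with the standard library's transform, which this sketch shows for comparison:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Success, Try}

// Sketch: lift a Future's outcome into its value, so awaiting the result
// never throws; failures arrive as Failure(...) instead.
def tried[T](f: Future[T]): Future[Try[T]] =
  f.transform(outcome => Success(outcome))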
Example 159
Source File: GitFetcher.scala    From sbt-git-versioning   with MIT License 5 votes
package com.rallyhealth.sbt.versioning

import sbt.util.Logger

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future, TimeoutException}
import scala.util.control.NonFatal
import scala.sys.process.Process


object GitFetcher {

  case class FetchResult(remote: String, tag: String)

  // Matches lines like " * [new tag]  v1.2.3  -> v1.2.3" in git's fetch
  // output (the exact pattern is an assumption, reconstructed for this excerpt).
  private val tagResultRegex = """.*\[new tag\]\s+(\S+)\s+->.*""".r

  def fetchRemotes(remotes: Seq[String], timeout: Duration)(implicit logger: Logger): Seq[FetchResult] = {
    val outputLogger = new BufferingProcessLogger
    val processResult = Process("git remote") ! outputLogger

    processResult match {
      case 0 =>
        logger.debug("Fetching remote sources...")
        val existingRemotes = outputLogger.stdout

        val remotesToFetch = remotes.filter(existingRemotes.contains)
        if (remotesToFetch.nonEmpty) {
          logger.info("Fetching tags from: " + remotesToFetch.mkString(", "))
          remotesToFetch.flatMap(remote => fetchTagsFromRemote(remote, timeout))
        } else {
          logger.debug("No tags to fetch")
          Seq.empty[FetchResult]
        }

      case exitCode =>
        logger.error(s"Fetching remotes failed enumerating remotes [git exitCode=$exitCode]")
        Seq.empty[FetchResult]
    }
  }

  private def fetchTagsFromRemote(remote: String, timeout: Duration)(implicit logger: Logger): Seq[FetchResult] = {

    val outputLogger = new BufferingProcessLogger
    val process = Process(s"git fetch $remote --tags").run(outputLogger)
    val resultFuture = Future {
      if (process.exitValue() == 0) {
        outputLogger.stderr.filter(_.contains("[new tag]")).flatMap {
          case tagResultRegex(tag) =>
            logger.debug(s"Fetched from remote=$remote tag=$tag")
            Some(FetchResult(remote, tag))
          case line =>
            logger.warn(s"Unable to parse git result=$line, skipping")
            None
        }
      } else {
        logger.error(s"Fetching remote=$remote failed [git exitCode=${process.exitValue()}]")
        Seq.empty[FetchResult]
      }
    }

    try {
      val result = Await.result(resultFuture, timeout)
      logger.debug(s"Successfully fetched $remote")
      result
    } catch {
      case _: TimeoutException =>
        process.destroy()
        logger.error(s"Fetching remote=$remote timed out [git exitCode=${process.exitValue()}]")
        Seq.empty
      case NonFatal(exc) =>
        logger.error(s"Fetching remote=$remote failed [git exitCode=${process.exitValue()}]")
        logger.trace(exc)
        Seq.empty
    }
  }
} 
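Wrapping the blocking process.exitValue() call in a Future and bounding it with Await.result is what gives fetchTagsFromRemote its timeout; note that on TimeoutException the process is destroyed explicitly, because abandoning an Await does not stop the underlying work. A condensed sketch of that pattern (command and timeout are illustrative):

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future, TimeoutException}
import scala.sys.process._

def runWithTimeout(cmd: String, timeout: FiniteDuration): Option[Int] = {
  val process = Process(cmd).run()
  try Some(Await.result(Future(process.exitValue()), timeout))
  catch {
    case _: TimeoutException =>
      process.destroy() // the Await gave up; kill the still-running process
      None
  }
}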
Example 160
Source File: UpdateLogger.scala    From Scalaprof   with GNU General Public License v2.0 5 votes
package com.phasmid.hedge_fund.actors

import akka.actor.{ ActorRef, Props }
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import scala.concurrent.Await
import scala.language.postfixOps
import com.phasmid.hedge_fund.model.Model
import com.phasmid.hedge_fund.portfolio._


class UpdateLogger(blackboard: ActorRef) extends BlackboardActor(blackboard) {

  var portfolio = new Portfolio("", Nil)

  override def receive =
    {
      case Confirmation(id, model, attrs) =>
        log.debug(s"update for identifier: $id")
        if (model.isOption)
          processOption(id, model, attrs)
        else
          processStock(id, model)

      case PortfolioUpdate(p) =>
        log.debug(s"portfolio update for: ${p.name}")
        portfolio = p
        showPortfolio

      case m => super.receive(m)
    }

  implicit val timeout = Timeout(5 seconds)

  def processStock(identifier: String, model: Model) = {
    model.getKey("price") match {
      case Some(p) => {
        // sender is the MarketData actor
        val future = (sender ? SymbolQuery(identifier, List(p))).mapTo[QueryResponse]
        // TODO why are we waiting for this here?
        val result = Await.result(future, timeout.duration)
        result match {
          case QueryResponseValid(k,a) =>
            a map {
              case (k, v) => log.info(s"$identifier attribute $k has been updated to: $v")
            }
          case _ =>
        }
      }
      case None => log.warning(s"'price' not defined in model")
    }
  }

  def processOption(identifier: String, model: Model, attributes: Map[String, Any]) = {
    val key = "underlying"
    attributes.get(key) match {
      case Some(value) =>
        val future = (blackboard ? OptionQuery("id", value)).mapTo[QueryResponse]
        // TODO why are we waiting for this here?
        val result = Await.result(future, timeout.duration)
        result match {
          case QueryResponseValid(k,a) =>
              println(s"Action Required: re: qualifying option $identifier with underlying symbol: $k and attributes: $a")
          case _ =>
        }
      case None => log.warning(s"processOption: value not present for $key")
    }
  }

  def showPortfolio {
    println(s"Portfolio for ${portfolio.name}")
    portfolio.positions foreach { showPosition(_) }
  }

  def showPosition(position: Position) {
    println(s"position for ${position.symbol}: quantity=${position.quantity}; options=")
    position.contracts foreach { showContract(_) }
  }

  def showContract(contract: Contract) {
    println(s"contract: $contract")
  }
} 
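Both Await.result(future, timeout.duration) calls above are the blocking half of Akka's ask pattern: ? returns a Future of the reply and the implicit Timeout bounds both the ask and the wait (the TODOs in the source question whether blocking is needed at all). The shape in isolation, with the target actor and message assumed:

import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.Await

// Sketch: synchronously query an actor. The reply is untyped (Any), as
// with classic actors; callers typically mapTo or pattern match on it.
def askSync(target: ActorRef, msg: Any)(implicit timeout: Timeout): Any =
  Await.result(target ? msg, timeout.duration)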
Example 161
Source File: JsonYQLParserSpec.scala    From Scalaprof   with GNU General Public License v2.0 5 votes
package com.phasmid.hedge_fund.actors

import akka.actor.{ ActorSystem, Actor, Props, ActorRef }
import akka.testkit._
import org.scalatest.{ WordSpecLike, Matchers, BeforeAndAfterAll }
import scala.io.Source
import scala.concurrent.duration._
import spray.http._
import spray.http.MediaTypes._
import org.scalatest.Inside
import scala.language.postfixOps
import spray.http.ContentType.apply


class JsonYQLParserSpec(_system: ActorSystem) extends TestKit(_system) with ImplicitSender
    with WordSpecLike with Matchers with Inside with BeforeAndAfterAll {

  def this() = this(ActorSystem("JsonYQLParserSpec"))

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }

  import scala.language.postfixOps
  val json = Source.fromFile("src/test/resources/yqlExample.json") mkString

  "json conversion" in {
    val body = HttpEntity(MediaTypes.`application/json`, json.getBytes())
    val ok = JsonYQLParser.decode(body) match {
      case Right(x) =>
        val count = x.query.count
        count should equal(4)
        x.query.results.quote.length should equal(count)
        x.query.results.get(count - 1, "symbol") should matchPattern { case Some("MSFT") => }

      case Left(x) =>
        fail("decoding error: " + x)
    }
  }

  "send back" in {
    val blackboard = system.actorOf(Props.create(classOf[MockYQLBlackboard], testActor), "blackboard")
    val entityParser = _system.actorOf(Props.create(classOf[EntityParser], blackboard), "entityParser")
    val entity = HttpEntity(MediaTypes.`application/json`, json.getBytes())
    entityParser ! EntityMessage("json:YQL", entity)
    val msg = expectMsgClass(3.seconds, classOf[QueryResponseValid])
    println("msg received: " + msg)
    msg should matchPattern {
      case QueryResponseValid("MSFT", _) =>
    }
    inside(msg) {
      case QueryResponseValid(symbol, attributes) => attributes.get("Ask") should matchPattern { case Some("46.17") => }
    }
  }

}

import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import scala.concurrent.Await
import com.phasmid.hedge_fund.model.Model

class MockYQLUpdateLogger(blackboard: ActorRef) extends UpdateLogger(blackboard) {
  override def processStock(identifier: String, model: Model) = {
    model.getKey("price") match {
      case Some(p) => {
        // sender is the MarketData actor
        val future = sender ? SymbolQuery(identifier, List(p))
        val result = Await.result(future, timeout.duration).asInstanceOf[QueryResponseValid]
        result.attributes map {
          case (k, v) =>
            log.info(s"$identifier attribute $k has been updated to: $v")
            blackboard ! result
        }
      }
      case None => log.warning(s"'price' not defined in model")
    }
  }
}

class MockYQLBlackboard(testActor: ActorRef) extends Blackboard(Map(classOf[KnowledgeUpdate] -> "marketData", classOf[SymbolQuery] -> "marketData", classOf[OptionQuery] -> "marketData", classOf[CandidateOption] -> "optionAnalyzer", classOf[Confirmation] -> "updateLogger"),
  Map("marketData" -> classOf[MarketData], "optionAnalyzer" -> classOf[OptionAnalyzer], "updateLogger" -> classOf[MockYQLUpdateLogger])) {

  override def receive =
    {
      case msg: Confirmation => msg match {
        // Cut down on the volume of messages
        case Confirmation("MSFT", _, _) => super.receive(msg)
        case _ =>
      }
      case msg: QueryResponseValid => testActor forward msg

      case msg => super.receive(msg)
    }
} 
Example 162
Source File: HogzillaContinuous.scala    From hogzilla   with GNU General Public License v2.0 5 votes
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.hogzilla.hbase.HogHBaseRDD
import org.hogzilla.initiate.HogInitiate
import org.hogzilla.prepare.HogPrepare
import org.hogzilla.sflow._
import org.hogzilla.http.HogHTTP
import org.hogzilla.auth.HogAuth
import org.hogzilla.dns.HogDNS
import org.apache.hadoop.hbase.client.Delete
import scala.concurrent.Await


object HogzillaContinuous {
  
  def main(args: Array[String])
  {
    val sparkConf = new SparkConf()
                          .setAppName("HogzillaContinuous")
                          .set("spark.executor.memory", "512m")
                          .set("spark.default.parallelism", "16") 
                          
    val spark = new SparkContext(sparkConf)
    
    // Get the HBase RDD
    val HogRDD = HogHBaseRDD.connect(spark);
   
    //var i=0
    while(true) {
      //i=i+1
      val HogRDDAuth = HogHBaseRDD.connectAuth(spark)
      val summary = HogAuth.runDeleting(HogRDDAuth, spark)
      Thread.sleep(10000) // 10s
    }
    
    // Stop Spark
    spark.stop()
    
    // Close the HBase Connection
    HogHBaseRDD.close();

  }
  
} 
Example 163
Source File: RDDFailOverSuite.scala    From infinispan-spark   with Apache License 2.0 5 votes
package org.infinispan.spark.suites

import org.infinispan.client.hotrod.RemoteCache
import org.infinispan.spark._
import org.infinispan.spark.config.ConnectorConfiguration
import org.infinispan.spark.domain.Runner
import org.infinispan.spark.test.TestingUtil._
import org.infinispan.spark.test._
import org.scalatest._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.language.postfixOps

@DoNotDiscover
class RDDFailOverSuite extends FunSuite with Matchers with Spark with MultipleServers with FailOver {

   val NumEntries = 10000

   override def getConfiguration: ConnectorConfiguration = {
      super.getConfiguration.setServerList("127.0.0.1:11222;127.0.0.1:12222")
        .setWriteBatchSize(5)
   }

   ignore("RDD read failover") {
      val cache = getRemoteCache.asInstanceOf[RemoteCache[Int, Runner]]
      cache.clear()
      (0 until NumEntries).foreach(id => cache.put(id, new Runner(s"name$id", true, id * 10, 20)))

      val infinispanRDD = createInfinispanRDD[Int, String]

      val ispnIter = infinispanRDD.toLocalIterator
      var count = 0
      for (_ <- 1 to NumEntries / Cluster.getClusterSize) {
         ispnIter.next()
         count += 1
      }

      Cluster.failServer(0)

      while (ispnIter.hasNext) {
         ispnIter.next()
         count += 1
      }

      count shouldBe NumEntries
   }

   ignore("RDD write failover (Re-test with 10.1.0.Final)") {
      val cache = getRemoteCache.asInstanceOf[RemoteCache[Int, Runner]]
      cache.clear()

      val range1 = 1 to NumEntries
      val entities1 = for (num <- range1) yield new Runner(s"name$num", true, num * 10, 20)
      val rdd = sc.parallelize(range1.zip(entities1))

      val writeRDD = Future(rdd.writeToInfinispan(getConfiguration))
      waitForCondition({ () =>
         cache.size() > 0 //make sure we are already writing into the cache
      }, 2 seconds)
      Cluster.failServer(0)
      Await.ready(writeRDD, 30 second)

      cache.size() shouldBe NumEntries
      cache.get(350).getName shouldBe "name350"
   }
} 
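Await.ready(writeRDD, 30 second) rather than Await.result is deliberate here: the test only needs the write to have finished, success or failure, before asserting on the cache, and ready never rethrows. The difference in isolation:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

object AwaitReadyVsResult extends App {
  val failed: Future[Int] = Future(throw new RuntimeException("boom"))

  // Await.ready blocks until the Future completes but does not rethrow
  // a failure; inspect `value` afterwards.
  Await.ready(failed, 1.second)
  println(failed.value) // Some(Failure(java.lang.RuntimeException: boom))

  // Await.result(failed, 1.second) would rethrow the RuntimeException.
}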
Example 164
Source File: PersistenceIT.scala    From metronome   with Apache License 2.0 5 votes
package dcos.metronome.integrationtest

import org.scalatest.time.{Minutes, Span}
import play.api.libs.json.{JsArray, JsObject}

import scala.concurrent.Await
import scala.concurrent.duration._

class PersistenceIT extends MetronomeITBase {

  override val timeLimit = Span(3, Minutes)
  override lazy implicit val patienceConfig = PatienceConfig(180.seconds, interval = 1.second)

  "A job and run should be available after a restart of metronome" in withFixture() { f =>
    When("A job description is posted")
    val jobId = "persistence-my-job"
    val jobDef =
      s"""
        |{
        |  "id": "$jobId",
        |  "description": "A job that sleeps",
        |  "run": {
        |    "cmd": "sleep 120",
        |    "cpus": 0.02,
        |    "mem": 64,
        |    "disk": 0
        |  }
        |}
      """.stripMargin

    val resp = f.metronome.createJob(jobDef)

    Then("The response should be OK")
    resp shouldBe Created

    When("A job run is started")
    val startRunResp = f.metronome.startRun(jobId)

    Then("The response should be OK")
    startRunResp shouldBe Created

    eventually(timeout(30.seconds)) {
      val runsJson = f.metronome.getRuns(jobId)
      runsJson shouldBe OK
      val runs = runsJson.entityJson.as[JsArray]
      runs.value should have size 1

      val run = runs.value.head.as[JsObject]
      val status = run.value("status").as[String]
      status shouldBe "ACTIVE"
    }

    When("Metronome is stopped and restarted")
    Await.result(f.metronomeFramework.stop(), 30.seconds)
    Await.result(f.metronomeFramework.start(), 60.seconds)

    Then("The Job and the Run should be available")
    val jobResp = f.metronome.getJob(jobId)
    jobResp shouldBe OK
    (jobResp.entityJson \ "id").as[String] shouldBe jobId

    val runResp = f.metronome.getRuns(jobId)
    val runs = runResp.entityJson.as[JsArray]
    runs.value.length shouldBe 1
  }

} 
Example 165
Source File: MigrationImpl.scala    From metronome   with Apache License 2.0 5 votes
package dcos.metronome
package migration.impl

import dcos.metronome.migration.Migration
import dcos.metronome.repository.impl.kv.{JobHistoryPathResolver, JobRunPathResolver, JobSpecPathResolver}
import dcos.metronome.utils.state.{PersistentStore, PersistentStoreManagement, PersistentStoreWithNestedPathsSupport}
import org.slf4j.LoggerFactory

import scala.async.Async.{async, await}
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

class MigrationImpl(store: PersistentStore) extends Migration {
  import MigrationImpl._

  override def migrate()(implicit ec: ExecutionContext): Unit = {
    Await.result(initializeStore(), Duration.Inf)
    log.info("Migration successfully applied for version")
  }

  private[this] def initializeStore()(implicit ec: ExecutionContext): Future[Unit] =
    async {
      store match {
        case store: PersistentStoreManagement with PersistentStoreWithNestedPathsSupport =>
          await(store.initialize())
          await(store.createPath(JobSpecPathResolver.basePath))
          await(store.createPath(JobRunPathResolver.basePath))
          await(store.createPath(JobHistoryPathResolver.basePath))
        case _: PersistentStore =>
          log.info("Unsupported type of persistent store. Not running any migrations.")
          Future.successful(())
      }
    }

}

object MigrationImpl {
  private[migration] val log = LoggerFactory.getLogger(getClass)
} 
Example 166
Source File: TaglessFinal.scala    From Mastering-Functional-Programming   with MIT License 5 votes
package jvm

import scala.concurrent.{ Future, Await }
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration

import cats._, cats.implicits._

trait Capabilities[F[_]] {
  def resource(name: String): F[String]
  def notify(target: String, text: String): F[Unit]
}

object TaglessFinalExample extends App {
  implicit val capabilities: Capabilities[Future] = new Capabilities[Future] {
    import java.io.File
    import org.apache.commons.io.FileUtils

    def resource(name: String): Future[String] =
      Future { FileUtils.readFileToString(new File(name), "utf8") }

    def notify(target: String, text: String): Future[Unit] =
      Future { println(s"Notifying $target: $text") }
  }

  implicit val anotherEnvironmentCapabilities: Capabilities[Future] = new Capabilities[Future] {
    def resource(name: String): Future[String] = ???
    def notify(target: String, text: String): Future[Unit] = ???
  }

  implicit val logMonad: Monad[Future] = new Monad[Future] {
    def flatMap[A, B](fa: Future[A])(f: (A) ⇒ Future[B]): Future[B] =
      fa.flatMap { x =>
        println(s"Trace of the Future's result: $x")
        f(x)
      }
    
    def pure[A](x: A): Future[A] = Future(x)

    def tailRecM[A, B](a: A)(f: (A) ⇒ Future[Either[A, B]]): Future[B] = ???
  }

  def income[F[_]](implicit M: Monad[F], C: Capabilities[F]): F[Unit] =
    for {
      contents <- C.resource("sales.csv")
      total = contents
        .split("\n").toList.tail  // Collection of lines, drop the CSV header
        .map { _.split(",").toList match  // List[Double] - prices of each of the entries
          { case name :: price :: Nil => price.toDouble }
        }
        .sum
      _ <- C.notify("[email protected]", s"Total income made today: $total")
    } yield ()

  Await.result(income[Future](logMonad, capabilities), Duration.Inf)  // Block so that the application does not exit prematurely
}

object FacadeExample {
  trait Capabilities {
    def resource(name: String): String
    def notify(target: String, text: String): Unit
  }

  def income(c: Capabilities): Unit = {
    val contents = c.resource("sales.csv")
    val total = contents
      .split("\n").toList.tail  // Collection of lines, drop the CSV header
      .map { _.split(",").toList match  // List[Double] - prices of each of the entries
        { case name :: price :: Nil => price.toDouble }
      }
      .sum
    c.notify("[email protected]", s"Total income made today: $total")
  }
} 
Example 167
Source File: UpdateUnitTests.scala    From scala-cass   with MIT License 5 votes
package com.weather.scalacass.scsession

import com.datastax.driver.core.exceptions.InvalidQueryException
import com.weather.scalacass.Result

import concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{ Await, ExecutionContext }
import scala.concurrent.duration._

class UpdateUnitTests extends ActionUnitTests {

  case class Query(str: String)
  case class Update(l: Long, i: Option[Int])
  case class IfS(l: Long)

  def executeAsync[T](q: SCStatement[T], shouldSucceed: Boolean = true)(implicit ec: ExecutionContext): Result[T] = {
    val res = Await.result(q.executeAsync()(ec), 3.seconds)
    res.isRight shouldBe shouldSucceed
    res
  }

  "update" should "use IF EXISTS" in {
    val query = ss.update(table, Update(123, None), Query("asdf")).ifExists
    println(query.getStringRepr)
    println(executeAsync(query))
  }
  it should "use ttl" in {
    val query = ss.update(table, Update(123, None), Query("asdf")).usingTTL(1234)
    println(query.getStringRepr)
    println(executeAsync(query))
  }
  it should "use timestamp" in {
    val query = ss.update(table, Update(123, None), Query("asdf")).usingTimestamp(12345L)
    println(query.getStringRepr)
    println(executeAsync(query))
  }
  it should "use if statement" in {
    val query = ss.update(table, Update(123, Some(123)), Query("asdf")).`if`(IfS(123L))
    println(query.getStringRepr)
    println(executeAsync(query))
  }
  it should "combine all of them" in {
    val query = ss.update(table, Update(123, None), Query("asdf")).usingTTL(1234).`if`(IfS(123L)).usingTimestamp(12345L)
    val executed = query.executeAsync()
    Await.ready(executed, 3.seconds)
    executed.value.value.failure.exception shouldBe an[InvalidQueryException]
    query.execute().left.value shouldBe an[InvalidQueryException]

    println(s"broke: ${query.getStringRepr}")
    val fixedQuery = query.noTimestamp
    println(fixedQuery.getStringRepr)
    println(executeAsync(fixedQuery.noTimestamp))
  }
} 
Example 168
Source File: DeleteUnitTests.scala    From scala-cass   with MIT License 5 votes
package com.weather.scalacass.scsession

import com.datastax.driver.core.exceptions.InvalidQueryException
import com.weather.scalacass.{ Result, ScalaSession }

import concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{ Await, ExecutionContext }
import scala.concurrent.duration._

class DeleteUnitTests extends ActionUnitTests {
  case class SelectiveDelete(i: Int)
  case class Query(str: String)
  case class IfS(l: Long)

  def executeAsync[T](q: SCStatement[T], shouldSucceed: Boolean = true)(implicit ec: ExecutionContext): Result[T] = {
    val res = Await.result(q.executeAsync()(ec), 3.seconds)
    res.isRight shouldBe shouldSucceed
    res
  }

  "delete" should "use selective columns" in {
    val query = ss.delete[SelectiveDelete](table, Query("asdf"))
    println(query.getStringRepr)
    println(executeAsync(query))
  }
  it should "not use selective columns" in {
    val query = ss.delete[ScalaSession.NoQuery](table, Query("asdf"))
    println(query.getStringRepr)
    println(executeAsync(query))
  }
  it should "use timestamp" in {
    val query = ss.delete[ScalaSession.NoQuery](table, Query("asdf")).usingTimestamp(12345L)
    println(query.getStringRepr)
    println(executeAsync(query))
  }
  it should "use if exists" in {
    val query = ss.delete[ScalaSession.NoQuery](table, Query("asdf")).ifExists
    println(query.getStringRepr)
    println(executeAsync(query))
  }
  it should "use if statement" in {
    val query = ss.delete[ScalaSession.NoQuery](table, Query("asdf")).`if`(IfS(1234L))
    println(query.getStringRepr)
    println(executeAsync(query))
  }
  it should "use everything" in {
    val query = ss.delete[SelectiveDelete](table, Query("asdf")).`if`(IfS(1234L)).usingTimestamp(12345L)
    val executed = query.executeAsync()
    Await.ready(executed, 3.seconds)
    executed.value.value.failure.exception shouldBe an[InvalidQueryException]
    query.execute().left.value shouldBe an[InvalidQueryException]

    println(s"broke: ${query.getStringRepr}")
    val fixedQuery = query.noTimestamp
    println(fixedQuery.getStringRepr)
    println(fixedQuery.execute())
  }
} 
Example 169
Source File: BenchmarkUtil.scala    From sigmastate-interpreter   with MIT License 5 votes
package scalan.util

import scala.concurrent.duration.Duration
import scala.concurrent.{Future, Await}
import scala.concurrent.ExecutionContext.Implicits.global


object BenchmarkUtil {

  def measureTime[T](action: => T): (T, Long) = {
    val t0 = System.currentTimeMillis()
    val res = action
    val t = System.currentTimeMillis()
    (res, t - t0)
  }

  def runTasks(nTasks: Int)(block: Int => Unit) = {
    val (_, total) = measureTime {
      val tasks = (1 to nTasks).map(iTask => Future(block(iTask)))
      Await.result(Future.sequence(tasks), Duration.Inf)
    }
    println(s"Completed $nTasks tasks in $total msec")
  }

} 
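Given these two helpers, a hypothetical caller (not part of the source file) measures a block and fans work out like this:

import scalan.util.BenchmarkUtil

object BenchmarkUtilUsage extends App {
  // Measure a block and report the wall-clock time it took.
  val (sum, millis) = BenchmarkUtil.measureTime((1 to 1000000).sum)
  println(s"sum=$sum took $millis msec")

  // Fan out 4 concurrent tasks and block until every one has completed.
  BenchmarkUtil.runTasks(4)(iTask => Thread.sleep(100L * iTask))
}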
Example 170
Source File: EventSpec.scala    From cloudflow   with Apache License 2.0 5 votes
package cloudflow.installer

import akka.actor.ActorSystem
import akka.event.Logging
import akka.event.LoggingAdapter
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._
import org.scalatest._

import scala.concurrent.Await
import scala.concurrent.duration._

class EventSpec extends WordSpec with MustMatchers with GivenWhenThen with EitherValues with Inspectors {

  implicit val system: ActorSystem    = ActorSystem("TestSystem")
  implicit val mat: ActorMaterializer = ActorMaterializer()
  implicit val ec                     = mat.executionContext
  implicit val log: LoggingAdapter    = Logging(system, "Cloudflow Installer")
  implicit val settings               = Settings(system)

  "Cloudflow event" should {
    "transform an install event to a install action" in {
      val instance        = TestInstance.get
      val clusterFeatures = ClusterFeatures()
      val installEvent    = InstallEvent(instance, None, instance.metadata.namespace, clusterFeatures)
      val future          = Source(List(installEvent)).via(CloudflowEvent.toAction).runWith(Sink.headOption)
      val result          = Await.result(future, 3.seconds)

      result must not be empty
      result.get mustBe a[CompositeAction]

      val action = result.get.asInstanceOf[CompositeAction]

      action.childActions must have size 5
      action.childActions(0) mustBe a[Strimzi]
      action.childActions(1) mustBe a[SparkOperator]
      action.childActions(2) mustBe a[FlinkOperator]
      action.childActions(3) mustBe a[CloudflowOperatorManagedStrimzi]
      action.childActions(4) mustBe a[PatchOwnerReferenceOfSparkMutatingWebhookConfig]
    }

    "transform an un-install event to an un-install action" in {
      val instance        = TestInstance.get
      val clusterFeatures = ClusterFeatures()

      val installEvent = UninstallEvent(instance, None, instance.metadata.namespace, clusterFeatures)
      val future       = Source(List(installEvent)).via(CloudflowEvent.toAction).runWith(Sink.headOption)
      val result       = Await.result(future, 3.seconds)

      result must not be empty
      result.get mustBe a[CompositeAction]

      val action = result.get.asInstanceOf[CompositeAction]

      action.childActions must have size 2
      action.childActions(0) mustBe a[RemoveCloudflowClusterwideResources]
      action.childActions(1) mustBe a[RemoveCloudflowNamespacedResources]
    }

    "verify that detected cluster features are present" in {
      val instance        = TestInstance.get
      val clusterFeatures = ClusterFeatures(hasSecurityContextConstraints = true)
      val installEvent    = InstallEvent(instance, None, instance.metadata.namespace, clusterFeatures)
      val future          = Source(List(installEvent)).via(CloudflowEvent.toAction).runWith(Sink.headOption)
      val result          = Await.result(future, 3.seconds)

      result must not be empty
      result.get mustBe a[CompositeAction]

      val action = result.get.asInstanceOf[CompositeAction]

      action.childActions must have size 6
      action.childActions(0) mustBe a[Strimzi]
      action.childActions(1) mustBe a[SparkOperator]
      action.childActions(2) mustBe a[FlinkOperator]
      action.childActions(3) mustBe a[AddSccToSparkServiceAccount]
      action.childActions(4) mustBe a[CloudflowOperatorManagedStrimzi]
      action.childActions(5) mustBe a[PatchOwnerReferenceOfSparkMutatingWebhookConfig]
    }
  }

  "transform an pre-requisite failure event to an no operation action" in {
    val instance = TestInstance.get

    val failures     = List(CloudflowInstance.ValidationFailure("The cluster does not have a storage class named 'test'"))
    val installEvent = PreRequisiteFailed(instance, failures)
    val future       = Source(List(installEvent)).via(CloudflowEvent.toAction).runWith(Sink.headOption)
    val result       = Await.result(future, 3.seconds)

    result must not be empty
    result.get mustBe a[UpdateCRStatusAction]

    val action = result.get.asInstanceOf[UpdateCRStatusAction]
    val caught = Await.result(action.execute(), 3.seconds)
    caught.stdErr mustBe Some("The cluster does not have a storage class named 'test'")
  }
} 
Example 171
Source File: PipelineAction.scala    From marvin-engine-executor   with Apache License 2.0 5 votes
package org.marvin.executor.actions

import java.time.LocalDateTime
import java.util.NoSuchElementException

import akka.Done
import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.pattern.ask
import akka.util.Timeout
import org.marvin.artifact.manager.ArtifactSaver
import org.marvin.artifact.manager.ArtifactSaver.SaveToRemote
import org.marvin.exception.MarvinEExecutorException
import org.marvin.executor.actions.PipelineAction.{PipelineExecute, PipelineExecutionStatus}
import org.marvin.executor.proxies.BatchActionProxy
import org.marvin.executor.proxies.EngineProxy.{ExecuteBatch, Reload}
import org.marvin.model._
import org.marvin.util.{JsonUtil, LocalCache}

import scala.collection.mutable.ListBuffer
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.Success

object PipelineAction {
  case class PipelineExecute(protocol:String, params:String)
  case class PipelineExecutionStatus(protocol:String)
}

class PipelineAction(metadata: EngineMetadata) extends Actor with ActorLogging{
  implicit val ec = context.dispatcher

  var artifactSaver: ActorRef = _
  var cache: LocalCache[BatchExecution] = _

  override def preStart() = {
    artifactSaver = context.actorOf(ArtifactSaver.build(metadata), name = "artifactSaver")
    cache = new LocalCache[BatchExecution](maximumSize = 10000L, defaultTTL = 30.days)
  }

  override def receive  = {
    case PipelineExecute(protocol, params) =>
      implicit val futureTimeout = Timeout(metadata.pipelineTimeout milliseconds)

      log.info(s"Starting to process pipeline process with. Protocol: [$protocol] and Params: [$params].")
      cache.save(protocol, new BatchExecution("pipeline", protocol, LocalDateTime.now, Working))

      try{
        for(actionName <- metadata.pipelineActions){
          val engineActionMetadata = metadata.actionsMap(actionName)
          val _actor: ActorRef = context.actorOf(Props(new BatchActionProxy(engineActionMetadata)), name = actionName.concat("Actor"))
          Await.result((_actor ? Reload(protocol)), futureTimeout.duration)
          Await.result((_actor ? ExecuteBatch(protocol, params)), futureTimeout.duration)
          context stop _actor

          val futures:ListBuffer[Future[Done]] = ListBuffer[Future[Done]]()

          for(artifactName <- engineActionMetadata.artifactsToPersist) {
            futures += (artifactSaver ? SaveToRemote(artifactName, protocol)).mapTo[Done]
          }

          if (!futures.isEmpty) Future.sequence(futures).onComplete{
            case Success(response) =>
              log.info(s"All artifacts from [$actionName] were saved with success!! [$response]")
          }
        }
      }catch {
        case e: Exception =>
          cache.save(protocol, new BatchExecution("pipeline", protocol, LocalDateTime.now, Failed))
          throw e
      }

      cache.save(protocol, new BatchExecution("pipeline", protocol, LocalDateTime.now, Finished))

    case PipelineExecutionStatus(protocol) =>
      log.info(s"Getting pipeline execution status to protocol $protocol.")

      try {
        sender ! JsonUtil.toJson(cache.load(protocol).get)

      }catch {
        case _: NoSuchElementException =>
          sender ! akka.actor.Status.Failure(new MarvinEExecutorException(s"Protocol $protocol not found!"))
      }

    case Done =>
      log.info("Work Done!")

    case _ =>
      log.warning(s"Not valid message !!")

  }
} 
Example 172
Source File: AkkaHttpRouteBackendTest.scala    From sttp   with Apache License 2.0 5 votes
package sttp.client.akkahttp

import akka.actor.ActorSystem
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer
import org.scalatest.BeforeAndAfterAll
import sttp.client.{NothingT, SttpBackend}
import sttp.model.StatusCode

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AsyncWordSpec

class AkkaHttpRouteBackendTest extends AsyncWordSpec with Matchers with BeforeAndAfterAll {

  implicit val system: ActorSystem = ActorSystem()
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  override protected def afterAll(): Unit = {
    Await.result(system.terminate(), 5.seconds)
  }

  val backend: SttpBackend[Future, Nothing, NothingT] = {
    AkkaHttpBackend.usingClient(system, http = AkkaHttpClient.stubFromRoute(Routes.route))
  }

  import sttp.client._

  "matched route" should {

    "respond" in {
      backend.send(basicRequest.get(uri"http://localhost/hello")).map { response =>
        response.code shouldBe StatusCode.Ok
        response.body.right.get shouldBe "Hello, world!"
      }
    }
  }

  "unmatched route" should {
    "respond with 404" in {
      backend.send(basicRequest.get(uri"http://localhost/not-matching")).map { response =>
        response.code shouldBe StatusCode.NotFound
        response.body.left.get shouldBe "The requested resource could not be found."
      }
    }
  }

}

object Routes {
  import akka.http.scaladsl.server.Directives._

  val route: Route =
    pathPrefix("hello") {
      complete("Hello, world!")
    }
} 
Example 173
Source File: SttpBackendStubAkkaTests.scala    From sttp   with Apache License 2.0 5 votes
package sttp.client.akkahttp

import akka.actor.ActorSystem
import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import sttp.client._
import sttp.model.Headers

import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

class SttpBackendStubAkkaTests extends AnyFlatSpec with Matchers with ScalaFutures with BeforeAndAfterAll {

  implicit val system: ActorSystem = ActorSystem()

  override protected def afterAll(): Unit = {
    Await.result(system.terminate().map(_ => ()), 5.seconds)
  }

  "backend stub" should "cycle through responses using a single sent request" in {
    // given
    implicit val backend = AkkaHttpBackend.stub
      .whenRequestMatches(_ => true)
      .thenRespondCyclic("a", "b", "c")

    // when
    def r = basicRequest.get(uri"http://example.org/a/b/c").send().futureValue

    // then
    r.body shouldBe Right("a")
    r.body shouldBe Right("b")
    r.body shouldBe Right("c")
    r.body shouldBe Right("a")
  }

  it should "use given flow as web socket handler" in {
    // This test is an example how can we test client flow.
    // We check behavior of client when connected to echo server.
    // Client responsibility was to send two messages to the server and collect received messages.
    val useHandler: Flow[Message, Message, Future[Seq[Message]]] => Future[Seq[Message]] = clientFlow => {
      val ((outQueue, clientReceivedMessages), inQueue) = Source
        .queue(1, OverflowStrategy.fail)
        .viaMat(clientFlow)(Keep.both)
        .toMat(Sink.queue())(Keep.both)
        .run()

      def echoMsg(): Future[Unit] =
        inQueue.pull().flatMap {
          case None =>
            echoMsg()
          case Some(msg) =>
            outQueue.offer(TextMessage(s"echo: " + msg.asTextMessage.getStrictText)).map(_ => ())
        }

      (for {
        _ <- outQueue.offer(TextMessage("Hi!"))
        _ <- echoMsg()
        _ <- echoMsg()
        _ = outQueue.complete()
        _ <- outQueue.watchCompletion()
      } yield ()).flatMap(_ => clientReceivedMessages)
    }

    val clientFlow: Flow[Message, Message, Future[Seq[Message]]] = {
      Flow.fromSinkAndSourceMat(
        Sink.seq[Message],
        Source((1 to 2).map(i => TextMessage(s"test$i")))
      )(Keep.left)
    }

    implicit val b = AkkaHttpBackend.stub
      .whenRequestMatches(_ => true)
      .thenHandleOpenWebSocket(Headers(List.empty), useHandler)

    val receivedMessages = basicRequest
      .get(uri"wss://echo.websocket.org")
      .openWebsocket(clientFlow)
      .flatMap(_.result)
      .futureValue
      .toList

    receivedMessages shouldBe List("Hi!", "echo: test1", "echo: test2").map(TextMessage(_))
  }
} 
Example 174
Source File: CorsBenchmark.scala    From akka-http-cors   with Apache License 2.0 5 votes
package ch.megard.akka.http.cors

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.model.headers.{Origin, `Access-Control-Request-Method`}
import akka.http.scaladsl.model.{HttpMethods, HttpRequest}
import akka.http.scaladsl.server.Directives
import akka.http.scaladsl.unmarshalling.Unmarshal
import ch.megard.akka.http.cors.scaladsl.CorsDirectives
import ch.megard.akka.http.cors.scaladsl.settings.CorsSettings
import com.typesafe.config.ConfigFactory
import org.openjdk.jmh.annotations._

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}

@State(Scope.Benchmark)
@OutputTimeUnit(TimeUnit.SECONDS)
@BenchmarkMode(Array(Mode.Throughput))
class CorsBenchmark extends Directives with CorsDirectives {
  private val config = ConfigFactory.parseString("akka.loglevel = ERROR").withFallback(ConfigFactory.load())

  implicit private val system: ActorSystem  = ActorSystem("CorsBenchmark", config)
  implicit private val ec: ExecutionContext = scala.concurrent.ExecutionContext.global

  private val http         = Http()
  private val corsSettings = CorsSettings.default

  private var binding: ServerBinding        = _
  private var request: HttpRequest          = _
  private var requestCors: HttpRequest      = _
  private var requestPreflight: HttpRequest = _

  @Setup
  def setup(): Unit = {
    val route = {
      path("baseline") {
        get {
          complete("ok")
        }
      } ~ path("cors") {
        cors(corsSettings) {
          get {
            complete("ok")
          }
        }
      }
    }
    val origin = Origin("http://example.com")

    binding = Await.result(http.bindAndHandle(route, "127.0.0.1", 0), 1.second)
    val base = s"http://${binding.localAddress.getHostString}:${binding.localAddress.getPort}"

    request = HttpRequest(uri = base + "/baseline")
    requestCors = HttpRequest(
      method = HttpMethods.GET,
      uri = base + "/cors",
      headers = List(origin)
    )
    requestPreflight = HttpRequest(
      method = HttpMethods.OPTIONS,
      uri = base + "/cors",
      headers = List(origin, `Access-Control-Request-Method`(HttpMethods.GET))
    )
  }

  @TearDown
  def shutdown(): Unit = {
    val f = for {
      _ <- http.shutdownAllConnectionPools()
      _ <- binding.terminate(1.second)
      _ <- system.terminate()
    } yield ()
    Await.ready(f, 5.seconds)
  }

  @Benchmark
  def baseline(): Unit = {
    val f = http.singleRequest(request).flatMap(r => Unmarshal(r.entity).to[String])
    assert(Await.result(f, 1.second) == "ok")
  }

  @Benchmark
  def default_cors(): Unit = {
    val f = http.singleRequest(requestCors).flatMap(r => Unmarshal(r.entity).to[String])
    assert(Await.result(f, 1.second) == "ok")
  }

  @Benchmark
  def default_preflight(): Unit = {
    val f = http.singleRequest(requestPreflight).flatMap(r => Unmarshal(r.entity).to[String])
    assert(Await.result(f, 1.second) == "")
  }
} 
Example 175
Source File: RichFuture.scala    From swave   with Mozilla Public License 2.0 5 votes
package swave.core.util

import java.util.concurrent.TimeoutException
import scala.concurrent.duration._
import scala.concurrent.{Await, Future, Promise}
import swave.core.StreamEnv

final class RichFuture[T](val underlying: Future[T]) extends AnyVal {

  def await(timeout: FiniteDuration = 1.second): T =
    underlying.value match {
      case Some(t)                          ⇒ t.get
      case None if timeout == Duration.Zero ⇒ throw new TimeoutException(s"Future was not completed")
      case _                                ⇒ Await.result(underlying, timeout)
    }

  def delay(duration: FiniteDuration)(implicit env: StreamEnv): Future[T] = {
    import env.defaultDispatcher
    val promise = Promise[T]()
    underlying.onComplete { value ⇒
      env.scheduler.scheduleOnce(duration) { promise.complete(value); () }
    }
    promise.future
  }
} 
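RichFuture is designed to be reached through an implicit conversion so call sites can write someFuture.await() directly; in swave itself the conversion lives in a package object. A hypothetical wiring of that conversion for the sketch:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.language.implicitConversions

import swave.core.util.RichFuture

object RichFutureUsage extends App {
  // Assumption: the conversion is declared locally here; swave ships its own.
  implicit def richFuture[T](f: Future[T]): RichFuture[T] = new RichFuture(f)

  val answer: Int = Future(21 * 2).await() // waits at most the 1.second default
  println(answer)
}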
Example 176
Source File: WorkerActor.scala    From recogito2   with Apache License 2.0 5 votes
package transform

import akka.actor.Actor
import java.io.File
import java.util.UUID
import scala.concurrent.Await
import scala.concurrent.duration._
import services.generated.tables.records.{DocumentRecord, DocumentFilepartRecord}
import services.task._

abstract class WorkerActor(
  taskType: TaskType, 
  taskService: TaskService
) extends Actor {
  
  def receive = {
    
    case msg: WorkerActor.WorkOnPart =>
      // Create a task record in the DB
      val taskId = Await.result(
        taskService.insertTask(
          taskType,
          this.getClass.getName,
          msg.jobId,
          Some(msg.document.getId),
          Some(msg.part.getId),
          Some(msg.document.getOwner)),
        10.seconds)
        
      taskService.updateTaskStatusAndProgress(taskId, TaskStatus.RUNNING, 1)
      
      // Actual work is left to the subclass to implement
      doWork(msg.document, msg.part, msg.dir, msg.jobDef, taskId)
      
      taskService.scheduleTaskForRemoval(taskId, 60.minutes)(context.system)
  }
  
  def doWork(
    doc: DocumentRecord, 
    part: DocumentFilepartRecord, 
    dir: File, 
    jobDef: Option[SpecificJobDefinition], 
    taskId: UUID)
  
}

object WorkerActor {
  
  case class WorkOnPart(
    jobId    : UUID,
    document : DocumentRecord,
    part     : DocumentFilepartRecord,
    dir      : File,
    jobDef   : Option[SpecificJobDefinition]) 
  
} 
Example 177
Source File: TEIParserActor.scala    From recogito2   with Apache License 2.0 5 votes
package transform.tei

import akka.actor.Props
import java.io.File
import java.util.UUID
import play.api.Logger
import scala.concurrent.Await
import scala.concurrent.duration._
import services.annotation.AnnotationService
import services.generated.tables.records.{DocumentRecord, DocumentFilepartRecord}
import services.task.TaskService
import transform.{WorkerActor, SpecificJobDefinition}

class TEIParserActor(
  taskService: TaskService,
  annotationService: AnnotationService
) extends WorkerActor(TEIParserService.TASK_TYPE, taskService) {
  
  implicit val ctx = context.dispatcher
  
  override def doWork(
    doc: DocumentRecord, 
    part: DocumentFilepartRecord, 
    dir: File, 
    jobDef: Option[SpecificJobDefinition], 
    taskId: UUID
  ) = {
    val annotations = TEIParserService.extractEntities(part, new File(dir, part.getFile))
    
    val fUpsertAll = annotationService.upsertAnnotations(annotations).map { failed =>
      if (failed.size == 0) {
        taskService.setTaskCompleted(taskId)
      } else {
        val msg = "Failed to store " + failed.size + " annotations"
        Logger.warn(msg)
        failed.foreach(a => Logger.warn(a.toString))
        taskService.setTaskFailed(taskId, Some(msg))
      } 
    } recover { case t: Throwable =>
      t.printStackTrace
      taskService.setTaskFailed(taskId, Some(t.getMessage))
    }

    Await.result(fUpsertAll, 20.minutes)   
  }
  
}

object TEIParserActor {
  
  def props(taskService: TaskService, annotationService: AnnotationService) =
    Props(classOf[TEIParserActor], taskService, annotationService)
  
} 
Example 178
Source File: DocumentIdFactory.scala    From recogito2   with Apache License 2.0 5 votes
package services.document

import collection.JavaConversions._
import org.apache.commons.lang3.RandomStringUtils
import play.api.Logger
import scala.concurrent.Await
import scala.concurrent.duration._
import services.generated.Tables._
import storage.db.DB

object DocumentIdFactory {
  
  // We use random alphanumeric IDs with 14 chars length (because 62^14 should be enough for anyone (TM))  
  val ID_LENGTH = 14
  
  // Utility function to check if an ID exists in the DB
  def existsId(id: String)(implicit db: DB) = {
    def checkExists() = db.query { sql =>
      val count = sql.select(DOCUMENT.ID)
         .from(DOCUMENT)
         .where(DOCUMENT.ID.equal(id))
         .fetchArray()
         .length
      
      count > 0
    }
    
    Await.result(checkExists(), 10.seconds)    
  }
  
  def generateRandomID(retriesLeft: Int = 10)(implicit db: DB): String = {
    
    // Takes a set of strings and returns those that already exist in the DB as doc IDs
    def findIds(ids: Set[String])(implicit db: DB) = db.query { sql =>
      sql.select(DOCUMENT.ID)
         .from(DOCUMENT)
         .where(DOCUMENT.ID.in(ids))
         .fetchArray()
         .map(_.value1).toSet    
    }
    
    // Generate 10 random IDs
    val randomIds = 
      (1 to 10).map(_ => RandomStringUtils.randomAlphanumeric(ID_LENGTH).toLowerCase).toSet

    // Match them all against the database and remove those that already exist
    val idsAlreadyInDB = Await.result(findIds(randomIds), 10.seconds)    
    val uniqueIds = randomIds.filter(id => !idsAlreadyInDB.contains(id))
    
    if (uniqueIds.size > 0) {
      uniqueIds.head
    } else if (retriesLeft > 0) {
      Logger.warn("Failed to generate unique random document ID")
      generateRandomID(retriesLeft - 1)
    } else {
      throw new RuntimeException("Failed to create unique document ID")
    }
  }
  
} 
Example 179
Source File: HasAccountRemoval.scala    From recogito2   with Apache License 2.0 5 votes
package controllers

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import services.announcement.AnnouncementService
import services.annotation.AnnotationService
import services.contribution.ContributionService
import services.document.DocumentService
import services.upload.UploadService
import services.user.UserService

trait HasAccountRemoval {

  private def deleteDocumentsAndAnnotations(documentIds: Seq[String])(implicit 
    annotations: AnnotationService,
    context: ExecutionContext,
    contributions: ContributionService
  ) = {
    
    def deleteOneDocument(docId: String): Future[Unit] = {
      // Annotations, geo-tags and version history
      val deleteAnnotations = annotations.deleteByDocId(docId)
        
      // Contributions
      val deleteContributions = contributions.deleteHistory(docId) 
        
      for {
        _ <- deleteAnnotations
        _ <- deleteContributions
      } yield ()
    }
    
    Future {
      scala.concurrent.blocking {
        documentIds.foreach(id => Await.result(deleteOneDocument(id), 10.second))
      }
    }
  }
  
  def deleteUserAccount(username: String)(implicit
    announcements: AnnouncementService,
    annotations: AnnotationService,
    context: ExecutionContext,
    contributions: ContributionService,
    documents: DocumentService,
    uploads: UploadService,
    users: UserService
  ) = {   
    
    // Fetch IDs of all documents owned by this user
    val fOwnedDocumentIds = documents.listAllIdsByOwner(username)
        
    // Delete pending upload & upload_filepart records
    val fDeletePendingUpload = uploads.deletePendingUpload(username)
    
    // Delete sharing policies shared by and with this user
    val fDeleteSharingPolicies = documents.deletePoliciesByUsername(username)
    
    // Delete pending/archived announcements for this user, if any
    val fDeleteAnnouncements = announcements.deleteForUser(username)
        
    for {
      ids <- fOwnedDocumentIds
      _ <- fDeletePendingUpload
      _ <- fDeleteSharingPolicies
      _ <- fDeleteAnnouncements
      
      // Delete owned documents, document_fileparts & sharing policies linked to them
      _ <- documents.deleteByOwner(username) 
      
      // Delete annotations, history, geotags & contributions
      _ <- deleteDocumentsAndAnnotations(ids)

      // User & roles
      _ <- users.deleteByUsername(username)
    } yield ()  
  }

} 
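The detail worth copying from deleteDocumentsAndAnnotations is scala.concurrent.blocking: it marks the sequential Await.result loop as blocking, so the global fork-join pool can compensate with extra threads instead of starving other tasks. The same shape in isolation:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future, blocking}

// Sketch: drain a sequence of Future-producing steps one at a time,
// advertising to the pool that this task will block.
def drainSequentially[A](items: Seq[A])(work: A => Future[Unit]): Future[Unit] =
  Future {
    blocking {
      items.foreach(item => Await.result(work(item), 10.seconds))
    }
  }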
Example 180
Source File: DumpLoader.scala    From recogito2   with Apache License 2.0 5 votes
package controllers.admin.authorities

import java.io.{InputStream, File, FileInputStream}
import java.util.zip.GZIPInputStream
import play.api.Logger
import scala.concurrent.{Await, ExecutionContext}
import scala.concurrent.duration._
import services.entity.EntityRecord
import services.entity.builtin.importer.EntityImporter

class DumpLoader {
  
  private def getStream(file: File, filename: String) =
    if (filename.endsWith(".gz"))
      new GZIPInputStream(new FileInputStream(file))
    else
      new FileInputStream(file)
  
  def importDump(file: File, filename: String, crosswalk: InputStream => Seq[EntityRecord], importer: EntityImporter)(implicit ctx: ExecutionContext) = {
    val records = crosswalk(getStream(file, filename))
    Logger.info("Importing " + records.size + " records")
    Await.result(importer.importRecords(records), 60.minutes)
  }
  
} 
Example 181
Source File: StreamLoader.scala    From recogito2   with Apache License 2.0
package controllers.admin.authorities

import akka.stream.{ActorAttributes, ClosedShape, Materializer, Supervision}
import akka.stream.scaladsl._
import akka.util.ByteString
import java.io.InputStream
import services.entity.EntityRecord
import services.entity.builtin.importer.EntityImporter
import play.api.Logger
import play.api.libs.json.Json
import scala.concurrent.{Await, ExecutionContext}
import scala.concurrent.duration._

class StreamLoader(implicit materializer: Materializer) {
  
  private val BATCH_SIZE = 100
  
  private val decider: Supervision.Decider = {    
    case t: Throwable => 
      t.printStackTrace()
      Supervision.Stop    
  }
  
  def importPlaces(is: InputStream, crosswalk: String => Option[EntityRecord], importer: EntityImporter)(implicit ctx: ExecutionContext) = {
    
    val source = StreamConverters.fromInputStream(() => is, 1024)
      .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = Int.MaxValue, allowTruncation = false))
      .map(_.utf8String)
      
    val parser = Flow.fromFunction[String, Option[EntityRecord]](crosswalk)
      .withAttributes(ActorAttributes.supervisionStrategy(decider))
      .grouped(BATCH_SIZE)
      
    val sink = Sink.foreach[Seq[Option[EntityRecord]]] { records =>
      val toImport = records.flatten
      Await.result(importer.importRecords(toImport), 60.minutes)
    }
    
    val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
      
      import GraphDSL.Implicits._
      
      source ~> parser ~> sink
      
      ClosedShape
    }).withAttributes(ActorAttributes.supervisionStrategy(decider))
        
    graph.run()
  }
  
} 
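
The Await.result inside Sink.foreach above is what throttles the stream to one batch import at a time. A sketch of the non-blocking equivalent, where mapAsync(parallelism = 1) lets stream backpressure do the waiting (importBatch is a stand-in for importer.importRecords):

import akka.NotUsed
import akka.stream.scaladsl.{Flow, Sink}
import scala.concurrent.Future

// One batch in flight at a time: the stream pulls the next element only once
// the previous import Future has completed.
def importSink[R](importBatch: Seq[R] => Future[Unit]): Sink[Seq[Option[R]], NotUsed] =
  Flow[Seq[Option[R]]]
    .mapAsync(parallelism = 1)(records => importBatch(records.flatten))
    .to(Sink.ignore)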
Example 182
Source File: AccountSettingsController.scala    From recogito2   with Apache License 2.0
package controllers.my.settings

import com.mohiva.play.silhouette.api.{Silhouette, LoginInfo}
import controllers.{HasUserService, HasConfig, Security}
import javax.inject.Inject
import services.announcement.AnnouncementService
import services.annotation.AnnotationService
import services.contribution.ContributionService
import services.user.Roles._
import services.user.UserService
import services.upload.UploadService
import services.document.DocumentService
import org.webjars.play.WebJarsUtil
import play.api.Configuration
import play.api.data.Form
import play.api.data.Forms._
import play.api.i18n.I18nSupport
import play.api.mvc.{AbstractController, ControllerComponents}
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import controllers.HasAccountRemoval

case class AccountSettingsData(
  email  : String,
  name   : Option[String],
  bio    : Option[String],
  website: Option[String])

class AccountSettingsController @Inject() (
  val components: ControllerComponents,
  val config: Configuration,
  val silhouette: Silhouette[Security.Env],
  implicit val announcements: AnnouncementService,
  implicit val annotations: AnnotationService,
  implicit val contributions: ContributionService,
  implicit val ctx: ExecutionContext,
  implicit val documents: DocumentService,
  implicit val uploads: UploadService,
  implicit val users: UserService,
  implicit val webjars: WebJarsUtil
) extends AbstractController(components) with HasUserService with HasConfig with HasAccountRemoval with I18nSupport {

  val accountSettingsForm = Form(
    mapping(
      "email" -> email,
      "name" -> optional(text(maxLength=80)),
      "bio" -> optional(text(maxLength=256)),
      "website" -> optional(text(maxLength=256))
    )(AccountSettingsData.apply)(AccountSettingsData.unapply)
  )

  def index() = silhouette.SecuredAction { implicit request =>
    val u = request.identity
    
    val form = accountSettingsForm.fill(AccountSettingsData(
      users.decryptEmail(u.email),
      u.realName,
      u.bio,
      u.website))
    
    Ok(views.html.my.settings.account(form, u))
  }

  def updateAccountSettings() = silhouette.SecuredAction.async { implicit request =>
    accountSettingsForm.bindFromRequest.fold(
      formWithErrors =>
        Future.successful(BadRequest(views.html.my.settings.account(formWithErrors, request.identity))),

      f =>
        users.updateUserSettings(request.identity.username, f.email, f.name, f.bio, f.website)
          .map { success =>
            if (success)
              Redirect(routes.AccountSettingsController.index).flashing("success" -> "Your settings have been saved.")
            else 
              Redirect(routes.AccountSettingsController.index).flashing("error" -> "There was an error while saving your settings.")
          }.recover { case t: Throwable =>
            t.printStackTrace()
            Redirect(routes.AccountSettingsController.index).flashing("error" -> "There was an error while saving your settings.")
          }
    )
  }
  
  def deleteAccount() = silhouette.SecuredAction.async { implicit request =>
    deleteUserAccount(request.identity.username).flatMap { _ =>
      silhouette.env.authenticatorService.discard(
        request.authenticator,
        Redirect(controllers.landing.routes.LandingController.index))
    }
  }

} 
Example 183
Source File: ES.scala    From recogito2   with Apache License 2.0
package storage.es

import com.sksamuel.elastic4s.{ElasticsearchClientUri, TcpClient}
import com.sksamuel.elastic4s.ElasticDsl._
import com.sksamuel.elastic4s.bulk.RichBulkResponse
import java.io.File
import javax.inject.{Inject, Singleton}
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.common.xcontent.XContentType
import play.api.{Configuration, Logger}
import play.api.inject.ApplicationLifecycle
import scala.io.Source
import scala.concurrent.{Await, Future, ExecutionContext}
import scala.concurrent.duration._
import scala.util.{Try, Success, Failure}
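
// NB: the enclosing class declaration is elided in this listing; the methods
// below belong to the ES storage wrapper, which provides the elastic4s
// `client` and index constants such as ES.RECOGITO.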



  private def loadMappings(existingMappings: Seq[String] = Seq.empty[String]): Seq[(String, String)] =
    new File("conf/es-mappings").listFiles.toSeq.filter(_.getName.endsWith(".json"))
      .foldLeft(Seq.empty[(Int, (String, String))])((mappings, file) => {
        val number = file.getName.substring(0, 2).toInt
        val name = file.getName.substring(3, file.getName.lastIndexOf('.'))
        if (existingMappings.contains(name)) {
          mappings
        } else {
          val json = Source.fromFile(file).getLines.mkString("\n")
          mappings :+ ((number, (name, json)))
        }
      }).sortBy(_._1).map(_._2)
      
  
  def countTotalDocs()(implicit ctx: ExecutionContext): Future[Long] =
    client execute {
      search(ES.RECOGITO) limit 0
    } map { _.totalHits }

} 
Example 184
Source File: PatchExamples.scala    From skuber   with Apache License 2.0
package skuber.examples.patch

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import skuber._
import skuber.json.format._
import skuber.apps.v1beta1.StatefulSet

import scala.concurrent.Await
import scala.concurrent.duration.Duration.Inf
import play.api.libs.json.{Format, Json}
import skuber.api.patch.JsonMergePatch

object PatchExamples extends App {

  // API patch method.
  // This will change the replica count on the statefulset causing it to scale accordingly.
  case class ReplicaSpec(replicas: Int)
  case class ReplicaPatch(spec: ReplicaSpec) extends JsonMergePatch

  implicit val rsFmt: Format[ReplicaSpec] = Json.format[ReplicaSpec]
  implicit val rpFmt: Format[ReplicaPatch] = Json.format[ReplicaPatch]

  val statefulSetName = "nginx-patch-sts"

  def scaleNginx = {

    val nginxContainer = Container("nginx",image="nginx").exposePort(80)
    val nginxBaseSpec = Pod.Template.Spec().addContainer(nginxContainer)


    val nginxStsLabels = Map("patch-example" -> "statefulset")
    val nginxStsSel = LabelSelector(LabelSelector.IsEqualRequirement("patch-example", "statefulset"))
    val nginxStsSpec = nginxBaseSpec.addLabels(nginxStsLabels)
    val nginxStatefulSet = StatefulSet(statefulSetName)
      .withReplicas(4)
      .withServiceName(statefulSetName)
      .withLabelSelector(nginxStsSel)
      .withTemplate(nginxStsSpec)

    // StatefulSet needs a headless service
    val nginxStsService: Service = Service(nginxStatefulSet.spec.get.serviceName.get, nginxStsLabels, 80).isHeadless

    implicit val system = ActorSystem()
    implicit val materializer = ActorMaterializer()
    implicit val dispatcher = system.dispatcher

    val k8s = k8sInit

    println("Creating nginx stateful set")
    val createdStsFut = for {
      svc <- k8s create nginxStsService
      sts <- k8s create nginxStatefulSet
    } yield sts
   
    val stsFut = createdStsFut recoverWith {
      case ex: K8SException if (ex.status.code.contains(409)) => {
        println("It seems the stateful set or service already exists - retrieving latest version")
        k8s get[StatefulSet] nginxStatefulSet.name
      }
    }

    // Wait for stateful set creation before proceeding
    val sts = Await.result(stsFut, Inf)
    println("waiting two minutes to allow Stateful Set creation to complete before patching it")
    Thread.sleep(120000)


    println("Patching stateful set to assign replica count of 1")

    // Create the Patch
    val singleReplicaPatch = ReplicaPatch(ReplicaSpec(1))
    val singleReplicaPatchJson = Json.toJson(singleReplicaPatch)
    val singleReplicaPatchJsonStr = singleReplicaPatchJson.toString

    // Send the Patch to the statefulset on Kubernetes
    val patchedStsFut = k8s.patch[ReplicaPatch, StatefulSet](statefulSetName, singleReplicaPatch)

    val patchedSts = Await.result(patchedStsFut, Inf)
    println(s"Patched statefulset now has a desired replica count of ${patchedSts.spec.get.replicas}")
    println("waiting 5 minutes to allow scaling to be observed before cleaning up")
    Thread.sleep(300000)
    println("will now delete StatefulSet and its service")
    val cleanupRequested = for {
      sts <- k8s.deleteWithOptions[StatefulSet](nginxStatefulSet.name, DeleteOptions(propagationPolicy = Some(DeletePropagation.Foreground)))
      done <- k8s.delete[Service](nginxStsService.name)
    } yield done


    Await.ready(cleanupRequested, Inf)
    println("Finishing up")
    k8s.close
    system.terminate()
  }
  scaleNginx
} 
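
For reference, the merge patch built above serializes to a very small JSON document. Reusing the ReplicaPatch/ReplicaSpec case classes and their implicit formats from the example:

import play.api.libs.json.Json

// Prints {"spec":{"replicas":1}} -- the entire request body for the patch.
println(Json.toJson(ReplicaPatch(ReplicaSpec(1))))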
Example 185
Source File: ExecExamples.scala    From skuber   with Apache License 2.0
package skuber.examples.exec

import akka.{Done, NotUsed}
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import skuber._

import scala.concurrent.{Await, Future, Promise}
import scala.concurrent.duration.Duration.Inf
import skuber.json.format._

object ExecExamples extends App {

  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  implicit val dispatcher = system.dispatcher

  val k8s = k8sInit

  println("Executing commands in pods ==>")

  val podName = "sleep"
  val containerName = "sleep"
  val sleepContainer = Container(name = containerName, image = "busybox", command = List("sh", "-c", "trap exit TERM; sleep 99999 & wait"))
  val sleepPod = Pod(podName, Pod.Spec().addContainer(sleepContainer))

  val terminalReady: Promise[Unit] = Promise()

  // Just print stdout and signal when the terminal gets ready
  val sink: Sink[String, Future[Done]] = Sink.foreach {
    case s =>
      print(s)
      if (s.startsWith("/ #")) {
        terminalReady.success(())
      }
  }

  // Execute `ps aux` when the terminal gets ready
  val source: Source[String, NotUsed] = Source.fromFuture(terminalReady.future.map { _ =>
    "ps aux\n"
  })

  // Wait for a while to ensure outputs
  def close: Promise[Unit] = {
    val promise = Promise[Unit]()
    Future {
      Thread.sleep(1000)
      promise.success(())
    }
    promise
  }

  val fut = for {
    // Create the sleep pod if not present
    _ <- k8s.getOption[Pod](podName).flatMap {
      case Some(_) => Future.successful(())
      case None =>
        k8s.create(sleepPod).map { _ =>
          Thread.sleep(3000)
        }
    }
    // Simulate kubectl exec
    _ <- {
      println("`kubectl exec ps aux`")
      k8s.exec(podName, Seq("ps", "aux"), maybeStdout = Some(sink), maybeClose = Some(close))
    }
    // Simulate kubectl exec -it
    _ <- {
      println("`kubectl -it exec sh` -> `ps aux`")
      k8s.exec(podName, Seq("sh"), maybeStdout = Some(sink), maybeStdin = Some(source), tty = true, maybeClose = Some(close))
    }
  } yield ()

  // Clean up
  fut.onComplete { _ =>
    println("\nFinishing up")
    k8s.delete[Pod]("sleep")
    k8s.close
    system.terminate().foreach { f =>
      System.exit(0)
    }
  }

  Await.result(fut, Inf)
} 
Example 186
Source File: Guestbook.scala    From skuber   with Apache License 2.0
package skuber.examples.guestbook

import akka.actor._
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
  

object Guestbook extends App {
  val sys = ActorSystem("SkuberExamples")
  val guestbook = sys.actorOf(Props[GuestbookActor], "guestbook")
  
  implicit val timeout = Timeout(40.seconds)
  
  val deploymentResult = ask(guestbook, GuestbookActor.Deploy)
  deploymentResult map { result =>
    result match {
      case GuestbookActor.DeployedSuccessfully => {
        System.out.println("\n*** Deployment of Guestbook application to Kubernetes completed successfully!")
        sys.terminate().foreach { f =>
          System.exit(0)
        }
      }
      case GuestbookActor.DeploymentFailed(ex) => {
        System.err.println("\n!!! Deployment of Guestbook application failed: " + ex)
        sys.terminate().foreach { f =>
          System.exit(0)
        }
      }
    }  
  }
  deploymentResult.failed.foreach {
    case ex =>
      System.err.println("Unexpected error deploying Guestbook: " + ex)
      sys.terminate().foreach { f =>
        System.exit(1)
      }
  }
} 
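
The example above reacts to the ask result with callbacks. Since this is a command-line app, the main thread could equally block on the same Future; a sketch using the deploymentResult and timeout values defined inside the object:

import scala.concurrent.Await

// Wait synchronously for the deployment outcome, bounded by the 40-second
// ask timeout already in scope.
val outcome = Await.result(deploymentResult, timeout.duration)
println(s"Deployment finished with: $outcome")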
Example 187
Source File: ListExamples.scala    From skuber   with Apache License 2.0
package skuber.examples.list

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import skuber.Pod.Phase
import skuber._

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import skuber.json.format._


object ListExamples extends App {

  private def listPods(pods: List[Pod]) = {

    System.out.println("")
    System.out.println("POD                                               NAMESPACE           PHASE")
    System.out.println("===                                               =========           =======")

    pods.foreach { pod =>
      val name = pod.name
      val ns = pod.namespace
      val phaseOpt = for {
        status <- pod.status
        phase <- status.phase
      } yield phase
      val phase = phaseOpt.getOrElse("Not set")

      System.out.println(f"${name}%-50s${ns}%-20s${phase}")
    }
  }

  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  implicit val dispatcher = system.dispatcher

  val k8s = k8sInit

  System.out.println("\nGetting list of pods in namespace of current context ==>")

  val currNsPods: Future[PodList] = k8s.list[PodList]()
  val printCurrNsPods = currNsPods map { podList => listPods(podList.items) }
  printCurrNsPods.failed.foreach { ex => System.err.println("Failed => " + ex) }

  Await.ready(printCurrNsPods, 30.seconds)

  System.out.println("\nGetting lists of pods in 'kube-system' namespace ==>")

  val ksysPods: Future[PodList] = k8s.listInNamespace[PodList]("kube-system")
  val printKSysPods = ksysPods map { podList => listPods(podList.items) }
  printKSysPods.failed.foreach { ex => System.err.println("Failed => " + ex) }

  Await.ready(printKSysPods, 30.seconds)

  System.out.println("\nGetting lists of pods in all namespaces in the cluster ==>")

  val allPodsMapFut: Future[Map[String, PodList]] = k8s.listByNamespace[PodList]()
  val allPods: Future[List[Pod]] = allPodsMapFut map { allPodsMap =>
    allPodsMap.values.flatMap(_.items).toList
  }

  val printAllPods = allPods map { pods=> listPods(pods) }
  printAllPods.failed.foreach { ex => System.err.println("Failed => " + ex) }

  Await.ready(printAllPods, 30.seconds)

  k8s.close
  system.terminate().foreach { f =>
    System.exit(0)
  }
} 
Example 188
Source File: PodLogsExample.scala    From skuber   with Apache License 2.0
package skuber.examples.podlogs

import akka.NotUsed
import skuber._
import skuber.json.format._
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._
import akka.util.ByteString
import skuber.api.client

import scala.concurrent.Await
import scala.concurrent.duration._


object PodLogExample extends App {

  def printLogFlow(cntrName: String): Sink[ByteString, NotUsed] = Flow[ByteString]
      .via(Framing.delimiter(
        ByteString("\n"),
        maximumFrameLength = 256,
        allowTruncation = true))
      .map(_.utf8String)
      .to(Sink.foreach(text => println(s"[${cntrName} logs] $text")))


  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  implicit val dispatcher = system.dispatcher
  val k8s = client.init(
    client.defaultK8sConfig.currentContext,
    client.LoggingConfig(logRequestBasic = false, logResponseBasic = false) )

  val helloWorldContainer = Container(name = "hello-world", image = "busybox", command = List("sh", "-c", "echo Hello World! && echo Goodbye World && sleep 60"))
  val helloWorldContainer2 = Container(name = "hello-world2", image = "busybox", command = List("sh", "-c", "echo Hello World again! && echo Goodbye World again && sleep 60"))
  val helloWorldPod = Pod("hello-world", Pod.Spec().addContainer(helloWorldContainer).addContainer(helloWorldContainer2))

  val podFut = k8s.create(helloWorldPod)

  println("Waiting 30 seconds to allow pod initialisation to complete before getting logs...")
  Thread.sleep(30000)
  for {
    pod <- podFut
    logsSource <- k8s.getPodLogSource("hello-world", Pod.LogQueryParams(containerName = Some("hello-world"), sinceSeconds = Some(9999999)))
    logsSource1 <- k8s.getPodLogSource("hello-world", Pod.LogQueryParams(containerName = Some("hello-world2"), sinceTime = pod.metadata.creationTimestamp))
    donePrinting = logsSource.runWith(printLogFlow("hello-world"))
    donePrinting1 = logsSource1.runWith(printLogFlow("hello-world2"))
  } yield (donePrinting, donePrinting1)

  // allow another 5 seconds for logs to be streamed from the pod to stdout before cleaning up
  Thread.sleep(5000)
  Await.result(k8s.delete[Pod]("hello-world"), 5.seconds)
  k8s.close
  system.terminate
  System.exit(0)
} 
Example 189
Source File: ServiceSpec.scala    From skuber   with Apache License 2.0
package skuber

import skuber.json.format.serviceFmt
import org.scalatest.Matchers
import org.scalatest.concurrent.Eventually

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.{Failure, Random, Success}

class ServiceSpec extends K8SFixture with Eventually with Matchers {
  val nginxServiceName: String = Random.alphanumeric.filter(_.isLetter).take(20).mkString.toLowerCase

  behavior of "Service"

  it should "create a service" in { k8s =>
    k8s.create(getService(nginxServiceName)) map { p =>
      assert(p.name == nginxServiceName)
    }
  }

  it should "get the newly created service" in { k8s =>
    k8s.get[Service](nginxServiceName) map { d =>
      assert(d.name == nginxServiceName)
      // Default ServiceType is ClusterIP
      assert(d.spec.map(_._type) == Option(Service.Type.ClusterIP))
    }
  }

  it should "delete a service" in { k8s =>
    k8s.delete[Service](nginxServiceName).map { _ =>
      eventually(timeout(100.seconds), interval(3.seconds)) {
        val retrieveService = k8s.get[Service](nginxServiceName)
        val serviceRetrieved = Await.ready(retrieveService, 2.seconds).value.get
        serviceRetrieved match {
          case s: Success[_] => assert(false)
          case Failure(ex) => ex match {
            case ex: K8SException if ex.status.code.contains(404) => assert(true)
            case _ => assert(false)
          }
        }
      }
    }
  }

  def getService(name: String): Service = {
    val spec: Service.Spec = Service.Spec(ports = List(Service.Port(port = 80)), selector = Map("app" -> "nginx"))
    Service(name, spec)
  }
} 
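
The Await.ready(...).value.get idiom above is what lets the test pattern-match on failure: Await.ready returns the completed Future itself, preserving a Failure as a value, whereas Await.result would rethrow the exception. A minimal standalone sketch:

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.util.{Failure, Success}

val failed: Future[Int] = Future.failed(new IllegalStateException("boom"))
Await.ready(failed, 1.second).value.get match {
  case Success(n)  => println(s"got $n")
  case Failure(ex) => println(s"failed: ${ex.getMessage}") // prints "failed: boom"
}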
Example 190
Source File: PodSpec.scala    From skuber   with Apache License 2.0
package skuber

import org.scalatest.{BeforeAndAfterAll, Matchers}
import org.scalatest.concurrent.Eventually
import skuber.json.format._

import scala.concurrent.duration._
import scala.concurrent.Await
import scala.util.{Failure, Success}


class PodSpec extends K8SFixture with Eventually with Matchers with BeforeAndAfterAll {
  val nginxPodName: String = java.util.UUID.randomUUID().toString
  val defaultLabels = Map("app" -> this.suiteName)

  override def afterAll() = {
    val k8s = k8sInit
    val requirements = defaultLabels.toSeq.map { case (k, v) => LabelSelector.IsEqualRequirement(k, v) }
    val labelSelector = LabelSelector(requirements: _*)
    Await.result(k8s.deleteAllSelected[PodList](labelSelector), 5.seconds)
  }

  behavior of "Pod"

  it should "create a pod" in { k8s =>
    k8s.create(getNginxPod(nginxPodName, "1.7.9")) map { p =>
      assert(p.name == nginxPodName)
    }
  }

  it should "get the newly created pod" in { k8s =>
    k8s.get[Pod](nginxPodName) map { p =>
      assert(p.name == nginxPodName)
    }
  }

  it should "check for newly created pod and container to be ready" in { k8s =>
    eventually(timeout(100.seconds), interval(3.seconds)) {
      val retrievePod = k8s.get[Pod](nginxPodName)
      val podRetrieved = Await.ready(retrievePod, 2.seconds).value.get
      val podStatus = podRetrieved.get.status.get
      val nginxContainerStatus = podStatus.containerStatuses(0)
      podStatus.phase should contain(Pod.Phase.Running)
      nginxContainerStatus.name should be(nginxPodName)
      nginxContainerStatus.state.get shouldBe a[Container.Running]
      val isUnschedulable = podStatus.conditions.exists { c =>
        c._type == "PodScheduled" && c.status == "False" && c.reason == Some("Unschedulable")
      }
      val isScheduled = podStatus.conditions.exists { c =>
        c._type == "PodScheduled" && c.status == "True"
      }
      val isInitialised = podStatus.conditions.exists { c =>
        c._type == "Initialized" && c.status == "True"
      }
      val isReady = podStatus.conditions.exists { c =>
        c._type == "Ready" && c.status == "True"
      }
      assert(isScheduled)
      assert(isInitialised)
      assert(isReady)
    }
  }

  it should "delete a pod" in { k8s =>
    k8s.delete[Pod](nginxPodName).map { _ =>
      eventually(timeout(100.seconds), interval(3.seconds)) {
        val retrievePod = k8s.get[Pod](nginxPodName)
        val podRetrieved = Await.ready(retrievePod, 2.seconds).value.get
        podRetrieved match {
          case s: Success[_] => assert(false)
          case Failure(ex) => ex match {
            case ex: K8SException if ex.status.code.contains(404) => assert(true)
            case _ => assert(false)
          }
        }
      }
    }
  }

  it should "delete selected pods" in { k8s =>
    for {
      _ <- k8s.create(getNginxPod(nginxPodName + "-foo", "1.7.9", labels = Map("foo" -> "1")))
      _ <- k8s.create(getNginxPod(nginxPodName + "-bar", "1.7.9", labels = Map("bar" -> "2")))
      _ <- k8s.deleteAllSelected[PodList](LabelSelector(LabelSelector.ExistsRequirement("foo")))
    } yield eventually(timeout(100.seconds), interval(3.seconds)) {
      val retrievePods = k8s.list[PodList]()
      val podsRetrieved = Await.result(retrievePods, 2.seconds)
      val podNamesRetrieved = podsRetrieved.items.map(_.name)
      assert(!podNamesRetrieved.contains(nginxPodName + "-foo") && podNamesRetrieved.contains(nginxPodName + "-bar"))
    }
  }

  def getNginxContainer(name: String, version: String): Container = Container(name = name, image = "nginx:" + version).exposePort(80)

  def getNginxPod(name: String, version: String, labels: Map[String, String] = Map()): Pod = {
    val nginxContainer = getNginxContainer(name, version)
    val nginxPodSpec = Pod.Spec(containers = List(nginxContainer))
    val podMeta=ObjectMeta(name = name, labels = labels ++ defaultLabels)
    Pod(metadata = podMeta, spec = Some(nginxPodSpec))
  }
} 
Example 191
Source File: PodLogSpec.scala    From skuber   with Apache License 2.0
package skuber

import java.time.ZonedDateTime

import akka.stream.scaladsl.TcpIdleTimeoutException
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, Matchers}
import org.scalatest.concurrent.Eventually
import skuber.Pod.LogQueryParams
import skuber.json.format._

import scala.concurrent.Await
import scala.concurrent.duration._

class PodLogSpec extends K8SFixture with Eventually with Matchers with BeforeAndAfterAll {
  val podName: String = java.util.UUID.randomUUID().toString

  behavior of "PodLog"

  val idleTimeout = 3.seconds
  override val config = ConfigFactory.parseString(s"skuber.pod-log.idle-timeout=${idleTimeout.toSeconds}s").withFallback(ConfigFactory.load())

  override def beforeAll(): Unit = {
    super.beforeAll()

    val k8s = k8sInit(config)
    Await.result(k8s.create(getNginxPod(podName, "1.7.9")), 3.seconds)
    // Give the pod time to reach the Running state
    Thread.sleep(3000)
    k8s.close
  }

  override def afterAll(): Unit = {
    val k8s = k8sInit(config)
    Await.result(k8s.delete[Pod](podName), 3.seconds)
    Thread.sleep(3000)
    k8s.close

    super.afterAll()
  }

  it should "get log of a pod" in { k8s =>
    k8s.getPodLogSource(podName, LogQueryParams(follow = Some(false))).flatMap { source =>
      source.map(_.utf8String).runReduce(_ + _).map { s =>
        assert(s == "foo\n")
      }
    }
  }

  it should "tail log of a pod and timeout after a while" in { k8s =>
    var log = ""
    var start = ZonedDateTime.now()
    k8s.getPodLogSource(podName, LogQueryParams(follow = Some(true))).flatMap { source =>
      source.map(_.utf8String).runForeach(log += _)
    }.failed.map { case e: TcpIdleTimeoutException =>
      val msgPattern = s"TCP idle-timeout encountered on connection to [^,]+, no bytes passed in the last ${idleTimeout}"
      assert(e.getMessage.matches(msgPattern), s"""["${e.getMessage}"] does not match ["${msgPattern}"]""")
      assert(log == "foo\n")
      assert(ZonedDateTime.now().isAfter(start.plusSeconds(idleTimeout.toSeconds)))
    }
  }

  def getNginxContainer(version: String): Container = Container(
    name = "ubuntu", image = "nginx:" + version,
    command = List("sh"),
    args = List("-c", s"""echo "foo"; trap exit TERM; sleep infinity & wait""")
  )

  def getNginxPod(name: String, version: String): Pod = {
    val container = getNginxContainer(version)
    val podSpec = Pod.Spec(containers = List(container))
    Pod.named(name).copy(spec = Some(podSpec))
  }
} 
Example 192
Source File: DeploymentSpec.scala    From skuber   with Apache License 2.0
package skuber

import org.scalatest.Matchers
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import skuber.LabelSelector.IsEqualRequirement
import skuber.apps.v1.Deployment

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.{Failure, Success}

class DeploymentSpec extends K8SFixture with Eventually with Matchers {
  val nginxDeploymentName: String = java.util.UUID.randomUUID().toString

  behavior of "Deployment"

  it should "create a deployment" in { k8s =>
    k8s.create(getNginxDeployment(nginxDeploymentName, "1.7.9")) map { d =>
      assert(d.name == nginxDeploymentName)
    }
  }

  it should "get the newly created deployment" in { k8s =>
    k8s.get[Deployment](nginxDeploymentName) map { d =>
      assert(d.name == nginxDeploymentName)
    }
  }

  it should "upgrade the newly created deployment" in { k8s =>
    k8s.get[Deployment](nginxDeploymentName).flatMap { d =>
      println(s"DEPLOYMENT TO UPDATE ==> $d")
      val updatedDeployment = d.updateContainer(getNginxContainer("1.9.1"))
      k8s.update(updatedDeployment).flatMap { _ =>
        eventually(timeout(200.seconds), interval(5.seconds)) {
          val retrieveDeployment = k8s.get[Deployment](nginxDeploymentName)
          ScalaFutures.whenReady(retrieveDeployment, timeout(2.seconds), interval(1.second)) { deployment =>
            deployment.status.get.updatedReplicas shouldBe 1
          }
        }
      }
    }
  }

  it should "delete a deployment" in { k8s =>
    k8s.deleteWithOptions[Deployment](nginxDeploymentName, DeleteOptions(propagationPolicy = Some(DeletePropagation.Foreground))).map { _ =>
      eventually(timeout(200.seconds), interval(3.seconds)) {
        val retrieveDeployment = k8s.get[Deployment](nginxDeploymentName)
        val deploymentRetrieved = Await.ready(retrieveDeployment, 2.seconds).value.get
        deploymentRetrieved match {
          case s: Success[_] => assert(false)
          case Failure(ex) => ex match {
            case ex: K8SException if ex.status.code.contains(404) => assert(true)
            case _ => assert(false)
          }
        }
      }
    }
  }

  def getNginxContainer(version: String): Container = Container(name = "nginx", image = "nginx:" + version).exposePort(80)

  def getNginxDeployment(name: String, version: String): Deployment = {
    import LabelSelector.dsl._
    val nginxContainer = getNginxContainer(version)
    val nginxTemplate = Pod.Template.Spec.named("nginx").addContainer(nginxContainer).addLabel("app" -> "nginx")
    Deployment(name).withTemplate(nginxTemplate).withLabelSelector("app" is "nginx")
  }
} 
Example 193
Source File: CleaningIntegrationSpec.scala    From neotypes   with MIT License
package neotypes

import neotypes.implicits.mappers.executions._
import neotypes.implicits.syntax.string._
import org.scalatest.FutureOutcome
import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration


abstract class CleaningIntegrationSpec[F[_]](testkit: EffectTestkit[F]) extends BaseIntegrationSpec(testkit) {
  override final def withFixture(test: NoArgAsyncTest): FutureOutcome = {
    complete {
      super.withFixture(test)
    } lastly {
      this.cleanDb()
    }
  }

  override final val initQuery: String = BaseIntegrationSpec.EMPTY_INIT_QUERY
} 
Example 194
Source File: RunOneTaskbutBlock.scala    From Scala-and-Spark-for-Big-Data-Analytics   with MIT License
package com.chapter3.ScalaFP
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
object RunOneTaskbutBlock {
  def main(args: Array[String]) {
    // Getting the current time in Milliseconds
    implicit val baseTime = System.currentTimeMillis    
    // Future creation
    val testFuture = Future {
      Thread.sleep(300)
      2 + 2
    }    
    // this is the blocking part
    val finalOutput = Await.result(testFuture, 2.seconds)
    println(finalOutput)
  }
} 
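
One caveat worth making explicit: Await.result throws a TimeoutException if the Future has not completed within the given duration. A sketch that captures the timeout as a value instead of letting it propagate:

import java.util.concurrent.TimeoutException
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.util.{Failure, Try}

val slow = Future { Thread.sleep(500); 42 }
Try(Await.result(slow, 100.millis)) match {
  case Failure(_: TimeoutException) => println("timed out after 100ms")
  case other                        => println(other)
}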
Example 195
Source File: SystemShutdown.scala    From service-container   with Apache License 2.0
package com.github.vonnagy.service.container.core

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import com.github.vonnagy.service.container.log.LoggingAdapter

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.sys.ShutdownHookThread
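
// NB: the enclosing trait declaration is elided in this listing; the method
// below relies on a `system: ActorSystem`, a `shutdownHook: Option[ShutdownHookThread]`
// and a `log` provided by LoggingAdapter.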



  private[container] def shutdownActorSystem(fromHook: Boolean = false)(f: => Unit): Unit = {

    try {
      // Remove the hook
      if (shutdownHook.isDefined && !fromHook) {
        shutdownHook.get.remove

      }
      shutdownHook = None

      log.info("Shutting down the actor system")
      system.terminate()

      // Wait for termination if it is not already complete
      Await.result(system.whenTerminated, Duration.apply(30, TimeUnit.SECONDS))
      log.info("The actor system has terminated")
    }
    catch {
      case t: Throwable =>
        log.error(s"The actor system could not be shutdown: ${t.getMessage}", t)
    }

    // Call the passed function
    f
  }
} 
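
The core of shutdownActorSystem is the terminate-then-await pair. Isolated into a minimal sketch:

import akka.actor.ActorSystem
import scala.concurrent.Await
import scala.concurrent.duration._

object ShutdownSketch extends App {
  val system = ActorSystem("sketch")
  system.terminate() // asynchronous: returns a Future[Terminated]
  // Block until termination has actually completed, or give up after 30 seconds
  Await.result(system.whenTerminated, 30.seconds)
  println("actor system terminated")
}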
Example 196
Source File: HealthProviderSpec.scala    From service-container   with Apache License 2.0
package com.github.vonnagy.service.container.health

import com.github.vonnagy.service.container.AkkaTestkitSpecs2Support
import org.specs2.mutable.SpecificationLike

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

class HealthProviderSpec extends AkkaTestkitSpecs2Support with SpecificationLike {

  sequential
  val sys = system

  import system.dispatcher

  "The HealthProvider" should {

    "properly gather the container health" in {

      Health(system).addCheck(new HealthCheck {
        override def getHealth: Future[HealthInfo] = Future {
          HealthInfo("healthy", HealthState.OK, "details")
        }
      })

      val prov = new HealthProvider {
        implicit val system = sys
        implicit val executor = system.dispatcher
      }

      val health = Await.result[ContainerHealth](prov.runChecks, 1.second)
      health.state must be equalTo HealthState.OK
    }

    "properly gather the container health when the system is not healthy" in {

      val prov = new HealthProvider {
        implicit val system = sys
        implicit val executor = system.dispatcher
      }

      Health(system).addCheck(new HealthCheck {
        override def getHealth: Future[HealthInfo] = Future {
          HealthInfo("critical", HealthState.CRITICAL, "details", checks = List(HealthInfo("critical-child", HealthState.CRITICAL, "child")))
        }
      })

      val health = Await.result[ContainerHealth](prov.runChecks, 1.second)
      health.state must be equalTo HealthState.CRITICAL
    }
  }

} 
Example 197
Source File: RegisteredHealthCheckActorSpec.scala    From service-container   with Apache License 2.0
package com.github.vonnagy.service.container.health

import akka.actor._
import akka.testkit.TestActorRef
import com.github.vonnagy.service.container.AkkaTestkitSpecs2Support
import org.specs2.mutable.SpecificationLike

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

class RegisteredHealthCheckActorSpec extends AkkaTestkitSpecs2Support with SpecificationLike {

  sequential

  "Health check registration" should {

    "allow for the creation of a registered health check" in {

      val r = new TestRegisteredHealthCheck()(system)
      Await.result[HealthInfo](r.getHealth, 1.second).state must be equalTo HealthState.OK
      Health(system).getChecks.length must be equalTo 1
    }

    "allow for the creation of a registered health check actor" in {

      val ext = Health(system)
      val act = TestActorRef(new Actor with RegisteredHealthCheckActor {
        def receive = {
          case GetHealth => sender ! HealthInfo("test", HealthState.OK, "details")
        }
      })

      Await.result[HealthInfo](act.underlyingActor.getHealth, 1.second).state must be equalTo HealthState.OK
      ext.getChecks.length must be equalTo 2
    }
  }
}

class TestRegisteredHealthCheck(implicit val system: ActorSystem) extends RegisteredHealthCheck {

  import system.dispatcher

  def getHealth: Future[HealthInfo] = Future {
    HealthInfo("test", HealthState.OK, "details")
  }
} 
Example 198
Source File: CatalogDatabase.scala    From modelmatrix   with Apache License 2.0
package com.collective.modelmatrix.catalog

import java.util.concurrent.Executors

import com.collective.modelmatrix.db.SchemaInstaller
import org.scalatest.BeforeAndAfterAll
import slick.driver.JdbcProfile

import scala.concurrent.duration.{FiniteDuration, _}
import scala.concurrent.{Await, ExecutionContext, Future}
import scalaz.Tag

trait CatalogDatabase {
  def driver: JdbcProfile

  import com.collective.modelmatrix.db.GenericSlickDriver.api.Database
  def db: Database

  lazy val catalog = new ModelMatrixCatalog(driver)

  protected implicit val catalogExecutionContext =
    Tag[ExecutionContext, ModelMatrixCatalog](ExecutionContext.fromExecutor(Executors.newFixedThreadPool(10)))

  protected def await[T](f: Future[T], duration: FiniteDuration = 10.seconds): T = {
    Await.result(f, duration)
  }

}

trait InstallSchemaBefore extends SchemaInstaller {
  self: BeforeAndAfterAll with CatalogDatabase =>
  private[this] var schemaInstalled: Boolean = false

  override protected def beforeAll(): Unit = {
    this.synchronized {
      if (!schemaInstalled) {
        installOrMigrate
        schemaInstalled = true
      }
    }
  }
} 
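
The await helper above is a common test fixture: it hides Await.result behind a sensible default timeout so call sites stay terse. The same pattern in isolation, as a sketch:

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

def await[T](f: Future[T], duration: FiniteDuration = 10.seconds): T =
  Await.result(f, duration)

// e.g. await(Future.successful(Seq("a", "b"))) returns Seq("a", "b")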
Example 199
Source File: RemoteAccessor.scala    From scala-loci   with Apache License 2.0
package loci
package transmitter

import contexts.Immediate.Implicits.global

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

object RemoteAccessor {
  trait Default { this: language.PlacedValue.type =>
    implicit class BasicMultipleAccessor[V, R, T, L](value: V from R)(
        implicit ev: Transmission[V, R, T, L, Multiple])
      extends RemoteAccessor {

      def asLocalFromAll: Seq[(Remote[R], T)] = value.remotes zip value.retrieveValues
    }

    implicit class BasicBlockingMultipleAccessor[V, R, T, L](value: V from R)(
        implicit ev: Transmission[V, R, Future[T], L, Multiple])
      extends RemoteAccessor {

      def asLocalFromAll_?(timeout: Duration): Seq[(Remote[R], T)] =
        value.remotes zip (Await result (Future sequence value.retrieveValues, timeout))

      def asLocalFromAll_! : Seq[(Remote[R], T)] = asLocalFromAll_?(Duration.Inf)
    }


    implicit class BasicOptionalAccessor[V, R, T, L](value: V from R)(
        implicit ev: Transmission[V, R, T, L, Optional])
      extends RemoteAccessor {

      def asLocal: Option[T] = value.retrieveValue
    }

    implicit class BasicBlockingOptionalAccessor[V, R, T, L](value: V from R)(
        implicit ev: Transmission[V, R, Future[T], L, Optional])
      extends RemoteAccessor {

      def asLocal_?(timeout: Duration): Option[T] =
        value.retrieveValue map { Await result (_, timeout) }

      def asLocal_! : Option[T] = asLocal_?(Duration.Inf)
    }


    implicit class BasicSingleAccessor[V, R, T, L](value: V from R)(
        implicit ev: Transmission[V, R, T, L, Single])
      extends RemoteAccessor {

      def asLocal: T = value.retrieveValue
    }

    implicit class BasicBlockingSingleAccessor[V, R, T, L](value: V from R)(
        implicit ev: Transmission[V, R, Future[T], L, Single])
      extends RemoteAccessor {

      def asLocal_?(timeout: Duration): T =
        Await result (value.retrieveValue, timeout)

      def asLocal_! : T = asLocal_?(Duration.Inf)
    }
  }

  sealed trait Access {
    implicit class MultipleValueAccess[V, T, R, L](value: V from R)(implicit
        ev: Transmission[V, R, T, L, _]) {

      def cache[B <: AnyRef](id: Any)(body: => B): B = ev.cache(id, body)
      val remoteJoined: Notice.Stream[Remote[R]] = ev.remoteJoined
      val remoteLeft: Notice.Stream[Remote[R]] = ev.remoteLeft
      def remotes: Seq[Remote[R]] = ev.remotesReferences
      def retrieveValues: Seq[T] = ev.retrieveValues
    }
  }
}

trait RemoteAccessor extends RemoteAccessor.Access {
  implicit class OptionalValueAccess[V, T, R, L](value: V from R)(implicit
      ev: Transmission[V, R, T, L, Optional])
    extends MultipleValueAccess(value)(ev) {

    def remote: Option[Remote[R]] = ev.remotesReferences.headOption
    def retrieveValue: Option[T] = ev.retrieveValues.headOption
  }

  implicit class SingleValueAccess[V, T, R, L](value: V from R)(implicit
      ev: Transmission[V, R, T, L, Single])
    extends MultipleValueAccess(value)(ev) {

    def remote: Remote[R] = ev.remotesReferences.head
    def retrieveValue: T = ev.retrieveValues.head
  }
} 
Example 200
Source File: Main.scala    From troy   with Apache License 2.0
package demo6

import java.util.UUID
import com.datastax.driver.core.{Row, Cluster, Session}
import troy.dsl._
import troy.driver.DSL._

import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, title: String)

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val create = withSchema {
    (authorId: String, title: String) =>
      cql"""
         INSERT INTO test.posts (author_id , post_id , post_title )
         VALUES ( $authorId, now(), $title);
       """.prepared.executeAsync
  }

  val listByAuthor = withSchema {
    (authorId: String) =>
      cql"""
         SELECT post_id, post_title
         FROM test.posts
         WHERE author_id = $authorId
       """
        .prepared
        .executeAsync
        .as(Post)
  }

  println(Await.result(create("test", "title"), Duration(1, "second")))
  println(Await.result(listByAuthor("test"), Duration(1, "second")))

  session.close()
  cluster.close()
}