akka.util.Timeout Scala Examples

The following examples show how to use akka.util.Timeout. Each example is drawn from an open-source project; the source file, project name, and license are noted above each listing.
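Most of the examples follow the same basic pattern: put an implicit Timeout in scope so that the ask operator (?) from akka.pattern knows how long to wait for a reply before failing the returned Future. Below is a minimal, self-contained sketch of that pattern; the EchoActor and the "timeout-sketch" system name are hypothetical and only serve to illustrate the API.

import akka.actor.{Actor, ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.duration._

object TimeoutSketch extends App {
  // Hypothetical actor that simply echoes whatever it receives back to the sender.
  class EchoActor extends Actor {
    def receive = { case msg => sender() ! msg }
  }

  implicit val system: ActorSystem = ActorSystem("timeout-sketch")
  implicit val timeout: Timeout = Timeout(5.seconds) // picked up implicitly by `?`
  import system.dispatcher                           // ExecutionContext for the Future callback

  val echo = system.actorOf(Props(new EchoActor), "echo")

  // The Future completes with the reply, or fails with an AskTimeoutException after 5 seconds.
  (echo ? "hello").foreach { reply =>
    println(s"got reply: $reply")
    system.terminate()
  }
}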
Example 1
Source File: QueriesAkkaHttpResource.scala    From model-serving-tutorial   with Apache License 2.0
package com.lightbend.modelserving.akka

import akka.actor.Scheduler
import akka.actor.typed.ActorRef
import akka.actor.typed.scaladsl.AskPattern._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.util.Timeout
import com.lightbend.modelserving.model.ModelToServeStats
import de.heikoseeberger.akkahttpjackson.JacksonSupport

import scala.concurrent.duration._

object QueriesAkkaHttpResource extends JacksonSupport {

  implicit val askTimeout = Timeout(30.seconds)

  def storeRoutes(modelserver: ActorRef[ModelServerManagerActor])(implicit scheduler: Scheduler) : Route =
    get {
      // Get list of models
      path("processors") {
        onSuccess(modelserver ? ((replyTo: ActorRef[GetModelsResult]) => GetModels(replyTo))) {
          case models: GetModelsResult =>
            complete(models)
        }
      } ~
      // Get statistics for a given data type
      path("state"/Segment) { dataType =>
        onSuccess(modelserver ? ((replyTo: ActorRef[ModelToServeStats]) => GetState(replyTo, dataType))) {
          case stats : ModelToServeStats =>
            complete(stats)
        }
      }
    }
} 
Example 2
Source File: ClientSpec.scala    From twitter4s   with Apache License 2.0
package com.danielasfregola.twitter4s.helpers

import java.util.UUID

import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.pattern.ask
import akka.stream.{KillSwitches, Materializer, SharedKillSwitch}
import akka.util.Timeout
import akka.util.Timeout.durationToTimeout
import com.danielasfregola.twitter4s.entities.streaming.StreamingMessage
import com.danielasfregola.twitter4s.http.clients.authentication.AuthenticationClient
import com.danielasfregola.twitter4s.http.clients.rest.RestClient
import com.danielasfregola.twitter4s.http.clients.streaming.StreamingClient

import scala.concurrent.Future
import scala.concurrent.duration.DurationInt

trait ClientSpec extends Spec {

  abstract class AuthenticationClientSpecContext extends RequestDSL with SpecContext {

    protected val authenticationClient = new AuthenticationClient(consumerToken) {

      override def sendAndReceive[T](request: HttpRequest, f: HttpResponse => Future[T])(
          implicit system: ActorSystem,
          materializer: Materializer): Future[T] = {
        implicit val ec = materializer.executionContext
        implicit val timeout: Timeout = DurationInt(20) seconds
        val requestStartTime = System.currentTimeMillis
        val responseR: Future[HttpResponse] = (transport.ref ? request).map(_.asInstanceOf[HttpResponse])
        for {
          response <- responseR
          t <- unmarshal[T](requestStartTime, f)(request, response, materializer)
        } yield t
      }
    }
  }

  abstract class RestClientSpecContext extends RequestDSL with SpecContext {

    protected val restClient = new RestClient(consumerToken, accessToken) {

      override def sendAndReceive[T](request: HttpRequest, f: HttpResponse => Future[T])(
          implicit system: ActorSystem,
          materializer: Materializer): Future[T] = {
        implicit val ec = materializer.executionContext
        implicit val timeout: Timeout = DurationInt(20) seconds
        val requestStartTime = System.currentTimeMillis
        val responseR: Future[HttpResponse] = (transport.ref ? request).map(_.asInstanceOf[HttpResponse])
        for {
          response <- responseR
          t <- unmarshal[T](requestStartTime, f)(request, response, materializer)
        } yield t
      }
    }
  }

  abstract class StreamingClientSpecContext extends RequestDSL with SpecContext {

    def dummyProcessing: PartialFunction[StreamingMessage, Unit] = { case _ => }

    val killSwitch = KillSwitches.shared(s"test-twitter4s-${UUID.randomUUID}")

    protected val streamingClient = new StreamingClient(consumerToken, accessToken) {

      override def processStreamRequest[T <: StreamingMessage: Manifest](
          request: HttpRequest
      )(
          f: PartialFunction[T, Unit],
          errorHandler: PartialFunction[Throwable, Unit]
      )(
          implicit
          system: ActorSystem,
          materializer: Materializer
      ): Future[SharedKillSwitch] = {
        implicit val ec = materializer.executionContext
        implicit val timeout: Timeout = DurationInt(20) seconds

        val responseR: Future[HttpResponse] = (transport.ref ? request).map(_.asInstanceOf[HttpResponse])
        for {
          response <- responseR
          _ <- Future.successful(processBody(response, killSwitch)(f)(manifest[T], request, materializer))
        } yield killSwitch
      }

    }
  }
} 
Example 3
Source File: ThresholdActorSpec.scala    From coral   with Apache License 2.0
package io.coral.actors.transform

import io.coral.actors.CoralActorFactory
import io.coral.api.DefaultModule
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import scala.concurrent.duration._
import akka.actor.ActorSystem
import akka.testkit._
import akka.util.Timeout
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

@RunWith(classOf[JUnitRunner])
class ThresholdActorSpec(_system: ActorSystem) extends TestKit(_system)
	with ImplicitSender
	with WordSpecLike
	with Matchers
	with BeforeAndAfterAll {
	implicit val timeout = Timeout(100.millis)
	def this() = this(ActorSystem("ThresholdActorSpec"))

	override def afterAll() {
		TestKit.shutdownActorSystem(system)
	}

	"A ThresholdActor" must {
		val createJson = parse(
			"""{ "type": "threshold", "params": { "key": "key1", "threshold": 10.5 }}"""
				.stripMargin).asInstanceOf[JObject]

		implicit val injector = new DefaultModule(system.settings.config)

		// test invalid definition json as well !!!
		val props = CoralActorFactory.getProps(createJson).get
		val threshold = TestActorRef[ThresholdActor](props)

		// subscribe the testprobe for emitting
		val probe = TestProbe()
		threshold.underlyingActor.emitTargets += probe.ref

		"Emit when equal to the threshold" in {
			val json = parse( """{"key1": 10.5}""").asInstanceOf[JObject]
			threshold ! json
			probe.expectMsg(parse( """{ "key1": 10.5 }"""))
		}

		"Emit when higher than the threshold" in {
			val json = parse( """{"key1": 10.7}""").asInstanceOf[JObject]
			threshold ! json
			probe.expectMsg(parse( """{"key1": 10.7 }"""))
		}

		"Not emit when lower than the threshold" in {
			val json = parse( """{"key1": 10.4 }""").asInstanceOf[JObject]
			threshold ! json
			probe.expectNoMsg()
		}

		"Not emit when key is not present in triggering json" in {
			val json = parse( """{"key2": 10.7 }""").asInstanceOf[JObject]
			threshold ! json
			probe.expectNoMsg()
		}
	}
} 
Example 4
Source File: StatsActorSpec.scala    From coral   with Apache License 2.0
package io.coral.actors.transform

import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestActorRef, TestKit}
import akka.util.Timeout
import io.coral.actors.CoralActorFactory
import io.coral.api.DefaultModule
import org.json4s.JsonAST.JValue
import org.json4s.JsonDSL._
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import scala.concurrent.Await
import scala.concurrent.duration._

class StatsActorSpec(_system: ActorSystem)
	extends TestKit(_system)
	with ImplicitSender
	with WordSpecLike
	with Matchers
	with BeforeAndAfterAll {
	def this() = this(ActorSystem("StatsActorSpec"))

	override def afterAll() {
		TestKit.shutdownActorSystem(system)
	}

	implicit val timeout = Timeout(100.millis)
	implicit val injector = new DefaultModule(system.settings.config)

	def createStatsActor: StatsActor = {
		val createJson = parse( """{ "type": "stats", "params": { "field": "val" } }""")
			.asInstanceOf[JObject]
		val props = CoralActorFactory.getProps(createJson).get
		val actorRef = TestActorRef[StatsActor](props)
		actorRef.underlyingActor
	}

	val expectedInitialState = Map(
		("count", render(0L)),
		("avg", render(JNull)),
		("sd", render(JNull)),
		("min", render(JNull)),
		("max", render(JNull))
	)

	"StatsActor" should {
		"have a field corresponding to the json definition" in {
			val actor = createStatsActor
			actor.field should be("val")
		}

		"supply it's state" in {
			val actor = createStatsActor
			actor.state should be(expectedInitialState)
		}

		"accept a value as trigger" in {
			val actor = createStatsActor
			val triggerJson = parse( """{ "bla": 1.0, "val": 2.7 }""").asInstanceOf[JObject]
			actor.trigger(triggerJson)
			actor.state should be(
				Map(
					("count", render(1L)),
					("avg", render(2.7)),
					("sd", render(0.0)),
					("min", render(2.7)),
					("max", render(2.7))
				))
		}

		"have timer reset statistics" in {
			val actor = createStatsActor
			val triggerJson = parse( """{ "val": 2.7 }""").asInstanceOf[JObject]
			actor.trigger(triggerJson)
			actor.state should be(
				Map(
					("count", render(1L)),
					("avg", render(2.7)),
					("sd", render(0.0)),
					("min", render(2.7)),
					("max", render(2.7))
				))
			val future = actor.timer
			val json = Await.result(future, timeout.duration).get
			json should be(JNothing)
			actor.state should be(expectedInitialState)
		}
	}
} 
Example 5
Source File: GroupByActorSpec.scala    From coral   with Apache License 2.0
package io.coral.actors.transform

import java.util.UUID

import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestActorRef, TestKit}
import akka.util.Timeout
import io.coral.actors.RuntimeActor
import io.coral.api.DefaultModule
import org.json4s.JsonDSL._
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.duration._
import scala.language.postfixOps

class GroupByActorSpec(_system: ActorSystem)
	extends TestKit(_system)
	with ImplicitSender
	with WordSpecLike
	with Matchers
	with BeforeAndAfterAll
	with ScalaFutures {
	def this() = this(ActorSystem("GroupByActorSpec"))
	implicit val ec = scala.concurrent.ExecutionContext.Implicits.global
	implicit val injector = new DefaultModule(system.settings.config)
	val name = "runtime1"
	val userUUID1 = UUID.randomUUID()
	implicit val runtime = system.actorOf(Props(new RuntimeActor(name, userUUID1)), "coral")
	implicit val timeout = Timeout(100.millis)
	implicit val formats = org.json4s.DefaultFormats

	override def afterAll() {
		TestKit.shutdownActorSystem(system)
	}

	// here is a dependency on the stats actor
	// in the current situation (the CoralActorFactory) it seems unavoidable
	// to depend in some tests on an existing actor instead of injecting a test actor
	def statsGroupBy: GroupByActor = {
		val createJson = parse(
			"""{ "type": "stats",
			  |  "params": { "field": "amount" },
			  |  "group": { "by": "tag" }
			  | }""".stripMargin
		).asInstanceOf[JObject]
		TestActorRef[GroupByActor](GroupByActor(createJson).get).underlyingActor
	}

	"A GroupByActor" should {
		
	}
} 
Example 6
Source File: SampleActorSpec.scala    From coral   with Apache License 2.0
package io.coral.actors.transform

import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestActorRef, TestKit, TestProbe}
import akka.util.Timeout
import io.coral.lib.{NotSoRandom, Random}
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.duration._
import scala.language.postfixOps

class SampleActorSpec(_system: ActorSystem)
	extends TestKit(_system)
	with ImplicitSender
	with WordSpecLike
	with Matchers
	with BeforeAndAfterAll
	with ScalaFutures {
	def this() = this(ActorSystem("SampleActorSpec"))

	override def afterAll() {
		TestKit.shutdownActorSystem(system)
	}

	def arbitrarySampleActor(): SampleActor = {
		val json = parse(
			"""{ "type": "sample",
			  | "params": { "fraction": 0.707 } }
			""".stripMargin)
		val props = SampleActor(json).get
		TestActorRef[SampleActor](props).underlyingActor
	}

	def notSoRandomSampleActor(fraction: Double, randoms: Double*): SampleActor = {
		val json = parse(
			s"""{ "type": "sample", "params": { "fraction": ${fraction} } }
     		 """.stripMargin)
		val source = NotSoRandom(randoms: _*)
		val props = Props(classOf[SampleActor], json, Random(source))
		TestActorRef[SampleActor](props).underlyingActor
	}

	implicit val timeout = Timeout(100 millis)

	"A SampleActor" should {

		"Be instantiated with sample fraction" in {
			val json = parse("""{ "type": "sample", "params": { "fraction": 0.5 }}""".stripMargin)
			val props = SampleActor(json).get
			props.actorClass() should be(classOf[SampleActor])
			val actor = TestActorRef[SampleActor](props).underlyingActor
			actor.fraction should be(0.5)
		}

		"Not be instantiated without fraction or percentage" in {
			val json = parse("""{ "type": "sample", "params": { "bla": "blabla" }}""".stripMargin)
			SampleActor(json) should be(None)
		}

		"Be constructible with a io.coral.lib.Random for random boolean stream" in {
			val actor = notSoRandomSampleActor(fraction = 0.5, randoms = 0.1, 0.49, 0.50, 0.51, 0.8, 0.4)
			actor.next() should be(true)
			actor.next() should be(true)
			actor.next() should be(false)
			actor.next() should be(false)
			actor.next() should be(false)
			actor.next() should be(true)
		}

		"Should trigger true or false according to random binomial sequence" in {
			val actor = notSoRandomSampleActor(fraction = 0.7, randoms = 0.8, 0.6)
			val json = parse( """{ "something": "whatever" }""").asInstanceOf[JObject]

			val result1 = actor.simpleEmitTrigger(json)
			result1 should be(Some(JNothing))

			val result2 = actor.simpleEmitTrigger(json)
			result2 should be(Some(json))
		}

		"Should have trigger and emit cooperate" in {
			val actor = notSoRandomSampleActor(fraction = 0.7, randoms = 0.6, 0.8)
			val ref = actor.self
			val json = parse( """{ "something": "whatever" }""").asInstanceOf[JObject]
			val probe = TestProbe()
			actor.emitTargets += probe.ref
			ref ! json
			probe.expectMsg(json)
			ref ! json
			probe.expectNoMsg(100 millis)
		}
	}
} 
Example 7
Source File: JsonActorSpec.scala    From coral   with Apache License 2.0
package io.coral.actors.transform

import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestActorRef, TestKit}
import akka.util.Timeout
import org.json4s.JsonAST.JValue
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.duration._

class JsonActorSpec(_system: ActorSystem)
	extends TestKit(_system)
	with ImplicitSender
	with WordSpecLike
	with Matchers
	with BeforeAndAfterAll {
	def this() = this(ActorSystem("JsonActorSpec"))

	override def afterAll() {
		TestKit.shutdownActorSystem(system)
	}

	implicit val timeout = Timeout(100.millis)
	def createJsonActor(json: JValue): JsonActor = {
		val props = JsonActor(json).get
		val actorRef = TestActorRef[JsonActor](props)
		actorRef.underlyingActor
	}

	"JsonActor" should {
		"have a standard coral props supplier" in {
			val json = parse("""{ "type": "json", "params": { "template": {} } }""")
			val props = JsonActor(json).get
			props.actorClass shouldBe classOf[JsonActor]
		}

		"read the template parameter" in {
			val template = """{ "a": "someReference" }"""
			val json = parse(s"""{ "type": "json", "params": { "template": $template } }""")
			val actor = createJsonActor(json)
			actor.template.template shouldBe parse(template)
		}

		"emit the json based on template" in {
			val templateJson =
				"""{ "a": "ALPHA",
				  |  "b": "${beta}",
				  |  "c": { "d": 123,
				  |         "e": "${epsilon}"
				  |       },
				  |  "f": 1,
				  |  "g": 1.0
				  |}""".stripMargin
			val json = parse(s"""{ "type": "json", "params": { "template": ${templateJson} } }""")
			val actor = createJsonActor(json)
			val triggerJson = parse(
				"""{ "beta": "xyz",
				  |  "epsilon": 987
				  |}""".stripMargin)
			val expectedJson = parse(
				"""{ "a": "ALPHA",
				  |  "c": { "d": 123,
				  |         "e": 987
				  |       },
				  |  "f": 1,
				  |  "b": "xyz",
				  |  "g": 1.0
				  |}""".stripMargin)
			actor.simpleEmitTrigger(triggerJson.asInstanceOf[JObject]) shouldBe Some(expectedJson)
		}
	}
} 
Example 8
Source File: LinearRegressionActorSpec.scala    From coral   with Apache License 2.0
package io.coral.actors.transform

import akka.actor.{ActorRef, ActorSystem}
import akka.testkit.{TestProbe, TestActorRef, ImplicitSender, TestKit}
import io.coral.actors.CoralActorFactory
import io.coral.api.DefaultModule
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import akka.util.Timeout
import org.json4s.native.Serialization.write
import scala.concurrent.duration._

class LinearRegressionActorSpec(_system: ActorSystem)
	extends TestKit(_system)
	with ImplicitSender
	with WordSpecLike
	with Matchers
	with BeforeAndAfterAll {
	def this() = this(ActorSystem("LinearRegressionActorSpec"))

	implicit val timeout = Timeout(100.millis)
	implicit val injector = new DefaultModule(system.settings.config)

	override def afterAll() {
		TestKit.shutdownActorSystem(system)
	}

	def createLinearRegressionActor(intercept: Double, weights: Map[String, Double]) = {
		implicit val formats = DefaultFormats
		val str =
			s"""{ "type":"linearregression",
			   |"params": { "intercept": $intercept,
			   |"weights": ${write(weights)}
			   |}}""".stripMargin

		val createJson = parse(str).asInstanceOf[JObject]
		val props = CoralActorFactory.getProps(createJson).get
		val actorTestRef = TestActorRef[LinearRegressionActor](props)

		val probe = TestProbe()
		actorTestRef.underlyingActor.emitTargets += probe.ref
		(actorTestRef, probe)
	}

	"LinearRegressionActor" should {
		"Instantiate from companion object" in {
			val (actor, _) = createLinearRegressionActor(0, Map("salary" -> 2000))
			actor.underlyingActor.intercept should be(0)
			actor.underlyingActor.weights should be(Map("salary" -> 2000))
		}

		"process trigger data when all the features are available even with different order" in {
			val (actor, probe) = createLinearRegressionActor(0, Map("age" -> 0.2, "salary" -> 0.1))
			val message = parse( s"""{"salary": 4000, "age": 40}""").asInstanceOf[JObject]
			actor ! message

			probe.expectMsg(parse( s"""{"score": 408.0, "salary": 4000, "age": 40}"""))
		}

		"emit when score is calculated" in {
			val (actor, probe) = createLinearRegressionActor(0, Map("salary" -> 10))
			val message = parse( s"""{"salary": 2000}""").asInstanceOf[JObject]
			actor ! message

			probe.expectMsg(parse( s"""{"score": 20000.0, "salary": 2000}"""))
		}

		"not emit when keys are missing" in {
			val (actor, probe) = createLinearRegressionActor(0, Map("age" -> 0.2, "salary" -> 10))
			val message = parse( s"""{"salary": 2000}""").asInstanceOf[JObject]
			actor ! message

			probe.expectNoMsg
		}
	}
} 
Example 9
Source File: MinMaxActorSpec.scala    From coral   with Apache License 2.0
package io.coral.actors.transform

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.{TestProbe, ImplicitSender, TestActorRef, TestKit}
import akka.util.Timeout
import io.coral.actors.CoralActorFactory
import io.coral.api.DefaultModule
import org.json4s.JsonDSL._
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import scala.concurrent.duration._

@RunWith(classOf[JUnitRunner])
class MinMaxActorSpec(_system: ActorSystem)
	extends TestKit(_system)
	with ImplicitSender
	with WordSpecLike
	with Matchers
	with BeforeAndAfterAll {
	implicit val timeout = Timeout(100.millis)
	implicit val formats = org.json4s.DefaultFormats
	implicit val injector = new DefaultModule(system.settings.config)
	def this() = this(ActorSystem("ZscoreActorSpec"))

	override def afterAll() {
		TestKit.shutdownActorSystem(system)
	}

	"A MinMaxActor" must {
		val createJson = parse(
			"""{ "type": "minmax", "params": { "field": "field1", "min": 10.0, "max": 13.5 }}"""
				.stripMargin).asInstanceOf[JObject]

		implicit val injector = new DefaultModule(system.settings.config)

		val props = CoralActorFactory.getProps(createJson).get
		val threshold = TestActorRef[MinMaxActor](props)

		// subscribe the testprobe for emitting
		val probe = TestProbe()
		threshold.underlyingActor.emitTargets += probe.ref

		"Emit the minimum when lower than the min" in {
			val json = parse( """{"field1": 7 }""").asInstanceOf[JObject]
			threshold ! json
			probe.expectMsg(parse( """{ "field1": 10.0 }"""))
		}

		"Emit the maximum when higher than the max" in {
			val json = parse( """{"field1": 15.3 }""").asInstanceOf[JObject]
			threshold ! json
			probe.expectMsg(parse( """{"field1": 13.5 }"""))
		}

		"Emit the value itself when between the min and the max" in {
			val json = parse( """{"field1": 11.7 }""").asInstanceOf[JObject]
			threshold ! json
			probe.expectMsg(parse( """{"field1": 11.7 }"""))
		}

		"Emit object unchanged when key is not present in triggering json" in {
			val json = parse( """{"otherfield": 15.3 }""").asInstanceOf[JObject]
			threshold ! json
			probe.expectMsg(parse( """{"otherfield": 15.3 }"""))
		}
	}
} 
Example 10
Source File: HelloServiceImpl.scala    From scala-tutorials   with MIT License
package com.baeldung.hello.impl

import akka.NotUsed
import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.cluster.routing.{ClusterRouterGroup, ClusterRouterGroupSettings}
import akka.pattern.ask
import akka.routing.ConsistentHashingGroup
import akka.stream.scaladsl.Source
import akka.util.Timeout
import com.baeldung.hello.akka.{Job, JobAccepted, JobStatus, Worker}
import com.baeldung.hello.api.HelloService
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.pubsub.{PubSubRegistry, TopicId}

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._

class HelloServiceImpl(system: ActorSystem, pubSub: PubSubRegistry)(implicit ec: ExecutionContext)
  extends HelloService {

  if (Cluster.get(system).selfRoles("worker-node")) {
    system.actorOf(Worker.props(pubSub), "worker")
  }

  val workerRouter = {
    val paths = List("/user/worker")
    val groupConf = ConsistentHashingGroup(paths, hashMapping = {
      case Job(_, task, _) => task
    })
    val routerProps = ClusterRouterGroup(
      groupConf,
      ClusterRouterGroupSettings(
        totalInstances = 1000,
        routeesPaths = paths,
        allowLocalRoutees = true,
        useRoles = Set("worker-node")
      )
    ).props
    system.actorOf(routerProps, "workerRouter")
  }

  override def submit(): ServiceCall[Job, JobAccepted] = ServiceCall {
    job =>
      //Future{JobAccepted(job.jobId)}
      implicit val timeout = Timeout(5.seconds)
      (workerRouter ? job).mapTo[JobAccepted]
  }

  override def status(): ServiceCall[NotUsed, Source[JobStatus, NotUsed]] = ServiceCall {
    _ =>
      val topic = pubSub.refFor(TopicId[JobStatus]("job-status"))
      Future.successful(topic.subscriber)
  }

} 
Example 11
Source File: TFServingModelServer.scala    From model-serving-tutorial   with Apache License 2.0
package com.lightbend.modelserving.tensorflowserving

import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.scaladsl.adapter._
import akka.actor.typed.{ActorRef, ActorSystem}
import akka.http.scaladsl.Http
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.scaladsl.Sink
import akka.stream.typed.scaladsl.{ActorFlow, ActorMaterializer}
import akka.util.Timeout
import com.lightbend.modelserving.configuration.ModelServingConfiguration
import com.lightbend.modelserving.model.ServingResult
import com.lightbend.modelserving.winemodel.DataRecord
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.ByteArrayDeserializer

import scala.concurrent.duration._
import scala.util.Success


object TFServingModelServer {

  import ModelServingConfiguration._

  // Initialization

  implicit val modelServer = ActorSystem(
    Behaviors.setup[TFModelServerActor](
      context => new TFModelServerBehaviour(context)), "ModelServing")

  implicit val materializer = ActorMaterializer()
  implicit val executionContext = modelServer.executionContext
  implicit val askTimeout = Timeout(30.seconds)

  // Configuration properties for the Kafka topic.
  val dataSettings = ConsumerSettings(modelServer.toUntyped, new ByteArrayDeserializer, new ByteArrayDeserializer)
    .withBootstrapServers(KAFKA_BROKER)
    .withGroupId(DATA_GROUP)
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

  def main(args: Array[String]): Unit = {

    println(s"Akka application that uses TensorFlow Serving, brokers $KAFKA_BROKER")

    // Data stream processing
    Consumer.atMostOnceSource(dataSettings, Subscriptions.topics(DATA_TOPIC))
      .map(record => DataRecord.wineFromByteArray(record.value)).collect { case Success(a) => a }
      .via(ActorFlow.ask(1)(modelServer)((elem, replyTo : ActorRef[Option[ServingResult[Double]]]) => new ServeData(replyTo, elem)))
      .collect{ case Some(result) => result}
      .runWith(Sink.foreach(result =>
        println(s"Model served in ${System.currentTimeMillis() - result.submissionTs} ms, with result ${result.result} " +
          s"(model ${result.name}, data type ${result.dataType})")))
    // Rest Server
    startRest(modelServer)
  }

  def startRest(modelServerManager: ActorSystem[TFModelServerActor]): Unit = {

    implicit val timeout = Timeout(10.seconds)
    implicit val system = modelServerManager.toUntyped

    val host = "0.0.0.0"
    val port = MODELSERVING_PORT
    val routes = TFQueriesAkkaHttpResource.storeRoutes(modelServerManager)(modelServerManager.scheduler)

    val _ = Http().bindAndHandle(routes, host, port) map
      { binding =>
        println(s"Starting models observer on port ${binding.localAddress}") } recover {
      case ex =>
        println(s"Models observer could not bind to $host:$port - ${ex.getMessage}")
    }
  }
} 
Example 12
Source File: TFQueriesAkkaHttpResource.scala    From model-serving-tutorial   with Apache License 2.0
package com.lightbend.modelserving.tensorflowserving

import akka.actor.Scheduler
import akka.actor.typed.ActorRef
import akka.actor.typed.scaladsl.AskPattern._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.util.Timeout
import com.lightbend.modelserving.model.ModelToServeStats
import de.heikoseeberger.akkahttpjackson.JacksonSupport

import scala.concurrent.duration._

object TFQueriesAkkaHttpResource extends JacksonSupport {

  implicit val askTimeout = Timeout(30.seconds)

  def storeRoutes(modelserver: ActorRef[TFModelServerActor])(implicit scheduler: Scheduler) : Route =
    get {
      // Get statistics
      path("state") {
        onSuccess(modelserver ? ((replyTo: ActorRef[ModelToServeStats]) => GetState(replyTo))) {
          case stats : ModelToServeStats =>
            complete(stats)
        }
      }
    }
} 
Example 13
Source File: Internal.scala    From cave   with MIT License
package controllers

import worker.Coordinator
import akka.actor.Inbox
import akka.pattern.ask
import akka.util.Timeout
import init.Init
import play.api.libs.json.Json
import play.api.mvc._
import scala.concurrent.duration._

object Internal extends Controller {

  def actorSystem = Init.system

  def coordinator = Init.coordinator

  def healthCheck() = Action { request =>
    // TODO: add real checks
    Ok("healthy")
  }

  def status() = Action.async { request =>
    implicit val inbox = Inbox.create(actorSystem)
    implicit val timeout = Timeout(3.seconds)
    import scala.concurrent.ExecutionContext.Implicits.global

    ask(coordinator, Coordinator.StatusRequest).mapTo[Coordinator.StatusResponse] map { response =>
      Ok(Json.toJson(response))
    }
  }
} 
Example 14
Source File: Internal.scala    From cave   with MIT License
package controllers

import actors.Coordinator
import akka.actor.Inbox
import akka.pattern.ask
import akka.util.Timeout
import init.Init
import play.api.libs.json.Json
import play.api.mvc._

import scala.concurrent.duration._

object Internal extends Controller {

  def actorSystem = Init.system
  def coordinator = Init.coordinator

  def healthCheck() = Action { request =>
    // TODO: add real checks
    Ok("healthy")
  }

  def status() = Action.async { request =>
    implicit val inbox = Inbox.create(actorSystem)
    implicit val timeout = Timeout(3.seconds)
    import scala.concurrent.ExecutionContext.Implicits.global

    ask(coordinator, Coordinator.StatusRequest).mapTo[Coordinator.StatusResponse] map { response =>
      Ok(Json.toJson(response))
    }
  }
} 
Example 15
Source File: Scheduler.scala    From cave   with MIT License
package actors

import java.util.concurrent.{Executor, TimeUnit}

import akka.actor.{Actor, ActorLogging}
import akka.pattern.ask
import akka.util.Timeout
import com.cave.metrics.data.evaluator.AlertParser
import com.cave.metrics.data.{Check, Schedule}
import init.{AwsWrapper, Init}
import org.joda.time.format.ISODateTimeFormat
import org.joda.time.{Minutes, LocalTime, DateTime, DateTimeZone}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object Scheduler {
  object DoWork
  object Die
  case class NotificationUrlChange(newUrl: String)
}
class Scheduler(schedule: Schedule, awsWrapper: AwsWrapper) extends Actor with ActorLogging with AlertParser {

  private[actors] def leader = Init.leader
  var notificationUrl: String = schedule.notificationUrl
  implicit val timeout = Timeout(2, TimeUnit.SECONDS)

  val (waitTime, period) = getSchedule(schedule.alert.period)

  val Formatter = ISODateTimeFormat.dateTimeNoMillis()

  implicit val executor = context.dispatcher.asInstanceOf[Executor with ExecutionContext]
  private val queueCheckSchedule = context.system.scheduler.schedule(waitTime, period, self, Scheduler.DoWork)

  override def receive = {
    case Scheduler.DoWork =>
      leader ? Leadership.IsLeader onComplete {
        case scala.util.Success(imLeader: Boolean) =>
          if (imLeader) {
            awsWrapper.sendMessage(Check(Schedule(schedule.orgName, schedule.teamName, schedule.clusterName, notificationUrl, schedule.alert), now()))
          }

        case scala.util.Success(e) =>
          log.error("Unexpected result returned by the leader actor: " + e)

        case scala.util.Failure(t) =>
          log.error("Failed to query the leader actor, error was " + t)
      }


    case Scheduler.NotificationUrlChange(url) =>
      log.debug(s"Updating the notification URL, from $notificationUrl to $url.")
      notificationUrl = url

    case Scheduler.Die =>
      context stop self
  }

  override def postStop(): Unit = queueCheckSchedule.cancel()

  
  private[actors] def getSchedule(alertPeriod: String): (FiniteDuration, FiniteDuration) =
    parseAll(duration, alertPeriod) match {
      case Success(p, _) => (0.minutes, p)

      case NoSuccess(_, message) =>
        parseAll(daily, alertPeriod) match {
          case Success(time, _) => (getWait(nowLocal(), time), 1.day)

          case NoSuccess(_, message2) =>
            sys.error(s"Unexpected alert period $alertPeriod. Not a duration ($message) and not a daily scheduler ($message2).")
        }
    }

  private[actors] def getWait(now: LocalTime, until: LocalTime): FiniteDuration = {
    val wait = Minutes.minutesBetween(now, until).getMinutes
    val minutes = if (wait < 0) 1440 + wait else wait
    minutes.minutes
  }
} 
Example 16
Source File: CoordinatorTests.scala    From sparkplug   with MIT License
package springnz.sparkplug.client

import akka.actor.{ ExtendedActorSystem, ActorRef, ActorSystem }
import akka.pattern.ask
import akka.testkit.{ ImplicitSender, TestKit }
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import org.scalatest._
import springnz.sparkplug.executor.MessageTypes.{ JobFailure, JobRequest, JobSuccess, ShutDown }

import scala.concurrent.Await
import scala.concurrent.duration._

import scala.collection.JavaConverters._

class CoordinatorTests(_system: ActorSystem)
    extends TestKit(_system) with ImplicitSender with WordSpecLike with BeforeAndAfterAll with Matchers {

  def this() = this(ActorSystem(Constants.actorSystemName, ConfigFactory.parseMap(Map(
    "akka.remote.netty.tcp.port" -> new Integer(0)).asJava).withFallback(ClientExecutor.defaultClientAkkaConfig)))

  var coordinator: ActorRef = null

  "client coordinator" should {

    "successfuly execute a job request" in {
      val request = JobRequest("springnz.sparkplug.executor.LetterCountPlugin", None)
      coordinator ! request
      expectMsg[JobSuccess](30.seconds, JobSuccess(request, (2, 2)))
    }

    "successfuly execute a job request after a failure" in {
      val invalidRequest = JobRequest("springnz.sparkplug.executor.InvalidClass", None)
      coordinator ! invalidRequest
      expectMsgType[JobFailure](30.seconds)
      val goodRequest = JobRequest("springnz.sparkplug.executor.LetterCountPlugin", None)
      coordinator ! goodRequest
      expectMsg[JobSuccess](30.seconds, JobSuccess(goodRequest, (2, 2)))
    }

    "work with the ask pattern as well" in {
      implicit val timeout = Timeout(30.seconds)
      val request = JobRequest("springnz.sparkplug.executor.LetterCountPlugin", None)
      val replyFuture = coordinator ? request
      val result = Await.result(replyFuture, 30.seconds)
      result shouldBe JobSuccess(request, (2, 2))
    }

  }

  override def beforeAll {
    val configSection = s"sparkplug.${springnz.sparkplug.executor.Constants.defaultAkkaRemoteConfigSection}"
    val port = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress.port.get
    val akkaClientConfig = ConfigFactory.parseMap(Map(
      "akka.remote.netty.tcp.port" -> new Integer(port)).asJava).withFallback(ClientExecutor.defaultClientAkkaConfig)
    coordinator = system.actorOf(Coordinator.props(None,
      akkaRemoteConfig = Some(ConfigFactory.load.getConfig(configSection)),
      akkaClientConfig = akkaClientConfig), "TestCoordinator")
  }

  override def afterAll {
    system.actorSelection(s"/user/TestCoordinator") ! ShutDown
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)
  }

} 
Example 17
Source File: TestSpec.scala    From reactive-programming   with Apache License 2.0
package com.test

import java.io.IOException
import java.util.UUID

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.exceptions.TestFailedException
import org.scalatest._
import rx.lang.scala._

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContextExecutor, Future }
import scala.util.{ Random ⇒ Rnd, Try }

object Random {
  def apply(): Rnd = new Rnd()
}

trait TestSpec extends FlatSpec with Matchers with ScalaFutures with TryValues with OptionValues with Eventually with BeforeAndAfterAll {
  implicit val system: ActorSystem = ActorSystem("test")
  implicit val ec: ExecutionContextExecutor = system.dispatcher
  val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 50.seconds)
  implicit val timeout = Timeout(50.seconds)

  override protected def afterAll(): Unit = {
    system.terminate()
  }

  
  def cleanup(actors: ActorRef*): Unit = {
    // `probe` is not defined in this extracted snippet; a local TestProbe is assumed here
    // so that the watch call compiles.
    val probe = TestProbe()
    actors.foreach { (actor: ActorRef) ⇒
      actor ! PoisonPill
      probe watch actor
    }
  }

  implicit class PimpedByteArray(self: Array[Byte]) {
    def getString: String = new String(self)
  }

  implicit class PimpedFuture[T](self: Future[T]) {
    def toTry: Try[T] = Try(self.futureValue)
  }

  implicit class PimpedObservable[T](self: Observable[T]) {
    def waitFor: Unit = {
      self.toBlocking.toIterable.last
    }
  }

  implicit class MustBeWord[T](self: T) {
    def mustBe(pf: PartialFunction[T, Unit]): Unit =
      if (!pf.isDefinedAt(self)) throw new TestFailedException("Unexpected: " + self, 0)
  }

  object Socket { def apply() = new Socket }
  class Socket {
    def readFromMemory: Future[Array[Byte]] = Future {
      Thread.sleep(100) // sleep 100 millis
      "fromMemory".getBytes
    }

    def send(payload: Array[Byte], from: String, failed: Boolean): Future[Array[Byte]] =
      if (failed) Future.failed(new IOException(s"Network error: $from"))
      else {
        Future {
          Thread.sleep(250) // sleep 250 millis, not real life time, but hey
          s"${payload.getString}->$from".getBytes
        }
      }

    def sendToEurope(payload: Array[Byte], failed: Boolean = false): Future[Array[Byte]] =
      send(payload, "fromEurope", failed)

    def sendToUsa(payload: Array[Byte], failed: Boolean = false): Future[Array[Byte]] =
      send(payload, "fromUsa", failed)
  }
} 
Example 18
Source File: ExplainApi.scala    From iep-apps   with Apache License 2.0
package com.netflix.atlas.druid

import akka.actor.ActorRefFactory
import akka.http.scaladsl.model.HttpEntity
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.model.MediaTypes
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.RouteResult
import akka.pattern.ask
import akka.util.Timeout
import com.netflix.atlas.akka.CustomDirectives._
import com.netflix.atlas.akka.WebApi
import com.netflix.atlas.druid.ExplainApi.ExplainRequest
import com.netflix.atlas.eval.graph.Grapher
import com.netflix.atlas.json.Json
import com.netflix.atlas.webapi.GraphApi.DataRequest
import com.typesafe.config.Config

import scala.concurrent.duration._

class ExplainApi(config: Config, implicit val actorRefFactory: ActorRefFactory) extends WebApi {

  private val grapher: Grapher = Grapher(config)

  private val dbRef = actorRefFactory.actorSelection("/user/db")

  private implicit val ec = actorRefFactory.dispatcher

  override def routes: Route = {
    endpointPath("explain" / "v1" / "graph") {
      get { ctx =>
        val graphCfg = grapher.toGraphConfig(ctx.request)
        dbRef
          .ask(ExplainRequest(DataRequest(graphCfg)))(Timeout(10.seconds))
          .map { response =>
            val json = Json.encode(response)
            val entity = HttpEntity(MediaTypes.`application/json`, json)
            RouteResult.Complete(HttpResponse(StatusCodes.OK, entity = entity))
          }
      }
    }
  }
}

object ExplainApi {
  case class ExplainRequest(dataRequest: DataRequest)
} 
Example 19
Source File: UpdateLogger.scala    From Scalaprof   with GNU General Public License v2.0
package com.phasmid.hedge_fund.actors

import akka.actor.{ ActorRef, Props }
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import scala.concurrent.Await
import scala.language.postfixOps
import com.phasmid.hedge_fund.model.Model
import com.phasmid.hedge_fund.portfolio._


class UpdateLogger(blackboard: ActorRef) extends BlackboardActor(blackboard) {

  var portfolio = new Portfolio("", Nil)

  override def receive =
    {
      case Confirmation(id, model, attrs) =>
        log.debug(s"update for identifier: $id")
        if (model.isOption)
          processOption(id, model, attrs)
        else
          processStock(id, model)

      case PortfolioUpdate(p) =>
        log.debug(s"portfolio update for: ${p.name}")
        portfolio = p
        showPortfolio

      case m => super.receive(m)
    }

  implicit val timeout = Timeout(5 seconds)

  def processStock(identifier: String, model: Model) = {
    model.getKey("price") match {
      case Some(p) => {
        // sender is the MarketData actor
        val future = (sender ? SymbolQuery(identifier, List(p))).mapTo[QueryResponse]
        // TODO why are we waiting for this here?
        val result = Await.result(future, timeout.duration)
        result match {
          case QueryResponseValid(k,a) =>
            a map {
              case (k, v) => log.info(s"$identifier attribute $k has been updated to: $v")
            }
          case _ =>
        }
      }
      case None => log.warning(s"'price' not defined in model")
    }
  }

  def processOption(identifier: String, model: Model, attributes: Map[String, Any]) = {
    val key = "underlying"
    attributes.get(key) match {
      case Some(value) =>
        val future = (blackboard ? OptionQuery("id", value)).mapTo[QueryResponse]
        // TODO why are we waiting for this here?
        val result = Await.result(future, timeout.duration)
        result match {
          case QueryResponseValid(k,a) =>
              println(s"Action Required: re: qualifying option $identifier with underlying symbol: $k and attributes: $a")
          case _ =>
        }
      case None => log.warning(s"processOption: value not present for $key")
    }
  }

  def showPortfolio {
    println(s"Portfolio for ${portfolio.name}")
    portfolio.positions foreach { showPosition(_) }
  }

  def showPosition(position: Position) {
    println(s"position for ${position.symbol}: quantity=${position.quantity}; options=")
    position.contracts foreach { showContract(_) }
  }

  def showContract(contract: Contract) {
    println(s"contract: $contract")
  }
} 
Example 20
Source File: JsonYQLParserSpec.scala    From Scalaprof   with GNU General Public License v2.0
package com.phasmid.hedge_fund.actors

import akka.actor.{ ActorSystem, Actor, Props, ActorRef }
import akka.testkit._
import org.scalatest.{ WordSpecLike, Matchers, BeforeAndAfterAll }
import scala.io.Source
import scala.concurrent.duration._
import spray.http._
import spray.http.MediaTypes._
import org.scalatest.Inside
import scala.language.postfixOps
import spray.http.ContentType.apply


class JsonYQLParserSpec(_system: ActorSystem) extends TestKit(_system) with ImplicitSender
    with WordSpecLike with Matchers with Inside with BeforeAndAfterAll {

  def this() = this(ActorSystem("JsonYQLParserSpec"))

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }

  import scala.language.postfixOps
  val json = Source.fromFile("src/test/resources/yqlExample.json") mkString

  "json conversion" in {
    val body = HttpEntity(MediaTypes.`application/json`, json.getBytes())
    val ok = JsonYQLParser.decode(body) match {
      case Right(x) =>
        val count = x.query.count
        count should equal(4)
        x.query.results.quote.length should equal(count)
        x.query.results.get(count - 1, "symbol") should matchPattern { case Some("MSFT") => }

      case Left(x) =>
        fail("decoding error: " + x)
    }
  }

  "send back" in {
    val blackboard = system.actorOf(Props.create(classOf[MockYQLBlackboard], testActor), "blackboard")
    val entityParser = _system.actorOf(Props.create(classOf[EntityParser], blackboard), "entityParser")
    val entity = HttpEntity(MediaTypes.`application/json`, json.getBytes())
    entityParser ! EntityMessage("json:YQL", entity)
    val msg = expectMsgClass(3.seconds, classOf[QueryResponseValid])
    println("msg received: " + msg)
    msg should matchPattern {
      case QueryResponseValid("MSFT", _) =>
    }
    inside(msg) {
      case QueryResponseValid(symbol, attributes) => attributes.get("Ask") should matchPattern { case Some("46.17") => }
    }
  }

}

import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import scala.concurrent.Await
import com.phasmid.hedge_fund.model.Model

class MockYQLUpdateLogger(blackboard: ActorRef) extends UpdateLogger(blackboard) {
  override def processStock(identifier: String, model: Model) = {
    model.getKey("price") match {
      case Some(p) => {
        // sender is the MarketData actor
        val future = sender ? SymbolQuery(identifier, List(p))
        val result = Await.result(future, timeout.duration).asInstanceOf[QueryResponseValid]
        result.attributes map {
          case (k, v) =>
            log.info(s"$identifier attribute $k has been updated to: $v")
            blackboard ! result
        }
      }
      case None => log.warning(s"'price' not defined in model")
    }
  }
}

class MockYQLBlackboard(testActor: ActorRef) extends Blackboard(Map(classOf[KnowledgeUpdate] -> "marketData", classOf[SymbolQuery] -> "marketData", classOf[OptionQuery] -> "marketData", classOf[CandidateOption] -> "optionAnalyzer", classOf[Confirmation] -> "updateLogger"),
  Map("marketData" -> classOf[MarketData], "optionAnalyzer" -> classOf[OptionAnalyzer], "updateLogger" -> classOf[MockYQLUpdateLogger])) {

  override def receive =
    {
      case msg: Confirmation => msg match {
        // Cut down on the volume of messages
        case Confirmation("MSFT", _, _) => super.receive(msg)
        case _ =>
      }
      case msg: QueryResponseValid => testActor forward msg

      case msg => super.receive(msg)
    }
} 
Example 21
Source File: Application.scala    From Scalaprof   with GNU General Public License v2.0
package controllers

import play.api._
import play.api.mvc._
import akka.actor.{ActorSystem, Props}
import akka.util.Timeout
import akka.pattern.ask
import scala.concurrent._
import scala.concurrent.duration._
import scala.util._
import edu.neu.coe.scala.numerics.Rational
import akka.actor.ActorRef
import com.typesafe.config.{ ConfigFactory, Config }
import actors._
import models._
import spire.math.Real

class Application extends Controller {
  
  val config = ConfigFactory.load()
  val which = config.getString("calculator")
  
  import play.api.libs.concurrent.Execution.Implicits.defaultContext
  implicit val timeout: Timeout = Timeout(10 seconds)
  implicit val system = ActorSystem("RPN-Calculator")
  val setup = which match {
    case "rational" => Application.getSetupForRational
    case "double" => Application.getSetupForDouble
    case "spire" => Application.getSetupForSpire
    case _ => Console.err.println(s"Unsupported calculator type: $which"); Application.getSetupForRational
  }
  val calculator = system.actorOf(setup._1, setup._2)
  val name: String = setup._3
  println(s"$name is ready")

  def index() = Action.async {
    val xsf = (calculator ? View).mapTo[Seq[_]]
    xsf map {
      case xs => Ok(s"$name: calculator has the following elements (starting with top): $xs")
    }
  }

  def command(s: String) = Action.async {
    val xtf = (calculator ? s).mapTo[Try[_]] 
    xtf map {
      case Success(x) => Ok(s"""$name: you have entered "$s" and got back $x""")
      case Failure(e) => if (s=="clr") Ok("$name: cleared") else Ok(s"""$name: you entered "$s" which caused error: $e""")
//      case Failure(e) => if (s=="clr") redirect("/") else  Ok(s"""$name: you entered "$s" which caused error: $e""")
    }
  }
}

object Application {
  // TODO move these to model classes
  def getSetupForDouble(implicit system: ActorSystem) = {
    implicit val lookup: String => Option[Double] = DoubleMill.constants.get _
    implicit val conv: String => Try[Double] = DoubleMill.valueOf _
    implicit val parser = new ExpressionParser[Double](conv, lookup)
    val mill = DoubleMill()
    // Note: the following pattern should NOT be used within an actor
    val props = Props(new Calculator(mill, parser))
    // TODO for these methods, return mill and parser instead of props
    (props, "doubleCalculator", "Double Calculator")
  }

  // CONSIDER This assumes that we have Rational in our classpath already.
  // I'd like to try the possibility of dynamically loading the Rational stuff.
  // But, that's going to be very tricky, so we'll leave it for now.
  def getSetupForRational(implicit system: ActorSystem) = {
    implicit val lookup: String => Option[Rational] = RationalMill.constants.get _
    implicit val conv: String => Try[Rational] = RationalMill.valueOf _
    implicit val parser = new ExpressionParser[Rational](conv, lookup)
    val mill = RationalMill()
    // Note: the following pattern should NOT be used within an actor
    val props = Props(new Calculator(mill, parser))
    (props, "rationalCalculator", "Rational Calculator")
  }

  // CONSIDER This assumes that we have Spire in our classpath already.
  def getSetupForSpire(implicit system: ActorSystem) = {
    import spire.implicits._
    import spire.math._
    implicit val lookup: String => Option[Real] = SpireMill.constants.get _
    implicit val conv: String => Try[Real] = SpireMill.valueOf _
    implicit val parser = new ExpressionParser[Real](conv, lookup)
    val mill = SpireMill()
    // Note: the following pattern should NOT be used within an actor
    val props = Props(new Calculator(mill, parser))
    (props, "spireCalculator", "Spire Calculator")
  }
} 
Example 22
Source File: CalculatorSpec.scala    From Scalaprof   with GNU General Public License v2.0
package actors

import org.scalatest._
import akka.testkit.TestActorRef
import scala.concurrent.duration._
import scala.concurrent.Await
import akka.pattern.ask
import scala.util._
import scala.io.Source
import scala.concurrent._
import com.typesafe.config.{ ConfigFactory, Config }
import akka.actor.{ Actor, ActorSystem, Props, ActorRef }
import akka.util.Timeout
import java.net.URL
import org.scalatest.concurrent._
import org.scalatest.time._
import edu.neu.coe.scala.numerics.Rational
import models._


class CalculatorSpec extends FlatSpec with Matchers with Futures with ScalaFutures with Inside {
  implicit val system = ActorSystem("CountWords")  
  import play.api.libs.concurrent.Execution.Implicits.defaultContext
  implicit val timeout: Timeout = Timeout(10 seconds)

  "Rational Calculator" should "yield empty list for /" in {
      val lookup: String=>Option[Rational] = RationalMill.constants.get _
      val conv: String=>Try[Rational] = RationalMill.valueOf _
      val parser = new ExpressionParser[Rational](conv,lookup)
      val mill: Mill[Rational] = RationalMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xsf = (taf ? View).mapTo[Seq[Rational]]
      val nf = xsf map { case xs => xs.size }
      whenReady(nf, timeout(Span(6, Seconds))) { case 0 => }
  }
  it should "yield 1 for 1" in {
      val lookup: String=>Option[Rational] = RationalMill.constants.get _
      val conv: String=>Try[Rational] = RationalMill.valueOf _
      val parser = new ExpressionParser[Rational](conv,lookup)
      val mill: Mill[Rational] = RationalMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xtf = (taf ? "1").mapTo[Try[Rational]]
      whenReady(xtf, timeout(Span(6, Seconds))) { case Success(Rational(1,1)) => }
  }
  it should "yield 1 when given floating point problem" in {
      val lookup: String=>Option[Rational] = RationalMill.constants.get _
      val conv: String=>Try[Rational] = RationalMill.valueOf _
      val parser = new ExpressionParser[Rational](conv,lookup)
      val mill: Mill[Rational] = RationalMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xtf = (taf ? "0.2 0.1 + 10 * 3 /").mapTo[Try[Rational]]
      whenReady(xtf, timeout(Span(6, Seconds))) { case Success(Rational(1,1)) => }
  }
  "Double Calculator" should "yield empty list for /" in {
      val lookup: String=>Option[Double] = DoubleMill.constants.get _
      val conv: String=>Try[Double] = DoubleMill.valueOf _
      val parser = new ExpressionParser[Double](conv,lookup)
      val mill: Mill[Double] = DoubleMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xsf = (taf ? View).mapTo[Seq[Double]]
      val nf = xsf map { case xs => xs.size }
      whenReady(nf, timeout(Span(6, Seconds))) { case 0 => }
  }
  
  // This test suffers from a very peculiar bug which might even be a bug
  // in the Scala compiler. Kudos to you if you can fix it!!
  ignore should "yield 1 for 1" in {
      val lookup: String=>Option[Double] = DoubleMill.constants.get _
      val conv: String=>Try[Double] = DoubleMill.valueOf _
      val parser = new ExpressionParser[Double](conv,lookup)
      val mill: Mill[Double] = DoubleMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xtf = (taf ? "1").mapTo[Try[Double]]
      whenReady(xtf, timeout(Span(6, Seconds))) { case Success(1.0) => }
  }
} 
Example 23
Source File: Retry.scala    From seahorse-workflow-executor   with Apache License 2.0
package io.deepsense.commons.utils

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout

trait Retry[T] {

  def work: Future[T]

  def retryInterval: FiniteDuration
  def retryLimit: Int
  def actorSystem: ActorSystem
  // the timeout should exceed the retryLimit * retryInterval + (retryLimit + 1) * avgWorkDuration
  // otherwise the ask in tryWork method may timeout before all the retries have been attempted
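  // For example (illustrative numbers, not from this project): with retryLimit = 3,
  // retryInterval = 1.second and an average work duration of roughly 2 seconds,
  // the timeout should exceed 3 * 1s + (3 + 1) * 2s = 11 seconds.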
  implicit def timeout: Timeout
  def workDescription: Option[String]

  private lazy val retryActor = actorSystem.actorOf(Props(new RetryActor[T](
    retryInterval,
    retryLimit,
    work,
    workDescription
  )))

  def tryWork: Future[T] = (retryActor ? RetryActor.Trigger).asInstanceOf[Future[T]]

} 
Example 24
Source File: NotebookPoller.scala    From seahorse-workflow-executor   with Apache License 2.0
package io.deepsense.commons.rest.client

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}

import akka.actor.ActorSystem
import akka.util.Timeout
import spray.client.pipelining._
import spray.http.StatusCodes

import io.deepsense.commons.models.Id
import io.deepsense.commons.utils.Retry
import io.deepsense.commons.utils.RetryActor.RetriableException

class NotebookPoller private (
    notebookRestClient: NotebookRestClient,
    pollInterval: FiniteDuration,
    retryCountLimit: Int,
    workflowId: Id,
    nodeId: Id,
    endpointPath: String)(
    implicit override val actorSystem: ActorSystem,
    override val timeout: Timeout)
  extends Retry[Array[Byte]] {

  override val retryInterval: FiniteDuration = pollInterval

  override val retryLimit: Int = retryCountLimit

  override val workDescription: Option[String] = Some("notebook data retrieval")

  override def work: Future[Array[Byte]] = {
    implicit val ec: ExecutionContext = actorSystem.dispatcher

    notebookRestClient.fetchHttpResponse(Get(endpointPath)).flatMap { resp =>
      resp.status match {
        case StatusCodes.NotFound =>
          Future.failed(RetriableException(s"File containing output data for workflow " +
            s"s$workflowId and node s$nodeId not found", None))
        case StatusCodes.OK =>
          Future.successful(resp.entity.data.toByteArray)
        case statusCode =>
          Future.failed(NotebookHttpException(resp, s"Notebook server responded with $statusCode " +
            s"when asked for file for workflow $workflowId and node $nodeId"))
      }
    }
  }
}

object NotebookPoller {
  def apply(
      notebookRestClient: NotebookRestClient,
      pollInterval: FiniteDuration,
      retryCountLimit: Int,
      workflowId: Id,
      nodeId: Id,
      endpointPath: String
  )(implicit as: ActorSystem, tout: Timeout): Retry[Array[Byte]] = new NotebookPoller(
    notebookRestClient,
    pollInterval,
    retryCountLimit,
    workflowId,
    nodeId,
    endpointPath
  )
} 
Example 25
Source File: NotebookRestClient.scala    From seahorse-workflow-executor   with Apache License 2.0 5 votes vote down vote up
package io.deepsense.commons.rest.client

import java.net.URL

import scala.concurrent._
import scala.concurrent.duration._
import scala.language.postfixOps

import akka.actor._
import akka.util.Timeout
import spray.client.pipelining._
import spray.http.{HttpResponse, StatusCodes}
import spray.httpx.SprayJsonSupport

import io.deepsense.commons.json.NotebookRestClientProtocol._
import io.deepsense.commons.models.Id
import io.deepsense.commons.rest.client.req.NotebookClientRequest
import io.deepsense.commons.utils.Logging


case class NotebookHttpException(
    httpResponse: HttpResponse,
    msg: String,
    cause: Throwable = null)
  extends Exception(msg, cause)


class NotebookRestClient(
    notebooksServerAddress: URL,
    workflowId: Id,
    nodeId: Id,
    pollInterval: FiniteDuration,
    retryCountLimit: Int
)(implicit override val as: ActorSystem)
  extends Logging with RestClient with SprayJsonSupport {

  def apiUrl: java.net.URL = new URL(notebooksServerAddress, "/jupyter/")
  def credentials: Option[spray.http.HttpCredentials] = None
  def userId: Option[java.util.UUID] = None
  def userName: Option[String] = None

  implicit val timeout: Timeout = 70 minutes

  private val filenameExtension = "html"

  private val postPath = endpointPath("HeadlessNotebook")
  private val getPath = endpointPath(s"HeadlessNotebook/${workflowId}_$nodeId.$filenameExtension")

  private val poller = NotebookPoller(this, pollInterval, retryCountLimit, workflowId, nodeId, getPath)

  def pollForNotebookData(): Future[Array[Byte]] = poller.tryWork

  def generateNotebookData(language: String): Future[HttpResponse] = {
    val req = NotebookClientRequest(workflowId, nodeId, language)
    fetchHttpResponse(Post(postPath, req)).flatMap { resp => resp.status match {
      case StatusCodes.Success(_) => Future.successful(resp)
      case statusCode => Future.failed(NotebookHttpException(resp,
        s"Notebook server responded with $statusCode when asked to generate notebook data"
      ))
    }}
  }

  def generateAndPollNbData(language: String): Future[Array[Byte]] = {
    generateNotebookData(language).flatMap(_ => pollForNotebookData())
  }

  def toFactory: NotebooksClientFactory =
    new NotebooksClientFactory(notebooksServerAddress, pollInterval, retryCountLimit)

}

class NotebooksClientFactory(notebooksServerAddress: URL, pollInterval: FiniteDuration, retryCountLimit: Int)
  (implicit system: ActorSystem) {
  def createNotebookForNode(workflow: Id, node: Id): NotebookRestClient = {
    new NotebookRestClient(notebooksServerAddress, workflow, node, pollInterval, retryCountLimit)
  }
} 
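The line "implicit val timeout: Timeout = 70 minutes" above works because akka.util.Timeout's companion object provides an implicit conversion (durationToTimeout) from FiniteDuration to Timeout. A tiny sketch showing that the converted form and the explicit constructor are equivalent; the object name and the 70-minute value are just illustrative:

import akka.util.Timeout
import scala.concurrent.duration._
import scala.language.postfixOps

object DurationToTimeoutSketch extends App {
  // The conversion lives in Timeout's companion object, so no extra import is required.
  val viaConversion: Timeout = 70 minutes
  val viaConstructor: Timeout = Timeout(70.minutes)

  println(viaConversion.duration == viaConstructor.duration) // true
}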
Example 26
Source File: RetrySpec.scala    From seahorse-workflow-executor   with Apache License 2.0 5 votes vote down vote up
package io.deepsense.commons.utils

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.language.postfixOps

import akka.actor.ActorSystem
import akka.util.Timeout
import org.scalatest.{Matchers, WordSpec}

import io.deepsense.commons.utils.RetryActor.{RetriableException, RetryLimitReachedException}

class RetrySpec extends WordSpec with Matchers {

  val uutName = classOf[Retry[_]].getSimpleName.filterNot(_ == '$')

  trait Setup {
    def generateUUT[T](retryLimitCount: Int)(toDo: => Future[T]): Retry[T] = new {
      override val workDescription = Some("test work")

      override val actorSystem: ActorSystem = ActorSystem()

      override val retryInterval = 1 nano

      override val retryLimit = retryLimitCount

      override val timeout = Timeout(1 minute)

    } with Retry[T] {
      override def work: Future[T] = toDo
    }
  }

  s"A $uutName" should {
    "complete its work" when {
      "no exceptions are thrown" in {
        new Setup {
          val uut = generateUUT(0) {
            Future.successful(2 * 3 + 8)
          }

          Await.result(
            uut.tryWork, Duration.Inf) shouldBe 14
        }
      }

      "only retriable exceptions are thrown and retry limit is not reached" in {
        new Setup {
          var count = 3
          val uut = generateUUT(3) {
            if (count > 0) {
              count -= 1
              Future.failed(RetriableException(s"Thrown because count is ${count + 1}", None))
            } else {
              Future.successful("success")
            }
          }

          Await.result(
            uut.tryWork, Duration.Inf
          ) shouldBe "success"

          count shouldBe 0
        }
      }
    }

    "fail" when {
      "retry limit is reached" in {
        new Setup {
          val uut = generateUUT(10) {
            Future.failed(RetriableException(s"This will never succeed, yet we keep trying", None))
          }

          a [RetryLimitReachedException] shouldBe thrownBy (Await.result(uut.tryWork, Duration.Inf))

        }
      }

      "unexpected exception is thrown" in {
        var count = 1
        new Setup {
          val uut = generateUUT(10) {
            if (count == 0) {
              Future.failed(new RuntimeException("Thrown because counter reached zero"))
            } else {
              count -= 1
              Future.failed(RetriableException(s"Thrown because counter was ${count + 1}", None))
            }
          }

          a [RuntimeException] shouldBe thrownBy (Await.result(uut.tryWork, Duration.Inf))
          count shouldBe 0
        }
      }
    }
  }
} 
Example 27
Source File: BlockingBrain.scala    From sumobot   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.sumobot.brain

import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import com.sumologic.sumobot.brain.Brain._

import scala.concurrent.Await
import scala.concurrent.duration._

class BlockingBrain(brain: ActorRef) {

  def retrieve(key: String): Option[String] = {
    implicit val timeout = Timeout(2.seconds)
    Await.result(brain ? Retrieve(key), 2.seconds) match {
      case ValueRetrieved(_, value) => Some(value)
      case ValueMissing(_) => None
    }
  }

  def listValues(prefix: String = ""): Map[String, String] = {
    implicit val timeout = Timeout(2.seconds)
    Await.result(brain ? ListValues(prefix), 2.seconds) match {
      case ValueMap(map) => map
    }
  }

  def store(key: String, value: String): Unit = {
    brain ! Store(key, value)
  }

  def remove(key: String): Unit = {
    brain ! Remove(key)
  }
} 
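BlockingBrain wraps the ask-and-Await pattern behind a synchronous API. The same idea with a self-contained toy actor; Put, Get and KVActor are illustrative names and are not part of sumobot:

import akka.actor.{Actor, ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.Await
import scala.concurrent.duration._

object BlockingAskSketch extends App {
  case class Put(key: String, value: String)
  case class Get(key: String)

  class KVActor extends Actor {
    private var store = Map.empty[String, String]
    def receive = {
      case Put(k, v) => store += k -> v; sender() ! "stored"
      case Get(k)    => sender() ! store.get(k) // replies with an Option[String]
    }
  }

  val system = ActorSystem("blocking-ask-sketch")
  val kv = system.actorOf(Props(new KVActor))

  implicit val timeout: Timeout = Timeout(2.seconds)
  Await.result(kv ? Put("hello", "world"), 2.seconds)                            // wait for the ack
  val value = Await.result((kv ? Get("hello")).mapTo[Option[String]], 2.seconds) // then read it back
  println(value) // Some(world)

  system.terminate()
}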
Example 28
Source File: Help.scala    From sumobot   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.sumobot.plugins.help

import akka.actor.ActorLogging
import akka.pattern.ask
import akka.util.Timeout
import com.sumologic.sumobot.core.PluginRegistry.{PluginList, RequestPluginList}
import com.sumologic.sumobot.core.model.IncomingMessage
import com.sumologic.sumobot.plugins.BotPlugin

import scala.concurrent.duration._
import scala.util.Success

object Help {
  private[help] val ListPlugins = BotPlugin.matchText("(help|\\?)\\W*")
  private[help] val HelpForPlugin = BotPlugin.matchText("(help|\\?) ([\\-\\w]+).*")
}

class Help extends BotPlugin with ActorLogging {
  override protected def help =
    s"""I can help you understand plugins.
       |
       |help - I'll tell you what plugins I've got.
       |help <plugin> - I'll tell you how <plugin> works.
     """.stripMargin

  import Help._

  override protected def receiveIncomingMessage = {
    case message@IncomingMessage(ListPlugins(_), true, _, _, _, _, _) =>
      val msg = message
      implicit val timeout = Timeout(5.seconds)
      pluginRegistry ? RequestPluginList onComplete {
        case Success(result) => result match {
          case PluginList(plugins) =>
            msg.say(plugins.map(_.plugin.path.name).sorted.mkString("\n"))
        }
        case _ =>
      }

    case message@IncomingMessage(HelpForPlugin(_, pluginName), addressedToUs, _, _, _, _, _) =>
      val msg = message
      implicit val timeout = Timeout(5.seconds)
      pluginRegistry ? RequestPluginList onComplete {
        case Success(result) => result match {
          case PluginList(plugins) =>
            plugins.find(_.plugin.path.name.equalsIgnoreCase(pluginName)) match {
              case Some(plugin) =>
                msg.say(plugin.help)
              case None =>
                if (addressedToUs) {
                  msg.respond(s"Sorry, I don't know $pluginName")
                }
            }
        }
        case _ =>
      }
  }
} 
Example 29
Source File: S3BrainTest.scala    From sumobot   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.sumobot.brain

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.testkit.TestKit
import akka.util.Timeout
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider}
import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.sumologic.sumobot.brain.Brain.ValueRetrieved
import com.sumologic.sumobot.core.aws.AWSAccounts
import com.sumologic.sumobot.test.annotated.SumoBotTestKit
import org.scalatest.{BeforeAndAfterAll, Matchers}

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Random

class S3BrainTest
    extends SumoBotTestKit(ActorSystem("S3SingleObjectBrainTest"))
    with BeforeAndAfterAll
    with Matchers {

  lazy val credsOption = AWSAccounts.load(system.settings.config).values.headOption

  val bucketPrefix = "sumobot-s3-brain"

  // The tests here only run if there are valid AWS credentials in the configuration. Otherwise,
  // they're skipped.
  credsOption foreach {
    creds =>
      cleanupBuckets(creds)

      val bucket = bucketPrefix + randomString(5)

      "S3 brain" should {
        "persist the contents across reloads" in {
          implicit val timeout = Timeout(5.seconds)
          val s3Key = randomString(16)
          val firstBrain = system.actorOf(S3Brain.props(creds, bucket, s3Key))
          firstBrain ! Brain.Store("hello", "world")

          // Just wait for the next message to return.
          val firstRetrieval = firstBrain ? Brain.Retrieve("hello")
          val firstResult = Await.result(firstRetrieval, 5.seconds)
          firstResult match {
            case ValueRetrieved(k, v) =>
              k should be("hello")
              v should be("world")
            case wrongResult => fail(s"Did not get what we expected: $wrongResult")
          }

          // Since we wrote to S3, the 2nd brain should now have the value.
          val secondBrain = system.actorOf(S3Brain.props(creds, bucket, s3Key))
          val secondRetrieval = secondBrain ? Brain.Retrieve("hello")
          val secondResult = Await.result(secondRetrieval, 5.seconds)
          secondResult match {
            case ValueRetrieved(k, v) =>
              k should be("hello")
              v should be("world")
            case wrongResult => fail(s"Did not get what we expected: $wrongResult")
          }
        }
      }
  }

  private def randomString(length: Int): String = {
    val alphabet = ('a' to 'z').mkString + ('0' to '9').mkString
    (1 to length).
        map(_ => Random.nextInt(alphabet.length)).
        map(alphabet.charAt).mkString
  }

  override def afterAll() {
    TestKit.shutdownActorSystem(system)
    credsOption.foreach(cleanupBuckets)
  }

  def cleanupBuckets(creds: AWSCredentials): Unit = {
    val s3 = AmazonS3ClientBuilder.standard().withCredentials(new AWSStaticCredentialsProvider(creds)).build()
    s3.listBuckets().asScala.filter(_.getName.startsWith(bucketPrefix)).foreach {
      bucket =>
        println(s"Deleting S3 bucket ${bucket.getName}")
        val objects = s3.listObjects(bucket.getName).getObjectSummaries.asScala.map(_.getKey)
        objects.foreach {
          obj =>
            s3.deleteObject(bucket.getName, obj)
        }
        s3.deleteBucket(bucket.getName)
    }
  }
} 
Example 30
Source File: PluginRegistryTest.scala    From sumobot   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.sumobot.core

import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.testkit.TestKit
import akka.util.Timeout
import com.sumologic.sumobot.core.PluginRegistry.{Plugin, PluginList, RequestPluginList}
import com.sumologic.sumobot.plugins.BotPlugin.{PluginAdded, PluginRemoved}
import com.sumologic.sumobot.plugins.help.Help
import com.sumologic.sumobot.test.annotated.SumoBotTestKit
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.Await
import scala.concurrent.duration._

class PluginRegistryTest
  extends SumoBotTestKit(ActorSystem("PluginRegistryTest"))
  with BeforeAndAfterAll {

  "PluginRegistry" should {
    "maintain a list of all registered plugins" in {

      implicit val timeout = Timeout(1.second)
      val reg = system.actorOf(Props[PluginRegistry])
      def checkList(func: Seq[Plugin] => Unit) = {
        Await.result(reg ? RequestPluginList, 1.second) match {
          case PluginList(list) => func(list)
          case other => fail(s"Got $other instead.")
        }
      }

      val fakePlugin = system.actorOf(Props[Help])

      checkList(_.isEmpty should be(true))
      reg ! PluginAdded(fakePlugin, "hah")
      checkList(_.size should be(1))
      reg ! PluginRemoved(fakePlugin)
      checkList(_.isEmpty should be(true))
    }
  }

  override protected def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
  }
} 
Example 31
Source File: FetchWithCacheConfigClient.scala    From izanami   with Apache License 2.0 5 votes vote down vote up
package izanami.configs

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.event.Logging
import akka.http.scaladsl.util.FastFuture
import akka.stream.Materializer
import akka.util.Timeout
import com.google.common.cache.{Cache, CacheBuilder}
import izanami.Strategy.FetchWithCacheStrategy
import izanami.scaladsl._
import izanami._
import play.api.libs.json.Json

import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import scala.util.{Failure, Success}

object FetchWithCacheConfigClient {
  def apply(
      clientConfig: ClientConfig,
      fallback: Configs,
      underlyingStrategy: ConfigClient,
      cacheConfig: FetchWithCacheStrategy
  )(implicit izanamiDispatcher: IzanamiDispatcher,
    actorSystem: ActorSystem,
    materializer: Materializer): FetchWithCacheConfigClient =
    new FetchWithCacheConfigClient(clientConfig,
                                   fallback,
                                   underlyingStrategy,
                                   cacheConfig,
                                   underlyingStrategy.cudConfigClient)
}

private[configs] class FetchWithCacheConfigClient(
    clientConfig: ClientConfig,
    fallback: Configs,
    underlyingStrategy: ConfigClient,
    cacheConfig: FetchWithCacheStrategy,
    override val cudConfigClient: CUDConfigClient
)(implicit val izanamiDispatcher: IzanamiDispatcher, actorSystem: ActorSystem, val materializer: Materializer)
    extends ConfigClient {

  import actorSystem.dispatcher

  implicit val timeout = Timeout(10.second)

  private val logger = Logging(actorSystem, this.getClass.getName)
  private val cache: Cache[String, Seq[Config]] = CacheBuilder
    .newBuilder()
    .maximumSize(cacheConfig.maxElement)
    .expireAfterWrite(cacheConfig.duration.toMillis, TimeUnit.MILLISECONDS)
    .build[String, Seq[Config]]()

  override def configs(pattern: Seq[String]): Future[Configs] = {
    val convertedPattern =
      Option(pattern).map(_.map(_.replace(".", ":")).mkString(",")).getOrElse("*")
    Option(cache.getIfPresent(convertedPattern)) match {
      case Some(configs) => FastFuture.successful(Configs(configs))
      case None =>
        val futureConfigs = underlyingStrategy.configs(convertedPattern)
        futureConfigs.onComplete {
          case Success(c) => cache.put(convertedPattern, c.configs)
          case Failure(e) => logger.error(e, "Error fetching configs")
        }
        futureConfigs
    }
  }

  override def config(key: String) = {
    require(key != null, "key should not be null")
    val convertedKey: String = key.replace(".", ":")
    Option(cache.getIfPresent(convertedKey)) match {
      case Some(configs) =>
        FastFuture.successful(configs.find(_.id == convertedKey).map(_.value).getOrElse(Json.obj()))
      case None =>
        val futureConfig: Future[Configs] =
          underlyingStrategy.configs(convertedKey)
        futureConfig.onComplete {
          case Success(configs) =>
            cache.put(convertedKey, configs.configs)
          case Failure(e) =>
            logger.error(e, "Error fetching features")
        }
        futureConfig
          .map(
            _.configs
              .find(_.id == convertedKey)
              .map(c => c.value)
              .getOrElse(Json.obj())
          )
    }
  }

  override def configsSource(pattern: String) =
    underlyingStrategy.configsSource(pattern)

  override def configsStream(pattern: String) =
    underlyingStrategy.configsStream(pattern)
} 
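The client above keeps fetched configs in a Guava cache whose size and expiry come from the FetchWithCacheStrategy. A standalone sketch of that cache setup, with made-up limits (1000 entries, one-minute expiry) instead of the real strategy values:

import java.util.concurrent.TimeUnit
import com.google.common.cache.{Cache, CacheBuilder}

object ConfigCacheSketch extends App {
  // Illustrative limits; the client above takes them from its cacheConfig.
  val cache: Cache[String, Seq[String]] = CacheBuilder
    .newBuilder()
    .maximumSize(1000)
    .expireAfterWrite(60000, TimeUnit.MILLISECONDS) // entries expire a minute after being written
    .build[String, Seq[String]]()

  cache.put("my:config", Seq("""{"enabled": true}"""))
  println(Option(cache.getIfPresent("my:config"))) // Some(List({"enabled": true}))
  println(Option(cache.getIfPresent("missing")))   // None
}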
Example 32
Source File: UserProjection.scala    From whirlwind-tour-akka-typed   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.wtat

import akka.actor.Scheduler
import akka.actor.typed.{ ActorRef, Behavior }
import akka.actor.typed.scaladsl.Actor
import akka.actor.typed.scaladsl.AskPattern.Askable
import akka.cluster.Cluster
import akka.cluster.ddata.{ ORSet, ORSetKey }
import akka.cluster.ddata.Replicator.WriteLocal
import akka.cluster.ddata.typed.scaladsl.{ DistributedData, Replicator }
import akka.persistence.query.EventEnvelope
import akka.persistence.query.scaladsl.EventsByPersistenceIdQuery
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import akka.util.Timeout
import cats.instances.string._
import cats.syntax.eq._
import org.apache.logging.log4j.scala.Logging
import scala.concurrent.duration.FiniteDuration

object UserProjection extends Logging {
  import akka.actor.typed.scaladsl.adapter._

  sealed trait Command
  final case object Stop                              extends Command
  private final case object HandleEventStreamComplete extends Command

  abstract class EventStreamCompleteException
      extends IllegalStateException("Event stream completed unexpectedly!")
  private final case object EventStreamCompleteException extends EventStreamCompleteException

  final val Name = "user-projection"

  final val usersKey: ORSetKey[User] =
    ORSetKey("users")

  def apply(readJournal: EventsByPersistenceIdQuery,
            userView: ActorRef[UserView.Command],
            askTimeout: FiniteDuration)(implicit mat: Materializer): Behavior[Command] =
    Actor.deferred { context =>
      implicit val c: Cluster   = Cluster(context.system.toUntyped)
      implicit val s: Scheduler = context.system.scheduler
      implicit val t: Timeout   = askTimeout
      val replicator            = DistributedData(context.system).replicator
      val self                  = context.self

      readJournal
        .eventsByPersistenceId(UserRepository.Name, 0, Long.MaxValue)
        .collect { case EventEnvelope(_, _, _, event: UserRepository.Event) => event }
        .mapAsync(1) {
          case UserRepository.UserAdded(user) =>
            replicator ? Replicator.Update(usersKey, ORSet.empty[User], WriteLocal)(_ + user)

          case UserRepository.UserRemoved(username) =>
            replicator ? Replicator.Update(usersKey, ORSet.empty[User], WriteLocal) { users =>
              users.elements.find(_.username.value === username).fold(users)(users - _)
            }
        }
        .runWith(Sink.onComplete(_ => self ! HandleEventStreamComplete))
      logger.debug("Running event stream")

      Actor.immutable {
        case (_, Stop)                      => Actor.stopped
        case (_, HandleEventStreamComplete) => throw EventStreamCompleteException
      }
    }
} 
Example 33
Source File: Api.scala    From whirlwind-tour-akka-typed   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.wtat

import akka.actor.{ ActorSystem, Scheduler }
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.model.StatusCodes.{ Conflict, Created, NoContent, NotFound }
import akka.http.scaladsl.server.{ Directives, Route }
import akka.stream.Materializer
import akka.actor.typed.scaladsl.Actor
import akka.actor.typed.scaladsl.AskPattern.Askable
import akka.actor.typed.{ ActorRef, Behavior }
import akka.util.Timeout
import de.heikoseeberger.akkahttpcirce.ErrorAccumulatingCirceSupport
import java.net.InetSocketAddress
import org.apache.logging.log4j.scala.Logging
import scala.concurrent.duration.FiniteDuration
import scala.util.{ Failure, Success }

object Api extends Logging {

  sealed trait Command
  private final case object HandleBindFailure                      extends Command
  private final case class HandleBound(address: InetSocketAddress) extends Command

  final val Name = "api"

  def apply(address: String,
            port: Int,
            userRepository: ActorRef[UserRepository.Command],
            userView: ActorRef[UserView.Command],
            askTimeout: FiniteDuration)(implicit mat: Materializer): Behavior[Command] =
    Actor.deferred { context =>
      import akka.actor.typed.scaladsl.adapter._
      import context.executionContext
      implicit val s: ActorSystem = context.system.toUntyped

      val self = context.self
      Http()
        .bindAndHandle(route(userRepository, userView)(askTimeout, context.system.scheduler),
                       address,
                       port)
        .onComplete {
          case Failure(_)                      => self ! HandleBindFailure
          case Success(ServerBinding(address)) => self ! HandleBound(address)
        }

      Actor.immutable {
        case (_, HandleBindFailure) =>
          logger.error(s"Stopping, because cannot bind to $address:$port!")
          Actor.stopped

        case (_, HandleBound(address)) =>
          logger.info(s"Bound to $address")
          Actor.ignore
      }
    }

  def route(
      userRepository: ActorRef[UserRepository.Command],
      userView: ActorRef[UserView.Command]
  )(implicit askTimeout: Timeout, scheduler: Scheduler): Route = {
    import Directives._
    import ErrorAccumulatingCirceSupport._
    import io.circe.generic.auto._
    import io.circe.refined._

    pathEndOrSingleSlash {
      get {
        complete {
          import UserView._
          (userView ? GetUsers).mapTo[Users]
        }
      } ~
      post {
        entity(as[User]) { user =>
          import UserRepository._
          onSuccess(userRepository ? addUser(user)) {
            case UsernameTaken(_) => complete(Conflict)
            case UserAdded(_)     => complete(Created)
          }
        }
      }
    } ~
    path(Segment) { username =>
      delete {
        import UserRepository._
        onSuccess(userRepository ? removeUser(username)) {
          case UsernameUnknown(_) => complete(NotFound)
          case UserRemoved(_)     => complete(NoContent)
        }
      }
    }
  }
} 
Example 34
Source File: Utilities.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.client

import java.nio.charset.Charset

import akka.util.{ByteString, Timeout}
import org.apache.toree.communication.ZMQMessage
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.kernel.protocol.v5.content.ExecuteRequest
import org.apache.toree.utils.LogLike
import play.api.data.validation.ValidationError
import play.api.libs.json.{JsPath, Json, Reads}

import scala.concurrent.duration._

object Utilities extends LogLike {
  //
  // NOTE: This is brought in to remove feature warnings regarding the use of
  //       implicit conversions regarding the following:
  //
  //       1. ByteStringToString
  //       2. ZMQMessageToKernelMessage
  //
  import scala.language.implicitConversions

  private val sessionId: UUID = java.util.UUID.randomUUID().toString

  
  implicit val timeout = Timeout(21474835.seconds) // Maximum delay

  implicit def ByteStringToString(byteString : ByteString) : String = {
    new String(byteString.toArray, Charset.forName("UTF-8"))
  }

  implicit def StringToByteString(string : String) : ByteString = {
    ByteString(string.getBytes)
  }

  implicit def ZMQMessageToKernelMessage(message: ZMQMessage): KernelMessage = {
    val delimiterIndex: Int =
      message.frames.indexOf(ByteString("<IDS|MSG>".getBytes))
    //  TODO Handle the case where there is no delimiter
    val ids: Seq[Array[Byte]] =
      message.frames.take(delimiterIndex).map(
        (byteString : ByteString) =>  { byteString.toArray }
      )
    val header = Json.parse(message.frames(delimiterIndex + 2)).as[Header]
    val parentHeader = Json.parse(message.frames(delimiterIndex + 3)).validate[ParentHeader].fold[ParentHeader](
      // TODO: Investigate better solution than setting parentHeader to null for {}
      (invalid: Seq[(JsPath, Seq[ValidationError])]) => null, //HeaderBuilder.empty,
      (valid: ParentHeader) => valid
    )
    val metadata = Json.parse(message.frames(delimiterIndex + 4)).as[Metadata]

    KMBuilder().withIds(ids.toList)
               .withSignature(message.frame(delimiterIndex + 1))
               .withHeader(header)
               .withParentHeader(parentHeader)
               .withMetadata(metadata)
               .withContentString(message.frame(delimiterIndex + 5)).build(false)
  }

  implicit def KernelMessageToZMQMessage(kernelMessage : KernelMessage) : ZMQMessage = {
    val frames: scala.collection.mutable.ListBuffer[ByteString] = scala.collection.mutable.ListBuffer()
    kernelMessage.ids.map((id : Array[Byte]) => frames += ByteString.apply(id) )
    frames += "<IDS|MSG>"
    frames += kernelMessage.signature
    frames += Json.toJson(kernelMessage.header).toString()
    frames += Json.toJson(kernelMessage.parentHeader).toString()
    frames += Json.toJson(kernelMessage.metadata).toString
    frames += kernelMessage.contentString
    ZMQMessage(frames  : _*)
  }

  def parseAndHandle[T](json: String, reads: Reads[T], handler: T => Unit) : Unit = {
    Json.parse(json).validate[T](reads).fold(
      (invalid: Seq[(JsPath, Seq[ValidationError])]) =>
        logger.error(s"Could not parse JSON, ${json}"),
      (content: T) => handler(content)
    )
  }

  def getSessionId = sessionId

  def toKernelMessage(message: ExecuteRequest): KernelMessage = {
    // construct a kernel message whose content is an ExecuteRequest
    val id = java.util.UUID.randomUUID().toString
    val header = Header(
      id, "spark", sessionId, MessageType.Incoming.ExecuteRequest.toString, "5.0")

    KMBuilder().withIds(Seq[Array[Byte]]()).withSignature("").withHeader(header)
      .withParentHeader(HeaderBuilder.empty).withContentString(message).build
  }

} 
Example 35
Source File: PulseBootstrap.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.pulse

import akka.actor.ActorSystem
import akka.util.Timeout
import io.vamp.common.{ Config, Namespace }
import io.vamp.common.akka.ActorBootstrap
import io.vamp.pulse.notification.{ PulseNotificationProvider, UnsupportedPulseDriverError }

object PulseBootstrap {
  def `type`()(implicit namespace: Namespace) = Config.string("vamp.pulse.type")().toLowerCase
}

class PulseBootstrap extends ActorBootstrap with PulseNotificationProvider {

  def createActors(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout) = {
    implicit val executionContext = actorSystem.dispatcher
    info(s"Pulse: ${PulseBootstrap.`type`()}")
    PulseBootstrap.`type`() match {
      case "nats" ⇒
        for {
          pulseActor ← alias[PulseActor](PulseBootstrap.`type`(), (`type`: String) ⇒ {
            throwException(UnsupportedPulseDriverError(`type`))
          })
          pulseActorSupport ← alias[PulseActorSupport]("elasticsearch", (`type`: String) ⇒ {
            throwException(UnsupportedPulseDriverError(`type`))
          })
          pulseActorPublisher ← alias[PulseActorPublisher]("natspublisher", (`type`: String) ⇒ {
            throwException(UnsupportedPulseDriverError(`type`))
          })
        } yield pulseActor :: pulseActorSupport :: pulseActorPublisher :: Nil
      case _ ⇒
        alias[PulseActor](PulseBootstrap.`type`(), (`type`: String) ⇒ {
          throwException(UnsupportedPulseDriverError(`type`))
        }).map(_ :: Nil)(actorSystem.dispatcher)
    }
  }
} 
Example 36
Source File: ContainerDriverBootstrap.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.container_driver

import akka.actor.ActorSystem
import akka.util.Timeout
import io.vamp.common.{ Config, Namespace }
import io.vamp.common.akka.ActorBootstrap
import io.vamp.container_driver.notification.{ ContainerDriverNotificationProvider, UnsupportedContainerDriverError }

object ContainerDriverBootstrap {
  def `type`()(implicit namespace: Namespace) = Config.string("vamp.container-driver.type")().toLowerCase
}

class ContainerDriverBootstrap extends ActorBootstrap with ContainerDriverNotificationProvider {

  def createActors(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout) = {
    info(s"Container driver: ${ContainerDriverBootstrap.`type`()}")
    alias[ContainerDriverActor](ContainerDriverBootstrap.`type`(), (`type`: String) ⇒ {
      throwException(UnsupportedContainerDriverError(`type`))
    }).map(_ :: Nil)(actorSystem.dispatcher)
  }
} 
Example 37
Source File: ElasticsearchBootstrap.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.pulse

import akka.actor.{ ActorRef, ActorSystem }
import akka.util.Timeout
import io.vamp.common.Namespace
import io.vamp.common.akka.IoC.logger
import io.vamp.common.akka.{ ActorBootstrap, IoC }
import io.vamp.pulse.notification.PulseNotificationProvider

import scala.concurrent.{ ExecutionContext, Future }

class ElasticsearchBootstrap
    extends ActorBootstrap
    with PulseNotificationProvider {

  def createActors(implicit actorSystem: ActorSystem,
                   namespace: Namespace,
                   timeout: Timeout): Future[List[ActorRef]] = {
    implicit val executionContext: ExecutionContext = actorSystem.dispatcher
    Future.sequence(IoC.createActor[PulseInitializationActor] :: Nil)
  }

  override def start(implicit actorSystem: ActorSystem,
                     namespace: Namespace,
                     timeout: Timeout): Future[Unit] = {
    implicit val executionContext: ExecutionContext = actorSystem.dispatcher
    super.start.flatMap {
      _ => {
        IoC.actorFor[PulseInitializationActor] ! PulseInitializationActor.Initialize
        Future.unit
      }
    }
  }
} 
Example 38
Source File: PulseInitializationActor.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.pulse

import akka.actor.{ Actor, ActorRef }
import akka.util.Timeout
import io.vamp.common.Config
import io.vamp.common.akka.CommonSupportForActors
import io.vamp.pulse.PulseInitializationActor.Initialize
import io.vamp.pulse.notification.PulseNotificationProvider

object PulseInitializationActor {

  object Initialize

}

class PulseInitializationActor extends ElasticsearchPulseInitializationActor with CommonSupportForActors with PulseNotificationProvider {

  implicit lazy val timeout: Timeout = PulseActor.timeout()

  def receive: Actor.Receive = {
    case Initialize ⇒ initialize()
    case _          ⇒ done(sender())
  }

  private def initialize(): Unit = {
    val receiver = sender()
    val pulse = Config.string("vamp.pulse.type")().toLowerCase
    log.info(s"Initializing pulse of type: $pulse")

    pulse match {
      case "elasticsearch" | "nats" ⇒ initializeElasticsearch().foreach(_ ⇒ done(receiver))
      case _               ⇒ done(receiver)
    }
  }

  private def done(receiver: ActorRef): Unit = {
    log.info(s"Pulse has been initialized.")
    receiver ! true
  }
} 
Example 39
Source File: ActorBootstrap.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.common.akka

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.util.Timeout
import com.typesafe.scalalogging.Logger
import io.vamp.common.{ ClassProvider, Namespace }
import org.slf4j.{ LoggerFactory, MDC }

import scala.concurrent.Future
import scala.reflect.{ ClassTag, classTag }

trait Bootstrap extends BootstrapLogger {

  def start(): Future[Unit] = Future.successful(())

  def stop(): Future[Unit] = Future.successful(())
}

trait ActorBootstrap extends BootstrapLogger {

  private var actors: Future[List[ActorRef]] = Future.successful(Nil)

  def createActors(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[List[ActorRef]]

  def start(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[Unit] = {
    info(s"Starting ${getClass.getSimpleName}")
    actors = createActors(actorSystem, namespace, timeout)
    actors.map(_ ⇒ ())(actorSystem.dispatcher)
  }

  def restart(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[Unit] = {
    stop.flatMap(_ ⇒ start)(actorSystem.dispatcher)
  }

  def stop(implicit actorSystem: ActorSystem, namespace: Namespace): Future[Unit] = {
    info(s"Stopping ${getClass.getSimpleName}")
    actors.map(_.reverse.foreach(_ ! PoisonPill))(actorSystem.dispatcher)
  }

  def alias[T: ClassTag](name: String, default: String ⇒ Future[ActorRef])(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[ActorRef] = {
    ClassProvider.find[T](name).map { clazz ⇒
      IoC.alias(classTag[T].runtimeClass, clazz)
      IoC.createActor(clazz)
    } getOrElse default(name)
  }
}

trait BootstrapLogger {

  protected val logger = Logger(LoggerFactory.getLogger(getClass))

  protected def info(message: String)(implicit namespace: Namespace): Unit = {
    MDC.put("namespace", namespace.name)
    try logger.info(message) finally MDC.remove("namespace")
  }
} 
Example 40
Source File: DataRetrieval.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.common.akka

import java.util.concurrent.TimeoutException

import akka.actor.Actor
import akka.pattern.after
import akka.util.Timeout

import scala.concurrent.Future
import scala.util.{ Failure, Success }

case class DataRetrieved(data: Map[Class[Actor], Any], succeeded: Boolean)

trait DataRetrieval {
  this: ExecutionContextProvider with ActorSystemProvider ⇒

  def retrieve(actors: List[Class[Actor]], futureOf: (Class[Actor]) ⇒ Future[Any], timeout: Timeout): Future[DataRetrieved] = {
    def noDataError(actor: Class[Actor]) = noData(actor) → false

    val futures: Map[Class[Actor], Future[Any]] = actors.map(actor ⇒ actor → futureOf(actor)).toMap

    Future.firstCompletedOf(List(Future.sequence(futures.values.toList.map(_.recover { case x ⇒ Failure(x) })), after(timeout.duration, using = actorSystem.scheduler) {
      Future.successful(new TimeoutException("Component timeout."))
    })) map { _ ⇒
      futures.map {
        case (actor, future) if future.isCompleted ⇒
          actor → future.value.map {
            case Success(data) ⇒ data → true
            case _             ⇒ noDataError(actor)
          }.getOrElse(noDataError(actor))
        case (actor, future) ⇒ actor → noDataError(actor)
      }.foldLeft[DataRetrieved](DataRetrieved(Map(), succeeded = true)) { (r, e) ⇒ r.copy(r.data + (e._1 → e._2._1), succeeded = r.succeeded && e._2._2) }
    }
  }

  def noData(actor: Class[Actor]) = Map("error" → "No response.")
} 
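The retrieve method above bounds the overall wait by racing the collected futures against akka.pattern.after, which produces a future that completes once the Timeout's duration has elapsed. A minimal sketch of that race; the durations and messages are illustrative only:

import akka.actor.ActorSystem
import akka.pattern.after
import akka.util.Timeout
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._

object TimeoutRaceSketch extends App {
  val system = ActorSystem("timeout-race-sketch")
  implicit val ec: ExecutionContext = system.dispatcher

  val timeout: Timeout = Timeout(1.second)

  // Work that takes longer than the timeout allows.
  val work: Future[String] = after(3.seconds, system.scheduler)(Future.successful("work finished"))

  // Whichever future completes first wins the race.
  val raced: Future[String] = Future.firstCompletedOf(List(
    work,
    after(timeout.duration, system.scheduler)(Future.successful("timed out"))
  ))

  raced.foreach { result =>
    println(result) // timed out
    system.terminate()
  }
}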
Example 41
Source File: IoC.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.common.akka

import java.util.concurrent.atomic.AtomicInteger

import _root_.akka.pattern.ask
import akka.actor._
import akka.util.Timeout
import com.typesafe.scalalogging.LazyLogging
import io.vamp.common.Namespace
import io.vamp.common.util.TextUtil

import scala.collection.mutable
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect._

object IoC extends LazyLogging {

  private val counter = new AtomicInteger(0)

  private val aliases: mutable.Map[String, mutable.Map[Class[_], Class[_]]] = mutable.Map()

  private val actorRefs: mutable.Map[String, mutable.Map[Class[_], ActorRef]] = mutable.Map()

  private val namespaceMap: mutable.Map[String, Namespace] = mutable.Map()

  private val namespaceActors: mutable.Map[String, ActorRef] = mutable.Map()

  def namespaces: List[Namespace] = namespaceMap.values.toList

  def alias[FROM: ClassTag](implicit namespace: Namespace): Class[_] = {
    alias(classTag[FROM].runtimeClass)
  }

  def alias(from: Class[_])(implicit namespace: Namespace): Class[_] = {
    aliases.get(namespace.name).flatMap(_.get(from)).getOrElse(from)
  }

  def alias[FROM: ClassTag, TO: ClassTag](implicit namespace: Namespace): Option[Class[_]] = {
    alias(classTag[FROM].runtimeClass, classTag[TO].runtimeClass)
  }

  def alias(from: Class[_], to: Class[_])(implicit namespace: Namespace): Option[Class[_]] = {
    aliases.getOrElseUpdate(namespace.name, mutable.Map()).put(from, to)
  }

  def createActor(clazz: Class[_])(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[ActorRef] = {
    createActor(Props(clazz))
  }

  def createActor[ACTOR: ClassTag](implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[ActorRef] = {
    createActor(classTag[ACTOR].runtimeClass)
  }

  def createActor[ACTOR: ClassTag](arg: Any, args: Any*)(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[ActorRef] = {
    createActor(Props(classTag[ACTOR].runtimeClass, arg :: args.toList: _*))
  }

  def createActor(props: Props)(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[ActorRef] = {
    logger.info(s"Create Actor ${props.clazz.getSimpleName} for namespace ${namespace.name}")
    implicit val ec: ExecutionContext = actorSystem.dispatcher
    (namespaceActor ? props) map {
      case actorRef: ActorRef ⇒
        actorRefs.getOrElseUpdate(namespace.name, mutable.Map()).put(props.clazz, actorRef)
        aliases.getOrElseUpdate(namespace.name, mutable.Map()).foreach {
          case (from, to) if to == props.clazz ⇒ actorRefs.getOrElseUpdate(namespace.name, mutable.Map()).put(from, actorRef)
          case _                               ⇒
        }
        actorRef
      case _ ⇒ throw new RuntimeException(s"Cannot create actor for: ${props.clazz.getSimpleName}")
    }
  }

  def actorFor[ACTOR: ClassTag](implicit actorSystem: ActorSystem, namespace: Namespace): ActorRef = {
    actorFor(classTag[ACTOR].runtimeClass)
  }

  def actorFor(clazz: Class[_])(implicit actorSystem: ActorSystem, namespace: Namespace): ActorRef = {
    actorRefs.get(namespace.name).flatMap(_.get(alias(clazz))) match {
      case Some(actorRef) ⇒ actorRef
      case _              ⇒ throw new RuntimeException(s"No actor reference for: $clazz")
    }
  }

  private def namespaceActor(implicit actorSystem: ActorSystem, namespace: Namespace): ActorRef = {
    namespaceMap.put(namespace.name, namespace)
    namespaceActors.getOrElseUpdate(namespace.name, actorSystem.actorOf(Props(new Actor {
      def receive = {
        case props: Props ⇒ sender() ! context.actorOf(props, s"${TextUtil.toSnakeCase(props.clazz.getSimpleName)}-${counter.getAndIncrement}")
        case _            ⇒
      }
    }), namespace.name))
  }
} 
Example 42
Source File: IoCSpec.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.common.akka

import java.util.concurrent.TimeUnit

import akka.actor.{ ActorSystem, Props }
import akka.testkit.{ ImplicitSender, TestKit, TestProbe }
import akka.util.Timeout
import com.typesafe.scalalogging.LazyLogging
import io.vamp.common.notification.Notification
import io.vamp.common.{ ClassMapper, Namespace, NamespaceProvider }
import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike }

import scala.concurrent.{ Await, Future }
import scala.concurrent.duration._

class IoCSpec extends TestKit(ActorSystem("IoCSpec")) with ImplicitSender
    with WordSpecLike with Matchers with BeforeAndAfterAll with NamespaceProvider
    with LazyLogging {

  implicit val namespace: Namespace = Namespace("default")
  implicit val timeout: Timeout = Timeout(5L, TimeUnit.SECONDS)

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }

  "Echo actor" must {

    "echo message" in {

      val testProbe = TestProbe("test")

      val actors = Await.result(IoC.createActor(Props(classOf[EchoActor])).map(_ :: Nil)(system.dispatcher), 5.seconds)
      val actor = actors.head
      val testMessage = "Example Message"
      testProbe.send(actor, testMessage)
      testProbe.expectMsgPF(30.seconds) {
        case response: String ⇒
          logger.info(response.toString)
          assert(response == testMessage)
        case _ ⇒
          fail("Unexpected message")
      }
    }
  }
}

class EchoActorMapper extends ClassMapper {
  val name = "echo"
  val clazz: Class[_] = classOf[EchoActor]
}

class EchoActor extends CommonSupportForActors {
  override def receive: Receive = {
    case text: String ⇒ reply(echo(text))
  }

  private def echo(text: String): Future[String] = Future { text }

  override def message(notification: Notification): String = "echo actor message"

  override def info(notification: Notification): Unit = log.info(s"echo actor info")

  override def reportException(notification: Notification): Exception = new Exception("Echo actor notification report")
} 
Example 43
Source File: ActorBootstrap.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.bootstrap

import akka.actor.{ Actor, ActorNotFound, ActorSystem, Props }
import akka.util.Timeout
import io.vamp.common.akka.{ Bootstrap, ActorBootstrap ⇒ ActorBootstrapService }
import io.vamp.common.{ ClassProvider, Namespace }

import scala.concurrent.{ ExecutionContext, Future }

trait AbstractActorBootstrap extends Bootstrap {

  implicit def timeout: Timeout

  implicit def namespace: Namespace

  implicit def actorSystem: ActorSystem

  protected def bootstrap: List[ActorBootstrapService]

  override def start(): Future[Unit] = {
    info(s"Starting ${getClass.getSimpleName}")
    val all = bootstrap
    implicit val executionContext: ExecutionContext = actorSystem.dispatcher
    all.tail.foldLeft[Future[Unit]](all.head.start)((f, b) ⇒ f.flatMap(_ ⇒ b.start))
  }

  override def stop(): Future[Unit] = {
    info(s"Stopping ${getClass.getSimpleName}")
    val all = bootstrap.reverse
    implicit val executionContext: ExecutionContext = actorSystem.dispatcher
    all.tail.foldLeft[Future[Unit]](all.head.stop)((f, b) ⇒ f.flatMap(_ ⇒ b.stop))
  }
}

class ActorBootstrap(override val bootstrap: List[ActorBootstrapService])(implicit val actorSystem: ActorSystem, val namespace: Namespace, val timeout: Timeout) extends AbstractActorBootstrap

class RestartableActorBootstrap(namespace: Namespace)(override val bootstrap: List[ActorBootstrapService])(implicit actorSystem: ActorSystem, timeout: Timeout)
    extends ActorBootstrap(bootstrap)(actorSystem, namespace, timeout) {

  implicit val ns: Namespace = namespace
  implicit val executionContext: ExecutionContext = actorSystem.dispatcher

  private val name = s"${namespace.name}-config"

  actorSystem.actorSelection(name).resolveOne().failed.foreach {
    case _: ActorNotFound ⇒
      actorSystem.actorOf(Props(new Actor {
        def receive: Actor.Receive = {
          case "reload" ⇒ bootstrap.reverse.foreach(_.restart)
          case _        ⇒
        }
      }), name)
    case _ ⇒
  }
}

class ClassProviderActorBootstrap()(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout)
  extends RestartableActorBootstrap(namespace)(ClassProvider.all[ActorBootstrapService].toList)(actorSystem, timeout) 
Example 44
Source File: Vamp.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.bootstrap

import akka.actor.ActorSystem
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import io.vamp.common.Namespace
import io.vamp.common.akka.Bootstrap
import io.vamp.http_api.HttpApiBootstrap

import scala.concurrent.{ ExecutionContext, Future }
import scala.concurrent.duration.{ FiniteDuration, MILLISECONDS }

trait Vamp extends VampApp {

  implicit val system: ActorSystem = ActorSystem("vamp")
  implicit val executionContext: ExecutionContext = system.dispatcher
  implicit val timeout: Timeout = Timeout(FiniteDuration(ConfigFactory.load().getDuration("vamp.bootstrap.timeout", MILLISECONDS), MILLISECONDS))

  protected lazy val bootstraps = {
    implicit val namespace: Namespace = Namespace(ConfigFactory.load().getString("vamp.namespace"))
    List() :+
      new LoggingBootstrap {
        lazy val logo: String =
          s"""
             |██╗   ██╗ █████╗ ███╗   ███╗██████╗
             |██║   ██║██╔══██╗████╗ ████║██╔══██╗
             |██║   ██║███████║██╔████╔██║██████╔╝
             |╚██╗ ██╔╝██╔══██║██║╚██╔╝██║██╔═══╝
             | ╚████╔╝ ██║  ██║██║ ╚═╝ ██║██║
             |  ╚═══╝  ╚═╝  ╚═╝╚═╝     ╚═╝╚═╝
             |                                    $version
             |                                    by magnetic.io
             |""".stripMargin
      } :+
      new KamonBootstrap :+
      new ConfigurationBootstrap :+
      new ClassProviderActorBootstrap :+
      new ActorBootstrap(new HttpApiBootstrap :: Nil)
  }

  addShutdownBootstrapHook()

  startBootstraps()
}

trait VampApp extends App {

  protected implicit def system: ActorSystem

  protected implicit def executionContext: ExecutionContext

  protected def bootstraps: List[Bootstrap]

  def addShutdownBootstrapHook(): Unit = sys.addShutdownHook {
    val reversed = bootstraps.reverse
    reversed.tail.foldLeft[Future[Unit]](reversed.head.stop())((f, b) ⇒ f.flatMap(_ ⇒ b.stop())).map { _ ⇒ system.terminate() }.recover {
      case e: Throwable ⇒ e.printStackTrace()
    }
  }

  def startBootstraps(): Future[Unit] = {
    bootstraps.tail.foldLeft[Future[Unit]](bootstraps.head.start())((f, b) ⇒ f.flatMap(_ ⇒ b.start())).recover {
      case e: Throwable ⇒ e.printStackTrace()
    }
  }
} 
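The bootstrap timeout above is read from Typesafe Config and wrapped in a Timeout via FiniteDuration. A standalone sketch of the same conversion; the app.ask-timeout path and the 5-second value are made up for the example:

import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration.{FiniteDuration, MILLISECONDS}

object ConfigTimeoutSketch extends App {
  // Inline config for illustration; a real application would read application.conf.
  val config = ConfigFactory.parseString("app.ask-timeout = 5 seconds")

  implicit val timeout: Timeout =
    Timeout(FiniteDuration(config.getDuration("app.ask-timeout", MILLISECONDS), MILLISECONDS))

  println(timeout.duration) // 5000 milliseconds
}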
Example 45
Source File: SignatureManagerActor.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.communication.security

import akka.actor.{Props, ActorRef, Actor}
import akka.util.Timeout
import org.apache.toree.communication.utils.OrderedSupport
import org.apache.toree.kernel.protocol.v5.KernelMessage
import org.apache.toree.utils.LogLike

import scala.concurrent.duration._
import akka.pattern.ask
import akka.pattern.pipe

class SignatureManagerActor(
  key: String, scheme: String
) extends Actor with LogLike with OrderedSupport {
  private val hmac = Hmac(key, HmacAlgorithm(scheme))

  def this(key: String) = this(key, HmacAlgorithm.SHA256.toString)

  // NOTE: Required to provide the execution context for futures with akka
  import context._

  // NOTE: Required for ask (?) to function... maybe can define elsewhere?
  implicit val timeout = Timeout(5.seconds)

  //
  // List of child actors that the signature manager contains
  //
  private var signatureChecker: ActorRef = _
  private var signatureProducer: ActorRef = _

  
  override def orderedTypes(): Seq[Class[_]] = Seq(
    classOf[(String, Seq[_])],
    classOf[KernelMessage]
  )
} 
Example 46
Source File: HeartbeatClient.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.client.socket

import akka.actor.{ActorRef, Actor}
import akka.util.{ByteString, Timeout}
import org.apache.toree.communication.ZMQMessage
import akka.pattern.ask
import org.apache.toree.kernel.protocol.v5.client.ActorLoader
import org.apache.toree.utils.LogLike
import org.apache.toree.kernel.protocol.v5.UUID
import scala.collection.concurrent.{Map, TrieMap}
import scala.concurrent.duration._

object HeartbeatMessage {}

class HeartbeatClient(
  socketFactory : SocketFactory,
  actorLoader: ActorLoader,
  signatureEnabled: Boolean
) extends Actor with LogLike {
  logger.debug("Created new Heartbeat Client actor")
  implicit val timeout = Timeout(1.minute)

  val futureMap: Map[UUID, ActorRef] = TrieMap[UUID, ActorRef]()
  val socket = socketFactory.HeartbeatClient(context.system, self)

  override def receive: Receive = {
    // from Heartbeat
    case message: ZMQMessage =>
      val id = message.frames.map((byteString: ByteString) =>
        new String(byteString.toArray)).mkString("\n")
      logger.info(s"Heartbeat client receive:$id")
      futureMap(id) ! true
      futureMap.remove(id)

    // from SparkKernelClient
    case HeartbeatMessage =>
      import scala.concurrent.ExecutionContext.Implicits.global
      val id = java.util.UUID.randomUUID().toString
      futureMap += (id -> sender)
      logger.info(s"Heartbeat client send: $id")
      val future = socket ? ZMQMessage(ByteString(id.getBytes))
      future.onComplete {
        // the future always times out because the server "tells" the response
        case(_) => futureMap.remove(id)
      }
  }
} 
Example 47
Source File: ShellClient.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.client.socket

import akka.actor.Actor
import akka.util.Timeout
import org.apache.toree.communication.ZMQMessage
import org.apache.toree.communication.security.SecurityActorType
import org.apache.toree.kernel.protocol.v5.client.{ActorLoader, Utilities}
import org.apache.toree.kernel.protocol.v5.{KernelMessage, UUID}
import Utilities._
import org.apache.toree.kernel.protocol.v5.client.execution.{DeferredExecution, DeferredExecutionManager}
import org.apache.toree.kernel.protocol.v5.content.ExecuteReply

import org.apache.toree.utils.LogLike
import scala.concurrent.Await
import scala.concurrent.duration._
import akka.pattern.ask


class ShellClient(
  socketFactory: SocketFactory,
  actorLoader: ActorLoader,
  signatureEnabled: Boolean
) extends Actor with LogLike {
  logger.debug("Created shell client actor")
  implicit val timeout = Timeout(21474835.seconds)

  val socket = socketFactory.ShellClient(context.system, self)

  def receiveExecuteReply(parentId:String, kernelMessage: KernelMessage): Unit = {
    val deOption: Option[DeferredExecution] = DeferredExecutionManager.get(parentId)
    deOption match {
      case None =>
        logger.warn(s"No deferred execution for parent id ${parentId}")
      case Some(de) =>
        Utilities.parseAndHandle(kernelMessage.contentString,
          ExecuteReply.executeReplyReads, (er: ExecuteReply) => de.resolveReply(er))
    }
  }

  override def receive: Receive = {
    // from shell
    case message: ZMQMessage =>
      logger.debug("Received shell kernel message.")
      val kernelMessage: KernelMessage = message

      // TODO: Validate incoming message signature

      logger.trace(s"Kernel message is ${kernelMessage}")
      receiveExecuteReply(message.parentHeader.msg_id,kernelMessage)

    // from handler
    case message: KernelMessage =>
      logger.trace(s"Sending kernel message ${message}")
      val signatureManager =
        actorLoader.load(SecurityActorType.SignatureManager)

      import scala.concurrent.ExecutionContext.Implicits.global
      val messageWithSignature = if (signatureEnabled) {
        val signatureMessage = signatureManager ? message
        Await.result(signatureMessage, 100.milliseconds)
          .asInstanceOf[KernelMessage]
      } else message

      val zMQMessage: ZMQMessage = messageWithSignature

      socket ! zMQMessage
  }
} 
Example 48
Source File: SparkKernelClient.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.client

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.util.Timeout
import org.apache.toree.comm._
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.kernel.protocol.v5.client.execution.{DeferredExecution, ExecuteRequestTuple}
import org.apache.toree.kernel.protocol.v5.client.socket.HeartbeatMessage
import org.apache.toree.kernel.protocol.v5.client.socket.StdinClient.{ResponseFunctionMessage, ResponseFunction}
import org.apache.toree.kernel.protocol.v5.content.ExecuteRequest
import org.apache.toree.utils.LogLike
import scala.concurrent.duration._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}


  val comm = new ClientCommManager(
    actorLoader = actorLoader,
    kmBuilder = KMBuilder(),
    commRegistrar = commRegistrar
  )

  // TODO: hide this? just heartbeat to see if kernel is reachable?
  def heartbeat(failure: () => Unit): Unit = {
    val future = actorLoader.load(SocketType.Heartbeat) ? HeartbeatMessage

    future.onComplete {
      case Success(_) =>
        logger.info("Client received heartbeat.")
      case Failure(_) =>
        failure()
        logger.info("There was an error receiving heartbeat from kernel.")
    }
  }

  def shutdown() = {
    logger.info("Shutting down client")
    actorSystem.terminate()
  }
} 
Example 49
Source File: ExecuteHandler.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.client.handler

import akka.actor.Actor
import akka.util.Timeout
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.kernel.protocol.v5.client.{ActorLoader, Utilities}
import org.apache.toree.kernel.protocol.v5.client.execution.{ExecuteRequestTuple, DeferredExecutionManager}
import org.apache.toree.utils.LogLike
import scala.concurrent.duration._


class ExecuteHandler(actorLoader: ActorLoader) extends Actor with LogLike {
  implicit val timeout = Timeout(21474835.seconds)

  override def receive: Receive = {
    case reqTuple: ExecuteRequestTuple =>
      // create message to send to shell
      val km: KernelMessage = Utilities.toKernelMessage(reqTuple.request)
      //  Register the execution for this message id with the manager
      DeferredExecutionManager.add(km.header.msg_id,reqTuple.de)

      // send the message to the ShellClient
      val shellClient = actorLoader.load(SocketType.ShellClient)
      shellClient ! km
  }
} 
Example 50
Source File: ProxyRoute.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.http_api

import akka.http.scaladsl.model.StatusCodes.BadGateway
import akka.http.scaladsl.model.ws.UpgradeToWebSocket
import akka.http.scaladsl.server.{ RequestContext, Route, RouteResult }
import akka.stream.Materializer
import akka.util.Timeout
import io.vamp.common.Namespace
import io.vamp.common.http.HttpApiDirectives
import io.vamp.model.artifact.{ Gateway, Deployment, Workflow }
import io.vamp.operation.controller.ProxyController

import scala.concurrent.Future
import scala.util.Try

trait ProxyRoute extends AbstractRoute with ProxyController {
  this: HttpApiDirectives ⇒

  implicit def materializer: Materializer

  def proxyRoute(implicit namespace: Namespace, timeout: Timeout): Route =
    path("host" / Segment / "port" / Segment / RemainingPath) {
      (host, port, path) ⇒ Try(handle(hostPortProxy(host, port.toInt, path))).getOrElse(complete(BadGateway))
    } ~ path(Gateway.kind / Segment / Segment / Segment / RemainingPath) {
      (name1, name2, name3, path) ⇒ handle(gatewayProxy(s"$name1/$name2/$name3", path, skip = true))
    } ~ path(Gateway.kind / Segment / Segment / RemainingPath) {
      (name1, name2, path) ⇒ handle(gatewayProxy(s"$name1/$name2", path, skip = true))
    } ~ path(Gateway.kind / Segment / RemainingPath) {
      (gateway, path) ⇒ handle(gatewayProxy(gateway, path, skip = false))
    } ~ path(Workflow.kind / Segment / "instances" / Segment / "ports" / Segment / RemainingPath) {
      (workflow, instance, port, path) ⇒ handle(instanceProxy(workflow, instance, port, path))
    } ~ path(Deployment.kind / Segment / "clusters" / Segment / "services" / Segment / "instances" / Segment / "ports" / Segment / RemainingPath) {
      (deployment, cluster, service, instance, port, path) ⇒ handle(instanceProxy(deployment, cluster, service, instance, port, path))
    }

  private def handle(handler: (RequestContext, Option[UpgradeToWebSocket]) ⇒ Future[RouteResult]): Route = {
    extractUpgradeToWebSocket { upgrade ⇒ context ⇒ handler(context, Option(upgrade))
    } ~ {
      context ⇒ handler(context, None)
    }
  }
} 
Example 51
Source File: Utilities.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.kernel

import java.nio.charset.Charset

import akka.util.{ByteString, Timeout}
import org.apache.toree.communication.ZMQMessage
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.utils.LogLike
import play.api.data.validation.ValidationError
import play.api.libs.json.{JsPath, Json, Reads}

import scala.concurrent.duration._

object Utilities extends LogLike {
  //
  // NOTE: This is brought in to remove feature warnings regarding the use of
  //       implicit conversions regarding the following:
  //
  //       1. ByteStringToString
  //       2. ZMQMessageToKernelMessage
  //
  import scala.language.implicitConversions

  
  implicit val timeout = Timeout(21474835.seconds)

  implicit def ByteStringToString(byteString : ByteString) : String = {
    new String(byteString.toArray, Charset.forName("UTF-8"))
  }

  implicit def StringToByteString(string : String) : ByteString = {
    ByteString(string.getBytes)
  }

  implicit def ZMQMessageToKernelMessage(message: ZMQMessage): KernelMessage = {
    val delimiterIndex: Int =
      message.frames.indexOf(ByteString("<IDS|MSG>".getBytes))
    //  TODO Handle the case where there is no delimiter
    val ids: Seq[Array[Byte]] =
      message.frames.take(delimiterIndex).map(
        (byteString : ByteString) =>  { byteString.toArray }
      )
    val header = Json.parse(message.frames(delimiterIndex + 2)).as[Header]
    // TODO: Investigate better solution than setting parentHeader to null for {}
    val parentHeader = parseAndHandle(message.frames(delimiterIndex + 3),
                                  ParentHeader.headerReads,
                                  handler = (valid: ParentHeader) => valid,
                                  errHandler = _ => null
    )
    val metadata = Json.parse(message.frames(delimiterIndex + 4)).as[Metadata]

    KMBuilder().withIds(ids.toList)
               .withSignature(message.frame(delimiterIndex + 1))
               .withHeader(header)
               .withParentHeader(parentHeader)
               .withMetadata(metadata)
               .withContentString(message.frame(delimiterIndex + 5)).build(false)
  }

  implicit def KernelMessageToZMQMessage(kernelMessage : KernelMessage) : ZMQMessage = {
    val frames: scala.collection.mutable.ListBuffer[ByteString] = scala.collection.mutable.ListBuffer()
    kernelMessage.ids.map((id : Array[Byte]) => frames += ByteString.apply(id) )
    frames += "<IDS|MSG>"
    frames += kernelMessage.signature
    frames += Json.toJson(kernelMessage.header).toString()
    frames += Json.toJson(kernelMessage.parentHeader).toString()
    frames += Json.toJson(kernelMessage.metadata).toString
    frames += kernelMessage.contentString
    ZMQMessage(frames: _*)
  }

  def parseAndHandle[T, U](json: String, reads: Reads[T],
                           handler: T => U) : U = {
    parseAndHandle(json, reads, handler,
      (invalid: Seq[(JsPath, Seq[ValidationError])]) => {
        logger.error(s"Could not parse JSON, ${json}")
        throw new Throwable(s"Could not parse JSON, ${json}")
      }
    )
  }

  def parseAndHandle[T, U](json: String, reads: Reads[T],
                           handler: T => U,
                           errHandler: Seq[(JsPath, Seq[ValidationError])] => U) : U = {
    Json.parse(json).validate[T](reads).fold(
      errHandler,
      (content: T) => handler(content)
    )
  }
} 
Example 52
Source File: ExecuteRequestRelay.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.relay

import java.io.OutputStream
import akka.actor.Actor
import akka.pattern._
import akka.util.Timeout
import org.apache.toree.interpreter.{ExecuteAborted, ExecuteError, ExecuteFailure, ExecuteOutput}
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.kernel.protocol.v5.content._
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import org.apache.toree.kernel.protocol.v5.magic.MagicParser
import org.apache.toree.plugins.PluginManager
import org.apache.toree.utils.LogLike
import scala.concurrent.Future
import scala.concurrent.duration._
import org.apache.toree.plugins.NewOutputStream

case class ExecuteRequestRelay(
  actorLoader: ActorLoader,
  pluginManager: PluginManager,
  magicParser: MagicParser
)
  extends Actor with LogLike
{
  import context._
  implicit val timeout = Timeout(21474835.seconds)

  
  private def packageFutureResponse(
    future: Future[Either[ExecuteOutput, ExecuteFailure]]
  ): Future[(ExecuteReply, ExecuteResult)] = future.map {
    case Left(data) =>
      (
        ExecuteReplyOk(1, Some(Payloads()), Some(UserExpressions())),
        ExecuteResult(1, data, Metadata())
      )
    case Right(executeFailure) =>
      failureMatch(executeFailure)
  }

  override def receive: Receive = {
    case (executeRequest: ExecuteRequest, parentMessage: KernelMessage,
      outputStream: OutputStream) =>
      val interpreterActor = actorLoader.load(SystemActorType.Interpreter)

      // Store our old sender so we don't lose it in the callback
      // NOTE: Should point back to our KernelMessageRelay
      val oldSender = sender()

      // Sets the outputStream for this particular ExecuteRequest
      import org.apache.toree.plugins.Implicits._
      pluginManager.fireEventFirstResult(
        NewOutputStream,
        "outputStream" -> outputStream
      )

      // Parse the code for magics before sending it to the interpreter and
      // pipe the response to sender
      (magicParser.parse(executeRequest.code) match {
        case Left(code) =>
          val parsedRequest =
            (executeRequest.copy(code = code), parentMessage, outputStream)
          val interpreterFuture = (interpreterActor ? parsedRequest)
            .mapTo[Either[ExecuteOutput, ExecuteFailure]]
          packageFutureResponse(interpreterFuture)

        case Right(error) =>
          val failure = ExecuteError("Error parsing magics!", error, Nil)
          Future { failureMatch(failure) }
      }) pipeTo oldSender
  }
} 
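ExecuteRequestRelay combines ask, mapTo and pipeTo: the interpreter's reply is transformed and forwarded to the original sender once the Future completes. A minimal sketch of that relay pattern follows; the Worker and Relay actors are hypothetical stand-ins, not Toree classes.

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.pattern.{ask, pipe}
import akka.util.Timeout

import scala.concurrent.duration._

class Worker extends Actor {
  def receive: Receive = { case n: Int => sender() ! (n * 2) }
}

class Relay(worker: ActorRef) extends Actor {
  import context.dispatcher
  implicit val timeout: Timeout = Timeout(5.seconds)

  def receive: Receive = {
    case n: Int =>
      // Capture the current sender before the asynchronous callback, as the relay above does
      val oldSender = sender()
      (worker ? n).mapTo[Int].map(result => s"result=$result") pipeTo oldSender
  }
}

object RelaySketch extends App {
  implicit val system: ActorSystem = ActorSystem("relay-sketch")
  import system.dispatcher
  implicit val timeout: Timeout = Timeout(5.seconds)

  val worker = system.actorOf(Props[Worker], "worker")
  val relay  = system.actorOf(Props(new Relay(worker)), "relay")

  (relay ? 21).foreach(println) // prints "result=42"
}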
Example 53
Source File: JobService.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package mass.job.service.job

import java.io.File
import java.nio.charset.StandardCharsets
import java.nio.file.{ Files, Path }

import akka.actor.typed.{ ActorRef, ActorSystem }
import akka.actor.typed.scaladsl.AskPattern._
import akka.http.scaladsl.server.directives.FileInfo
import akka.util.Timeout
import javax.inject.{ Inject, Singleton }
import mass.job.service.job.JobActor.CommandReply
import mass.message.job._

import scala.collection.immutable
import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag

@Singleton
class JobService @Inject() (implicit system: ActorSystem[_]) {
  implicit val timeout: Timeout = Timeout(10.seconds)
  val jobActor: ActorRef[JobActor.Command] = JobActor.init(system)

  def listOption(): Future[JobGetAllOptionResp] = askToJob[JobGetAllOptionResp](JobGetAllOptionReq())

  def uploadFiles(list: immutable.Seq[(FileInfo, File)])(implicit ec: ExecutionContext): Future[JobUploadFilesResp] = {
    askToJob[JobUploadFilesResp](JobUploadFilesReq(list)).andThen {
      case _ => list.foreach { case (_, file) => Files.deleteIfExists(file.toPath) }
    }
  }

  def uploadJobOnZip(fileInfo: FileInfo, file: Path)(implicit ec: ExecutionContext): Future[JobUploadJobResp] = {
    val req = JobUploadJobReq(
      file,
      fileInfo.fileName,
      fileInfo.contentType.charsetOption.map(_.nioCharset()).getOrElse(StandardCharsets.UTF_8))
    askToJob[JobUploadJobResp](req).andThen { case _ => Files.deleteIfExists(file) }
  }

  def updateTrigger(req: JobUpdateReq): Future[JobSchedulerResp] = askToJob[JobSchedulerResp](req)

  def page(req: JobPageReq): Future[JobPageResp] = askToJob[JobPageResp](req)

  def findItemByKey(key: String): Future[JobSchedulerResp] = askToJob[JobSchedulerResp](JobFindReq(key = key))

  def createJob(req: JobCreateReq): Future[JobCreateResp] = askToJob[JobCreateResp](req)

  def updateJob(req: JobUpdateReq): Future[JobSchedulerResp] = askToJob[JobSchedulerResp](req)

  @inline private def askToJob[RESP](req: JobMessage)(implicit tag: ClassTag[RESP]): Future[RESP] =
    jobActor.ask[JobResponse](replyTo => CommandReply(req, replyTo)).mapTo[RESP]
} 
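askToJob above uses the typed ask pattern, which requires an implicit Timeout and a Scheduler obtained from the typed ActorSystem. A minimal sketch with a hypothetical Ping/Pong protocol in place of the JobActor commands:

import akka.actor.typed.scaladsl.AskPattern._
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ActorRef, ActorSystem}
import akka.util.Timeout

import scala.concurrent.Future
import scala.concurrent.duration._

object TypedAskSketch extends App {
  final case class Ping(replyTo: ActorRef[Pong])
  final case class Pong(message: String)

  // Guardian behavior that answers every Ping with a Pong
  val pinger = Behaviors.receiveMessage[Ping] { ping =>
    ping.replyTo ! Pong("pong")
    Behaviors.same
  }

  implicit val system: ActorSystem[Ping] = ActorSystem(pinger, "typed-ask-sketch")
  implicit val timeout: Timeout = Timeout(10.seconds)
  import system.executionContext

  // ask[RESP] needs the implicit Timeout and the Scheduler derived from the ActorSystem
  val reply: Future[Pong] = system.ask[Pong](replyTo => Ping(replyTo))
  reply.foreach(pong => println(pong.message))
}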
Example 54
Source File: TransformationFrontend.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package sample.cluster.transformation

import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger

import akka.actor.{ Actor, ActorRef, ActorSystem, Props, Terminated }
import akka.pattern.ask
import akka.util.Timeout
import com.typesafe.config.ConfigFactory

import scala.concurrent.ExecutionContext.Implicits
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

//#frontend
class TransformationFrontend extends Actor {
  var backends = IndexedSeq.empty[ActorRef]
  var jobCounter = 0

  def receive = {
    case job: TransformationJob if backends.isEmpty =>
      sender() ! JobFailed("Service unavailable, try again later", job)

    case job: TransformationJob =>
      jobCounter += 1
      backends(jobCounter % backends.size) forward job

    case BackendRegistration if !backends.contains(sender()) =>
      context watch sender()
      backends = backends :+ sender()

    case Terminated(a) =>
      backends = backends.filterNot(_ == a)
  }
}
//#frontend

object TransformationFrontend {
  def main(args: Array[String]): Unit = {
    // Override the configuration of the port when specified as program argument
    val port = if (args.isEmpty) "0" else args(0)
    val config = ConfigFactory
      .parseString(s"""
        akka.remote.netty.tcp.port=$port
        akka.remote.artery.canonical.port=$port
        """)
      .withFallback(ConfigFactory.parseString("akka.cluster.roles = [frontend]"))
      .withFallback(ConfigFactory.load("simple-cluster"))

    val system = ActorSystem("ClusterSystem", config)
    val frontend =
      system.actorOf(Props[TransformationFrontend], name = "frontend")

    val counter = new AtomicInteger
    import system.dispatcher
    system.scheduler.schedule(2.seconds, 2.seconds) {
      implicit val timeout = Timeout(5 seconds)
      (frontend ? TransformationJob("hello-" + counter.incrementAndGet())) foreach {
        case result => println(result)
      }
    }
    Future {
      TimeUnit.SECONDS.sleep(80)
      system.terminate()
    }(Implicits.global)
  }
} 
Example 55
Source File: ElasticSearchIndexer.scala    From nexus-kg   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.indexing

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.stream.scaladsl.Source
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.types.Project
import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchClient
import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchClient.BulkOp
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.AppConfig._
import ch.epfl.bluebrain.nexus.kg.indexing.View.ElasticSearchView
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.kg.routes.Clients
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.ProgressFlowElem
import ch.epfl.bluebrain.nexus.sourcing.projections.ProjectionProgress.NoProgress
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
@SuppressWarnings(Array("MaxParameters"))
object ElasticSearchIndexer {

  private implicit val log: Logger = Logger[ElasticSearchIndexer.type]

  
  final def start[F[_]: Timer](
      view: ElasticSearchView,
      resources: Resources[F],
      project: Project,
      restartOffset: Boolean
  )(
      implicit as: ActorSystem,
      actorInitializer: (Props, String) => ActorRef,
      projections: Projections[F, String],
      F: Effect[F],
      clients: Clients[F],
      config: AppConfig
  ): StreamSupervisor[F, ProjectionProgress] = {

    implicit val ec: ExecutionContext          = as.dispatcher
    implicit val p: Project                    = project
    implicit val indexing: IndexingConfig      = config.elasticSearch.indexing
    implicit val metadataOpts: MetadataOptions = MetadataOptions(linksAsIri = true, expandedLinks = true)
    implicit val tm: Timeout                   = Timeout(config.elasticSearch.askTimeout)

    val client: ElasticSearchClient[F] = clients.elasticSearch.withRetryPolicy(config.elasticSearch.indexing.retry)

    def deleteOrIndex(res: ResourceV): Option[BulkOp] =
      if (res.deprecated && !view.filter.includeDeprecated) Some(delete(res))
      else view.toDocument(res).map(doc => BulkOp.Index(view.index, res.id.value.asString, doc))

    def delete(res: ResourceV): BulkOp =
      BulkOp.Delete(view.index, res.id.value.asString)

    val initFetchProgressF: F[ProjectionProgress] =
      if (restartOffset)
        projections.recordProgress(view.progressId, NoProgress) >> view.createIndex >> F.pure(NoProgress)
      else view.createIndex >> projections.progress(view.progressId)

    val sourceF: F[Source[ProjectionProgress, _]] = initFetchProgressF.map { initial =>
      val flow = ProgressFlowElem[F, Any]
        .collectCast[Event]
        .groupedWithin(indexing.batch, indexing.batchTimeout)
        .distinct()
        .mapAsync(view.toResource(resources, _))
        .collectSome[ResourceV]
        .collect {
          case res if view.allowedSchemas(res) && view.allowedTypes(res) => deleteOrIndex(res)
          case res if view.allowedSchemas(res)                           => Some(delete(res))
        }
        .collectSome[BulkOp]
        .runAsyncBatch(client.bulk(_))()
        .mergeEmit()
        .toPersistedProgress(view.progressId, initial)

      cassandraSource(s"project=${view.ref.id}", view.progressId, initial.minProgress.offset)
        .via(flow)
        .via(kamonViewMetricsFlow(view, project))
    }
    StreamSupervisor.start(sourceF, view.progressId, actorInitializer)
  }
}
// $COVERAGE-ON$ 
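The Timeout above is not hard-coded but derived from configuration (config.elasticSearch.askTimeout). A minimal sketch of building a Timeout from a Typesafe Config duration; the config path used here is hypothetical:

import java.util.concurrent.TimeUnit

import akka.util.Timeout
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration._

object TimeoutFromConfig extends App {
  val config = ConfigFactory.parseString("indexing.ask-timeout = 15 seconds")

  // getDuration returns the value in the requested unit; .millis turns it into a FiniteDuration
  val askTimeout: FiniteDuration =
    config.getDuration("indexing.ask-timeout", TimeUnit.MILLISECONDS).millis

  implicit val tm: Timeout = Timeout(askTimeout)
  println(tm.duration) // 15000 milliseconds
}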
Example 56
Source File: StorageIndexer.scala    From nexus-kg   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.indexing

import java.time.Instant

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, StorageCache}
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.AppConfig._
import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.kg.storage.Storage
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem}
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
object StorageIndexer {

  private implicit val log = Logger[StorageIndexer.type]

  def start[F[_]: Timer](storages: Storages[F], storageCache: StorageCache[F])(
      implicit projectCache: ProjectCache[F],
      F: Effect[F],
      as: ActorSystem,
      projectInitializer: ProjectInitializer[F],
      adminClient: AdminClient[F],
      config: AppConfig
  ): StreamSupervisor[F, Unit] = {

    implicit val authToken                = config.iam.serviceAccountToken
    implicit val indexing: IndexingConfig = config.keyValueStore.indexing
    implicit val ec: ExecutionContext     = as.dispatcher
    implicit val tm: Timeout              = Timeout(config.keyValueStore.askTimeout)
    val name                              = "storage-indexer"

    def toStorage(event: Event): F[Option[(Storage, Instant)]] =
      fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project =>
        storages.fetchStorage(event.id).value.map {
          case Left(err) =>
            log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'")
            None
          case Right(timedStorage) => Some(timedStorage)
        }
      }

    val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.Storage.value.show}", name)
    val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any]
      .collectCast[Event]
      .groupedWithin(indexing.batch, indexing.batchTimeout)
      .distinct()
      .mergeEmit()
      .mapAsync(toStorage)
      .collectSome[(Storage, Instant)]
      .runAsync { case (storage, instant) => storageCache.put(storage)(instant) }()
      .flow
      .map(_ => ())

    StreamSupervisor.startSingleton(F.delay(source.via(flow)), name)
  }
}
// $COVERAGE-ON$ 
Example 57
Source File: SparqlIndexer.scala    From nexus-kg   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.indexing

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.stream.scaladsl.Source
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.types.Project
import ch.epfl.bluebrain.nexus.commons.sparql.client.{BlazegraphClient, SparqlWriteQuery}
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.AppConfig._
import ch.epfl.bluebrain.nexus.kg.indexing.View.SparqlView
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.kg.routes.Clients
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.ProgressFlowElem
import ch.epfl.bluebrain.nexus.sourcing.projections.ProjectionProgress.NoProgress
import ch.epfl.bluebrain.nexus.sourcing.projections._

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
@SuppressWarnings(Array("MaxParameters"))
object SparqlIndexer {

  
  final def start[F[_]: Timer](
      view: SparqlView,
      resources: Resources[F],
      project: Project,
      restartOffset: Boolean
  )(
      implicit as: ActorSystem,
      actorInitializer: (Props, String) => ActorRef,
      projections: Projections[F, String],
      F: Effect[F],
      clients: Clients[F],
      config: AppConfig
  ): StreamSupervisor[F, ProjectionProgress] = {

    implicit val ec: ExecutionContext          = as.dispatcher
    implicit val p: Project                    = project
    implicit val indexing: IndexingConfig      = config.sparql.indexing
    implicit val metadataOpts: MetadataOptions = MetadataOptions(linksAsIri = true, expandedLinks = true)
    implicit val tm: Timeout                   = Timeout(config.sparql.askTimeout)

    val client: BlazegraphClient[F] =
      clients.sparql.copy(namespace = view.index).withRetryPolicy(config.sparql.indexing.retry)

    def buildInsertOrDeleteQuery(res: ResourceV): SparqlWriteQuery =
      if (res.deprecated && !view.filter.includeDeprecated) view.buildDeleteQuery(res)
      else view.buildInsertQuery(res)

    val initFetchProgressF: F[ProjectionProgress] =
      if (restartOffset)
        projections.recordProgress(view.progressId, NoProgress) >> view.createIndex >> F.pure(NoProgress)
      else view.createIndex >> projections.progress(view.progressId)

    val sourceF: F[Source[ProjectionProgress, _]] = initFetchProgressF.map { initial =>
      val flow = ProgressFlowElem[F, Any]
        .collectCast[Event]
        .groupedWithin(indexing.batch, indexing.batchTimeout)
        .distinct()
        .mapAsync(view.toResource(resources, _))
        .collectSome[ResourceV]
        .collect {
          case res if view.allowedSchemas(res) && view.allowedTypes(res) => buildInsertOrDeleteQuery(res)
          case res if view.allowedSchemas(res)                           => view.buildDeleteQuery(res)
        }
        .runAsyncBatch(client.bulk(_))()
        .mergeEmit()
        .toPersistedProgress(view.progressId, initial)
      cassandraSource(s"project=${view.ref.id}", view.progressId, initial.minProgress.offset)
        .via(flow)
        .via(kamonViewMetricsFlow(view, project))
    }
    StreamSupervisor.start(sourceF, view.progressId, actorInitializer)
  }
}
// $COVERAGE-ON$ 
Example 58
Source File: ResolverIndexer.scala    From nexus-kg   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.indexing

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, ResolverCache}
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.AppConfig._
import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.kg.resolve.Resolver
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem}
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
object ResolverIndexer {

  private implicit val log = Logger[ResolverIndexer.type]

  
  final def start[F[_]: Timer](resolvers: Resolvers[F], resolverCache: ResolverCache[F])(
      implicit
      projectCache: ProjectCache[F],
      as: ActorSystem,
      F: Effect[F],
      projectInitializer: ProjectInitializer[F],
      adminClient: AdminClient[F],
      config: AppConfig
  ): StreamSupervisor[F, Unit] = {
    implicit val authToken                = config.iam.serviceAccountToken
    implicit val indexing: IndexingConfig = config.keyValueStore.indexing
    implicit val ec: ExecutionContext     = as.dispatcher
    implicit val tm: Timeout              = Timeout(config.keyValueStore.askTimeout)

    val name = "resolver-indexer"

    def toResolver(event: Event): F[Option[Resolver]] =
      fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project =>
        resolvers.fetchResolver(event.id).value.map {
          case Left(err) =>
            log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'")
            None
          case Right(resolver) => Some(resolver)
        }
      }

    val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.Resolver.value.show}", name)
    val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any]
      .collectCast[Event]
      .groupedWithin(indexing.batch, indexing.batchTimeout)
      .distinct()
      .mergeEmit()
      .mapAsync(toResolver)
      .collectSome[Resolver]
      .runAsync(resolverCache.put)()
      .flow
      .map(_ => ())

    StreamSupervisor.startSingleton(F.delay(source.via(flow)), name)
  }
}
// $COVERAGE-ON$ 
Example 59
Source File: ViewIndexer.scala    From nexus-kg   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.indexing

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, ViewCache}
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.AppConfig._
import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem}
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
object ViewIndexer {

  private implicit val log = Logger[ViewIndexer.type]

  def start[F[_]: Timer](views: Views[F], viewCache: ViewCache[F])(
      implicit projectCache: ProjectCache[F],
      F: Effect[F],
      as: ActorSystem,
      projectInitializer: ProjectInitializer[F],
      adminClient: AdminClient[F],
      config: AppConfig
  ): StreamSupervisor[F, Unit] = {

    implicit val authToken                = config.iam.serviceAccountToken
    implicit val indexing: IndexingConfig = config.keyValueStore.indexing
    implicit val ec: ExecutionContext     = as.dispatcher
    implicit val tm: Timeout              = Timeout(config.keyValueStore.askTimeout)
    val name                              = "view-indexer"

    def toView(event: Event): F[Option[View]] =
      fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project =>
        views.fetchView(event.id).value.map {
          case Left(err) =>
            log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'")
            None
          case Right(view) => Some(view)
        }
      }

    val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.View.value.show}", name)
    val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any]
      .collectCast[Event]
      .groupedWithin(indexing.batch, indexing.batchTimeout)
      .distinct()
      .mergeEmit()
      .mapAsync(toView)
      .collectSome[View]
      .runAsync(viewCache.put)()
      .flow
      .map(_ => ())

    StreamSupervisor.startSingleton(F.delay(source.via(flow)), name)
  }
}
// $COVERAGE-ON$ 
Example 60
Source File: BootstrapTestApp.scala    From scalajs-bootstrap   with MIT License 5 votes vote down vote up
package com.karasiq.bootstrap.test.backend

import java.util.concurrent.TimeUnit

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps

import akka.actor._
import akka.io.IO
import akka.pattern.ask
import akka.util.Timeout
import spray.can.Http
import spray.http._
import spray.routing.HttpService

import com.karasiq.bootstrap.test.frontend.TestHtmlPage

object BootstrapTestApp extends App {
  final class AppHandler extends Actor with HttpService {
    override def receive: Actor.Receive = runRoute {
      get {
        // Server-rendered page
        path("serverside.html") {
          complete(HttpResponse(entity = HttpEntity(ContentType(MediaTypes.`text/html`), TestHtmlPage())))
        } ~
        // Index page
        (pathSingleSlash & respondWithMediaType(MediaTypes.`text/html`)) {
          getFromResource("webapp/index.html")
        } ~
        // Other resources
        getFromResourceDirectory("webapp")
      }
    }

    override def actorRefFactory: ActorRefFactory = context
  }

  def startup(): Unit = {
    implicit val timeout = Timeout(20 seconds)

    implicit val actorSystem = ActorSystem("bootstrap-test")

    Runtime.getRuntime.addShutdownHook(new Thread(new Runnable {
      override def run(): Unit = {
        Await.result(actorSystem.terminate(), FiniteDuration(5, TimeUnit.MINUTES))
      }
    }))

    val service = actorSystem.actorOf(Props[AppHandler], "webService")
    IO(Http) ? Http.Bind(service, interface = "localhost", port = 9000)
  }

  startup()
} 
Example 61
Source File: MusicCommands.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.examplecore.music

import ackcord._
import ackcord.commands.{CommandBuilder, CommandController, NamedCommand, VoiceGuildMemberCommandMessage}
import ackcord.data.{GuildId, TextChannel}
import ackcord.examplecore.music.MusicHandler.{NextTrack, QueueUrl, StopMusic, TogglePause}
import akka.NotUsed
import akka.actor.typed.scaladsl.AskPattern._
import akka.actor.typed.{ActorRef, ActorSystem}
import akka.stream.scaladsl.{Flow, Keep, Sink}
import akka.stream.typed.scaladsl.ActorFlow
import akka.util.Timeout

class MusicCommands(requests: Requests, guildId: GuildId, musicHandler: ActorRef[MusicHandler.Command])(
    implicit timeout: Timeout,
    system: ActorSystem[Nothing]
) extends CommandController(requests) {

  val VoiceCommand: CommandBuilder[VoiceGuildMemberCommandMessage, NotUsed] =
    GuildVoiceCommand.andThen(CommandBuilder.inOneGuild(guildId))

  val queue: NamedCommand[String] =
    VoiceCommand.named("&", Seq("q", "queue")).parsing[String].withSideEffects { m =>
      musicHandler.ask[MusicHandler.CommandAck.type](QueueUrl(m.parsed, m.textChannel, m.voiceChannel.id, _))
    }

  private def simpleCommand(
      aliases: Seq[String],
      mapper: (TextChannel, ActorRef[MusicHandler.CommandAck.type]) => MusicHandler.MusicHandlerEvents
  ): NamedCommand[NotUsed] = {
    VoiceCommand.andThen(CommandBuilder.inOneGuild(guildId)).named("&", aliases, mustMention = true).toSink {
      Flow[VoiceGuildMemberCommandMessage[NotUsed]]
        .map(_.textChannel)
        .via(ActorFlow.ask(requests.parallelism)(musicHandler)(mapper))
        .toMat(Sink.ignore)(Keep.none)
    }
  }

  val stop: NamedCommand[NotUsed] = simpleCommand(Seq("s", "stop"), StopMusic.apply)

  val next: NamedCommand[NotUsed] = simpleCommand(Seq("n", "next"), NextTrack.apply)

  val pause: NamedCommand[NotUsed] = simpleCommand(Seq("p", "pause"), TogglePause.apply)
} 
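simpleCommand above routes each command message through ActorFlow.ask, which asks the music handler for every stream element and emits the reply; the implicit Timeout bounds each of those asks. A minimal sketch of the operator with a hypothetical Shout/Ack protocol (assumes akka-stream-typed is available):

import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ActorRef, ActorSystem}
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.typed.scaladsl.ActorFlow
import akka.util.Timeout

import scala.concurrent.duration._

object ActorFlowAskSketch extends App {
  final case class Shout(text: String, replyTo: ActorRef[Ack])
  final case class Ack(text: String)

  // Hypothetical handler that acknowledges each element it is asked about
  val shouter = Behaviors.receiveMessage[Shout] { msg =>
    msg.replyTo ! Ack(msg.text.toUpperCase)
    Behaviors.same
  }

  implicit val system: ActorSystem[Shout] = ActorSystem(shouter, "actor-flow-sketch")
  implicit val timeout: Timeout = Timeout(3.seconds)

  Source(List("stop", "next", "pause"))
    .via(ActorFlow.ask(parallelism = 4)(system)((text: String, replyTo: ActorRef[Ack]) => Shout(text, replyTo)))
    .map(_.text)
    .runWith(Sink.foreach(println))
}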
Example 62
Source File: MusicManager.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord

import scala.concurrent.duration.FiniteDuration
import scala.util.{Failure, Success}

import ackcord.data.{GuildId, VoiceGuildChannelId}
import ackcord.lavaplayer.LavaplayerHandler
import akka.actor.typed._
import akka.actor.typed.scaladsl.AskPattern._
import akka.actor.typed.scaladsl._
import akka.util.Timeout
import com.sedmelluq.discord.lavaplayer.player.AudioPlayer

object MusicManager {

  private[ackcord] def apply(
      cache: Cache,
      players: Map[GuildId, (AudioPlayer, ActorRef[LavaplayerHandler.Command])] = Map.empty
  ): Behavior[Command] = Behaviors.receive {
    case (ctx, ConnectToChannel(guildId, channelId, force, createPlayer, timeoutDur, replyTo)) =>
      implicit val timeout: Timeout             = Timeout(timeoutDur)
      implicit val system: ActorSystem[Nothing] = ctx.system
      import ctx.executionContext

      val (usedPlayer, actor) = players.getOrElse(
        guildId, {
          val player = createPlayer()
          (player, ctx.spawn(LavaplayerHandler(player, guildId, cache), guildId.asString))
        }
      )

      //TODO: Handle errors
      actor.ask[LavaplayerHandler.Reply](LavaplayerHandler.ConnectVoiceChannel(channelId, force, _)).onComplete {
        case Success(_) => replyTo ! GotPlayer(usedPlayer)
        case Failure(e) => replyTo ! GotError(e)
      }

      apply(cache, players.updated(guildId, (usedPlayer, actor)))

    case (_, DisconnectFromChannel(guildId, destroyPlayer)) =>
      players.get(guildId).foreach {
        case (player, actor) =>
          actor ! LavaplayerHandler.DisconnectVoiceChannel

          if (destroyPlayer) {
            player.destroy()
          }
      }

      apply(cache, players - guildId)

    case (_, SetChannelPlaying(guildId, playing)) =>
      players.get(guildId).foreach {
        case (_, actor) =>
          actor ! LavaplayerHandler.SetPlaying(playing)
      }
      Behaviors.same
  }

  sealed trait Command

  sealed trait ConnectToChannelResponse
  case class GotPlayer(player: AudioPlayer) extends ConnectToChannelResponse
  case class GotError(e: Throwable)         extends ConnectToChannelResponse

  private[ackcord] case class ConnectToChannel(
      guildId: GuildId,
      channelId: VoiceGuildChannelId,
      force: Boolean,
      createPlayer: () => AudioPlayer,
      timeout: FiniteDuration,
      replyTo: ActorRef[ConnectToChannelResponse]
  ) extends Command

  private[ackcord] case class DisconnectFromChannel(guildId: GuildId, destroyPlayer: Boolean) extends Command
  private[ackcord] case class SetChannelPlaying(guildId: GuildId, playing: Boolean)           extends Command
} 
Example 63
Source File: RestServiceActors.scala    From kafka-with-akka-streams-kafka-streams-tutorial   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.scala.akkastream.queryablestate.actors

import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives.{complete, get, onSuccess, path}
import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.lightbend.scala.akkastream.modelserver.actors.{GetModels, GetModelsResult, GetState}
import com.lightbend.scala.modelServer.model.ModelToServeStats
import de.heikoseeberger.akkahttpjackson.JacksonSupport

import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration._


object RestServiceActors {

  // See http://localhost:5500/models
  // Then select a model shown and try http://localhost:5500/state/<model>, e.g., http://localhost:5500/state/wine
  def startRest(modelserver: ActorRef)(implicit system: ActorSystem, materializer: ActorMaterializer): Unit = {

    implicit val executionContext: ExecutionContextExecutor = system.dispatcher
    // Use with HTTP methods that accept an implicit timeout argument
    // implicit val timeout = Timeout(10.seconds)
    val host = "127.0.0.1"
    val port = 5500
    val routes: Route = QueriesAkkaHttpResource.storeRoutes(modelserver)

    Http().bindAndHandle(routes, host, port) map
      { binding => println(s"Starting models observer on port ${binding.localAddress}") } recover {
      case ex =>
        println(s"Models observer could not bind to $host:$port - ${ex.getMessage}")
    }
  }
}

object QueriesAkkaHttpResource extends JacksonSupport {

  implicit val askTimeout: Timeout = Timeout(30.seconds)

  def storeRoutes(modelserver: ActorRef): Route =
    get {
      path("state"/Segment) { datatype =>
        onSuccess(modelserver ? GetState(datatype)) {
          case info: ModelToServeStats =>
            complete(info)
        }
      } ~
        path("models") {
          onSuccess(modelserver ? GetModels()) {
            case models: GetModelsResult =>
              complete(models)
          }
        }
    }
} 
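Both routes above wrap an ask in onSuccess, so the implicit askTimeout in scope is what bounds each request to the model server. A minimal sketch of that directive combination, with a hypothetical StatsActor in place of the model-server actor:

import akka.actor.{Actor, ActorRef}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.duration._

object AskRouteSketch {
  case object GetStats

  class StatsActor extends Actor {
    def receive: Receive = { case GetStats => sender() ! "42 requests served" }
  }

  implicit val askTimeout: Timeout = Timeout(30.seconds)

  // onSuccess unwraps the Future produced by the ask and feeds the value to the inner route
  def statsRoute(stats: ActorRef): Route =
    get {
      path("stats") {
        onSuccess((stats ? GetStats).mapTo[String]) { body =>
          complete(body)
        }
      }
    }
}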
Example 64
Source File: RestServiceStore.scala    From kafka-with-akka-streams-kafka-streams-tutorial   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.scala.kafkastreams.queriablestate.withstore

import javax.ws.rs.NotFoundException
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.lightbend.scala.kafkastreams.queriablestate.MetadataService
import com.lightbend.java.configuration.kafka.ApplicationKafkaParameters._
import com.lightbend.scala.kafkastreams.store.StoreState
import com.lightbend.scala.kafkastreams.store.store.custom.ModelStateStoreType
import de.heikoseeberger.akkahttpjackson.JacksonSupport
import org.apache.kafka.streams.KafkaStreams
import org.apache.kafka.streams.state.QueryableStoreTypes

import scala.concurrent.duration._


object RestServiceStore {

  implicit val system = ActorSystem("ModelServing")
  implicit val materializer = ActorMaterializer()
  implicit val executionContext = system.dispatcher
  implicit val timeout = Timeout(10.seconds)
  val host = "127.0.0.1"
  val port = 8888

  def startRestProxy(streams: KafkaStreams, port: Int, storeType : String) = {

    val routes: Route = QueriesResource.storeRoutes(streams, port, storeType)
    Http().bindAndHandle(routes, host, port) map
      { binding => println(s"Starting models observer on port ${binding.localAddress}") } recover {
      case ex =>
        println(s"Models observer could not bind to $host:$port - ${ex.getMessage}")
    }
  }
}

object QueriesResource extends JacksonSupport {

  private val customStoreType = new ModelStateStoreType()
  private val standardStoreType = QueryableStoreTypes.keyValueStore[Integer,StoreState]

  def storeRoutes(streams: KafkaStreams, port : Int, storeType : String): Route = {
    val metadataService = new MetadataService(streams)
    get {
      pathPrefix("state") {
        path("instances") {
          complete(
            metadataService.streamsMetadataForStore(STORE_NAME, port)
          )
        } ~
          path("value") {
            storeType match {
              case "custom" =>
                val store = streams.store(STORE_NAME, customStoreType)
                if (store == null) throw new NotFoundException
                complete(store.getCurrentServingInfo)
              case _ =>
                val store = streams.store(STORE_NAME, standardStoreType)
                if (store == null) throw new NotFoundException
                complete(store.get(STORE_ID).currentState)
            }
          }
        }
    }
  }
} 
Example 65
Source File: RestServiceInMemory.scala    From kafka-with-akka-streams-kafka-streams-tutorial   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.scala.kafkastreams.queriablestate.inmemory

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.lightbend.scala.kafkastreams.store.StoreState
import de.heikoseeberger.akkahttpjackson.JacksonSupport
import org.apache.kafka.streams.KafkaStreams
import com.lightbend.scala.modelServer.model.ModelToServeStats

import scala.concurrent.duration._


object RestServiceInMemory{

  implicit val system = ActorSystem("ModelServing")
  implicit val materializer = ActorMaterializer()
  implicit val executionContext = system.dispatcher
  implicit val timeout = Timeout(10.seconds)
  val host = "127.0.0.1"
  val port = 8888
  val routes: Route = QueriesResource.storeRoutes()

  // Surf to http://localhost:8888/state/instances for the list of currently deployed instances.
  // Then surf to http://localhost:8888/state/value for the current state of execution for a given model.
  def startRestProxy(streams: KafkaStreams, port: Int) = {

    Http().bindAndHandle(routes, host, port) map
      { binding => println(s"Starting models observer on port ${binding.localAddress}") } recover {
      case ex =>
        println(s"Models observer could not bind to $host:$port - ${ex.getMessage}")
    }
  }
}

// Surf to http://localhost:8888/state/value

object QueriesResource extends JacksonSupport {

  def storeRoutes(): Route =
    get {
      pathPrefix("state") {
        path("value") {
          val info: ModelToServeStats = StoreState().currentState.getOrElse(ModelToServeStats.empty)
          complete(info)
        }
      }
    }
} 
Example 66
Source File: AkkaExecutionSequencer.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter

import akka.Done
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, ExtendedActorSystem, Props}
import akka.pattern.{AskTimeoutException, ask}
import akka.util.Timeout
import com.daml.grpc.adapter.RunnableSequencingActor.ShutdownRequest

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
import com.daml.dec.DirectExecutionContext


  def closeAsync(implicit ec: ExecutionContext): Future[Done] =
    (actorRef ? ShutdownRequest).mapTo[Done].recover {
      case askTimeoutException: AskTimeoutException if actorIsTerminated(askTimeoutException) =>
        Done
    }

  private def actorIsTerminated(askTimeoutException: AskTimeoutException) = {
    AkkaExecutionSequencer.actorTerminatedRegex.findFirstIn(askTimeoutException.getMessage).nonEmpty
  }
}

object AkkaExecutionSequencer {
  def apply(name: String, terminationTimeout: FiniteDuration)(
      implicit system: ActorSystem): AkkaExecutionSequencer = {
    system match {
      case extendedSystem: ExtendedActorSystem =>
        new AkkaExecutionSequencer(
          extendedSystem.systemActorOf(Props[RunnableSequencingActor], name))(
          Timeout.durationToTimeout(terminationTimeout))
      case _ =>
        new AkkaExecutionSequencer(system.actorOf(Props[RunnableSequencingActor], name))(
          Timeout.durationToTimeout(terminationTimeout))

    }
  }

  private val actorTerminatedRegex = """Recipient\[.*]\] had already been terminated.""".r
}

private[grpc] class RunnableSequencingActor extends Actor with ActorLogging {
  @SuppressWarnings(Array("org.wartremover.warts.Any"))
  override val receive: Receive = {
    case runnable: Runnable =>
      try {
        runnable.run()
      } catch {
        case NonFatal(t) => log.error("Unexpected exception while executing Runnable", t)
      }
    case ShutdownRequest =>
      context.stop(self) // processing of the current message will continue
      sender() ! Done
  }
}

private[grpc] object RunnableSequencingActor {
  case object ShutdownRequest
} 
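AkkaExecutionSequencer converts a FiniteDuration explicitly with Timeout.durationToTimeout, while other examples construct the Timeout directly. A short sketch of the equivalent construction forms seen across these examples:

import java.util.concurrent.TimeUnit

import akka.util.Timeout
import akka.util.Timeout.durationToTimeout

import scala.concurrent.duration._

object TimeoutConstruction extends App {
  val fromDuration: Timeout  = Timeout(30.seconds)             // as in Example 63
  val fromUnits: Timeout     = Timeout(20, TimeUnit.SECONDS)   // as in Example 67
  val viaConversion: Timeout = durationToTimeout(5.seconds)    // as in Example 66
  val converted: Timeout     = 5.seconds                       // same conversion, applied implicitly

  println(Seq(fromDuration, fromUnits, viaConversion, converted).map(_.duration))
}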
Example 67
Source File: Create.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.console.commands

import java.util.concurrent.TimeUnit

import com.daml.ledger.api.refinements.ApiTypes
import com.daml.navigator.console._
import com.daml.lf.value.json.ApiCodecCompressed
import com.daml.navigator.model
import com.daml.navigator.store.Store.CreateContract
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import scala.util.Try

@SuppressWarnings(Array("org.wartremover.warts.Product", "org.wartremover.warts.Serializable"))
case object Create extends SimpleCommand {
  def name: String = "create"

  def description: String = "Create a contract"

  def params: List[Parameter] = List(
    ParameterTemplateId("template", "Template ID"),
    ParameterLiteral("with"),
    ParameterDamlValue("argument", "Contract argument")
  )

  def sendCommand(
      state: State,
      ps: model.PartyState,
      template: String,
      arg: model.ApiRecord): Future[ApiTypes.CommandId] = {
    implicit val actorTimeout: Timeout = Timeout(20, TimeUnit.SECONDS)
    implicit val executionContext: ExecutionContext = state.ec

    val command = CreateContract(
      ps,
      model.TemplateStringId(template),
      arg
    )
    (state.store ? command)
      .mapTo[Try[ApiTypes.CommandId]]
      .map(c => c.get)
  }

  def eval(
      state: State,
      args: List[String],
      set: CommandSet): Either[CommandError, (State, String)] = {
    args match {
      case templateName :: w :: damlA if w.equalsIgnoreCase("with") =>
        for {
          ps <- state.getPartyState ~> s"Unknown party ${state.party}"
          templateId <- model.parseOpaqueIdentifier(templateName) ~> s"Unknown template $templateName"
          apiValue <- Try(
            ApiCodecCompressed.stringToApiType(
              damlA.mkString(" "),
              templateId,
              ps.packageRegistry.damlLfDefDataType _)) ~> "Failed to parse DAML value"
          apiRecord <- Try(apiValue.asInstanceOf[model.ApiRecord]) ~> "Record argument required"
          future <- Try(sendCommand(state, ps, templateName, apiRecord)) ~> "Failed to create contract"
          commandId <- Try(Await.result(future, 30.seconds)) ~> "Failed to create contract"
        } yield {
          (state, Pretty.yaml(Pretty.commandResult(ps, commandId)))
        }
      case _ =>
        Left(CommandError("Invalid syntax", None))
    }
  }

} 
Example 68
Source File: SetTime.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.console.commands

import java.time.Instant
import java.time.format.DateTimeFormatter
import java.util.concurrent.TimeUnit

import com.daml.navigator.console._
import com.daml.navigator.store.Store.AdvanceTime
import com.daml.navigator.time.TimeProviderWithType
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import scala.util.Try

case object SetTime extends SimpleCommand {
  def name: String = "set_time"

  def description: String = "Set the (static) ledger effective time"

  def params: List[Parameter] = List(ParameterString("time", "New (static) ledger effective time"))

  private def advanceTime(state: State, newTime: Instant): Future[TimeProviderWithType] = {
    implicit val actorTimeout: Timeout = Timeout(20, TimeUnit.SECONDS)
    implicit val executionContext: ExecutionContext = state.ec

    (state.store ? AdvanceTime(newTime))
      .mapTo[Try[TimeProviderWithType]]
      .map(t => t.get)
  }

  private def formatTime(t: Instant): String = DateTimeFormatter.ISO_INSTANT.format(t)

  def eval(
      state: State,
      args: List[String],
      set: CommandSet): Either[CommandError, (State, String)] = {
    for {
      arg1 <- args.headOption ~> "Missing <time> argument"
      newTime <- Try(Instant.parse(arg1)) ~> "Failed to parse time"
      future <- Try(advanceTime(state, newTime)) ~> "Failed to advance time"
      confirmedTime <- Try(Await.result(future, 30.seconds)) ~> "Failed to advance time"
      result <- Try(formatTime(confirmedTime.time.getCurrentTime)) ~> "Failed to format time"
    } yield {
      (state, s"New ledger effective time: $result")
    }
  }

} 
Example 69
Source File: Exercise.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.console.commands

import java.util.concurrent.TimeUnit

import com.daml.ledger.api.refinements.ApiTypes
import com.daml.navigator.console._
import com.daml.lf.value.Value.ValueUnit
import com.daml.lf.value.json.ApiCodecCompressed
import com.daml.navigator.model
import com.daml.navigator.store.Store.ExerciseChoice
import akka.pattern.ask
import akka.util.Timeout
import com.daml.navigator.model.ApiValue

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import scala.util.Try

@SuppressWarnings(Array("org.wartremover.warts.Product", "org.wartremover.warts.Serializable"))
case object Exercise extends SimpleCommand {
  def name: String = "exercise"

  def description: String = "Exercises a choice"

  def params: List[Parameter] = List(
    ParameterContractId("contract", "Contract ID"),
    ParameterChoiceId("choice", "Name of the choice"),
    ParameterLiteral("with"),
    ParameterDamlValue("argument", "Choice argument")
  )

  def sendCommand(
      state: State,
      ps: model.PartyState,
      contract: String,
      choice: String,
      arg: model.ApiValue): Future[ApiTypes.CommandId] = {
    implicit val actorTimeout: Timeout = Timeout(20, TimeUnit.SECONDS)
    implicit val executionContext: ExecutionContext = state.ec

    val command = ExerciseChoice(
      ps,
      ApiTypes.ContractId(contract),
      ApiTypes.Choice(choice),
      arg
    )
    (state.store ? command)
      .mapTo[Try[ApiTypes.CommandId]]
      .map(c => c.get)
  }

  def exerciseChoice(
      state: State,
      cid: String,
      choice: String,
      damlA: Option[List[String]]): Either[CommandError, (State, String)] = {
    for {
      ps <- state.getPartyState ~> s"Unknown party ${state.party}"
      types = ps.packageRegistry
      contract <- ps.ledger.contract(ApiTypes.ContractId(cid), types) ~> s"Unknown contract $cid"
      choiceType <- contract.template.choices
        .find(c => ApiTypes.Choice.unwrap(c.name) == choice) ~> s"Unknown choice $choice"
      apiValue <- Try(
        // Use unit value if no argument is given
        damlA.fold[ApiValue](ValueUnit)(
          arg =>
            ApiCodecCompressed.stringToApiType(
              arg.mkString(" "),
              choiceType.parameter,
              ps.packageRegistry.damlLfDefDataType _))) ~> "Failed to parse choice argument"
      future <- Try(sendCommand(state, ps, cid, choice, apiValue)) ~> "Failed to exercise choice"
      commandId <- Try(Await.result(future, 30.seconds)) ~> "Failed to exercise choice"
    } yield {
      (state, Pretty.yaml(Pretty.commandResult(ps, commandId)))
    }
  }

  def eval(
      state: State,
      args: List[String],
      set: CommandSet): Either[CommandError, (State, String)] = {
    args match {
      case cid :: choice :: Nil =>
        exerciseChoice(state, cid, choice, None)
      case cid :: choice :: w :: damlA if w.equalsIgnoreCase("with") =>
        exerciseChoice(state, cid, choice, Some(damlA))
      case _ =>
        Left(CommandError("Invalid syntax", None))
    }
  }

} 
Example 70
Source File: Time.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.console.commands

import java.time.Instant
import java.time.format.DateTimeFormatter
import java.util.concurrent.TimeUnit

import com.daml.navigator.console._
import com.daml.navigator.store.Store.ReportCurrentTime
import com.daml.navigator.time.TimeProviderWithType
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import scala.util.Try

case object Time extends SimpleCommand {
  def name: String = "time"

  def description: String = "Print the ledger effective time"

  def params: List[Parameter] = List.empty

  def getTime(state: State): Future[TimeProviderWithType] = {
    implicit val actorTimeout: Timeout = Timeout(20, TimeUnit.SECONDS)
    implicit val executionContext: ExecutionContext = state.ec

    (state.store ? ReportCurrentTime)
      .mapTo[Try[TimeProviderWithType]]
      .map(t => t.get)
  }

  def formatTime(t: Instant): String = DateTimeFormatter.ISO_INSTANT.format(t)

  def eval(
      state: State,
      args: List[String],
      set: CommandSet): Either[CommandError, (State, String)] = {
    for {
      future <- Try(getTime(state)) ~> "Failed to get time"
      time <- Try(Await.result(future, 30.seconds)) ~> "Failed to get time"
      result <- Try(formatTime(time.time.getCurrentTime)) ~> "Failed to format time"
    } yield {
      (state, result)
    }
  }

} 
Example 71
Source File: ChaosActorInterface.scala    From eventuate-chaos   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.chaos

import akka.actor.ActorRef
import akka.io.Tcp
import akka.util.ByteString
import akka.pattern.ask
import akka.util.Timeout
import com.rbmhtechnology.eventuate.chaos.ChaosActorInterface.HealthCheckResult
import com.rbmhtechnology.eventuate.chaos.ChaosActorInterface.HealthCheck

import scala.concurrent.duration._
import scala.util.Failure
import scala.util.Success


object ChaosActorInterface {
  case class HealthCheck(requester: ActorRef)
  case class HealthCheckResult(state: Int, requester: ActorRef)
}

class ChaosActorInterface(chaosActor: ActorRef) extends ChaosInterface {
  implicit val timeout = Timeout(1.seconds)

  def handleCommand = {
    case ("persist", None, recv) =>
      val check = HealthCheck(recv)

      (chaosActor ? check).mapTo[HealthCheckResult] onComplete {
        case Success(result) =>
          result.requester ! Tcp.Write(ByteString(result.state.toString))
          result.requester ! Tcp.Close
        case Failure(e) =>
          recv ! Tcp.Close
      }
  }
} 
Example 72
Source File: ChaosSetInterface.scala    From eventuate-chaos   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.chaos

import akka.actor.ActorRef
import akka.util.Timeout

import com.rbmhtechnology.eventuate.crdt.ORSetService

import scala.concurrent.duration._

class ChaosSetInterface(service: ORSetService[Int]) extends ChaosInterface {
  val setId = "test"

  implicit val timeout = Timeout(1.seconds)

  private def writeSet(set: Set[Int], receiver: ActorRef) = {
    reply(s"[${set.mkString(",")}]", receiver)
  }

  def handleCommand = {
    case ("add", Some(v), recv) =>
      service.add(setId, v).map(x => writeSet(x, recv))
    case ("remove", Some(v), recv) =>
      service.remove(setId, v).map(x => writeSet(x, recv))
    case ("get", None, recv) =>
      service.value(setId).map(x => writeSet(x, recv))
  }
} 
Example 73
Source File: ChaosAWSet.scala    From eventuate-chaos   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.chaos.crdt.pure

import akka.actor.ActorRef
import akka.actor.Props
import akka.util.Timeout
import com.rbmhtechnology.eventuate.ReplicationEndpoint
import com.rbmhtechnology.eventuate.chaos.ChaosInterface
import com.rbmhtechnology.eventuate.chaos.ChaosLeveldbSetup
import com.rbmhtechnology.eventuate.crdt.pure.AWSetService

class ChaosAWSetInterface(service: AWSetService[Int]) extends ChaosInterface {

  val setId = "test"

  import scala.concurrent.duration._
  implicit val timeout = Timeout(1.seconds)

  private def writeSet(set: Set[Int], receiver: ActorRef) = {
    reply(s"[${set.mkString(",")}]", receiver)
  }

  def handleCommand = {
    case ("add", Some(v), recv) =>
      service.add(setId, v).map(x => writeSet(x, recv))
    case ("remove", Some(v), recv) =>
      service.remove(setId, v).map(x => writeSet(x, recv))
    case ("clear", None, recv) =>
      service.clear(setId).map(x => writeSet(x, recv))
    case ("get", None, recv) =>
      service.value(setId).map(x => writeSet(x, recv))
  }

}

object ChaosAWSetLeveldb extends ChaosLeveldbSetup {
  implicit val system = getSystem
  val endpoint = getEndpoint

  val service = new AWSetService[Int](name, endpoint.logs(ReplicationEndpoint.DefaultLogName))
  system.actorOf(Props(new ChaosAWSetInterface(service)))
} 
Example 74
Source File: Evaluation.scala    From glintlda   with MIT License 5 votes vote down vote up
package glintlda

import akka.util.Timeout
import breeze.numerics._
import com.typesafe.scalalogging.slf4j.Logger
import glint.iterators.RowBlockIterator
import org.slf4j.LoggerFactory

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}


  def logCurrentState(iteration: Int, docLoglikelihood: Double, tokenCounts: Long, model: LDAModel): Unit = {

    // Construct necessary variables for pipelined communication with parameter server
    implicit val ec = ExecutionContext.Implicits.global
    implicit val timeout = new Timeout(300 seconds)

    // Get the independently computed log likelihood numbers
    val wordLoglikelihood = computeWordLoglikelihood(model)
    val loglikelihood = docLoglikelihood + wordLoglikelihood

    // Compute perplexity
    val perplexity = Math.exp(-loglikelihood / tokenCounts)

    // Print to log
    val logger = Logger(LoggerFactory getLogger s"${getClass.getSimpleName}")
    logger.info(s"Evaluation after iteration ${iteration}")
    logger.info(s"Doc log-likelihood:  ${docLoglikelihood}")
    logger.info(s"Word log-likelihood: ${wordLoglikelihood}")
    logger.info(s"Log-likelihood:      ${loglikelihood}")
    logger.info(s"Token counts:        ${tokenCounts}")
    logger.info(s"Perplexity:          ${perplexity}")

  }

} 
Example 75
Source File: AggregateBuffer.scala    From glintlda   with MIT License 5 votes vote down vote up
package glintlda.util

import akka.util.Timeout
import glint.models.client.BigMatrix
import glintlda.LDAConfig

import scala.concurrent.{ExecutionContext, Future}


  def flush(matrix: BigMatrix[Long])(implicit ec: ExecutionContext, timeout: Timeout): Future[Boolean] = {

    if (cutoff > 0) {
      val rows = new Array[Long](size)
      val cols = new Array[Int](size)
      var i = 0
      while (i < size) {
        rows(i) = i / config.topics
        cols(i) = i % config.topics
        i += 1
      }
      matrix.push(rows, cols, buffer)
    } else {
      Future { true }
    }

  }

} 
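flush above takes the Timeout as an implicit method parameter instead of defining its own, so the caller decides how long the parameter-server push may take. A minimal sketch of that style; the push function below is a hypothetical stand-in for the BigMatrix call:

import akka.util.Timeout

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}

object ImplicitTimeoutParameter extends App {
  // Hypothetical remote call; a real client would use the timeout to bound the request
  def push(values: Array[Long])(implicit ec: ExecutionContext, timeout: Timeout): Future[Boolean] =
    Future {
      values.nonEmpty && timeout.duration > Duration.Zero
    }

  implicit val ec: ExecutionContext = ExecutionContext.global
  implicit val timeout: Timeout = Timeout(30.seconds)

  val ok = Await.result(push(Array(1L, 2L, 3L)), timeout.duration)
  println(s"pushed: $ok")
}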
Example 76
Source File: NetService.scala    From mantis   with Apache License 2.0 5 votes vote down vote up
package io.iohk.ethereum.jsonrpc

import akka.actor.ActorRef
import akka.agent.Agent
import akka.util.Timeout
import io.iohk.ethereum.jsonrpc.NetService.NetServiceConfig
import io.iohk.ethereum.network.PeerManagerActor
import io.iohk.ethereum.utils.ServerStatus.{Listening, NotListening}
import io.iohk.ethereum.utils.{Config, NodeStatus}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

object NetService {
  case class VersionRequest()
  case class VersionResponse(value: String)

  case class ListeningRequest()
  case class ListeningResponse(value: Boolean)

  case class PeerCountRequest()
  case class PeerCountResponse(value: Int)

  case class NetServiceConfig(peerManagerTimeout: FiniteDuration)

  object NetServiceConfig {
    def apply(etcClientConfig: com.typesafe.config.Config): NetServiceConfig = {
      val netServiceConfig = etcClientConfig.getConfig("network.rpc.net")
      NetServiceConfig(
        peerManagerTimeout = netServiceConfig.getDuration("peer-manager-timeout").toMillis.millis)
    }
  }
}

class NetService(nodeStatusHolder: Agent[NodeStatus], peerManager: ActorRef, config: NetServiceConfig) {
  import NetService._

  def version(req: VersionRequest): ServiceResponse[VersionResponse] =
    Future.successful(Right(VersionResponse(Config.Network.peer.networkId.toString)))

  def listening(req: ListeningRequest): ServiceResponse[ListeningResponse] = {
    Future.successful {
      Right(
        nodeStatusHolder().serverStatus match {
          case _: Listening => ListeningResponse(true)
          case NotListening => ListeningResponse(false)
        }
      )
    }
  }

  def peerCount(req: PeerCountRequest): ServiceResponse[PeerCountResponse] = {
    import akka.pattern.ask
    implicit val timeout = Timeout(config.peerManagerTimeout)

    (peerManager ? PeerManagerActor.GetPeers)
      .mapTo[PeerManagerActor.Peers]
      .map { peers => Right(PeerCountResponse(peers.handshaked.size)) }
  }

} 
Example 77
Source File: TestSpec.scala    From akka-serialization-test   with Apache License 2.0
package com.github.dnvriend

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.serialization.SerializationExtension
import akka.stream.{ ActorMaterializer, Materializer }
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.prop.PropertyChecks
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, GivenWhenThen, Matchers }

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try

trait TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with ScalaFutures
    with BeforeAndAfterAll
    with Eventually
    with PropertyChecks
    with AkkaPersistenceQueries
    with AkkaStreamUtils
    with InMemoryCleanup {

  implicit val timeout: Timeout = Timeout(10.seconds)
  implicit val system: ActorSystem = ActorSystem()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val mat: Materializer = ActorMaterializer()
  implicit val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 50.seconds)
  val serialization = SerializationExtension(system)

  implicit class FutureToTry[T](f: Future[T]) {
    def toTry: Try[T] = Try(f.futureValue)
  }

  def killActors(actors: ActorRef*): Unit = {
    val probe = TestProbe()
    actors.foreach { actor ⇒
      probe watch actor
      actor ! PoisonPill
      probe expectTerminated actor
    }
  }

  override protected def afterAll(): Unit = {
    system.terminate()
    system.whenTerminated.toTry should be a 'success
  }
} 
Example 78
Source File: AggregateStateGetter.scala    From akka-tools   with MIT License
package no.nextgentel.oss.akkatools.testing

import java.util.concurrent.TimeUnit

import akka.actor.{ActorRef, ActorSystem}
import akka.pattern.ask
import akka.util.Timeout
import no.nextgentel.oss.akkatools.persistence.GetState

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag

object AggregateStateGetter {
  val defaultTimeout = Duration("60s")

  def apply[S:ClassTag](aggregateActorRef:ActorRef, timeout:Duration = defaultTimeout)(implicit system:ActorSystem):AggregateStateGetter[S] = new AggregateStateGetter[S](system, aggregateActorRef, timeout)
}

import AggregateStateGetter._

class AggregateStateGetter[S:ClassTag](system:ActorSystem, aggregateActorRef:ActorRef, timeout:Duration) {

  def getState():S = getState(None)
  def getState(aggregateId:Option[String]):S = {
    implicit val ec = system.dispatcher
    implicit val t = Timeout(timeout.toMillis, TimeUnit.MILLISECONDS)
    val getStateMsg = aggregateId match {
      case Some(id) => GetState(id)
      case None     => GetState()
    }
    val f = ask(aggregateActorRef, getStateMsg).mapTo[S]
    Await.result(f, timeout)
  }

}

class AggregateStateGetterJava(system:ActorSystem, aggregateActorRef:ActorRef, timeout:Duration)
  extends AggregateStateGetter[Any](system, aggregateActorRef, timeout) {

  def this(system:ActorSystem, aggregateActorRef:ActorRef) = this(system, aggregateActorRef, defaultTimeout)
} 
Example 79
Source File: Master.scala    From asyspark   with MIT License
package org.apache.spark.asyspark.core

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Address, Props, Terminated}
import akka.util.Timeout
import com.typesafe.config.Config
import com.typesafe.scalalogging.slf4j.StrictLogging
import org.apache.spark.asyspark.core.messages.master.{ClientList, RegisterClient, RegisterServer, ServerList}

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
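// NOTE: this excerpt omits the enclosing actor class declaration from the original Master.scala
// (presumably `class Master extends Actor with ActorLogging`), including its `servers` field.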



  var clients = Set.empty[ActorRef]

  override def receive: Receive = {
    case RegisterServer(server) =>
      log.info(s"Registering server ${server.path.toString}")
      println("register server")
      servers += server
      context.watch(server)
      sender ! true

    case RegisterClient(client)  =>
      log.info(s"Registering client ${sender.path.toString}")
      clients += client
      context.watch(client)
      sender ! true

    case ServerList() =>
      log.info(s"Sending current server list to ${sender.path.toString}")
      sender ! servers.toArray

    case ClientList() =>
      log.info(s"Sending current client list to ${sender.path.toString}")
      sender ! clients.toArray


    case Terminated(actor) =>
      actor match {
        case server: ActorRef if servers contains server =>
          log.info(s"Removing server ${server.path.toString}")
          servers -= server
        case client: ActorRef if clients contains client =>
          log.info(s"Removing client ${client.path.toString}")
          clients -= client
        case actor: ActorRef =>
          log.warning(s"Actor ${actor.path.toString} terminated for an unknown reason")
      }
  }

}

object Master extends StrictLogging {
  def run(config: Config): Future[(ActorSystem, ActorRef)] = {
    logger.debug("Starting master actor system")
    val system = ActorSystem(config.getString("asyspark.master.system"), config.getConfig("asyspark.master"))
    logger.debug("Starting master")
    val master = system.actorOf(Props[Master], config.getString("asyspark.master.name"))
    implicit val timeout = Timeout(config.getDuration("asyspark.master.startup-timeout", TimeUnit.MILLISECONDS) milliseconds)
    implicit val ec = ExecutionContext.Implicits.global
    val address = Address("akka.tcp", config.getString("asyspark.master.system"), config.getString("asyspark.master.host"),
    config.getString("asyspark.master.port").toInt)
    system.actorSelection(master.path.toSerializationFormat).resolveOne().map {
      case actor: ActorRef =>
        logger.debug("Master successfully started")
        (system, master)

    }
  }

} 
Example 80
Source File: TestSpec.scala    From intro-to-akka-streams   with Apache License 2.0
package com.github.dnvriend.streams

import akka.NotUsed
import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.stream.testkit.TestSubscriber
import akka.stream.testkit.scaladsl.TestSink
import akka.testkit.TestProbe
import akka.util.Timeout
import com.github.dnvriend.streams.util.ClasspathResources
import org.scalatest._
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatestplus.play.guice.GuiceOneServerPerSuite
import play.api.inject.BindingKey
import play.api.libs.json.{ Format, Json }
import play.api.test.WsTestClient

import scala.collection.immutable._
import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
import scala.util.Try

object Person {
  implicit val format: Format[Person] = Json.format[Person]
}

final case class Person(firstName: String, age: Int)

class TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with OptionValues
    with TryValues
    with ScalaFutures
    with WsTestClient
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with ClasspathResources
    with GuiceOneServerPerSuite {

  def getComponent[A: ClassTag] = app.injector.instanceOf[A]
  def getNamedComponent[A](name: String)(implicit ct: ClassTag[A]): A =
    app.injector.instanceOf[A](BindingKey(ct.runtimeClass.asInstanceOf[Class[A]]).qualifiedWith(name))

  // set the port number of the HTTP server
  override lazy val port: Int = 8081
  implicit val timeout: Timeout = 1.second
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 30.seconds, interval = 300.millis)
  implicit val system: ActorSystem = getComponent[ActorSystem]
  implicit val ec: ExecutionContext = getComponent[ExecutionContext]
  implicit val mat: Materializer = getComponent[Materializer]
  val log: LoggingAdapter = Logging(system, this.getClass)

  // ================================== Supporting Operations ====================================
  def id: String = java.util.UUID.randomUUID().toString

  implicit class FutureToTry[T](f: Future[T]) {
    def toTry: Try[T] = Try(f.futureValue)
  }

  implicit class SourceOps[A](src: Source[A, NotUsed]) {
    def testProbe(f: TestSubscriber.Probe[A] ⇒ Unit): Unit =
      f(src.runWith(TestSink.probe(system)))
  }

  def withIterator[T](start: Int = 0)(f: Source[Int, NotUsed] ⇒ T): T =
    f(Source.fromIterator(() ⇒ Iterator from start))

  def fromCollection[A](xs: Iterable[A])(f: TestSubscriber.Probe[A] ⇒ Unit): Unit =
    f(Source(xs).runWith(TestSink.probe(system)))

  def killActors(refs: ActorRef*): Unit = {
    val tp = TestProbe()
    refs.foreach { ref ⇒
      tp watch ref
      tp.send(ref, PoisonPill)
      tp.expectTerminated(ref)
    }
  }
} 
Example 81
Source File: RandomDataProducer.scala    From parquet4s   with MIT License
package com.github.mjakubowski84.parquet4s.indefinite

import akka.actor.{Actor, ActorRef, Cancellable, Props, Scheduler}
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}
import scala.util.Random

object RandomDataProducer {

  private val words = Seq("Example", "how", "to", "setup", "indefinite", "stream", "with", "Parquet", "writer")

}

trait RandomDataProducer {

  this: Akka with Logger with Kafka =>

  import RandomDataProducer._

  private def nextWord: String = words(Random.nextInt(words.size - 1))
  private def action(): Unit = sendKafkaMessage(nextWord)

  private lazy val scheduler: ActorRef = system.actorOf(FluctuatingSchedulerActor.props(action))
  implicit private val stopTimeout: Timeout = new Timeout(FluctuatingSchedulerActor.MaxDelay)

  def startDataProducer(): Unit = {
    logger.info("Starting scheduler that sends messages to Kafka...")
    scheduler ! FluctuatingSchedulerActor.Start
  }

  def stopDataProducer(): Unit = {
    logger.info("Stopping scheduler...")
    Await.ready(scheduler.ask(FluctuatingSchedulerActor.Stop), Duration.Inf)
  }

}

private object FluctuatingSchedulerActor {

  case object Start
  case object ScheduleNext
  case object Stop

  val MinDelay: FiniteDuration = 1.milli
  val MaxDelay: FiniteDuration = 500.millis
  val StartDelay: FiniteDuration = 100.millis

  trait Direction
  case object Up extends Direction
  case object Down extends Direction

  def props(action: () => Unit): Props = Props(new FluctuatingSchedulerActor(action))

}

private class FluctuatingSchedulerActor(action: () => Unit) extends Actor {

  import FluctuatingSchedulerActor._

  implicit def executionContext: ExecutionContext = context.system.dispatcher
  def scheduler: Scheduler = context.system.scheduler
  var scheduled: Option[Cancellable] = None

  override def receive: Receive = {
    case Start =>
      self ! ScheduleNext
      context.become(scheduling(StartDelay, direction = Down), discardOld = true)
  }

  def scheduling(delay: FiniteDuration, direction: Direction): Receive = {
    case ScheduleNext =>
      action()

      val rate = Random.nextFloat / 10.0f
      val step = (delay.toMillis * rate).millis
      val (newDirection, newDelay) = direction match {
        case Up if delay + step < MaxDelay =>
          (Up, delay + step)
        case Up =>
          (Down, delay - step)
        case Down if delay - step > MinDelay =>
          (Down, delay - step)
        case Down =>
          (Up, delay + step)
      }

      scheduled = Some(scheduler.scheduleOnce(delay, self, ScheduleNext))
      context.become(scheduling(newDelay, newDirection), discardOld = true)

    case Stop =>
      scheduled.foreach(_.cancel())
      context.stop(self)
  }

} 
Example 82
Source File: Actors.scala    From scala-concurrency-playground   with MIT License
package org.zalando.benchmarks

import akka.actor._
import akka.pattern.{ask, pipe}
import akka.routing.BalancingPool
import akka.util.Timeout
import org.zalando.benchmarks.ComputationFollowedByAsyncPublishing._

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

class Actors(system: ActorSystem) {
  def benchmark(coreFactor: Int): Unit = {
    import system.dispatcher
    implicit val timeout = Timeout(1 hour)

    // Route computations through a balanced pool of (cpu bound) computation workers.
    val router = system actorOf BalancingPool(numWorkers(coreFactor)).props(Props[ComputeActor])

    try {
      // Collect the results, sum them up and print the sum.
      printResult(Await.result(Future.traverse(1 to numTasks map Job) { job =>
        (router ? job).mapTo[PublishResult]
      }, 1 hour))

    } finally {
      // Shut down the actors.
      router ! PoisonPill
    }
  }
}

// Actor responsible for the computation, and for delegating to the publishing actor(s).
class ComputeActor extends Actor {
  val publisher = context actorOf Props[PublishActor]

  def receive = {
    case job: Job =>
      // tell the publisher about who sent us the job, and the job results
      val s = sender()
      publisher ! (s, Computer compute job)
  }
}

// Actor responsible for publishing, and for sending the response back.
class PublishActor extends Actor {
  import context.dispatcher

  def receive = {
    case (s: ActorRef, r: JobResult) =>
      // just pipe the result back to the original sender
      Publisher.publish(r, context.system) pipeTo s
  }
} 
Example 83
Source File: Server.scala    From glint   with MIT License
package glint

import java.util.concurrent.TimeUnit

import akka.actor._
import akka.pattern.ask
import akka.util.Timeout
import com.typesafe.config.Config
import com.typesafe.scalalogging.slf4j.StrictLogging
import glint.messages.master.RegisterServer

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
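// NOTE: this excerpt omits the enclosing object declaration from the original Server.scala
// (presumably an object extending StrictLogging, given the `logger` calls below).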


  def run(config: Config): Future[(ActorSystem, ActorRef)] = {

    logger.debug(s"Starting actor system ${config.getString("glint.server.system")}")
    val system = ActorSystem(config.getString("glint.server.system"), config.getConfig("glint.server"))

    logger.debug("Starting server actor")
    val server = system.actorOf(Props[Server], config.getString("glint.server.name"))

    logger.debug("Reading master information from config")
    val masterHost = config.getString("glint.master.host")
    val masterPort = config.getInt("glint.master.port")
    val masterName = config.getString("glint.master.name")
    val masterSystem = config.getString("glint.master.system")

    logger.info(s"Registering with master ${masterSystem}@${masterHost}:${masterPort}/user/${masterName}")
    implicit val ec = ExecutionContext.Implicits.global
    implicit val timeout = Timeout(config.getDuration("glint.server.registration-timeout", TimeUnit.MILLISECONDS) milliseconds)
    val master = system.actorSelection(s"akka.tcp://${masterSystem}@${masterHost}:${masterPort}/user/${masterName}")
    val registration = master ? RegisterServer(server)

    registration.map {
      case a =>
        logger.info("Server successfully registered with master")
        (system, server)
    }

  }
} 
Example 84
Source File: RowIterator.scala    From glint   with MIT License
package glint.iterators

import akka.util.Timeout
import breeze.linalg.Vector
import glint.models.client.BigMatrix

import scala.concurrent.ExecutionContext


class RowIterator[V](matrix: BigMatrix[V], blockSize: Int = 100)(implicit val ec: ExecutionContext)
  extends Iterator[Vector[V]] {

  // Row progress
  var index: Long = 0
  val rows: Long = if (matrix.rows == 0 || matrix.cols == 0) {
    0L
  } else {
    matrix.rows
  }

  // The underlying block iterator
  val blockIterator = new RowBlockIterator[V](matrix, blockSize)

  // The local block progress
  var localIndex: Int = 0
  var localSize: Int = 0
  var block = new Array[Vector[V]](0)

  override def hasNext: Boolean = index < rows

  override def next(): Vector[V] = {
    if (localIndex >= localSize) {
      block = blockIterator.next()
      localIndex = 0
      localSize = block.length
    }
    localIndex += 1
    index += 1
    block(localIndex - 1)
  }

} 
Example 85
Source File: RowBlockIterator.scala    From glint   with MIT License
package glint.iterators

import akka.util.Timeout
import breeze.linalg.Vector
import glint.models.client.BigMatrix

import scala.concurrent.{ExecutionContext, Future}


class RowBlockIterator[V](val matrix: BigMatrix[V],
                          val blockSize: Int)(implicit ec: ExecutionContext)
  extends PipelineIterator[Array[Vector[V]]] {

  if (matrix.cols == 0 || matrix.rows == 0) {
    total = 0
  } else {
    val inc = if (matrix.rows % blockSize == 0) {
      0
    } else {
      1
    }
    total = inc + (matrix.rows / blockSize).toInt
  }

  override protected def fetchNextFuture(): Future[Array[Vector[V]]] = {
    val nextRows = (index.toLong * blockSize until Math.min(matrix.rows, (index + 1) * blockSize)).toArray
    matrix.pull(nextRows)
  }

} 
Example 86
Source File: ColumnIteratorSpec.scala    From glint   with MIT License
package glint.iterators

import akka.util.Timeout
import glint.SystemTest
import glint.mocking.MockBigMatrix
import org.scalatest.{FlatSpec, Matchers}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._


class ColumnIteratorSpec extends FlatSpec with SystemTest with Matchers {

  "A ColumnIterator" should "iterate over all columns in order" in {

    // Construct mock matrix and data to push into it
    val nrOfRows = 2
    val nrOfCols = 4
    val mockMatrix = new MockBigMatrix[Long](nrOfRows, nrOfCols, 0, _ + _)

    val rows   = Array(0L, 1L, 0L, 1L, 0L, 1L, 0L, 1L)
    val cols   = Array( 0,  0,  1,  1,  2,  2,  3,  3)
    val values = Array(0L,  1,  2,  3,  4,  5,  6,  7)

    whenReady(mockMatrix.push(rows, cols, values)) { identity }

    // Check whether elements are in order
    var counter = 0
    val iterator = new ColumnIterator[Long](mockMatrix)
    iterator.foreach {
      case column => column.foreach {
        case value =>
          assert(value == counter)
          counter += 1
      }
    }

  }

  it should "iterate over all columns in order with larger rows" in {

    // Construct mock matrix and data to push into it
    val nrOfRows = 4
    val nrOfCols = 2
    val mockMatrix = new MockBigMatrix[Long](nrOfRows, nrOfCols, 0, _ + _)

    val rows   = Array(0L, 1L, 2L, 3L, 0L, 1L, 2L, 3L)
    val cols   = Array( 0,  0,  0,  0,  1,  1,  1,  1)
    val values = Array(0L,  1,  2,  3,  4,  5,  6,  7)

    whenReady(mockMatrix.push(rows, cols, values)) { identity }

    // Check whether elements are in order
    var counter = 0
    val iterator = new ColumnIterator[Long](mockMatrix)
    iterator.foreach {
      case column => column.foreach {
        case value =>
          assert(value == counter)
          counter += 1
      }
    }

  }

  it should "not iterate over an empty matrix" in {
    val mockMatrix = new MockBigMatrix[Double](0, 2, 0, _ + _)

    val iterator = new ColumnIterator[Double](mockMatrix)
    assert(!iterator.hasNext)
    iterator.foreach {
      case _ => fail("This should never execute")
    }

  }

} 
Example 87
Source File: RowBlockIteratorSpec.scala    From glint   with MIT License
package glint.iterators

import akka.util.Timeout
import glint.SystemTest
import glint.mocking.MockBigMatrix
import org.scalatest.{FlatSpec, Matchers}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._


class RowBlockIteratorSpec extends FlatSpec with SystemTest with Matchers {

  "A RowBlockIterator" should "iterate over all blocks of rows in order" in {

    // Construct mock matrix and data to push into it
    val nrOfRows = 5
    val nrOfCols = 2
    val mockMatrix = new MockBigMatrix[Long](nrOfRows, nrOfCols, 0, _ + _)

    val rows   = Array(0L, 0L, 1L, 1L, 2L, 2L, 3L, 3L, 4L, 4L)
    val cols   = Array( 0,  1,  0,  1,  0,  1,  0,  1,  0,  1)
    val values = Array(0L,  1,  2,  3,  4,  5,  6,  7,  8,  9)

    whenReady(mockMatrix.push(rows, cols, values)) { identity }

    // Check whether elements are in order
    var counter = 0
    val iterator = new RowBlockIterator[Long](mockMatrix, 2)
    iterator.foreach {
      case rows => rows.foreach {
        case row => row.foreach {
          case value =>
            assert(value == counter)
            counter += 1
        }
      }
    }

  }

  it should "iterate over a single block" in {

    // Construct mock matrix and data to push into it
    val nrOfRows = 5
    val nrOfCols = 2
    val mockMatrix = new MockBigMatrix[Long](nrOfRows, nrOfCols, 0, _ + _)

    val rows = Array(0L, 0L, 1L, 1L, 2L, 2L, 3L, 3L, 4L, 4L)
    val cols = Array(0, 1, 0, 1, 0, 1, 0, 1, 0, 1)
    val values = Array(0L, 1, 2, 3, 4, 5, 6, 7, 8, 9)

    whenReady(mockMatrix.push(rows, cols, values)) {
      identity
    }

    // Check whether elements are in order
    var counter = 0
    val iterator = new RowBlockIterator[Long](mockMatrix, 7)
    val resultRows = iterator.next()
    assert(!iterator.hasNext)
    resultRows.foreach {
      case row => row.foreach {
        case value =>
          assert(value == counter)
          counter += 1
      }
    }

  }

  it should "not iterate over an empty matrix" in {
    val mockMatrix = new MockBigMatrix[Double](0, 5, 0, _ + _)

    val iterator = new RowBlockIterator[Double](mockMatrix, 3)
    assert(!iterator.hasNext)
    iterator.foreach {
      case _ => fail("This should never execute")
    }

  }

} 
Example 88
Source File: RowIteratorSpec.scala    From glint   with MIT License
package glint.iterators

import akka.util.Timeout
import glint.SystemTest
import glint.mocking.MockBigMatrix
import org.scalatest.{FlatSpec, Matchers}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._


class RowIteratorSpec extends FlatSpec with SystemTest with Matchers {

  "A RowIterator" should "iterate over all rows in order" in {

    // Construct mock matrix and data to push into it
    val nrOfRows = 5
    val nrOfCols = 2
    val mockMatrix = new MockBigMatrix[Long](nrOfRows, nrOfCols, 0, _ + _)

    val rows   = Array(0L, 0L, 1L, 1L, 2L, 2L, 3L, 3L, 4L, 4L)
    val cols   = Array( 0,  1,  0,  1,  0,  1,  0,  1,  0,  1)
    val values = Array(0L,  1,  2,  3,  4,  5,  6,  7,  8,  9)

    whenReady(mockMatrix.push(rows, cols, values)) { identity }

    // Check whether elements are in order
    var counter = 0
    val iterator = new RowIterator[Long](mockMatrix, 2)
    iterator.foreach {
      case row => row.foreach {
        case value =>
          assert(value == counter)
          counter += 1
      }
    }

  }

  it should "iterate over a single block" in {

    // Construct mock matrix and data to push into it
    val nrOfRows = 5
    val nrOfCols = 2
    val mockMatrix = new MockBigMatrix[Long](nrOfRows, nrOfCols, 0, _ + _)

    val rows = Array(0L, 0L, 1L, 1L, 2L, 2L, 3L, 3L, 4L, 4L)
    val cols = Array(0, 1, 0, 1, 0, 1, 0, 1, 0, 1)
    val values = Array(0L, 1, 2, 3, 4, 5, 6, 7, 8, 9)

    whenReady(mockMatrix.push(rows, cols, values)) {
      identity
    }

    // Check whether elements are in order
    var counter = 0
    val iterator = new RowIterator[Long](mockMatrix, 7)
    iterator.foreach {
      case row => row.foreach {
        case value =>
          assert(value == counter)
          counter += 1
      }
    }
    assert(!iterator.hasNext)

  }

  it should "not iterate over an empty matrix" in {
    val mockMatrix = new MockBigMatrix[Double](3, 0, 0, _ + _)

    val iterator = new RowIterator[Double](mockMatrix, 3)
    assert(!iterator.hasNext)
    iterator.foreach {
      case _ => fail("This should never execute")
    }

  }

} 
Example 89
Source File: schema.scala    From sangria-subscriptions-example   with Apache License 2.0
import akka.actor.ActorRef
import akka.util.Timeout
import generic.{Event, Versioned, View}
import generic.View.Get
import sangria.execution.UserFacingError
import sangria.schema._
import sangria.macros.derive._
import akka.pattern.ask
import akka.stream.Materializer
import sangria.execution.deferred.{Fetcher, HasId}

import scala.concurrent.ExecutionContext
import sangria.streaming.akkaStreams._

object schema {
  case class MutationError(message: String) extends Exception(message) with UserFacingError

  val authors = Fetcher.caching((c: Ctx, ids: Seq[String]) ⇒ c.loadAuthors(ids))(HasId(_.id))

  def createSchema(implicit timeout: Timeout, ec: ExecutionContext, mat: Materializer) = {
    val VersionedType = InterfaceType("Versioned", fields[Ctx, Versioned](
      Field("id", StringType, resolve = _.value.id),
      Field("version", LongType, resolve = _.value.version)))

    implicit val AuthorType = deriveObjectType[Unit, Author](Interfaces(VersionedType))

    val EventType = InterfaceType("Event", fields[Ctx, Event](
      Field("id", StringType, resolve = _.value.id),
      Field("version", LongType, resolve = _.value.version)))

    val AuthorCreatedType = deriveObjectType[Unit, AuthorCreated](Interfaces(EventType))
    val AuthorNameChangedType = deriveObjectType[Unit, AuthorNameChanged](Interfaces(EventType))
    val AuthorDeletedType = deriveObjectType[Unit, AuthorDeleted](Interfaces(EventType))

    val ArticleCreatedType = deriveObjectType[Unit, ArticleCreated](
      Interfaces(EventType),
      ReplaceField("authorId", Field("author", OptionType(AuthorType),
        resolve = c ⇒ authors.deferOpt(c.value.authorId))))

    val ArticleTextChangedType = deriveObjectType[Unit, ArticleTextChanged](Interfaces(EventType))
    val ArticleDeletedType = deriveObjectType[Unit, ArticleDeleted](Interfaces(EventType))

    implicit val ArticleType = deriveObjectType[Ctx, Article](
      Interfaces(VersionedType),
      ReplaceField("authorId", Field("author", OptionType(AuthorType),
        resolve = c ⇒ authors.deferOpt(c.value.authorId))))

    val IdArg = Argument("id", StringType)
    val OffsetArg = Argument("offset", OptionInputType(IntType), 0)
    val LimitArg = Argument("limit", OptionInputType(IntType), 100)

    def entityFields[T](name: String, tpe: ObjectType[Ctx, T], actor: Ctx ⇒ ActorRef) = fields[Ctx, Unit](
      Field(name, OptionType(tpe),
        arguments = IdArg :: Nil,
        resolve = c ⇒ (actor(c.ctx) ? Get(c.arg(IdArg))).mapTo[Option[T]]),
      Field(name + "s", ListType(tpe),
        arguments = OffsetArg :: LimitArg :: Nil,
        resolve = c ⇒ (actor(c.ctx) ? View.List(c.arg(OffsetArg), c.arg(LimitArg))).mapTo[Seq[T]]))

    val QueryType = ObjectType("Query",
      entityFields[Author]("author", AuthorType, _.authors) ++
      entityFields[Article]("article", ArticleType, _.articles))

    val MutationType = deriveContextObjectType[Ctx, Mutation, Unit](identity)

    
    def subscriptionField[T <: Event](tpe: ObjectType[Ctx, T]) = {
      val fieldName = tpe.name.head.toLower + tpe.name.tail

      Field.subs(fieldName, tpe,
        resolve = (c: Context[Ctx, Unit]) ⇒
          c.ctx.eventStream
            .filter(event ⇒ tpe.valClass.isAssignableFrom(event.getClass))
            .map(event ⇒ Action(event.asInstanceOf[T])))
    }

    val SubscriptionType = ObjectType("Subscription", fields[Ctx, Unit](
      subscriptionField(AuthorCreatedType),
      subscriptionField(AuthorNameChangedType),
      subscriptionField(AuthorDeletedType),
      subscriptionField(ArticleCreatedType),
      subscriptionField(ArticleTextChangedType),
      subscriptionField(ArticleDeletedType),
      Field.subs("allEvents", EventType, resolve = _.ctx.eventStream.map(Action(_)))
    ))

    Schema(QueryType, Some(MutationType), Some(SubscriptionType))
  }
} 
Example 90
Source File: Ctx.scala    From sangria-subscriptions-example   with Apache License 2.0
import akka.NotUsed
import akka.util.Timeout
import schema.MutationError
import akka.actor.ActorRef
import generic.Event
import generic.MemoryEventStore._
import generic.View.{Get, GetMany}
import akka.pattern.ask
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Source
import org.reactivestreams.Publisher

import scala.concurrent.{ExecutionContext, Future}

case class Ctx(
  authors: ActorRef,
  articles: ActorRef,
  eventStore: ActorRef,
  eventStorePublisher: Publisher[Event],
  ec: ExecutionContext,
  to: Timeout
) extends Mutation {
  implicit def executionContext = ec
  implicit def timeout = to

  lazy val eventStream: Source[Event, NotUsed] =
    Source.fromPublisher(eventStorePublisher).buffer(100, OverflowStrategy.fail)

  def addEvent[T](view: ActorRef, event: Event) =
    (eventStore ? AddEvent(event)).flatMap {
      case EventAdded(_) ⇒
        (view ? Get(event.id, Some(event.version))).mapTo[Option[T]]
      case OverCapacity(_) ⇒
        throw MutationError("Service is overloaded.")
      case ConcurrentModification(_, latestVersion) ⇒
        throw MutationError(s"Concurrent Modification error for entity '${event.id}'. Latest entity version is '$latestVersion'.")
    }

  def addDeleteEvent(event: Event) =
    (eventStore ? AddEvent(event)).map {
      case EventAdded(e) ⇒  e
      case OverCapacity(_) ⇒
        throw MutationError("Service is overloaded.")
      case ConcurrentModification(_, latestVersion) ⇒
        throw MutationError(s"Concurrent Modification error for entity '${event.id}'. Latest entity version is '$latestVersion'.")
    }

  def loadLatestVersion(id: String, version: Long): Future[Long] =
    (eventStore ? LatestEventVersion(id)) map {
      case Some(latestVersion: Long) if version != latestVersion ⇒
        throw MutationError(s"Concurrent Modification error for entity '$id'. Latest entity version is '$latestVersion'.")
      case Some(version: Long) ⇒
        version + 1
      case _ ⇒
        throw MutationError(s"Entity with ID '$id' does not exist.")
    }

  def loadAuthors(ids: Seq[String]) =
    (authors ? GetMany(ids)).mapTo[Seq[Author]]
} 
Example 91
Source File: QueueActor.scala    From reliable-http-client   with Apache License 2.0
package rhttpc.transport.inmem

import akka.pattern._
import akka.actor.{Actor, ActorLogging, ActorRef, Props, Stash}
import akka.routing.{RoundRobinRoutingLogic, Routee, Router}
import akka.util.Timeout
import rhttpc.transport.{Message, RejectingMessage}

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal

private class QueueActor(consumeTimeout: FiniteDuration,
                         retryDelay: FiniteDuration) extends Actor with Stash with ActorLogging {

  import context.dispatcher

  private var consumers = Map.empty[ActorRef, AskingActorRefRouteeWithSpecifiedMessageType]

  private var router = Router(RoundRobinRoutingLogic(), collection.immutable.IndexedSeq.empty)

  override def receive: Receive = {
    case RegisterConsumer(consumer, fullMessage) =>
      val routee = AskingActorRefRouteeWithSpecifiedMessageType(consumer, consumeTimeout, handleResponse, fullMessage)
      consumers += consumer -> routee
      router = router.addRoutee(routee)
      log.debug(s"${self.path.name}: registered consumer, unstashing")
      unstashAll()
    case UnregisterConsumer(consumer) =>
      log.debug(s"${self.path.name}: unregistered consumer")
      consumers.get(consumer).foreach { routee =>
        consumers -= consumer
        router = router.removeRoutee(routee)
      }
      sender() ! ((): Unit)
    case msg: Message[_] =>
      if (consumers.isEmpty) {
        log.debug(s"${self.path.name}: got message when no consumer registered, stashing")
        stash()
        implicit val timeout = Timeout(consumeTimeout)
        sender() ! ((): Unit)
      } else {
        router.route(msg, sender())
      }
  }

  private def handleResponse(future: Future[Any], msg: Message[_]): Unit =
    future.recover {
      case ex: AskTimeoutException =>
        log.error(ex, s"${self.path.name}: REJECT [${msg.content.getClass.getName}] because of ask timeout")
      case ex: Exception with RejectingMessage =>
        log.error(ex, s"${self.path.name}: REJECT [${msg.content.getClass.getName}] because of rejecting failure")
      case NonFatal(ex) =>
        log.error(ex, s"${self.path.name}: will RETRY [${msg.content.getClass.getName}] after $retryDelay because of failure")
        context.system.scheduler.scheduleOnce(retryDelay, self, msg)
    }

}

object QueueActor {
  def props(consumeTimeout: FiniteDuration,
            retryDelay: FiniteDuration): Props = Props(
    new QueueActor(
      consumeTimeout = consumeTimeout,
      retryDelay = retryDelay))
}

private[inmem] case class AskingActorRefRouteeWithSpecifiedMessageType(ref: ActorRef,
                                                                       askTimeout: FiniteDuration,
                                                                       handleResponse: (Future[Any], Message[_]) => Unit,
                                                                       fullMessage: Boolean)
  extends Routee {

  override def send(message: Any, sender: ActorRef): Unit = {
    val typedMessage = message.asInstanceOf[Message[_]]
    val msgToSend = if (fullMessage) message else typedMessage.content
    handleResponse(ref.ask(msgToSend)(askTimeout, sender), typedMessage)
  }
}

private[inmem] case class RegisterConsumer(consumer: ActorRef, fullMessage: Boolean)

private[inmem] case class UnregisterConsumer(consumer: ActorRef) 
Example 92
Source File: InMemTransport.scala    From reliable-http-client   with Apache License 2.0
package rhttpc.transport.inmem

import akka.actor.{ActorRef, ActorSystem}
import akka.pattern._
import akka.util.Timeout
import rhttpc.transport.{InboundQueueData, Publisher, _}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, Future}

private[inmem] class InMemTransport(transportActor: ActorRef) // TODO: stopping of transports / actors
                                   (createTimeout: FiniteDuration,
                                    stopConsumingTimeout: FiniteDuration,
                                    stopTimeout: FiniteDuration)
                                   (implicit system: ActorSystem) extends PubSubTransport {

  import system.dispatcher

  override def publisher[PubMsg: Serializer](queueData: OutboundQueueData): Publisher[PubMsg] = {
    val queueActor = getOrCreateQueueActor(queueData.name)
    new InMemPublisher[PubMsg](queueActor)
  }

  override def subscriber[SubMsg: Deserializer](queueData: InboundQueueData, consumer: ActorRef): Subscriber[SubMsg] = {
    val queueActor = getOrCreateQueueActor(queueData.name)
    new InMemSubscriber[SubMsg](queueActor, consumer, fullMessage = false)(stopConsumingTimeout)
  }

  override def fullMessageSubscriber[SubMsg: Deserializer](queueData: InboundQueueData, consumer: ActorRef): Subscriber[SubMsg] = {
    val queueActor = getOrCreateQueueActor(queueData.name)
    new InMemSubscriber[SubMsg](queueActor, consumer, fullMessage = true)(stopConsumingTimeout)
  }

  private def getOrCreateQueueActor(name: String): ActorRef = {
    implicit val timeout = Timeout(createTimeout)
    Await.result((transportActor ? GetOrCreateQueue(name)).mapTo[ActorRef], createTimeout)
  }

  override def stop(): Future[Unit] = gracefulStop(transportActor, stopTimeout).map(_ => Unit)
}

object InMemTransport {
  def apply(createTimeout: FiniteDuration = InMemDefaults.createTimeout,
            consumeTimeout: FiniteDuration = InMemDefaults.consumeTimeout,
            retryDelay: FiniteDuration = InMemDefaults.retryDelay,
            stopConsumingTimeout: FiniteDuration = InMemDefaults.stopConsumingTimeout,
            stopTimeout: FiniteDuration = InMemDefaults.stopTimeout)
           (implicit system: ActorSystem): PubSubTransport = {
    val actor = system.actorOf(TransportActor.props(
      QueueActor.props(
        consumeTimeout = consumeTimeout,
        retryDelay = retryDelay
      )))
    new InMemTransport(actor)(
      createTimeout = createTimeout,
      stopConsumingTimeout = stopConsumingTimeout,
      stopTimeout = stopTimeout)
  }
} 
Example 93
Source File: InMemSubscriber.scala    From reliable-http-client   with Apache License 2.0
package rhttpc.transport.inmem

import akka.actor.ActorRef
import akka.pattern._
import akka.util.Timeout
import rhttpc.transport.Subscriber

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

class InMemSubscriber[Msg](queueActor: ActorRef,
                           consumer: ActorRef,
                           fullMessage: Boolean)
                          (stopTimeout: FiniteDuration) extends Subscriber[Msg] {

  override def start(): Unit = {
    queueActor ! RegisterConsumer(consumer, fullMessage)
  }

  override def stop(): Future[Unit] = {
    implicit val timeout = Timeout(stopTimeout)
    (queueActor ? UnregisterConsumer(consumer)).mapTo[Unit]
  }

} 
Example 94
Source File: RecoverableActorsManager.scala    From reliable-http-client   with Apache License 2.0
package rhttpc.akkapersistence

import akka.actor._
import akka.pattern._
import akka.util.Timeout
import rhttpc.akkapersistence.impl._

import scala.concurrent.Future
import scala.concurrent.duration._

private class RecoverableActorsManager(persistenceCategory: String, childPropsCreate: String => Props) extends Actor with ActorLogging {

  import context.dispatcher

  override def receive: Receive = {
    case RecoverAllActors =>
      val registry = context.actorOf(SnapshotsRegistry.props(persistenceCategory), s"$persistenceCategory-registry")
      registry ! GetIdsWithStoredSnapshots
      context.become(waitForIdsWithStoredSnapshots(registry, sender()))
  }

  private def waitForIdsWithStoredSnapshots(registry: ActorRef, originalSender: ActorRef): Receive = {
    case IdsWithStoredSnapshots(ids) =>
      val recoveryFinishedFuture =
        if (ids.nonEmpty) {
          log.info(ids.mkString("Recovering actors from registry: ", ", ", ""))
          implicit val timeout = Timeout(1 minute)
          val futures = ids.map { id =>
            context.actorOf(childPropsCreate(id), id) ? NotifyAboutRecoveryCompleted
          }
          Future.sequence(futures).map { _ =>
            log.info("Recovering of all actors completed")
          }
        } else {
          log.info("Empty registry - nothing to recover")
          Future.successful(Unit)
        }
      recoveryFinishedFuture.foreach { _ =>
        self ! BecomeRecovered
        originalSender ! ActorsRecovered
        registry ! PoisonPill
      }
    case BecomeRecovered =>
      context.become(recovered)
  }

  val recovered: Receive = {
    case SendMsgToChild(id, msg) =>
      context.child(id) match {
        case Some(child) => child forward msg
        case None =>
          val child = context.actorOf(childPropsCreate(id), id)
          child forward msg
      }
  }

  case object BecomeRecovered
}

object RecoverableActorsManager {
  def props(persistenceCategory: String, childPropsCreate: String => Props): Props =
    Props(new RecoverableActorsManager(persistenceCategory, childPropsCreate))
}

case object RecoverAllActors
case object ActorsRecovered

case class SendMsgToChild(id: String, msg: Any) 
Example 95
Source File: SampleApp.scala    From reliable-http-client   with Apache License 2.0
package rhttpc.sample

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server._
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.util.Timeout
import rhttpc.akkahttp.ReliableHttpClientFactory
import rhttpc.akkapersistence.{RecoverAllActors, RecoverableActorsManager, SendMsgToChild}
import rhttpc.client.subscription.ReplyFuture

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.reflectiveCalls

object SampleApp extends App with Directives {
  implicit val system = ActorSystem("rhttpc-sample")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  val rhttpc = Await.result(ReliableHttpClientFactory().withOwnAmqpConnection.inOutWithSubscriptions(), 10 seconds)

  val client = new DelayedEchoClient {
    override def requestResponse(msg: String): ReplyFuture = {
      rhttpc.send(HttpRequest().withUri("http://sampleecho:8082").withMethod(HttpMethods.POST).withEntity(msg))
    }
  }

  val manager = system.actorOf(RecoverableActorsManager.props(
    FooBarActor.persistenceCategory,
    id => FooBarActor.props(id, rhttpc.subscriptionManager, client)
  ), "foobar")

  Await.result((manager ? RecoverAllActors)(Timeout(20 seconds)), 15 seconds)

  rhttpc.start()

  val route =
    path("healthcheck") {
      get {
        complete("OK")
      }
    } ~
    path(Segment) { id =>
      (post & entity(as[String])) { msg =>
        complete {
          implicit val sendMsgTimeout = Timeout(5 seconds)
          (manager ? SendMsgToChild(id, SendMsg(msg))).map(_ => "OK")
        }
      } ~
      get {
        complete {
          implicit val currentStateTimeout = Timeout(5 seconds)
          (manager ? SendMsgToChild(id, CurrentState)).mapTo[FooBarState].map(_.toString)
        }
      }
    }

  Http().bindAndHandle(route, interface = "0.0.0.0", port = 8081).map { binding =>
    Runtime.getRuntime.addShutdownHook(new Thread {
      override def run(): Unit = {
        Await.result(rhttpc.stop(), 10 seconds)
      }
    })
  }
} 
Example 96
Source File: InMemDelayedEchoClient.scala    From reliable-http-client   with Apache License 2.0
package rhttpc.sample

import java.util.UUID

import akka.actor.{ActorRef, ActorSystem}
import akka.util.Timeout
import rhttpc.client.subscription._

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}

class InMemDelayedEchoClient(delay: FiniteDuration)(implicit system: ActorSystem) extends DelayedEchoClient {
  import system.dispatcher

  private val subOnMsg: collection.concurrent.Map[SubscriptionOnResponse, String] = collection.concurrent.TrieMap()

  val subscriptionManager: SubscriptionManager =
    new SubscriptionManager {

      override def confirmOrRegister(subscription: SubscriptionOnResponse, consumer: ActorRef): Unit = {
        system.scheduler.scheduleOnce(delay) {
          subOnMsg.remove(subscription).foreach { msg =>
            consumer ! msg
          }
        }
      }

      override def start(): Unit = {}

      override def stop(): Future[Unit] = Future.successful(Unit)
    }

  override def requestResponse(msg: String): ReplyFuture = {
    val uniqueSubOnResponse = SubscriptionOnResponse(UUID.randomUUID().toString)
    subOnMsg.put(uniqueSubOnResponse, msg)
    new ReplyFuture {
      override def pipeTo(listener: PublicationListener)
                         (implicit ec: ExecutionContext): Unit = {
        listener.subscriptionPromiseRegistered(uniqueSubOnResponse)
        listener.self ! RequestPublished(uniqueSubOnResponse)
      }

      override def toFuture(implicit system: ActorSystem, timeout: Timeout): Future[Any] = ???
    }
  }
} 
Example 97
Source File: MockTransport.scala    From reliable-http-client   with Apache License 2.0
package rhttpc.client

import akka.actor.ActorRef
import akka.pattern._
import akka.util.Timeout
import rhttpc.client.protocol.{Correlated, FailureExchange, SuccessExchange}
import rhttpc.transport._

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success}

class MockTransport(awaitCond: (() => Boolean) => Unit)(implicit ec: ExecutionContext)
  extends PubSubTransport {

  @volatile private var _publicationPromise: Promise[Unit] = _
  @volatile private var _replySubscriptionPromise: Promise[String] = _
  @volatile private var _ackOnReplySubscriptionFuture: Future[Any] = _
  @volatile private var consumer: ActorRef = _

  def publicationPromise: Promise[Unit] = {
    awaitCond(() => _publicationPromise != null)
    _publicationPromise
  }

  def replySubscriptionPromise: Promise[String] = {
    awaitCond(() => _replySubscriptionPromise != null)
    _replySubscriptionPromise
  }

  def ackOnReplySubscriptionFuture: Future[Any] = {
    awaitCond(() => _ackOnReplySubscriptionFuture != null)
    _ackOnReplySubscriptionFuture
  }

  override def publisher[PubMsg: Serializer](data: OutboundQueueData): Publisher[PubMsg] =
    new Publisher[PubMsg] {
      override def publish(request: Message[PubMsg]): Future[Unit] = {
        request.content match {
          case Correlated(msg, correlationId) =>
            _publicationPromise = Promise[Unit]()
            _replySubscriptionPromise = Promise[String]()
            implicit val timeout = Timeout(5 seconds)
            _replySubscriptionPromise.future.onComplete {
              case Success(result) =>
                _ackOnReplySubscriptionFuture = consumer ? Correlated(SuccessExchange(msg, result), correlationId)
              case Failure(ex) =>
                _ackOnReplySubscriptionFuture = consumer ? Correlated(FailureExchange(msg, ex), correlationId)
            }
            _publicationPromise.future
          case other =>
            throw new IllegalArgumentException("Illegal message content: " + other)
        }
      }

      override def start(): Unit = {}

      override def stop(): Future[Unit] = Future.successful(Unit)
    }

  override def fullMessageSubscriber[SubMsg: Deserializer](data: InboundQueueData, consumer: ActorRef): Subscriber[SubMsg] =
    subscriber(data, consumer)

  override def subscriber[SubMsg: Deserializer](data: InboundQueueData, consumer: ActorRef): Subscriber[SubMsg] =
    new Subscriber[SubMsg] {
      MockTransport.this.consumer = consumer

      override def start(): Unit = {}

      override def stop(): Future[Unit] = Future.successful(Unit)
    }

  override def stop(): Future[Unit] = Future.successful(Unit)

}

object MockProxyTransport extends PubSubTransport {
  override def publisher[PubMsg: Serializer](queueData: OutboundQueueData): Publisher[PubMsg] =
    new Publisher[PubMsg] {
      override def publish(msg: Message[PubMsg]): Future[Unit] = Future.successful(Unit)

      override def start(): Unit = {}

      override def stop(): Future[Unit] = Future.successful(Unit)
    }

  override def fullMessageSubscriber[SubMsg: Deserializer](data: InboundQueueData, consumer: ActorRef): Subscriber[SubMsg] =
    subscriber(data, consumer)

  override def subscriber[SubMsg: Deserializer](queueData: InboundQueueData, consumer: ActorRef): Subscriber[SubMsg] =
    new Subscriber[SubMsg] {
      override def start(): Unit = {}

      override def stop(): Future[Unit] = Future.successful(Unit)
    }

  override def stop(): Future[Unit] = Future.successful(Unit)
} 
Example 98
Source File: CustomCacheRunner.scala    From akka_streams_tutorial   with MIT License
package sample.stream_actor.typed

import akka.actor.typed.ActorSystem
import akka.actor.typed.scaladsl.AskPattern._
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{RestartSource, Sink, Source}
import akka.util.Timeout
import sample.stream_actor.typed.CustomCache.{AddDevices, CacheRequests, CacheResponses, CachedDevices}

import scala.concurrent.Future
import scala.concurrent.duration._


object CustomCacheRunner extends App {
  // the system is also the top level actor ref
  implicit val cache = ActorSystem[CacheRequests](CustomCache.empty, "CustomCache")
  implicit val ec = cache.executionContext
  implicit val timeout: Timeout = 5.seconds

  RestartSource
    .withBackoff(
      minBackoff = 0.seconds,
      maxBackoff = 60.seconds,
      randomFactor = 0.1
    ) { () =>
      Source
        .tick(initialDelay = 0.seconds, interval = 2.seconds, tick = ())
        .mapAsync(parallelism = 1) { _ => cache.ref.ask(ref => CustomCache.Get("42", ref)) }
        .map((each: CacheResponses) =>
          each match {
            case cachedDevices: CachedDevices => cache.log.info(s"Current amount of cached devices: ${cachedDevices.devices.size}")
            case _ => cache.log.info("No devices")
          })
        .recover {
          case ex => cache.log.error("Failed to read cached devices: ", ex)
        }
    }
    .runWith(Sink.ignore)

  val sourceOfUUID = Source(Stream.continually(java.util.UUID.randomUUID.toString).take(100))
  sourceOfUUID
    .throttle(10, 1.second, 10, ThrottleMode.shaping)
    .mapAsync(parallelism = 10)(each => Future(cache ! AddDevices(List(DeviceId(each)))))
    .runWith(Sink.ignore)
} 
Example 99
Source File: WordCountConsumer.scala    From akka_streams_tutorial   with MIT License
package alpakka.kafka

import akka.Done
import akka.actor.{ActorSystem, Props}
import akka.kafka.scaladsl.Consumer.DrainingControl
import akka.kafka.scaladsl.{Committer, Consumer}
import akka.kafka.{CommitterSettings, ConsumerSettings, Subscriptions}
import akka.stream.scaladsl.Sink
import akka.util.Timeout
import alpakka.kafka.TotalFake.{IncrementMessage, IncrementWord}
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.{LongDeserializer, StringDeserializer}

import scala.concurrent.Future
import scala.concurrent.duration._


object WordCountConsumer extends App {
  implicit val system = ActorSystem("WordCountConsumer")
  implicit val ec = system.dispatcher

  val total = system.actorOf(Props[TotalFake], "totalFake")

  val committerSettings = CommitterSettings(system).withMaxBatch(1)

  def createConsumerSettings(group: String): ConsumerSettings[String, java.lang.Long] = {
    ConsumerSettings(system, new StringDeserializer , new LongDeserializer)
      .withBootstrapServers("localhost:9092")
      .withGroupId(group)
      //Define consumer behavior upon starting to read a partition for which it does not have a committed offset or if the committed offset it has is invalid
      .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
  }

  def createAndRunConsumerWordCount(id: String) = {
    Consumer.committableSource(createConsumerSettings("wordcount consumer group"), Subscriptions.topics("wordcount-output"))
      .mapAsync(1) { msg =>
        //println(s"$id - Offset: ${msg.record.offset()} - Partition: ${msg.record.partition()} Consume msg with key: ${msg.record.key()} and value: ${msg.record.value()}")
        if (msg.record.key().equalsIgnoreCase("fakeNews")) { //hardcoded because WordCountProducer.fakeNewsKeyword does not work
          import akka.pattern.ask
          implicit val askTimeout: Timeout = Timeout(3.seconds)
          (total ? IncrementWord(msg.record.value.toInt, id))
            .mapTo[Done]
            .map(_ => msg.committableOffset)
        } else {
          Future(msg).map(_ => msg.committableOffset)
        }
      }
      .via(Committer.flow(committerSettings))
      .toMat(Sink.seq)(DrainingControl.apply)
      .run()
  }

  def createAndRunConsumerMessageCount(id: String) = {
    Consumer.committableSource(createConsumerSettings("messagecount consumer group"), Subscriptions.topics("messagecount-output"))
      .mapAsync(1) { msg =>
        //println(s"$id - Offset: ${msg.record.offset()} - Partition: ${msg.record.partition()} Consume msg with key: ${msg.record.key()} and value: ${msg.record.value()}")
        import akka.pattern.ask
        implicit val askTimeout: Timeout = Timeout(3.seconds)
        (total ? IncrementMessage(msg.record.value.toInt, id))
          .mapTo[Done]
          .map(_ => msg.committableOffset)
      }
      .via(Committer.flow(committerSettings))
      .toMat(Sink.seq)(DrainingControl.apply)
      .run()
  }

  val drainingControlW1 = createAndRunConsumerWordCount("W.1")
  val drainingControlW2 = createAndRunConsumerWordCount("W.2")
  val drainingControlM = createAndRunConsumerMessageCount("M")


  sys.addShutdownHook{
    println("Got control-c cmd from shell, about to shutdown...")
    drainingControlW1.drainAndShutdown()
    drainingControlW2.drainAndShutdown()
    drainingControlM.drainAndShutdown()
  }
} 
Example 100
Source File: TotalTweetsScheduler.scala    From redrock   with Apache License 2.0
package com.restapi

import java.io.{File, FileInputStream}

import akka.actor.{ActorRef, Actor, ActorSystem, Props}
import akka.io.IO
import org.slf4j.LoggerFactory
import play.api.libs.json.Json
import spray.can.Http
import akka.pattern.ask
import spray.http.DateTime
import scala.concurrent.duration._
import akka.util.Timeout
import scala.concurrent.ExecutionContext.Implicits.global
import org.apache.commons.codec.digest.DigestUtils
import scala.io.Source

case object GetTotalTweetsScheduler

object CurrentTotalTweets {
  @volatile
  var totalTweets: Long = 0
}

class ExecuterTotalTweetsES(delay: FiniteDuration, interval: FiniteDuration) extends Actor {
  context.system.scheduler.schedule(delay, interval) {
    getTotalTweetsES
  }

  val logger = LoggerFactory.getLogger(this.getClass)

  override def receive: Actor.Receive = {
    case GetTotalTweetsScheduler => {
      logger.info(s"Getting Total of Tweets. Begin: ${CurrentTotalTweets.totalTweets}")
    }
    case _ => // just ignore any messages
  }

  def getTotalTweetsES: Unit = {
    val elasticsearchRequests = new GetElasticsearchResponse(0, Array[String](), Array[String](),
      LoadConf.restConf.getString("searchParam.defaulStartDatetime"),
      LoadConf.restConf.getString("searchParam.defaultEndDatetime"),
      LoadConf.esConf.getString("decahoseIndexName"))
    val totalTweetsResponse = Json.parse(elasticsearchRequests.getTotalTweetsESResponse())
    logger.info(s"Getting Total of Tweets. Current: ${CurrentTotalTweets.totalTweets}")
    CurrentTotalTweets.totalTweets = (totalTweetsResponse \ "hits" \ "total").as[Long]
    logger.info(s"Total users updated. New: ${CurrentTotalTweets.totalTweets}")
  }
} 
Example 101
Source File: Application.scala    From redrock   with Apache License 2.0
package com.restapi

import akka.actor.{ActorSystem, Props}
import akka.io.IO
import spray.can.Http
import akka.pattern.ask
import scala.concurrent.duration._
import akka.util.Timeout
import org.slf4j.LoggerFactory;


object Application extends App {
  val logger = LoggerFactory.getLogger(this.getClass)
  
  // we need an ActorSystem to host our application in
  implicit val system = ActorSystem(LoadConf.restConf.getString("actor"))
  // create and start our service actor
  val service = system.actorOf(Props[MyServiceActor], LoadConf.restConf.getString("name"))
  val sessionTimeout = system.actorOf(Props[SessionTimeoutActor])

  val sessionTable = system.actorOf(Props(classOf[SimpleSession], sessionTimeout,
    LoadConf.accessConf.getInt("delay") seconds,
    LoadConf.accessConf.getInt("timeout-interval") seconds))
  sessionTable ! InitSessionTable

  val sessionLoader = system.actorOf(Props(classOf[LoadSessionActor], sessionTable,
    LoadConf.accessConf.getInt("delay") seconds,
    LoadConf.accessConf.getInt("check-interval") seconds))
  sessionLoader ! InitFileMd5Sum

  val schedTotalTweets = system.actorOf(Props(classOf[ExecuterTotalTweetsES],
    LoadConf.restConf.getInt("totalTweetsScheduler.delay") seconds,
    LoadConf.restConf.getInt("totalTweetsScheduler.reapeatEvery") seconds))
  schedTotalTweets ! GetTotalTweetsScheduler

  implicit val timeout = Timeout(800.seconds)
  IO(Http) ? Http.Bind(service, interface = "0.0.0.0", port = LoadConf.restConf.getInt("port"))

  logger.info( s"""Application: ${LoadConf.globalConf.getString("appName")} running version: ${LoadConf.globalConf.getString("appVersion")}""".stripMargin)
} 
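The ask to IO(Http) above returns a Future whose value is never inspected; with spray-can a successful bind answers with Http.Bound. A hedged sketch of checking the outcome, reusing system, service, logger and the implicit 800-second timeout from the object above:

import system.dispatcher // ExecutionContext for the Future callback

(IO(Http) ? Http.Bind(service, interface = "0.0.0.0", port = LoadConf.restConf.getInt("port"))).foreach {
  case Http.Bound(address) => logger.info(s"Bound to $address")
  case other               => logger.error(s"Bind did not succeed: $other")
}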
Example 102
Source File: AmqpTermination.scala    From gatling-amqp   with MIT License 5 votes vote down vote up
package io.gatling.amqp.config

import akka.pattern.ask
import akka.util.Timeout
import io.gatling.amqp.data._
import io.gatling.core.session.Session
import pl.project13.scala.rainbow._

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util._


trait AmqpTermination { this: AmqpProtocol =>
  private val publishTimeout: Timeout = Timeout(1 hour)
  private val consumeTimeout: Timeout = Timeout(1 hour)

  protected def awaitTerminationFor(session: Session): Unit = {
    // wait nacker to ensure all confirms has been fired
    Await.result((nacker ask WaitTermination(session))(publishTimeout), Duration.Inf) match {
      case Success(m) => logger.debug(s"amqp: $m".green)
      case Failure(e) => throw e
    }

    // wait consumers
    Await.result((router ask WaitTermination(session))(consumeTimeout), Duration.Inf) match {
      case Success(m) => logger.debug(s"amqp: $m".green)
      case Failure(e) => throw e
    }
  }
} 
Example 103
Source File: AmqpPreparation.scala    From gatling-amqp   with MIT License 5 votes vote down vote up
package io.gatling.amqp.config

import akka.pattern.ask
import akka.util.Timeout
import pl.project13.scala.rainbow._

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util._


trait AmqpPreparation { this: AmqpProtocol =>
  private val prepareTimeout: Timeout = Timeout(3 seconds)

  protected def awaitPreparation(): Unit = {
    for (msg <- preparings) {
      Await.result((manage ask msg)(prepareTimeout), Duration.Inf) match {
        case Success(m) => logger.info(s"amqp: $m".green)
        case Failure(e) => throw e
      }
    }
  }
} 
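Both gatling-amqp traits hand the Timeout to ask explicitly instead of bringing it into implicit scope. The two forms are interchangeable; a small sketch with a hypothetical manager ActorRef:

import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.Future
import scala.concurrent.duration._

def query(manager: ActorRef, msg: Any): Future[Any] = {
  // explicit: pass the Timeout as a second argument list, as in awaitPreparation above
  val explicitReply = (manager ask msg)(Timeout(3.seconds))

  // implicit: bring a Timeout into scope and use the ? operator
  implicit val askTimeout: Timeout = Timeout(3.seconds)
  manager ? msg
}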
Example 104
Source File: Replica.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package kvstore

import akka.actor.{ OneForOneStrategy, Props, ActorRef, Actor }
import kvstore.Arbiter._
import scala.collection.immutable.Queue
import akka.actor.SupervisorStrategy.Restart
import scala.annotation.tailrec
import akka.pattern.{ ask, pipe }
import akka.actor.Terminated
import scala.concurrent.duration._
import akka.actor.PoisonPill
import akka.actor.OneForOneStrategy
import akka.actor.SupervisorStrategy
import akka.util.Timeout

object Replica {
  sealed trait Operation {
    def key: String
    def id: Long
  }
  case class Insert(key: String, value: String, id: Long) extends Operation
  case class Remove(key: String, id: Long) extends Operation
  case class Get(key: String, id: Long) extends Operation

  sealed trait OperationReply
  case class OperationAck(id: Long) extends OperationReply
  case class OperationFailed(id: Long) extends OperationReply
  case class GetResult(key: String, valueOption: Option[String], id: Long) extends OperationReply

  def props(arbiter: ActorRef, persistenceProps: Props): Props = Props(new Replica(arbiter, persistenceProps))
}

class Replica(val arbiter: ActorRef, persistenceProps: Props) extends Actor {
  import Replica._
  import Replicator._
  import Persistence._
  import context.dispatcher

  
  val replica: Receive = {
    case _ =>
  }

} 
Example 105
Source File: SidechainNodeViewHolderTest.scala    From Sidechains-SDK   with MIT License 5 votes vote down vote up
package com.horizen.actors

import java.util.concurrent.TimeUnit

import akka.actor.{ActorRef, ActorSystem}
import akka.pattern.ask
import akka.testkit.TestKit
import akka.util.Timeout
import com.horizen.SidechainNodeViewHolder.ReceivableMessages.GetDataFromCurrentSidechainNodeView
import com.horizen.fixtures.SidechainNodeViewHolderFixture
import com.horizen.node.SidechainNodeView
import org.scalatest.{BeforeAndAfterAll, FunSuiteLike}

import scala.concurrent._
import scala.concurrent.duration._
import org.scalatest._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner


@RunWith(classOf[JUnitRunner])
class SidechainNodeViewHolderTest extends Suites(
  new SidechainNodeViewHolderTest1,
  new SidechainNodeViewHolderTest2
)

@RunWith(classOf[JUnitRunner])
class SidechainNodeViewHolderTest1
  extends TestKit(ActorSystem("testsystem"))
  with FunSuiteLike
  with BeforeAndAfterAll
  with SidechainNodeViewHolderFixture
{

  implicit val timeout = Timeout(5, TimeUnit.SECONDS)

  override def afterAll: Unit = {
    //info("Actor system is shutting down...")
    TestKit.shutdownActorSystem(system)
  }

  test ("Test1") {
    def f(v: SidechainNodeView) = v
    val sidechainNodeViewHolderRef: ActorRef = getSidechainNodeViewHolderRef
    val nodeView = (sidechainNodeViewHolderRef ? GetDataFromCurrentSidechainNodeView(f))
      .mapTo[SidechainNodeView]

    assert(Await.result(nodeView, 5 seconds) != null)
  }

  test("Test2") {
  }

}

@RunWith(classOf[JUnitRunner])
class SidechainNodeViewHolderTest2
  extends TestKit(ActorSystem("testSystem"))
  with FeatureSpecLike
  with BeforeAndAfterAll
  with Matchers
  with SidechainNodeViewHolderFixture
{

  implicit val timeout = Timeout(5, TimeUnit.SECONDS)

  override def afterAll: Unit = {
    //info("Actor system is shutting down...")
    TestKit.shutdownActorSystem(system)
  }

  feature("Actor1") {
    scenario("Scenario 1"){
      system should not be(null)

      def f(v: SidechainNodeView) = v
      val sidechainNodeViewHolderRef: ActorRef = getSidechainNodeViewHolderRef
      val nodeView = (sidechainNodeViewHolderRef ? GetDataFromCurrentSidechainNodeView(f))
        .mapTo[SidechainNodeView]

      Await.result(nodeView, 5 seconds) should not be(null)

    }
  }
} 
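Both test suites pair a 5-second ask Timeout with a separate `5 seconds` wait in Await.result. Since Timeout exposes its FiniteDuration, the two limits can be kept in sync by reusing one value (sketch):

import java.util.concurrent.TimeUnit
import akka.util.Timeout

implicit val timeout: Timeout = Timeout(5, TimeUnit.SECONDS)
val waitFor = timeout.duration // a FiniteDuration of 5 seconds, reusable as Await.result(nodeView, waitFor)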
Example 106
Source File: GetInfo.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands.info

import scala.concurrent.ExecutionContext.Implicits.global
import akka.actor.ActorRef
import encry.api.http.DataHolderForApi.GetAllInfoHelper
import encry.cli.Response
import encry.cli.commands.Command
import encry.settings.EncryAppSettings
import akka.pattern._
import akka.util.Timeout
import encry.utils.NetworkTimeProvider
import io.circe._
import scala.concurrent._

object GetInfo extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       networkTimeProvider: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    (dataHolder ? GetAllInfoHelper)
      .mapTo[Json]
      .map(x => Some(Response(x.toString())))
  }
} 
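The EncryCore CLI commands that follow all share this shape: ask the dataHolder actor, mapTo the expected reply, wrap it in a Response. If no reply arrives within settings.restApi.timeout, the returned Future fails with akka.pattern.AskTimeoutException; a hypothetical variant of the call above that reports this to the user instead of letting the Future fail:

    (dataHolder ? GetAllInfoHelper)
      .mapTo[Json]
      .map(info => Some(Response(info.toString())))
      .recover { case _: akka.pattern.AskTimeoutException =>
        Some(Response("Node did not reply within the configured timeout"))
      }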
Example 107
Source File: Transfer.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands

import akka.actor.ActorRef
import akka.pattern._
import akka.util.Timeout
import cats.Applicative
import cats.implicits._
import encry.EncryApp
import encry.EncryApp._
import encry.cli.{Ast, Response}
import encry.modifiers.mempool.TransactionFactory
import encry.settings.EncryAppSettings
import encry.utils.NetworkTimeProvider
import encry.view.NodeViewHolder.ReceivableMessages.GetDataFromCurrentView
import encry.view.history.History
import encry.view.mempool.MemoryPool.NewTransaction
import encry.view.state.UtxoState
import encry.view.wallet.EncryWallet
import org.encryfoundation.common.crypto.PrivateKey25519
import org.encryfoundation.common.modifiers.mempool.transaction.EncryAddress.Address
import org.encryfoundation.common.modifiers.mempool.transaction.Transaction
import org.encryfoundation.common.modifiers.state.box.{AssetBox, MonetaryBox, TokenIssuingBox}
import org.encryfoundation.common.utils.Algos
import org.encryfoundation.common.utils.TaggedTypes.ADKey
import scala.concurrent.Future
import scala.util.{Failure, Success, Try}

object Transfer extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       networkTimeProvider: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    (dataHolder ?
      GetDataFromCurrentView[History, UtxoState, EncryWallet, Option[Transaction]] { wallet =>
        Try {
          val secret: PrivateKey25519 = wallet.vault.accountManagers.head.mandatoryAccount
          val recipient: Address      = args.requireArg[Ast.Str]("addr").s
          val fee: Long               = args.requireArg[Ast.Num]("fee").i
          val amount: Long            = args.requireArg[Ast.Num]("amount").i
          val token                   = args.requireArgOrElse[Ast.Str]("token", Ast.Str("")).s
          val tokenOpt                = if (token.isEmpty) None else Some(token)
          val decodedTokenOpt         = tokenOpt.map(s => Algos.decode(s) match {
            case Success(value) => ADKey @@ value
            case Failure(_) => throw new RuntimeException(s"Failed to decode tokeId $s")
          })

          val boxes: IndexedSeq[MonetaryBox] = wallet.vault.walletStorage
            .getAllBoxes()
            .collect {
              case ab: AssetBox if ab.tokenIdOpt.isEmpty ||
                Applicative[Option].map2(ab.tokenIdOpt, decodedTokenOpt)(_.sameElements(_)).getOrElse(false) => ab
              case tib: TokenIssuingBox if decodedTokenOpt.exists(_.sameElements(tib.tokenId)) => tib
            }.foldLeft(Seq[MonetaryBox]()) {
              case (seq, box) if decodedTokenOpt.isEmpty =>
                if (seq.map(_.amount).sum < (amount + fee)) seq :+ box else seq
              case (seq, box: AssetBox) if box.tokenIdOpt.isEmpty =>
                if (seq.collect{ case ab: AssetBox => ab }.filter(_.tokenIdOpt.isEmpty).map(_.amount).sum < fee) seq :+ box else seq
              case (seq, box: AssetBox) =>
                val totalAmount =
                  seq.collect{ case ab: AssetBox => ab }.filter(_.tokenIdOpt.nonEmpty).map(_.amount).sum +
                    seq.collect{ case tib: TokenIssuingBox => tib }.map(_.amount).sum
                if (totalAmount < amount) seq :+ box else seq
              case (seq, box: TokenIssuingBox) =>
                val totalAmount =
                  seq.collect{ case ab: AssetBox => ab }.filter(_.tokenIdOpt.nonEmpty).map(_.amount).sum +
                    seq.collect{ case tib: TokenIssuingBox => tib }.map(_.amount).sum
                if (totalAmount < amount) seq :+ box else seq
            }
            .toIndexedSeq

          TransactionFactory.defaultPaymentTransaction(secret,
                                                       fee,
                                                       System.currentTimeMillis(),
                                                       boxes.map(_ -> None),
                                                       recipient,
                                                       amount,
                                                       decodedTokenOpt)
        }.toOption
      }).flatMap {
      case Some(tx: Transaction) =>
        EncryApp.system.eventStream.publish(NewTransaction(tx))
        Future.successful(Some(Response(tx.toString)))
      case _ => Future.successful(Some(Response("Operation failed. Malformed data.")))
    }
  }
} 
Example 108
Source File: PrintAddresses.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands

import akka.actor.ActorRef
import akka.pattern._
import akka.util.Timeout
import encry.cli.Response
import encry.settings.EncryAppSettings
import encry.api.http.DataHolderForApi.GetViewPrintAddress
import encry.utils.NetworkTimeProvider
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object PrintAddresses extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       networkTimeProvider: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    (dataHolder ? GetViewPrintAddress).mapTo[String].map(s => Some(Response(s)))
  }
} 
Example 109
Source File: CreateToken.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands

import akka.actor.ActorRef
import akka.pattern._
import akka.util.Timeout
import encry.EncryApp
import encry.EncryApp._
import encry.cli.{Ast, Response}
import encry.modifiers.mempool.TransactionFactory
import encry.settings.EncryAppSettings
import encry.utils.NetworkTimeProvider
import encry.view.NodeViewHolder.ReceivableMessages.GetDataFromCurrentView
import encry.view.history.History
import encry.view.mempool.MemoryPool.NewTransaction
import encry.view.state.UtxoState
import encry.view.wallet.EncryWallet
import org.encryfoundation.common.crypto.PrivateKey25519
import org.encryfoundation.common.modifiers.mempool.transaction.{PubKeyLockedContract, Transaction}
import org.encryfoundation.common.modifiers.state.box.AssetBox
import scala.concurrent.Future
import scala.util.Try


object CreateToken extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       networkTimeProvider: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    (dataHolder ?
      GetDataFromCurrentView[History, UtxoState, EncryWallet, Option[Transaction]] { wallet =>
        Try {
          val secret: PrivateKey25519 = wallet.vault.accountManagers.head.mandatoryAccount
          val fee: Long               = args.requireArg[Ast.Num]("fee").i
          val amount: Long            = args.requireArg[Ast.Num]("amount").i
          val boxes: AssetBox         = wallet.vault.walletStorage
            .getAllBoxes().collect { case ab: AssetBox => ab }.head
          TransactionFactory.assetIssuingTransactionScratch(secret,
            fee,
            System.currentTimeMillis(),
            IndexedSeq(boxes).map(_ -> None),
            PubKeyLockedContract(wallet.vault.accountManagers.head.mandatoryAccount.publicImage.pubKeyBytes).contract,
            amount)
        }.toOption
      }).flatMap {
      case Some(tx: Transaction) =>
        EncryApp.system.eventStream.publish(NewTransaction(tx))
        Future.successful(Some(Response(tx.toString)))
      case _ => Future.successful(Some(Response("Operation failed. Malformed data.")))
    }
  }
} 
Example 110
Source File: PrintPrivKeys.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands

import akka.actor.ActorRef
import akka.pattern._
import akka.util.Timeout
import encry.api.http.DataHolderForApi.GetViewPrintPrivKeys
import encry.cli.Response
import encry.settings.EncryAppSettings
import encry.utils.NetworkTimeProvider
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

//TODO This cmd is unsafe.
object PrintPrivKeys extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       networkTimeProvider: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    (dataHolder ? GetViewPrintPrivKeys).mapTo[String].map(s => Some(Response(s)))
  }
} 
Example 111
Source File: CreateAccountFromSeed.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands

import akka.actor.ActorRef
import akka.util.Timeout
import akka.pattern._
import encry.cli.{Ast, Response}
import encry.settings.EncryAppSettings
import encry.EncryApp._
import encry.api.http.DataHolderForApi.CreateAccountManagerFromSeedHelper
import encry.utils.NetworkTimeProvider
import encry.view.NodeViewHolder.ReceivableMessages.CreateAccountManagerFromSeed
import encry.view.wallet.EncryWallet

import scala.concurrent.Future



object CreateAccountFromSeed extends Command {
  override def execute(args: Command.Args, settings: EncryAppSettings, dataHolder: ActorRef, nodeId: Array[Byte], ntp: NetworkTimeProvider): Future[Option[Response]] = {
    val seed: String = args.requireArg[Ast.Str]("seed").s
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    (dataHolder ? CreateAccountManagerFromSeedHelper(seed)).mapTo[Either[String, EncryWallet]].map {
      case Right(wallet) => Some(Response(s"Created account manager #${wallet.accountManagers.map(_.number).max}"))
      case Left(reasons) => Some(Response(s"Failed to create new account manager:\n$reasons"))
    }
  }
} 
Example 112
Source File: PrintPubKeys.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands

import akka.actor.ActorRef
import akka.pattern._
import akka.util.Timeout
import encry.cli.Response
import encry.settings.EncryAppSettings
import encry.api.http.DataHolderForApi.GetViewPrintPubKeys
import encry.utils.NetworkTimeProvider
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object PrintPubKeys extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       networkTimeProvider: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    (dataHolder ? GetViewPrintPubKeys).mapTo[String].map(s => Some(Response(s)))
  }
} 
Example 113
Source File: GetBannedPeers.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands.peer

import java.net.InetAddress
import akka.actor.ActorRef
import encry.cli.Response
import encry.cli.commands.Command
import encry.settings.EncryAppSettings
import encry.utils.NetworkTimeProvider
import akka.pattern._
import akka.util.Timeout
import encry.api.http.DataHolderForApi.GetBannedPeersHelper
import encry.network.BlackList.{BanReason, BanTime, BanType}
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

object GetBannedPeers extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       ntp: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    (dataHolder ? GetBannedPeersHelper).mapTo[Seq[(InetAddress, (BanReason, BanTime, BanType))]]
      .map(x => Some(Response(x.toString)))
  }
} 
Example 114
Source File: GetConnectedPeers.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands.peer

import akka.actor.ActorRef
import encry.api.http.DataHolderForApi.GetConnectedPeersHelper
import encry.api.http.routes.PeersApiRoute.PeerInfoResponse
import encry.cli.Response
import encry.cli.commands.Command
import encry.settings.EncryAppSettings
import encry.utils.NetworkTimeProvider
import akka.pattern._
import akka.util.Timeout
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object GetConnectedPeers extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       ntp: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    (dataHolder ? GetConnectedPeersHelper).mapTo[Seq[PeerInfoResponse]].map(x => Some(Response(x.toString())))
  }
} 
Example 115
Source File: GetPeers.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands.peer

import java.net.InetSocketAddress
import akka.actor.ActorRef
import encry.cli.Response
import encry.cli.commands.Command
import encry.settings.EncryAppSettings
import encry.utils.NetworkTimeProvider
import akka.pattern._
import akka.util.Timeout
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import encry.api.http.DataHolderForApi.GetAllPeers

object GetPeers extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       ntp: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    (dataHolder ? GetAllPeers)
      .mapTo[Seq[InetSocketAddress]]
      .map(x => Some(Response(x.toString())))
  }
} 
Example 116
Source File: GetBan.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands.peer

import java.net.InetSocketAddress

import akka.actor.ActorRef
import encry.api.http.DataHolderForApi.{GetConnectedPeersHelper, PeerBanHelper}
import encry.api.http.routes.PeersApiRoute.PeerInfoResponse
import encry.cli.{Ast, Response}
import encry.cli.commands.Command
import encry.settings.EncryAppSettings
import encry.utils.NetworkTimeProvider
import akka.pattern._
import akka.util.Timeout
import encry.network.BlackList.BanReason.InvalidNetworkMessage
import encry.network.PeersKeeper.BanPeerFromAPI

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object GetBan extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       ntp: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    val host: String            = args.requireArg[Ast.Str]("host").s
    val port: Long              = args.requireArg[Ast.Num]("port").i
    val peer: InetSocketAddress = new InetSocketAddress(host, port.toInt)
    dataHolder ! PeerBanHelper(peer, "Banned by the user")
    Future.successful(Some(Response(s"Peer $peer was banned by the user")))
  }
} 
Example 117
Source File: CreateKey.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands

import akka.actor.ActorRef
import akka.pattern._
import akka.util.Timeout
import encry.EncryApp._
import encry.api.http.DataHolderForApi.GetViewCreateKey
import encry.cli.Response
import encry.settings.EncryAppSettings
import encry.utils.NetworkTimeProvider
import org.encryfoundation.common.crypto.PrivateKey25519
import scala.concurrent.Future

object CreateKey extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       networkTimeProvider: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    (dataHolder ? GetViewCreateKey).mapTo[PrivateKey25519]
    Future.successful(Some(Response("Key was created")))
  }
} 
Example 118
Source File: GetBalance.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands

import akka.actor.ActorRef
import akka.pattern._
import akka.util.Timeout
import encry.cli.Response
import encry.settings.EncryAppSettings
import encry.utils.NetworkTimeProvider
import encry.view.NodeViewHolder.ReceivableMessages.GetDataFromCurrentView
import encry.view.history.History
import encry.view.state.UtxoState
import encry.view.wallet.EncryWallet
import org.encryfoundation.common.utils.Algos
import scala.concurrent.Future

object GetBalance extends Command {

  
  override def execute(args: Command.Args, settings: EncryAppSettings, dataHolder: ActorRef, nodeId: Array[Byte], ntp: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    (dataHolder ?
      GetDataFromCurrentView[History, UtxoState, EncryWallet, Option[Response]] { view =>
        Option(Response(
          {
            val balance: String =
              view.vault.getBalances.foldLeft("")((str, tokenInfo) =>
                if (tokenInfo._1._2 != Algos.encode(settings.constants.IntrinsicTokenId))
                  str.concat(s"TokenID(${tokenInfo._1._2}) for key ${tokenInfo._1._1} : ${tokenInfo._2}\n")
                else str.concat(s"TokenID(${tokenInfo._1._2}) for key ${tokenInfo._1._1} : ${BigDecimal(tokenInfo._2) / 100000000}\n")
            )
            if (balance.length == 0) "0" else balance
          }
        ))
      }).mapTo[Option[Response]]
  }

} 
Example 119
Source File: GetFullBlockById.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands.history

import akka.actor.ActorRef
import akka.util.Timeout
import encry.api.http.DataHolderForApi.GetFullBlockByIdCommand
import encry.cli.{Ast, Response}
import encry.cli.commands.Command
import encry.settings.EncryAppSettings
import akka.pattern._
import encry.utils.NetworkTimeProvider
import org.encryfoundation.common.modifiers.history.Block
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import io.circe.syntax._

object GetFullBlockById extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       networkTimeProvider: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)

    val mod = args.requireArg[Ast.Str]("modifier").s

    (dataHolder ? GetFullBlockByIdCommand(Left(mod))).mapTo[Option[Block]].map(x => Some(Response(x.asJson.toString())))
  }
} 
Example 120
Source File: GetTxById.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands.history

import akka.actor.ActorRef
import akka.util.Timeout
import encry.cli.{ Ast, Response }
import encry.cli.commands.Command
import encry.settings.EncryAppSettings
import io.circe.syntax._

import scala.concurrent.ExecutionContext.Implicits.global
import akka.pattern._
import encry.api.http.DataHolderForApi.GetDataFromHistory
import encry.utils.NetworkTimeProvider
import encry.view.history.History
import org.encryfoundation.common.modifiers.history.Block
import org.encryfoundation.common.utils.Algos
import org.encryfoundation.common.utils.TaggedTypes.ModifierId

import scala.concurrent.Future

object GetTxById extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       networkTimeProvider: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)

    def getFullBlockByHeaderId(headerId: String): Future[Option[Block]] =
      (dataHolder ?
        GetDataFromHistory).mapTo[History].map { history =>
        Algos
          .decode(headerId)
          .toOption
          .flatMap(decoded => history.getHeaderById(ModifierId @@ decoded))
          .flatMap(history.getBlockByHeader)
      }
    val mod = args.requireArg[Ast.Str]("modifier").s
    getFullBlockByHeaderId(mod).map(_.flatMap(x => Some(Response(x.payload.txs.asJson.toString))))
  }

} 
Example 121
Source File: GetCandidate.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands.history

import akka.actor.ActorRef
import encry.api.http.DataHolderForApi.GetMinerStatus
import encry.cli.Response
import encry.cli.commands.Command
import encry.local.miner.Miner.MinerStatus
import encry.settings.EncryAppSettings
import akka.pattern._
import akka.util.Timeout
import encry.utils.NetworkTimeProvider

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import io.circe.syntax._

object GetCandidate extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       networkTimeProvider: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    (dataHolder ? GetMinerStatus).mapTo[MinerStatus].map(x => Some(Response(x.asJson.toString())))
  }
} 
Example 122
Source File: GetLastHeaders.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands.history

import akka.actor.ActorRef
import encry.api.http.DataHolderForApi.{ GetDataFromHistory, GetLastHeadersHelper }
import encry.cli.{ Ast, Response }
import encry.cli.commands.Command
import encry.settings.EncryAppSettings
import akka.pattern._
import akka.util.Timeout
import encry.utils.NetworkTimeProvider
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import org.encryfoundation.common.modifiers.history.Header
import io.circe.syntax._

object GetLastHeaders extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       networkTimeProvider: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    val num: Long                 = args.requireArg[Ast.Num]("count").i
    (dataHolder ? GetLastHeadersHelper(num.toInt))
      .mapTo[IndexedSeq[Header]]
      .map(x => Some(Response(x.asJson.toString())))
  }
} 
Example 123
Source File: GetLastHeaderIdsAtHeight.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands.history

import akka.actor.ActorRef
import encry.api.http.DataHolderForApi.GetLastHeaderIdAtHeightHelper
import encry.cli.{ Ast, Response }
import encry.cli.commands.Command
import encry.settings.EncryAppSettings
import akka.pattern._
import akka.util.Timeout
import encry.utils.NetworkTimeProvider
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import io.circe.syntax._

object GetLastHeaderIdsAtHeight extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       networkTimeProvider: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)

    val num = args.requireArg[Ast.Num]("at").i

    (dataHolder ? GetLastHeaderIdAtHeightHelper(num.toInt))
      .mapTo[Seq[String]]
      .map(s => Some(Response(s.asJson.toString())))

  }
} 
Example 124
Source File: GetHeaderById.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands.history

import akka.actor.ActorRef
import akka.util.Timeout
import encry.cli.{ Ast, Response }
import encry.cli.commands.Command
import encry.settings.EncryAppSettings

import scala.concurrent.ExecutionContext.Implicits.global
import akka.pattern._
import encry.api.http.DataHolderForApi.{ GetDataFromHistory, GetFullBlockByIdCommand }
import encry.utils.NetworkTimeProvider
import encry.view.history.History
import io.circe.syntax._
import org.encryfoundation.common.modifiers.history.Block
import org.encryfoundation.common.utils.Algos
import org.encryfoundation.common.utils.TaggedTypes.ModifierId

import scala.concurrent.Future

object GetHeaderById extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       networkTimeProvider: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)

    val num = args.requireArg[Ast.Str]("modifier").s

    (dataHolder ? GetFullBlockByIdCommand(Left(num)))
      .mapTo[Option[Block]]
      .map(x => Some(Response(x.map(_.header).asJson.toString())))
  }

} 
Example 125
Source File: ProcessStep.scala    From process   with Apache License 2.0 5 votes vote down vote up
package processframework

import scala.concurrent.duration.Duration
import scala.concurrent.{ ExecutionContext, Future, Promise }
import scala.reflect.ClassTag

import akka.actor.{ Actor, ActorContext, ActorRef, Props }
import akka.util.Timeout

trait ProcessStep[S] {
  implicit def context: ActorContext
  private[processframework] val promise: Promise[Unit] = Promise[Unit]()

  type Execution = S ⇒ Unit
  type UpdateFunction = PartialFunction[Process.Event, S ⇒ S]
  type CommandToEvent = PartialFunction[Any, Process.Event]

  def execute()(implicit process: ActorRef): Execution
  def receiveCommand: CommandToEvent
  def updateState: UpdateFunction

  def retryInterval: Duration = Duration.Inf

  final def isCompleted = promise.isCompleted
  final def markDone(): Unit = promise.trySuccess(())
  final def markDone(newState: S): S = {
    markDone()
    newState
  }
  private[processframework] def abort(): Unit = promise.tryFailure(new RuntimeException("Process aborted"))
  final def onComplete(completeFn: ((ActorContext, S)) ⇒ Unit)(implicit executionContext: ExecutionContext, process: ActorRef): Unit =
    promise.future.foreach { _ ⇒ process ! PersistentProcess.Perform(completeFn) }

  final def onCompleteAsync(completeFn: ⇒ Unit)(implicit executionContext: ExecutionContext): Unit = promise.future.foreach(_ ⇒ completeFn)

  final def ~>(next: ProcessStep[S]*)(implicit context: ActorContext): ProcessStep[S] = new Chain(this, next: _*)

  private[processframework] def run()(implicit process: ActorRef, executionContext: ExecutionContext, classTag: ClassTag[S]): Future[Unit] = runImpl
  private val innerActor = context.actorOf(Props(new Actor {
    def receive = {
      case msg if receiveCommand.isDefinedAt(msg) ⇒
        val event = receiveCommand(msg)
        context.parent ! event
    }
  }))
  private[processframework] def handleUpdateState: UpdateFunction = if (isCompleted) PartialFunction.empty[Process.Event, S ⇒ S] else updateState
  private[processframework] def handleReceiveCommand: CommandToEvent = if (isCompleted) PartialFunction.empty[Any, Process.Event] else receiveCommand
  private[processframework] def executeWithPossibleRetry()(implicit process: ActorRef): Execution = { state ⇒
    implicit val _ = context.dispatcher
    if (retryInterval.isFinite())
      context.system.scheduler.scheduleOnce(Duration.fromNanos(retryInterval.toNanos)) { if (!isCompleted) executeWithPossibleRetry()(process)(state) }
    execute()(process)(state)
  }
  private[processframework] def runImpl()(implicit process: ActorRef, executionContext: ExecutionContext, classTag: ClassTag[S]): Future[Unit] = {
    import akka.pattern.ask
    import scala.concurrent.duration._
    implicit val timeout: Timeout = 5 seconds

    if (!isCompleted) (process ? Process.GetState).mapTo[S].foreach(executeWithPossibleRetry()(innerActor))
    promise.future
  }
} 
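The line `implicit val timeout: Timeout = 5 seconds` inside runImpl compiles because akka.util.Timeout's companion object provides an implicit conversion from FiniteDuration. Spelled out (sketch):

import akka.util.Timeout
import scala.concurrent.duration._

val viaConversion: Timeout  = 5.seconds          // Timeout.durationToTimeout applies
val viaConstructor: Timeout = Timeout(5.seconds) // equivalent, written explicitly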
Example 126
Source File: ProcessStepTestSupport.scala    From process   with Apache License 2.0 5 votes vote down vote up
package processframework

import akka.pattern.ask
import akka.actor.{ ActorRef, ActorContext, Actor, Props }
import akka.util.Timeout

import scala.concurrent.duration._
import scala.concurrent.Await
import scala.reflect.ClassTag

import akka.testkit.{ TestProbe, TestKit }
import org.scalatest.BeforeAndAfterEach

object ProcessStepTestSupport {
  case object GetStep
  case object ACommand
  case object AnEvent extends Process.Event
}

trait ProcessStepTestSupport[S, PS <: ProcessStep[S]] { this: TestKit with BeforeAndAfterEach ⇒
  implicit val timeout: Timeout = 1 second

  var testProbe: TestProbe = null
  var processActor: ActorRef = null

  override protected def beforeEach(): Unit = {
    testProbe = createTestProbe()
    processActor = createProcessActor()
  }

  def createTestProbe(): TestProbe
  def createProcessStep(executeProbe: TestProbe)(implicit context: ActorContext): PS

  def createProcessActor() = system.actorOf(Props(new Actor {
    val step = createProcessStep(testProbe)

    def receive = {
      case msg if sender() == step        ⇒ testActor forward msg
      case ProcessStepTestSupport.GetStep ⇒ sender() ! step
      case e: Process.Event               ⇒ testActor ! e
    }
  }))

  def processStep()(implicit classTag: ClassTag[PS]): PS =
    Await.result[PS]((processActor ? ProcessStepTestSupport.GetStep).mapTo[PS], 2 seconds)
} 
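Two different limits are at work in processStep(): the implicit 1-second Timeout bounds the ask itself (the Future fails with an AskTimeoutException if the step actor never answers), while the 2-second argument to Await.result only bounds how long the calling thread blocks on that Future. A generic sketch, assuming some ActorRef ref:

import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.Await
import scala.concurrent.duration._

def askAndBlock(ref: ActorRef, msg: Any): Any = {
  implicit val askTimeout: Timeout = 1.second // the reply must arrive within 1 second
  Await.result(ref ? msg, 2.seconds)          // the caller blocks for at most 2 seconds
}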
Example 127
Source File: AkkaTaskHandler.scala    From DataXServer   with Apache License 2.0 5 votes vote down vote up
package org.tianlangstudio.data.hamal.yarn.server.handler

import java.util
import java.util.concurrent.TimeUnit

import scala.concurrent.duration._
import akka.actor.ActorRef
import akka.util.Timeout
import org.tianlangstudio.data.hamal.common.{TaskCost, TaskResult}
import org.tianlangstudio.data.hamal.core.{ConfigUtil, Constants}
import org.tianlangstudio.data.hamal.core.handler.ITaskHandler
import org.tianlangstudio.data.hamal.server.thrift.{ThriftServerUtil, ThriftTaskCost, ThriftTaskResult}
import org.tianlangstudio.data.hamal.yarn.{CancelTask, SubmitTask, TaskInfo}
import org.tianlangstudio.data.hamal.yarn.util.Utils
import org.tianlangstudio.data.hamal.common.TaskCost


/**
 * Created by zhuhq on 2016/4/27.
 */
class AkkaTaskHandler(taskSchedulerActor:ActorRef) extends ITaskHandler{

  implicit val timeout = Timeout(30, TimeUnit.SECONDS)

  def submitTask(taskConfPath: String): String = {
    submitTaskWithParams(taskConfPath,null)
  }

  def getTaskStatus(taskId: String): String = {
    if(TaskInfo.taskId2ExecutorId.contains(taskId) || TaskInfo.acceptedTaskIds.contains(taskId) || TaskInfo.rerunTaskIds.contains(taskId)) {
      Constants.TASK_STATUS_RUNNING
    }else if(TaskInfo.taskId2Result.contains(taskId)) {
      Constants.TASK_STATUS_DONE
    }else {
      ""
    }
  }

  def getTaskCost(taskId: String): TaskCost = {
    null
  }
  def getThriftTaskCost(taskId: String): ThriftTaskCost = ThriftServerUtil.taskCost2Thrift(getTaskCost(taskId))
  def submitTaskWithParams(taskConfPath: String, params: util.Map[String, String]): String = {
    //val taskId = UUID.randomUUID().toString
    val taskDesc = ConfigUtil.readTaskDescIfInFileAndReplaceHolder(taskConfPath,params)
    //val taskId = DigestUtils.md5Hex(taskDesc);
    val taskId = Utils.genTaskId()
    taskSchedulerActor ! SubmitTask(taskId,taskDesc)
    taskId
  }

  def cancelTask(taskId: String): Boolean = {
    taskSchedulerActor ! CancelTask(taskId)
    true
  }

  def getTaskResult(taskId: String): TaskResult = {
    if(Constants.TASK_STATUS_DONE.equals(getTaskStatus(taskId))) {
      TaskInfo.taskId2Result.get(taskId) match {
        case Some(taskResult) =>
          taskResult
        case _ =>
          null
      }
    }else {
      null
    }
  }

  def getThriftTaskResult(taskId: String): ThriftTaskResult = ThriftServerUtil.taskResult2Thrift(getTaskResult(taskId))
} 
Example 128
Source File: AkkaThriftTaskHandler.scala    From DataXServer   with Apache License 2.0 5 votes vote down vote up
package org.tianlangstudio.data.hamal.yarn.thrift

import java.util
import java.util.UUID

import akka.pattern.ask
import akka.actor.ActorRef
import akka.util.Timeout
import org.apache.commons.codec.digest.DigestUtils
import org.tianlangstudio.data.hamal.common.{TaskCost, TaskResult}

import scala.concurrent.Await
import scala.concurrent.duration._
import org.tianlangstudio.data.hamal.core.{ConfigUtil, Constants}
import org.tianlangstudio.data.hamal.server.thrift.ThriftServer
import org.tianlangstudio.data.hamal.yarn.{CancelTask, SubmitTask, TaskInfo}
import org.tianlangstudio.data.hamal.yarn.server.handler.AkkaTaskHandler
import org.tianlangstudio.data.hamal.yarn.util.Utils
import org.tianlangstudio.data.hamal.common.TaskCost
/**
 * Created by zhuhq on 2016/4/27.
 */
class AkkaThriftTaskHandler(taskSchedulerActor:ActorRef) extends AkkaTaskHandler(taskSchedulerActor = taskSchedulerActor) with ThriftServer.Iface{

  override def submitTask(taskConfPath: String): String = {
    submitTaskWithParams(taskConfPath,null)
  }

  override def getTaskStatus(taskId: String): String = {
    if(TaskInfo.taskId2ExecutorId.contains(taskId) || TaskInfo.acceptedTaskIds.contains(taskId) || TaskInfo.rerunTaskIds.contains(taskId)) {
      Constants.TASK_STATUS_RUNNING
    }else if(TaskInfo.taskId2Result.contains(taskId)) {
      Constants.TASK_STATUS_DONE
    }else {
      ""
    }
  }

  override def getTaskCost(taskId: String): TaskCost = {
    null
  }

  override def submitTaskWithParams(taskConfPath: String, params: util.Map[String, String]): String = {
    //val taskId = UUID.randomUUID().toString
    val taskDesc = ConfigUtil.readTaskDescIfInFileAndReplaceHolder(taskConfPath,params)
    //val taskId = DigestUtils.md5Hex(taskDesc);
    val taskId = Utils.genTaskId()
    taskSchedulerActor ! SubmitTask(taskId,taskDesc)
    taskId
  }

  override def cancelTask(taskId: String): Boolean = {
    taskSchedulerActor ! CancelTask(taskId)
    true
  }

  override def getTaskResult(taskId: String): TaskResult = {
    if(Constants.TASK_STATUS_DONE.equals(getTaskStatus(taskId))) {
      TaskInfo.taskId2Result.get(taskId) match {
        case Some(taskResult) =>
          taskResult
        case _ =>
          null
      }
    }else {
      null
    }
  }
} 
Example 129
Source File: ConfigUtil.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.utils

import java.util.concurrent.TimeUnit

import akka.util.Timeout
import com.typesafe.config.{ConfigException, Config, ConfigFactory}

object ConfigUtil {

  lazy val referenceConfig = ConfigFactory.defaultReference

  
  def getDefaultTimeout(config:Config, path:String, default:Timeout, unit:TimeUnit=TimeUnit.SECONDS) : Timeout = {
    if (config.hasPath(path)) {
      val duration = config.getDuration(path, unit)
      Timeout(duration, unit)
    } else {
      default
    }
  }
} 
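A hypothetical use of getDefaultTimeout, assuming a configuration entry written as a HOCON duration:

import java.util.concurrent.TimeUnit
import akka.util.Timeout
import com.typesafe.config.ConfigFactory

val config = ConfigFactory.parseString("my-service.default-timeout = 10s") // hypothetical config key
val timeout: Timeout =
  ConfigUtil.getDefaultTimeout(config, "my-service.default-timeout", Timeout(5, TimeUnit.SECONDS))
// timeout.duration is 10 seconds; if the path were absent, the 5-second default would be returned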
Example 130
Source File: ActorWaitHelper.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.utils

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.util.Timeout

import scala.concurrent.Await

object ActorWaitHelper {
  // Will wait until an actor has come up before returning its ActorRef
  def awaitActor(props: Props, system: ActorSystem, actorName: Option[String] = None)(implicit timeout: Timeout = Timeout(5, TimeUnit.SECONDS)): ActorRef = {
    val actor = actorName match {
      case Some(name) => system.actorOf(props, name)
      case None => system.actorOf(props)
    }
    awaitActorRef(actor, system)
  }

  // Will wait until an actor has come up before returning its ActorRef
  def awaitActorRef(actor: ActorRef, system: ActorSystem)(implicit timeout: Timeout = Timeout(5, TimeUnit.SECONDS)): ActorRef = {
    Await.result(system.actorSelection(actor.path).resolveOne(), timeout.duration)
    actor
  }
}

trait ActorWaitHelper { this: Actor =>
  // Will wait until an actor has come up before returning its ActorRef
  def awaitActor(props: Props, actorName: Option[String] = None)(implicit timeout: Timeout = Timeout(5, TimeUnit.SECONDS)): ActorRef =
    ActorWaitHelper.awaitActor(props, context.system, actorName)(timeout)
} 
Example 131
Source File: Policy.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.policy

import akka.util.Timeout
import com.webtrends.harness.command.{BaseCommandResponse, CommandResponse}
import scala.concurrent.{Promise, Future}
import scala.concurrent.duration._
import scala.util.{Failure, Success}



trait Policy {

  def policyName : String = getClass.getSimpleName


  def decomposeCommandResponse[T<:AnyRef:Manifest](bean:Future[BaseCommandResponse[T]]) : Future[T] = {
    import scala.concurrent.ExecutionContext.Implicits.global

    val f = Promise[T]()
    bean.mapTo[CommandResponse[T]] onComplete {
      case Success(resp) =>
        f.success(resp.data.get)
      case Failure(ex) => f.failure(PolicyException("Error in decomposeCommandResponse", ex))
    }
    f.future
  }
} 
Example 132
Source File: HealthCheckProvider.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.health

import java.util.jar.Attributes.Name
import java.util.jar.{Attributes, JarFile}

import akka.actor.Actor
import akka.pattern._
import akka.util.Timeout
import com.webtrends.harness.HarnessConstants
import com.webtrends.harness.logging.ActorLoggingAdapter
import com.webtrends.harness.service.messages.CheckHealth
import com.webtrends.harness.utils.ConfigUtil
import org.joda.time.DateTime

import scala.collection.mutable
import scala.concurrent.duration._
import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success}

trait HealthCheckProvider {
  this: Actor with ActorLoggingAdapter =>
  val upTime = DateTime.now
  implicit val timeout =
    ConfigUtil.getDefaultTimeout(context.system.settings.config, HarnessConstants.KeyDefaultTimeout, Timeout(15 seconds))

  val scalaVersion = util.Properties.versionString
  val file = getClass.getProtectionDomain.getCodeSource.getLocation.getFile

  val manifest = file match {
    case _ if file.endsWith(".jar") =>
      new JarFile(file).getManifest
    case _ =>
      val man = new java.util.jar.Manifest()
      man.getMainAttributes.put(Name.IMPLEMENTATION_TITLE, "Webtrends Harness Service")
      man.getMainAttributes.put(Name.IMPLEMENTATION_VERSION, "develop-SNAPSHOT")
      man.getMainAttributes.put(new Attributes.Name("Implementation-Build"), "N/A")
      man
  }

  val application = manifest.getMainAttributes.getValue(Name.IMPLEMENTATION_TITLE)
  val version = manifest.getMainAttributes.getValue(Name.IMPLEMENTATION_VERSION)
  val alerts: mutable.Buffer[ComponentHealth] = mutable.Buffer()

  
  def runChecks: Future[ApplicationHealth] = {

    import context.dispatcher

    // Ask for the health of each component
    val future = (context.actorSelection(HarnessConstants.ActorPrefix) ? CheckHealth).mapTo[Seq[HealthComponent]]
    val p = Promise[ApplicationHealth]

    future.onComplete({
      case Success(checks) =>
        // Rollup alerts for any critical or degraded components
        checks.foreach(checkComponents)
        // Rollup the statuses
        val overallHealth = rollupStatuses(alerts)
        alerts.clear()
        p success ApplicationHealth(application, version, upTime, overallHealth.state, overallHealth.details, checks)
      case Failure(e) =>
        log.error("An error occurred while fetching the health request results", e)
        p success ApplicationHealth(application, version, upTime, ComponentState.CRITICAL, e.getMessage, Nil)
    })

    p.future
  }
} 
Example 133
Source File: ActorHealth.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.health

import akka.actor.{Actor, ActorRef}
import akka.pattern._
import akka.util.Timeout
import com.webtrends.harness.HarnessConstants
import com.webtrends.harness.logging.Logger
import com.webtrends.harness.service.messages.CheckHealth
import com.webtrends.harness.utils.ConfigUtil

import scala.concurrent.duration._
import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success, Try}

trait ActorHealth {
  this: Actor =>

  private val _log = Logger(this, context.system)

  import context.dispatcher

  implicit val checkTimeout:Timeout =
    ConfigUtil.getDefaultTimeout(context.system.settings.config, HarnessConstants.KeyDefaultTimeout, Timeout(15 seconds))

  def health:Receive = {
    case CheckHealth =>
      pipe(Try(checkHealth)
        .recover({
        case e: Exception =>
          _log.error("Error fetching health", e)
          Future.successful(HealthComponent(getClass.getSimpleName, ComponentState.CRITICAL,
            "Exception when trying to check the health: %s".format(e.getMessage)))
      }).get
      ) to sender()
  }

  
  def checkHealth: Future[HealthComponent] = {
    val p = Promise[HealthComponent]()

    getHealth.onComplete {
      case Success(s) =>
        val healthFutures = getHealthChildren map { ref =>
          (ref ? CheckHealth).mapTo[HealthComponent] recover {
            case _: AskTimeoutException =>
              _log.warn(s"Health Check time out on child actor ${ref.path.toStringWithoutAddress}")
              HealthComponent(getClass.getSimpleName, ComponentState.CRITICAL,
                "Time out on child: %s".format(ref.path.toStringWithoutAddress))
            case ex: Exception =>
              HealthComponent(ref.path.name, ComponentState.CRITICAL, s"Failure to get health of child component. ${ex.getMessage}")
          }
        }

        Future.sequence(healthFutures) onComplete {
          case Failure(f) =>
            _log.debug(f, "Failed to retrieve health of children objects")
            p success HealthComponent(s.name, ComponentState.CRITICAL, s"Failure to get health of child components. ${f.getMessage}")
          case Success(healths) =>
            healths foreach { it => s.addComponent(it) }
            p success s
        }
      case Failure(f) =>
        _log.debug(f, "Failed to get health from component")
        p success HealthComponent(self.path.toString, ComponentState.CRITICAL, f.getMessage)
    }

    p.future
  }
} 
Example 134
Source File: Component.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.component

import akka.actor.{ActorRef, Status}
import akka.pattern.ask
import akka.util.Timeout
import com.webtrends.harness.HarnessConstants
import com.webtrends.harness.app.HActor
import com.webtrends.harness.app.HarnessActor.{ConfigChange, PrepareForShutdown, SystemReady}

import scala.concurrent.duration._
import scala.util.{Failure, Success}

sealed class ComponentMessages()
case class StartComponent() extends ComponentMessages
case class StopComponent() extends ComponentMessages
case class ComponentRequest[T](msg:T, name:Option[String]=None, timeout:Timeout=5 seconds) extends ComponentMessages
case class ComponentMessage[T](msg:T, name:Option[String]=None) extends ComponentMessages

case class ComponentResponse[T](resp:T)


abstract class Component(name: String) extends HActor {

  def prepareForShutdown() = {}
}

object Component {
  def getActorPath() : String = {
    s"${HarnessConstants.ComponentName}/"
  }
} 
Example 135
Source File: TypedCommandExecution.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.command.typed

import akka.pattern._
import akka.util.Timeout

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

case class ExecuteTypedCommand(args: Any)

object TypedCommandExecution {

  def execute[U, V](name: String, args: U)(implicit executionContext: ExecutionContext, timeout: Timeout): Future[V] = {
    TypedCommandManager.commands.get(name) match {
      case Some(commandActor) =>
        (commandActor ? ExecuteTypedCommand(args)).map(_.asInstanceOf[V])
      case None =>
        Future.failed(new IllegalArgumentException(s"Command $name not found."))
    }
  }

} 
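A hypothetical call site, assuming a typed command has been registered under the name "echo" that maps a String argument to a String result:

import akka.util.Timeout
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._

implicit val ec: ExecutionContext = ExecutionContext.global
implicit val timeout: Timeout     = Timeout(5.seconds)

// "echo" is a hypothetical registered command name
val greeting: Future[String] = TypedCommandExecution.execute[String, String]("echo", "hello")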
Example 136
Source File: TypedCommandHelper.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.command.typed

import akka.actor.{Actor, ActorRef, Props}
import akka.pattern._
import akka.util.Timeout
import com.webtrends.harness.HarnessConstants

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

trait TypedCommandHelper { this: Actor =>

  var typedCommandManager: Option[ActorRef] = None
  implicit def ec: ExecutionContext = context.dispatcher

  def registerTypedCommand[T<:TypedCommand[_,_]](name: String, actorClass: Class[T], checkHealth: Boolean = false): Future[ActorRef] = {
    implicit val timeout = Timeout(2 seconds)
    getManager().flatMap { cm =>
      (cm ? RegisterCommand(name, Props(actorClass), checkHealth)).mapTo[ActorRef]
    }
  }

  protected def getManager(): Future[ActorRef] = {
    typedCommandManager match {
      case Some(cm) => Future.successful(cm)
      case None =>
        context.system.actorSelection(HarnessConstants.TypedCommandFullName).resolveOne()(2 seconds).map { s =>
          typedCommandManager = Some(s)
          s
        }
    }
  }
} 
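Both the ask in registerTypedCommand and ActorSelection.resolveOne expect an akka.util.Timeout, so the bare `2 seconds` passed to resolveOne() above is adapted by Timeout's implicit conversion from FiniteDuration. Written out explicitly against the trait above (sketch):

import akka.util.Timeout
import scala.concurrent.duration._

context.system.actorSelection(HarnessConstants.TypedCommandFullName).resolveOne()(Timeout(2.seconds))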
Example 137
Source File: CommandHelper.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.command

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import com.webtrends.harness.app.Harness
import scala.concurrent.duration._
import com.webtrends.harness.HarnessConstants
import com.webtrends.harness.logging.ActorLoggingAdapter
import scala.concurrent.{Promise, Future}
import scala.util.{Failure, Success}

trait CommandHelper extends ActorLoggingAdapter with BaseCommandHelper {
  this: Actor =>
  override lazy implicit val actorSystem = context.system

}

trait BaseCommandHelper {
  implicit def actorSystem: ActorSystem
  def commandManager: Option[ActorRef]
  def initCommandManager: Future[Unit]

  def executeCommand[T:Manifest](name:String, bean:Option[CommandBean]=None, server:Option[String]=None,
                        port:Int=2552)(implicit timeout:Timeout) : Future[BaseCommandResponse[T]] = {

    val p = Promise[BaseCommandResponse[T]]
    initCommandManager onComplete {
      case Success(_) =>
        commandManager match {
          case Some(cm) =>
            val msg = server match {
              case Some(srv) => ExecuteRemoteCommand(name, srv, port, bean, timeout)
              case None => ExecuteCommand(name, bean, timeout)
            }
            (cm ? msg)(timeout).mapTo[BaseCommandResponse[T]] onComplete {
              case Success(s) => p success s
              case Failure(f) => p failure CommandException("CommandManager", f)
            }
          case None => p failure CommandException("CommandManager", "CommandManager not found!")
        }
      case Failure(f) => p failure f
    }
    p.future
  }
} 
Example 138
Source File: InternalHttpSpec.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.http

import java.net.{HttpURLConnection, URL}
import java.util.concurrent.TimeUnit
import akka.actor.{Props, ActorSystem}
import akka.testkit.TestKit
import akka.util.Timeout
import com.webtrends.harness.TestKitSpecificationWithJUnit
import com.webtrends.harness.service.messages.CheckHealth
import scala.concurrent.Await
import akka.pattern.ask
import scala.concurrent.duration.FiniteDuration

class InternalHttpSpec extends TestKitSpecificationWithJUnit(ActorSystem("test")) with InternalHttpClient {
  val port = 8123
  val path = "http://127.0.0.1:" + port + "/"
  val httpActor = system.actorOf(Props(classOf[SimpleHttpServer], port))

  // We need to make sure the httpActor has started up before trying to connect.
  implicit val timeout = Timeout(FiniteDuration(5, TimeUnit.SECONDS))
  Await.result(httpActor ? CheckHealth, timeout.duration)

  "Test handlers" should {
    "handle the get path /ping" in {
      val url = new URL(path + "ping")
      val conn = url.openConnection().asInstanceOf[HttpURLConnection]
      val resp = getResponseContent(conn)

      resp.status mustEqual "200"
      resp.content.length must be > 0
      resp.content.substring(0, 5) mustEqual "pong:"
    }
  }

  step {
    TestKit.shutdownActorSystem(system)
  }

} 
Example 139
Source File: ActorWaitSpec.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorSystem, PoisonPill, Props}
import akka.pattern.ask
import akka.testkit.TestKit
import akka.util.Timeout
import com.webtrends.harness.utils.ActorWaitHelper
import org.specs2.mutable.SpecificationLike

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class WaitedOnActor extends Actor with ActorWaitHelper {
  def receive: Receive = {
    case "message" => sender ! "waitedResponse"
  }
}

class WaitActor extends Actor with ActorWaitHelper {
  implicit val timeout = Timeout(5000, TimeUnit.MILLISECONDS)
  val waited = awaitActor(Props[WaitedOnActor])

  def receive: Receive = {
    case "message" => sender ! "response"
    case "waited" => sender ! Await.result((waited ? "message").mapTo[String], Duration(5, "seconds"))
  }
}

class ActorWaitSpec extends TestKit(ActorSystem("wait-spec")) with SpecificationLike {
  implicit val timeout = Timeout(5000, TimeUnit.MILLISECONDS)
  val waitActor = ActorWaitHelper.awaitActor(Props[WaitActor], system)

  sequential

  "ActorWaitSpec" should {
    "await the WaitActor successfully " in {
      Await.result((waitActor ? "message").mapTo[String], Duration(5, "seconds")) must beEqualTo("response")
    }

    "the WaitActor's awaited actor must have come up " in {
      Await.result((waitActor ? "waited").mapTo[String], Duration(5, "seconds")) must beEqualTo("waitedResponse")
    }
  }

  step {
    waitActor ! PoisonPill
  }
} 
Example 140
Source File: IngestorRegistryEndpoint.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.http

import akka.actor.ActorSystem
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.util.Timeout
import hydra.common.config.ConfigSupport
import ConfigSupport._
import hydra.core.http.RouteSupport
import hydra.ingest.bootstrap.HydraIngestorRegistryClient
import hydra.ingest.services.IngestorRegistry.{FindAll, LookupResult}

import scala.concurrent.duration.{FiniteDuration, _}


class IngestorRegistryEndpoint()(implicit system:ActorSystem) extends RouteSupport
    with HydraIngestJsonSupport
    with ConfigSupport {

  private val registryLookupTimeout = applicationConfig
    .getDurationOpt("ingest.service-lookup.timeout")
    .getOrElse(5.seconds)

  lazy val registry = HydraIngestorRegistryClient(applicationConfig).registry

  private implicit val timeout = Timeout(registryLookupTimeout)

  override val route: Route =
    path("ingestors" ~ Slash.?) {
      get {
        onSuccess(registry ? FindAll) {
          case response: LookupResult => complete(response.ingestors)
        }
      }
    }
} 
Example 141
Source File: IngestionHandlerGateway.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.services

import akka.actor.SupervisorStrategy.Stop
import akka.actor.{OneForOneStrategy, _}
import akka.util.Timeout
import hydra.core.protocol.{InitiateHttpRequest, InitiateRequest}

import scala.concurrent.duration._


class IngestionHandlerGateway(registryPath: String)
    extends Actor
    with ActorLogging {

  private lazy val registry =
    context.actorSelection(registryPath).resolveOne()(Timeout(10.seconds))

  private implicit val ec = context.dispatcher

  override def receive = {
    case InitiateRequest(request, timeout, requestorOpt) =>
      val requestor = requestorOpt getOrElse sender
      ingest(
        registryRef =>
          DefaultIngestionHandler
            .props(request, registryRef, requestor, timeout),
        requestor
      )

    case InitiateHttpRequest(request, timeout, ctx) =>
      val requestor = sender
      ingest(
        registryRef =>
          HttpIngestionHandler.props(request, timeout, ctx, registryRef),
        requestor
      )
  }

  private def ingest(props: ActorRef => Props, requestor: ActorRef) = {
    registry
      .map(registryRef => context.actorOf(props(registryRef)))
      .recover { case e: Exception => requestor ! e }
  }

  override val supervisorStrategy =
    OneForOneStrategy() {
      case _ => Stop // always stop the IngestionRequestHandler
    }
}

object IngestionHandlerGateway {

  val GroupName = "ingestion-handlers"

  def props(registryPath: String) =
    Props(classOf[IngestionHandlerGateway], registryPath)

} 
Example 142
Source File: HydraIngestorRegistryClient.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.bootstrap

import akka.actor.{ActorSelection, ActorSystem}
import akka.pattern.ask
import akka.util.Timeout
import com.typesafe.config.Config
import hydra.common.util.ActorUtils
import hydra.ingest.services.IngestorRegistry
import hydra.ingest.services.IngestorRegistry.{FindByName, LookupResult}

import scala.concurrent.Future


class HydraIngestorRegistryClient(registryPath: String)(
    implicit val system: ActorSystem
) {

  lazy val registry: ActorSelection = system.actorSelection(registryPath)

  def lookupIngestor(
      name: String
  )(implicit timeout: Timeout): Future[LookupResult] = {
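    // Uses the Timeout supplied implicitly by the caller for the ask (?) below.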
    (registry ? FindByName(name)).mapTo[LookupResult]
  }
}

object HydraIngestorRegistryClient {

  import hydra.common.config.ConfigSupport._

  def registryPath(config: Config) =
    config
      .getStringOpt("ingest.ingestor-registry.path")
      .getOrElse(
        s"/user/service/${ActorUtils.actorName(classOf[IngestorRegistry])}"
      )

  def apply(
      config: Config
  )(implicit system: ActorSystem): HydraIngestorRegistryClient = {
    new HydraIngestorRegistryClient(registryPath(config))(system)
  }
} 
Example 143
Source File: IngestorRegistrarSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.services

import java.util.concurrent.TimeUnit

import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.testkit.{ImplicitSender, TestKit}
import akka.util.Timeout
import hydra.common.util.ActorUtils
import hydra.ingest.services.IngestorRegistrar.UnregisterAll
import hydra.ingest.services.IngestorRegistry.{
  FindAll,
  FindByName,
  LookupResult
}
import hydra.ingest.test.TestIngestor
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.time.{Seconds, Span}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._


class IngestorRegistrarSpec
    extends TestKit(ActorSystem("IngestorRegistrarSpec"))
    with Matchers
    with AnyFunSpecLike
    with ImplicitSender
    with ScalaFutures
    with BeforeAndAfterAll
    with Eventually {

  override def afterAll =
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)

  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(10, Seconds), interval = Span(1, Seconds))

  val registry = system.actorOf(Props[IngestorRegistry], "ingestor_registry")

  val act = system.actorOf(Props[IngestorRegistrar])

  implicit val timeout = Timeout(3, TimeUnit.SECONDS)

  describe("The ingestor registrar actor") {
    it("registers from classpath on bootstrap") {
      eventually {
        whenReady(
          (registry ? FindByName(ActorUtils.actorName(classOf[TestIngestor])))
            .mapTo[LookupResult]
        ) { i =>
          i.ingestors.size shouldBe 1
          i.ingestors(0).name shouldBe ActorUtils.actorName(
            classOf[TestIngestor]
          )
        }
      }
    }

    it("unregisters") {
      act ! UnregisterAll
      eventually {
        whenReady((registry ? FindAll).mapTo[LookupResult]) { i =>
          i.ingestors.size shouldBe 0
        }
      }
    }
  }
} 
Example 144
Source File: RabbitTransport.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.rabbit

import akka.actor.Props
import akka.pattern.ask
import akka.util.Timeout
import com.spingo.op_rabbit.Message.{Ack, ConfirmResponse, Fail, Nack}
import com.spingo.op_rabbit._
import com.typesafe.config.Config
import hydra.core.transport.Transport
import hydra.core.transport.Transport.Deliver

import scala.concurrent.duration._

class RabbitTransport(rabbitControlProps: Props) extends Transport {
  implicit val ec = context.dispatcher

  val rabbitControl = context.actorOf(rabbitControlProps)

  private def sendMessage(r: RabbitRecord) = {
    implicit val timeout = Timeout(3 seconds)
    val message = r.destinationType match {
      case RabbitRecord.DESTINATION_TYPE_EXCHANGE =>
        val pub = Publisher.exchange(r.destination)
        Message(r.payload.getBytes(), pub)
      case RabbitRecord.DESTINATION_TYPE_QUEUE =>
        val pub = Publisher.queue(r.destination)
        Message(r.payload.getBytes(), pub)
    }
    (rabbitControl ? message).mapTo[ConfirmResponse]
  }

  override def transport = {
    case Deliver(r: RabbitRecord, deliveryId, callback) =>
      sendMessage(r).foreach { result =>
        result match {
          case x: Ack =>
            callback.onCompletion(
              deliveryId,
              Some(
                RabbitRecordMetadata(
                  System.currentTimeMillis(),
                  x.id,
                  r.destination,
                  r.destinationType,
                  r.ackStrategy
                )
              ),
              None
            )
          case _: Nack =>
            callback.onCompletion(
              deliveryId,
              None,
              Some(
                RabbitProducerException(
                  "Rabbit returned Nack, record not produced"
                )
              )
            )
          case x: Fail =>
            callback.onCompletion(deliveryId, None, Some(x.exception))
        }
      }
  }
}

object RabbitTransport {
  // will be used in testing
  def props(p: Props): Props = Props(classOf[RabbitTransport], p)

  // $COVERAGE-OFF$
  def props(c: Config): Props =
    Props(
      classOf[RabbitTransport],
      Props(
        classOf[RabbitControl],
        Left(ConnectionParams.fromConfig(c.getConfig("op-rabbit.connection")))
      )
    )

  // $COVERAGE-ON$
}

case class RabbitProducerException(msg: String) extends Exception(msg) 
Example 145
Source File: BootstrapEndpoint.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.kafka.endpoints

import akka.actor._
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.util.Timeout
import ch.megard.akka.http.cors.scaladsl.CorsDirectives._
import hydra.common.logging.LoggingAdapter
import hydra.core.http.{CorsSupport, HydraDirectives, RouteSupport}
import hydra.core.marshallers.TopicMetadataRequest
import hydra.kafka.model.TopicMetadataAdapter
import hydra.kafka.services.TopicBootstrapActor._

import scala.concurrent.duration._
import scala.util.{Failure, Success}

class BootstrapEndpoint(override val system:ActorSystem) extends RouteSupport
  with LoggingAdapter
  with TopicMetadataAdapter
  with HydraDirectives
  with CorsSupport
  with BootstrapEndpointActors {

  private implicit val timeout = Timeout(10.seconds)

  override val route: Route = cors(settings) {
    pathPrefix("streams") {
      pathEndOrSingleSlash {
        post {
          requestEntityPresent {
            entity(as[TopicMetadataRequest]) { topicMetadataRequest =>
              onComplete(
                bootstrapActor ? InitiateTopicBootstrap(topicMetadataRequest)
              ) {
                case Success(message) =>
                  message match {

                    case BootstrapSuccess(metadata) =>
                      complete(StatusCodes.OK, toResource(metadata))

                    case BootstrapFailure(reasons) =>
                      complete(StatusCodes.BadRequest, reasons)

                    case e: Exception =>
                      log.error("Unexpected error in TopicBootstrapActor", e)
                      complete(StatusCodes.InternalServerError, e.getMessage)
                  }

                case Failure(ex) =>
                  log.error("Unexpected error in BootstrapEndpoint", ex)
                  complete(StatusCodes.InternalServerError, ex.getMessage)
              }
            }
          }
        }
      } ~ get {
        pathEndOrSingleSlash(getAllStreams(None)) ~
          path(Segment)(subject => getAllStreams(Some(subject)))
      }
    }
  }

  private def getAllStreams(subject: Option[String]): Route = {
    onSuccess(bootstrapActor ? GetStreams(subject)) {
      case GetStreamsResponse(metadata) =>
        complete(StatusCodes.OK, metadata.map(toResource))
      case Failure(ex) =>
        throw ex
      case x =>
        log.error("Unexpected error in BootstrapEndpoint", x)
        complete(StatusCodes.InternalServerError, "Unknown error")
    }
  }
} 
Example 146
Source File: TopicsEndpoint.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.kafka.endpoints

import akka.actor.ActorSelection
import akka.http.scaladsl.common.EntityStreamingSupport
import akka.kafka.Subscriptions
import akka.kafka.scaladsl.Consumer
import akka.pattern.ask
import akka.util.Timeout
import hydra.core.http.RouteSupport
import hydra.kafka.consumer.KafkaConsumerProxy.{GetLatestOffsets, LatestOffsetsResponse}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition

import scala.collection.immutable.Map
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}


class TopicsEndpoint(consumerProxy:ActorSelection)(implicit ec:ExecutionContext) extends RouteSupport {

  import hydra.kafka.util.KafkaUtils._

  implicit val jsonStreamingSupport = EntityStreamingSupport.json()

  override val route =
    path("transports" / "kafka" / "consumer" / "topics" / Segment) {
      topicName =>
        get {
          extractRequestContext { ctx =>
            parameters('format.?, 'group.?, 'n ? 10, 'start ? "earliest") {
              (format, groupId, n, startOffset) =>
                val settings = loadConsumerSettings[Any, Any](
                  format.getOrElse("avro"),
                  groupId.getOrElse("hydra"),
                  startOffset
                )
                val offsets = latestOffsets(topicName)
                val source = Consumer
                  .plainSource(settings, Subscriptions.topics(topicName))
                  .initialTimeout(5.seconds)
                  .zipWithIndex
                  .takeWhile(rec =>
                    rec._2 <= n && !shouldCancel(offsets, rec._1)
                  )
                  .map(rec => rec._1.value().toString)
                  .watchTermination()((_, termination) =>
                    termination.failed.foreach {
                      case cause => ctx.fail(cause)
                    }
                  )
                complete(source)

            }
          }
        }
    }

  def shouldCancel(
      fpartitions: Future[Map[TopicPartition, Long]],
      record: ConsumerRecord[Any, Any]
  ): Boolean = {
    if (fpartitions.isCompleted) {
      val partitions = Await.result(fpartitions, 1.millis)
      val tp = new TopicPartition(record.topic(), record.partition())
      partitions.get(tp) match {
        case Some(offset) => record.offset() >= offset
        case None         => false
      }
    } else {
      false
    }

  }

  private def latestOffsets(
      topic: String
  ): Future[Map[TopicPartition, Long]] = {
    implicit val timeout = Timeout(5 seconds)
    (consumerProxy ? GetLatestOffsets(topic))
      .mapTo[LatestOffsetsResponse]
      .map(_.offsets)
  }

} 
Example 147
Source File: KafkaIngestor.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.kafka.ingestors

import akka.pattern.ask
import hydra.common.config.ConfigSupport._
import akka.util.Timeout
import hydra.core.ingest.RequestParams._
import hydra.core.ingest.{HydraRequest, Ingestor, RequestParams}
import hydra.core.protocol._
import hydra.kafka.config.KafkaConfigSupport
import hydra.kafka.ingestors.KafkaTopicsActor.{
  GetTopicRequest,
  GetTopicResponse
}
import hydra.kafka.producer.{KafkaProducerSupport, KafkaRecordFactories}

import scala.concurrent.Future
import scala.concurrent.duration._


class KafkaIngestor extends Ingestor with KafkaProducerSupport {

  override val recordFactory = new KafkaRecordFactories(schemaRegistryActor)

  private val timeoutDuration = applicationConfig
    .getDurationOpt("kafka-ingestor-timeout")
    .getOrElse(2.seconds)

  private implicit val timeout = Timeout(timeoutDuration)

  private val topicActor = context.actorOf(
    KafkaTopicsActor
      .props(KafkaConfigSupport.kafkaConfig.getConfig("kafka.admin"))
  )

  ingest {
    case Publish(request) =>
      val hasTopic = request.metadataValue(HYDRA_KAFKA_TOPIC_PARAM).isDefined
      sender ! (if (hasTopic) Join else Ignore)

    case Ingest(record, ackStrategy) => transport(record, ackStrategy)
  }
} 
Example 148
Source File: ProgressSource.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.adapter.stream

import java.util.concurrent.TimeUnit

import akka.NotUsed
import akka.actor._
import akka.pattern.ask
import akka.stream._
import akka.stream.scaladsl.Source
import akka.util.Timeout

import com.rbmhtechnology.eventuate.ReplicationProtocol._
import com.typesafe.config.Config

import scala.concurrent.Future
import scala.concurrent.duration._

private class ProgressSourceSettings(config: Config) {
  val readTimeout =
    config.getDuration("eventuate.log.read-timeout", TimeUnit.MILLISECONDS).millis
}

object ProgressSource {
  
  def apply(sourceLogId: String, targetLog: ActorRef)(implicit system: ActorSystem): Graph[SourceShape[Long], NotUsed] = {
    implicit val timeout = Timeout(new ProgressSourceSettings(system.settings.config).readTimeout)
    import system.dispatcher

    Source.fromFuture(targetLog.ask(GetReplicationProgress(sourceLogId)).flatMap {
      case GetReplicationProgressSuccess(_, progress, _) => Future.successful(progress)
      case GetReplicationProgressFailure(cause)          => Future.failed(cause)
    })
  }
} 
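
A minimal sketch of turning the graph above into a runnable stream source; the log id and target log ActorRef are placeholders supplied by the caller:

import akka.NotUsed
import akka.actor.{ActorRef, ActorSystem}
import akka.stream.scaladsl.Source
import com.rbmhtechnology.eventuate.adapter.stream.ProgressSource

object ProgressSourceUsage {
  // Wrap the Graph returned by ProgressSource into a Source of the stored replication progress.
  def replicationProgress(sourceLogId: String, targetLog: ActorRef)(implicit system: ActorSystem): Source[Long, NotUsed] =
    Source.fromGraph(ProgressSource(sourceLogId, targetLog))
}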
Example 149
Source File: OrderManager.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.example.ordermgnt

import akka.actor._
import akka.pattern.ask
import akka.util.Timeout

import com.rbmhtechnology.eventuate.EventsourcedView
import com.rbmhtechnology.eventuate.VersionedAggregate.Resolve

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util._


class OrderManager(replicaId: String, val eventLog: ActorRef) extends EventsourcedView {
  import OrderActor._
  import context.dispatcher

  private implicit val timeout = Timeout(10.seconds)
  private var orderActors: Map[String, ActorRef] = Map.empty

  override val id = s"s-om-$replicaId"

  override def onCommand = {
    case c: OrderCommand => orderActor(c.orderId) forward c
    case c: SaveSnapshot => orderActor(c.orderId) forward c
    case r: Resolve      => orderActor(r.id) forward r
    case GetState if orderActors.isEmpty =>
      sender() ! GetStateSuccess(Map.empty)
    case GetState =>
      val sdr = sender()
      val statesF = orderActors.values.map(_.ask(GetState).mapTo[GetStateSuccess].map(_.state))
      Future.sequence(statesF).map(_.reduce(_ ++ _)) onComplete {
        case Success(states) => sdr ! GetStateSuccess(states)
        case Failure(cause)  => sdr ! GetStateFailure(cause)
      }
  }

  override def onEvent = {
    // eagerly create order actors so that their console output is immediately visible
    case OrderCreated(orderId, _) if !orderActors.contains(orderId) => orderActor(orderId)
  }

  private def orderActor(orderId: String): ActorRef = orderActors.get(orderId) match {
    case Some(orderActor) => orderActor
    case None =>
      orderActors = orderActors + (orderId -> context.actorOf(Props(new OrderActor(orderId, replicaId, eventLog))))
      orderActors(orderId)
  }
} 
Example 150
Source File: WriterApp.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.example.querydb

import akka.actor._
import akka.pattern.ask
import akka.util.Timeout

import com.datastax.driver.core._
import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration._
import scala.util._

object WriterApp extends App {

  // ---------------------------------------------------------------
  //  Assumption: Cassandra 2.1 or higher running on localhost:9042
  // ---------------------------------------------------------------

  withQueryDB(drop = false) { session =>
    val system = ActorSystem("example-querydb", ConfigFactory.load(args(0)))
    val log = system.actorOf(LeveldbEventLog.props("example"))

    val emitter = system.actorOf(Props(new Emitter("emitter", log)))
    val writer = system.actorOf(Props(new Writer("writer", log, session)))

    import system.dispatcher

    implicit val timeout = Timeout(5.seconds)

    emitter ! CreateCustomer("Martin", "Krasser", "Somewhere 1")
    emitter ? CreateCustomer("Volker", "Stampa", "Somewhere 2") onComplete {
      case Success(CustomerCreated(cid, _, _, _)) => emitter ! UpdateAddress(cid, s"Somewhere ${Random.nextInt(10)}")
      case Failure(e)                             => e.printStackTrace()
    }

    Thread.sleep(3000)
    system.terminate()
  }

  def createQueryDB(drop: Boolean): Session = {
    val cluster = Cluster.builder().addContactPoint("localhost").build()
    val session = cluster.connect()

    if (drop) {
      session.execute("DROP KEYSPACE IF EXISTS QUERYDB")
    }

    session.execute("CREATE KEYSPACE IF NOT EXISTS QUERYDB WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }")
    session.execute("USE QUERYDB")

    session.execute("CREATE TABLE IF NOT EXISTS CUSTOMER (id bigint, first text, last text, address text, PRIMARY KEY (id))")
    session.execute("CREATE TABLE IF NOT EXISTS PROGRESS (id bigint, sequence_nr bigint, PRIMARY KEY (id))")
    session.execute("INSERT INTO PROGRESS (id, sequence_nr) VALUES(0, 0) IF NOT EXISTS")

    session
  }

  def dropQueryDB(session: Session): Unit = {
    session.close()
    session.getCluster.close()
  }

  def withQueryDB[A](drop: Boolean = true)(f: Session => A): A = {
    val session = createQueryDB(drop)
    try f(session) finally dropQueryDB(session)
  }
} 
Example 151
Source File: EventLogWriter.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.log

import akka.actor._

import com.rbmhtechnology.eventuate._

import scala.collection.immutable.Seq
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util._

private object EventLogWriter {
  class EventLogWriterActor(val id: String, val eventLog: ActorRef, override val aggregateId: Option[String]) extends EventsourcedActor {
    override def onCommand: Receive = {
      case event => persist(event) {
        case Success(r) => sender() ! lastHandledEvent
        case Failure(e) => sender() ! Status.Failure(e)
      }
    }

    override def onEvent: Receive = {
      case event =>
    }
  }
}


// Sketch of the enclosing writer class: only stop() survived in this excerpt, so the
// constructor and actor creation shown here are assumptions based on EventLogWriterActor above.
class EventLogWriter(id: String, eventLog: ActorRef, aggregateId: Option[String] = None)(implicit system: ActorSystem) {
  import EventLogWriter._

  private val actor = system.actorOf(Props(new EventLogWriterActor(id, eventLog, aggregateId)))

  def stop(): Unit =
    system.stop(actor)
} 
Example 152
Source File: RestarterActor.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.utilities

import akka.actor._
import akka.pattern.ask
import akka.util.Timeout

class RestarterActor(props: Props, name: Option[String]) extends Actor {

  import RestarterActor._

  var child: ActorRef = newActor
  var requester: ActorRef = _

  override def receive = {
    case Restart =>
      requester = sender()
      context.watch(child)
      context.stop(child)
    case Terminated(_) =>
      child = newActor
      requester ! child
    case msg =>
      child forward msg
  }

  private def newActor: ActorRef =
    name.map(context.actorOf(props, _)).getOrElse(context.actorOf(props))
}

object RestarterActor {
  case object Restart

  implicit val timeout = Timeout(timeoutDuration)
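  // timeoutDuration (20.seconds) is defined in the utilities package object shown in the next example.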

  def restartActor(restarterRef: ActorRef): ActorRef =
    (restarterRef ? Restart).mapTo[ActorRef].await

  def props(props: Props, name: Option[String] = None) = Props(new RestarterActor(props, name))
} 
Example 153
Source File: package.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate

import akka.pattern.ask
import akka.testkit.TestProbe
import akka.util.Timeout

import com.rbmhtechnology.eventuate.EventsourcingProtocol._
import com.rbmhtechnology.eventuate.ReplicationFilter.NoFilter
import com.rbmhtechnology.eventuate.ReplicationProtocol._

import scala.collection.immutable.Seq
import scala.concurrent._
import scala.concurrent.duration._

package object utilities {
  val timeoutDuration = 20.seconds

  implicit class AwaitHelper[T](awaitable: Awaitable[T]) {
    def await: T = Await.result(awaitable, timeoutDuration)
  }

  def write(target: ReplicationTarget, events: Seq[String], aggregateId: Option[String] = None): Unit = {
    val system = target.endpoint.system
    val probe = TestProbe()(system)
    target.log ! Write(events.map(DurableEvent(_, target.logId, emitterAggregateId = aggregateId)), system.deadLetters, probe.ref, 0, 0)
    probe.expectMsgClass(classOf[WriteSuccess])
  }

  def read(target: ReplicationTarget): Seq[String] = {
    import target.endpoint.system.dispatcher
    implicit val timeout = Timeout(3.seconds)

    def readEvents: Future[ReplicationReadSuccess] =
      target.log.ask(ReplicationRead(1L, Int.MaxValue, Int.MaxValue, NoFilter, DurableEvent.UndefinedLogId, target.endpoint.system.deadLetters, VectorTime())).mapTo[ReplicationReadSuccess]

    val reading = for {
      res <- readEvents
    } yield res.events.map(_.payload.asInstanceOf[String])

    reading.await
  }

  def replicate(from: ReplicationTarget, to: ReplicationTarget, num: Int = Int.MaxValue): Int = {
    import to.endpoint.system.dispatcher
    implicit val timeout = Timeout(3.seconds)

    def readProgress: Future[GetReplicationProgressSuccess] =
      to.log.ask(GetReplicationProgress(from.logId)).mapTo[GetReplicationProgressSuccess]

    def readEvents(reply: GetReplicationProgressSuccess): Future[ReplicationReadSuccess] =
      from.log.ask(ReplicationRead(reply.storedReplicationProgress + 1, num, Int.MaxValue, NoFilter, to.logId, to.endpoint.system.deadLetters, reply.currentTargetVersionVector)).mapTo[ReplicationReadSuccess]

    def writeEvents(reply: ReplicationReadSuccess): Future[ReplicationWriteSuccess] =
      to.log.ask(ReplicationWrite(reply.events, Map(from.logId -> ReplicationMetadata(reply.replicationProgress, VectorTime.Zero)))).mapTo[ReplicationWriteSuccess]

    val replication = for {
      rps <- readProgress
      res <- readEvents(rps)
      wes <- writeEvents(res)
    } yield wes.events.size

    replication.await
  }
} 
Example 154
Source File: ActorStorageProvider.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ ActorSystem, Status }
import akka.pattern.ask
import akka.testkit.TestProbe
import akka.util.Timeout
import com.rbmhtechnology.eventuate.adapter.vertx.api.StorageProvider

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }

class ActorStorageProvider(defaultId: String)(implicit system: ActorSystem) extends StorageProvider {
  implicit val timeout = Timeout(20.seconds)

  val probe = TestProbe()

  override def readProgress(id: String)(implicit executionContext: ExecutionContext): Future[Long] =
    probe.ref.ask(read(id)).mapTo[Long]

  override def writeProgress(id: String, sequenceNr: Long)(implicit executionContext: ExecutionContext): Future[Long] =
    probe.ref.ask(write(id, sequenceNr)).mapTo[Long]

  def expectRead(replySequenceNr: Long, id: String = defaultId): Unit = {
    probe.expectMsg(read(id))
    probe.reply(replySequenceNr)
  }

  def expectWrite(sequenceNr: Long, id: String = defaultId): Unit = {
    probe.expectMsg(write(id, sequenceNr))
    probe.reply(sequenceNr)
  }

  def expectWriteAndFail(sequenceNr: Long, failure: Throwable, id: String = defaultId): Unit = {
    probe.expectMsg(write(id, sequenceNr))
    probe.reply(Status.Failure(failure))
  }

  def expectWriteAnyOf(sequenceNrs: Seq[Long], id: String = defaultId): Unit = {
    probe.expectMsgAnyOf(sequenceNrs.map(write(id, _)): _*)
    probe.reply(sequenceNrs.max)
  }

  def expectNoMsg(duration: FiniteDuration): Unit = {
    probe.expectNoMsg(duration)
  }

  private def read(id: String): String =
    s"read[$id]"

  private def write(id: String, sequenceNr: Long): String =
    s"write[$id]-$sequenceNr"
} 
Example 155
Source File: ClusterAwareHostBalancer.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse.balancing

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.model.Uri
import akka.pattern.ask
import akka.stream.scaladsl.Sink
import akka.stream.{ActorAttributes, Materializer, Supervision}
import akka.util.Timeout
import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor.{GetConnection, LogDeadConnections}
import com.crobox.clickhouse.balancing.discovery.cluster.ClusterConnectionFlow

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}


case class ClusterAwareHostBalancer(host: Uri,
                                    cluster: String = "cluster",
                                    manager: ActorRef,
                                    scanningInterval: FiniteDuration)(
    implicit system: ActorSystem,
    connectionRetrievalTimeout: Timeout,
    ec: ExecutionContext,
    materializer: Materializer
) extends HostBalancer {

  ClusterConnectionFlow
    .clusterConnectionsFlow(Future.successful(host), scanningInterval, cluster)
    .withAttributes(
      ActorAttributes.supervisionStrategy({
        case ex: IllegalArgumentException =>
          logger.error("Failed resolving hosts for cluster, stopping the flow.", ex)
          Supervision.stop
        case ex =>
          logger.error("Failed resolving hosts for cluster, resuming.", ex)
          Supervision.Resume
      })
    )
    .runWith(Sink.actorRef(manager, LogDeadConnections))

  override def nextHost: Future[Uri] =
    (manager ? GetConnection()).mapTo[Uri]
} 
Example 156
Source File: ClickhouseClientAsyncSpec.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.model.Uri
import akka.pattern.ask
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKit
import akka.util.Timeout
import akka.util.Timeout.durationToTimeout
import com.crobox.clickhouse.balancing.HostBalancer
import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor.GetConnection
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest._

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import org.scalatest.flatspec.AsyncFlatSpecLike
import org.scalatest.matchers.should.Matchers

abstract class ClickhouseClientAsyncSpec(val config: Config = ConfigFactory.load())
    extends TestKit(ActorSystem("clickhouseClientAsyncTestSystem", config.getConfig("crobox.clickhouse.client")))
    with AsyncFlatSpecLike
    with Matchers
    with BeforeAndAfterAll
    with BeforeAndAfterEach {

  implicit val timeout: Timeout = 5.second
  implicit val materializer: Materializer = ActorMaterializer()

  override protected def afterAll(): Unit = {
    try super.afterAll()
    finally Await.result(system.terminate(), 10.seconds)
  }

  def requestParallelHosts(balancer: HostBalancer, connections: Int = 10): Future[Seq[Uri]] =
    Future.sequence(
      (1 to connections)
        .map(_ => {
          balancer.nextHost
        })
    )

  def getConnections(manager: ActorRef, connections: Int = 10): Future[Seq[Uri]] =
    Future.sequence(
      (1 to connections)
        .map(_ => {
          (manager ? GetConnection()).mapTo[Uri]
        })
    )

  // TODO: change these methods to custom matchers
  def returnsConnectionsInRoundRobinFashion(manager: ActorRef, expectedConnections: Set[Uri]): Future[Assertion] = {
    val RequestConnectionsPerHost = 100
    getConnections(manager, RequestConnectionsPerHost * expectedConnections.size)
      .map(connections => {
        expectedConnections.foreach(
          uri =>
            connections
              .count(_ == uri) shouldBe (RequestConnectionsPerHost +- RequestConnectionsPerHost / 10) //10% delta for warm-up phase
        )
        succeed
      })
  }

} 
Example 157
Source File: DemoApp.scala    From constructr-consul   with Apache License 2.0 5 votes vote down vote up
package com.tecsisa.constructr.coordination
package demo

import akka.actor.{ ActorRef, ActorSystem, Address }
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration.{ Duration, MILLISECONDS }

object DemoApp {

  val conf     = ConfigFactory.load()
  val hostname = conf.getString("demo.hostname")
  val httpPort = conf.getInt("demo.port")

  def main(args: Array[String]): Unit = {
    // Create an Akka system
    implicit val system = ActorSystem("ConstructR-Consul")
    import system.dispatcher
    implicit val mat = ActorMaterializer()

    // Create an actor that handles cluster domain events
    val cluster =
      system.actorOf(SimpleClusterListener.props, SimpleClusterListener.Name)
    Http().bindAndHandle(route(cluster), hostname, httpPort)
  }

  private def route(cluster: ActorRef) = {
    import Directives._
    implicit val timeout = Timeout(
      Duration(
        conf.getDuration("demo.cluster-view-timeout").toMillis,
        MILLISECONDS
      )
    )
    path("member-nodes") { // List cluster nodes
      get {
        onSuccess(
          (cluster ? SimpleClusterListener.GetMemberNodes).mapTo[Set[Address]]
        )(addresses => complete(addresses.mkString("\n")))
      }
    }
  }

} 
Example 158
Source File: ReplicaRemoteWriter.scala    From JustinDB   with Apache License 2.0 5 votes vote down vote up
package justin.db.replica.write

import akka.pattern.ask
import akka.util.Timeout
import justin.db.Data
import justin.db.actors.StorageNodeActorRef
import justin.db.actors.protocol.{StorageNodeFailedWrite, StorageNodeWriteDataLocal, StorageNodeWriteResponse}

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

class ReplicaRemoteWriter(implicit ec: ExecutionContext) {

  private implicit val timeout = Timeout(3.seconds) // TODO: tune this value

  def apply(storageNodeRefs: List[StorageNodeActorRef], data: Data): Future[List[StorageNodeWriteResponse]] = {
    Future.sequence(storageNodeRefs.map(putLocalValue(_, data)))
  }

  private def putLocalValue(node: StorageNodeActorRef, data: Data): Future[StorageNodeWriteResponse] = {
    (node.ref ? StorageNodeWriteDataLocal(data))
      .mapTo[StorageNodeWriteResponse]
      .recover { case _ => StorageNodeFailedWrite(data.id) }
  }
} 
Example 159
Source File: ReplicaRemoteReader.scala    From JustinDB   with Apache License 2.0 5 votes vote down vote up
package justin.db.replica.read

import java.util.UUID

import akka.pattern.ask
import akka.util.Timeout
import justin.db.actors.StorageNodeActorRef
import justin.db.actors.protocol.{StorageNodeFailedRead, StorageNodeLocalRead, StorageNodeReadResponse}

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

class ReplicaRemoteReader(implicit ec: ExecutionContext) {

  private implicit val timeout = Timeout(3.seconds) // TODO: tune this value

  def apply(storageNodeRefs: List[StorageNodeActorRef], id: UUID): Future[List[StorageNodeReadResponse]] = {
    Future.sequence(storageNodeRefs.map(getValue(_, id)))
  }

  private def getValue(node: StorageNodeActorRef, id: UUID): Future[StorageNodeReadResponse] = {
    (node.ref ? StorageNodeLocalRead(id))
      .mapTo[StorageNodeReadResponse]
      .recover { case _ => StorageNodeFailedRead(id) }
  }
} 
Example 160
Source File: ActorRefStorageNodeClient.scala    From JustinDB   with Apache License 2.0 5 votes vote down vote up
package justin.db.client

import java.util.UUID

import akka.pattern.ask
import akka.util.Timeout
import justin.db.Data
import justin.db.actors.StorageNodeActorRef
import justin.db.actors.protocol._
import justin.db.replica.{R, W}

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

class ActorRefStorageNodeClient(storageNodeActor: StorageNodeActorRef)(implicit ex: ExecutionContext) extends StorageNodeClient {

  implicit val timeout = Timeout(5.seconds) // TODO: tune this value

  override def get(id: UUID, r: R): Future[GetValueResponse] = {
    (storageNodeActor.ref ? Internal.ReadReplica(r, id)).mapTo[StorageNodeReadResponse].map {
      case StorageNodeFoundRead(data)      => GetValueResponse.Found(data)
      case StorageNodeConflictedRead(data) => GetValueResponse.Conflicts(data)
      case StorageNodeNotFoundRead(id)     => GetValueResponse.NotFound(id)
      case StorageNodeFailedRead(_)        => GetValueResponse.Failure(s"Couldn't read value with id ${id.toString}")
    } recover { case ex: Throwable         => GetValueResponse.Failure(s"Unsuccessful read of value with id ${id.toString}") }
  }

  override def write(data: Data, w: W): Future[WriteValueResponse] = {
    (storageNodeActor.ref ? Internal.WriteReplica(w, data)).mapTo[StorageNodeWriteResponse].map {
      case StorageNodeSuccessfulWrite(id)   => WriteValueResponse.Success(id)
      case StorageNodeConflictedWrite(_, _) => WriteValueResponse.Conflict
      case StorageNodeFailedWrite(id)       => WriteValueResponse.Failure(s"Couldn't write value with id ${id.toString}")
    } recover { case ex: Throwable          => WriteValueResponse.Failure(s"Unsuccessful write of value with id ${data.id.toString}") }
  }
} 
Example 161
Source File: Boot.scala    From reactive-consul   with MIT License 5 votes vote down vote up
package stormlantern.consul.example

import java.net.URL

import akka.actor.ActorSystem
import akka.io.IO
import akka.pattern._
import akka.util.Timeout
import spray.can.Http
import spray.json.{ JsString, JsObject }
import stormlantern.consul.client.discovery.{ ConnectionStrategy, ServiceDefinition, ConnectionProvider }
import stormlantern.consul.client.loadbalancers.RoundRobinLoadBalancer
import stormlantern.consul.client.ServiceBroker
import stormlantern.consul.client.DNS

import scala.concurrent.Future
import scala.concurrent.duration._

object Boot extends App {
  implicit val system = ActorSystem("reactive-consul")
  implicit val executionContext = system.dispatcher

  val service = system.actorOf(ReactiveConsulHttpServiceActor.props(), "webservice")

  implicit val timeout = Timeout(5.seconds)
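  // This implicit Timeout bounds the ask (?) used for the HTTP bind below.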

  IO(Http) ? Http.Bind(service, interface = "0.0.0.0", port = 8080)

  def connectionProviderFactory = (host: String, port: Int) ⇒ new ConnectionProvider {
    val client = new SprayExampleServiceClient(new URL(s"http://$host:$port"))
    override def getConnection: Future[Any] = Future.successful(client)
  }
  val connectionStrategy1 = ConnectionStrategy("example-service-1", connectionProviderFactory)
  val connectionStrategy2 = ConnectionStrategy("example-service-2", connectionProviderFactory)

  val services = Set(connectionStrategy1, connectionStrategy2)
  val serviceBroker = ServiceBroker(DNS.lookup("consul-8500.service.consul"), services)

  system.scheduler.schedule(5.seconds, 5.seconds) {
    serviceBroker.withService("example-service-1") { client: SprayExampleServiceClient ⇒
      client.identify
    }.foreach(println)
    serviceBroker.withService("example-service-2") { client: SprayExampleServiceClient ⇒
      client.identify
    }.foreach(println)
  }
} 
Example 162
Source File: ServiceBroker.scala    From reactive-consul   with MIT License 5 votes vote down vote up
package stormlantern.consul.client

import java.net.URL

import scala.concurrent.duration._
import scala.concurrent._

import akka.actor._
import akka.util.Timeout
import akka.pattern.ask

import stormlantern.consul.client.dao._
import stormlantern.consul.client.dao.akka.AkkaHttpConsulClient
import stormlantern.consul.client.discovery._
import stormlantern.consul.client.election.LeaderInfo
import stormlantern.consul.client.loadbalancers.LoadBalancerActor
import stormlantern.consul.client.util._

class ServiceBroker(serviceBrokerActor: ActorRef, consulClient: ConsulHttpClient)(implicit ec: ExecutionContext) extends RetryPolicy with Logging {

  private[this] implicit val timeout = Timeout(10.seconds)

  def withService[A, B](name: String)(f: A ⇒ Future[B]): Future[B] = {
    logger.info(s"Trying to get connection for service $name")
    serviceBrokerActor.ask(ServiceBrokerActor.GetServiceConnection(name)).mapTo[ConnectionHolder].flatMap { connectionHolder ⇒
      logger.info(s"Received connectionholder $connectionHolder")
      try {
        connectionHolder.connection.flatMap(c ⇒ f(c.asInstanceOf[A]))
      } finally {
        connectionHolder.loadBalancer ! LoadBalancerActor.ReturnConnection(connectionHolder)
      }
    }
  }

  def registerService(registration: ServiceRegistration): Future[Unit] = {
    consulClient.putService(registration).map { serviceId ⇒
      // Add shutdown hook
      val deregisterService = new Runnable {
        override def run(): Unit = consulClient.deleteService(serviceId)
      }
      Runtime.getRuntime.addShutdownHook(new Thread(deregisterService))
    }
  }

  def withLeader[A](key: String)(f: Option[LeaderInfo] ⇒ Future[A]): Future[A] = {
    ???
  }

  def joinElection(key: String): Future[Unit] = {
    ???
  }
}

object ServiceBroker {

  def apply(rootActor: ActorSystem, httpClient: ConsulHttpClient, services: Set[ConnectionStrategy]): ServiceBroker = {
    implicit val ec = ExecutionContext.Implicits.global
    val serviceAvailabilityActorFactory = (factory: ActorRefFactory, service: ServiceDefinition, listener: ActorRef) ⇒
      factory.actorOf(ServiceAvailabilityActor.props(httpClient, service, listener))
    val actorRef = rootActor.actorOf(ServiceBrokerActor.props(services, serviceAvailabilityActorFactory), "ServiceBroker")
    new ServiceBroker(actorRef, httpClient)
  }

  def apply(consulAddress: URL, services: Set[ConnectionStrategy]): ServiceBroker = {
    implicit val rootActor = ActorSystem("reactive-consul")
    val httpClient = new AkkaHttpConsulClient(consulAddress)
    ServiceBroker(rootActor, httpClient, services)
  }

}

case class ServiceUnavailableException(service: String) extends RuntimeException(s"$service service unavailable") 
Example 163
Source File: JsonMessageConversion.scala    From reactive-kafka-microservice-template   with Apache License 2.0 5 votes vote down vote up
package com.omearac.shared

import akka.util.Timeout
import com.omearac.shared.EventMessages.FailedMessageConversion
import com.omearac.shared.KafkaMessages.{ExampleAppEvent, KafkaMessage}
import play.api.libs.json.Json
import spray.json._

import scala.concurrent.duration._



// The enclosing declarations did not survive in this excerpt; the wrappers below are a
// plausible reconstruction so the remaining braces line up. A spray-json format for
// ExampleAppEvent (required by convertTo) is assumed to be in scope.
object JsonMessageConversion {

    trait Conversion[T] {
        def convertFromJson(msg: String): Either[FailedMessageConversion, T]
        def convertToJson(msg: T): String
    }

    object Conversion {

        implicit object ExampleAppEventConversion extends Conversion[ExampleAppEvent] {

            def convertFromJson(msg: String): Either[FailedMessageConversion, ExampleAppEvent] = {
                try {
                     Right(msg.parseJson.convertTo[ExampleAppEvent])
                }
                catch {
                    case e: Exception => Left(FailedMessageConversion("kafkaTopic", msg, "to: ExampleAppEvent"))
                }
            }
            def convertToJson(msg: ExampleAppEvent) = {
                implicit val writes = Json.writes[ExampleAppEvent]
                Json.toJson(msg).toString
            }
        }

        //Adding some sweet sweet syntactic sugar
        def apply[T: Conversion] : Conversion[T] = implicitly
    }
} 
Example 164
Source File: ConsumerCommands.scala    From reactive-kafka-microservice-template   with Apache License 2.0 5 votes vote down vote up
package com.omearac.http.routes

import akka.actor.ActorRef
import akka.event.LoggingAdapter
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.pattern.ask
import akka.util.Timeout
import com.omearac.consumers.DataConsumer.{ConsumerActorReply, ManuallyInitializeStream, ManuallyTerminateStream}

import scala.concurrent.duration._



trait ConsumerCommands {
  def dataConsumer: ActorRef

  def eventConsumer: ActorRef

  def log: LoggingAdapter

  val dataConsumerHttpCommands: Route = pathPrefix("data_consumer") {
    implicit val timeout = Timeout(10 seconds)
    path("stop") {
      get {
        onSuccess(dataConsumer ? ManuallyTerminateStream) {
          case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message);
          case _ => complete(StatusCodes.InternalServerError)
        }
      }
    } ~
      path("start") {
        get {
          onSuccess(dataConsumer ? ManuallyInitializeStream) {
            case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message)
            case _ => complete(StatusCodes.InternalServerError)
          }
        }
      }
  }

  val eventConsumerHttpCommands: Route = pathPrefix("event_consumer") {
    implicit val timeout = Timeout(10 seconds)
    path("stop") {
      get {
        onSuccess(eventConsumer ? ManuallyTerminateStream) {
          case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message);
          case _ => complete(StatusCodes.InternalServerError)
        }
      }
    } ~
      path("start") {
        get {
          onSuccess(eventConsumer ? ManuallyInitializeStream) {
            case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message)
            case _ => complete(StatusCodes.InternalServerError)
          }
        }
      }
  }

} 
Example 165
Source File: ProducerCommands.scala    From reactive-kafka-microservice-template   with Apache License 2.0 5 votes vote down vote up
package com.omearac.http.routes

import akka.actor.ActorRef
import akka.event.LoggingAdapter
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.util.Timeout
import com.omearac.producers.DataProducer.PublishMessages
import com.omearac.shared.EventMessages.MessagesPublished

import scala.concurrent.duration._




trait ProducerCommands {
    def log: LoggingAdapter
    def dataProducer: ActorRef

    val producerHttpCommands: Route = pathPrefix("data_producer"){
        implicit val timeout = Timeout(10 seconds)
        path("produce" / IntNumber) {
            {numOfMessagesToProduce =>
                get {
                    onSuccess(dataProducer ? PublishMessages(numOfMessagesToProduce)) {
                        case MessagesPublished(numberOfMessages) => complete(StatusCodes.OK,  numberOfMessages + " messages Produced as Ordered, Boss!")
                        case _ => complete(StatusCodes.InternalServerError)
                    }
                }
            }
        }
    }
} 
Example 166
Source File: MetricsRepoService.scala    From prometheus-opentsdb-exporter   with Apache License 2.0 5 votes vote down vote up
package services

import scala.concurrent.duration._

import java.io.{File, FileInputStream}
import javax.inject._

import akka.actor.{ActorNotFound, ActorSystem}
import akka.util.Timeout

import play.api.libs.json._
import play.api.{Configuration, Logger}

import models.Metric
import actors.MetricsRepoActor
import actors.MetricsRepoActor.{RegisterMetrics, ResetMetrics}


@Singleton
class MetricsRepoService @Inject()(
  configuration: Configuration,
  system: ActorSystem
) {
  private implicit val to: Timeout = 5 seconds

  private val metricsDir = configuration.getString("metrics.dir").get

  private implicit val ec = system.dispatcher

  private def getListOfFiles(dir: String):List[File] = {
    val d = new File(dir)
    if (d.exists && d.isDirectory) {
      d.listFiles.filter(_.isFile).toList.sortBy(_.getAbsolutePath)
    } else {
      Logger.warn(s"Metrics dir not found: $dir")
      Logger.info(s"Working dir: ${new File(".").getAbsolutePath}")
      List[File]()
    }
  }

  lazy val metricsRepo = {
    Logger.info(s"Initializing the metrics repo.")
    system.actorSelection(s"${MetricsRepoActor.name}")
      .resolveOne()
      .recover {
        case ActorNotFound(_) =>
          system.actorOf(MetricsRepoActor.props(), MetricsRepoActor.name)
      }
  }

  def reloadMetrics(): Unit = {
    metricsRepo.foreach { mr =>
      Logger.info("Loading metrics definitions.")

      mr ! ResetMetrics

      getListOfFiles(metricsDir).foreach { f =>
        Logger.info(s"Loading metrics definitions from: ${f.getAbsolutePath}")

        Json.parse(new FileInputStream(f)).validate[Seq[Metric]].fold(
          valid = metrics => {
            Logger.info("Metrics definitions parsed and validating. Reloading...")
            mr ! RegisterMetrics(metrics)
          },
          invalid = errors =>
            Logger.error(errors.mkString("\n"))
        )
      }
    }
  }

  reloadMetrics()
} 
Example 167
Source File: SampleFramework.scala    From mesos-actor   with Apache License 2.0 5 votes vote down vote up
package com.adobe.api.platform.runtime.mesos.sample

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.adobe.api.platform.runtime.mesos._
import java.time.Instant
import java.util.UUID

import scala.concurrent.Await
import scala.concurrent.Future
import scala.concurrent.duration._


object SampleFramework {

  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem("sample-framework-system")
    implicit val mat = ActorMaterializer()
    implicit val log = system.log
    implicit val ec = system.dispatcher

    val taskLaunchTimeout = Timeout(15 seconds)
    val taskDeleteTimeout = Timeout(10 seconds)
    val subscribeTimeout = Timeout(5 seconds)
    val teardownTimeout = Timeout(5 seconds)
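    // These Timeouts are passed explicitly to each ask(...) call below rather than being resolved implicitly.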

    val mesosClientActor = system.actorOf(
      MesosClient.props(
        () => "sample-" + UUID.randomUUID(),
        "sample-framework",
        "http://192.168.99.100:5050",
        "*",
        30.seconds,
        taskStore = new LocalTaskStore))

    mesosClientActor
      .ask(Subscribe)(subscribeTimeout)
      .mapTo[SubscribeComplete]
      .onComplete(complete => {
        log.info("subscribe completed successfully...")
      })

    var taskCount = 0
    def nextName() = {
      taskCount += 1
      s"sample-task-${Instant.now.getEpochSecond}-${taskCount}"
    }
    def nextId() = "sample-task-" + UUID.randomUUID()

    (1 to 3).foreach(_ => {
      val task = TaskDef(
        nextId(),
        nextName(),
        "trinitronx/python-simplehttpserver",
        0.1,
        24,
        List(8080, 8081),
        Some(HealthCheckConfig(0)),
        commandDef = Some(CommandDef()))
      val launched: Future[TaskState] = mesosClientActor.ask(SubmitTask(task))(taskLaunchTimeout).mapTo[TaskState]
      launched map {
        case taskDetails: Running => {
          val taskHost = taskDetails.hostname
          val taskPorts = taskDetails.hostports
          log.info(
            s"launched task id ${taskDetails.taskId} with state ${taskDetails.taskStatus.getState} on agent ${taskHost} listening on ports ${taskPorts}")

          //schedule delete in 10 seconds
          system.scheduler.scheduleOnce(10.seconds) {
            log.info(s"removing previously created task ${taskDetails.taskId}")
            mesosClientActor
              .ask(DeleteTask(taskDetails.taskId))(taskDeleteTimeout)
              .mapTo[Deleted]
              .map(deleted => {
                log.info(s"task killed ended with state ${deleted.taskStatus.getState}")
              })
          }
        }
        case s => log.error(s"failed to launch task; state is ${s}")
      } recover {
        case t => log.error(s"task launch failed ${t.getMessage}", t)
      }
    })

    system.scheduler.scheduleOnce(30.seconds) {
      val complete: Future[Any] = mesosClientActor.ask(Teardown)(teardownTimeout)
      Await.result(complete, 10.seconds)
      println("teardown completed!")
      system.terminate().map(_ => System.exit(0))
    }

  }

} 
Example 168
Source File: VisualMailboxMetricServer.scala    From akka-visualmailbox   with Apache License 2.0 5 votes vote down vote up
package de.aktey.akka.visualmailbox

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.io.Udp.{Bind, Bound, CommandFailed}
import akka.io.{IO, Udp}
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import de.aktey.akka.visualmailbox.data.DataSourceEndpoint
import de.aktey.akka.visualmailbox.web.{Routing, WebConfig}

import scala.concurrent.duration._


object VisualMailboxMetricServer extends App {

  val allConfig = ConfigFactory.load()
  val config = VisualMailboxMetricClientConfig.fromConfig(allConfig)

  implicit val system = ActorSystem("visualmailbox-visualizer")
  implicit val meterializer = ActorMaterializer()
  implicit val bindTimeout = Timeout(2.seconds)
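  // bindTimeout is picked up implicitly by the ask (?) on the UDP Bind below.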

  import system._

  val router = system.actorOf(MetricsRouter.props(), "router")

  val dataHandler = system.actorOf(DataSourceEndpoint.props(router), "data-sink")

  (IO(Udp) ? Bind(dataHandler, config.serverAddress)).map {
    case CommandFailed(cmd) =>
      system.terminate()
    case Bound(address) =>
      log.info(s"""{"type":"udp-bound","address":"$address"}""")
  }

  val webConfig = WebConfig.fromConfig(allConfig)

  Http()
    .bindAndHandle(Routing.root(MetricFlow.metricSource(router)), webConfig.host, webConfig.port)
    .foreach { case ServerBinding(address) =>
      log.info(s"""{"type":"http-bound","address":"$address"}""")
    }
} 
Example 169
Source File: Shutdown.scala    From typed-actors   with Apache License 2.0 5 votes vote down vote up
package de.knutwalker.akka.typed

import akka.actor.ActorSystem
import akka.util.Timeout

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag

object Shutdown {
  def apply(system: ActorSystem): Unit = {
    Await.result(system.terminate(), Duration.Inf)
    ()
  }
}

object TimeoutMessage {
  def apply[A](ref: ActorRef[A])(implicit ct: ClassTag[A], timeout: Timeout): String = {
    s"""Ask timed out on [$ref] after [${timeout.duration.toMillis} ms]. Sender[null] sent message of type "${ct.runtimeClass.getName}"."""
  }
} 
Example 170
Source File: AskSupport.scala    From typed-actors   with Apache License 2.0 5 votes vote down vote up
package akka.typedactors

import akka.actor.InternalActorRef
import akka.pattern.AskTimeoutException
import akka.util.Timeout
import de.knutwalker.akka.typed._

import scala.concurrent.Future
import scala.reflect.ClassTag



object AskSupport {
  def ask[A, B](ref: ActorRef[A], f: ActorRef[B] ⇒ A, timeout: Timeout, ctA: ClassTag[A], sender: UntypedActorRef): Future[B] =
    internalAsk[A, B](ref.untyped, timeout, f.asInstanceOf[UntypedActorRef ⇒ Any], sender, ctA)

  private def internalAsk[A, B](_ref: UntypedActorRef, timeout: Timeout, f: UntypedActorRef ⇒ Any, sender: UntypedActorRef, ctA: ClassTag[A]): Future[B] = _ref match {
    case r: InternalActorRef if r.isTerminated ⇒
      val msg = f(r.provider.deadLetters)
      _ref.tell(msg, sender)
      Future.failed[B](new AskTimeoutException(s"Recipient[${_ref}] had already been terminated. Sender[$sender] sent the message of type '${msg.getClass.getName}'."))
    case r: InternalActorRef ⇒
      if (timeout.duration.length <= 0) {
        Future.failed[B](new IllegalArgumentException(s"Timeout length must not be negative, question not sent to [${_ref}]. Sender[$sender] sent the message of type '${ctA.runtimeClass.getName}'."))
      } else {
        val ref = PromiseRef(r, _ref, sender, timeout, ctA)
        val msg = f(ref)
        _ref.tell(msg, ref)
        ref.result.future.asInstanceOf[Future[B]]
      }
  }
} 
Example 171
Source File: Xmlrpc.scala    From xmlrpc   with MIT License 5 votes vote down vote up
package xmlrpc

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.client.RequestBuilding._
import akka.http.scaladsl.marshallers.xml.ScalaXmlSupport._
import akka.http.scaladsl.model._
import akka.http.scaladsl.unmarshalling.{FromResponseUnmarshaller, Unmarshal}
import akka.stream.Materializer
import akka.util.Timeout
import xmlrpc.protocol._

import scala.concurrent.{ExecutionContext, Future}
import scala.xml.NodeSeq


object Xmlrpc {

  import XmlrpcProtocol._

  case class XmlrpcServer(fullAddress: String) {
    def uri: Uri = Uri(fullAddress)
  }

  def invokeMethod[P: Datatype, R: Datatype](name: String, parameter: P = Void)
                                            (implicit xmlrpcServer: XmlrpcServer,
                                             as: ActorSystem,
                                             ma: Materializer,
                                             ec: ExecutionContext,
                                             fc: Timeout): XmlrpcResponse[R] = {

    import XmlrpcResponse.AkkaHttpToXmlrpcResponse

    def unmarshall[A](f: Future[HttpResponse])(implicit um: FromResponseUnmarshaller[A]): Future[A] =
      f.flatMap(Unmarshal(_).to[A])


    val request: NodeSeq = writeXmlRequest(name, parameter)
    val requestWithHeader: String = """<?xml version="1.0"?>""" + request.toString


    try {
      (Http().singleRequest(Post(xmlrpcServer.uri, request)) ~> unmarshall[NodeSeq]).asXmlrpcResponse[R]
    } catch {
      case t: Throwable => XmlrpcResponse(ConnectionError("An exception has been thrown by Akka HTTP", Some(t)).failures)
    }
  }
} 
Example 172
Source File: XmlrpcConnection.scala    From xmlrpc   with MIT License 5 votes vote down vote up
package xmlrpc

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.util.Timeout
import org.scalatest.FunSpec
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import xmlrpc.protocol.XmlrpcProtocol

import scala.concurrent.duration._
import scala.language.postfixOps
import scalaz.{Success, Failure}

class XmlrpcConnection extends FunSpec with ScalaFutures {
  // Xmlrpc imports
  import Xmlrpc._
  import XmlrpcProtocol._

  // Scalatest setup
  implicit val default: PatienceConfig = PatienceConfig(timeout = Span(5, Seconds), interval = Span(500, Millis))

  // Xmlrpc setup: the server is publicly available on the Internet and is not maintained by this project
  implicit val testServer = XmlrpcServer("http://betty.userland.com/RPC2")

  // Spray setup
  implicit val system = ActorSystem()
  implicit val ma = ActorMaterializer()
  implicit val timeout = Timeout(5 seconds)
  import system.dispatcher

  describe("The connection with a XML-RPC server") {
    it("should invoke the test method successfully in the server") {
      val invocation = invokeMethod[Int, String]("examples.getStateName", 41).underlying
      val responseMessage = "South Dakota"

      whenReady(invocation) {
        case Success(value) => assertResult(responseMessage) {value}
        case Failure(errors) => fail("Errors when deserializing\n" + errors)
      }
    }
  }
} 
Example 173
Source File: ControlInterface.scala    From changestream   with MIT License 5 votes vote down vote up
package changestream.actors

import akka.util.Timeout
import spray.httpx.SprayJsonSupport._
import spray.routing._
import akka.actor._
import ch.qos.logback.classic.Level
import changestream.{ChangeStream, ChangeStreamEventDeserializer, ChangeStreamEventListener}
import org.slf4j.LoggerFactory
import ch.qos.logback.classic.Logger
import spray.http.StatusCodes
import spray.routing.HttpService
import spray.json.DefaultJsonProtocol

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps

class ControlInterfaceActor extends Actor with ControlInterface {
  def actorRefFactory = context
  def receive = runRoute(controlRoutes)
}

trait ControlInterface extends HttpService with DefaultJsonProtocol {
  import ControlActor._

  protected val log = LoggerFactory.getLogger(getClass)

  // yes this is backward on purpose
  implicit val memoryInfoFormat = jsonFormat3(MemoryInfo)
  implicit val statusFormat = jsonFormat7(Status)
  implicit def executionContext = actorRefFactory.dispatcher
  implicit val timeout = Timeout(10 seconds)

  def controlRoutes: Route = {
    get {
      pathSingleSlash {
        detach() {
          complete(getStatus)
        }
      } ~
      path("status") {
        detach() {
          complete(getStatus)
        }
      } ~
      path("logs") {
        parameter('level) { level => setLogLevel(level) }
      }
    }
  }

  def setLogLevel(level: String) = {
    level.toLowerCase match {
      case "all" | "trace" | "debug" | "info" | "warn" | "error" | "off" =>
        val rootLogger = LoggerFactory.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME).asInstanceOf[Logger]
        rootLogger.setLevel(Level.toLevel(level))
        complete("ChangeStream logging level has been set to {}.", level)
      case _ =>
        log.error("ControlActor received invalid log level {}.", level)
        complete(StatusCodes.BadRequest, s"Invalid log level: ${level}")
    }
  }

  def getStatus = {
    val storedPosition = Await.result(ChangeStreamEventListener.getStoredPosition, 60 seconds)

    Status(
      server = ChangeStream.serverName,
      clientId = ChangeStream.clientId,
      isConnected = ChangeStream.isConnected,
      binlogClientPosition = ChangeStreamEventListener.getCurrentPosition,
      lastStoredPosition = storedPosition.getOrElse(""),
      binlogClientSequenceNumber = ChangeStreamEventDeserializer.getCurrentSequenceNumber,
      memoryInfo = MemoryInfo(
        Runtime.getRuntime().totalMemory(),
        Runtime.getRuntime().maxMemory(),
        Runtime.getRuntime().freeMemory()
      )
    )
  }
}

object ControlActor {
  case class Status(
                     server: String,
                     clientId: Long,
                     isConnected: Boolean,
                     binlogClientPosition: String,
                     lastStoredPosition: String,
                     binlogClientSequenceNumber: Long,
                     memoryInfo: MemoryInfo
                   )

  case class MemoryInfo(heapSize: Long, maxHeap: Long, freeHeap: Long)
} 
Example 174
Source File: TFuturesTest.scala    From Scala-for-Machine-Learning-Second-Edition   with MIT License 5 votes vote down vote up
package org.scalaml.scalability.akka

import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import org.scalaml.Logging
import org.scalaml.Predef.DblVec
import org.scalaml.filtering.dft.DFT
import org.scalaml.scalability.akka.message._
import org.scalaml.util.FormatUtils._
import org.scalatest.{FlatSpec, Matchers}

import scala.concurrent.duration.Duration
import scala.util.Random

// Note: the original listing omits the enclosing test class declaration; the line below is a
// plausible reconstruction based on the imports and the ScalaTest style used in the test body.
final class TFuturesTest extends FlatSpec with Matchers with Logging {

  protected[this] val name: String = "Scala futures"

  private val NUM_WORKERS = 8
  private val NUM_DATA_POINTS = 1000000
  private val h = (x: Double) => 2.0 * Math.cos(Math.PI * 0.005 * x) + // simulated first harmonic
    Math.cos(Math.PI * 0.05 * x) + // simulated second harmonic
    0.5 * Math.cos(Math.PI * 0.2 * x) + // simulated third harmonic
    0.2 * Random.nextDouble

  private val TimeOut = 5000L
  private val duration = Duration(TimeOut, "millis")
  implicit val timeout = new Timeout(duration)


  it should s"$name Data transformation futures using Akka actors" in {
    show("$name Data transformation futures using Akka actors")

    val actorSystem = ActorSystem("System")
    val xt = Vector.tabulate(NUM_DATA_POINTS)(h(_))

    val master = actorSystem.actorOf(
      Props(new DFTFutures(xt, NUM_WORKERS)),
      "DFTTransform"
    )

    val future = master ? Start()
    Thread.sleep(TimeOut)

    actorSystem.shutdown()
  }
}

// -----------------------------------------------  EOF --------------------------- 
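The test above fires the ask (master ? Start()) but then sleeps for a fixed interval instead of awaiting the returned future. Below is a minimal, self-contained sketch of the more direct pattern that the implicit Timeout enables; the actor, message, and system names are illustrative and not taken from the original project.

import akka.actor.{Actor, ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.Await
import scala.concurrent.duration._

object AskAwaitExample extends App {
  case object Start

  // A toy actor that replies immediately; stands in for the DFT master above.
  class Master extends Actor {
    def receive = { case Start => sender() ! "done" }
  }

  implicit val timeout: Timeout = Timeout(5.seconds) // picked up implicitly by `?`

  val system = ActorSystem("ask-demo")
  val master = system.actorOf(Props(new Master), "master")

  // Await the ask result instead of sleeping for a fixed interval.
  val reply = Await.result((master ? Start).mapTo[String], timeout.duration)
  println(reply) // "done"

  Await.result(system.terminate(), 10.seconds)
}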
Example 175
Source File: config.scala    From Learn-Scala-Programming   with MIT License 5 votes vote down vote up
package ch14

import java.util.concurrent.TimeUnit

import akka.util.Timeout
import com.typesafe.config
import com.typesafe.config.{ConfigFactory, ConfigResolveOptions}

case class ServerConfig(host: String, port: Int)

case class DBConfig(driver: String, url: String, user: String, password: String)

case class Config(server: ServerConfig, database: DBConfig, timeout: Timeout)

object Config {
  def load(): Config = {
    val c = ConfigFactory.parseResources("application.conf").resolve()
    val srv = c.getConfig("server")
    val serverConfig = ServerConfig(srv.getString("host"), srv.getInt("port"))
    val db = c.getConfig("slick.db")
    val dbConfig = DBConfig(db.getString("driver"),
                            db.getString("url"),
                            db.getString("user"),
                            db.getString("password"))
    val d = c.getDuration("timeout")
    val timeout = new Timeout(d.toMillis, TimeUnit.MILLISECONDS)
    Config(serverConfig, dbConfig, timeout)
  }
} 
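Config.load() above expects an application.conf containing a server block, a slick.db block, and a top-level timeout duration. The following sketch mirrors those keys with an inline HOCON string for illustration; the concrete values are assumptions, not taken from the book's sources.

import java.util.concurrent.TimeUnit

import akka.util.Timeout
import com.typesafe.config.ConfigFactory

object ConfigShapeExample extends App {
  // Inline HOCON mirroring the keys read by Config.load(); values are illustrative only.
  val c = ConfigFactory.parseString(
    """
      |server { host = "0.0.0.0", port = 8080 }
      |slick.db {
      |  driver = "org.h2.Driver"
      |  url = "jdbc:h2:mem:test"
      |  user = "sa"
      |  password = ""
      |}
      |timeout = 5 seconds
      |""".stripMargin)

  // Same conversion as in Config.load(): HOCON duration -> akka.util.Timeout.
  val d = c.getDuration("timeout")
  val timeout = new Timeout(d.toMillis, TimeUnit.MILLISECONDS)
  println(timeout) // Timeout(5000 milliseconds)
}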
Example 176
Source File: Routes.scala    From Learn-Scala-Programming   with MIT License 5 votes vote down vote up
package ch14

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.directives.MethodDirectives.{delete, get, post}
import akka.http.scaladsl.server.directives.PathDirectives.path
import akka.http.scaladsl.server.directives.RouteDirectives.complete
import akka.pattern.ask
import akka.util.Timeout
import ch14.Commands._
import ch14.Events.{
  ArticleCreated,
  ArticleDeleted,
  ArticlesPurchased,
  ArticlesRestocked
}

import scala.concurrent.{ExecutionContext, Future}

trait Routes extends JsonSupport {
  implicit def system: ActorSystem
  def inventory: ActorRef
  def config: Config

  implicit lazy val timeout: Timeout = config.timeout
  implicit lazy val ec: ExecutionContext = system.dispatcher

  lazy val articlesRoutes: Route =
    pathPrefix("articles") {
      concat(
        path(Segment) { name =>
          concat(
            post {
              val changedInventory: Future[Option[ArticleCreated]] =
                (inventory ? CreateArticle(name, 0))
                  .mapTo[Option[ArticleCreated]]
              onSuccess(changedInventory) {
                case None        => complete(StatusCodes.Conflict)
                case Some(event) => complete(StatusCodes.Created, event)
              }
            },
            delete {
              val changedInventory: Future[Option[ArticleDeleted]] =
                (inventory ? DeleteArticle(name)).mapTo[Option[ArticleDeleted]]
              rejectEmptyResponse {
                complete(changedInventory)
              }
            },
            get {
              complete((inventory ? GetArticle(name)).mapTo[Inventory])
            }
          )
        }
      )
    }

  lazy val inventoryRoutes: Route =
    path("inventory") {
      get {
        complete((inventory ? GetInventory).mapTo[Inventory])
      }
    } ~
      path("purchase") {
        post {
          entity(as[PurchaseArticles]) { order =>
            val response: Future[Option[ArticlesPurchased]] =
              (inventory ? order).mapTo[Option[ArticlesPurchased]]
            onSuccess(response) {
              case None        => complete(StatusCodes.Conflict)
              case Some(event) => complete(event)
            }
          }
        }
      } ~
      path("restock") {
        post {
          entity(as[RestockArticles]) { stock =>
            val response: Future[Option[ArticlesRestocked]] =
              (inventory ? stock).mapTo[Option[ArticlesRestocked]]
            complete(response)
          }
        }
      }


  lazy val routes: Route = articlesRoutes ~ inventoryRoutes

} 
Example 177
Source File: LowLevelServer.scala    From akka-http-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.component.lowlevelserver

import akka.NotUsed
import akka.actor.{ ActorSystem, Props }
import akka.event.{ Logging, LoggingAdapter }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.pattern.ask
import akka.stream.scaladsl.{ Flow, Sink, Source }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.util.Timeout
import com.github.dnvriend.component.lowlevelserver.dto.{ Person, PersonWithId }
import com.github.dnvriend.component.lowlevelserver.marshaller.Marshaller
import com.github.dnvriend.component.lowlevelserver.repository.PersonRepository
import spray.json.{ DefaultJsonProtocol, _ }

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }

class LowLevelServer(implicit val system: ActorSystem, mat: Materializer, ec: ExecutionContext, log: LoggingAdapter, timeout: Timeout) extends DefaultJsonProtocol with Marshaller {
  val personDb = system.actorOf(Props[PersonRepository])

  def debug(t: Any)(implicit log: LoggingAdapter = null): Unit =
    if (Option(log).isEmpty) println(t) else log.debug(t.toString)

  def http200Okay(req: HttpRequest): HttpResponse =
    HttpResponse(StatusCodes.OK)

  def http200AsyncOkay(req: HttpRequest): Future[HttpResponse] =
    Future(http200Okay(req))

  val http200OkayFlow: Flow[HttpRequest, HttpResponse, NotUsed] = Flow[HttpRequest].map { req =>
    HttpResponse(StatusCodes.OK)
  }

  val serverSource: Source[Http.IncomingConnection, Future[Http.ServerBinding]] =
    Http().bind(interface = "localhost", port = 8080)

  val binding: Future[Http.ServerBinding] = serverSource.to(Sink.foreach { conn =>
    //    conn.handleWith(http200OkayFlow)
    //    conn.handleWithSyncHandler(http200Okay)
    //    conn.handleWithAsyncHandler(http200AsyncOkay, 8)
    conn.handleWithAsyncHandler(personRequestHandler)
  }).run()

  def personRequestHandler(req: HttpRequest): Future[HttpResponse] = req match {
    case HttpRequest(HttpMethods.GET, Uri.Path("/api/person"), _, _, _) => for {
      xs <- (personDb ? "findAll").mapTo[List[PersonWithId]]
      entity = HttpEntity(ContentTypes.`application/json`, xs.toJson.compactPrint)
    } yield HttpResponse(StatusCodes.OK, entity = entity)
    case HttpRequest(HttpMethods.POST, Uri.Path("/api/person"), _, ent, _) => for {
      strictEntity <- ent.toStrict(1.second)
      person <- (personDb ? strictEntity.data.utf8String.parseJson.convertTo[Person]).mapTo[PersonWithId]
    } yield HttpResponse(StatusCodes.OK, entity = person.toJson.compactPrint)
    case req =>
      req.discardEntityBytes()
      Future.successful(HttpResponse(StatusCodes.NotFound))
  }
}

object LowLevelServerLauncher extends App with DefaultJsonProtocol {
  // setting up some machinery
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val timeout: Timeout = Timeout(10.seconds)

  new LowLevelServer()
} 
Example 178
Source File: RestRoute.scala    From akka-http-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.component.highlevelserver.route

import akka.actor.ActorRef
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.marshalling.ToResponseMarshaller
import akka.http.scaladsl.model.{ StatusCodes, Uri }
import akka.http.scaladsl.server.{ Directives, Route }
import akka.http.scaladsl.unmarshalling.FromRequestUnmarshaller
import akka.pattern.ask
import akka.util.Timeout
import com.github.dnvriend.component.highlevelserver.dto.PersonWithId
import com.github.dnvriend.component.highlevelserver.marshaller.Marshaller
import com.github.dnvriend.component.simpleserver.dto.http.Person

import scala.concurrent.Future

// see: akka.http.scaladsl.marshalling.ToResponseMarshallable
// see: akka.http.scaladsl.marshalling.PredefinedToResponseMarshallers
object RestRoute extends Directives with SprayJsonSupport with Marshaller {
  def routes(personDb: ActorRef)(implicit timeout: Timeout, trmSingle: ToResponseMarshaller[PersonWithId], trmList: ToResponseMarshaller[List[PersonWithId]], fru: FromRequestUnmarshaller[Person]): Route = {
    pathEndOrSingleSlash {
      redirect(Uri("/api/person"), StatusCodes.PermanentRedirect)
    } ~
      pathPrefix("api" / "person") {
        get {
          path(IntNumber) { id =>
            println(s"PathEndsInNumber=$id")
            complete((personDb ? "findAll").mapTo[List[PersonWithId]])
          } ~
            pathEndOrSingleSlash {
              parameter("foo") { foo =>
                println(s"foo=$foo")
                complete((personDb ? "findAll").mapTo[List[PersonWithId]])
              } ~
                parameter('bar) { bar =>
                  println(s"bar=$bar")
                  complete((personDb ? "findAll").mapTo[List[PersonWithId]])
                } ~
                complete((personDb ? "findAll").mapTo[List[PersonWithId]])
            }
        } ~
          (post & pathEndOrSingleSlash & entity(as[Person])) { person =>
            complete((personDb ? person).mapTo[PersonWithId])
          }
      } ~
      path("failure") {
        pathEnd {
          complete(Future.failed[String](new RuntimeException("Simulated Failure")))
        }
      } ~
      path("success") {
        pathEnd {
          complete(Future.successful("Success!!"))
        }
      }
  }
} 
Example 179
Source File: TestSpec.scala    From akka-http-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.stream.testkit.TestSubscriber
import akka.stream.testkit.scaladsl.TestSink
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest._
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatestplus.play.guice.GuiceOneServerPerSuite
import play.api.inject.BindingKey
import play.api.test.WsTestClient

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
import scala.util.Try

class TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with OptionValues
    with TryValues
    with ScalaFutures
    with WsTestClient
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with GuiceOneServerPerSuite {

  def getComponent[A: ClassTag] = app.injector.instanceOf[A]

  def getAnnotatedComponent[A](name: String)(implicit ct: ClassTag[A]): A =
    app.injector.instanceOf[A](BindingKey(ct.runtimeClass.asInstanceOf[Class[A]]).qualifiedWith(name))

  // set the port number of the HTTP server
  override lazy val port: Int = 8080
  implicit val timeout: Timeout = 10.seconds
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 30.seconds, interval = 300.millis)
  implicit val system: ActorSystem = getComponent[ActorSystem]
  implicit val ec: ExecutionContext = getComponent[ExecutionContext]
  implicit val mat: Materializer = getComponent[Materializer]

  // ================================== Supporting Operations ====================================
  implicit class PimpedByteArray(self: Array[Byte]) {
    def getString: String = new String(self)
  }

  implicit class PimpedFuture[T](self: Future[T]) {
    def toTry: Try[T] = Try(self.futureValue)
  }

  implicit class SourceOps[A](src: Source[A, _]) {
    def testProbe(f: TestSubscriber.Probe[A] => Unit): Unit =
      f(src.runWith(TestSink.probe(system)))
  }

  def killActors(actors: ActorRef*): Unit = {
    val tp = TestProbe()
    actors.foreach { (actor: ActorRef) =>
      tp watch actor
      actor ! PoisonPill
      tp.expectTerminated(actor)
    }
  }

  override protected def beforeEach(): Unit = {
  }
} 
Example 180
Source File: GracefulStopSupport.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package akka.pattern

import akka.actor._
import akka.util.{ Timeout }
import akka.dispatch.sysmsg.{ Unwatch, Watch }
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

trait GracefulStopSupport {
  
  def gracefulStop(target: ActorRef, timeout: FiniteDuration, stopMessage: Any = PoisonPill): Future[Boolean] = {
    val internalTarget = target.asInstanceOf[InternalActorRef]
    val ref = PromiseActorRef(internalTarget.provider, Timeout(timeout), target, stopMessage.getClass.getName)
    internalTarget.sendSystemMessage(Watch(internalTarget, ref))
    target.tell(stopMessage, Actor.noSender)
    ref.result.future.transform(
      {
        case Terminated(t) if t.path == target.path ⇒ true
        case _ ⇒ { internalTarget.sendSystemMessage(Unwatch(target, ref)); false }
      },
      t ⇒ { internalTarget.sendSystemMessage(Unwatch(target, ref)); t })(ref.internalCallingThreadExecutionContext)
  }
} 
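gracefulStop returns a future that completes with true once the Terminated message for the target arrives within the given timeout (wrapped internally in a Timeout), and fails with an AskTimeoutException otherwise. A minimal usage sketch, with illustrative actor and system names:

import akka.actor.{Actor, ActorSystem, Props}
import akka.pattern.gracefulStop

import scala.concurrent.Await
import scala.concurrent.duration._

object GracefulStopExample extends App {
  class Worker extends Actor {
    def receive = { case msg => sender() ! msg }
  }

  val system = ActorSystem("graceful-stop-demo")
  val worker = system.actorOf(Props(new Worker), "worker")

  // Sends PoisonPill by default and resolves once Terminated is observed.
  val stopped = gracefulStop(worker, 5.seconds)
  Await.result(stopped, 6.seconds) // true if the worker stopped in time

  Await.result(system.terminate(), 10.seconds)
}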
Example 181
Source File: Routes.scala    From Pi-Akka-Cluster   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.akka_oled

import akka.actor.typed.scaladsl.AskPattern._
import akka.actor.typed.{ActorRef, ActorSystem}
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.server.Directives.{as, complete, concat, entity, get, onSuccess, pathPrefix, post, _}
import akka.http.scaladsl.server.Route
import akka.util.Timeout
import com.lightbend.akka_oled.DistributedDataTracker.{Get, UpdateStatus}
import com.lightbend.akka_oled.Main.NodeStatus

import scala.concurrent.duration._

class Routes(tracker: ActorRef[DistributedDataTracker.Command])(implicit system: ActorSystem[_]) extends SprayJsonSupport {
  implicit val timeout: Timeout = 8.seconds

  val route: Route =
    pathPrefix("status" / "[0-9a-zA-Z]+".r) {
      node =>
        concat(
          get {
            onSuccess(tracker.ask[String](Get(node, _))) {
              value => complete(value + "\n")
            }
          },
          post {
            entity(as[NodeStatus]) { status =>
              tracker ! UpdateStatus(node, status.status)
              complete("Ok\n")
            }
          }
        )
    }

} 
Example 182
Source File: Routes.scala    From Pi-Akka-Cluster   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.akka_oled

import akka.actor.typed.ActorSystem
import akka.cluster.sharding.typed.scaladsl.ClusterSharding
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.server.Directives.{as, complete, concat, entity, get, onSuccess, pathPrefix, post, _}
import akka.http.scaladsl.server.Route
import akka.util.Timeout
import com.lightbend.akka_oled.ClientEntity.{Get, PostPoints}
import com.lightbend.akka_oled.Main.AddPoints

import scala.concurrent.duration._

object Routes {

  case class NodeStatus(status: String)

}

class Routes(sharding: ClusterSharding)(implicit system: ActorSystem[_]) extends SprayJsonSupport {
  implicit val timeout: Timeout = 8.seconds
  implicit val scheduler = system.scheduler

  lazy val route: Route =
    pathPrefix("user" / "[0-9a-zA-Z]+".r) { username =>
      concat(
        get {
          val entityRef = sharding.entityRefFor(ClientEntity.TypeKey, username)
          onSuccess(entityRef ? Get(username)) {
            value: Int => complete(value.toString + "\n")
          }
        },
        post {
          entity(as[AddPoints]) { transaction =>
            val entityRef = sharding.entityRefFor(ClientEntity.TypeKey, username)
            onSuccess(entityRef ? PostPoints(username, transaction.points)) {
              result => complete(result)
            }
          }
        }
      )
    }

} 
Example 183
Source File: ShardStateTracker.scala    From Pi-Akka-Cluster   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.akka_oled

import akka.actor.typed.{ActorRef, Behavior}
import akka.actor.typed.scaladsl.Behaviors
import akka.cluster.sharding.ShardRegion.CurrentShardRegionState
import akka.cluster.sharding.typed.{ClusterShardingQuery, GetShardRegionState}
import akka.util.Timeout
import com.lightbend.akka_oled.OledShardingVisualizer.ShardRegionState
import scala.concurrent.duration._

object ShardStateTracker {

  implicit val timeout: Timeout = 6.seconds

  def apply(visualizer: ActorRef[OledShardingVisualizer.Command]): Behavior[CurrentShardRegionState] = Behaviors.setup { context =>
    Behaviors.receiveMessage {
      message: CurrentShardRegionState =>
        visualizer.tell(ShardRegionState(message.shards))
        Behaviors.same
    }
  }
}

object ShardStateScheduler {

  implicit val timeout: Timeout = 6.seconds

  case class Tick()

  def apply(shardState: ActorRef[ClusterShardingQuery],
            shardTracker: ActorRef[CurrentShardRegionState]): Behavior[Tick] =
    Behaviors.withTimers { timer =>
      timer.startTimerAtFixedRate(Tick(), 1.second)
      Behaviors.receiveMessage { _: Tick =>
        shardState ! GetShardRegionState(ClientEntity.TypeKey, shardTracker)
        Behaviors.same
      }

    }
} 
Example 184
Source File: TestLogkafkaStateActor.scala    From CMAK   with Apache License 2.0 5 votes vote down vote up
package kafka.manager

import java.util.Properties
import java.util.concurrent.TimeUnit

import akka.actor.{ActorRef, ActorSystem, Kill, Props}
import akka.pattern._
import akka.util.Timeout
import akka.util.Timeout._
import com.typesafe.config.{Config, ConfigFactory}
import kafka.manager.features.ClusterFeatures
import kafka.manager.logkafka.LogkafkaStateActor
import kafka.manager.model.{ActorModel, ClusterConfig, ClusterContext}
import kafka.manager.utils.KafkaServerInTest
import ActorModel._
import kafka.test.SeededBroker

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.reflect.ClassTag
import scala.util.Try


class TestLogkafkaStateActor extends KafkaServerInTest with BaseTest {

  private[this] val akkaConfig: Properties = new Properties()
  akkaConfig.setProperty("pinned-dispatcher.type","PinnedDispatcher")
  akkaConfig.setProperty("pinned-dispatcher.executor","thread-pool-executor")
  private[this] val config : Config = ConfigFactory.parseProperties(akkaConfig)
  private[this] val system = ActorSystem("test-logkafka-state-actor",config)
  private[this] val broker = new SeededBroker("ks-test",4)
  override val kafkaServerZkPath = broker.getZookeeperConnectionString
  private[this] var logkafkaStateActor : Option[ActorRef] = None
  private[this] implicit val timeout: Timeout = 10.seconds
  private[this] val defaultClusterConfig = ClusterConfig("test","0.8.2.0","localhost:2818",100,false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(defaultTuning), securityProtocol="PLAINTEXT", saslMechanism=None, jaasConfig=None)
  private[this] val defaultClusterContext = ClusterContext(ClusterFeatures.from(defaultClusterConfig), defaultClusterConfig)

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    val props = Props(classOf[LogkafkaStateActor],sharedCurator, defaultClusterContext)

    logkafkaStateActor = Some(system.actorOf(props.withDispatcher("pinned-dispatcher"),"lksa"))
  }

  override protected def afterAll(): Unit = {
    logkafkaStateActor.foreach( _ ! Kill )
    Try(Await.ready(system.terminate(), Duration(5, TimeUnit.SECONDS)))
    Try(broker.shutdown())
    super.afterAll()
  }

  private[this] def withLogkafkaStateActor[Input,Output,FOutput](msg: Input)(fn: Output => FOutput)(implicit tag: ClassTag[Output]) : FOutput = {
    require(logkafkaStateActor.isDefined, "logkafkaStateActor undefined!")
    val future = ask(logkafkaStateActor.get, msg).mapTo[Output]
    val result = Await.result(future,10.seconds)
    fn(result)
  }

  test("get logkafka logkafka id list") {
    withLogkafkaStateActor(LKSGetLogkafkaLogkafkaIds) { result: LogkafkaLogkafkaIdList =>
      result.list foreach println
    }
  }

  test("get logkafka config") {
    withLogkafkaStateActor(LKSGetLogkafkaLogkafkaIds) { result: LogkafkaLogkafkaIdList =>
      val configs = result.list map { logkafka_id =>
        withLogkafkaStateActor(LKSGetLogkafkaConfig(logkafka_id)) { logkafkaConfig: LogkafkaConfig => logkafkaConfig }
      }
      configs foreach println
    }
  }

  test("get logkafka client") {
    withLogkafkaStateActor(LKSGetLogkafkaLogkafkaIds) { result: LogkafkaLogkafkaIdList =>
      val clients = result.list map { logkafka_id =>
        withLogkafkaStateActor(LKSGetLogkafkaClient(logkafka_id)) { logkafkaClient: LogkafkaClient => logkafkaClient }
      }
      clients foreach println
    }
  }

  test("get logkafka configs") {
    withLogkafkaStateActor(LKSGetAllLogkafkaConfigs()) { lc: LogkafkaConfigs =>
      lc.configs foreach println
    }
  }

  test("get logkafka clients") {
    withLogkafkaStateActor(LKSGetAllLogkafkaClients()) { lc: LogkafkaClients =>
      lc.clients foreach println
    }
  }

} 
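The test above declares implicit val timeout: Timeout = 10.seconds, which compiles because Timeout.durationToTimeout (brought in here by import akka.util.Timeout._, and also available from the Timeout companion object) converts a FiniteDuration into a Timeout. A tiny sketch isolating that conversion:

import akka.util.Timeout
import akka.util.Timeout.durationToTimeout

import scala.concurrent.duration._

object TimeoutConversionExample extends App {
  // FiniteDuration is converted to Timeout by the implicit durationToTimeout.
  val t: Timeout = 10.seconds
  println(t.duration.toMillis) // 10000
}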
Example 185
Source File: TestBrokerViewCacheActor.scala    From CMAK   with Apache License 2.0 5 votes vote down vote up
package kafka.manager

import java.util.Properties
import java.util.concurrent.TimeUnit

import akka.actor.{ActorRef, ActorSystem, Kill, Props}
import akka.pattern._
import akka.util.Timeout
import com.typesafe.config.{Config, ConfigFactory}
import kafka.manager.actor.cluster.{BrokerViewCacheActor, BrokerViewCacheActorConfig, KafkaManagedOffsetCacheConfig, KafkaStateActor, KafkaStateActorConfig}
import kafka.manager.base.LongRunningPoolConfig
import kafka.manager.features.ClusterFeatures
import kafka.manager.model.{ActorModel, ClusterConfig, ClusterContext}
import kafka.manager.utils.KafkaServerInTest
import ActorModel._
import kafka.test.SeededBroker

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.reflect.ClassTag
import scala.util.Try


class TestBrokerViewCacheActor extends KafkaServerInTest with BaseTest {
  private[this] val akkaConfig: Properties = new Properties()
  akkaConfig.setProperty("pinned-dispatcher.type","PinnedDispatcher")
  akkaConfig.setProperty("pinned-dispatcher.executor","thread-pool-executor")
  private[this] val config : Config = ConfigFactory.parseProperties(akkaConfig)
  private[this] val system = ActorSystem("test-broker-view-cache-actor",config)
  private[this] val broker = new SeededBroker("bvc-test",4)
  override val kafkaServerZkPath = broker.getZookeeperConnectionString
  private[this] var kafkaStateActor : Option[ActorRef] = None
  private[this] implicit val timeout: Timeout = 10.seconds

  private[this] var brokerViewCacheActor : Option[ActorRef] = None
  private[this] val defaultClusterConfig = ClusterConfig("test","0.8.2.0","localhost:2818",100,false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxSsl = false, jmxPass = None, tuning = Option(defaultTuning), securityProtocol="PLAINTEXT", saslMechanism=None, jaasConfig=None)
  private[this] val defaultClusterContext = ClusterContext(ClusterFeatures.from(defaultClusterConfig), defaultClusterConfig)

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    val clusterConfig = ClusterConfig("dev","0.8.2.0",kafkaServerZkPath, jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(defaultTuning), securityProtocol="PLAINTEXT", saslMechanism=None, jaasConfig=None)
    val clusterContext = ClusterContext(ClusterFeatures.from(clusterConfig), clusterConfig)
    val ksConfig = KafkaStateActorConfig(sharedCurator, "pinned-dispatcher", clusterContext, LongRunningPoolConfig(2,100), LongRunningPoolConfig(2,100), 5, 10000, None, KafkaManagedOffsetCacheConfig())
    val props = Props(classOf[KafkaStateActor],ksConfig)

    kafkaStateActor = Some(system.actorOf(props.withDispatcher("pinned-dispatcher"),"ksa"))

    val bvConfig = BrokerViewCacheActorConfig(kafkaStateActor.get.path, clusterContext, LongRunningPoolConfig(2,100), FiniteDuration(10, SECONDS))
    val bvcProps = Props(classOf[BrokerViewCacheActor],bvConfig)

    brokerViewCacheActor = Some(system.actorOf(bvcProps,"broker-view"))

    brokerViewCacheActor.get ! BVForceUpdate
    Thread.sleep(10000)
  }

  override protected def afterAll(): Unit = {
    brokerViewCacheActor.foreach( _ ! Kill )
    kafkaStateActor.foreach( _ ! Kill )
    Try(Await.ready(system.terminate(), Duration(5, TimeUnit.SECONDS)))
    Try(broker.shutdown())
    super.afterAll()
  }

  private[this] def withBrokerViewCacheActor[Input,Output,FOutput]
  (msg: Input)(fn: Output => FOutput)(implicit tag: ClassTag[Output]) : FOutput = {
    require(brokerViewCacheActor.isDefined, "brokerViewCacheActor undefined!")
    val future = ask(brokerViewCacheActor.get, msg).mapTo[Output]
    val result = Await.result(future,10.seconds)
    fn(result)
  }

  test("get broker view") {
    withBrokerViewCacheActor(BVGetView(1)) { optionalBrokerView : Option[BVView] =>
      println(optionalBrokerView)
    }
  }

} 
Example 186
Source File: TestLogkafkaViewCacheActor.scala    From CMAK   with Apache License 2.0 5 votes vote down vote up
package kafka.manager

import java.util.Properties
import java.util.concurrent.TimeUnit

import akka.actor.{ActorRef, ActorSystem, Kill, Props}
import akka.pattern._
import akka.util.Timeout
import com.typesafe.config.{Config, ConfigFactory}
import kafka.manager.actor.cluster.KafkaStateActor
import kafka.manager.base.LongRunningPoolConfig
import kafka.manager.features.ClusterFeatures
import kafka.manager.logkafka.{LogkafkaViewCacheActor, LogkafkaViewCacheActorConfig}
import kafka.manager.model.{ActorModel, ClusterConfig, ClusterContext}
import kafka.manager.utils.KafkaServerInTest
import ActorModel._
import kafka.test.SeededBroker

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.reflect.ClassTag
import scala.util.Try


class TestLogkafkaViewCacheActor extends KafkaServerInTest with BaseTest {
  private[this] val akkaConfig: Properties = new Properties()
  akkaConfig.setProperty("pinned-dispatcher.type","PinnedDispatcher")
  akkaConfig.setProperty("pinned-dispatcher.executor","thread-pool-executor")
  private[this] val config : Config = ConfigFactory.parseProperties(akkaConfig)
  private[this] val system = ActorSystem("test-logkafka-view-cache-actor",config)
  private[this] val broker = new SeededBroker("lkvc-test",4)
  override val kafkaServerZkPath = broker.getZookeeperConnectionString
  private[this] var logkafkaStateActor : Option[ActorRef] = None
  private[this] implicit val timeout: Timeout = 10.seconds

  private[this] var logkafkaViewCacheActor : Option[ActorRef] = None
  private[this] val defaultClusterConfig = ClusterConfig("test","0.8.2.0","localhost:2818",100,false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(defaultTuning), securityProtocol="PLAINTEXT", saslMechanism=None, jaasConfig=None)
  private[this] val defaultClusterContext = ClusterContext(ClusterFeatures.from(defaultClusterConfig), defaultClusterConfig)

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    val clusterConfig = ClusterConfig("dev","0.8.2.0",kafkaServerZkPath, jmxEnabled = false, pollConsumers = true, filterConsumers = true, logkafkaEnabled = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(defaultTuning), securityProtocol="PLAINTEXT", saslMechanism=None, jaasConfig=None)
    val clusterContext = ClusterContext(ClusterFeatures.from(clusterConfig), clusterConfig)
    val props = Props(classOf[KafkaStateActor],sharedCurator, defaultClusterContext)

    logkafkaStateActor = Some(system.actorOf(props.withDispatcher("pinned-dispatcher"),"lksa"))

    val lkvConfig = LogkafkaViewCacheActorConfig(logkafkaStateActor.get.path, clusterContext, LongRunningPoolConfig(2,100), FiniteDuration(10, SECONDS))
    val lkvcProps = Props(classOf[LogkafkaViewCacheActor],lkvConfig)

    logkafkaViewCacheActor = Some(system.actorOf(lkvcProps,"logkafka-view"))

    logkafkaViewCacheActor.get ! BVForceUpdate
    Thread.sleep(10000)
  }

  override protected def afterAll(): Unit = {
    logkafkaViewCacheActor.foreach( _ ! Kill )
    logkafkaStateActor.foreach( _ ! Kill )
    Try(Await.ready(system.terminate(), Duration(5, TimeUnit.SECONDS)))
    Try(broker.shutdown())
    super.afterAll()
  }

  private[this] def withLogkafkaViewCacheActor[Input,Output,FOutput]
  (msg: Input)(fn: Output => FOutput)(implicit tag: ClassTag[Output]) : FOutput = {
    require(logkafkaViewCacheActor.isDefined, "logkafkaViewCacheActor undefined!")
    val future = ask(logkafkaViewCacheActor.get, msg).mapTo[Output]
    val result = Await.result(future,10.seconds)
    fn(result)
  }
} 
Example 187
Source File: NodeRoutes.scala    From scalachain   with MIT License 5 votes vote down vote up
package com.elleflorio.scalachain.api

import com.elleflorio.scalachain.actor.Node._
import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.util.Timeout
import com.elleflorio.scalachain.blockchain.{Chain, Transaction}
import com.elleflorio.scalachain.cluster.ClusterManager.GetMembers
import com.elleflorio.scalachain.utils.JsonSupport._

import scala.concurrent.Future
import scala.concurrent.duration._

trait NodeRoutes extends SprayJsonSupport {

  implicit def system: ActorSystem

  def node: ActorRef
  def clusterManager: ActorRef

  implicit lazy val timeout = Timeout(5.seconds)

  lazy val statusRoutes: Route = pathPrefix("status") {
    concat(
      pathEnd {
        concat(
          get {
            val statusFuture: Future[Chain] = (node ? GetStatus).mapTo[Chain]
            onSuccess(statusFuture) { status =>
              complete(StatusCodes.OK, status)
            }
          }
        )
      },
      pathPrefix("members") {
        concat(
          pathEnd {
            concat(
              get {
                val membersFuture: Future[List[String]] = (clusterManager ? GetMembers).mapTo[List[String]]
                onSuccess(membersFuture) { members =>
                  complete(StatusCodes.OK, members)
                }
              }
            )
          }
        )
      }
    )
  }

  lazy val transactionRoutes: Route = pathPrefix("transactions") {
    concat(
      pathEnd {
        concat(
          get {
            val transactionsRetrieved: Future[List[Transaction]] =
              (node ? GetTransactions).mapTo[List[Transaction]]
            onSuccess(transactionsRetrieved) { transactions =>
              complete(transactions.toList)
            }
          },
          post {
            entity(as[Transaction]) { transaction =>
              val transactionCreated: Future[Int] =
                (node ? AddTransaction(transaction)).mapTo[Int]
              onSuccess(transactionCreated) { done =>
                complete((StatusCodes.Created, done.toString))
              }
            }
          }
        )
      }
    )
  }

  lazy val mineRoutes: Route = pathPrefix("mine") {
    concat(
      pathEnd {
        concat(
          get {
            node ! Mine
            complete(StatusCodes.OK)
          }
        )
      }
    )
  }

} 
Example 188
Source File: DriverActorTest.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.actor

import java.nio.file.{Files, Path}

import akka.actor.{ActorSystem, Props}
import akka.testkit.{DefaultTimeout, ImplicitSender, TestKit}
import akka.util.Timeout
import com.stratio.sparta.serving.api.actor.DriverActor.UploadDrivers
import com.stratio.sparta.serving.core.config.{SpartaConfig, SpartaConfigFactory}
import com.stratio.sparta.serving.core.models.SpartaSerializer
import com.stratio.sparta.serving.core.models.files.{SpartaFile, SpartaFilesResponse}
import com.typesafe.config.{Config, ConfigFactory}
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
import spray.http.BodyPart

import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{Failure, Success}

@RunWith(classOf[JUnitRunner])
class DriverActorTest extends TestKit(ActorSystem("PluginActorSpec"))
  with DefaultTimeout
  with ImplicitSender
  with WordSpecLike
  with Matchers
  with BeforeAndAfterAll
  with BeforeAndAfterEach
  with MockitoSugar with SpartaSerializer {

  val tempDir: Path = Files.createTempDirectory("test")
  tempDir.toFile.deleteOnExit()

  val localConfig: Config = ConfigFactory.parseString(
    s"""
       |sparta{
       |   api {
       |     host = local
       |     port= 7777
       |   }
       |}
       |
       |sparta.config.driverPackageLocation = "$tempDir"
    """.stripMargin)

  val fileList = Seq(BodyPart("reference.conf", "file"))

  override def beforeEach(): Unit = {
    SpartaConfig.initMainConfig(Option(localConfig), SpartaConfigFactory(localConfig))
    SpartaConfig.initApiConfig()
  }

  override def afterAll: Unit = {
    shutdown()
  }

  override implicit val timeout: Timeout = Timeout(15 seconds)

  "DriverActor " must {

    "Not save files with wrong extension" in {
      val driverActor = system.actorOf(Props(new DriverActor()))
      driverActor ! UploadDrivers(fileList)
      expectMsgPF() {
        case SpartaFilesResponse(Success(f: Seq[SpartaFile])) => f.isEmpty shouldBe true
      }
    }
    "Not upload empty files" in {
      val driverActor = system.actorOf(Props(new DriverActor()))
      driverActor ! UploadDrivers(Seq.empty)
      expectMsgPF() {
        case SpartaFilesResponse(Failure(f)) => f.getMessage shouldBe "At least one file is expected"
      }
    }
    "Save a file" in {
      val driverActor = system.actorOf(Props(new DriverActor()))
      driverActor ! UploadDrivers(Seq(BodyPart("reference.conf", "file.jar")))
      expectMsgPF() {
        case SpartaFilesResponse(Success(f: Seq[SpartaFile])) => f.head.fileName.endsWith("file.jar") shouldBe true
      }
    }
  }
} 
Example 189
Source File: PluginActorTest.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.actor

import java.nio.file.{Files, Path}

import akka.actor.{ActorSystem, Props}
import akka.testkit.{DefaultTimeout, ImplicitSender, TestKit}
import akka.util.Timeout
import com.stratio.sparta.serving.api.actor.PluginActor.{PluginResponse, UploadPlugins}
import com.stratio.sparta.serving.api.constants.HttpConstant
import com.stratio.sparta.serving.core.config.{SpartaConfig, SpartaConfigFactory}
import com.stratio.sparta.serving.core.models.SpartaSerializer
import com.stratio.sparta.serving.core.models.files.{SpartaFile, SpartaFilesResponse}
import com.typesafe.config.{Config, ConfigFactory}
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
import spray.http.BodyPart

import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{Failure, Success}

@RunWith(classOf[JUnitRunner])
class PluginActorTest extends TestKit(ActorSystem("PluginActorSpec"))
  with DefaultTimeout
  with ImplicitSender
  with WordSpecLike
  with Matchers
  with BeforeAndAfterAll
  with BeforeAndAfterEach
  with MockitoSugar with SpartaSerializer {

  val tempDir: Path = Files.createTempDirectory("test")
  tempDir.toFile.deleteOnExit()

  val localConfig: Config = ConfigFactory.parseString(
    s"""
       |sparta{
       |   api {
       |     host = local
       |     port= 7777
       |   }
       |}
       |
       |sparta.config.pluginPackageLocation = "$tempDir"
    """.stripMargin)


  val fileList = Seq(BodyPart("reference.conf", "file"))

  override def beforeEach(): Unit = {
    SpartaConfig.initMainConfig(Option(localConfig), SpartaConfigFactory(localConfig))
    SpartaConfig.initApiConfig()
  }

  override def afterAll: Unit = {
    shutdown()
  }

  override implicit val timeout: Timeout = Timeout(15 seconds)

  "PluginActor " must {

    "Not save files with wrong extension" in {
      val pluginActor = system.actorOf(Props(new PluginActor()))
      pluginActor ! UploadPlugins(fileList)
      expectMsgPF() {
        case SpartaFilesResponse(Success(f: Seq[SpartaFile])) => f.isEmpty shouldBe true
      }
    }
    "Not upload empty files" in {
      val pluginActor = system.actorOf(Props(new PluginActor()))
      pluginActor ! UploadPlugins(Seq.empty)
      expectMsgPF() {
        case SpartaFilesResponse(Failure(f)) => f.getMessage shouldBe "At least one file is expected"
      }
    }
    "Save a file" in {
      val pluginActor = system.actorOf(Props(new PluginActor()))
      pluginActor ! UploadPlugins(Seq(BodyPart("reference.conf", "file.jar")))
      expectMsgPF() {
        case SpartaFilesResponse(Success(f: Seq[SpartaFile])) => f.head.fileName.endsWith("file.jar") shouldBe true
      }
    }
  }

} 
Example 190
Source File: RabbitIntegrationSpec.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.plugin.input.rabbitmq

import akka.actor.ActorSystem
import akka.event.slf4j.SLF4JLogging
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.concurrent.TimeLimitedTests
import org.scalatest.time.{Minute, Span}
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Matchers, WordSpec}

import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Try


abstract class RabbitIntegrationSpec extends WordSpec with Matchers with SLF4JLogging with TimeLimitedTests
  with BeforeAndAfter with BeforeAndAfterAll {
  private lazy val config = ConfigFactory.load()


  implicit val system = ActorSystem("ActorRabbitMQSystem")
  implicit val timeout = Timeout(10 seconds)
  val timeLimit = Span(1, Minute)
  
  val RabbitTimeOut = 3 second
  val configQueueName = Try(config.getString("rabbitmq.queueName")).getOrElse("rabbitmq-queue")
  val configExchangeName = Try(config.getString("rabbitmq.exchangeName")).getOrElse("rabbitmq-exchange")
  val exchangeType = Try(config.getString("rabbitmq.exchangeType")).getOrElse("topic")
  val routingKey = Try(config.getString("rabbitmq.routingKey")).getOrElse("")
  val vHost = Try(config.getString("rabbitmq.vHost")).getOrElse("/")
  val hosts = Try(config.getString("rabbitmq.hosts")).getOrElse("127.0.0.1")
  val userName = Try(config.getString("rabbitmq.userName")).getOrElse("guest")
  val password = Try(config.getString("rabbitmq.password")).getOrElse("guest")
  val RabbitConnectionURI = s"amqp://$userName:$password@$hosts/%2F"
  // Note: the original listing omits the SparkConf used by initSpark(); a minimal local configuration is assumed here.
  val conf = new SparkConf().setMaster("local[2]").setAppName("RabbitIntegrationSpec")
  var sc: Option[SparkContext] = None
  var ssc: Option[StreamingContext] = None

  def initSpark(): Unit = {
    sc = Some(new SparkContext(conf))
    ssc = Some(new StreamingContext(sc.get, Seconds(1)))
  }

  def stopSpark(): Unit = {
    ssc.foreach(_.stop())
    sc.foreach(_.stop())

    System.gc()
  }

  def initRabbitMQ(): Unit

  def closeRabbitMQ(): Unit

  before {
    log.info("Init spark")
    initSpark()
    log.info("Sending messages to queue..")
    initRabbitMQ()
    log.info("Messages in queue.")
  }

  after {
    log.info("Stop spark")
    stopSpark()
    log.info("Clean rabbitmq")
    closeRabbitMQ()
  }
} 
Example 191
Source File: Main.scala    From sns   with Apache License 2.0 5 votes vote down vote up
package me.snov.sns

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import me.snov.sns.actor._
import me.snov.sns.api._
import me.snov.sns.service.FileDbService
import me.snov.sns.util.ToStrict

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.util.Properties

object Main extends App with ToStrict {
  implicit val system = ActorSystem("sns")
  implicit val executor: ExecutionContext = system.dispatcher
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val logger: LoggingAdapter = Logging(system, getClass)
  implicit val timeout = new Timeout(1.second)

  val config = ConfigFactory.load()
  val dbService = new FileDbService(Properties.envOrElse("DB_PATH", config.getString("db.path")))

  val dbActor = system.actorOf(DbActor.props(dbService), name = "DbActor")
  val homeActor = system.actorOf(HomeActor.props, name = "HomeActor")
  val subscribeActor = system.actorOf(SubscribeActor.props(dbActor), name = "SubscribeActor")
  val publishActor = system.actorOf(PublishActor.props(subscribeActor), name = "PublishActor")

  val routes: Route =
    toStrict {
      TopicApi.route(subscribeActor) ~
      SubscribeApi.route(subscribeActor) ~
      PublishApi.route(publishActor) ~
      HealthCheckApi.route ~
      HomeApi.route(homeActor)
    }

  logger.info("SNS v{} is starting", getClass.getPackage.getImplementationVersion)

  Http().bindAndHandle(
    handler = logRequestResult("akka-http-sns")(routes),
    interface = Properties.envOrElse("HTTP_INTERFACE", config.getString("http.interface")),
    port = Properties.envOrElse("HTTP_PORT", config.getString("http.port")).toInt
  )
} 
Example 192
Source File: PublishActor.scala    From sns   with Apache License 2.0 5 votes vote down vote up
package me.snov.sns.actor

import akka.actor.Status.{Failure, Success}
import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.pattern.ask
import akka.pattern.pipe
import akka.util.Timeout
import me.snov.sns.actor.SubscribeActor.CmdFanOut
import me.snov.sns.model.{Message, MessageAttribute}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object PublishActor {
  def props(actor: ActorRef) = Props(classOf[PublishActor], actor)

  case class CmdPublish(topicArn: String, bodies: Map[String, String], messageAttributes: Map[String, MessageAttribute])
}

class PublishActor(subscribeActor: ActorRef) extends Actor with ActorLogging {
  import me.snov.sns.actor.PublishActor._

  private implicit val timeout = Timeout(1.second)
  private implicit val ec = context.dispatcher

  private def publish(topicArn: String, bodies: Map[String, String], messageAttributes: Map[String, MessageAttribute])(implicit ec: ExecutionContext) = {
    val message = Message(bodies, messageAttributes = messageAttributes)

    (subscribeActor ? CmdFanOut(topicArn, message)).map {
      case Failure(e) => Failure(e)
      case Success => message
    }
  }

  override def receive = {
    case CmdPublish(topicArn, bodies, attributes) => publish(topicArn, bodies, attributes) pipeTo sender
  }
} 
Example 193
Source File: TopicApi.scala    From sns   with Apache License 2.0 5 votes vote down vote up
package me.snov.sns.api

import akka.actor.ActorRef
import akka.actor.Status.Success
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.util.Timeout
import me.snov.sns.actor.SubscribeActor.{CmdListTopics, CmdDeleteTopic, CmdCreateTopic}
import me.snov.sns.model.Topic
import me.snov.sns.response.TopicResponse

import scala.concurrent.ExecutionContext

object TopicApi {
  private val namePattern = """([\w+_-]{1,256})""".r
  private val arnPattern = """([\w+_:-]{1,512})""".r

  def route(actor: ActorRef)(implicit timeout: Timeout, ec: ExecutionContext): Route = {
    pathSingleSlash {
      formField('Action ! "CreateTopic") {
        formField('Name) {
          case namePattern(name) => complete {
            (actor ? CmdCreateTopic(name)).mapTo[Topic].map {
              TopicResponse.create
            }
          }
          case _ => complete(HttpResponse(400, entity = "InvalidParameter: invalid topic name"))
        } ~
        complete(HttpResponse(400, entity = "Topic name is missing"))
      } ~
      formField('Action ! "DeleteTopic") {
        formField('TopicArn) {
          case arnPattern(arn) => complete {
            (actor ? CmdDeleteTopic(arn)).map {
              case Success => TopicResponse.delete
              case _ => HttpResponse(404, entity = "NotFound")
            }
          }
          case _ => complete(HttpResponse(400, entity = "Invalid topic ARN"))
        } ~
        complete(HttpResponse(404, entity = "NotFound"))
      } ~ 
      formField('Action ! "ListTopics") {
        complete {
          (actor ? CmdListTopics).mapTo[Iterable[Topic]].map {
            TopicResponse.list
          }
        }
      }
    }
  }
} 
Example 194
Source File: PublishApi.scala    From sns   with Apache License 2.0 5 votes vote down vote up
package me.snov.sns.api

import akka.actor.ActorRef
import akka.event.Logging

import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.util.Timeout
import me.snov.sns.actor.PublishActor.CmdPublish
import me.snov.sns.model.{Message, MessageAttribute, TopicNotFoundException}
import me.snov.sns.response.PublishResponse
import spray.json.DefaultJsonProtocol._
import spray.json._

import scala.concurrent.{ExecutionContext, Future}

case class InvalidTopicArnException(msg: String) extends Exception(msg)

object PublishApi {
  private val arnPattern = """([\w+_:-]{1,512})""".r

  def route(actorRef: ActorRef)(implicit timeout: Timeout, ec: ExecutionContext): Route = {
    pathSingleSlash {
      formField('Action ! "Publish") {
        formFieldSeq { fields =>
          val messageAttributes: Map[String, MessageAttribute] = MessageAttribute.parse(fields)
          formFields('TopicArn.?, 'TargetArn.?, 'MessageStructure.?, 'Message) { (topicArnMaybe, targetArnMaybe, messageStructure, message) =>
            try {
              topicArn(topicArnMaybe, targetArnMaybe) match {
                case arnPattern(topic) => complete {
                  val bodies = messageStructure match {
                    case Some("json") => message.parseJson.asJsObject.convertTo[Map[String, String]]
                    case Some(_) => throw new RuntimeException("Invalid MessageStructure value");
                    case None => Map("default" -> message)
                  }
                  (actorRef ? CmdPublish(topic, bodies, messageAttributes)).collect {
                    case m: Message => PublishResponse.publish(m)
                  }.recover {
                    case t: TopicNotFoundException => PublishResponse.topicNotFound(t.getMessage)
                    case t: Throwable => HttpResponse(500, entity = t.getMessage)
                  }
                }
                case _ => complete(HttpResponse(400, entity = "Invalid topic ARN"))
              }
            } catch {
              case e: InvalidTopicArnException => complete(HttpResponse(400, entity = e.getMessage))
              case e: RuntimeException => complete(HttpResponse(400, entity = e.getMessage))
            }
          }
        } ~
          complete(HttpResponse(400, entity = "TopicArn is required"))
      }
    }
  }

  private def topicArn(topicArnMaybe: Option[String], targetArnMaybe: Option[String]): String = {
    topicArnMaybe.getOrElse(targetArnMaybe.getOrElse(throw InvalidTopicArnException("Neither TopicArn nor TargetArn provided")))
  }
} 
Example 195
Source File: SubscribeApi.scala    From sns   with Apache License 2.0 5 votes vote down vote up
package me.snov.sns.api

import akka.actor.ActorRef
import akka.actor.Status.{Success, Failure}
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.util.Timeout
import me.snov.sns.actor.SubscribeActor.{CmdListSubscriptions, CmdListSubscriptionsByTopic, CmdSubscribe, CmdUnsubscribe,CmdSetSubscriptionAttributes,CmdGetSubscriptionAttributes}
import me.snov.sns.model.Subscription
import me.snov.sns.response.SubscribeResponse

import scala.concurrent.ExecutionContext

object SubscribeApi {
  private val arnPattern = """([\w+_:-]{1,512})""".r

  def route(actorRef: ActorRef)(implicit timeout: Timeout, ec: ExecutionContext): Route = {
    pathSingleSlash {
      formField('Action ! "Subscribe") {
        formFields('Endpoint, 'Protocol, 'TopicArn) { (endpoint, protocol, topicArn) =>
          complete {
            (actorRef ? CmdSubscribe(topicArn, protocol, endpoint)).mapTo[Subscription] map {
              SubscribeResponse.subscribe
            }
          }
        } ~
          complete(HttpResponse(400, entity = "Endpoint, Protocol, TopicArn are required"))
      } ~
        formField('Action ! "ListSubscriptionsByTopic") {
          formField('TopicArn) {
            case arnPattern(topicArn) => complete {
              (actorRef ? CmdListSubscriptionsByTopic(topicArn)).mapTo[Iterable[Subscription]] map {
                SubscribeResponse.listByTopic
              }
            }
            case _ => complete(HttpResponse(400, entity = "Invalid topic ARN"))
          } ~
            complete(HttpResponse(400, entity = "TopicArn is missing"))
        } ~
        formField('Action ! "ListSubscriptions") {
          complete {
            (actorRef ? CmdListSubscriptions()).mapTo[Iterable[Subscription]] map {
              SubscribeResponse.list
            }
          }
        } ~
        formField('Action ! "Unsubscribe") {
          formField('SubscriptionArn) { (arn) =>
            complete {
              (actorRef ? CmdUnsubscribe(arn)).map {
                case Success => SubscribeResponse.unsubscribe
                case _ => HttpResponse(404, entity = "NotFound")
              }
            }
          } ~
          complete(HttpResponse(400, entity = "SubscriptionArn is missing"))
        } ~
        formField('Action ! "SetSubscriptionAttributes") {
          formField('SubscriptionArn, 'AttributeName, 'AttributeValue) { (arn, name, value) =>
            complete {
              (actorRef ? CmdSetSubscriptionAttributes(arn, name, value)).map {
                case Success => SubscribeResponse.setSubscriptionAttributes
                case Failure(ex) => HttpResponse(404, entity = "NotFound")
              }
            }
          } ~
          complete(HttpResponse(400, entity = "SubscriptionArn is missing"))
        } ~
        formField('Action ! "GetSubscriptionAttributes") {
          formField('SubscriptionArn) { (arn) =>
            complete {
              (actorRef ? CmdGetSubscriptionAttributes(arn)).mapTo[Option[Map[String,String]]] map { attributes =>
                attributes
                  .map(SubscribeResponse.getSubscriptionAttributes)
                  .getOrElse {
                    HttpResponse(404, entity = "Not Found")
                  }
              }
            }
          } ~
          complete(HttpResponse(400, entity = "SubscriptionArn is missing"))
        }
    }
  }
} 
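Each ask in SubscribeApi returns a Future that fails with an AskTimeoutException if the actor does not reply within the implicit Timeout, and Akka HTTP's default exception handler turns such a failure into a 500. A small self-contained illustration of that failure mode outside any route (the actor and message are placeholders):

import akka.actor.{Actor, ActorSystem, Props}
import akka.pattern.{ask, AskTimeoutException}
import akka.util.Timeout
import scala.concurrent.Await
import scala.concurrent.duration._

object AskTimeoutDemo extends App {
  val system = ActorSystem("demo")
  // An actor that swallows every message and never replies.
  val silent = system.actorOf(Props(new Actor { def receive = { case _ => () } }))
  implicit val timeout: Timeout = Timeout(100.millis)

  try Await.result(silent ? "ListSubscriptions", 1.second)
  catch { case _: AskTimeoutException => println("ask timed out after 100 ms") }
  finally system.terminate()
}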
Example 196
Source File: TopicSpec.scala    From sns   with Apache License 2.0 5 votes vote down vote up
package me.snov.sns.api

import java.util.concurrent.TimeUnit

import akka.actor.ActorRef
import akka.http.scaladsl.model.{FormData, HttpResponse, StatusCodes}
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.testkit.{TestActor, TestProbe}
import akka.util.Timeout
import me.snov.sns.actor.SubscribeActor.{CmdDeleteTopic, CmdCreateTopic}
import me.snov.sns.model.Topic
import org.scalatest.{Matchers, WordSpec}

class TopicSpec extends WordSpec with Matchers with ScalatestRouteTest {
  implicit val timeout = new Timeout(100, TimeUnit.MILLISECONDS)

  val probe = TestProbe()
  val route = TopicApi.route(probe.ref)

  "Requires topic name" in {
    Post("/", FormData(Map("Action" -> "CreateTopic"))) ~> route ~> check {
      status shouldBe StatusCodes.BadRequest
    }
  }

  "Validates topic name" in {
    Post("/", FormData(Map("Action" -> "CreateTopic", "Name" -> "f$$"))) ~> route ~> check {
      status shouldBe StatusCodes.BadRequest
    }
  }

  "TopicDelete validates topic name" in {
    Post("/", FormData(Map("Action" -> "DeleteTopic", "TopicArn" -> "f$$"))) ~> route ~> check {
      status shouldBe StatusCodes.BadRequest
    }
  }

  "Sends create command to actor" in {
    probe.setAutoPilot(new TestActor.AutoPilot {
      def run(sender: ActorRef, msg: Any) = {
        sender ! new Topic("foo", "bar")
        this
      }
    })
    Post("/", FormData(Map("Action" -> "CreateTopic", "Name" -> "foo"))) ~> route ~> check {
      probe.expectMsg(CmdCreateTopic("foo"))
    }
  }

  "Sends delete command to actor" in {
    probe.setAutoPilot(new TestActor.AutoPilot {
      def run(sender: ActorRef, msg: Any) = {
        sender ! new Topic("foo", "bar")
        this
      }
    })
    Post("/", FormData(Map("Action" -> "DeleteTopic", "TopicArn" -> "arn-foo"))) ~> route ~> check {
      probe.expectMsg(CmdDeleteTopic("arn-foo"))
    }
  }
} 
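The spec builds its implicit Timeout from a raw count and a TimeUnit. An equivalent construction using the duration DSL, shown here only as a stylistic alternative, would be:

import akka.util.Timeout
import scala.concurrent.duration._

implicit val timeout: Timeout = Timeout(100.millis) // same as new Timeout(100, TimeUnit.MILLISECONDS)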
Example 197
Source File: PublishSpec.scala    From sns   with Apache License 2.0 5 votes vote down vote up
package me.snov.sns.api

import java.util.concurrent.TimeUnit

import akka.actor.ActorRef
import akka.http.scaladsl.model.{FormData, StatusCodes}
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.testkit.{TestActor, TestProbe}
import akka.util.Timeout
import me.snov.sns.actor.PublishActor.CmdPublish
import me.snov.sns.model.{Message, MessageAttribute}
import org.scalatest.{Matchers, WordSpec}

class PublishSpec extends WordSpec with Matchers with ScalatestRouteTest {
  implicit val timeout = new Timeout(100, TimeUnit.MILLISECONDS)

  val probe = TestProbe()
  val route = PublishApi.route(probe.ref)

  "Publish requires topic ARN" in {
    val params = Map("Action" -> "Publish")
    Post("/", FormData(params)) ~> route ~> check {
      status shouldBe StatusCodes.BadRequest
    }
  }

  "Sends publish command" in {
    val params = Map(
      "Action" -> "Publish",
      "TopicArn" -> "foo",
      "Message" -> "bar"
    )

    probe.setAutoPilot(new TestActor.AutoPilot {
      def run(sender: ActorRef, msg: Any) = {
        sender ! Message(Map("default" -> "foo"))
        this
      }
    })
    Post("/", FormData(params)) ~> route ~> check {
      probe.expectMsg(CmdPublish("foo", Map("default" -> "bar"), Map.empty))
    }
  }

  "Sends publish command to TargetArn" in {
    val params = Map(
      "Action" -> "Publish",
      "TargetArn" -> "foo",
      "Message" -> "bar"
    )

    probe.setAutoPilot(new TestActor.AutoPilot {
      def run(sender: ActorRef, msg: Any) = {
        sender ! Message(Map("default" -> "foo"))
        this
      }
    })
    Post("/", FormData(params)) ~> route ~> check {
      probe.expectMsg(CmdPublish("foo", Map("default" -> "bar"), Map.empty))
    }
  }

  "Sends publish command with attributes" in {
    val params = Map(
      "Action" -> "Publish",
      "TopicArn" -> "foo",
      "Message" -> "bar",
      "MessageAttributes.entry.1.Value.DataType" -> "String",
      "MessageAttributes.entry.1.Value.StringValue" -> "AttributeValue",
      "MessageAttributes.entry.1.Name" -> "AttributeName"
    )

    probe.setAutoPilot(new TestActor.AutoPilot {
      def run(sender: ActorRef, msg: Any) = {
        sender ! Message(Map("default" -> "foo"), messageAttributes = Map("AttributeName" -> MessageAttribute("StringValue", "AttributeValue")))
        this
      }
    })
    Post("/", FormData(params)) ~> route ~> check {
      probe.expectMsg(CmdPublish("foo", Map("default" -> "bar"), Map("AttributeName" -> MessageAttribute("StringValue", "AttributeValue"))))
    }
  }
} 
Example 198
Source File: RestServices.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.services

import scala.concurrent.Await
import scala.concurrent.duration._

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.{Route, _}
import akka.stream.ActorMaterializer
import akka.util.Timeout
import org.apache.commons.lang.exception.ExceptionUtils

import org.apache.gearpump.jarstore.JarStoreClient
import org.apache.gearpump.util.{Constants, LogUtil}
// NOTE: This cannot be removed!!!
import org.apache.gearpump.services.util.UpickleUtil._


class RestServices(master: ActorRef, mat: ActorMaterializer, system: ActorSystem)
  extends RouteService {

  private val LOG = LogUtil.getLogger(getClass)

  implicit val timeout = Constants.FUTURE_TIMEOUT

  private val config = system.settings.config

  private val jarStoreClient = new JarStoreClient(config, system)

  private val securityEnabled = config.getBoolean(
    Constants.GEARPUMP_UI_SECURITY_AUTHENTICATION_ENABLED)

  private val supervisorPath = system.settings.config.getString(
    Constants.GEARPUMP_SERVICE_SUPERVISOR_PATH)

  private val myExceptionHandler: ExceptionHandler = ExceptionHandler {
    case ex: Throwable => {
      extractUri { uri =>
        LOG.error(s"Request to $uri could not be handled normally", ex)
        complete(InternalServerError, ExceptionUtils.getStackTrace(ex))
      }
    }
  }

  // Makes sure staticRoute is the final one, as it will try to lookup resource in local path
  // if there is no match in previous routes
  private val static = new StaticService(system, supervisorPath).route

  def supervisor: ActorRef = {
    if (supervisorPath == null || supervisorPath.isEmpty()) {
      null
    } else {
      val actorRef = system.actorSelection(supervisorPath).resolveOne()
      Await.result(actorRef, new Timeout(Duration.create(5, "seconds")).duration)
    }
  }

  override def route: Route = {
    if (securityEnabled) {
      val security = new SecurityService(services, system)
      handleExceptions(myExceptionHandler) {
        security.route ~ static
      }
    } else {
      handleExceptions(myExceptionHandler) {
        services.route ~ static
      }
    }
  }

  private def services: RouteService = {

    val admin = new AdminService(system)
    val masterService = new MasterService(master, jarStoreClient, system)
    val worker = new WorkerService(master, system)
    val app = new AppMasterService(master, jarStoreClient, system)
    val sup = new SupervisorService(master, supervisor, system)

    new RouteService {
      override def route: Route = {
        admin.route ~ sup.route ~ masterService.route ~ worker.route ~ app.route
      }
    }
  }
} 
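The supervisor lookup above wraps a 5-second duration in a Timeout only to read its .duration back for Await.result; Timeout is a thin wrapper around a FiniteDuration. A shorter equivalent for that lookup could look like this (a sketch, not a change to the project code):

import akka.actor.{ActorRef, ActorSystem}
import scala.concurrent.Await
import scala.concurrent.duration._

// Resolve the supervisor actor, blocking for at most 5 seconds.
def resolveSupervisor(system: ActorSystem, supervisorPath: String): ActorRef =
  Await.result(system.actorSelection(supervisorPath).resolveOne(5.seconds), 5.seconds)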
Example 199
Source File: SinkBridgeTask.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.akkastream.task

import java.time.Instant
import java.util
import java.util.concurrent.TimeUnit

import akka.actor.Actor.Receive
import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.util.Timeout
import org.apache.gearpump.Message
import org.apache.gearpump.akkastream.task.SinkBridgeTask.RequestMessage
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.cluster.client.ClientContext
import org.apache.gearpump.streaming.ProcessorId
import org.apache.gearpump.streaming.appmaster.AppMaster.{LookupTaskActorRef, TaskActorRef}
import org.apache.gearpump.streaming.task.{Task, TaskContext, TaskId}
import org.apache.gearpump.util.LogUtil
import org.reactivestreams.{Publisher, Subscriber, Subscription}


class SinkBridgeTask(taskContext : TaskContext, userConf : UserConfig)
  extends Task(taskContext, userConf) {
  import taskContext.taskId

  val queue = new util.LinkedList[Message]()
  var subscriber: ActorRef = _

  var request: Int = 0

  override def onStart(startTime : Instant) : Unit = {}

  override def onNext(msg: Message) : Unit = {
    queue.add(msg)
    trySendingData()
  }

  override def onStop() : Unit = {}

  private def trySendingData(): Unit = {
    if (subscriber != null) {
      (0 to request).map(_ => queue.poll()).filter(_ != null).foreach { msg =>
        subscriber ! msg.value
        request -= 1
      }
    }
  }

  override def receiveUnManagedMessage: Receive = {
    case RequestMessage(n) =>
      this.subscriber = sender
      LOG.info("the downstream has requested " + n + " messages from " + subscriber)
      request += n.toInt
      trySendingData()
    case msg =>
      LOG.error("Failed! Received unknown message " + "taskId: " + taskId + ", " + msg.toString)
  }
}

object SinkBridgeTask {

  case class RequestMessage(number: Int)

  class SinkBridgeTaskClient(system: ActorSystem, context: ClientContext, appId: Int,
      processorId: ProcessorId) extends Publisher[AnyRef] with Subscription {
    private val taskId = TaskId(processorId, index = 0)
    private val LOG = LogUtil.getLogger(getClass)

    private var actor: ActorRef = _
    import system.dispatcher

    private val task =
      context.askAppMaster[TaskActorRef](appId, LookupTaskActorRef(taskId)).map { container =>
        // println("Successfully resolved taskRef for taskId " + taskId + ", " + container.task)
        container.task
      }

    override def subscribe(subscriber: Subscriber[_ >: AnyRef]): Unit = {
      this.actor = system.actorOf(Props(new ClientActor(subscriber)))
      subscriber.onSubscribe(this)
    }

    override def cancel(): Unit = ()

    private implicit val timeout = Timeout(5, TimeUnit.SECONDS)

    override def request(l: Long): Unit = {
      task.foreach{ task =>
        task.tell(RequestMessage(l.toInt), actor)
      }
    }
  }

  class ClientActor(subscriber: Subscriber[_ >: AnyRef]) extends Actor {
    def receive: Receive = {
      case result: AnyRef =>
        subscriber.onNext(result)
    }
  }
} 
Example 200
Source File: RunningApplication.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.cluster.client

import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import org.apache.gearpump.cluster.ClientToMaster.{RegisterAppResultListener, ResolveAppId, ShutdownApplication}
import org.apache.gearpump.cluster.MasterToClient._
import org.apache.gearpump.cluster.client.RunningApplication._
import org.apache.gearpump.util.{ActorUtil, LogUtil}
import org.slf4j.Logger
import java.time.Duration
import java.util.concurrent.TimeUnit

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}

class RunningApplication(val appId: Int, master: ActorRef, timeout: Timeout) {
  lazy val appMaster: Future[ActorRef] = resolveAppMaster(appId)

  def shutDown(): Unit = {
    val result = ActorUtil.askActor[ShutdownApplicationResult](master,
      ShutdownApplication(appId), timeout)
    result.appId match {
      case Success(_) =>
      case Failure(ex) => throw ex
    }
  }

  
  def waitUntilFinish(): Unit = {
    this.waitUntilFinish(INF_DURATION)
  }

  def waitUntilFinish(duration: Duration): Unit = {
    val result = ActorUtil.askActor[ApplicationResult](master,
      RegisterAppResultListener(appId), new Timeout(duration.getSeconds, TimeUnit.SECONDS))
    if (result.appId == appId) {
      result match {
        case failed: ApplicationFailed =>
          throw failed.error
        case _: ApplicationSucceeded =>
          LOG.info(s"Application $appId succeeded")
        case _: ApplicationTerminated =>
          LOG.info(s"Application $appId terminated")
      }
    } else {
      LOG.warn(s"Received unexpected result $result for application $appId")
    }
  }

  def askAppMaster[T](msg: Any): Future[T] = {
    appMaster.flatMap(_.ask(msg)(timeout).asInstanceOf[Future[T]])
  }

  private def resolveAppMaster(appId: Int): Future[ActorRef] = {
    master.ask(ResolveAppId(appId))(timeout).
      asInstanceOf[Future[ResolveAppIdResult]].map(_.appMaster.get)
  }
}

object RunningApplication {
  private val LOG: Logger = LogUtil.getLogger(getClass)
  // This magic number is derived from Akka's configuration, which is the maximum delay
  private val INF_DURATION = Duration.ofSeconds(2147482)
}
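askAppMaster and resolveAppMaster pass the Timeout to ask explicitly instead of relying on an implicit in scope; the two call shapes are interchangeable. A minimal sketch of both forms (method names are illustrative):

import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.Future
import scala.concurrent.duration._

def queryExplicit(ref: ActorRef, msg: Any, timeout: Timeout): Future[Any] =
  ref.ask(msg)(timeout) // timeout supplied explicitly, as in askAppMaster

def queryImplicit(ref: ActorRef, msg: Any): Future[Any] = {
  implicit val timeout: Timeout = Timeout(30.seconds)
  ref ? msg // timeout resolved implicitly, as in most of the examples above
}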