akka.stream.scaladsl.Source Scala Examples
The following examples show how to use akka.stream.scaladsl.Source.
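Before diving into the project-specific examples, here is a minimal, self-contained sketch of the basic Source lifecycle: build a blueprint, transform it, then materialize it with a Sink. It assumes an Akka 2.5-era setup with an implicit ActorMaterializer, matching the style used throughout the examples below; the object and value names are illustrative only.

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }

import scala.concurrent.Await
import scala.concurrent.duration._

object MinimalSourceExample extends App {
  implicit val system: ActorSystem = ActorSystem("minimal")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  // A Source is only a blueprint: nothing runs until it is materialized with a Sink.
  val numbers: Source[Int, akka.NotUsed] = Source(1 to 5)

  // map is applied lazily; runWith materializes the stream and returns the Sink's result.
  val sum = numbers.map(_ * 2).runWith(Sink.fold(0)(_ + _))
  println(Await.result(sum, 3.seconds)) // prints 30

  system.terminate()
}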
Example 1
Source File: ExampleApp.scala From akka-http-json with Apache License 2.0
package de.heikoseeberger.akkahttpjsoniterscala

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.server.{ Directives, Route }
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.scaladsl.Source
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.io.StdIn

object ExampleApp {

  final case class Foo(bar: String)

  def main(args: Array[String]): Unit = {
    implicit val system: ActorSystem = ActorSystem()

    Http().bindAndHandle(route, "127.0.0.1", 8000)

    StdIn.readLine("Hit ENTER to exit")
    Await.ready(system.terminate(), Duration.Inf)
  }

  def route(implicit sys: ActorSystem): Route = {
    import Directives._
    import JsoniterScalaSupport._
    import com.github.plokhotnyuk.jsoniter_scala.core._
    import com.github.plokhotnyuk.jsoniter_scala.macros._

    // here you should provide implicit codecs for in/out messages of all routes
    implicit val codec: JsonValueCodec[Foo] = JsonCodecMaker.make[Foo](CodecMakerConfig)

    // also, you can provide implicit reader/writer configs to override the defaults:
    //
    // implicit val readerConfig = ReaderConfig.withThrowReaderExceptionWithStackTrace(true)
    // implicit val writerConfig = WriterConfig.withIndentionStep(2)

    pathSingleSlash {
      post {
        entity(as[Foo]) { foo =>
          complete {
            foo
          }
        }
      }
    } ~ pathPrefix("stream") {
      post {
        entity(as[SourceOf[Foo]]) { fooSource: SourceOf[Foo] =>
          complete(fooSource.throttle(1, 2.seconds))
        }
      } ~ get {
        pathEndOrSingleSlash {
          complete(
            Source(0 to 5)
              .throttle(1, 1.seconds)
              .map(i => Foo(s"bar-$i"))
          )
        } ~ pathPrefix("remote") {
          onSuccess(Http().singleRequest(HttpRequest(uri = "http://localhost:8000/stream"))) { response =>
            complete(Unmarshal(response).to[SourceOf[Foo]])
          }
        }
      }
    }
  }
}
Example 2
Source File: S3CopyService.scala From iep-apps with Apache License 2.0
package com.netflix.atlas.persistence

import java.io.File
import java.nio.file.Files
import java.nio.file.Paths

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.KillSwitch
import akka.stream.KillSwitches
import akka.stream.scaladsl.Keep
import akka.stream.scaladsl.Source
import com.netflix.atlas.core.util.Streams
import com.netflix.iep.service.AbstractService
import com.netflix.spectator.api.Registry
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging
import javax.inject.Inject
import javax.inject.Singleton

import scala.concurrent.duration._

@Singleton
class S3CopyService @Inject()(
  val config: Config,
  val registry: Registry,
  implicit val system: ActorSystem
) extends AbstractService
    with StrictLogging {

  private val dataDir = config.getString("atlas.persistence.local-file.data-dir")

  private implicit val mat = ActorMaterializer()
  private var killSwitch: KillSwitch = _
  private val s3Config = config.getConfig("atlas.persistence.s3")

  private val cleanupTimeoutMs = s3Config.getDuration("cleanup-timeout").toMillis
  private val maxInactiveMs = s3Config.getDuration("max-inactive-duration").toMillis
  private val maxFileDurationMs =
    config.getDuration("atlas.persistence.local-file.max-duration").toMillis

  require(
    maxInactiveMs > maxFileDurationMs,
    "`max-inactive-duration` MUST be longer than `max-duration`, otherwise a file may be renamed before a normal write completes"
  )

  override def startImpl(): Unit = {
    logger.info("Starting service")
    killSwitch = Source
      .tick(1.second, 5.seconds, NotUsed)
      .viaMat(KillSwitches.single)(Keep.right)
      .flatMapMerge(Int.MaxValue, _ => Source(FileUtil.listFiles(new File(dataDir))))
      .toMat(new S3CopySink(s3Config, registry, system))(Keep.left)
      .run()
  }

  override def stopImpl(): Unit = {
    logger.info("Stopping service")
    waitForCleanup()
    if (killSwitch != null) killSwitch.shutdown()
  }

  private def waitForCleanup(): Unit = {
    logger.info("Waiting for cleanup")
    val start = System.currentTimeMillis
    while (hasMoreFiles) {
      if (System.currentTimeMillis() > start + cleanupTimeoutMs) {
        logger.error("Cleanup timeout")
        return
      }
      Thread.sleep(1000)
    }
    logger.info("Cleanup done")
  }

  private def hasMoreFiles: Boolean = {
    try {
      Streams.scope(Files.list(Paths.get(dataDir))) { dir =>
        dir.anyMatch(f => Files.isRegularFile(f))
      }
    } catch {
      case e: Exception =>
        logger.error(s"Error checking hasMoreFiles in $dataDir", e)
        true // assume there are more files on error, so the caller retries
    }
  }
}
Example 3
Source File: EvalFlowSuite.scala From iep-apps with Apache License 2.0
package com.netflix.atlas.stream

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source
import com.netflix.atlas.akka.DiagnosticMessage
import com.netflix.atlas.eval.stream.Evaluator.DataSource
import com.netflix.atlas.eval.stream.Evaluator.DataSources
import com.netflix.atlas.eval.stream.Evaluator.MessageEnvelope
import com.netflix.spectator.api.NoopRegistry
import com.typesafe.config.ConfigFactory
import org.scalatest.funsuite.AnyFunSuite

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class EvalFlowSuite extends AnyFunSuite {

  private implicit val system = ActorSystem(getClass.getSimpleName)
  private implicit val mat = ActorMaterializer()

  private val config = ConfigFactory.load
  private val registry = new NoopRegistry()
  private val validateNoop: DataSource => Unit = _ => ()

  private val dataSourceStr =
    """[{"id":"abc", "step": 10, "uri":"http://local-dev/api/v1/graph?q=name,a,:eq"}]"""

  test("register and get message") {
    val evalService = new EvalService(config, registry, null, system) {
      override def updateDataSources(streamId: String, dataSources: DataSources): Unit = {
        val handler = getStreamInfo(streamId).handler
        handler.offer(new MessageEnvelope("mockId", DiagnosticMessage.info("mockMsg")))
        handler.complete()
      }
    }

    val evalFlow =
      EvalFlow.createEvalFlow(evalService, DataSourceValidator(10, validateNoop))

    val future = Source
      .single(dataSourceStr)
      .via(evalFlow)
      .filter(envelope => envelope.getId != "_") // filter out heartbeat
      .runWith(Sink.head)
    val messageEnvelope = Await.result(future, Duration.Inf)

    assert(messageEnvelope.getId === "mockId")
  }
}
Example 4
Source File: UpickleCustomizationSupportSpec.scala From akka-http-json with Apache License 2.0
package de.heikoseeberger.akkahttpupickle

import akka.actor.ActorSystem
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.ContentTypes.{ `application/json`, `text/plain(UTF-8)` }
import akka.http.scaladsl.model._
import akka.http.scaladsl.unmarshalling.Unmarshaller.UnsupportedContentTypeException
import akka.http.scaladsl.unmarshalling.{ Unmarshal, Unmarshaller }
import akka.stream.scaladsl.{ Sink, Source }
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AsyncWordSpec
import upickle.AttributeTagged
import upickle.core.Visitor

import scala.concurrent.Await
import scala.concurrent.duration.DurationInt

final class UpickleCustomizationSupportSpec
    extends AsyncWordSpec
    with Matchers
    with BeforeAndAfterAll {

  private implicit val system = ActorSystem()

  object FooApi extends AttributeTagged {
    override implicit val IntWriter: FooApi.Writer[Int] = new Writer[Int] {
      override def write0[V](out: Visitor[_, V], v: Int): V = out.visitString("foo", -1)
    }
  }

  object UpickleFoo extends UpickleCustomizationSupport {
    override type Api = FooApi.type
    override def api: FooApi.type = FooApi
  }

  import UpickleFoo._

  "UpickleCustomizationSupport" should {
    "support custom configuration" in {
      Marshal(123)
        .to[RequestEntity]
        .flatMap(Unmarshal(_).to[String])
        .map(_ shouldBe "foo")
    }
  }

  override protected def afterAll() = {
    Await.ready(system.terminate(), 42.seconds)
    super.afterAll()
  }
}
Example 5
Source File: UpickleSupportSpec.scala From akka-http-json with Apache License 2.0
package de.heikoseeberger.akkahttpupickle

import akka.actor.ActorSystem
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.ContentTypes.{ `application/json`, `text/plain(UTF-8)` }
import akka.http.scaladsl.model._
import akka.http.scaladsl.unmarshalling.Unmarshaller.UnsupportedContentTypeException
import akka.http.scaladsl.unmarshalling.{ Unmarshal, Unmarshaller }
import akka.stream.scaladsl.{ Sink, Source }
import org.scalatest.BeforeAndAfterAll
import upickle.default.{ ReadWriter, macroRW }
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AsyncWordSpec

import scala.concurrent.Await
import scala.concurrent.duration.DurationInt

object UpickleSupportSpec {

  final object Foo {
    implicit val rw: ReadWriter[Foo] = macroRW
  }

  final case class Foo(bar: String) {
    require(bar startsWith "bar", "bar must start with 'bar'!")
  }
}

final class UpickleSupportSpec extends AsyncWordSpec with Matchers with BeforeAndAfterAll {
  import UpickleSupport._
  import UpickleSupportSpec._

  private implicit val system = ActorSystem()

  "UpickleSupport" should {
    "enable marshalling and unmarshalling of case classes" in {
      val foo = Foo("bar")
      Marshal(foo)
        .to[RequestEntity]
        .flatMap(Unmarshal(_).to[Foo])
        .map(_ shouldBe foo)
    }

    "enable streamed marshalling and unmarshalling for json arrays" in {
      val foos = (0 to 100).map(i => Foo(s"bar-$i")).toList
      Marshal(Source(foos))
        .to[RequestEntity]
        .flatMap(entity => Unmarshal(entity).to[SourceOf[Foo]])
        .flatMap(_.runWith(Sink.seq))
        .map(_ shouldBe foos)
    }

    "provide proper error messages for requirement errors" in {
      val entity = HttpEntity(MediaTypes.`application/json`, """{ "bar": "baz" }""")
      Unmarshal(entity)
        .to[Foo]
        .failed
        .map(_ should have message "requirement failed: bar must start with 'bar'!")
    }

    "fail with NoContentException when unmarshalling empty entities" in {
      val entity = HttpEntity.empty(`application/json`)
      Unmarshal(entity)
        .to[Foo]
        .failed
        .map(_ shouldBe Unmarshaller.NoContentException)
    }

    "fail with UnsupportedContentTypeException when Content-Type is not `application/json`" in {
      val entity = HttpEntity("""{ "bar": "bar" }""")
      Unmarshal(entity)
        .to[Foo]
        .failed
        .map(
          _ shouldBe UnsupportedContentTypeException(Some(`text/plain(UTF-8)`), `application/json`)
        )
    }

    "allow unmarshalling with passed in Content-Types" in {
      val foo = Foo("bar")
      val `application/json-home` =
        MediaType.applicationWithFixedCharset("json-home", HttpCharsets.`UTF-8`, "json-home")

      final object CustomUpickleSupport extends UpickleSupport {
        override def unmarshallerContentTypes = List(`application/json`, `application/json-home`)
      }
      import CustomUpickleSupport._

      val entity = HttpEntity(`application/json-home`, """{ "bar": "bar" }""")
      Unmarshal(entity).to[Foo].map(_ shouldBe foo)
    }
  }

  override protected def afterAll() = {
    Await.ready(system.terminate(), 42.seconds)
    super.afterAll()
  }
}
Example 6
Source File: ExampleApp.scala From akka-http-json with Apache License 2.0
package de.heikoseeberger.akkahttpupickle

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.server.Directives
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.scaladsl.Source
import upickle.default.{ ReadWriter, macroRW }

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.io.StdIn

object ExampleApp {

  final object Foo {
    implicit val rw: ReadWriter[Foo] = macroRW
  }

  final case class Foo(bar: String)

  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem()

    Http().bindAndHandle(route, "127.0.0.1", 8000)

    StdIn.readLine("Hit ENTER to exit")
    Await.ready(system.terminate(), Duration.Inf)
  }

  def route(implicit system: ActorSystem) = {
    import Directives._
    import UpickleSupport._

    pathSingleSlash {
      post {
        entity(as[Foo]) { foo =>
          complete {
            foo
          }
        }
      }
    } ~ pathPrefix("stream") {
      post {
        entity(as[SourceOf[Foo]]) { fooSource: SourceOf[Foo] =>
          complete(fooSource.throttle(1, 2.seconds))
        }
      } ~ get {
        pathEndOrSingleSlash {
          complete(
            Source(0 to 5)
              .throttle(1, 1.seconds)
              .map(i => Foo(s"bar-$i"))
          )
        } ~ pathPrefix("remote") {
          onSuccess(Http().singleRequest(HttpRequest(uri = "http://localhost:8000/stream"))) { response =>
            complete(Unmarshal(response).to[SourceOf[Foo]])
          }
        }
      }
    }
  }
}
Example 7
Source File: UpickleCustomizationSupport.scala From akka-http-json with Apache License 2.0
package de.heikoseeberger.akkahttpupickle

import akka.http.javadsl.common.JsonEntityStreamingSupport
import akka.http.scaladsl.common.EntityStreamingSupport
import akka.http.scaladsl.marshalling.{ Marshaller, Marshalling, ToEntityMarshaller }
import akka.http.scaladsl.model.{ ContentTypeRange, HttpEntity, MediaType, MessageEntity }
import akka.http.scaladsl.model.MediaTypes.`application/json`
import akka.http.scaladsl.unmarshalling.{ FromEntityUnmarshaller, Unmarshal, Unmarshaller }
import akka.http.scaladsl.util.FastFuture
import akka.stream.scaladsl.{ Flow, Source }
import akka.util.ByteString
import UpickleCustomizationSupport._

import scala.collection.immutable.Seq
import scala.concurrent.Future
import scala.util.Try
import scala.util.control.NonFatal

// This companion object only exists for binary compatibility as adding methods with default implementations
// (including val's as they create synthetic methods) is not compatible.
private object UpickleCustomizationSupport {

  private def jsonStringUnmarshaller(support: UpickleCustomizationSupport) =
    Unmarshaller.byteStringUnmarshaller
      .forContentTypes(support.unmarshallerContentTypes: _*)
      .mapWithCharset {
        case (ByteString.empty, _) => throw Unmarshaller.NoContentException
        case (data, charset)       => data.decodeString(charset.nioCharset.name)
      }

  private def jsonSourceStringMarshaller(support: UpickleCustomizationSupport) =
    Marshaller.oneOf(support.mediaTypes: _*)(support.sourceByteStringMarshaller)

  private def jsonStringMarshaller(support: UpickleCustomizationSupport) =
    Marshaller.oneOf(support.mediaTypes: _*)(Marshaller.stringMarshaller)
}

// NOTE: this snippet is an excerpt; the declaration of the enclosing
// `trait UpickleCustomizationSupport { ... }` and most of its members are
// elided, so the marshaller below is shown out of its full context.

  implicit def sourceMarshaller[A](implicit
      writes: apiInstance.Writer[A],
      support: JsonEntityStreamingSupport = EntityStreamingSupport.json()
  ): ToEntityMarshaller[SourceOf[A]] =
    jsonSourceStringMarshaller(this).compose(jsonSource[A])
}
Example 8
Source File: ExampleApp.scala From akka-http-json with Apache License 2.0
package de.heikoseeberger.akkahttpargonaut

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.{ HttpRequest, RequestEntity }
import akka.http.scaladsl.server.Directives
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.scaladsl.Source
import argonaut.Argonaut.casecodec1
import argonaut.CodecJson

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.io.StdIn

object ExampleApp {

  final object Foo {
    implicit val fooCodec: CodecJson[Foo] =
      casecodec1(Foo.apply, Foo.unapply)("bar")
  }

  final case class Foo(bar: String)

  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem()

    Http().bindAndHandle(route, "127.0.0.1", 8000)

    StdIn.readLine("Hit ENTER to exit")
    Await.ready(system.terminate(), Duration.Inf)
  }

  def route(implicit sys: ActorSystem) = {
    import ArgonautSupport._
    import Directives._

    pathSingleSlash {
      post {
        entity(as[Foo]) { foo =>
          complete {
            foo
          }
        }
      }
    } ~ pathPrefix("stream") {
      post {
        entity(as[SourceOf[Foo]]) { fooSource: SourceOf[Foo] =>
          import sys._
          Marshal(Source.single(Foo("a"))).to[RequestEntity]
          complete(fooSource.throttle(1, 2.seconds))
        }
      } ~ get {
        pathEndOrSingleSlash {
          complete(
            Source(0 to 5)
              .throttle(1, 1.seconds)
              .map(i => Foo(s"bar-$i"))
          )
        } ~ pathPrefix("remote") {
          onSuccess(Http().singleRequest(HttpRequest(uri = "http://localhost:8000/stream"))) { response =>
            complete(Unmarshal(response).to[SourceOf[Foo]])
          }
        }
      }
    }
  }
}
Example 9
Source File: JsoniterScalaSupportSpec.scala From akka-http-json with Apache License 2.0
package de.heikoseeberger.akkahttpjsoniterscala

import akka.actor.ActorSystem
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.ContentTypes.{ `application/json`, `text/plain(UTF-8)` }
import akka.http.scaladsl.model._
import akka.http.scaladsl.unmarshalling.Unmarshaller.UnsupportedContentTypeException
import akka.http.scaladsl.unmarshalling.{ Unmarshal, Unmarshaller }
import akka.stream.scaladsl.{ Sink, Source }
import com.github.plokhotnyuk.jsoniter_scala.core.JsonValueCodec
import com.github.plokhotnyuk.jsoniter_scala.macros._
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AsyncWordSpec

import scala.concurrent.Await
import scala.concurrent.duration.DurationInt

object JsoniterScalaSupportSpec {

  final case class Foo(bar: String) {
    require(bar startsWith "bar", "bar must start with 'bar'!")
  }
}

final class JsoniterScalaSupportSpec extends AsyncWordSpec with Matchers with BeforeAndAfterAll {
  import JsoniterScalaSupport._
  import JsoniterScalaSupportSpec._

  private implicit val system: ActorSystem = ActorSystem()
  private implicit val codec: JsonValueCodec[Foo] = JsonCodecMaker.make[Foo](CodecMakerConfig)

  "JsoniterScalaSupport" should {
    "enable marshalling and unmarshalling" in {
      val foo = Foo("bar")
      Marshal(foo)
        .to[RequestEntity]
        .flatMap(Unmarshal(_).to[Foo])
        .map(_ shouldBe foo)
    }

    "enable streamed marshalling and unmarshalling for json arrays" in {
      val foos = (0 to 100).map(i => Foo(s"bar-$i")).toList
      Marshal(Source(foos))
        .to[RequestEntity]
        .flatMap(entity => Unmarshal(entity).to[SourceOf[Foo]])
        .flatMap(_.runWith(Sink.seq))
        .map(_ shouldBe foos)
    }

    "provide proper error messages for requirement errors" in {
      val entity = HttpEntity(MediaTypes.`application/json`, """{ "bar": "baz" }""")
      Unmarshal(entity)
        .to[Foo]
        .failed
        .map(_ should have message "requirement failed: bar must start with 'bar'!")
    }

    "fail with NoContentException when unmarshalling empty entities" in {
      val entity = HttpEntity.empty(`application/json`)
      Unmarshal(entity)
        .to[Foo]
        .failed
        .map(_ shouldBe Unmarshaller.NoContentException)
    }

    "fail with UnsupportedContentTypeException when Content-Type is not `application/json`" in {
      val entity = HttpEntity("""{ "bar": "bar" }""")
      Unmarshal(entity)
        .to[Foo]
        .failed
        .map(
          _ shouldBe UnsupportedContentTypeException(Some(`text/plain(UTF-8)`), `application/json`)
        )
    }

    "allow unmarshalling with passed in Content-Types" in {
      val foo = Foo("bar")
      val `application/json-home` =
        MediaType.applicationWithFixedCharset("json-home", HttpCharsets.`UTF-8`, "json-home")

      final object CustomJsoniterScalaSupport extends JsoniterScalaSupport {
        override def unmarshallerContentTypes: List[ContentTypeRange] =
          List(`application/json`, `application/json-home`)
      }
      import CustomJsoniterScalaSupport._

      val entity = HttpEntity(`application/json-home`, """{ "bar": "bar" }""")
      Unmarshal(entity).to[Foo].map(_ shouldBe foo)
    }
  }

  override protected def afterAll(): Unit = {
    Await.ready(system.terminate(), 42.seconds)
    super.afterAll()
  }
}
Example 10
Source File: RestPi.scala From apache-spark-test with Apache License 2.0
package com.github.dnvriend

import akka.actor.ActorSystem
import akka.event.{ Logging, LoggingAdapter }
import akka.http.scaladsl._
import akka.http.scaladsl.common.{ EntityStreamingSupport, JsonEntityStreamingSupport }
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.server.{ Directives, Route }
import akka.stream.scaladsl.{ Flow, Source }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.util.ByteString
import com.github.dnvriend.spark.CalculatePi
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import spray.json.DefaultJsonProtocol

import scala.concurrent.{ ExecutionContext, Future }

object RestPi extends App with Directives with SprayJsonSupport with DefaultJsonProtocol {
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val log: LoggingAdapter = Logging(system, this.getClass)

  val spark = SparkSession.builder()
    .config("spark.sql.warehouse.dir", "file:/tmp/spark-warehouse")
    .config("spark.scheduler.mode", "FAIR")
    .config("spark.sql.crossJoin.enabled", "true")
    .master("local") // single-threaded; "local[*]" would use as many threads as cores
    .appName("RestPi") // the appName parameter is the name shown on the cluster UI
    .getOrCreate()

  final case class Pi(pi: Double)

  implicit val piJsonFormat = jsonFormat1(Pi)

  val start = ByteString.empty
  val sep = ByteString("\n")
  val end = ByteString.empty

  implicit val jsonStreamingSupport: JsonEntityStreamingSupport = EntityStreamingSupport.json()
    .withFramingRenderer(Flow[ByteString].intersperse(start, sep, end))
    .withParallelMarshalling(parallelism = 8, unordered = true)

  def sparkContext: SparkContext = spark.newSession().sparkContext

  def calculatePi(num: Long = 1000000, slices: Int = 2): Future[Double] =
    Future(CalculatePi(sparkContext, num, slices)).map(count => slices.toDouble * count / (num - 1))

  val route: Route =
    pathEndOrSingleSlash {
      complete(calculatePi().map(Pi))
    } ~ path("pi" / LongNumber / IntNumber) { (num, slices) =>
      complete(calculatePi(num, slices).map(Pi))
    } ~ path("stream" / "pi" / LongNumber) { num =>
      complete(Source.fromFuture(calculatePi()).map(Pi)
        .flatMapConcat(Source.repeat).take(num))
    }

  Http().bindAndHandle(route, "0.0.0.0", 8008)

  sys.addShutdownHook {
    spark.stop()
    system.terminate()
  }
}
Example 11
Source File: ExampleApp.scala From akka-http-json with Apache License 2.0
package de.heikoseeberger.akkahttpplayjson

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.server.Directives
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.scaladsl.Source
import play.api.libs.json.{ Format, JsValue, Json }

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.io.StdIn

object ExampleApp {

  final object Foo {
    implicit val fooFormat: Format[Foo] = Json.format[Foo]
  }

  final case class Foo(bar: String)

  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem()

    Http().bindAndHandle(route, "127.0.0.1", 8000)

    StdIn.readLine("Hit ENTER to exit")
    Await.ready(system.terminate(), Duration.Inf)
  }

  def route(implicit sys: ActorSystem) = {
    import Directives._
    import PlayJsonSupport._

    implicit val prettyPrint: JsValue => String = Json.prettyPrint

    pathSingleSlash {
      post {
        entity(as[Foo]) { foo =>
          complete {
            foo
          }
        }
      }
    } ~ pathPrefix("stream") {
      post {
        entity(as[SourceOf[Foo]]) { fooSource: SourceOf[Foo] =>
          complete(fooSource.throttle(1, 2.seconds))
        }
      } ~ get {
        pathEndOrSingleSlash {
          complete(
            Source(0 to 5)
              .throttle(1, 1.seconds)
              .map(i => Foo(s"bar-$i"))
          )
        } ~ pathPrefix("remote") {
          onSuccess(Http().singleRequest(HttpRequest(uri = "http://localhost:8000/stream"))) { response =>
            complete(Unmarshal(response).to[SourceOf[Foo]])
          }
        }
      }
    }
  }
}
Example 12
Source File: ExampleApp.scala From akka-http-json with Apache License 2.0
package de.heikoseeberger.akkahttpjson4s

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.server.Directives
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.scaladsl.Source
import org.json4s.{ DefaultFormats, jackson }

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.io.StdIn

object ExampleApp {

  final case class Foo(bar: String)

  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem()

    Http().bindAndHandle(route, "127.0.0.1", 8000)

    StdIn.readLine("Hit ENTER to exit")
    Await.ready(system.terminate(), Duration.Inf)
  }

  def route(implicit sys: ActorSystem) = {
    import Directives._
    import Json4sSupport._

    implicit val serialization = jackson.Serialization // or native.Serialization
    implicit val formats = DefaultFormats

    pathSingleSlash {
      post {
        entity(as[Foo]) { foo =>
          complete {
            foo
          }
        }
      }
    } ~ pathPrefix("stream") {
      post {
        entity(as[SourceOf[Foo]]) { fooSource: SourceOf[Foo] =>
          complete(fooSource.throttle(1, 2.seconds))
        }
      } ~ get {
        pathEndOrSingleSlash {
          complete(
            Source(0 to 5)
              .throttle(1, 1.seconds)
              .map(i => Foo(s"bar-$i"))
          )
        } ~ pathPrefix("remote") {
          onSuccess(Http().singleRequest(HttpRequest(uri = "http://localhost:8000/stream"))) { response =>
            complete(Unmarshal(response).to[SourceOf[Foo]])
          }
        }
      }
    }
  }
}
Example 13
Source File: ExampleApp.scala From akka-http-json with Apache License 2.0
package de.heikoseeberger.akkahttpavro4s

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.{ HttpRequest, RequestEntity }
import akka.http.scaladsl.server.Directives
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.scaladsl.Source
import com.sksamuel.avro4s.{ FromRecord, SchemaFor, ToRecord }

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.io.StdIn

object ExampleApp {

  final object Foo {
    implicit val schemaFor = SchemaFor[Foo]
    implicit val toRecord = ToRecord[Foo]
    implicit val fromRecord = FromRecord[Foo]
  }

  final case class Foo(bar: String)

  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem()

    Http().bindAndHandle(route, "127.0.0.1", 8000)

    StdIn.readLine("Hit ENTER to exit")
    Await.ready(system.terminate(), Duration.Inf)
  }

  def route(implicit sys: ActorSystem) = {
    import AvroSupport._
    import Directives._

    pathSingleSlash {
      post {
        entity(as[Foo]) { foo =>
          complete {
            foo
          }
        }
      }
    } ~ pathPrefix("stream") {
      post {
        entity(as[SourceOf[Foo]]) { fooSource: SourceOf[Foo] =>
          import sys._
          Marshal(Source.single(Foo("a"))).to[RequestEntity]
          complete(fooSource.throttle(1, 2.seconds))
        }
      } ~ get {
        pathEndOrSingleSlash {
          complete(
            Source(0 to 5)
              .throttle(1, 1.seconds)
              .map(i => Foo(s"bar-$i"))
          )
        } ~ pathPrefix("remote") {
          onSuccess(Http().singleRequest(HttpRequest(uri = "http://localhost:8000/stream"))) { response =>
            complete(Unmarshal(response).to[SourceOf[Foo]])
          }
        }
      }
    }
  }
}
Example 14
Source File: ExampleApp.scala From akka-http-json with Apache License 2.0
package de.heikoseeberger.akkahttpcirce

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.{ HttpRequest, RequestEntity }
import akka.http.scaladsl.server.Directives
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.scaladsl.Source

import scala.io.StdIn
import scala.concurrent.duration._

object ExampleApp {

  private final case class Foo(bar: String)

  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem()

    Http().bindAndHandle(route, "127.0.0.1", 8000)

    StdIn.readLine("Hit ENTER to exit")
    system.terminate()
  }

  private def route(implicit sys: ActorSystem) = {
    import Directives._
    import FailFastCirceSupport._
    import io.circe.generic.auto._

    pathSingleSlash {
      post {
        entity(as[Foo]) { foo =>
          complete {
            foo
          }
        }
      }
    } ~ pathPrefix("stream") {
      post {
        entity(as[SourceOf[Foo]]) { fooSource: SourceOf[Foo] =>
          import sys._
          Marshal(Source.single(Foo("a"))).to[RequestEntity]
          complete(fooSource.throttle(1, 2.seconds))
        }
      } ~ get {
        pathEndOrSingleSlash {
          complete(
            Source(0 to 5)
              .throttle(1, 1.seconds)
              .map(i => Foo(s"bar-$i"))
          )
        } ~ pathPrefix("remote") {
          onSuccess(Http().singleRequest(HttpRequest(uri = "http://localhost:8000/stream"))) { response =>
            complete(Unmarshal(response).to[SourceOf[Foo]])
          }
        }
      }
    }
  }
}
Example 15
Source File: ExampleApp.scala From akka-http-json with Apache License 2.0
package de.heikoseeberger.akkahttpjackson

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.server.Directives
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.scaladsl.Source

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.io.StdIn

object ExampleApp {

  final case class Foo(bar: String)

  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem()

    // provide an implicit ObjectMapper if you want serialization/deserialization to use it
    // instead of a default ObjectMapper configured only with DefaultScalaModule provided
    // by JacksonSupport
    //
    // for example:
    //
    // implicit val objectMapper = new ObjectMapper()
    //   .registerModule(DefaultScalaModule)
    //   .registerModule(new GuavaModule())

    Http().bindAndHandle(route, "127.0.0.1", 8000)

    StdIn.readLine("Hit ENTER to exit")
    Await.ready(system.terminate(), Duration.Inf)
  }

  def route(implicit sys: ActorSystem) = {
    import Directives._
    import JacksonSupport._

    pathSingleSlash {
      post {
        entity(as[Foo]) { foo =>
          complete {
            foo
          }
        }
      }
    } ~ pathPrefix("stream") {
      post {
        entity(as[SourceOf[Foo]]) { fooSource: SourceOf[Foo] =>
          complete(fooSource.throttle(1, 2.seconds))
        }
      } ~ get {
        pathEndOrSingleSlash {
          complete(
            Source(0 to 5)
              .throttle(1, 1.seconds)
              .map(i => Foo(s"bar-$i"))
          )
        } ~ pathPrefix("remote") {
          onSuccess(Http().singleRequest(HttpRequest(uri = "http://localhost:8000/stream"))) { response =>
            complete(Unmarshal(response).to[SourceOf[Foo]])
          }
        }
      }
    }
  }
}
Example 16
Source File: FlowProducer.scala From kafka-scala-api with Apache License 2.0
package com.example.producer

import akka.kafka.ProducerMessage
import akka.kafka.scaladsl.Producer
import akka.stream.scaladsl.{Sink, Source}
import org.apache.kafka.clients.producer.ProducerRecord
import com.example._

object FlowProducer extends App {

  val done = Source(100 to 111)
    .map { n =>
      val partition = 1
      ProducerMessage.Message(new ProducerRecord[Array[Byte], String](
        topic, partition, null, n.toString
      ), n)
    }
    .via(Producer.flow(producerSettings))
    .map { result =>
      val record = result.message.record
      println(s"${record.topic}/${record.partition} ${result.offset}: ${record.value}" +
        s"(${result.message.passThrough})")
      result
    }
    .runWith(Sink.ignore)
}
Example 17
Source File: akkaStreams.scala From sangria-akka-streams with Apache License 2.0
package sangria.streaming

import scala.language.higherKinds

import akka.NotUsed
import akka.event.Logging
import akka.stream.ActorAttributes.SupervisionStrategy
import akka.stream._
import akka.stream.scaladsl.{Merge, Sink, Source}
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}

import scala.concurrent.Future

object akkaStreams {
  type AkkaSource[+T] = Source[T, NotUsed]

  abstract class SimpleLinearGraphStage[T] extends GraphStage[FlowShape[T, T]] {
    val in = Inlet[T](Logging.simpleName(this) + ".in")
    val out = Outlet[T](Logging.simpleName(this) + ".out")
    override val shape = FlowShape(in, out)
  }

  class AkkaStreamsSubscriptionStream(implicit materializer: Materializer) extends SubscriptionStream[AkkaSource] {
    def supported[T[_]](other: SubscriptionStream[T]) =
      other.isInstanceOf[AkkaStreamsSubscriptionStream]

    def map[A, B](source: AkkaSource[A])(fn: A => B) = source.map(fn)

    def singleFuture[T](value: Future[T]) = Source.fromFuture(value)

    def single[T](value: T) = Source.single(value)

    def mapFuture[A, B](source: AkkaSource[A])(fn: A => Future[B]) =
      source.mapAsync(1)(fn)

    def first[T](s: AkkaSource[T]) = s.runWith(Sink.head)

    def failed[T](e: Throwable) = Source.failed(e).asInstanceOf[AkkaSource[T]]

    def onComplete[Ctx, Res](result: AkkaSource[Res])(op: => Unit) =
      result
        .via(OnComplete(() => op))
        .recover { case e => op; throw e }
        .asInstanceOf[AkkaSource[Res]]

    def flatMapFuture[Ctx, Res, T](future: Future[T])(resultFn: T => AkkaSource[Res]) =
      Source.fromFuture(future).flatMapMerge(1, resultFn)

    def merge[T](streams: Vector[AkkaSource[T]]) = {
      if (streams.size > 1)
        Source.combine(streams(0), streams(1), streams.drop(2): _*)(Merge(_))
      else if (streams.nonEmpty)
        streams.head
      else
        throw new IllegalStateException("No streams produced!")
    }

    def recover[T](stream: AkkaSource[T])(fn: Throwable => T) =
      stream recover { case e => fn(e) }
  }

  implicit def akkaSubscriptionStream(implicit materializer: Materializer): SubscriptionStream[AkkaSource] =
    new AkkaStreamsSubscriptionStream

  implicit def akkaStreamIsValidSubscriptionStream[A[_, _], Ctx, Res, Out](
      implicit materializer: Materializer,
      ev1: ValidOutStreamType[Res, Out]): SubscriptionStreamLike[Source[A[Ctx, Res], NotUsed], A, Ctx, Res, Out] =
    new SubscriptionStreamLike[Source[A[Ctx, Res], NotUsed], A, Ctx, Res, Out] {
      type StreamSource[X] = AkkaSource[X]
      val subscriptionStream = new AkkaStreamsSubscriptionStream
    }

  private final case class OnComplete[T](op: () => Unit) extends SimpleLinearGraphStage[T] {
    override def toString: String = "OnComplete"

    override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
      new GraphStageLogic(shape) with OutHandler with InHandler {
        def decider =
          inheritedAttributes.get[SupervisionStrategy].map(_.decider).getOrElse(Supervision.stoppingDecider)

        override def onPush(): Unit = {
          push(out, grab(in))
        }

        override def onPull(): Unit = pull(in)

        override def onDownstreamFinish() = {
          op()
          super.onDownstreamFinish()
        }

        override def onUpstreamFinish() = {
          op()
          super.onUpstreamFinish()
        }

        setHandlers(in, out, this)
      }
  }
}
Example 18
Source File: HelloServiceImpl.scala From scala-tutorials with MIT License
package com.baeldung.hello.impl

import akka.NotUsed
import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.cluster.routing.{ClusterRouterGroup, ClusterRouterGroupSettings}
import akka.pattern.ask
import akka.routing.ConsistentHashingGroup
import akka.stream.scaladsl.Source
import akka.util.Timeout
import com.baeldung.hello.akka.{Job, JobAccepted, JobStatus, Worker}
import com.baeldung.hello.api.HelloService
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.pubsub.{PubSubRegistry, TopicId}

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._

class HelloServiceImpl(system: ActorSystem, pubSub: PubSubRegistry)(implicit ec: ExecutionContext)
  extends HelloService {

  if (Cluster.get(system).selfRoles("worker-node")) {
    system.actorOf(Worker.props(pubSub), "worker")
  }

  val workerRouter = {
    val paths = List("/user/worker")
    val groupConf = ConsistentHashingGroup(paths, hashMapping = {
      case Job(_, task, _) => task
    })
    val routerProps = ClusterRouterGroup(
      groupConf,
      ClusterRouterGroupSettings(
        totalInstances = 1000,
        routeesPaths = paths,
        allowLocalRoutees = true,
        useRoles = Set("worker-node")
      )
    ).props
    system.actorOf(routerProps, "workerRouter")
  }

  override def submit(): ServiceCall[Job, JobAccepted] = ServiceCall { job =>
    //Future{JobAccepted(job.jobId)}
    implicit val timeout = Timeout(5.seconds)
    (workerRouter ? job).mapTo[JobAccepted]
  }

  override def status(): ServiceCall[NotUsed, Source[JobStatus, NotUsed]] =
    ServiceCall { _ =>
      val topic = pubSub.refFor(TopicId[JobStatus]("job-status"))
      Future.successful(topic.subscriber)
    }
}
Example 19
Source File: ExperimentVariantEventTest.scala From izanami with Apache License 2.0
package domains.abtesting

import java.time.LocalDateTime
import java.time.temporal.ChronoUnit

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink, Source}
import domains.Key
import domains.abtesting.events._
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import test.IzanamiSpec

class ExperimentVariantEventTest extends IzanamiSpec with ScalaFutures with IntegrationPatience {

  "ExperimentVariantEvent" must {
    "aggregate event" in {
      implicit val system: ActorSystem = ActorSystem()

      val variantId = "vId"
      val variant = Variant(variantId, "None", None, Traffic(0), None)
      val flow: Flow[ExperimentVariantEvent, VariantResult, NotUsed] =
        ExperimentVariantEvent.eventAggregation("experiment.id", 1, ChronoUnit.HOURS)

      val firstDate = LocalDateTime.now().minus(5, ChronoUnit.HOURS)

      val experimentKey = Key(s"experiment:id")
      def experimentVariantEventKey(counter: Int): ExperimentVariantEventKey =
        ExperimentVariantEventKey(experimentKey, variantId, s"client:id:$counter", "namespace", s"$counter")
      def clientId(i: Int): String = s"client:id:$i"
      def date(i: Int): LocalDateTime = firstDate.plus(15 * i, ChronoUnit.MINUTES)

      val source = (1 to 20)
        .flatMap { counter =>
          val d = date(counter)
          val key = experimentVariantEventKey(counter)

          counter match {
            case i if i % 2 > 0 =>
              List(ExperimentVariantDisplayed(key, experimentKey, clientId(i), variant, d, 0, variantId))
            case i =>
              List(
                ExperimentVariantDisplayed(key, experimentKey, clientId(i), variant, d, 0, variantId),
                ExperimentVariantWon(key, experimentKey, clientId(i), variant, d, 0, variantId)
              )
          }
        }

      val expectedEvents = Seq(
        ExperimentResultEvent(experimentKey, variant, date(1), 0.0, "vId"),
        ExperimentResultEvent(experimentKey, variant, date(5), 40.0, "vId"),
        ExperimentResultEvent(experimentKey, variant, date(9), 44.44444444444444, "vId"),
        ExperimentResultEvent(experimentKey, variant, date(13), 46.15384615384615, "vId"),
        ExperimentResultEvent(experimentKey, variant, date(17), 47.05882352941177, "vId")
      )

      val evts = Source(source).via(flow).runWith(Sink.seq).futureValue
      val allEvents = evts.flatMap(_.events)

      allEvents must be(expectedEvents)
    }
  }
}
Example 20
Source File: BasicEventStore.scala From izanami with Apache License 2.0
package domains.events.impl

import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.scaladsl.{Source, SourceQueueWithComplete}
import akka.stream.{ActorMaterializer, Materializer}
import akka.{Done, NotUsed}
import domains.Domain.Domain
import domains.configuration.PlayModule
import domains.events.EventLogger._
import domains.events.EventStore
import domains.events.Events.IzanamiEvent
import libs.streams.CacheableQueue
import domains.errors.IzanamiErrors
import store.datastore.DataStoreLayerContext
import zio.{IO, Task, ZLayer}

import scala.util.Try

object BasicEventStore {
  val live: ZLayer[DataStoreLayerContext, Throwable, EventStore] =
    ZLayer.fromFunction { mix =>
      implicit val system: ActorSystem = mix.get[PlayModule.Service].system
      new BasicEventStore
    }
}

class BasicEventStore(implicit system: ActorSystem) extends EventStore.Service {

  logger.info("Starting default event store")

  private val queue = CacheableQueue[IzanamiEvent](500, queueBufferSize = 500)
  system.actorOf(EventStreamActor.props(queue))

  override def publish(event: IzanamiEvent): IO[IzanamiErrors, Done] =
    //Already published
    Task {
      system.eventStream.publish(event)
      Done
    }.orDie

  override def events(domains: Seq[Domain],
                      patterns: Seq[String],
                      lastEventId: Option[Long]): Source[IzanamiEvent, NotUsed] =
    lastEventId match {
      case Some(_) =>
        queue.sourceWithCache
          .via(dropUntilLastId(lastEventId))
          .filter(eventMatch(patterns, domains))
      case None =>
        queue.rawSource
          .filter(eventMatch(patterns, domains))
    }

  override def check(): Task[Unit] = IO.succeed(())
}

private[events] object EventStreamActor {
  def props(queue: SourceQueueWithComplete[IzanamiEvent]) =
    Props(new EventStreamActor(queue))
}

private[events] class EventStreamActor(queue: SourceQueueWithComplete[IzanamiEvent]) extends Actor {

  import context.dispatcher

  override def receive = {
    case e: IzanamiEvent =>
      logger.debug(s"New event : $e")
      queue.offer(e)
  }

  override def preStart(): Unit = {
    queue
      .watchCompletion()
      .onComplete(_ => Try(context.system.eventStream.unsubscribe(self)))
    context.system.eventStream.subscribe(self, classOf[IzanamiEvent])
  }

  override def postStop(): Unit = {
    context.system.eventStream.unsubscribe(self)
    queue.complete()
  }
}
Example 21
Source File: SearchController.scala From izanami with Apache License 2.0
package controllers

import akka.actor.ActorSystem
import akka.stream.scaladsl.{GraphDSL, Interleave, Sink, Source}
import akka.stream.{ActorMaterializer, SourceShape}
import controllers.actions.SecuredAuthContext
import domains.abtesting.ExperimentService
import domains.config.ConfigService
import domains.feature.FeatureService
import domains.script.GlobalScriptService
import domains.configuration.GlobalContext
import play.api.libs.json.{JsArray, JsValue, Json}
import play.api.mvc.{AbstractController, ActionBuilder, AnyContent, ControllerComponents}
import store.Query
import zio.{Runtime, ZIO}
import libs.http.HttpContext

class SearchController(AuthAction: ActionBuilder[SecuredAuthContext, AnyContent], cc: ControllerComponents)(
    implicit system: ActorSystem,
    R: HttpContext[GlobalContext]
) extends AbstractController(cc) {

  import libs.http._

  def search(pattern: String, features: Boolean, configs: Boolean, experiments: Boolean, scripts: Boolean) =
    AuthAction.asyncTask[GlobalContext] { ctx =>
      val query: Query = Query.oneOf(ctx.authorizedPatterns).and(pattern.split(",").toList)

      for {
        featuresRes <- if (features)
                         FeatureService
                           .findByQuery(query, 1, 10)
                           .map(_.results.map(value => Json.obj("type" -> "features", "id" -> Json.toJson(value.id))))
                           .map(value => Source(value.toList))
                       else ZIO.succeed(Source.empty[JsValue])

        configsRes <- if (configs)
                        ConfigService
                          .findByQuery(query, 1, 10)
                          .map(
                            _.results.map(value => Json.obj("type" -> "configurations", "id" -> Json.toJson(value.id)))
                          )
                          .map(value => Source(value.toList))
                      else ZIO.succeed(Source.empty[JsValue])

        experimentsRes <- if (experiments)
                            ExperimentService
                              .findByQuery(query, 1, 10)
                              .map(
                                _.results.map(value => Json.obj("type" -> "experiments", "id" -> Json.toJson(value.id)))
                              )
                              .map(value => Source(value.toList))
                          else ZIO.succeed(Source.empty[JsValue])

        scriptsRes <- if (scripts)
                        GlobalScriptService
                          .findByQuery(query, 1, 10)
                          .map(_.results.map(value => Json.obj("type" -> "scripts", "id" -> Json.toJson(value.id))))
                          .map(value => Source(value.toList))
                      else ZIO.succeed(Source.empty[JsValue])

        res <- ZIO.fromFuture { implicit ec =>
                 val all = Source.fromGraph(GraphDSL.create() { implicit builder =>
                   import GraphDSL.Implicits._

                   val interleave = builder.add(Interleave[JsValue](4, 1))

                   featuresRes ~> interleave.in(0)
                   configsRes ~> interleave.in(1)
                   experimentsRes ~> interleave.in(2)
                   scriptsRes ~> interleave.in(3)

                   SourceShape(interleave.out)
                 })
                 all.take(10).runWith(Sink.seq) map { jsons =>
                   Ok(JsArray(jsons))
                 }
               }
      } yield res
    }
}
Example 22
Source File: InitIza.scala From izanami with Apache License 2.0
package experiments

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.RawHeader
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import akka.util.ByteString

import scala.collection.immutable
import scala.concurrent.Future

object InitIza extends App {

  implicit val system: ActorSystem = ActorSystem()
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  import system.dispatcher

  private val http = Http()

  private val features = "http://localhost:9000/api/features"
  //private val features = "http://izanami-perfs.cleverapps.io/api/features"

  Source(0 to 2000)
    .mapAsyncUnordered(10) { postFeature }
    .alsoTo(Sink.foreach {
      case (c, s) if c == StatusCodes.Created =>
      case (c, s)                             => println(s"Oups $c $s")
    })
    .runWith(Sink.ignore)
    .onComplete { _ =>
      println("Done")
    }

  private def postFeature(i: Int): Future[(StatusCode, String)] = {

    val headers: immutable.Seq[HttpHeader] = immutable.Seq(
      RawHeader("Izanami-Client-Id", "xxxx"),
      RawHeader("Izanami-Client-Secret", "xxxx")
    )

    val body =
      s"""
         | {
         |   "id": "a:key:$i",
         |   "enabled": true,
         |   "activationStrategy": "NO_STRATEGY"
         | }
       """.stripMargin

    http
      .singleRequest(
        HttpRequest(
          HttpMethods.POST,
          Uri(features),
          headers = headers,
          entity = HttpEntity.Strict(ContentTypes.`application/json`, ByteString(body))
        )
      )
      .flatMap {
        case HttpResponse(code, _, entity, _) =>
          entity.dataBytes.map(_.utf8String).runFold("")((str, acc) => str + acc).map(s => (code, s))
      }
  }
}
Example 23
Source File: HttpIncomingReceiverTest.scala From sumobot with Apache License 2.0
package com.sumologic.sumobot.http_frontend

import java.time.Instant

import akka.actor.{ActorSystem, Props}
import akka.http.scaladsl.model.ws.TextMessage
import akka.stream.scaladsl.Source
import akka.testkit.{TestActorRef, TestActors, TestKit, TestProbe}
import com.sumologic.sumobot.core.HttpReceptionist
import com.sumologic.sumobot.core.model.IncomingMessage
import com.sumologic.sumobot.test.SumoBotSpec
import com.sumologic.sumobot.test.annotated.SumoBotTestKit
import org.scalatest.BeforeAndAfterAll

class HttpIncomingReceiverTest
  extends SumoBotTestKit(ActorSystem("HttpIncomingReceiverTest"))
  with BeforeAndAfterAll {

  private val probe = new TestProbe(system)
  system.eventStream.subscribe(probe.ref, classOf[IncomingMessage])

  private val dummyActor = TestActorRef(TestActors.blackholeProps)
  private val httpIncomingReceiver = TestActorRef(new HttpIncomingReceiver(dummyActor))

  "HttpIncomingReceiver" should {
    "publish IncomingMessage" when {
      "received streamed TextMessage" in {
        val msgSource = Source(List("hello"))
        val streamedMsg = TextMessage.Streamed(msgSource)

        httpIncomingReceiver ! streamedMsg
        val result = probe.expectMsgClass(classOf[IncomingMessage])
        result.canonicalText should be ("hello")
        result.addressedToUs should be (true)
        result.channel should be (HttpReceptionist.DefaultSumoBotChannel)
        result.attachments should be (Seq.empty)
        result.sentBy.plainTextReference should be (HttpReceptionist.DefaultClientUser.id)
      }

      "received strict TextMessage" in {
        val strictMsg = TextMessage.Strict("hi!")

        httpIncomingReceiver ! strictMsg

        val result = probe.expectMsgClass(classOf[IncomingMessage])
        result.canonicalText should be ("hi!")
        result.addressedToUs should be (true)
        result.channel should be (HttpReceptionist.DefaultSumoBotChannel)
        result.attachments should be (Seq.empty)
        result.sentBy.plainTextReference should be (HttpReceptionist.DefaultClientUser.id)
      }

      "properly format date" when {
        "sending IncomingMessage" in {
          val strictMsg = TextMessage.Strict("test")

          httpIncomingReceiver ! strictMsg

          val result = probe.expectMsgClass(classOf[IncomingMessage])
          val currentDate = Instant.now().getEpochSecond.toDouble
          val messageDate = result.idTimestamp.toDouble

          messageDate should be (currentDate +- 5.0)
        }
      }
    }

    "stop itself and outcoming actor" when {
      "stream ended" in {
        val outcomingActor = TestActorRef(TestActors.blackholeProps)
        val testProbeOutcoming = TestProbe()
        testProbeOutcoming.watch(outcomingActor)

        val shutdownReceiver = TestActorRef(new HttpIncomingReceiver(outcomingActor))
        val testProbeShutdown = TestProbe()
        testProbeShutdown.watch(shutdownReceiver)

        shutdownReceiver ! HttpIncomingReceiver.StreamEnded

        testProbeOutcoming.expectTerminated(outcomingActor)
        testProbeShutdown.expectTerminated(shutdownReceiver)
      }
    }
  }

  override def afterAll: Unit = {
    TestKit.shutdownActorSystem(system)
  }
}
Example 24
Source File: UseCase.scala From Fast-Data-Processing-Systems-with-SMACK-Stack with MIT License
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import com.softwaremill.react.kafka.KafkaMessages._
import org.apache.kafka.common.serialization.{StringSerializer, StringDeserializer}
import com.softwaremill.react.kafka.{ProducerMessage, ConsumerProperties, ProducerProperties, ReactiveKafka}
import org.reactivestreams.{ Publisher, Subscriber }

// the original snippet is script-style; it is wrapped in an App object here so it compiles standalone
object UseCase extends App {

  implicit val actorSystem = ActorSystem("ReactiveKafka")
  implicit val materializer = ActorMaterializer()

  val kafka = new ReactiveKafka()

  val publisher: Publisher[StringConsumerRecord] = kafka.consume(ConsumerProperties(
    bootstrapServers = "localhost:9092",
    topic = "lowercaseStrings",
    groupId = "groupName",
    valueDeserializer = new StringDeserializer()
  ))

  val subscriber: Subscriber[StringProducerMessage] = kafka.publish(ProducerProperties(
    bootstrapServers = "localhost:9092",
    topic = "uppercaseStrings",
    valueSerializer = new StringSerializer()
  ))

  Source.fromPublisher(publisher).map(m => ProducerMessage(m.value().toUpperCase))
    .to(Sink.fromSubscriber(subscriber)).run()
}
Example 25
Source File: ElasticSearchClient.scala From pipelines-examples with Apache License 2.0
package warez

import akka.NotUsed
import akka.kafka.ConsumerMessage.CommittableOffset
import akka.stream.alpakka.elasticsearch.{ ReadResult, WriteMessage, WriteResult }
import akka.stream.alpakka.elasticsearch.scaladsl.{ ElasticsearchFlow, ElasticsearchSource }
import akka.stream.scaladsl.Source
import org.apache.http.HttpHost
import org.elasticsearch.client.RestClient
import spray.json.{ JsObject, JsonFormat }

import pipelines.akkastream.scaladsl.FlowWithOffsetContext

// NOTE: this snippet is an excerpt; the enclosing class declaration (which defines
// `hostname`, `port`, `indexName`, `typeName` and the type parameter `T`) is elided.

  implicit val esClient: RestClient = RestClient.builder(new HttpHost(hostname, port)).build()

  def indexFlow(): FlowWithOffsetContext[WriteMessage[T, NotUsed], WriteResult[T, CommittableOffset]] =
    ElasticsearchFlow.createWithContext[T, CommittableOffset](indexName, typeName)

  def querySource(searchCriteria: String): Source[ReadResult[JsObject], NotUsed] =
    ElasticsearchSource
      .create(indexName, typeName, query = s"""{
        "bool": {
          "must": {
            "query_string": {
              "query": "$searchCriteria"
            }
          }
        }
      }""")
}
Example 26
Source File: StreamingAkkaServer.scala From tapir with Apache License 2.0
package sttp.tapir.examples

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Route
import akka.stream.scaladsl.Source
import akka.util.ByteString
import sttp.client._
import sttp.tapir._
import sttp.tapir.server.akkahttp._

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

object StreamingAkkaServer extends App {
  // The endpoint: corresponds to GET /receive.
  // We need to provide both the schema of the value (for documentation), as well as the format (media type) of the
  // body. Here, the schema is a `string` and the media type is `text/plain`.
  val streamingEndpoint: Endpoint[Unit, Unit, Source[ByteString, Any], Source[ByteString, Any]] =
    endpoint.get.in("receive").out(streamBody[Source[ByteString, Any]](schemaFor[String], CodecFormat.TextPlain()))

  // converting an endpoint to a route (providing server-side logic); extension method comes from imported packages
  val testStream: Source[ByteString, Any] = Source.repeat("Hello!").take(10).map(s => ByteString(s))
  val streamingRoute: Route = streamingEndpoint.toRoute(_ => Future.successful(Right(testStream)))

  // starting the server
  implicit val actorSystem: ActorSystem = ActorSystem()
  import actorSystem.dispatcher

  val bindAndCheck = Http().bindAndHandle(streamingRoute, "localhost", 8080).map { _ =>
    // testing
    implicit val backend: SttpBackend[Identity, Nothing, NothingT] = HttpURLConnectionBackend()
    val result: String = basicRequest.response(asStringAlways).get(uri"http://localhost:8080/receive").send().body
    println("Got result: " + result)

    assert(result == "Hello!" * 10)
  }

  Await.result(bindAndCheck.transformWith { r => actorSystem.terminate().transform(_ => r) }, 1.minute)
}
Example 27
Source File: Cache.scala From AckCord with MIT License
package ackcord

import scala.collection.immutable

import ackcord.gateway.GatewayMessage
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.{NotUsed, actor => classic}

// NOTE: this snippet is an excerpt; the declaration of the enclosing `Cache`
// companion object (and the `Cache` case class it constructs) is elided.

  def create(
      cacheProcessor: MemoryCacheSnapshot.CacheProcessor = MemoryCacheSnapshot.defaultCacheProcessor,
      parallelism: Int = 4
  )(implicit system: ActorSystem[Nothing]): Cache = {
    val (publish, subscribe)               = CacheStreams.cacheStreams(cacheProcessor)
    val (gatewayPublish, gatewaySubscribe) = CacheStreams.gatewayEvents[Any]

    // Keep it drained if nothing else is using it
    subscribe.runWith(Sink.ignore)

    Cache(publish, subscribe, gatewayPublish, gatewaySubscribe, parallelism)
  }
}
Example 28
Source File: HelloService.scala From scala-tutorials with MIT License
package com.baeldung.hello.api

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.baeldung.hello.akka.{Job, JobAccepted, JobStatus}
import com.lightbend.lagom.scaladsl.api.{Descriptor, Service, ServiceAcl, ServiceCall}

trait HelloService extends Service {

  def submit(): ServiceCall[Job, JobAccepted]

  def status(): ServiceCall[NotUsed, Source[JobStatus, NotUsed]]

  override final def descriptor: Descriptor = {
    import Service._
    named("hello")
      .withCalls(
        pathCall("/api/submit", submit _),
        pathCall("/api/status", status _)
      )
      .withAutoAcl(true)
      .withAcls(
        ServiceAcl(pathRegex = Some("/api/play"))
      )
  }
}
Example 29
Source File: Client.scala From twitter4s with Apache License 2.0
package com.danielasfregola.twitter4s.http.clients

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import com.danielasfregola.twitter4s.http.oauth.OAuth1Provider

import scala.concurrent.Future

trait Client extends OAuthClient {

  val withLogRequest = false
  val withLogRequestResponse = true

  def oauthProvider: OAuth1Provider

  protected def sendAndReceive[T](request: HttpRequest, f: HttpResponse => Future[T])(
      implicit system: ActorSystem,
      materializer: Materializer): Future[T] = {
    implicit val r: HttpRequest = request
    val requestStartTime = System.currentTimeMillis

    if (withLogRequest) logRequest

    Source
      .single(request)
      .via(connection)
      .mapAsync(1)(implicit response => unmarshal(requestStartTime, f))
      .runWith(Sink.head)
  }
}
Example 30
Source File: SocketWordCountTest.scala From apache-spark-test with Apache License 2.0
package com.github.dnvriend.spark.sstreaming

import akka.Done
import akka.stream.scaladsl.Tcp._
import akka.stream.scaladsl.{ Flow, Sink, Source, Tcp }
import akka.util.ByteString
import com.github.dnvriend.TestSpec
import org.scalatest.Ignore

import scala.collection.immutable._
import scala.concurrent.Future
import scala.concurrent.duration._

@Ignore
class SocketWordCountTest extends TestSpec {

  def withSocketServer(xs: Seq[String])(f: Future[Done] => Unit): Unit = {
    val connections: Source[IncomingConnection, Future[ServerBinding]] =
      Tcp().bind("127.0.0.1", 9999)
    val socketServer = connections.runForeach { connection =>
      println(s"New connection from: ${connection.remoteAddress}")
      val src = Source.cycle(() => xs.iterator).map(txt => ByteString(txt) ++ ByteString("\n"))
        .flatMapConcat(msg => Source.tick(0.seconds, 200.millis, msg))
      val echo = Flow.fromSinkAndSource(Sink.ignore, src)
      connection.handleWith(echo)
    }
    f(socketServer)
  }

  it should "compute a running word count of text data received via a TCP server" in withSparkSession { spark =>
    withSocketServer(List("apache spark")) { socketServer =>
      import spark.implicits._

      val lines = spark.readStream
        .format("socket")
        .option("host", "localhost")
        .option("port", 9999)
        .load()

      // Split the lines into words
      val words = lines.as[String].flatMap(_.split(" "))

      // Generate running word count
      val wordCounts = words.groupBy("value").count()

      // Start running the query that prints the running counts to the console
      val query = wordCounts.writeStream
        .outputMode("complete")
        .format("console")
        .start()

      query.awaitTermination(10.seconds)
    }
  }
}
Example 31
Source File: DataFrameTest.scala From apache-spark-test with Apache License 2.0
package com.github.dnvriend.spark.dataframe

import akka.stream.scaladsl.Source
import com.github.dnvriend.TestSpec
import com.github.dnvriend.spark.datasources.SparkImplicits._
import com.github.dnvriend.spark.datasources.person.Person

import scala.collection.immutable._
import scala.concurrent.Future

class DataFrameTest extends TestSpec {

  it should "create a DataFrame from an akka.stream.scaladsl.Source" in withSparkSession { spark =>
    val df = spark.fromSource(Source(List(Person(1, "foo", 25), Person(2, "bar", 20), Person(3, "baz", 30))))
    df.show()
    df.printSchema()
  }

  it should "create a DataFrame from a Future" in withSparkSession { spark =>
    val df = spark.fromFuture(Future.successful(List(Person(1, "foo", 25), Person(2, "bar", 20), Person(3, "baz", 30))))
    df.show()
    df.printSchema()
  }
}
Example 32
Source File: CreatePosts.scala From apache-spark-test with Apache License 2.0
package com.github.dnvriend

import java.nio.file.Paths
import java.nio.file.StandardOpenOption._
import java.text.SimpleDateFormat
import java.util.Date

import akka.actor.{ ActorSystem, Terminated }
import akka.stream.scaladsl.{ FileIO, Source }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.util.ByteString
import play.api.libs.json.Json

import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Random

object CreatePosts extends App {
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher

  def terminate: Future[Terminated] = system.terminate()

  sys.addShutdownHook {
    terminate
  }

  object Post {
    implicit val format = Json.format[Post]
  }

  final case class Post(
    commentCount: Int,
    lastActivityDate: String,
    ownerUserId: Long,
    body: String,
    score: Int,
    creationDate: String,
    viewCount: Int,
    title: String,
    tags: String,
    answerCount: Int,
    acceptedAnswerId: Long,
    postTypeId: Long,
    id: Long
  )

  def rng = Random.nextInt(20000)
  def now: String = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXXX").format(new Date())

  val lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam fringilla magna et pharetra vestibulum."
  val title = " Ut id placerat sapien. Aliquam vel metus orci."

  Source.fromIterator(() => Iterator from 0).map { id =>
    Post(rng, now, rng, List.fill(Random.nextInt(5))(lorem).mkString("\n"), rng, now, rng, s"$rng - $title", title, rng, rng, rng, id)
  }.map(Json.toJson(_).toString)
    .map(json => ByteString(json + "\n"))
    .take(1000000)
    .via(LogProgress.flow())
    .runWith(FileIO.toPath(Paths.get("/tmp/posts.json"), Set(WRITE, TRUNCATE_EXISTING, CREATE)))
    .flatMap { done =>
      println(done)
      terminate
    }.recoverWith {
      case cause: Throwable =>
        cause.printStackTrace()
        terminate
    }
}
Example 33
Source File: CreateZipcodes.scala From apache-spark-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend import java.nio.file.Paths import java.nio.file.StandardOpenOption._ import akka.NotUsed import akka.actor.{ ActorSystem, Terminated } import akka.stream.scaladsl.{ FileIO, Source } import akka.stream.{ ActorMaterializer, Materializer } import akka.util.ByteString import play.api.libs.json.Json import scala.concurrent.{ ExecutionContext, Future } object CreateZipcodes extends App { implicit val system: ActorSystem = ActorSystem() implicit val mat: Materializer = ActorMaterializer() implicit val ec: ExecutionContext = system.dispatcher sys.addShutdownHook { terminate } object Zipcode { implicit val format = Json.format[Zipcode] } final case class Zipcode(value: String) val numZips = 50000000 def zips(range: Range): Source[ByteString, NotUsed] = Source(range).flatMapConcat { district => Source('A' to 'Z').flatMapConcat { l1 => Source('A' to 'Z').flatMapConcat { l2 => Source(1 to 399).map(num => f"$district$l1$l2-$num%03d") } } }.map(Zipcode.apply).map(Json.toJson(_).toString).map(json => ByteString(json + "\n")) zips(1000 until 2000) .merge(zips(2000 until 3000)) .merge(zips(3000 until 4000)) .merge(zips(4000 until 5000)) .merge(zips(5000 until 6000)) .merge(zips(6000 until 7000)) .merge(zips(7000 until 8000)) .merge(zips(8000 until 9000)) .take(numZips) .via(LogProgress.flow(each = 250000)) .runWith(FileIO.toPath(Paths.get("/tmp/zips.json"), Set(WRITE, TRUNCATE_EXISTING, CREATE))) .flatMap { done => println(done) terminate }.recoverWith { case cause: Throwable => cause.printStackTrace() terminate } def terminate: Future[Terminated] = system.terminate() }
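The eight explicit merge calls above interleave independently generated district ranges. Under the same definitions (zips, numZips), the fan-in could also be written with flatMapMerge; a sketch:

val ranges = (1 to 8).map(i => (i * 1000) until ((i + 1) * 1000)).toList
Source(ranges)
  .flatMapMerge(breadth = 8, zips) // run up to 8 range generators concurrently
  .take(numZips)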
Example 34
Source File: SparkImplicits.scala From apache-spark-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend.spark.datasources import java.util.Properties import akka.NotUsed import akka.stream.Materializer import akka.stream.scaladsl.{ Sink, Source } import org.apache.spark.sql._ import org.apache.spark.sql.streaming.DataStreamReader import scala.collection.immutable._ import scala.concurrent.duration.{ FiniteDuration, _ } import scala.concurrent.{ Await, Future } import scala.reflect.runtime.universe._ import slick.driver.PostgresDriver.api._ object SparkImplicits { implicit class DataSourceOps(dfr: DataFrameReader) { def helloworld(path: String): DataFrame = dfr.format("helloworld").load(path) def person(path: String): DataFrame = dfr.format("person").load(path) def jdbc(table: String)(implicit jdbcOptions: Map[String, String]): DataFrame = dfr.format("jdbc").options(jdbcOptions ++ Map("dbtable" -> table)).load() } implicit class DataStreamReaderOps(dsr: DataStreamReader) { def currentPersistenceIds(path: String = "jdbc-read-journal"): DataFrame = dsr.format("current-persistence-id").load(path) def eventsByPersistenceId(path: String = "jdbc-read-journal"): DataFrame = dsr.format("current-events-by-persistence-id").load(path) } implicit class DataFrameWriterOps[T](dfw: DataFrameWriter[T]) { def ignore = dfw.mode(SaveMode.Ignore) def jdbc(table: String)(implicit jdbcOptions: Map[String, String]) = { val properties = jdbcOptions.foldLeft(new Properties) { case (prop, (k, v)) => prop.put(k, v); prop } dfw.jdbc(jdbcOptions("url"), table, properties) // does not (yet) work see: https://issues.apache.org/jira/browse/SPARK-7646 // dfw.format("jdbc").mode(SaveMode.Overwrite).options(jdbcOptions ++ Map("dbtable" -> table)) } } trait DataFrameQueryGenerator[A] { def upsert: String } implicit class DatasetOps(df: DataFrame) { def withSession[A](db: Database)(f: Session => A): A = { val session = db.createSession() try f(session) finally session.close() } def withStatement[A](db: Database)(f: java.sql.Statement => A): A = withSession(db)(session ⇒ session.withStatement()(f)) def upsert[A](table: String)(implicit db: Database, dfq: DataFrameQueryGenerator[A]): DataFrame = withStatement(db) { stmt => stmt.executeUpdate(dfq.upsert) df } } implicit class SparkSessionOps(spark: SparkSession) { def fromFuture[A <: Product: TypeTag](data: Future[Seq[A]])(implicit _timeout: FiniteDuration = null): DataFrame = spark.createDataFrame(Await.result(data, Option(_timeout).getOrElse(15.minutes))) def fromSource[A <: Product: TypeTag](data: Source[A, NotUsed])(implicit _timeout: FiniteDuration = null, mat: Materializer): DataFrame = fromFuture(data.runWith(Sink.seq)) } }
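The jdbc helpers above thread their connection settings through an implicit option map that must at least contain the "url" key. A hypothetical call site, given a SparkSession named spark in scope (connection values are illustrative):

import com.github.dnvriend.spark.datasources.SparkImplicits._
import org.apache.spark.sql.DataFrame

implicit val jdbcOptions: Map[String, String] = Map(
  "url"      -> "jdbc:postgresql://localhost:5432/test", // illustrative settings
  "user"     -> "postgres",
  "password" -> "secret"
)

val people: DataFrame = spark.read.jdbc("person") // reads table "person" via DataSourceOps
people.write.ignore.jdbc("person_copy")           // writes it back via DataFrameWriterOps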
Example 35
Source File: Converters.scala From korolev with Apache License 2.0 | 5 votes |
package korolev.akka import akka.NotUsed import akka.stream.OverflowStrategy import akka.stream.scaladsl.{Sink, Source} import korolev.akka.util.{KorolevStreamPublisher, KorolevStreamSubscriber} import korolev.effect.{Effect, Stream} import org.reactivestreams.Publisher object Converters { implicit final class SinkCompanionOps(value: Sink.type) { def korolevStream[F[_]: Effect, T]: Sink[T, Stream[F, T]] = { val subscriber = new KorolevStreamSubscriber[F, T]() Sink .fromSubscriber(subscriber) .mapMaterializedValue(_ => subscriber) } } implicit final class StreamCompanionOps(value: Stream.type) { def fromPublisher[F[_]: Effect, T](publisher: Publisher[T]): Stream[F, T] = { val result = new KorolevStreamSubscriber[F, T]() publisher.subscribe(result) result } } implicit final class KorolevStreamsOps[F[_]: Effect, T](stream: Stream[F, T]) { def asPublisher(fanout: Boolean = false): Publisher[T] = new KorolevStreamPublisher(stream, fanout) def asAkkaSource: Source[T, NotUsed] = { val publisher = new KorolevStreamPublisher(stream, fanout = false) Source .fromPublisher(publisher) .buffer(10, OverflowStrategy.backpressure) // FIXME should work without this line. Looks like bug in akka-streams } } }
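A sketch of round-tripping between the two stream models with the converters above, assuming Akka 2.6 and a Future-based Effect instance in implicit scope (korolev provides one given an ExecutionContext):

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import korolev.akka.Converters._
import korolev.effect.Stream

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

implicit val system: ActorSystem = ActorSystem()

// Akka Source -> Korolev Stream: materializes the subscriber-backed Sink
val kStream: Stream[Future, Int] = Source(1 to 10).runWith(Sink.korolevStream[Future, Int])

// Korolev Stream -> Akka Source, via the reactive-streams publisher
val akkaSource: Source[Int, _] = kStream.asAkkaSource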
Example 36
Source File: EventRoutesSpec.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.routes import akka.NotUsed import akka.actor.ActorSystem import akka.http.scaladsl.model.StatusCodes import akka.http.scaladsl.model.headers.`Last-Event-ID` import akka.http.scaladsl.model.sse.ServerSentEvent import akka.persistence.query.{EventEnvelope, NoOffset, Offset, Sequence} import akka.stream.scaladsl.Source import ch.epfl.bluebrain.nexus.iam.client.types._ import ch.epfl.bluebrain.nexus.kg.config.AppConfig import ch.epfl.bluebrain.nexus.kg.resources.Event import ch.epfl.bluebrain.nexus.kg.routes.EventRoutesSpec.TestableEventRoutes import io.circe.Encoder class EventRoutesSpec extends EventsSpecBase { val eventRoutes = new TestableEventRoutes(events, acls, caller) "EventRoutes" should { "return all events for a project" in { Get("/") ~> eventRoutes.routes(project) ~> check { val expected = jsonContentOf("/events/events.json").asArray.value status shouldEqual StatusCodes.OK responseAs[String] shouldEqual eventStreamFor(expected) } } "return all events for a project from the last seen" in { Get("/").addHeader(`Last-Event-ID`(0.toString)) ~> eventRoutes.routes(project) ~> check { val expected = jsonContentOf("/events/events.json").asArray.value status shouldEqual StatusCodes.OK responseAs[String] shouldEqual eventStreamFor(expected, 1) } } "return all events for an organization" in { Get("/") ~> eventRoutes.routes(organization) ~> check { val expected = jsonContentOf("/events/events.json").asArray.value status shouldEqual StatusCodes.OK responseAs[String] shouldEqual eventStreamFor(expected) } } "return all events for an organization from the last seen" in { Get("/").addHeader(`Last-Event-ID`(0.toString)) ~> eventRoutes.routes(organization) ~> check { val expected = jsonContentOf("/events/events.json").asArray.value status shouldEqual StatusCodes.OK responseAs[String] shouldEqual eventStreamFor(expected, 1) } } } } object EventRoutesSpec { class TestableEventRoutes(events: List[Event], acls: AccessControlLists, caller: Caller)( implicit as: ActorSystem, config: AppConfig ) extends EventRoutes(acls, caller) { private val envelopes = events.zipWithIndex.map { case (ev, idx) => EventEnvelope(Sequence(idx.toLong), "persistenceid", 1L, ev) } override protected def source( tag: String, offset: Offset )(implicit enc: Encoder[Event]): Source[ServerSentEvent, NotUsed] = { val toDrop = offset match { case NoOffset => 0 case Sequence(v) => v + 1 } Source(envelopes).drop(toDrop).flatMapConcat(ee => Source(eventToSse(ee).toList)) } } }
Example 37
Source File: EventSource.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.client

import java.util.UUID

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.persistence.query.{NoOffset, Offset, Sequence, TimeBasedUUID}
import akka.stream.Materializer
import akka.stream.alpakka.sse.scaladsl.{EventSource => SSESource}
import akka.stream.scaladsl.Source
import ch.epfl.bluebrain.nexus.rdf.implicits._
import ch.epfl.bluebrain.nexus.iam.client.types.AuthToken
import ch.epfl.bluebrain.nexus.rdf.Iri.AbsoluteIri
import com.typesafe.scalalogging.Logger
import io.circe.Decoder
import io.circe.parser.decode

import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try

trait EventSource[A] {
  // Abstract member restored here: the listing was truncated and jumped straight into the
  // companion's factory method, leaving the trait without the method that the anonymous
  // instance below overrides.
  def apply(iri: AbsoluteIri, offset: Option[String])(
      implicit cred: Option[AuthToken]
  ): Source[(Offset, A), NotUsed]
}

object EventSource {
  def apply[A: Decoder](
      config: KgClientConfig
  )(implicit as: ActorSystem, mt: Materializer, ec: ExecutionContext): EventSource[A] =
    new EventSource[A] {
      private val logger = Logger[this.type]
      private val http   = Http()

      private def addCredentials(request: HttpRequest)(implicit cred: Option[AuthToken]): HttpRequest =
        cred.map(token => request.addCredentials(OAuth2BearerToken(token.value))).getOrElse(request)

      private def send(request: HttpRequest)(implicit cred: Option[AuthToken]): Future[HttpResponse] =
        http.singleRequest(addCredentials(request)).map { resp =>
          if (!resp.status.isSuccess())
            logger.warn(s"HTTP response when performing SSE request: status = '${resp.status}'")
          resp
        }

      private def toOffset(id: String): Offset =
        Try(TimeBasedUUID(UUID.fromString(id))).orElse(Try(Sequence(id.toLong))).getOrElse(NoOffset)

      override def apply(iri: AbsoluteIri, offset: Option[String])(
          implicit cred: Option[AuthToken]
      ): Source[(Offset, A), NotUsed] =
        SSESource(iri.asAkka, send, offset, config.sseRetryDelay).flatMapConcat { sse =>
          val offset = sse.id.map(toOffset).getOrElse(NoOffset)
          decode[A](sse.data) match {
            case Right(ev) => Source.single(offset -> ev)
            case Left(err) =>
              logger.error(s"Failed to decode admin event '$sse'", err)
              Source.empty
          }
        }
    }
}
Example 38
Source File: ViewIndexer.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing import akka.actor.ActorSystem import akka.stream.scaladsl.{Flow, Source} import akka.util.Timeout import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.admin.client.AdminClient import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, ViewCache} import ch.epfl.bluebrain.nexus.kg.config.AppConfig import ch.epfl.bluebrain.nexus.kg.config.AppConfig._ import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv import ch.epfl.bluebrain.nexus.kg.resources._ import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem} import ch.epfl.bluebrain.nexus.sourcing.projections._ import com.typesafe.scalalogging.Logger import scala.concurrent.ExecutionContext // $COVERAGE-OFF$ object ViewIndexer { private implicit val log = Logger[ViewIndexer.type] def start[F[_]: Timer](views: Views[F], viewCache: ViewCache[F])( implicit projectCache: ProjectCache[F], F: Effect[F], as: ActorSystem, projectInitializer: ProjectInitializer[F], adminClient: AdminClient[F], config: AppConfig ): StreamSupervisor[F, Unit] = { implicit val authToken = config.iam.serviceAccountToken implicit val indexing: IndexingConfig = config.keyValueStore.indexing implicit val ec: ExecutionContext = as.dispatcher implicit val tm: Timeout = Timeout(config.keyValueStore.askTimeout) val name = "view-indexer" def toView(event: Event): F[Option[View]] = fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project => views.fetchView(event.id).value.map { case Left(err) => log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'") None case Right(view) => Some(view) } } val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.View.value.show}", name) val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any] .collectCast[Event] .groupedWithin(indexing.batch, indexing.batchTimeout) .distinct() .mergeEmit() .mapAsync(toView) .collectSome[View] .runAsync(viewCache.put)() .flow .map(_ => ()) StreamSupervisor.startSingleton(F.delay(source.via(flow)), name) } } // $COVERAGE-ON$
Example 39
Source File: ResolverIndexer.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing import akka.actor.ActorSystem import akka.stream.scaladsl.{Flow, Source} import akka.util.Timeout import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.admin.client.AdminClient import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, ResolverCache} import ch.epfl.bluebrain.nexus.kg.config.AppConfig import ch.epfl.bluebrain.nexus.kg.config.AppConfig._ import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv import ch.epfl.bluebrain.nexus.kg.resolve.Resolver import ch.epfl.bluebrain.nexus.kg.resources._ import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem} import ch.epfl.bluebrain.nexus.sourcing.projections._ import com.typesafe.scalalogging.Logger import scala.concurrent.ExecutionContext // $COVERAGE-OFF$ object ResolverIndexer { private implicit val log = Logger[ResolverIndexer.type] final def start[F[_]: Timer](resolvers: Resolvers[F], resolverCache: ResolverCache[F])( implicit projectCache: ProjectCache[F], as: ActorSystem, F: Effect[F], projectInitializer: ProjectInitializer[F], adminClient: AdminClient[F], config: AppConfig ): StreamSupervisor[F, Unit] = { implicit val authToken = config.iam.serviceAccountToken implicit val indexing: IndexingConfig = config.keyValueStore.indexing implicit val ec: ExecutionContext = as.dispatcher implicit val tm: Timeout = Timeout(config.keyValueStore.askTimeout) val name = "resolver-indexer" def toResolver(event: Event): F[Option[Resolver]] = fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project => resolvers.fetchResolver(event.id).value.map { case Left(err) => log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'") None case Right(resolver) => Some(resolver) } } val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.Resolver.value.show}", name) val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any] .collectCast[Event] .groupedWithin(indexing.batch, indexing.batchTimeout) .distinct() .mergeEmit() .mapAsync(toResolver) .collectSome[Resolver] .runAsync(resolverCache.put)() .flow .map(_ => ()) StreamSupervisor.startSingleton(F.delay(source.via(flow)), name) } } // $COVERAGE-ON$
Example 40
Source File: SparqlIndexer.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing import akka.actor.{ActorRef, ActorSystem, Props} import akka.stream.scaladsl.Source import akka.util.Timeout import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.admin.client.types.Project import ch.epfl.bluebrain.nexus.commons.sparql.client.{BlazegraphClient, SparqlWriteQuery} import ch.epfl.bluebrain.nexus.kg.config.AppConfig import ch.epfl.bluebrain.nexus.kg.config.AppConfig._ import ch.epfl.bluebrain.nexus.kg.indexing.View.SparqlView import ch.epfl.bluebrain.nexus.kg.resources._ import ch.epfl.bluebrain.nexus.kg.routes.Clients import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.ProgressFlowElem import ch.epfl.bluebrain.nexus.sourcing.projections.ProjectionProgress.NoProgress import ch.epfl.bluebrain.nexus.sourcing.projections._ import scala.concurrent.ExecutionContext // $COVERAGE-OFF$ @SuppressWarnings(Array("MaxParameters")) object SparqlIndexer { final def start[F[_]: Timer]( view: SparqlView, resources: Resources[F], project: Project, restartOffset: Boolean )( implicit as: ActorSystem, actorInitializer: (Props, String) => ActorRef, projections: Projections[F, String], F: Effect[F], clients: Clients[F], config: AppConfig ): StreamSupervisor[F, ProjectionProgress] = { implicit val ec: ExecutionContext = as.dispatcher implicit val p: Project = project implicit val indexing: IndexingConfig = config.sparql.indexing implicit val metadataOpts: MetadataOptions = MetadataOptions(linksAsIri = true, expandedLinks = true) implicit val tm: Timeout = Timeout(config.sparql.askTimeout) val client: BlazegraphClient[F] = clients.sparql.copy(namespace = view.index).withRetryPolicy(config.sparql.indexing.retry) def buildInsertOrDeleteQuery(res: ResourceV): SparqlWriteQuery = if (res.deprecated && !view.filter.includeDeprecated) view.buildDeleteQuery(res) else view.buildInsertQuery(res) val initFetchProgressF: F[ProjectionProgress] = if (restartOffset) projections.recordProgress(view.progressId, NoProgress) >> view.createIndex >> F.pure(NoProgress) else view.createIndex >> projections.progress(view.progressId) val sourceF: F[Source[ProjectionProgress, _]] = initFetchProgressF.map { initial => val flow = ProgressFlowElem[F, Any] .collectCast[Event] .groupedWithin(indexing.batch, indexing.batchTimeout) .distinct() .mapAsync(view.toResource(resources, _)) .collectSome[ResourceV] .collect { case res if view.allowedSchemas(res) && view.allowedTypes(res) => buildInsertOrDeleteQuery(res) case res if view.allowedSchemas(res) => view.buildDeleteQuery(res) } .runAsyncBatch(client.bulk(_))() .mergeEmit() .toPersistedProgress(view.progressId, initial) cassandraSource(s"project=${view.ref.id}", view.progressId, initial.minProgress.offset) .via(flow) .via(kamonViewMetricsFlow(view, project)) } StreamSupervisor.start(sourceF, view.progressId, actorInitializer) } } // $COVERAGE-ON$
Example 41
Source File: StorageIndexer.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing import java.time.Instant import akka.actor.ActorSystem import akka.stream.scaladsl.{Flow, Source} import akka.util.Timeout import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.admin.client.AdminClient import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, StorageCache} import ch.epfl.bluebrain.nexus.kg.config.AppConfig import ch.epfl.bluebrain.nexus.kg.config.AppConfig._ import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv import ch.epfl.bluebrain.nexus.kg.resources._ import ch.epfl.bluebrain.nexus.kg.storage.Storage import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem} import ch.epfl.bluebrain.nexus.sourcing.projections._ import com.typesafe.scalalogging.Logger import scala.concurrent.ExecutionContext // $COVERAGE-OFF$ object StorageIndexer { private implicit val log = Logger[StorageIndexer.type] def start[F[_]: Timer](storages: Storages[F], storageCache: StorageCache[F])( implicit projectCache: ProjectCache[F], F: Effect[F], as: ActorSystem, projectInitializer: ProjectInitializer[F], adminClient: AdminClient[F], config: AppConfig ): StreamSupervisor[F, Unit] = { implicit val authToken = config.iam.serviceAccountToken implicit val indexing: IndexingConfig = config.keyValueStore.indexing implicit val ec: ExecutionContext = as.dispatcher implicit val tm: Timeout = Timeout(config.keyValueStore.askTimeout) val name = "storage-indexer" def toStorage(event: Event): F[Option[(Storage, Instant)]] = fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project => storages.fetchStorage(event.id).value.map { case Left(err) => log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'") None case Right(timedStorage) => Some(timedStorage) } } val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.Storage.value.show}", name) val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any] .collectCast[Event] .groupedWithin(indexing.batch, indexing.batchTimeout) .distinct() .mergeEmit() .mapAsync(toStorage) .collectSome[(Storage, Instant)] .runAsync { case (storage, instant) => storageCache.put(storage)(instant) }() .flow .map(_ => ()) StreamSupervisor.startSingleton(F.delay(source.via(flow)), name) } } // $COVERAGE-ON$
Example 42
Source File: ElasticSearchIndexer.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing import akka.actor.{ActorRef, ActorSystem, Props} import akka.stream.scaladsl.Source import akka.util.Timeout import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.admin.client.types.Project import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchClient import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchClient.BulkOp import ch.epfl.bluebrain.nexus.kg.config.AppConfig import ch.epfl.bluebrain.nexus.kg.config.AppConfig._ import ch.epfl.bluebrain.nexus.kg.indexing.View.ElasticSearchView import ch.epfl.bluebrain.nexus.kg.resources._ import ch.epfl.bluebrain.nexus.kg.routes.Clients import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.ProgressFlowElem import ch.epfl.bluebrain.nexus.sourcing.projections.ProjectionProgress.NoProgress import ch.epfl.bluebrain.nexus.sourcing.projections._ import com.typesafe.scalalogging.Logger import scala.concurrent.ExecutionContext // $COVERAGE-OFF$ @SuppressWarnings(Array("MaxParameters")) object ElasticSearchIndexer { private implicit val log: Logger = Logger[ElasticSearchIndexer.type] final def start[F[_]: Timer]( view: ElasticSearchView, resources: Resources[F], project: Project, restartOffset: Boolean )( implicit as: ActorSystem, actorInitializer: (Props, String) => ActorRef, projections: Projections[F, String], F: Effect[F], clients: Clients[F], config: AppConfig ): StreamSupervisor[F, ProjectionProgress] = { implicit val ec: ExecutionContext = as.dispatcher implicit val p: Project = project implicit val indexing: IndexingConfig = config.elasticSearch.indexing implicit val metadataOpts: MetadataOptions = MetadataOptions(linksAsIri = true, expandedLinks = true) implicit val tm: Timeout = Timeout(config.elasticSearch.askTimeout) val client: ElasticSearchClient[F] = clients.elasticSearch.withRetryPolicy(config.elasticSearch.indexing.retry) def deleteOrIndex(res: ResourceV): Option[BulkOp] = if (res.deprecated && !view.filter.includeDeprecated) Some(delete(res)) else view.toDocument(res).map(doc => BulkOp.Index(view.index, res.id.value.asString, doc)) def delete(res: ResourceV): BulkOp = BulkOp.Delete(view.index, res.id.value.asString) val initFetchProgressF: F[ProjectionProgress] = if (restartOffset) projections.recordProgress(view.progressId, NoProgress) >> view.createIndex >> F.pure(NoProgress) else view.createIndex >> projections.progress(view.progressId) val sourceF: F[Source[ProjectionProgress, _]] = initFetchProgressF.map { initial => val flow = ProgressFlowElem[F, Any] .collectCast[Event] .groupedWithin(indexing.batch, indexing.batchTimeout) .distinct() .mapAsync(view.toResource(resources, _)) .collectSome[ResourceV] .collect { case res if view.allowedSchemas(res) && view.allowedTypes(res) => deleteOrIndex(res) case res if view.allowedSchemas(res) => Some(delete(res)) } .collectSome[BulkOp] .runAsyncBatch(client.bulk(_))() .mergeEmit() .toPersistedProgress(view.progressId, initial) cassandraSource(s"project=${view.ref.id}", view.progressId, initial.minProgress.offset) .via(flow) .via(kamonViewMetricsFlow(view, project)) } StreamSupervisor.start(sourceF, view.progressId, actorInitializer) } } // $COVERAGE-ON$
Example 43
Source File: EventCommonRoutes.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.routes

import java.util.UUID

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.javadsl.server.Rejections.validationRejection
import akka.http.scaladsl.model.headers.`Last-Event-ID`
import akka.http.scaladsl.model.sse.ServerSentEvent
import akka.http.scaladsl.server.Directive1
import akka.http.scaladsl.server.Directives.{optionalHeaderValueByName, provide, reject}
import akka.persistence.query._
import akka.persistence.query.scaladsl.EventsByTagQuery
import akka.stream.scaladsl.Source
import ch.epfl.bluebrain.nexus.commons.circe.syntax._
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.resources.Event
import io.circe.syntax._
import io.circe.{Encoder, Printer}

import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

// NOTE: the enclosing declaration was truncated in the listing; the trait header and the
// `printer` value below are plausible reconstructions so the members compile in context.
trait EventCommonRoutes {

  private val printer: Printer = Printer.noSpaces.copy(dropNullValues = true) // assumed definition

  protected def lastEventId: Directive1[Offset] =
    optionalHeaderValueByName(`Last-Event-ID`.name)
      .map(_.map(id => `Last-Event-ID`(id)))
      .flatMap {
        case Some(header) =>
          Try[Offset](TimeBasedUUID(UUID.fromString(header.id))) orElse Try(Sequence(header.id.toLong)) match {
            case Success(value) => provide(value)
            case Failure(_)     => reject(validationRejection("The value of the `Last-Event-ID` header is not valid."))
          }
        case None => provide(NoOffset)
      }

  private def aToSse[A: Encoder](a: A, offset: Offset): ServerSentEvent = {
    val json = a.asJson.sortKeys(AppConfig.orderedKeys)
    ServerSentEvent(
      data = json.printWith(printer),
      eventType = json.hcursor.get[String]("@type").toOption,
      id = offset match {
        case NoOffset            => None
        case Sequence(value)     => Some(value.toString)
        case TimeBasedUUID(uuid) => Some(uuid.toString)
      }
    )
  }

  protected def eventToSse(envelope: EventEnvelope)(implicit enc: Encoder[Event]): Option[ServerSentEvent] =
    envelope.event match {
      case value: Event => Some(aToSse(value, envelope.offset))
      case _            => None
    }
}
Example 44
Source File: GlobalEventRoutesSpec.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.routes import akka.NotUsed import akka.actor.ActorSystem import akka.http.scaladsl.model.StatusCodes import akka.http.scaladsl.model.headers.`Last-Event-ID` import akka.http.scaladsl.model.sse.ServerSentEvent import akka.persistence.query.{EventEnvelope, NoOffset, Offset, Sequence} import akka.stream.scaladsl.Source import ch.epfl.bluebrain.nexus.iam.client.types.{AccessControlLists, Caller} import ch.epfl.bluebrain.nexus.kg.config.AppConfig import ch.epfl.bluebrain.nexus.kg.resources.Event import ch.epfl.bluebrain.nexus.kg.routes.GlobalEventRoutesSpec.TestableEventRoutes import io.circe.Encoder class GlobalEventRoutesSpec extends EventsSpecBase { val routes = new TestableEventRoutes(events, acls, caller).routes "GlobalEventRoutes" should { "return all events for a project" in { Get("/") ~> routes ~> check { val expected = jsonContentOf("/events/events.json").asArray.value status shouldEqual StatusCodes.OK responseAs[String] shouldEqual eventStreamFor(expected) } } "return all events for a project from the last seen" in { Get("/").addHeader(`Last-Event-ID`(0.toString)) ~> routes ~> check { val expected = jsonContentOf("/events/events.json").asArray.value status shouldEqual StatusCodes.OK responseAs[String] shouldEqual eventStreamFor(expected, 1) } } } } object GlobalEventRoutesSpec { class TestableEventRoutes(events: List[Event], acls: AccessControlLists, caller: Caller)( implicit as: ActorSystem, config: AppConfig ) extends GlobalEventRoutes(acls, caller) { private val envelopes = events.zipWithIndex.map { case (ev, idx) => EventEnvelope(Sequence(idx.toLong), "persistenceid", 1L, ev) } override protected def source( tag: String, offset: Offset )(implicit enc: Encoder[Event]): Source[ServerSentEvent, NotUsed] = { val toDrop = offset match { case NoOffset => 0 case Sequence(v) => v + 1 } Source(envelopes).drop(toDrop).flatMapConcat(ee => Source(eventToSse(ee).toList)) } } }
Example 45
Source File: ExtensionExample.scala From korolev with Apache License 2.0 | 5 votes |
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Sink, Source}
import korolev._
import korolev.akka._
import korolev.server._
import korolev.state.javaSerialization._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object ExtensionExample extends SimpleAkkaHttpKorolevApp {
  private val ctx = Context[Future, List[String], String]

  import ctx._

  private val (queue, queueSource) = Source
    .queue[String](10, OverflowStrategy.fail)
    .preMaterialize()

  private val topicListener = Extension.pure[Future, List[String], String] { access =>
    val queueSink = queueSource.runWith(Sink.queue[String])
    def aux(): Future[Unit] = queueSink.pull() flatMap {
      case Some(message) =>
        access
          .transition(_ :+ message)
          .flatMap(_ => aux())
      case None =>
        Future.unit
    }
    aux()
    Extension.Handlers[Future, List[String], String](
      onMessage = message => queue.offer(message).map(_ => ()),
      onDestroy = () => Future.successful(queueSink.cancel())
    )
  }

  private def onSubmit(access: Access) = {
    for {
      sessionId <- access.sessionId
      name <- access.valueOf(nameElement)
      text <- access.valueOf(textElement)
      userName = if (name.trim.isEmpty) s"Anonymous #${sessionId.hashCode().toHexString}" else name
      _ <- if (text.trim.isEmpty) Future.unit else access.publish(s"$userName: $text")
      _ <- access.property(textElement).set("value", "")
    } yield ()
  }

  private val nameElement = elementId()
  private val textElement = elementId()

  private val config = KorolevServiceConfig[Future, List[String], String](
    stateLoader = StateLoader.default(Nil),
    extensions = List(topicListener),
    document = { message =>
      import levsha.dsl._
      import html._
      optimize {
        Html(
          body(
            div(
              backgroundColor @= "yellow",
              padding @= "10px",
              border @= "1px solid black",
              "This is a chat. Open this app in a few browser tabs or on a few different computers"
            ),
            div(
              marginTop @= "10px",
              padding @= "10px",
              height @= "250px",
              backgroundColor @= "#eeeeee",
              message map { x =>
                div(x)
              }
            ),
            form(
              marginTop @= "10px",
              input(`type` := "text", placeholder := "Name", nameElement),
              input(`type` := "text", placeholder := "Message", textElement),
              button("Send"),
              event("submit")(onSubmit)
            )
          )
        )
      }
    }
  )

  val service: AkkaHttpService = akkaHttpService(config)
}
Example 46
Source File: EtlStreamFactory.scala From fusion-data with Apache License 2.0 | 5 votes |
package mass.rdp.etl.graph import java.sql.PreparedStatement import akka.NotUsed import akka.stream.scaladsl.{ Sink, Source } import fusion.jdbc.util.JdbcUtils import mass.connector.Connector import mass.connector.sql._ import mass.core.event.{ EventData, EventDataSimple } import scala.concurrent.Future trait EtlStreamFactory { def `type`: String def buildSource(c: Connector, s: EtlSource): Source[EventDataSql, NotUsed] def buildSink(c: Connector, s: EtlSink): Sink[EventData, Future[JdbcSinkResult]] } class EtlStreamJdbcFactory extends EtlStreamFactory { override def `type`: String = "jdbc" override def buildSource(c: Connector, s: EtlSource): Source[EventDataSql, NotUsed] = JdbcSource(s.script.content.get, Nil, 1000)(c.asInstanceOf[SQLConnector].dataSource) .via(JdbcFlow.flowJdbcResultSet) .map(jrs => EventDataSql(jrs)) def buildSink(c: Connector, s: EtlSink): Sink[EventData, Future[JdbcSinkResult]] = { def action(event: EventData, stmt: PreparedStatement): Unit = { val args: Iterable[Any] = event match { case _: EventDataSimple => event.data.asInstanceOf[Iterable[Any]] case eventDataSql: EventDataSql => eventDataSql.data.values case _ => throw new EtlGraphException(s"Invalid EventData: $event.") } JdbcUtils.setStatementParameters(stmt, args) } JdbcSink[EventData](conn => conn.prepareStatement(s.script.content.get), action, 1000)( c.asInstanceOf[SQLConnector].dataSource) } }
Example 47
Source File: EtlGraphImpl.scala From fusion-data with Apache License 2.0 | 5 votes |
package mass.rdp.etl.graph

import akka.NotUsed
import akka.stream.scaladsl.{ Sink, Source }
import com.typesafe.scalalogging.StrictLogging
import javax.script.SimpleBindings
import mass.connector.Connector
import mass.connector.sql._
import mass.core.event.{ EventData, EventDataSimple }
import mass.core.script.ScriptManager
import mass.rdp.RdpSystem
import mass.rdp.etl.{ EtlResult, EtlWorkflowExecution, SqlEtlResult }

import scala.collection.immutable
import scala.concurrent.{ Future, Promise }
import scala.util.{ Failure, Success }

case class EtlGraphImpl(graphSetting: EtlGraphSetting) extends EtlGraph with StrictLogging {
  override def run(connectors: immutable.Seq[Connector], rdpSystem: RdpSystem): EtlWorkflowExecution = {
    implicit val ec = rdpSystem.materializer.system.dispatcher
    implicit val mat = rdpSystem.materializer

    def getConnector(name: String): Connector =
      connectors.find(_.name == name) orElse
        rdpSystem.connectorSystem.getConnector(name) getOrElse
        (throw new EtlGraphException(s"connector ref: $name does not exist"))

    val promise = Promise[EtlResult]()

    val source = dataSource(getConnector(graphSource.connector.ref), rdpSystem)
    val sink = dataSink(getConnector(graphSink.connector.ref), rdpSystem)

    graphFlows
      .foldLeft(source)((s, etlFlow) =>
        s.map { event =>
          val engine = ScriptManager.scriptJavascript
          val bindings = new SimpleBindings()
          bindings.put("event", event.asInstanceOf[EventDataSql])
          val data = engine.eval(etlFlow.script.content.get, bindings)

          // TODO: decide here whether to push a notification to the online monitoring system
          logger.debug(s"engine: $engine, event: $event, result data: $data")
          EventDataSimple(data)
        })
      .runWith(sink)
      .onComplete {
        case Success(result) => promise.success(SqlEtlResult(result))
        case Failure(e)      => promise.failure(e)
      }

    new EtlWorkflowExecution(promise, () => ())
  }

  private def dataSource(connector: Connector, rdpSystem: RdpSystem): Source[EventData, NotUsed] =
    rdpSystem.streamFactories.get(connector.`type`.toString) match {
      case Some(b) => b.buildSource(connector, graphSource)
      case _       => throw new EtlGraphException(s"Unknown connector: $connector")
    }

  private def dataSink(connector: Connector, rdpSystem: RdpSystem): Sink[EventData, Future[JdbcSinkResult]] =
    rdpSystem.streamFactories.get(connector.`type`.toString) match {
      case Some(b) => b.buildSink(connector, graphSink)
      case _       => throw new EtlGraphException(s"Unknown connector: $connector")
    }
}
Example 48
Source File: JdbcSource.scala From fusion-data with Apache License 2.0 | 5 votes |
package mass.connector.sql import java.sql.ResultSet import akka.NotUsed import akka.stream.scaladsl.Source import fusion.jdbc.ConnectionPreparedStatementCreator import fusion.jdbc.util.JdbcUtils import javax.sql.DataSource object JdbcSource { def apply(sql: String, args: Iterable[Any], fetchRowSize: Int)( implicit dataSource: DataSource): Source[ResultSet, NotUsed] = Source.fromGraph(new JdbcSourceStage(dataSource, conn => { val stmt = conn.prepareStatement(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY) JdbcUtils.setStatementParameters(stmt, args) }, fetchRowSize)) def apply(creator: ConnectionPreparedStatementCreator, fetchRowSize: Int)( implicit dataSource: DataSource): Source[ResultSet, NotUsed] = Source.fromGraph(new JdbcSourceStage(dataSource, creator, fetchRowSize)) }
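A hypothetical call site for the first overload (the pool construction is elided; per the usage in Example 46, downstream stages read the emitted cursor row by row):

import javax.sql.DataSource

implicit val dataSource: DataSource = ??? // e.g. a HikariCP pool; elided in this sketch

val names = JdbcSource("SELECT name FROM person WHERE age > ?", List(21), fetchRowSize = 500)
  .map(rs => rs.getString("name"))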
Example 49
Source File: MaterializeValue.scala From fusion-data with Apache License 2.0 | 5 votes |
package example.akkastream.graph

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Keep, RunnableGraph, Sink, Source, Tcp }
import akka.util.ByteString

import scala.concurrent.{ Future, Promise }

object MaterializeValue {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  case class MyClass(private val p: Promise[Option[Int]], conn: Tcp.OutgoingConnection) extends AutoCloseable {
    override def close(): Unit = p.trySuccess(None)
  }

  // Materializes to Promise[Option[Int]]
  val source: Source[Int, Promise[Option[Int]]] = Source.maybe[Int]
  // Materializes to NotUsed
  val flow1: Flow[Int, Int, NotUsed] = Flow[Int].take(100)
  // Materializes to Promise[Option[Int]] (Keep.left)
  val nestedSource: Source[Int, Promise[Option[Int]]] =
    source.viaMat(flow1)(Keep.left).named("nestedSource")
  // viaMat === via()(Keep.left)
  // val nestedSource2: Source[Int, NotUsed] = source.viaMat(flow1)(Keep.right)

  // Materializes to NotUsed
  val flow2: Flow[Int, ByteString, NotUsed] = Flow[Int].map(i => ByteString(i.toString))
  // Materializes to Future[Tcp.OutgoingConnection] (Keep.right)
  val flow3: Flow[ByteString, ByteString, Future[Tcp.OutgoingConnection]] =
    Tcp().outgoingConnection("localhost", 8080)
  val nestedFlow: Flow[Int, ByteString, Future[Tcp.OutgoingConnection]] =
    flow2.viaMat(flow3)(Keep.right)
  val nestedFlow2: Flow[Int, ByteString, NotUsed] =
    flow2.viaMat(flow3)(Keep.left) // flow2.via(flow3)
  val nestedFlow3: Flow[Int, ByteString, (NotUsed, Future[Tcp.OutgoingConnection])] =
    flow2.viaMat(flow3)(Keep.both)

  // Materializes to Future[String] (Keep.right)
  val sink: Sink[ByteString, Future[String]] = Sink.fold[String, ByteString]("")(_ + _.utf8String)
  val nestedSink: Sink[Int, (Future[Tcp.OutgoingConnection], Future[String])] =
    nestedFlow.toMat(sink)(Keep.both)

  def f(p: Promise[Option[Int]], rest: (Future[Tcp.OutgoingConnection], Future[String])): Future[MyClass] = {
    val connFuture = rest._1
    connFuture.map(outConn => MyClass(p, outConn))
  }

  // Materializes to Future[MyClass]
  val runnableGraph: RunnableGraph[Future[MyClass]] = nestedSource.toMat(nestedSink)(f)

  val r: RunnableGraph[Promise[Option[Int]]] = nestedSource.toMat(nestedSink)(Keep.left)
  val r2: RunnableGraph[(Future[Tcp.OutgoingConnection], Future[String])] =
    nestedSource.toMat(nestedSink)(Keep.right)
}
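As a rule of thumb for the combinators above: plain via and to behave like viaMat and toMat with Keep.left, so a downstream materialized value only survives composition when it is kept explicitly with Keep.right or Keep.both. Restated with the values defined above:

// identical to source.via(flow1): the left (upstream) value is kept
val keepLeft: Source[Int, Promise[Option[Int]]] = source.viaMat(flow1)(Keep.left)

// keeps flow1's NotUsed instead
val keepRight: Source[Int, NotUsed] = source.viaMat(flow1)(Keep.right)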
Example 50
Source File: PartialGraph.scala From fusion-data with Apache License 2.0 | 5 votes |
package example.akkastream.graph

import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Balance, Broadcast, Flow, GraphDSL, Keep, Merge, RunnableGraph, Sink, Source }
import akka.stream.{ ActorMaterializer, FlowShape, SourceShape }

import scala.concurrent.Future
import scala.io.StdIn

object PartialGraph extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  def partial =
    GraphDSL
      .create() { implicit b =>
        import GraphDSL.Implicits._

        val B = b.add(Broadcast[Int](2))
        val C = b.add(Merge[Int](2))
        val D = Flow[Int].map(_ + 1)
        val E = b.add(Balance[Int](2))
        val F = b.add(Merge[Int](2))

        C <~ F
        B ~> C ~> F
        B ~> D ~> E ~> F

        FlowShape(B.in, E.out(1))
      }
      .named("partial")

  // Convert `partial` from a FlowShape into a Flow to gain access to the fluent stream DSL (e.g. the .filter() method)
  val flow = Flow.fromGraph(partial)

  val source = Source.fromGraph(GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._
    val merge = b.add(Merge[Int](2))
    Source.single(0) ~> merge
    Source(List(2, 3, 4)) ~> merge
    SourceShape(merge.out)
  })

  val sink: Sink[Int, Future[Int]] =
    Flow[Int].map(_ * 2).drop(10).named("nestedFlow").toMat(Sink.head)(Keep.right)

  val closed: RunnableGraph[Future[Int]] =
    source.via(flow.filter(_ > 1)).toMat(sink)(Keep.right)

  closed.run().foreach(println)

  StdIn.readLine()
  system.terminate()
}
Example 51
Source File: EchoDemo.scala From fusion-data with Apache License 2.0 | 5 votes |
package example.akkastream.streamio

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Framing, Sink, Source, Tcp }
import akka.util.ByteString

import scala.concurrent.Future
import scala.io.StdIn

object EchoServer extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  val connections = Tcp().bind("localhost", 8888)
  connections.runForeach { connection =>
    println(s"New connection from: ${connection.remoteAddress}")

    val echo: Flow[ByteString, ByteString, NotUsed] = Flow[ByteString]
      .via(Framing.delimiter(ByteString("\n"), 256, true))
      .map(_.utf8String)
      .map(_ + "!!!\n")
      .map(ByteString(_))

    connection.handleWith(echo)
  }

  StdIn.readLine()
  system.terminate()
}

object EchoClient extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  val connection = Tcp().outgoingConnection("localhost", 8888)

  val replParser = Flow[String].takeWhile(_ != "q").concat(Source.single("BYE")).map { elem =>
    println(s"send msg: $elem")
    ByteString(s"$elem\n")
  }

  val repl = Flow[ByteString]
    .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 256, allowTruncation = true))
    .map(_.utf8String)
    .map(text => println("Server: " + text))
    .map(_ => StdIn.readLine("> "))
    .via(replParser)

  val connected: Future[Tcp.OutgoingConnection] = connection.join(repl).run()

  // StdIn.readLine()
  // system.terminate()
}

object EchoDemo {}
Example 52
Source File: Graph1.scala From fusion-data with Apache License 2.0 | 5 votes |
package example.akkastream.basic import akka.NotUsed import akka.actor.ActorSystem import akka.stream.{ ActorMaterializer, ClosedShape } import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source } import scala.collection.immutable import scala.io.StdIn object Graph1 extends App { implicit val system = ActorSystem() implicit val mat = ActorMaterializer() val graph = g(1 to 2) graph.run() StdIn.readLine() system.terminate() def g(data: immutable.Iterable[Int]) = RunnableGraph.fromGraph(GraphDSL.create() { implicit b: GraphDSL.Builder[NotUsed] => import GraphDSL.Implicits._ val in = Source(data) val out = Sink.foreach(println) val bcast = b.add(Broadcast[Int](2)) val merge = b.add(Merge[Int](2)) val f1, f2, f3, f4 = Flow[Int].map(_ + 10) in ~> f1 ~> bcast ~> f2 ~> merge ~> f3 ~> out bcast ~> f4 ~> merge ClosedShape }) }
Example 53
Source File: TestHelper.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg import java.time.Clock import java.util.UUID import akka.stream.Materializer import akka.stream.scaladsl.Source import akka.util.ByteString import ch.epfl.bluebrain.nexus.commons.test.{EitherValues, Randomness} import ch.epfl.bluebrain.nexus.iam.client.types.Identity.Anonymous import ch.epfl.bluebrain.nexus.iam.client.types.{AccessControlList, Identity, Permission, ResourceAccessControlList} import ch.epfl.bluebrain.nexus.kg.config.Schemas.unconstrainedSchemaUri import ch.epfl.bluebrain.nexus.kg.resources.ResourceF.Value import ch.epfl.bluebrain.nexus.kg.resources.{Ref, ResId, ResourceF} import ch.epfl.bluebrain.nexus.kg.storage.AkkaSource import ch.epfl.bluebrain.nexus.rdf.Iri.AbsoluteIri import ch.epfl.bluebrain.nexus.rdf.implicits._ import io.circe.Json trait TestHelper extends EitherValues with Randomness { private val clock = Clock.systemUTC() val read: Permission = Permission.unsafe("resources/read") val write: Permission = Permission.unsafe("files/write") def consume(source: AkkaSource)(implicit mt: Materializer): String = { import org.scalatest.concurrent.ScalaFutures._ source.runFold("")(_ ++ _.utf8String).futureValue } def produce(string: String, chunkSize: Int = 100): AkkaSource = Source(string.grouped(chunkSize).map(ByteString(_)).toList) def resourceAcls(acl: AccessControlList): ResourceAccessControlList = ResourceAccessControlList( url"http://example.com/id", 1L, Set.empty, clock.instant(), Anonymous, clock.instant(), Anonymous, acl ) def simpleV( id: ResId, value: Json, rev: Long = 1L, types: Set[AbsoluteIri] = Set.empty, deprecated: Boolean = false, schema: Ref = Ref(unconstrainedSchemaUri), created: Identity = Anonymous, updated: Identity = Anonymous )(implicit clock: Clock): ResourceF[Value] = ResourceF( id, rev, types, deprecated, Map.empty, None, clock.instant(), clock.instant(), created, updated, schema, Value(value, value.contextValue, value.toGraph(id.value).rightValue) ) def simpleV(res: ResourceF[Json])(implicit clock: Clock) = ResourceF( res.id, res.rev, res.types, res.deprecated, Map.empty, None, clock.instant(), clock.instant(), res.createdBy, res.updatedBy, res.schema, Value(res.value, res.value.contextValue, res.value.toGraph(res.id.value).rightValue) ) def genUUID: UUID = UUID.randomUUID() def genIri: AbsoluteIri = url"http://example.com/" + genUUID.toString private def sourceInChunks(input: String): AkkaSource = Source.fromIterator(() => input.grouped(10000).map(ByteString(_))) def genSource: AkkaSource = sourceInChunks(genString()) }
Example 54
Source File: AliasApiRoute.scala From Waves with MIT License | 5 votes |
package com.wavesplatform.api.http.alias import akka.NotUsed import akka.http.scaladsl.common.{EntityStreamingSupport, JsonEntityStreamingSupport} import akka.http.scaladsl.server.Route import akka.stream.scaladsl.Source import cats.syntax.either._ import com.wavesplatform.account.Alias import com.wavesplatform.api.common.CommonTransactionsApi import com.wavesplatform.api.http._ import com.wavesplatform.api.http.requests.CreateAliasRequest import com.wavesplatform.http.BroadcastRoute import com.wavesplatform.network.UtxPoolSynchronizer import com.wavesplatform.settings.RestAPISettings import com.wavesplatform.state.Blockchain import com.wavesplatform.transaction._ import com.wavesplatform.utils.Time import com.wavesplatform.wallet.Wallet import play.api.libs.json.{JsString, JsValue, Json} case class AliasApiRoute( settings: RestAPISettings, commonApi: CommonTransactionsApi, wallet: Wallet, utxPoolSynchronizer: UtxPoolSynchronizer, time: Time, blockchain: Blockchain ) extends ApiRoute with BroadcastRoute with AuthRoute { override val route: Route = pathPrefix("alias") { addressOfAlias ~ aliasOfAddress ~ deprecatedRoute } private def deprecatedRoute: Route = path("broadcast" / "create") { broadcast[CreateAliasRequest](_.toTx) } ~ (path("create") & withAuth) { broadcast[CreateAliasRequest](TransactionFactory.createAlias(_, wallet, time)) } def addressOfAlias: Route = (get & path("by-alias" / Segment)) { aliasName => complete { Alias .create(aliasName) .flatMap { a => blockchain.resolveAlias(a).bimap(_ => TxValidationError.AliasDoesNotExist(a), addr => Json.obj("address" -> addr.stringRepr)) } } } private implicit val ess: JsonEntityStreamingSupport = EntityStreamingSupport.json() def aliasOfAddress: Route = (get & path("by-address" / AddrSegment)) { address => extractScheduler { implicit s => val value: Source[JsValue, NotUsed] = Source.fromPublisher(commonApi.aliasesOfAddress(address).map { case (_, tx) => JsString(tx.alias.stringRepr) }.toReactivePublisher) complete(value) } } }
Example 55
Source File: UserRepository.scala From gabbler with Apache License 2.0 | 5 votes |
package de.heikoseeberger.gabbler.user import akka.NotUsed import akka.actor.{ ActorLogging, Props } import akka.persistence.PersistentActor import akka.persistence.query.EventEnvelope import akka.persistence.query.scaladsl.EventsByPersistenceIdQuery import akka.stream.scaladsl.Source object UserRepository { sealed trait UserEvent final case object GetUsers final case class Users(users: Set[User]) final case class AddUser(username: String, nickname: String, email: String) final case class UserAdded(user: User) extends UserEvent final case class UsernameTaken(username: String) final case class RemoveUser(id: Long) final case class UserRemoved(user: User) extends UserEvent final case class IdUnknown(id: Long) final case class GetUserEvents(fromSeqNo: Long) final case class UserEvents(userEvents: Source[(Long, UserEvent), NotUsed]) final case class User(id: Long, username: String, nickname: String, email: String) final val Name = "user-repository" def apply(readJournal: EventsByPersistenceIdQuery): Props = Props(new UserRepository(readJournal)) } final class UserRepository(readJournal: EventsByPersistenceIdQuery) extends PersistentActor with ActorLogging { import UserRepository._ override val persistenceId = Name private var users = Map.empty[String, User] override def receiveCommand = { case GetUsers => sender() ! Users(users.valuesIterator.to[Set]) case AddUser(username, nickname, email) => handleAddUser(username, nickname, email) case RemoveUser(id) => handleRemoveUser(id) case GetUserEvents(fromSeqNo) => handleGetUserEvents(fromSeqNo) } override def receiveRecover = { case UserAdded(user) => users += user.username -> user case UserRemoved(user) => users -= user.username } private def handleAddUser(username: String, nickname: String, email: String) = { def add() = persist(UserAdded(User(lastSequenceNr, username, nickname, email))) { userAdded => receiveRecover(userAdded) log.info("Added user with username {}", username) sender() ! userAdded } if (!users.contains(username)) add() else sender() ! UsernameTaken(username) } private def handleRemoveUser(id: Long) = { def remove(user: User) = persist(UserRemoved(user)) { userRemoved => receiveRecover(userRemoved) log.info("Removed user with id {} and username {}", id, user.username) sender() ! userRemoved } users.valuesIterator.find(_.id == id) match { case Some(user) => remove(user) case None => sender() ! IdUnknown(id) } } private def handleGetUserEvents(fromSeqNo: Long) = { val userEvents = readJournal .eventsByPersistenceId(Name, fromSeqNo, Long.MaxValue) .collect { case EventEnvelope(_, _, seqNo, event: UserEvent) => seqNo -> event } sender() ! UserEvents(userEvents) } }
Example 56
Source File: CommandRegistration.scala From AckCord with MIT License | 5 votes |
package ackcord.commands import scala.concurrent.Future import akka.Done import akka.stream.scaladsl.{Keep, RunnableGraph, Sink, Source} import akka.stream.{KillSwitches, UniqueKillSwitch} case class CommandRegistration[Mat](materialized: Mat, onDone: Future[Done], killSwitch: UniqueKillSwitch) { def stop(): Unit = killSwitch.shutdown() } object CommandRegistration { def toSink[A, M](source: Source[A, M]): RunnableGraph[CommandRegistration[M]] = source.viaMat(KillSwitches.single)(Keep.both).toMat(Sink.ignore) { case ((m, killSwitch), done) => CommandRegistration(m, done, killSwitch) } def withRegistration[A, M](source: Source[A, M]): Source[A, CommandRegistration[M]] = source.viaMat(KillSwitches.single)(Keep.both).watchTermination() { case ((m, killSwitch), done) => CommandRegistration(m, done, killSwitch) } }
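A sketch of driving such a registration with an illustrative ticking source, assuming Akka 2.6 where the implicit ActorSystem provides the Materializer:

import akka.actor.ActorSystem
import akka.stream.scaladsl.Source

import scala.concurrent.duration._

implicit val system: ActorSystem = ActorSystem()
import system.dispatcher

val registration = CommandRegistration.toSink(Source.tick(0.seconds, 1.second, "ping")).run()
registration.onDone.foreach(_ => println("stream completed"))
// later: tear the stream down through the kill switch
registration.stop()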
Example 57
Source File: VoiceUDPHandler.scala From AckCord with MIT License | 5 votes |
package ackcord.voice

import java.net.InetSocketAddress

import scala.concurrent.duration._
import scala.util.{Failure, Success}

import ackcord.data.{RawSnowflake, UserId}
import akka.NotUsed
import akka.actor.typed._
import akka.actor.typed.scaladsl._
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Keep, Sink, Source, SourceQueueWithComplete}
import akka.util.ByteString
import org.slf4j.Logger

object VoiceUDPHandler {

  def apply(
      address: String,
      port: Int,
      ssrc: Int,
      serverId: RawSnowflake,
      userId: UserId,
      soundProducer: Source[ByteString, NotUsed],
      soundConsumer: Sink[AudioAPIMessage, NotUsed],
      parent: ActorRef[VoiceHandler.Command]
  ): Behavior[Command] =
    Behaviors
      .supervise(
        Behaviors.setup[Command] { ctx =>
          implicit val system: ActorSystem[Nothing] = ctx.system

          val ((queue, futIp), watchDone) = soundProducer
            .viaMat(
              VoiceUDPFlow
                .flow(
                  new InetSocketAddress(address, port),
                  ssrc,
                  serverId,
                  userId,
                  Source.queue[Option[ByteString]](0, OverflowStrategy.dropBuffer)
                )
                .watchTermination()(Keep.both)
            )(Keep.right)
            .to(soundConsumer)
            .run()

          ctx.pipeToSelf(futIp) {
            case Success(value) => IPDiscoveryResult(value)
            case Failure(e)     => SendException(e)
          }
          ctx.pipeToSelf(watchDone)(_ => ConnectionDied)

          handle(ctx, ctx.log, ssrc, queue, parent)
        }
      )
      .onFailure(
        SupervisorStrategy
          .restartWithBackoff(100.millis, 5.seconds, 1D)
          .withResetBackoffAfter(10.seconds)
          .withMaxRestarts(5)
      )

  def handle(
      ctx: ActorContext[Command],
      log: Logger,
      ssrc: Int,
      queue: SourceQueueWithComplete[Option[ByteString]],
      parent: ActorRef[VoiceHandler.Command]
  ): Behavior[Command] = Behaviors.receiveMessage {
    case SendException(e) => throw e
    case ConnectionDied   => Behaviors.stopped
    case Shutdown =>
      queue.complete()
      Behaviors.same
    case IPDiscoveryResult(VoiceUDPFlow.FoundIP(localAddress, localPort)) =>
      parent ! VoiceHandler.GotLocalIP(localAddress, localPort)
      Behaviors.same
    case SetSecretKey(key) =>
      queue.offer(key)
      Behaviors.same
  }

  sealed trait Command

  case object Shutdown extends Command

  private case class SendException(e: Throwable) extends Command
  private case object ConnectionDied extends Command
  private case class IPDiscoveryResult(foundIP: VoiceUDPFlow.FoundIP) extends Command
  private[voice] case class SetSecretKey(key: Option[ByteString]) extends Command
}
Example 58
Source File: VoiceUDPFlow.scala From AckCord with MIT License | 5 votes |
package ackcord.voice import java.net.InetSocketAddress import java.nio.ByteOrder import scala.concurrent.{Future, Promise} import ackcord.data.{RawSnowflake, UserId} import ackcord.util.UdpConnectedFlow import akka.NotUsed import akka.actor.typed.ActorSystem import akka.stream.scaladsl.{BidiFlow, Concat, Flow, GraphDSL, Keep, Source} import akka.stream.{BidiShape, OverflowStrategy} import akka.util.ByteString object VoiceUDPFlow { val silence = ByteString(0xF8, 0xFF, 0xFE) val SampleRate = 48000 val FrameSize = 960 val FrameTime = 20 def flow[Mat]( remoteAddress: InetSocketAddress, ssrc: Int, serverId: RawSnowflake, userId: UserId, secretKeys: Source[Option[ByteString], Mat] )(implicit system: ActorSystem[Nothing]): Flow[ByteString, AudioAPIMessage.ReceivedData, (Mat, Future[FoundIP])] = NaclBidiFlow .bidiFlow(ssrc, serverId, userId, secretKeys) .atopMat(voiceBidi(ssrc).reversed)(Keep.both) .async .join(Flow[ByteString].buffer(32, OverflowStrategy.backpressure).via(UdpConnectedFlow.flow(remoteAddress))) def voiceBidi(ssrc: Int): BidiFlow[ByteString, ByteString, ByteString, ByteString, Future[FoundIP]] = { implicit val byteOrder: ByteOrder = ByteOrder.BIG_ENDIAN val ipDiscoveryPacket = { val byteBuilder = ByteString.createBuilder byteBuilder.sizeHint(74) byteBuilder.putShort(0x1).putShort(70).putInt(ssrc) byteBuilder.putBytes(new Array[Byte](66)) byteBuilder.result() } val valvePromise = Promise[Unit] val valve = Source.future(valvePromise.future).drop(1).asInstanceOf[Source[ByteString, NotUsed]] val ipDiscoveryFlow = Flow[ByteString] .viaMat(new IPDiscoveryFlow(() => valvePromise.success(())))(Keep.right) BidiFlow .fromGraph(GraphDSL.create(ipDiscoveryFlow) { implicit b => ipDiscovery => import GraphDSL.Implicits._ val voiceIn = b.add(Flow[ByteString]) val ipDiscoverySource = b.add(Source.single(ipDiscoveryPacket) ++ valve) val ipDiscoveryAndThenVoiceData = b.add(Concat[ByteString]()) ipDiscoverySource ~> ipDiscoveryAndThenVoiceData voiceIn ~> ipDiscoveryAndThenVoiceData BidiShape( ipDiscovery.in, ipDiscovery.out, voiceIn.in, ipDiscoveryAndThenVoiceData.out ) }) } case class FoundIP(address: String, port: Int) }
Example 59
Source File: CmdStreams.scala From AckCord with MIT License | 5 votes |
package ackcord.oldcommands import ackcord._ import akka.NotUsed import akka.actor.typed.ActorSystem import akka.stream.scaladsl.{BroadcastHub, Keep, Source} import akka.stream.{ActorAttributes, Supervision} object CmdStreams { def cmdStreams[A]( settings: AbstractCommandSettings, apiMessages: Source[APIMessage, A] )(implicit system: ActorSystem[Nothing]): (A, Source[RawCmdMessage, NotUsed]) = { apiMessages .collect { case APIMessage.MessageCreate(msg, c) => implicit val cache: MemoryCacheSnapshot = c.current CmdHelper.isValidCommand(settings.needMention(msg), msg).map { args => if (args == Nil) NoCmd(msg, c.current) else { settings .getPrefix(args, msg) .fold[RawCmdMessage](NoCmdPrefix(msg, args.head, args.tail, cache)) { case (prefix, remaining) => RawCmd(msg, prefix, remaining.head, remaining.tail.toList, c.current) } } } } .mapConcat(_.toList) .toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both) .addAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider)) .run() } }
Example 60
Source File: CoreCommands.scala From AckCord with MIT License | 5 votes |
package ackcord.oldcommands import ackcord._ import akka.stream.scaladsl.Source object CoreCommands { def create[A]( settings: AbstractCommandSettings, apiMessages: Source[APIMessage, A], requests: Requests ): (A, Commands) = { import requests.system val (materialized, streams) = CmdStreams.cmdStreams(settings, apiMessages) materialized -> Commands(streams, requests) } }
Example 61
Source File: Commands.scala From AckCord with MIT License | 5 votes |
package ackcord.oldcommands

import scala.concurrent.Future

import ackcord.CacheSnapshot
import ackcord.requests.{Requests, SupervisionStreams}
import akka.stream.scaladsl.{Keep, Source}
import akka.{Done, NotUsed}

// The enclosing declaration was truncated in the listing; the class header below is
// reconstructed from the `Commands(streams, requests)` call in Example 60, and sibling
// members (e.g. `subscribeCmdParsed`, used by `subscribe`) remain elided as in the listing.
case class Commands(subscribeRaw: Source[RawCmdMessage, NotUsed], requests: Requests) {

  // ... other members elided ...

  def subscribe[A, Mat, Mat2](
      factory: ParsedCmdFactory[A, Mat]
  )(combine: (Future[Done], Mat) => Mat2): Mat2 =
    SupervisionStreams
      .addLogAndContinueFunction(
        subscribeCmdParsed(factory.refiner)(factory.parser)
          .via(CmdHelper.addErrorHandlingParsed(requests))
          .watchTermination()(Keep.right)
          .toMat(factory.sink(requests))(combine)
          .addAttributes
      )
      .run()
}
Example 62
Source File: LavaplayerSource.scala From AckCord with MIT License | 5 votes |
package ackcord.lavaplayer import scala.concurrent.duration._ import akka.NotUsed import akka.stream.scaladsl.Source import akka.stream.stage.{GraphStage, GraphStageLogic, OutHandler, TimerGraphStageLogicWithLogging} import akka.stream.{Attributes, Outlet, SourceShape} import akka.util.ByteString import com.sedmelluq.discord.lavaplayer.player.AudioPlayer class LavaplayerSource(player: AudioPlayer) extends GraphStage[SourceShape[ByteString]] { val out: Outlet[ByteString] = Outlet("LavaplayerSource.out") override def shape: SourceShape[ByteString] = SourceShape(out) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimerGraphStageLogicWithLogging(shape) with OutHandler { override def onPull(): Unit = tryPushFrame() override protected def onTimer(timerKey: Any): Unit = timerKey match { case "RetryProvide" => tryPushFrame() } def tryPushFrame(): Unit = { val frame = player.provide() if (frame != null) { push(out, ByteString.fromArray(frame.getData)) //log.debug("Sending data") } else { //log.debug("Scheduling attempt to provide frame") scheduleOnce("RetryProvide", 20.millis) } } setHandler(out, this) } } object LavaplayerSource { def source(player: AudioPlayer): Source[ByteString, NotUsed] = Source.fromGraph(new LavaplayerSource(player)) }
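A hypothetical wiring of the source above against lavaplayer's standard player-manager API, assuming Akka 2.6 where the implicit ActorSystem provides the Materializer:

import akka.actor.ActorSystem
import akka.stream.scaladsl.Sink
import com.sedmelluq.discord.lavaplayer.player.DefaultAudioPlayerManager
import com.sedmelluq.discord.lavaplayer.source.AudioSourceManagers

implicit val system: ActorSystem = ActorSystem()

val manager = new DefaultAudioPlayerManager()
AudioSourceManagers.registerRemoteSources(manager) // youtube, soundcloud, http, ...
val player = manager.createPlayer()

// Emits one ByteString per provided audio frame; otherwise idles on the 20 ms retry timer.
LavaplayerSource.source(player).runWith(Sink.ignore)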
Example 63
Source File: EventRegistration.scala From AckCord with MIT License | 5 votes |
package ackcord

import scala.concurrent.Future

import akka.Done
import akka.stream.{KillSwitches, UniqueKillSwitch}
import akka.stream.scaladsl.{Keep, RunnableGraph, Sink, Source}

case class EventRegistration[Mat](materialized: Mat, onDone: Future[Done], killSwitch: UniqueKillSwitch) {
  def stop(): Unit = killSwitch.shutdown()
}

object EventRegistration {
  def toSink[A, M](source: Source[A, M]): RunnableGraph[EventRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).toMat(Sink.ignore) {
      case ((m, killSwitch), done) => EventRegistration(m, done, killSwitch)
    }

  def withRegistration[A, M](source: Source[A, M]): Source[A, EventRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).watchTermination() {
      case ((m, killSwitch), done) => EventRegistration(m, done, killSwitch)
    }
}
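A short sketch of these helpers in use; the stream and names are illustrative, assuming Akka 2.6 implicit materialization from the system:

import akka.actor.ActorSystem
import akka.stream.scaladsl.Source

implicit val system: ActorSystem = ActorSystem("events")

// Attach a kill switch and a completion future to an arbitrary source.
val registration = EventRegistration.toSink(Source(1 to 100)).run()
registration.onDone.foreach(_ => println("Stream finished"))(system.dispatcher)
registration.stop() // shuts the stream down through the embedded kill switch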
Example 64
Source File: StreamInstances.scala From AckCord with MIT License | 5 votes |
package ackcord.util

import akka.NotUsed
import akka.stream.scaladsl.{Flow, Merge, Sink, Source}
import cats.{Alternative, Contravariant, Functor, MonadError, StackSafeMonad}

object StreamInstances {

  type SourceRequest[A] = Source[A, NotUsed]

  implicit val sourceInstance: MonadError[SourceRequest, Throwable] with Alternative[SourceRequest] =
    new MonadError[SourceRequest, Throwable] with Alternative[SourceRequest]
    with StackSafeMonad[SourceRequest] {

      override def empty[A]: SourceRequest[A] = Source.empty[A]
      override def pure[A](x: A): SourceRequest[A] = Source.single(x)

      override def map[A, B](fa: SourceRequest[A])(f: A => B): SourceRequest[B] = fa.map(f)

      override def flatMap[A, B](fa: SourceRequest[A])(f: A => SourceRequest[B]): SourceRequest[B] =
        fa.flatMapConcat[B, NotUsed](f)

      override def product[A, B](fa: SourceRequest[A], fb: SourceRequest[B]): SourceRequest[(A, B)] =
        fa.zip(fb)

      override def combineK[A](x: SourceRequest[A], y: SourceRequest[A]): SourceRequest[A] =
        Source.combine(x, y)(Merge.apply(_))

      override def raiseError[A](e: Throwable): SourceRequest[A] = Source.failed(e)

      override def handleErrorWith[A](fa: SourceRequest[A])(
          f: Throwable => SourceRequest[A]): SourceRequest[A] =
        fa.recoverWithRetries[A](
          5,
          { case e: Throwable => f(e).mapMaterializedValue(_ => NotUsed) }
        )
    }

  implicit def flowInstance[In, Mat]: Functor[Flow[In, *, Mat]] = new Functor[Flow[In, *, Mat]] {
    override def map[A, B](fa: Flow[In, A, Mat])(f: A => B): Flow[In, B, Mat] = fa.map(f)
  }

  implicit def sinkInstance[Mat]: Contravariant[Sink[*, Mat]] = new Contravariant[Sink[*, Mat]] {
    override def contramap[A, B](fa: Sink[A, Mat])(f: B => A): Sink[B, Mat] = fa.contramap(f)
  }

  // Enables `for`-comprehension syntax on Source; this can be brittle.
  implicit class SourceFlatmap[A, M1](private val source: Source[A, M1]) extends AnyVal {
    def flatMap[B, M2](f: A => Source[B, M2]): Source[B, M1] = source.flatMapConcat(f)
  }
}
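With the instances above in scope, cats syntax becomes available on Source[A, NotUsed]; a small sketch, assuming cats and the kind-projector plugin on the classpath:

import akka.NotUsed
import akka.stream.scaladsl.Source
import cats.syntax.all._
import StreamInstances._

val a: SourceRequest[Int] = Source.single(1)
val b: SourceRequest[Int] = Source.single(2)

val pairs: SourceRequest[(Int, Int)] = (a, b).tupled // zips via `product`
val both: SourceRequest[Int] = a <+> b               // merges via `combineK`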
Example 65
Source File: Streamable.scala From AckCord with MIT License | 5 votes |
package ackcord.util

import scala.concurrent.Future

import akka.NotUsed
import akka.stream.scaladsl.Source
import cats.{Foldable, Id}
import cats.data.OptionT

trait Streamable[F[_]] {
  def toSource[A](fa: F[A]): Source[A, NotUsed]

  def optionToSource[A](opt: OptionT[F, A]): Source[A, NotUsed] =
    toSource(opt.value).mapConcat(_.toList)
}

object Streamable {
  def apply[F[_]](implicit F: Streamable[F]): Streamable[F] = F

  type OptionTRequest[A] = OptionT[Future, A]

  implicit val idStreamable: Streamable[Id] = new Streamable[Id] {
    override def toSource[A](fa: Id[A]): Source[A, NotUsed] = Source.single(fa)
    override def optionToSource[A](opt: OptionT[Id, A]): Source[A, NotUsed] =
      Source(opt.value.toList)
  }

  implicit val futureStreamable: Streamable[Future] = new Streamable[Future] {
    override def toSource[A](fa: Future[A]): Source[A, NotUsed] = Source.future(fa)
  }

  implicit def futureFoldableStreamable[F[_]: Foldable]: Streamable[λ[A => Future[F[A]]]] =
    new Streamable[λ[A => Future[F[A]]]] {
      override def toSource[A](fa: Future[F[A]]): Source[A, NotUsed] = {
        import cats.syntax.all._
        Source.future(fa).mapConcat(_.toList)
      }
    }

  implicit val futureOptionTStreamable: Streamable[OptionT[Future, *]] =
    new Streamable[OptionT[Future, *]] {
      override def toSource[A](fa: OptionT[Future, A]): Source[A, NotUsed] =
        Source.future(fa.value).mapConcat(_.toList)
    }

  implicit val sourceStreamable: Streamable[Source[*, NotUsed]] =
    new Streamable[Source[*, NotUsed]] {
      override def toSource[A](fa: Source[A, NotUsed]): Source[A, NotUsed] = fa
    }
}
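A brief sketch of Streamable in use, lifting an OptionT[Future, _] into a Source; the values are illustrative:

import scala.concurrent.Future
import akka.NotUsed
import akka.stream.scaladsl.Source
import cats.data.OptionT

val maybeUser: OptionT[Future, String] = OptionT(Future.successful(Option("ackcord")))

// An empty option becomes an empty source; a present value a single-element source.
val userSource: Source[String, NotUsed] = Streamable[OptionT[Future, *]].toSource(maybeUser)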
Example 66
Source File: SupervisionStreams.scala From AckCord with MIT License | 5 votes |
package ackcord.requests

import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{Flow, RunnableGraph, Sink, Source}
import akka.stream.{ActorAttributes, Attributes, Supervision}

object SupervisionStreams {

  def addLogAndContinueFunction[G](addAttributes: Attributes => G)(
      implicit system: ActorSystem[Nothing]
  ): G =
    addAttributes(ActorAttributes.supervisionStrategy {
      case _: RetryFailedRequestException[_] => Supervision.Stop
      case e =>
        system.log.error("Unhandled exception in stream", e)
        Supervision.Resume
    })

  def logAndContinue[M](graph: RunnableGraph[M])(
      implicit system: ActorSystem[Nothing]): RunnableGraph[M] =
    addLogAndContinueFunction(graph.addAttributes)

  def logAndContinue[Out, Mat](source: Source[Out, Mat])(
      implicit system: ActorSystem[Nothing]): Source[Out, Mat] =
    addLogAndContinueFunction(source.addAttributes)

  def logAndContinue[In, Out, Mat](
      flow: Flow[In, Out, Mat]
  )(implicit system: ActorSystem[Nothing]): Flow[In, Out, Mat] =
    addLogAndContinueFunction(flow.addAttributes)

  def logAndContinue[In, Mat](sink: Sink[In, Mat])(
      implicit system: ActorSystem[Nothing]): Sink[In, Mat] =
    addLogAndContinueFunction(sink.addAttributes)
}
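A sketch of wrapping a stream so that failures are logged and skipped rather than tearing the stream down; the failing element is contrived:

import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{Sink, Source}

def printResilient(implicit system: ActorSystem[Nothing]) =
  SupervisionStreams
    .logAndContinue(Source(1 to 10).map(i => if (i == 5) throw new Exception("boom") else i))
    .runWith(Sink.foreach(println)) // 5 is dropped; the exception is logged, the stream resumes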
Example 67
Source File: CacheStreams.scala From AckCord with MIT License | 5 votes |
package ackcord

import scala.collection.mutable

import ackcord.cachehandlers.CacheSnapshotBuilder
import ackcord.gateway.GatewayEvent.ReadyData
import ackcord.gateway.GatewayMessage
import ackcord.requests.SupervisionStreams
import akka.NotUsed
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{BroadcastHub, Flow, Keep, MergeHub, Sink, Source}
import org.slf4j.Logger

object CacheStreams {

  def cacheUpdater(
      cacheProcessor: MemoryCacheSnapshot.CacheProcessor
  )(implicit system: ActorSystem[Nothing]): Flow[CacheEvent, (CacheEvent, CacheState), NotUsed] =
    Flow[CacheEvent].statefulMapConcat { () =>
      var state: CacheState = null
      implicit val log: Logger = system.log

      //We only handle events when we are ready to, and we have received the ready event.
      def isReady: Boolean = state != null

      {
        case readyEvent @ APIMessageCacheUpdate(_: ReadyData, _, _, _, _) =>
          val builder = new CacheSnapshotBuilder(
            0,
            null, //The event will populate this
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            cacheProcessor
          )

          readyEvent.process(builder)

          val snapshot = builder.toImmutable
          state = CacheState(snapshot, snapshot)
          List(readyEvent -> state)

        case handlerEvent: CacheEvent if isReady =>
          val builder = CacheSnapshotBuilder(state.current)
          handlerEvent.process(builder)

          state = state.update(builder.toImmutable)
          List(handlerEvent -> state)

        case _ if !isReady =>
          log.error("Received event before ready")
          Nil
      }
    }
}
Example 68
Source File: ClientUtil.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.quickstart.iou

import java.util.UUID

import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.{Done, NotUsed}
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.refinements.ApiTypes.{ApplicationId, WorkflowId}
import com.daml.ledger.api.v1.command_submission_service.SubmitRequest
import com.daml.ledger.api.v1.commands.{Command, Commands}
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset
import com.daml.ledger.api.v1.transaction.Transaction
import com.daml.ledger.api.v1.transaction_filter.{Filters, TransactionFilter}
import com.daml.ledger.client.LedgerClient
import com.daml.quickstart.iou.FutureUtil.toFuture
import com.google.protobuf.empty.Empty

import scala.concurrent.{ExecutionContext, Future}

class ClientUtil(
    client: LedgerClient,
    applicationId: ApplicationId,
) {

  import ClientUtil._

  private val ledgerId = client.ledgerId
  private val packageClient = client.packageClient
  private val commandClient = client.commandClient
  private val transactionClient = client.transactionClient

  def listPackages(implicit ec: ExecutionContext): Future[Set[String]] =
    packageClient.listPackages().map(_.packageIds.toSet)

  def ledgerEnd(implicit ec: ExecutionContext): Future[LedgerOffset] =
    transactionClient.getLedgerEnd().flatMap(response => toFuture(response.offset))

  def submitCommand(party: String, workflowId: WorkflowId, cmd: Command.Command): Future[Empty] = {
    val commands = Commands(
      ledgerId = LedgerId.unwrap(ledgerId),
      workflowId = WorkflowId.unwrap(workflowId),
      applicationId = ApplicationId.unwrap(applicationId),
      commandId = uniqueId,
      party = party,
      commands = Seq(Command(cmd)),
    )

    commandClient.submitSingleCommand(SubmitRequest(Some(commands), None))
  }

  def nextTransaction(party: String, offset: LedgerOffset)(
      implicit mat: Materializer): Future[Transaction] =
    transactionClient
      .getTransactions(offset, None, transactionFilter(party))
      .take(1L)
      .runWith(Sink.head)

  def subscribe(party: String, offset: LedgerOffset, max: Option[Long])(f: Transaction => Unit)(
      implicit mat: Materializer): Future[Done] = {
    val source: Source[Transaction, NotUsed] =
      transactionClient.getTransactions(offset, None, transactionFilter(party))
    max.fold(source)(n => source.take(n)) runForeach f
  }

  override lazy val toString: String = s"ClientUtil{ledgerId=$ledgerId}"
}

object ClientUtil {
  def transactionFilter(parties: String*): TransactionFilter =
    TransactionFilter(parties.map((_, Filters.defaultInstance)).toMap)

  def uniqueId: String = UUID.randomUUID.toString

  def workflowIdFromParty(p: String): WorkflowId =
    WorkflowId(s"$p Workflow")
}
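A hedged usage sketch for subscribe; `client` (an already-connected LedgerClient) and `applicationId` are assumed to exist, and the offset construction follows the generated ledger-offset API:

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset

implicit val system: ActorSystem = ActorSystem("quickstart")
implicit val mat: Materializer = Materializer(system)

val util = new ClientUtil(client, applicationId) // `client` and `applicationId` assumed

// Print the first ten transactions visible to Alice, starting from the ledger beginning.
val begin = LedgerOffset(LedgerOffset.Value.Boundary(LedgerOffset.LedgerBoundary.LEDGER_BEGIN))
util.subscribe("Alice", begin, Some(10L)) { tx =>
  println(s"Transaction: ${tx.transactionId}")
}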
Example 69
Source File: ClientUtil.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.quickstart.iou

import java.util.UUID

import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.{Done, NotUsed}
import com.daml.ledger.api.refinements.ApiTypes.{ApplicationId, WorkflowId}
import com.daml.ledger.api.v1.command_submission_service.SubmitRequest
import com.daml.ledger.api.v1.commands.Commands
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset
import com.daml.ledger.api.v1.transaction.Transaction
import com.daml.ledger.api.v1.transaction_filter.{Filters, TransactionFilter}
import com.daml.ledger.client.LedgerClient
import com.daml.ledger.client.binding.{Primitive => P}
import com.daml.quickstart.iou.FutureUtil.toFuture
import com.google.protobuf.empty.Empty
import scalaz.syntax.tag._

import scala.concurrent.{ExecutionContext, Future}

class ClientUtil(
    client: LedgerClient,
    applicationId: ApplicationId,
) {

  import ClientUtil._

  private val ledgerId = client.ledgerId
  private val commandClient = client.commandClient
  private val transactionClient = client.transactionClient

  def ledgerEnd(implicit ec: ExecutionContext): Future[LedgerOffset] =
    transactionClient.getLedgerEnd().flatMap(response => toFuture(response.offset))

  def submitCommand[T](
      sender: P.Party,
      workflowId: WorkflowId,
      command: P.Update[P.ContractId[T]]): Future[Empty] = {
    commandClient.submitSingleCommand(submitRequest(sender, workflowId, command))
  }

  def submitRequest[T](
      party: P.Party,
      workflowId: WorkflowId,
      seq: P.Update[P.ContractId[T]]*): SubmitRequest = {
    val commands = Commands(
      ledgerId = ledgerId.unwrap,
      workflowId = WorkflowId.unwrap(workflowId),
      applicationId = ApplicationId.unwrap(applicationId),
      commandId = uniqueId,
      party = P.Party.unwrap(party),
      commands = seq.map(_.command)
    )
    SubmitRequest(Some(commands), None)
  }

  def nextTransaction(party: P.Party, offset: LedgerOffset)(
      implicit mat: Materializer): Future[Transaction] =
    transactionClient
      .getTransactions(offset, None, transactionFilter(party))
      .take(1L)
      .runWith(Sink.head)

  def subscribe(party: P.Party, offset: LedgerOffset, max: Option[Long])(f: Transaction => Unit)(
      implicit mat: Materializer): Future[Done] = {
    val source: Source[Transaction, NotUsed] =
      transactionClient.getTransactions(offset, None, transactionFilter(party))
    max.fold(source)(n => source.take(n)) runForeach f
  }

  override lazy val toString: String = s"ClientUtil{ledgerId=$ledgerId}"
}

object ClientUtil {
  def transactionFilter(ps: P.Party*): TransactionFilter =
    TransactionFilter(P.Party.unsubst(ps).map((_, Filters.defaultInstance)).toMap)

  def uniqueId: String = UUID.randomUUID.toString

  def workflowIdFromParty(p: P.Party): WorkflowId =
    WorkflowId(s"${P.Party.unwrap(p): String} Workflow")
}
Example 71
Source File: IndexTransactionsService.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.participant.state.index.v2

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.daml.lf.data.Ref
import com.daml.ledger.api.domain.{LedgerOffset, TransactionFilter, TransactionId}
import com.daml.ledger.api.v1.transaction_service.{
  GetFlatTransactionResponse,
  GetTransactionResponse,
  GetTransactionTreesResponse,
  GetTransactionsResponse
}

import scala.concurrent.Future

trait IndexTransactionsService extends LedgerEndService {
  def transactions(
      begin: LedgerOffset,
      endAt: Option[LedgerOffset],
      filter: TransactionFilter,
      verbose: Boolean,
  ): Source[GetTransactionsResponse, NotUsed]

  def transactionTrees(
      begin: LedgerOffset,
      endAt: Option[LedgerOffset],
      filter: TransactionFilter,
      verbose: Boolean,
  ): Source[GetTransactionTreesResponse, NotUsed]

  def getTransactionById(
      transactionId: TransactionId,
      requestingParties: Set[Ref.Party],
  ): Future[Option[GetFlatTransactionResponse]]

  def getTransactionTreeById(
      transactionId: TransactionId,
      requestingParties: Set[Ref.Party],
  ): Future[Option[GetTransactionResponse]]
}
Example 72
Source File: package.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.participant.state.index

import java.time.{Duration, Instant}

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.daml.lf.data.Ref
import com.daml.lf.value.Value
import com.daml.ledger.api.domain._

package object v2 {

  object AcsUpdateEvent {
    final case class Create(
        transactionId: TransactionId,
        eventId: EventId,
        contractId: Value.ContractId,
        templateId: Ref.Identifier,
        argument: Value.VersionedValue[Value.ContractId],
        // TODO(JM,SM): understand witnessing parties
        stakeholders: Set[Ref.Party],
        contractKey: Option[Value.VersionedValue[Value.ContractId]],
        signatories: Set[Ref.Party],
        observers: Set[Ref.Party],
        agreementText: String
    )
  }

  final case class ActiveContractSetSnapshot(
      takenAt: LedgerOffset.Absolute,
      activeContracts: Source[(Option[WorkflowId], AcsUpdateEvent.Create), NotUsed])

  final case class CommandDeduplicationDuplicate(deduplicateUntil: Instant)
      extends CommandDeduplicationResult
}
Example 73
Source File: TimedReadService.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.participant.state.v1.metrics

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.daml.ledger.api.health.HealthStatus
import com.daml.ledger.participant.state.v1.{LedgerInitialConditions, Offset, ReadService, Update}
import com.daml.metrics.{Metrics, Timed}

final class TimedReadService(delegate: ReadService, metrics: Metrics) extends ReadService {

  override def getLedgerInitialConditions(): Source[LedgerInitialConditions, NotUsed] =
    Timed.source(
      metrics.daml.services.read.getLedgerInitialConditions,
      delegate.getLedgerInitialConditions())

  override def stateUpdates(beginAfter: Option[Offset]): Source[(Offset, Update), NotUsed] =
    Timed.source(metrics.daml.services.read.stateUpdates, delegate.stateUpdates(beginAfter))

  override def currentHealth(): HealthStatus =
    delegate.currentHealth()
}
Example 74
Source File: InMemoryLedgerReader.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.on.memory

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.daml.ledger.api.health.{HealthStatus, Healthy}
import com.daml.ledger.participant.state.kvutils.KVOffset
import com.daml.ledger.participant.state.kvutils.api.{LedgerReader, LedgerRecord}
import com.daml.ledger.participant.state.v1.{LedgerId, Offset}
import com.daml.metrics.{Metrics, Timed}
import com.daml.platform.akkastreams.dispatcher.Dispatcher
import com.daml.platform.akkastreams.dispatcher.SubSource.RangeSource

class InMemoryLedgerReader(
    override val ledgerId: LedgerId,
    dispatcher: Dispatcher[Index],
    state: InMemoryState,
    metrics: Metrics)
    extends LedgerReader {

  override def events(startExclusive: Option[Offset]): Source[LedgerRecord, NotUsed] =
    dispatcher
      .startingAt(
        startExclusive
          .map(KVOffset.highestIndex(_).toInt)
          .getOrElse(StartIndex),
        RangeSource((startExclusive, endInclusive) =>
          Source.fromIterator(() => {
            Timed.value(
              metrics.daml.ledger.log.read,
              state
                .readLog(
                  _.view.zipWithIndex.map(_.swap).slice(startExclusive + 1, endInclusive + 1))
                .iterator)
          }))
      )
      .map { case (_, updates) => updates }

  override def currentHealth(): HealthStatus = Healthy
}
Example 75
Source File: Timed.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.metrics

import java.util.concurrent.CompletionStage

import akka.Done
import akka.stream.scaladsl.{Keep, Source}
import com.codahale.metrics.{Counter, Timer}
import com.daml.dec.DirectExecutionContext

import scala.concurrent.Future

object Timed {

  def value[T](timer: Timer, value: => T): T =
    timer.time(() => value)

  def completionStage[T](timer: Timer, future: => CompletionStage[T]): CompletionStage[T] = {
    val ctx = timer.time()
    future.whenComplete { (_, _) =>
      ctx.stop()
      ()
    }
  }

  def future[T](timer: Timer, future: => Future[T]): Future[T] = {
    val ctx = timer.time()
    val result = future
    result.onComplete(_ => ctx.stop())(DirectExecutionContext)
    result
  }

  def trackedFuture[T](counter: Counter, future: => Future[T]): Future[T] = {
    counter.inc()
    future.andThen { case _ => counter.dec() }(DirectExecutionContext)
  }

  def timedAndTrackedFuture[T](timer: Timer, counter: Counter, future: => Future[T]): Future[T] = {
    Timed.future(timer, trackedFuture(counter, future))
  }

  def source[Out, Mat](timer: Timer, source: => Source[Out, Mat]): Source[Out, Mat] = {
    val ctx = timer.time()
    source
      .watchTermination()(Keep.both[Mat, Future[Done]])
      .mapMaterializedValue {
        case (mat, done) =>
          done.onComplete(_ => ctx.stop())(DirectExecutionContext)
          mat
      }
  }
}
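An illustrative use of Timed.source with a Dropwizard registry; the metric name is arbitrary:

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import com.codahale.metrics.MetricRegistry

implicit val system: ActorSystem = ActorSystem("metrics")
val timer = new MetricRegistry().timer("stream.duration")

// The timer measures from this call until the wrapped stream terminates.
Timed.source(timer, Source(1 to 1000)).runWith(Sink.ignore)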
Example 76
Source File: DummyCommands.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.perf

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.daml.ledger.api.domain
import com.daml.ledger.api.v1.command_service.SubmitAndWaitRequest
import com.daml.platform.sandbox.services.TestCommands

trait DummyCommands extends TestCommands {

  protected def dummyCreates(ledgerId: domain.LedgerId): Source[SubmitAndWaitRequest, NotUsed] = {
    val templates = templateIds
    Source
      .unfold(0) { i =>
        val next = i + 1
        Some((next, next))
      }
      .map(
        i =>
          buildRequest(
            ledgerId = ledgerId,
            commandId = s"command-id-create-$i",
            commands = Seq(createWithOperator(templates.dummy)),
            appId = "app1"
          ).toSync)
  }
}
Example 77
Source File: CommandCompletionSource.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.commands

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.ledger.api.v1.command_completion_service.{
  CompletionStreamRequest,
  CompletionStreamResponse
}
import io.grpc.stub.StreamObserver

import scala.collection.{breakOut, immutable}

object CommandCompletionSource {

  def toStreamElements(
      response: CompletionStreamResponse): immutable.Iterable[CompletionStreamElement] = {

    val completions: Vector[CompletionStreamElement] =
      response.completions.map(CompletionStreamElement.CompletionElement)(breakOut)
    response.checkpoint.fold(completions)(cp =>
      completions :+ CompletionStreamElement.CheckpointElement(cp))
  }

  def apply(
      request: CompletionStreamRequest,
      stub: (CompletionStreamRequest, StreamObserver[CompletionStreamResponse]) => Unit)(
      implicit esf: ExecutionSequencerFactory): Source[CompletionStreamElement, NotUsed] = {
    ClientAdapter
      .serverStreaming(request, stub)
      .mapConcat(toStreamElements)
      .log(
        "completion at client", {
          case CompletionStreamElement.CheckpointElement(c) => s"Checkpoint ${c.offset}"
          case CompletionStreamElement.CompletionElement(c) => s"Completion $c"
        }
      )
  }
}
Example 78
Source File: LedgerConfigurationClient.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.configuration

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.ledger.api.domain
import com.daml.ledger.api.v1.ledger_configuration_service.{
  GetLedgerConfigurationRequest,
  LedgerConfiguration
}
import com.daml.ledger.api.v1.ledger_configuration_service.LedgerConfigurationServiceGrpc.LedgerConfigurationServiceStub
import com.daml.ledger.client.LedgerClient
import scalaz.syntax.tag._

final class LedgerConfigurationClient(
    ledgerId: domain.LedgerId,
    service: LedgerConfigurationServiceStub)(implicit esf: ExecutionSequencerFactory) {

  def getLedgerConfiguration(token: Option[String] = None): Source[LedgerConfiguration, NotUsed] =
    ClientAdapter
      .serverStreaming(
        GetLedgerConfigurationRequest(ledgerId.unwrap),
        LedgerClient.stub(service, token).getLedgerConfiguration)
      .map(_.ledgerConfiguration.getOrElse(sys.error("No LedgerConfiguration in response.")))
}
Example 79
Source File: TransactionSource.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.transactions

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.ledger.api.v1.transaction.{Transaction, TransactionTree}
import com.daml.ledger.api.v1.transaction_service.{
  GetTransactionTreesResponse,
  GetTransactionsRequest,
  GetTransactionsResponse
}
import com.daml.util.akkastreams.ImmutableIterable
import io.grpc.stub.StreamObserver

object TransactionSource {

  def trees(
      stub: (GetTransactionsRequest, StreamObserver[GetTransactionTreesResponse]) => Unit,
      request: GetTransactionsRequest)(
      implicit esf: ExecutionSequencerFactory): Source[TransactionTree, NotUsed] = {
    ClientAdapter
      .serverStreaming(request, stub)
      .mapConcat(batch => ImmutableIterable(batch.transactions))
  }

  def flat(
      stub: (GetTransactionsRequest, StreamObserver[GetTransactionsResponse]) => Unit,
      request: GetTransactionsRequest)(
      implicit esf: ExecutionSequencerFactory): Source[Transaction, NotUsed] = {
    ClientAdapter
      .serverStreaming(request, stub)
      .mapConcat(batch => ImmutableIterable(batch.transactions))
  }
}
Example 80
Source File: TransactionClient.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.transactions

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset
import com.daml.ledger.api.v1.transaction.{Transaction, TransactionTree}
import com.daml.ledger.api.v1.transaction_filter.TransactionFilter
import com.daml.ledger.api.v1.transaction_service.TransactionServiceGrpc.TransactionServiceStub
import com.daml.ledger.api.v1.transaction_service._
import com.daml.ledger.client.LedgerClient
import scalaz.syntax.tag._

import scala.concurrent.{ExecutionContext, Future}

final class TransactionClient(ledgerId: LedgerId, service: TransactionServiceStub)(
    implicit esf: ExecutionSequencerFactory) {

  def getTransactionTrees(
      start: LedgerOffset,
      end: Option[LedgerOffset],
      transactionFilter: TransactionFilter,
      verbose: Boolean = false,
      token: Option[String] = None
  ): Source[TransactionTree, NotUsed] =
    TransactionSource.trees(
      LedgerClient.stub(service, token).getTransactionTrees,
      GetTransactionsRequest(ledgerId.unwrap, Some(start), end, Some(transactionFilter), verbose))

  def getTransactions(
      start: LedgerOffset,
      end: Option[LedgerOffset],
      transactionFilter: TransactionFilter,
      verbose: Boolean = false,
      token: Option[String] = None
  ): Source[Transaction, NotUsed] =
    TransactionSource.flat(
      LedgerClient.stub(service, token).getTransactions,
      GetTransactionsRequest(ledgerId.unwrap, Some(start), end, Some(transactionFilter), verbose))

  def getTransactionById(transactionId: String, parties: Seq[String], token: Option[String] = None)(
      implicit ec: ExecutionContext): Future[GetTransactionResponse] =
    LedgerClient
      .stub(service, token)
      .getTransactionById(GetTransactionByIdRequest(ledgerId.unwrap, transactionId, parties))

  def getTransactionByEventId(eventId: String, parties: Seq[String], token: Option[String] = None)(
      implicit ec: ExecutionContext): Future[GetTransactionResponse] =
    LedgerClient
      .stub(service, token)
      .getTransactionByEventId(GetTransactionByEventIdRequest(ledgerId.unwrap, eventId, parties))

  def getFlatTransactionById(
      transactionId: String,
      parties: Seq[String],
      token: Option[String] = None)(
      implicit ec: ExecutionContext): Future[GetFlatTransactionResponse] =
    LedgerClient
      .stub(service, token)
      .getFlatTransactionById(GetTransactionByIdRequest(ledgerId.unwrap, transactionId, parties))

  def getFlatTransactionByEventId(
      eventId: String,
      parties: Seq[String],
      token: Option[String] = None)(
      implicit ec: ExecutionContext): Future[GetFlatTransactionResponse] =
    LedgerClient
      .stub(service, token)
      .getFlatTransactionByEventId(
        GetTransactionByEventIdRequest(ledgerId.unwrap, eventId, parties))

  def getLedgerEnd(token: Option[String] = None): Future[GetLedgerEndResponse] =
    LedgerClient.stub(service, token).getLedgerEnd(GetLedgerEndRequest(ledgerId.unwrap))

}
Example 81
Source File: ActiveContractSetClient.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.acs

import akka.NotUsed
import akka.stream.scaladsl.{Keep, Source}
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.v1.active_contracts_service.ActiveContractsServiceGrpc.ActiveContractsServiceStub
import com.daml.ledger.api.v1.active_contracts_service.{
  GetActiveContractsRequest,
  GetActiveContractsResponse
}
import com.daml.ledger.api.v1.transaction_filter.TransactionFilter
import com.daml.ledger.client.LedgerClient
import com.daml.util.akkastreams.ExtractMaterializedValue
import scalaz.syntax.tag._

import scala.concurrent.Future

object ActiveContractSetClient {

  private val extractOffset =
    new ExtractMaterializedValue[GetActiveContractsResponse, String](r =>
      if (r.offset.nonEmpty) Some(r.offset) else None)

}

final class ActiveContractSetClient(ledgerId: LedgerId, service: ActiveContractsServiceStub)(
    implicit esf: ExecutionSequencerFactory) {

  import ActiveContractSetClient.extractOffset

  private def request(filter: TransactionFilter, verbose: Boolean) =
    GetActiveContractsRequest(ledgerId.unwrap, Some(filter), verbose)

  private def activeContractSource(
      request: GetActiveContractsRequest,
      token: Option[String]): Source[GetActiveContractsResponse, NotUsed] =
    ClientAdapter.serverStreaming(request, LedgerClient.stub(service, token).getActiveContracts)

  def getActiveContracts(
      filter: TransactionFilter,
      verbose: Boolean = false,
      token: Option[String] = None): Source[GetActiveContractsResponse, Future[String]] =
    activeContractSource(request(filter, verbose), token).viaMat(extractOffset)(Keep.right)

}
Example 82
Source File: ExtractSingleMaterializedValueTest.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.util.akkastreams

import akka.stream.scaladsl.{Keep, Sink, Source}
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{Matchers, WordSpec}

import scala.util.Random

class ExtractSingleMaterializedValueTest
    extends WordSpec
    with Matchers
    with ScalaFutures
    with AkkaBeforeAndAfterAll {

  private val discriminator = { i: Int =>
    if (i < 0) Some(i) else None
  }

  private val elemsThatPassThrough = 0.to(10).toVector

  ExtractMaterializedValue.getClass.getSimpleName when {

    "there's a single valid value" should {
      "extract it" in {
        val elemToExtract = -1

        val elements = elemToExtract +: elemsThatPassThrough
        val (extractedF, restF) = processElements(Random.shuffle(elements))

        whenReady(extractedF)(_ shouldEqual elemToExtract)
        whenReady(restF)(_ should contain theSameElementsAs elements)
      }
    }

    "there are multiple valid values" should {
      "extract the first matching element" in {
        val elemToExtract = -1
        val otherCandidateShuffledIn = -2

        val elements = elemToExtract +: Random.shuffle(
          otherCandidateShuffledIn +: elemsThatPassThrough)
        val (extractedF, restF) = processElements(elements)

        whenReady(extractedF)(_ shouldEqual elemToExtract)
        whenReady(restF)(_ should contain theSameElementsAs elements)
      }
    }

    "there are no valid values" should {
      "fail the materialized future, but let the stream continue otherwise" in {
        val (extractedF, restF) = processElements(Random.shuffle(elemsThatPassThrough))

        whenReady(extractedF.failed)(_ shouldBe a[RuntimeException])
        whenReady(restF)(_.sorted shouldEqual elemsThatPassThrough)
      }
    }
  }

  private def processElements(elements: Iterable[Int]) = {
    Source
      .fromIterator(() => elements.iterator)
      .viaMat(ExtractMaterializedValue(discriminator))(Keep.right)
      .toMat(Sink.seq)(Keep.both)
      .run()
  }
}
Example 83
Source File: JdbcIndex.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.index

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.participant.state.index.v2
import com.daml.ledger.participant.state.index.v2.IndexService
import com.daml.ledger.participant.state.v1.{Configuration, ParticipantId}
import com.daml.logging.LoggingContext
import com.daml.metrics.Metrics
import com.daml.platform.configuration.ServerRole
import com.daml.platform.store.dao.events.LfValueTranslation
import com.daml.resources.ResourceOwner

object JdbcIndex {
  def owner(
      serverRole: ServerRole,
      initialConfig: Configuration,
      ledgerId: LedgerId,
      participantId: ParticipantId,
      jdbcUrl: String,
      eventsPageSize: Int,
      metrics: Metrics,
      lfValueTranslationCache: LfValueTranslation.Cache,
  )(implicit mat: Materializer, logCtx: LoggingContext): ResourceOwner[IndexService] =
    new ReadOnlySqlLedger.Owner(
      serverRole,
      jdbcUrl,
      ledgerId,
      eventsPageSize,
      metrics,
      lfValueTranslationCache,
    ).map { ledger =>
      new LedgerBackedIndexService(MeteredReadOnlyLedger(ledger, metrics), participantId) {
        override def getLedgerConfiguration(): Source[v2.LedgerConfiguration, NotUsed] =
          // FIXME(JM): The indexer should on start set the default configuration.
          Source.single(v2.LedgerConfiguration(initialConfig.maxDeduplicationTime))
      }
    }
}
Example 84
Source File: LedgerEntries.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.stores.ledger.inmemory

import java.util.concurrent.atomic.AtomicReference

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.daml.ledger.participant.state.v1.Offset
import com.daml.lf.data.Ref
import com.daml.platform.akkastreams.dispatcher.Dispatcher
import com.daml.platform.akkastreams.dispatcher.SubSource.RangeSource
import org.slf4j.LoggerFactory
import com.daml.platform.ApiOffset.ApiOffsetConverter
import com.daml.platform.sandbox.stores.ledger.SandboxOffset

import scala.collection.immutable.TreeMap

private[ledger] class LedgerEntries[T](identify: T => String) {

  private val logger = LoggerFactory.getLogger(this.getClass)

  private case class Entries(ledgerEnd: Offset, items: TreeMap[Offset, T])

  // Tuple of (ledger end cursor, ledger map). There is never an entry for the initial cursor. End is inclusive.
  private val state = new AtomicReference(Entries(ledgerBeginning, TreeMap.empty))

  private def store(item: T): Offset = {
    val Entries(newOffset, _) = state.updateAndGet({
      case Entries(ledgerEnd, ledger) =>
        val newEnd = SandboxOffset.toOffset(SandboxOffset.fromOffset(ledgerEnd) + 1)
        Entries(newEnd, ledger + (newEnd -> item))
    })
    if (logger.isTraceEnabled())
      logger.trace("Recording `{}` at offset `{}`", identify(item): Any, newOffset.toApiString: Any)
    newOffset
  }

  def incrementOffset(increment: Int): Offset = {
    val Entries(newOffset, _) = state.updateAndGet({
      case Entries(ledgerEnd, ledger) =>
        val newEnd = SandboxOffset.toOffset(SandboxOffset.fromOffset(ledgerEnd) + increment)
        Entries(newEnd, ledger)
    })
    if (logger.isTraceEnabled())
      logger.trace("Bumping offset to `{}`", newOffset.toApiString)
    newOffset
  }

  private val dispatcher = Dispatcher[Offset]("inmemory-ledger", Offset.beforeBegin, ledgerEnd)

  def getSource(
      startExclusive: Option[Offset],
      endInclusive: Option[Offset]): Source[(Offset, T), NotUsed] =
    dispatcher.startingAt(
      startExclusive.getOrElse(ledgerBeginning),
      RangeSource(
        (exclusiveStart, inclusiveEnd) =>
          Source[(Offset, T)](
            state.get().items.from(exclusiveStart).filter(_._1 > exclusiveStart).to(inclusiveEnd)),
      ),
      endInclusive
    )

  def publish(item: T): Offset = {
    val newHead = store(item)
    dispatcher.signalNewHead(newHead)
    newHead
  }

  def ledgerBeginning: Offset = SandboxOffset.toOffset(0)

  def items = state.get().items.iterator

  def ledgerEnd: Offset = state.get().ledgerEnd

  def nextTransactionId: Ref.LedgerString =
    Ref.LedgerString.assertFromString((SandboxOffset.fromOffset(ledgerEnd) + 1).toString)
}
Example 85
Source File: ApiActiveContractsService.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.ledger.participant.state.index.v2.{IndexActiveContractsService => ACSBackend}
import com.daml.dec.DirectExecutionContext
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.v1.active_contracts_service.ActiveContractsServiceGrpc.ActiveContractsService
import com.daml.ledger.api.v1.active_contracts_service._
import com.daml.ledger.api.validation.TransactionFilterValidator
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import com.daml.platform.api.grpc.GrpcApiService
import com.daml.platform.server.api.validation.ActiveContractsServiceValidation
import io.grpc.{BindableService, ServerServiceDefinition}

import scala.concurrent.ExecutionContext

final class ApiActiveContractsService private (
    backend: ACSBackend,
)(
    implicit executionContext: ExecutionContext,
    protected val mat: Materializer,
    protected val esf: ExecutionSequencerFactory,
    logCtx: LoggingContext,
) extends ActiveContractsServiceAkkaGrpc
    with GrpcApiService {

  private val logger = ContextualizedLogger.get(this.getClass)

  override protected def getActiveContractsSource(
      request: GetActiveContractsRequest): Source[GetActiveContractsResponse, NotUsed] = {
    logger.trace("Serving an Active Contracts request...")

    TransactionFilterValidator
      .validate(request.getFilter, "filter")
      .fold(Source.failed, backend.getActiveContracts(_, request.verbose))
      .via(logger.logErrorsOnStream)
  }

  override def bindService(): ServerServiceDefinition =
    ActiveContractsServiceGrpc.bindService(this, DirectExecutionContext)
}

object ApiActiveContractsService {

  def create(ledgerId: LedgerId, backend: ACSBackend)(
      implicit ec: ExecutionContext,
      mat: Materializer,
      esf: ExecutionSequencerFactory,
      logCtx: LoggingContext): ActiveContractsService with GrpcApiService =
    new ActiveContractsServiceValidation(new ApiActiveContractsService(backend), ledgerId)
    with BindableService {
      override def bindService(): ServerServiceDefinition =
        ActiveContractsServiceGrpc.bindService(this, DirectExecutionContext)
    }
}
Example 86
Source File: ApiCommandCompletionService.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services

import java.util.concurrent.atomic.AtomicLong

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.ledger.participant.state.index.v2.IndexCompletionsService
import com.daml.dec.DirectExecutionContext
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.ledger.api.domain
import com.daml.ledger.api.domain.{LedgerId, LedgerOffset}
import com.daml.ledger.api.messages.command.completion.CompletionStreamRequest
import com.daml.ledger.api.v1.command_completion_service._
import com.daml.ledger.api.validation.PartyNameChecker
import com.daml.logging.LoggingContext.withEnrichedLoggingContext
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import com.daml.platform.api.grpc.GrpcApiService
import com.daml.platform.server.api.services.domain.CommandCompletionService
import com.daml.platform.server.api.services.grpc.GrpcCommandCompletionService
import io.grpc.ServerServiceDefinition

import scala.concurrent.{ExecutionContext, Future}

final class ApiCommandCompletionService private (completionsService: IndexCompletionsService)(
    implicit ec: ExecutionContext,
    protected val mat: Materializer,
    protected val esf: ExecutionSequencerFactory,
    logCtx: LoggingContext)
    extends CommandCompletionService {

  private val logger = ContextualizedLogger.get(this.getClass)

  private val subscriptionIdCounter = new AtomicLong()

  override def completionStreamSource(
      request: CompletionStreamRequest): Source[CompletionStreamResponse, NotUsed] =
    withEnrichedLoggingContext(logging.parties(request.parties), logging.offset(request.offset)) {
      implicit logCtx =>
        val subscriptionId = subscriptionIdCounter.getAndIncrement().toString
        logger.debug(s"Received request for completion subscription $subscriptionId: $request")

        val offset = request.offset.getOrElse(LedgerOffset.LedgerEnd)

        completionsService
          .getCompletions(offset, request.applicationId, request.parties)
          .via(logger.logErrorsOnStream)
    }

  override def getLedgerEnd(ledgerId: domain.LedgerId): Future[LedgerOffset.Absolute] =
    completionsService.currentLedgerEnd().andThen(logger.logErrorsOnCall[LedgerOffset.Absolute])

}

object ApiCommandCompletionService {

  def create(ledgerId: LedgerId, completionsService: IndexCompletionsService)(
      implicit ec: ExecutionContext,
      mat: Materializer,
      esf: ExecutionSequencerFactory,
      logCtx: LoggingContext): GrpcCommandCompletionService with GrpcApiService = {
    val impl: CommandCompletionService =
      new ApiCommandCompletionService(completionsService)

    new GrpcCommandCompletionService(ledgerId, impl, PartyNameChecker.AllowAllParties)
    with GrpcApiService {
      override def bindService(): ServerServiceDefinition =
        CommandCompletionServiceGrpc.bindService(this, DirectExecutionContext)
    }
  }
}
Example 87
Source File: ApiLedgerConfigurationService.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.ledger.participant.state.index.v2.IndexConfigurationService
import com.daml.api.util.DurationConversion._
import com.daml.dec.DirectExecutionContext
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.v1.ledger_configuration_service._
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import com.daml.platform.api.grpc.GrpcApiService
import com.daml.platform.server.api.validation.LedgerConfigurationServiceValidation
import io.grpc.{BindableService, ServerServiceDefinition}

import scala.concurrent.ExecutionContext

final class ApiLedgerConfigurationService private (configurationService: IndexConfigurationService)(
    implicit protected val esf: ExecutionSequencerFactory,
    protected val mat: Materializer,
    logCtx: LoggingContext)
    extends LedgerConfigurationServiceAkkaGrpc
    with GrpcApiService {

  private val logger = ContextualizedLogger.get(this.getClass)

  override protected def getLedgerConfigurationSource(
      request: GetLedgerConfigurationRequest): Source[GetLedgerConfigurationResponse, NotUsed] =
    configurationService
      .getLedgerConfiguration()
      .map(configuration =>
        GetLedgerConfigurationResponse(
          Some(LedgerConfiguration(
            Some(toProto(configuration.maxDeduplicationTime)),
          ))))
      .via(logger.logErrorsOnStream)

  override def bindService(): ServerServiceDefinition =
    LedgerConfigurationServiceGrpc.bindService(this, DirectExecutionContext)
}

object ApiLedgerConfigurationService {
  def create(ledgerId: LedgerId, configurationService: IndexConfigurationService)(
      implicit ec: ExecutionContext,
      esf: ExecutionSequencerFactory,
      mat: Materializer,
      logCtx: LoggingContext)
    : LedgerConfigurationServiceGrpc.LedgerConfigurationService with GrpcApiService =
    new LedgerConfigurationServiceValidation(
      new ApiLedgerConfigurationService(configurationService),
      ledgerId) with BindableService {
      override def bindService(): ServerServiceDefinition =
        LedgerConfigurationServiceGrpc.bindService(this, DirectExecutionContext)
    }
}
Example 88
Source File: PaginatingAsyncStream.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.store.dao

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.daml.dec.DirectExecutionContext

import scala.concurrent.Future

object PaginatingAsyncStream {

  def streamFrom[Off, T](startFromOffset: Off, getOffset: T => Off)(
      query: Off => Future[Vector[T]]
  ): Source[T, NotUsed] = {
    Source
      .unfoldAsync(Option(startFromOffset)) {
        case None =>
          Future.successful(None) // finished reading the whole thing
        case Some(offset) =>
          query(offset).map { result =>
            val nextPageOffset: Option[Off] = result.lastOption.map(getOffset)
            Some((nextPageOffset, result))
          }(DirectExecutionContext) // run in the same thread as the query, avoid context switch for a cheap operation
      }
      .flatMapConcat(Source(_))
  }
}
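A minimal sketch of streamFrom against a paged lookup; `fetchPage` is a stand-in for a real database query:

import scala.concurrent.Future

val rows = Vector(1L -> "a", 2L -> "b", 3L -> "c", 4L -> "d", 5L -> "e")

// Hypothetical page query: up to 2 rows with offsets greater than `from`.
def fetchPage(from: Long): Future[Vector[(Long, String)]] =
  Future.successful(rows.filter(_._1 > from).take(2))

// Pages are fetched lazily as the stream is pulled; the offset of the last row
// of each page seeds the next query, and an empty page ends the stream.
val stream = PaginatingAsyncStream.streamFrom(0L, (row: (Long, String)) => row._1)(fetchPage)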
Example 89
Source File: package.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.store.dao

import akka.stream.scaladsl.Source

// NOTE: this listing is an excerpt from the `events` package object; the
// enclosing declaration and the imports for Identifier, Value, Key and lftx
// are elided.

  private[events] def groupContiguous[A, K, Mat](source: Source[A, Mat])(
      by: A => K): Source[Vector[A], Mat] =
    source
      .statefulMapConcat(() => {
        var previousSegmentKey: K = null.asInstanceOf[K]
        entry => {
          val keyForEntry = by(entry)
          val entryWithSplit = entry -> (keyForEntry != previousSegmentKey)
          previousSegmentKey = keyForEntry
          List(entryWithSplit)
        }
      })
      .splitWhen(_._2)
      .map(_._1)
      .fold(Vector.empty[A])(_ :+ _)
      .mergeSubstreams

  // Dispatches the call to either function based on the cardinality of the input.
  // This is mostly designed to route requests to queries specialized for single/multi-party subs.
  // Callers should ensure that the set is not empty, which in the usage this
  // is designed for should be provided by the Ledger API validation layer.
  private[events] def route[A, B](
      set: Set[A],
  )(single: A => B, multi: Set[A] => B): B = {
    assume(set.nonEmpty, "Empty set, unable to dispatch to single/multi implementation")
    set.size match {
      case 1 => single(set.toIterator.next)
      case n if n > 1 => multi(set)
    }
  }

  private[events] def convert(template: Identifier, key: lftx.Node.KeyWithMaintainers[Value]): Key =
    Key.assertBuild(template, key.key.value)
}
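For clarity, a small sketch of groupContiguous in action from within the same package; the values are illustrative, and the spec near the end of this listing exercises the same behavior:

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}

implicit val system: ActorSystem = ActorSystem("demo")

val events = List(1 -> "a", 1 -> "b", 2 -> "c", 1 -> "d")

// Contiguous runs sharing a key become one batch:
// Vector((1,"a"), (1,"b")), Vector((2,"c")), Vector((1,"d"))
val batched = groupContiguous(Source(events))(by = _._1).runWith(Sink.seq)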
Example 90
Source File: CommandCompletionsReader.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.store.dao

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.daml.ledger.participant.state.v1.Offset
import com.daml.lf.data.Ref
import com.daml.ledger.ApplicationId
import com.daml.ledger.api.v1.command_completion_service.CompletionStreamResponse
import com.daml.metrics.Metrics
import com.daml.platform.ApiOffset

private[dao] final class CommandCompletionsReader(dispatcher: DbDispatcher, metrics: Metrics) {

  private def offsetFor(response: CompletionStreamResponse): Offset =
    ApiOffset.assertFromString(response.checkpoint.get.offset.get.getAbsolute)

  def getCommandCompletions(
      startExclusive: Offset,
      endInclusive: Offset,
      applicationId: ApplicationId,
      parties: Set[Ref.Party]): Source[(Offset, CompletionStreamResponse), NotUsed] = {
    val query = CommandCompletionsTable.prepareGet(
      startExclusive = startExclusive,
      endInclusive = endInclusive,
      applicationId = applicationId,
      parties = parties,
    )
    Source
      .future(dispatcher.executeSql(metrics.daml.index.db.getCompletions) { implicit connection =>
        query.as(CommandCompletionsTable.parser.*)
      })
      .mapConcat(_.map(response => offsetFor(response) -> response))
  }
}
Example 91
Source File: ReadOnlyLedger.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.store

import java.time.Instant

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.daml.ledger.participant.state.index.v2.{CommandDeduplicationResult, PackageDetails}
import com.daml.ledger.participant.state.v1.{Configuration, Offset}
import com.daml.lf.data.Ref
import com.daml.lf.data.Ref.{Identifier, PackageId, Party}
import com.daml.lf.language.Ast
import com.daml.lf.transaction.Node.GlobalKey
import com.daml.lf.value.Value
import com.daml.lf.value.Value.{ContractId, ContractInst}
import com.daml.daml_lf_dev.DamlLf.Archive
import com.daml.ledger.TransactionId
import com.daml.ledger.api.domain.{ApplicationId, CommandId, LedgerId, PartyDetails}
import com.daml.ledger.api.health.ReportsHealth
import com.daml.ledger.api.v1.active_contracts_service.GetActiveContractsResponse
import com.daml.ledger.api.v1.command_completion_service.CompletionStreamResponse
import com.daml.ledger.api.v1.transaction_service.{
  GetFlatTransactionResponse,
  GetTransactionResponse,
  GetTransactionTreesResponse,
  GetTransactionsResponse,
}
import com.daml.platform.store.entries.{ConfigurationEntry, PackageLedgerEntry, PartyLedgerEntry}

import scala.concurrent.Future

// NOTE: this listing is an excerpt; the `ReadOnlyLedger` trait declaration and
// most of its members are elided, leaving only the method below.

  def removeExpiredDeduplicationData(
      currentTime: Instant,
  ): Future[Unit]
}
Example 92
Source File: LedgerEntriesSpec.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.stores.ledger.inmemory

import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import com.daml.ledger.participant.state.v1.Offset
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import org.scalatest.{AsyncWordSpec, Inspectors, Matchers}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.Random

class LedgerEntriesSpec
    extends AsyncWordSpec
    with Matchers
    with AkkaBeforeAndAfterAll
    with Inspectors {

  case class Error(msg: String)

  case class Transaction(content: String)

  val NO_OF_MESSAGES = 10000
  val NO_OF_SUBSCRIBERS = 50

  private def genTransactions() = (1 to NO_OF_MESSAGES).map { i =>
    if (Random.nextBoolean())
      Right(Transaction(i.toString))
    else
      Left(Error(i.toString))
  }

  "LedgerEntries" should {

    "store new blocks and a late subscriber can read them" in {
      val ledger = new LedgerEntries[Either[Error, Transaction]](_.toString)
      val transactions = genTransactions()

      transactions.foreach(t => ledger.publish(t))

      val sink =
        Flow[(Offset, Either[Error, Transaction])]
          .take(NO_OF_MESSAGES.toLong)
          .toMat(Sink.seq)(Keep.right)

      val blocksF = ledger.getSource(None, None).runWith(sink)

      blocksF.map { blocks =>
        val readTransactions = blocks.collect { case (_, transaction) => transaction }
        readTransactions shouldEqual transactions
      }
    }

    "store new blocks while multiple subscribers are reading them with different pace" in {
      val transactions = genTransactions()

      val ledger = new LedgerEntries[Either[Error, Transaction]](_.toString)

      val publishRate = NO_OF_MESSAGES / 10

      val blocksInStream =
        Source(transactions)
          .throttle(publishRate, 100.milliseconds, publishRate, ThrottleMode.shaping)
          .to(Sink.foreach { t =>
            ledger.publish(t)
            ()
          })

      def subscribe() = {
        val subscribeRate = NO_OF_MESSAGES / (Random.nextInt(100) + 1)
        ledger
          .getSource(None, None)
          .runWith(
            Flow[(Offset, Either[Error, Transaction])]
              .throttle(subscribeRate, 100.milliseconds, subscribeRate, ThrottleMode.shaping)
              .take(NO_OF_MESSAGES.toLong)
              .toMat(Sink.seq)(Keep.right)
          )
      }

      val readBlocksF = Future.sequence((1 to NO_OF_SUBSCRIBERS).map(_ => subscribe()))
      blocksInStream.run()

      readBlocksF.map { readBlocksForAll =>
        forAll(readBlocksForAll) { readBlocks =>
          val readTransactions = readBlocks.collect { case (_, transaction) => transaction }
          readTransactions shouldEqual transactions
        }
      }
    }
  }
}
Example 93
Source File: TrackerImplTest.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services.tracking

import akka.NotUsed
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Keep, Source, SourceQueueWithComplete}
import akka.stream.testkit.TestSubscriber
import akka.stream.testkit.scaladsl.TestSink
import com.daml.ledger.api.testing.utils.{
  AkkaBeforeAndAfterAll,
  IsStatusException,
  TestingException
}
import com.daml.ledger.api.v1.command_service.SubmitAndWaitRequest
import com.daml.ledger.api.v1.commands.Commands
import com.daml.ledger.api.v1.completion.Completion
import com.daml.dec.DirectExecutionContext
import com.google.rpc.status.{Status => RpcStatus}
import io.grpc.Status
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterEach, Matchers, Succeeded, WordSpec}

import scala.concurrent.ExecutionContext.Implicits.global

class TrackerImplTest
    extends WordSpec
    with Matchers
    with BeforeAndAfterEach
    with ScalaFutures
    with AkkaBeforeAndAfterAll {

  private var sut: Tracker = _
  private var consumer: TestSubscriber.Probe[NotUsed] = _
  private var queue: SourceQueueWithComplete[TrackerImpl.QueueInput] = _

  private def input(cid: Int) = SubmitAndWaitRequest(Some(Commands(commandId = cid.toString)))

  override protected def beforeEach(): Unit = {
    val (q, sink) = Source
      .queue[TrackerImpl.QueueInput](1, OverflowStrategy.dropNew)
      .map { in =>
        in.context.success(Completion(in.value.getCommands.commandId, Some(RpcStatus())))
        NotUsed
      }
      .toMat(TestSink.probe[NotUsed])(Keep.both)
      .run()
    queue = q
    sut = new TrackerImpl(q)
    consumer = sink
  }

  override protected def afterEach(): Unit = {
    consumer.cancel()
    queue.complete()
  }

  "Tracker Implementation" when {

    "input is submitted, and the queue is available" should {

      "work successfully" in {
        val resultF1 = sut.track(input(1))
        consumer.requestNext()
        val resultF = resultF1.flatMap(_ => sut.track(input(2)))(DirectExecutionContext)
        consumer.requestNext()
        whenReady(resultF)(_ => Succeeded)
      }
    }

    "input is submitted, and the queue is backpressuring" should {

      "return a RESOURCE_EXHAUSTED error" in {
        sut.track(input(1))
        whenReady(sut.track(input(2)).failed)(IsStatusException(Status.RESOURCE_EXHAUSTED))
      }
    }

    "input is submitted, and the queue has been completed" should {

      "return an ABORTED error" in {
        queue.complete()
        whenReady(sut.track(input(2)).failed)(IsStatusException(Status.ABORTED))
      }
    }

    "input is submitted, and the queue has failed" should {

      "return an ABORTED error" in {
        queue.fail(TestingException("The queue fails with this error."))
        whenReady(sut.track(input(2)).failed)(IsStatusException(Status.ABORTED))
      }
    }
  }
}
Example 94
Source File: GroupContiguousSpec.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.platform.store.dao.events import akka.stream.scaladsl.{Sink, Source} import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll import org.scalatest.concurrent.ScalaFutures import org.scalatest.prop.PropertyChecks import org.scalatest.{AsyncFlatSpec, Matchers} final class GroupContiguousSpec extends AsyncFlatSpec with Matchers with PropertyChecks with ScalaFutures with AkkaBeforeAndAfterAll { behavior of "groupContiguous" override def spanScaleFactor: Double = 10 // Give some extra slack on CI it should "be equivalent to grouping on inputs with an ordered key" in forAll { pairs: List[(Int, String)] => val sortedPairs = pairs.sortBy(_._1) val grouped = groupContiguous(Source(sortedPairs))(by = _._1) whenReady(grouped.runWith(Sink.seq[Vector[(Int, String)]])) { _ should contain theSameElementsAs pairs.groupBy(_._1).values } } it should "be equivalent to grouping on inputs with a contiguous key" in { val pairsWithContiguousKeys = List(1 -> "baz", 0 -> "foo", 0 -> "bar", 0 -> "quux") val grouped = groupContiguous(Source(pairsWithContiguousKeys))(by = _._1) whenReady(grouped.runWith(Sink.seq[Vector[(Int, String)]])) { _.map(_.toSet) should contain theSameElementsAs pairsWithContiguousKeys .groupBy(_._1) .map(_._2.toSet) } } it should "behave as expected when grouping inputs without a contiguous key" in { val pairs = List(0 -> "foo", 0 -> "bar", 1 -> "baz", 0 -> "quux") val grouped = groupContiguous(Source(pairs))(by = _._1) whenReady(grouped.runWith(Sink.seq[Vector[(Int, String)]])) { _.map(_.toSet) should contain theSameElementsAs Vector( Set(0 -> "foo", 0 -> "bar"), Set(1 -> "baz"), Set(0 -> "quux"), ) } } }
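groupContiguous is a daml-internal helper, but the contract it tests (adjacent elements sharing a key land in one group, non-adjacent duplicates do not) can be approximated with stock operators. A sketch, as an illustrative stand-in rather than the project's implementation:

import akka.NotUsed
import akka.stream.scaladsl.Source

// Starts a new group whenever the key changes, so only contiguous runs of
// equal keys end up grouped together.
def groupContiguousSketch[A, K](source: Source[A, NotUsed])(by: A => K): Source[Vector[A], NotUsed] =
  source
    .statefulMapConcat { () =>
      var previous: Option[K] = None
      element => {
        val key = by(element)
        val boundary = !previous.contains(key)
        previous = Some(key)
        List(boundary -> element)
      }
    }
    .splitWhen(_._1) // open a substream at every key change
    .map(_._2)
    .fold(Vector.empty[A])(_ :+ _) // collect each substream into one group
    .concatSubstreams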
Example 95
Source File: GrpcCommandCompletionService.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.platform.server.api.services.grpc import akka.stream.Materializer import akka.stream.scaladsl.Source import com.daml.dec.DirectExecutionContext import com.daml.grpc.adapter.ExecutionSequencerFactory import com.daml.ledger.api.domain import com.daml.ledger.api.domain.LedgerId import com.daml.ledger.api.messages.command.completion.{ CompletionStreamRequest => ValidatedCompletionStreamRequest } import com.daml.ledger.api.v1.command_completion_service._ import com.daml.ledger.api.v1.ledger_offset.LedgerOffset import com.daml.ledger.api.validation.{CompletionServiceRequestValidator, PartyNameChecker} import com.daml.platform.server.api.services.domain.CommandCompletionService import scala.concurrent.Future object GrpcCommandCompletionService { private[this] val completionStreamDefaultOffset = Some(domain.LedgerOffset.LedgerEnd) private def fillInWithDefaults( request: ValidatedCompletionStreamRequest): ValidatedCompletionStreamRequest = if (request.offset.isDefined) { request } else { request.copy(offset = completionStreamDefaultOffset) } } class GrpcCommandCompletionService( ledgerId: LedgerId, service: CommandCompletionService, partyNameChecker: PartyNameChecker )(implicit protected val esf: ExecutionSequencerFactory, protected val mat: Materializer) extends CommandCompletionServiceAkkaGrpc { private val validator = new CompletionServiceRequestValidator(ledgerId, partyNameChecker) override def completionStreamSource( request: CompletionStreamRequest): Source[CompletionStreamResponse, akka.NotUsed] = { validator .validateCompletionStreamRequest(request) .fold( Source.failed[CompletionStreamResponse], GrpcCommandCompletionService.fillInWithDefaults _ andThen service.completionStreamSource ) } override def completionEnd(request: CompletionEndRequest): Future[CompletionEndResponse] = validator .validateCompletionEndRequest(request) .fold( Future.failed[CompletionEndResponse], req => service .getLedgerEnd(req.ledgerId) .map(abs => CompletionEndResponse(Some(LedgerOffset(LedgerOffset.Value.Absolute(abs.value)))))( DirectExecutionContext) ) }
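Note how completionStreamSource folds the validation result directly into the stream: an invalid request becomes Source.failed, so errors reach the client through the same channel as data. A reduced sketch of the pattern, with illustrative request and response types:

import akka.NotUsed
import akka.stream.scaladsl.Source

final case class Request(offset: Option[Long])
final case class Response(offset: Long)

def stream(validated: Either[Throwable, Request]): Source[Response, NotUsed] =
  validated.fold(
    Source.failed[Response], // validation errors surface in-stream
    request => Source.single(Response(request.offset.getOrElse(0L))) // defaulting, as fillInWithDefaults does
  )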
Example 96
Source File: GrpcHealthService.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.platform.server.api.services.grpc import akka.NotUsed import akka.stream.Materializer import akka.stream.scaladsl.Source import com.daml.grpc.adapter.ExecutionSequencerFactory import com.daml.dec.DirectExecutionContext import com.daml.ledger.api.health.HealthChecks import com.daml.platform.api.grpc.GrpcApiService import com.daml.platform.server.api.DropRepeated import com.daml.platform.server.api.services.grpc.GrpcHealthService._ import io.grpc.health.v1.health.{ HealthAkkaGrpc, HealthCheckRequest, HealthCheckResponse, HealthGrpc } import io.grpc.{ServerServiceDefinition, Status, StatusException} import scala.concurrent.duration.{DurationInt, FiniteDuration} import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success, Try} class GrpcHealthService( healthChecks: HealthChecks, maximumWatchFrequency: FiniteDuration = 1.second, )( implicit protected val esf: ExecutionSequencerFactory, protected val mat: Materializer, executionContext: ExecutionContext, ) extends HealthAkkaGrpc with GrpcApiService { override def bindService(): ServerServiceDefinition = HealthGrpc.bindService(this, DirectExecutionContext) override def check(request: HealthCheckRequest): Future[HealthCheckResponse] = Future.fromTry(matchResponse(serviceFrom(request))) override def watchSource(request: HealthCheckRequest): Source[HealthCheckResponse, NotUsed] = Source .fromIterator(() => Iterator.continually(matchResponse(serviceFrom(request)).get)) .throttle(1, per = maximumWatchFrequency) .via(DropRepeated()) private def matchResponse(componentName: Option[String]): Try[HealthCheckResponse] = if (!componentName.forall(healthChecks.hasComponent)) Failure(new StatusException(Status.NOT_FOUND)) else if (healthChecks.isHealthy(componentName)) Success(servingResponse) else Success(notServingResponse) } object GrpcHealthService { private[grpc] val servingResponse = HealthCheckResponse(HealthCheckResponse.ServingStatus.SERVING) private[grpc] val notServingResponse = HealthCheckResponse(HealthCheckResponse.ServingStatus.NOT_SERVING) private def serviceFrom(request: HealthCheckRequest): Option[String] = { Option(request.service).filter(_.nonEmpty) } }
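DropRepeated is a daml helper stage; the same suppression of consecutive duplicates can be sketched with statefulMapConcat (an illustrative stand-in, not the project's implementation):

import akka.NotUsed
import akka.stream.scaladsl.Flow

// Drops an element only when it equals its immediate predecessor, so the
// health watch re-emits a status only when it actually changes.
def dropRepeatedSketch[T]: Flow[T, T, NotUsed] =
  Flow[T].statefulMapConcat { () =>
    var last: Option[T] = None
    element =>
      if (last.contains(element)) Nil
      else {
        last = Some(element)
        List(element)
      }
  }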
Example 97
Source File: TransactionService.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.platform.server.api.services.domain import akka.NotUsed import akka.stream.scaladsl.Source import com.daml.ledger.api.domain.LedgerOffset import com.daml.ledger.api.messages.transaction.{ GetTransactionByEventIdRequest, GetTransactionByIdRequest, GetTransactionTreesRequest, GetTransactionsRequest } import com.daml.ledger.api.v1.transaction_service.{ GetFlatTransactionResponse, GetTransactionResponse, GetTransactionTreesResponse, GetTransactionsResponse } import scala.concurrent.Future trait TransactionService { def getTransactions(req: GetTransactionsRequest): Source[GetTransactionsResponse, NotUsed] def getTransactionTrees( req: GetTransactionTreesRequest): Source[GetTransactionTreesResponse, NotUsed] def getLedgerEnd(ledgerId: String): Future[LedgerOffset.Absolute] def offsetOrdering: Ordering[LedgerOffset.Absolute] def getTransactionById(req: GetTransactionByIdRequest): Future[GetTransactionResponse] def getTransactionByEventId(req: GetTransactionByEventIdRequest): Future[GetTransactionResponse] def getFlatTransactionById(req: GetTransactionByIdRequest): Future[GetFlatTransactionResponse] def getFlatTransactionByEventId( req: GetTransactionByEventIdRequest): Future[GetFlatTransactionResponse] }
Example 98
Source File: SubSource.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.akkastreams.dispatcher

import akka.NotUsed
import akka.stream.scaladsl.Source

// Minimal declaration of SubSource; the full definition is elided in this
// listing, but RangeSource needs the abstract method it overrides.
sealed trait SubSource[Index, T] {
  def subSource(startExclusive: Index, endInclusive: Index): Source[(Index, T), NotUsed]
}

final case class RangeSource[Index: Ordering, T](
    getRange: (Index, Index) => Source[(Index, T), NotUsed])
    extends SubSource[Index, T] {
  override def subSource(
      startExclusive: Index,
      endInclusive: Index): Source[(Index, T), NotUsed] =
    getRange(startExclusive, endInclusive)
}
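A usage sketch with an assumed Int index, showing how the supplied closure produces the half-open range (startExclusive, endInclusive]:

import akka.stream.scaladsl.Source

val range = RangeSource[Int, String]((start, end) =>
  Source(start + 1 to end).map(i => i -> s"element-$i"))

// range.subSource(2, 5) emits (3, "element-3"), (4, "element-4"), (5, "element-5").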
Example 99
Source File: Dispatcher.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.akkastreams.dispatcher

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.daml.resources.ResourceOwner

// Minimal declaration of the Dispatcher trait; its streaming members
// (reading via a SubSource, advancing the head, and so on) are elided in
// this listing, but the companion object below needs the type.
trait Dispatcher[Index] extends AutoCloseable

object Dispatcher {

  def apply[Index: Ordering](
      name: String,
      zeroIndex: Index,
      headAtInitialization: Index,
  ): Dispatcher[Index] =
    new DispatcherImpl[Index](name, zeroIndex, headAtInitialization)

  def owner[Index: Ordering](
      name: String,
      zeroIndex: Index,
      headAtInitialization: Index,
  ): ResourceOwner[Dispatcher[Index]] =
    ResourceOwner.forCloseable(() => apply(name, zeroIndex, headAtInitialization))
}
Example 100
Source File: DispatcherImpl.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.akkastreams.dispatcher

import java.util.concurrent.atomic.AtomicReference

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.github.ghik.silencer.silent
import org.slf4j.LoggerFactory

import scala.collection.immutable

final class DispatcherImpl[Index: Ordering](
    name: String,
    zeroIndex: Index,
    headAtInitialization: Index)
    extends Dispatcher[Index] {

  private val logger = LoggerFactory.getLogger(getClass)

  require(
    !indexIsBeforeZero(headAtInitialization),
    s"head supplied at Dispatcher initialization $headAtInitialization is before zero index $zeroIndex. " +
      s"This would imply that the ledger end is before the ledger begin, which makes this an invalid configuration."
  )

  private sealed abstract class State extends Product with Serializable {
    def getSignalDispatcher: Option[SignalDispatcher]
    def getLastIndex: Index
  }

  // The following @silent annotations are due to
  // <https://github.com/scala/bug/issues/4440>
  @silent
  private final case class Running(lastIndex: Index, signalDispatcher: SignalDispatcher)
      extends State {
    override def getLastIndex: Index = lastIndex
    override def getSignalDispatcher: Option[SignalDispatcher] = Some(signalDispatcher)
  }

  @silent
  private final case class Closed(lastIndex: Index) extends State {
    override def getLastIndex: Index = lastIndex
    override def getSignalDispatcher: Option[SignalDispatcher] = None
  }

  // So why not broadcast the actual new index, instead of using a signaller?
  // The reason is that if we did, the new indices would race with readHead
  // in a way that makes it hard to start up new subscriptions. In particular,
  // we can tolerate NewIndexSignals being out of order or dropped, maintaining
  // the weaker invariant that, if the head is updated, at least one
  // NewIndexSignal eventually arrives.
  private val state = new AtomicReference[State](Running(headAtInitialization, SignalDispatcher()))

  // The public members that serve subscriptions are elided in this listing.
  // The declaration enclosing the emitter below was elided as well; a minimal
  // reconstruction is kept so the override has a home. It emits
  // (previousHead -> newHead) intervals as the head advances.
  private final class ContinuousRangeEmitter(private var max: Index)
      extends (Index => immutable.Iterable[(Index, Index)]) {
    override def apply(newHead: Index): immutable.Iterable[(Index, Index)] =
      if (Ordering[Index].gt(newHead, max)) {
        val intervalBegin = max
        max = newHead
        List(intervalBegin -> newHead)
      } else Nil
  }

  private def indexIsBeforeZero(checkedIndex: Index): Boolean =
    Ordering[Index].gt(zeroIndex, checkedIndex)

  def close(): Unit =
    state.getAndUpdate {
      case Running(idx, _) => Closed(idx)
      case c: Closed => c
    } match {
      case Running(_, disp) =>
        disp.signal()
        disp.close()
      case _: Closed => ()
    }

  private def closedError: IllegalStateException =
    new IllegalStateException(s"$name: Dispatcher is closed")
}
Example 101
Source File: DropRepeatedSpec.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.platform.server.api import akka.actor.ActorSystem import akka.pattern.pipe import akka.stream.Materializer import akka.stream.scaladsl.{Sink, Source} import akka.testkit.{TestKit, TestProbe} import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike} import scala.collection.immutable import scala.concurrent.ExecutionContext final class DropRepeatedSpec extends TestKit(ActorSystem(classOf[DropRepeatedSpec].getSimpleName)) with WordSpecLike with Matchers with BeforeAndAfterAll { private[this] implicit val materializer: Materializer = Materializer(system) private[this] implicit val executionContext: ExecutionContext = materializer.executionContext override def afterAll: Unit = { TestKit.shutdownActorSystem(system) } "DropRepeated" should { "drop repeated elements" in { val probe = TestProbe() val input = immutable.Seq(1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5) val _ = Source(input) .via(DropRepeated()) .runWith(Sink.seq) .pipeTo(probe.ref) .failed .foreach(fail(_)) probe.expectMsg(Vector(1, 2, 3, 4, 5)) } "does not drop duplicate elements that are not repeated" in { val probe = TestProbe() val input = immutable.Seq(1, 1, 2, 2, 1, 1, 2, 2) val _ = Source(input) .via(DropRepeated()) .runWith(Sink.seq) .pipeTo(probe.ref) .failed .foreach(fail(_)) probe.expectMsg(Vector(1, 2, 1, 2)) } } }
Example 102
Source File: KeyValueParticipantStateReader.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.ledger.participant.state.kvutils.api import akka.NotUsed import akka.stream.Materializer import akka.stream.scaladsl.Source import com.daml.ledger.api.health.HealthStatus import com.daml.ledger.participant.state.kvutils.DamlKvutils.DamlLogEntryId import com.daml.ledger.participant.state.kvutils.{Envelope, KVOffset, KeyValueConsumption} import com.daml.ledger.participant.state.v1._ import com.daml.lf.data.Time import com.daml.metrics.{Metrics, Timed} class KeyValueParticipantStateReader(reader: LedgerReader, metrics: Metrics)( implicit materializer: Materializer) extends ReadService { override def getLedgerInitialConditions(): Source[LedgerInitialConditions, NotUsed] = Source.single(createLedgerInitialConditions()) override def stateUpdates(beginAfter: Option[Offset]): Source[(Offset, Update), NotUsed] = { Source .single(beginAfter.map(KVOffset.onlyKeepHighestIndex)) .flatMapConcat(reader.events) .flatMapConcat { case LedgerRecord(offset, entryId, envelope) => Timed .value(metrics.daml.kvutils.reader.openEnvelope, Envelope.open(envelope)) .flatMap { case Envelope.LogEntryMessage(logEntry) => Timed.value( metrics.daml.kvutils.reader.parseUpdates, { val logEntryId = DamlLogEntryId.parseFrom(entryId) val updates = KeyValueConsumption.logEntryToUpdate(logEntryId, logEntry) val updateOffset: (Offset, Int) => Offset = if (updates.size > 1) KVOffset.setMiddleIndex else (offset, _) => offset val updatesWithOffsets = Source(updates).zipWithIndex.map { case (update, index) => updateOffset(offset, index.toInt) -> update } Right(updatesWithOffsets) } ) case _ => Left("Envelope does not contain a log entry") } .getOrElse(throw new IllegalArgumentException( s"Invalid log entry received at offset $offset")) } } override def currentHealth(): HealthStatus = reader.currentHealth() private def createLedgerInitialConditions(): LedgerInitialConditions = LedgerInitialConditions( reader.ledgerId(), LedgerReader.DefaultConfiguration, Time.Timestamp.Epoch) }
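When a single log entry expands into several updates, stateUpdates keeps offsets unique by tagging each update with its position in the batch. A reduced sketch of that sub-indexing step (types are illustrative; the real code uses KVOffset.setMiddleIndex):

import akka.NotUsed
import akka.stream.scaladsl.Source

def withSubOffsets[A](offset: Long, updates: List[A]): Source[((Long, Int), A), NotUsed] =
  Source(updates).zipWithIndex.map {
    case (update, index) => (offset, index.toInt) -> update
  }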
Example 103
Source File: KeyValueParticipantState.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.ledger.participant.state.kvutils.api import java.util.concurrent.CompletionStage import akka.NotUsed import akka.stream.Materializer import akka.stream.scaladsl.Source import com.daml.daml_lf_dev.DamlLf import com.daml.ledger.api.health.HealthStatus import com.daml.ledger.participant.state.v1._ import com.daml.lf.data.Time import com.daml.metrics.Metrics class KeyValueParticipantState( reader: LedgerReader, writer: LedgerWriter, metrics: Metrics, )(implicit materializer: Materializer) extends ReadService with WriteService { private val readerAdapter = new KeyValueParticipantStateReader(reader, metrics) private val writerAdapter = new KeyValueParticipantStateWriter(new TimedLedgerWriter(writer, metrics), metrics) override def getLedgerInitialConditions(): Source[LedgerInitialConditions, NotUsed] = readerAdapter.getLedgerInitialConditions() override def stateUpdates(beginAfter: Option[Offset]): Source[(Offset, Update), NotUsed] = readerAdapter.stateUpdates(beginAfter) override def submitTransaction( submitterInfo: SubmitterInfo, transactionMeta: TransactionMeta, transaction: SubmittedTransaction, estimatedInterpretationCost: Long, ): CompletionStage[SubmissionResult] = writerAdapter.submitTransaction( submitterInfo, transactionMeta, transaction, estimatedInterpretationCost, ) override def submitConfiguration( maxRecordTime: Time.Timestamp, submissionId: SubmissionId, config: Configuration): CompletionStage[SubmissionResult] = writerAdapter.submitConfiguration(maxRecordTime, submissionId, config) override def uploadPackages( submissionId: SubmissionId, archives: List[DamlLf.Archive], sourceDescription: Option[String]): CompletionStage[SubmissionResult] = writerAdapter.uploadPackages(submissionId, archives, sourceDescription) override def allocateParty( hint: Option[Party], displayName: Option[String], submissionId: SubmissionId): CompletionStage[SubmissionResult] = writerAdapter.allocateParty(hint, displayName, submissionId) override def currentHealth(): HealthStatus = reader.currentHealth() and writer.currentHealth() }
Example 104
Source File: BatchingQueue.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.ledger.participant.state.kvutils.api import java.io.Closeable import java.util.concurrent.atomic.AtomicBoolean import akka.stream.scaladsl.{Sink, Source, SourceQueueWithComplete} import akka.stream.{Materializer, OverflowStrategy, QueueOfferResult} import com.daml.ledger.participant.state.kvutils.DamlKvutils.DamlSubmissionBatch import com.daml.ledger.participant.state.v1.SubmissionResult import scala.concurrent.Future import scala.concurrent.duration._ object BatchingQueue { type CommitBatchFunction = Seq[DamlSubmissionBatch.CorrelatedSubmission] => Future[Unit] } case class DefaultBatchingQueue( maxQueueSize: Int, maxBatchSizeBytes: Long, maxWaitDuration: FiniteDuration, maxConcurrentCommits: Int ) extends BatchingQueue { private val queue: Source[ Seq[DamlSubmissionBatch.CorrelatedSubmission], SourceQueueWithComplete[DamlSubmissionBatch.CorrelatedSubmission]] = Source .queue(maxQueueSize, OverflowStrategy.dropNew) .groupedWeightedWithin(maxBatchSizeBytes, maxWaitDuration)( (cs: DamlSubmissionBatch.CorrelatedSubmission) => cs.getSubmission.size.toLong) def run(commitBatch: Seq[DamlSubmissionBatch.CorrelatedSubmission] => Future[Unit])( implicit materializer: Materializer): RunningBatchingQueueHandle = { val materializedQueue = queue .mapAsync(maxConcurrentCommits)(commitBatch) .to(Sink.ignore) .run() val queueAlive = new AtomicBoolean(true) materializedQueue.watchCompletion.foreach { _ => queueAlive.set(false) }(materializer.executionContext) new RunningBatchingQueueHandle { override def alive: Boolean = queueAlive.get() override def offer( submission: DamlSubmissionBatch.CorrelatedSubmission): Future[SubmissionResult] = { materializedQueue .offer(submission) .map { case QueueOfferResult.Enqueued => SubmissionResult.Acknowledged case QueueOfferResult.Dropped => SubmissionResult.Overloaded case f: QueueOfferResult.Failure => SubmissionResult.InternalError(f.toString) case QueueOfferResult.QueueClosed => SubmissionResult.InternalError("DefaultBatchingQueue.queue is closed") }(materializer.executionContext) } override def close(): Unit = { materializedQueue.complete() } } } }
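The batching itself is done by groupedWeightedWithin, which closes a batch when either the accumulated weight or the time window runs out, whichever comes first. A minimal sketch with string lengths standing in for submission sizes:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.duration._

object BatchSketch extends App {
  implicit val system: ActorSystem = ActorSystem("batch-sketch")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  // A batch closes once roughly 6 units of weight accumulate, or after
  // 100ms, whichever happens first.
  Source(List("a", "bb", "ccc", "dddd"))
    .groupedWeightedWithin(6L, 100.millis)(_.length.toLong)
    .runWith(Sink.foreach(batch => println(batch.mkString("[", ",", "]"))))
}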
Example 105
Source File: package.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.ledger.participant.state.kvutils import akka.NotUsed import akka.stream.scaladsl.Source import com.daml.ledger.api.health.HealthStatus import com.daml.ledger.participant.state.v1.{LedgerId, Offset, ParticipantId, SubmissionResult} import scala.concurrent.Future package object api { type KeyValueLedger = LedgerReader with LedgerWriter def createKeyValueLedger(reader: LedgerReader, writer: LedgerWriter): KeyValueLedger = new LedgerReader with LedgerWriter { override def events(startExclusive: Option[Offset]): Source[LedgerRecord, NotUsed] = reader.events(startExclusive) override def ledgerId(): LedgerId = reader.ledgerId() override def currentHealth(): HealthStatus = reader.currentHealth().and(writer.currentHealth()) override def participantId: ParticipantId = writer.participantId override def commit( correlationId: String, envelope: Bytes, metadata: CommitMetadata, ): Future[SubmissionResult] = writer.commit(correlationId, envelope, metadata) } }
Example 106
Source File: ResponseFormats.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.http.json import akka.NotUsed import akka.http.scaladsl.model._ import akka.stream.scaladsl.{Concat, Source, _} import akka.stream.{FanOutShape2, SourceShape, UniformFanInShape} import akka.util.ByteString import com.daml.http.ContractsFetch import scalaz.syntax.show._ import scalaz.{Show, \/} import spray.json.DefaultJsonProtocol._ import spray.json._ private[http] object ResponseFormats { def errorsJsObject(status: StatusCode, es: String*): JsObject = { val errors = es.toJson JsObject(statusField(status), ("errors", errors)) } def resultJsObject[A: JsonWriter](a: A): JsObject = { resultJsObject(a.toJson) } def resultJsObject(a: JsValue): JsObject = { JsObject(statusField(StatusCodes.OK), ("result", a)) } def resultJsObject[E: Show]( jsVals: Source[E \/ JsValue, NotUsed], warnings: Option[JsValue]): Source[ByteString, NotUsed] = { val graph = GraphDSL.create() { implicit b => import GraphDSL.Implicits._ val partition: FanOutShape2[E \/ JsValue, E, JsValue] = b add ContractsFetch.partition val concat: UniformFanInShape[ByteString, ByteString] = b add Concat(3) // first produce optional warnings and result element warnings match { case Some(x) => Source.single(ByteString(s"""{"warnings":${x.compactPrint},"result":[""")) ~> concat.in(0) case None => Source.single(ByteString("""{"result":[""")) ~> concat.in(0) } jsVals ~> partition.in // second consume all successes partition.out1.zipWithIndex.map(a => formatOneElement(a._1, a._2)) ~> concat.in(1) // then consume all failures and produce the status and optional errors partition.out0.fold(Vector.empty[E])((b, a) => b :+ a).map { case Vector() => ByteString("""],"status":200}""") case errors => val jsErrors: Vector[JsString] = errors.map(e => JsString(e.shows)) ByteString(s"""],"errors":${JsArray(jsErrors).compactPrint},"status":501}""") } ~> concat.in(2) SourceShape(concat.out) } Source.fromGraph(graph) } private def formatOneElement(a: JsValue, index: Long): ByteString = { if (index == 0L) ByteString(a.compactPrint) else ByteString("," + a.compactPrint) } def statusField(status: StatusCode): (String, JsNumber) = ("status", JsNumber(status.intValue())) }
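The graph above streams a JSON object incrementally, routing successes and failures through separate outlets. For the simpler case of a plain streamed JSON array with a fixed status, intersperse achieves the same framing; a sketch, not the project's helper:

import akka.NotUsed
import akka.stream.scaladsl.Source
import akka.util.ByteString
import spray.json._

def jsonArraySketch(values: Source[JsValue, NotUsed]): Source[ByteString, NotUsed] =
  values
    .map(_.compactPrint)
    .intersperse("""{"result":[""", ",", """],"status":200}""")
    .map(ByteString(_))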
Example 107
Source File: ProtobufByteStrings.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.http.util import akka.NotUsed import akka.stream.Materializer import akka.stream.scaladsl.{Source, StreamConverters} import com.google.protobuf import scala.collection.JavaConverters._ object ProtobufByteStrings { def readFrom(source: Source[akka.util.ByteString, NotUsed])( implicit mat: Materializer): protobuf.ByteString = { val inputStream = source.runWith(StreamConverters.asInputStream()) protobuf.ByteString.readFrom(inputStream) } def toSource(a: protobuf.ByteString): Source[akka.util.ByteString, NotUsed] = { Source.fromIterator(() => a.asReadOnlyByteBufferList().iterator.asScala.map(x => akka.util.ByteString(x))) } }
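readFrom bridges the stream world into protobuf's blocking InputStream reader via StreamConverters.asInputStream. A round-trip usage sketch with an illustrative payload:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import com.google.protobuf

object ProtobufSketch extends App {
  implicit val system: ActorSystem = ActorSystem("protobuf-sketch")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  // Chunked akka ByteStrings -> one protobuf.ByteString -> back to a Source.
  val chunks = Source(List("hel", "lo").map(akka.util.ByteString(_)))
  val combined: protobuf.ByteString = ProtobufByteStrings.readFrom(chunks)
  println(combined.toStringUtf8) // prints "hello"
  val backAgain = ProtobufByteStrings.toSource(combined)
}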
Example 108
Source File: PackageManagementService.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.http import akka.NotUsed import akka.stream.Materializer import akka.stream.scaladsl.Source import akka.util.ByteString import com.daml.http.util.ProtobufByteStrings import com.daml.jwt.domain.Jwt import scala.concurrent.{ExecutionContext, Future} class PackageManagementService( listKnownPackagesFn: LedgerClientJwt.ListPackages, getPackageFn: LedgerClientJwt.GetPackage, uploadDarFileFn: LedgerClientJwt.UploadDarFile, )(implicit ec: ExecutionContext, mat: Materializer) { def listPackages(jwt: Jwt): Future[Seq[String]] = listKnownPackagesFn(jwt).map(_.packageIds) def getPackage(jwt: Jwt, packageId: String): Future[admin.GetPackageResponse] = getPackageFn(jwt, packageId).map(admin.GetPackageResponse.fromLedgerApi) def uploadDarFile(jwt: Jwt, source: Source[ByteString, NotUsed]): Future[Unit] = uploadDarFileFn(jwt, ProtobufByteStrings.readFrom(source)) }
Example 109
Source File: FlowUtilTest.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.http.util import akka.NotUsed import akka.actor.ActorSystem import akka.stream.Materializer import akka.stream.scaladsl.Source import org.scalacheck.{Gen, Arbitrary} import org.scalatest.concurrent.ScalaFutures import org.scalatest.prop.GeneratorDrivenPropertyChecks import org.scalatest.{FlatSpec, Matchers} import scalaz.{-\/, \/, \/-} import scala.concurrent.Future class FlowUtilTest extends FlatSpec with ScalaFutures with Matchers with GeneratorDrivenPropertyChecks { import FlowUtil._ implicit val asys: ActorSystem = ActorSystem(this.getClass.getSimpleName) implicit val materializer: Materializer = Materializer(asys) "allowOnlyFirstInput" should "pass 1st message through and replace all others with errors" in forAll( nonEmptyVectorOfInts) { xs: Vector[Int] => val error = "Error" val errorNum = Math.max(xs.size - 1, 0) val expected: Vector[String \/ Int] = xs.take(1).map(\/-(_)) ++ Vector.fill(errorNum)(-\/(error)) val input: Source[String \/ Int, NotUsed] = Source.fromIterator(() => xs.toIterator).map(\/-(_)) val actualF: Future[Vector[String \/ Int]] = input .via(allowOnlyFirstInput[String, Int](error)) .runFold(Vector.empty[String \/ Int])(_ :+ _) whenReady(actualF) { actual => actual shouldBe expected } } private val nonEmptyVectorOfInts: Gen[Vector[Int]] = Gen.nonEmptyBuildableOf[Vector[Int], Int](Arbitrary.arbitrary[Int]) }
Example 110
Source File: ClientAdapter.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.grpc.adapter.client.akka import java.util.function.BiConsumer import akka.NotUsed import akka.stream.scaladsl.Source import com.daml.grpc.adapter.ExecutionSequencerFactory import com.daml.grpc.adapter.client.rs.ClientPublisher import io.grpc.stub.StreamObserver object ClientAdapter { def serverStreaming[Req, Resp](req: Req, stub: (Req, StreamObserver[Resp]) => Unit)( implicit executionSequencerFactory: ExecutionSequencerFactory): Source[Resp, NotUsed] = Source.fromPublisher( new ClientPublisher[Req, Resp](req, adaptStub(stub), executionSequencerFactory)) private def adaptStub[Req, Resp]( stub: (Req, StreamObserver[Resp]) => Unit ): BiConsumer[Req, StreamObserver[Resp]] = { (req, resp) => stub(req, resp) } }
Example 111
Source File: AkkaImplementation.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.grpc.adapter.utils.implementations import java.util.concurrent.atomic.AtomicInteger import akka.stream.Materializer import akka.stream.scaladsl.{Flow, Source} import com.daml.grpc.adapter.ExecutionSequencerFactory import com.daml.grpc.adapter.server.akka.ServerAdapter import com.daml.grpc.sampleservice.Responding import com.daml.platform.hello.HelloServiceGrpc.HelloService import com.daml.platform.hello.{HelloRequest, HelloResponse, HelloServiceGrpc} import io.grpc.stub.StreamObserver import io.grpc.{BindableService, ServerServiceDefinition} import scala.concurrent.ExecutionContext.Implicits.global class AkkaImplementation( implicit executionSequencerFactory: ExecutionSequencerFactory, materializer: Materializer, ) extends HelloService with Responding with BindableService { private val serverStreamingCalls = new AtomicInteger() def getServerStreamingCalls: Int = serverStreamingCalls.get() override def bindService(): ServerServiceDefinition = HelloServiceGrpc.bindService(this, global) override def serverStreaming( request: HelloRequest, responseObserver: StreamObserver[HelloResponse], ): Unit = Source .single(request) .via(Flow[HelloRequest].mapConcat(responses)) .runWith(ServerAdapter.toSink(responseObserver)) .onComplete(_ => serverStreamingCalls.incrementAndGet()) }
Example 112
Source File: AkkaTest.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.ledger.client.testing import java.util import java.util.concurrent.{Executors, ScheduledExecutorService} import akka.NotUsed import akka.actor.{ActorSystem, Scheduler} import akka.stream.scaladsl.{Sink, Source} import akka.stream.Materializer import akka.util.ByteString import com.daml.grpc.adapter.{ExecutionSequencerFactory, SingleThreadExecutionSequencerPool} import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory} import com.typesafe.scalalogging.LazyLogging import org.scalatest.{BeforeAndAfterAll, Suite} import scala.concurrent.duration._ import scala.concurrent.{Await, ExecutionContextExecutor, Future} import scala.util.control.NonFatal trait AkkaTest extends BeforeAndAfterAll with LazyLogging { self: Suite => // TestEventListener is needed for log testing private val loggers = util.Arrays.asList("akka.event.slf4j.Slf4jLogger", "akka.testkit.TestEventListener") protected implicit val sysConfig: Config = ConfigFactory .load() .withValue("akka.loggers", ConfigValueFactory.fromIterable(loggers)) .withValue("akka.logger-startup-timeout", ConfigValueFactory.fromAnyRef("30s")) .withValue("akka.stdout-loglevel", ConfigValueFactory.fromAnyRef("INFO")) protected implicit val system: ActorSystem = ActorSystem("test", sysConfig) protected implicit val ec: ExecutionContextExecutor = system.dispatchers.lookup("test-dispatcher") protected implicit val scheduler: Scheduler = system.scheduler protected implicit val schedulerService: ScheduledExecutorService = Executors.newSingleThreadScheduledExecutor() protected implicit val materializer: Materializer = Materializer(system) protected implicit val esf: ExecutionSequencerFactory = new SingleThreadExecutionSequencerPool("testSequencerPool") protected val timeout: FiniteDuration = 2.minutes protected val shortTimeout: FiniteDuration = 5.seconds protected def await[T](fun: => Future[T]): T = Await.result(fun, timeout) protected def awaitShort[T](fun: => Future[T]): T = Await.result(fun, shortTimeout) protected def drain(source: Source[ByteString, NotUsed]): ByteString = { val futureResult: Future[ByteString] = source.runFold(ByteString.empty) { (a, b) => a.concat(b) } awaitShort(futureResult) } protected def drain[A, B](source: Source[A, B]): Seq[A] = { val futureResult: Future[Seq[A]] = source.runWith(Sink.seq) awaitShort(futureResult) } override protected def afterAll(): Unit = { try { val _ = await(system.terminate()) } catch { case NonFatal(_) => () } schedulerService.shutdownNow() super.afterAll() } }
Example 113
Source File: ClientUtil.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.quickstart.iou import java.util.UUID import akka.stream.Materializer import akka.stream.scaladsl.{Sink, Source} import akka.{Done, NotUsed} import com.daml.ledger.api.domain.LedgerId import com.daml.ledger.api.refinements.ApiTypes.{ApplicationId, WorkflowId} import com.daml.ledger.api.v1.command_submission_service.SubmitRequest import com.daml.ledger.api.v1.commands.{Command, Commands} import com.daml.ledger.api.v1.ledger_offset.LedgerOffset import com.daml.ledger.api.v1.transaction.Transaction import com.daml.ledger.api.v1.transaction_filter.{Filters, TransactionFilter} import com.daml.ledger.client.LedgerClient import com.daml.quickstart.iou.FutureUtil.toFuture import com.google.protobuf.empty.Empty import scala.concurrent.{ExecutionContext, Future} class ClientUtil( client: LedgerClient, applicationId: ApplicationId, ) { import ClientUtil._ private val ledgerId = client.ledgerId private val packageClient = client.packageClient private val commandClient = client.commandClient private val transactionClient = client.transactionClient def listPackages(implicit ec: ExecutionContext): Future[Set[String]] = packageClient.listPackages().map(_.packageIds.toSet) def ledgerEnd(implicit ec: ExecutionContext): Future[LedgerOffset] = transactionClient.getLedgerEnd().flatMap(response => toFuture(response.offset)) def submitCommand(party: String, workflowId: WorkflowId, cmd: Command.Command): Future[Empty] = { val commands = Commands( ledgerId = LedgerId.unwrap(ledgerId), workflowId = WorkflowId.unwrap(workflowId), applicationId = ApplicationId.unwrap(applicationId), commandId = uniqueId, party = party, commands = Seq(Command(cmd)), ) commandClient.submitSingleCommand(SubmitRequest(Some(commands), None)) } def nextTransaction(party: String, offset: LedgerOffset)( implicit mat: Materializer): Future[Transaction] = transactionClient .getTransactions(offset, None, transactionFilter(party)) .take(1L) .runWith(Sink.head) def subscribe(party: String, offset: LedgerOffset, max: Option[Long])(f: Transaction => Unit)( implicit mat: Materializer): Future[Done] = { val source: Source[Transaction, NotUsed] = transactionClient.getTransactions(offset, None, transactionFilter(party)) max.fold(source)(n => source.take(n)) runForeach f } override lazy val toString: String = s"ClientUtil{ledgerId=$ledgerId}" } object ClientUtil { def transactionFilter(parties: String*): TransactionFilter = TransactionFilter(parties.map((_, Filters.defaultInstance)).toMap) def uniqueId: String = UUID.randomUUID.toString def workflowIdFromParty(p: String): WorkflowId = WorkflowId(s"$p Workflow") }
Example 114
Source File: ClientUtil.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.quickstart.iou import java.util.UUID import akka.stream.Materializer import akka.stream.scaladsl.{Sink, Source} import akka.{Done, NotUsed} import com.daml.ledger.api.refinements.ApiTypes.{ApplicationId, WorkflowId} import com.daml.ledger.api.v1.command_submission_service.SubmitRequest import com.daml.ledger.api.v1.commands.Commands import com.daml.ledger.api.v1.ledger_offset.LedgerOffset import com.daml.ledger.api.v1.transaction.Transaction import com.daml.ledger.api.v1.transaction_filter.{Filters, TransactionFilter} import com.daml.ledger.client.LedgerClient import com.daml.ledger.client.binding.{Primitive => P} import com.daml.quickstart.iou.FutureUtil.toFuture import com.google.protobuf.empty.Empty import scalaz.syntax.tag._ import scala.concurrent.{ExecutionContext, Future} class ClientUtil( client: LedgerClient, applicationId: ApplicationId, ) { import ClientUtil._ private val ledgerId = client.ledgerId private val commandClient = client.commandClient private val transactionClient = client.transactionClient def ledgerEnd(implicit ec: ExecutionContext): Future[LedgerOffset] = transactionClient.getLedgerEnd().flatMap(response => toFuture(response.offset)) def submitCommand[T]( sender: P.Party, workflowId: WorkflowId, command: P.Update[P.ContractId[T]]): Future[Empty] = { commandClient.submitSingleCommand(submitRequest(sender, workflowId, command)) } def submitRequest[T]( party: P.Party, workflowId: WorkflowId, seq: P.Update[P.ContractId[T]]*): SubmitRequest = { val commands = Commands( ledgerId = ledgerId.unwrap, workflowId = WorkflowId.unwrap(workflowId), applicationId = ApplicationId.unwrap(applicationId), commandId = uniqueId, party = P.Party.unwrap(party), commands = seq.map(_.command) ) SubmitRequest(Some(commands), None) } def nextTransaction(party: P.Party, offset: LedgerOffset)( implicit mat: Materializer): Future[Transaction] = transactionClient .getTransactions(offset, None, transactionFilter(party)) .take(1L) .runWith(Sink.head) def subscribe(party: P.Party, offset: LedgerOffset, max: Option[Long])(f: Transaction => Unit)( implicit mat: Materializer): Future[Done] = { val source: Source[Transaction, NotUsed] = transactionClient.getTransactions(offset, None, transactionFilter(party)) max.fold(source)(n => source.take(n)) runForeach f } override lazy val toString: String = s"ClientUtil{ledgerId=$ledgerId}" } object ClientUtil { def transactionFilter(ps: P.Party*): TransactionFilter = TransactionFilter(P.Party.unsubst(ps).map((_, Filters.defaultInstance)).toMap) def uniqueId: String = UUID.randomUUID.toString def workflowIdFromParty(p: P.Party): WorkflowId = WorkflowId(s"${P.Party.unwrap(p): String} Workflow") }
Example 115
Source File: AkkaResourceOwnerSpec.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.resources.akka import akka.actor.{Actor, ActorSystem, Props} import akka.stream.Materializer import akka.stream.scaladsl.{Keep, Sink, Source} import akka.{Done, NotUsed} import com.daml.resources.ResourceOwner import org.scalatest.{AsyncWordSpec, Matchers} import scala.concurrent.{Future, Promise} class AkkaResourceOwnerSpec extends AsyncWordSpec with Matchers { "a function returning an ActorSystem" should { "convert to a ResourceOwner" in { val testPromise = Promise[Int]() class TestActor extends Actor { @SuppressWarnings(Array("org.wartremover.warts.Any")) override def receive: Receive = { case value: Int => testPromise.success(value) case value => testPromise.failure(new IllegalArgumentException(s"$value")) } } val resource = for { actorSystem <- AkkaResourceOwner .forActorSystem(() => ActorSystem("TestActorSystem")) .acquire() actor <- ResourceOwner .successful(actorSystem.actorOf(Props(new TestActor))) .acquire() } yield (actorSystem, actor) for { resourceFuture <- resource.asFuture (actorSystem, actor) = resourceFuture _ = actor ! 7 result <- testPromise.future _ <- resource.release() } yield { result should be(7) an[IllegalStateException] should be thrownBy actorSystem.actorOf(Props(new TestActor)) } } } "a function returning a Materializer" should { "convert to a ResourceOwner" in { val resource = for { actorSystem <- AkkaResourceOwner .forActorSystem(() => ActorSystem("TestActorSystem")) .acquire() materializer <- AkkaResourceOwner.forMaterializer(() => Materializer(actorSystem)).acquire() } yield materializer for { materializer <- resource.asFuture numbers <- Source(1 to 10) .toMat(Sink.seq)(Keep.right[NotUsed, Future[Seq[Int]]]) .run()(materializer) _ <- resource.release() } yield { numbers should be(1 to 10) an[IllegalStateException] should be thrownBy Source .single(0) .toMat(Sink.ignore)(Keep.right[NotUsed, Future[Done]]) .run()(materializer) } } } }
Example 116
Source File: GroupedAverage.scala From streams-tests with Apache License 2.0 | 5 votes |
package com.softwaremill.streams import akka.actor.ActorSystem import akka.stream.ActorMaterializer import akka.stream.scaladsl.{Sink, Source} import com.softwaremill.streams.util.Timed._ import scala.concurrent.Await import scala.concurrent.duration._ import scalaz.stream.{Process, Process0} trait GroupedAverage { def run(input: () => Iterator[Int]): Option[Double] } object AkkaStreamsGroupedAverage extends GroupedAverage { private lazy implicit val system = ActorSystem() def run(input: () => Iterator[Int]): Option[Double] = { implicit val mat = ActorMaterializer() val r = Source.fromIterator(input) .mapConcat(n => List(n, n+1)) .filter(_ % 17 != 0) .grouped(10) .map(group => group.sum / group.size.toDouble) .runWith(Sink.fold[Option[Double], Double](None)((_, el) => Some(el))) Await.result(r, 1.hour) } def shutdown() = { system.terminate() } } object ScalazStreamsGroupedAverage extends GroupedAverage { def run(input: () => Iterator[Int]): Option[Double] = { processFromIterator(input) .flatMap(n => Process(n, n+1)) .filter(_ % 17 != 0) .chunk(10) .map(group => group.sum / group.size.toDouble) .toSource.runLast.run } private def processFromIterator[T](input: () => Iterator[T]): Process0[T] = Process.suspend { val iterator = input() def go(): Process0[T] = { if (iterator.hasNext) { Process.emit(iterator.next()) ++ go() } else Process.halt } go() } } object GroupedAverageRunner extends App { val impls = List(AkkaStreamsGroupedAverage, ScalazStreamsGroupedAverage) val ranges = List(1000, 100000, 1000000, 10000000) val tests = for { impl <- impls range <- ranges } yield ( s"${if (impl == ScalazStreamsGroupedAverage) "scalaz" else "akka"}, 1->$range", () => impl.run(() => Iterator.range(1, range+1)).toString) runTests(tests, 3) AkkaStreamsGroupedAverage.shutdown() }
Example 117
Source File: SlowConsumer.scala From streams-tests with Apache License 2.0 | 5 votes |
package com.softwaremill.streams import akka.actor.ActorSystem import akka.stream.ActorMaterializer import akka.stream.scaladsl.Source import scala.concurrent.Await import scala.concurrent.duration._ import scalaz.concurrent.{Strategy, Task} import scalaz.stream.{Process, async, time} object AkkaSlowConsumer extends App { implicit val system = ActorSystem() implicit val mat = ActorMaterializer() try { val future = Source.tick(0.millis, 100.millis, 1) .conflate(identity)(_ + _) .runForeach { el => Thread.sleep(1000L) println(el) } Await.result(future, 1.hour) } finally system.terminate() } object ScalazSlowConsumer extends App { implicit val scheduler = Strategy.DefaultTimeoutScheduler val queue = async.boundedQueue[Int](10000) val enqueueProcess = time.awakeEvery(100.millis) .map(_ => 1) .to(queue.enqueue) val dequeueProcess = queue.dequeueAvailable .map(_.sum) .flatMap(el => Process.eval_(Task { Thread.sleep(1000L) println(el) })) (enqueueProcess merge dequeueProcess).run.run }
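conflate(identity)(_ + _) above is the pre-Akka-2.4.4 spelling; current Akka splits this into conflate((acc, el) => ...) and conflateWithSeed(seed)(aggregate). The same backpressure-absorbing summation on a recent Akka (2.6, where the implicit ActorSystem provides the materializer) would read:

import akka.actor.ActorSystem
import akka.stream.scaladsl.Source

import scala.concurrent.duration._

object ConflateSketch extends App {
  implicit val system: ActorSystem = ActorSystem("conflate-sketch")

  // While the slow consumer sleeps, incoming ticks are summed instead of
  // back-pressuring the tick source.
  Source
    .tick(0.millis, 100.millis, 1)
    .conflateWithSeed(identity)((acc, el) => acc + el)
    .runForeach { n =>
      Thread.sleep(1000L)
      println(n)
    }
}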
Example 118
Source File: AkkaStreamUtils.scala From akka-serialization-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend import akka.NotUsed import akka.stream.scaladsl.Source import akka.stream.testkit.TestSubscriber import akka.stream.testkit.scaladsl.TestSink import scala.collection.immutable.Seq import scala.concurrent.duration._ trait AkkaStreamUtils { _: TestSpec ⇒ implicit class SourceOps[A, M](src: Source[A, M]) { def testProbe(f: TestSubscriber.Probe[A] ⇒ Unit): Unit = { val tp = src.runWith(TestSink.probe(system)) tp.within(10.seconds)(f(tp)) } } def withIteratorSrc[T](start: Int = 0)(f: Source[Int, NotUsed] ⇒ Unit): Unit = f(Source.fromIterator(() ⇒ Iterator from start)) def fromCollectionProbe[A](xs: Seq[A])(f: TestSubscriber.Probe[A] ⇒ Unit): Unit = f(Source(xs).runWith(TestSink.probe(system))) }
Example 119
Source File: PersonTest.scala From akka-serialization-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend.domain import akka.actor.{ ActorRef, Props } import akka.pattern.ask import akka.persistence.query.EventEnvelope import akka.stream.scaladsl.{ Sink, Source } import akka.testkit.TestProbe import com.github.dnvriend.TestSpec import com.github.dnvriend.domain.Person._ import com.github.dnvriend.persistence.ProtobufReader import proto.person.Command._ class PersonTest extends TestSpec { import com.github.dnvriend.persistence.ProtobufFormats._ def withPerson(id: String)(f: ActorRef ⇒ TestProbe ⇒ Unit): Unit = { val tp = TestProbe() val ref = system.actorOf(Props(new Person(id))) try f(ref)(tp) finally killActors(ref) } "Person" should "register a name" in { withPerson("p1") { ref ⇒ tp ⇒ Source(List(RegisterNameCommand("dennis", "vriend"))) .mapAsync(1)(ref ? _).runWith(Sink.ignore).futureValue } withPerson("p1") { ref ⇒ tp ⇒ Source(List(RegisterNameCommand("dennis", "vriend"))) .mapAsync(1)(ref ? _).runWith(Sink.ignore).futureValue } // note that the persistence-query does not use the deserializer // so the protobuf must be deserialized inline eventsForPersistenceIdSource("p1").collect { case EventEnvelope(_, _, _, proto: NameRegisteredMessage) ⇒ implicitly[ProtobufReader[NameRegisteredEvent]].read(proto) }.testProbe { tp ⇒ tp.request(Int.MaxValue) tp.expectNext(NameRegisteredEvent("dennis", "vriend")) tp.expectNext(NameRegisteredEvent("dennis", "vriend")) tp.expectComplete() } } it should "update its name and surname" in { withPerson("p2") { ref ⇒ tp ⇒ Source(List(RegisterNameCommand("dennis", "vriend"), ChangeNameCommand("jimi"), ChangeSurnameCommand("hendrix"))) .mapAsync(1)(ref ? _).runWith(Sink.ignore).futureValue } eventsForPersistenceIdSource("p2").collect { case EventEnvelope(_, _, _, proto: NameRegisteredMessage) ⇒ implicitly[ProtobufReader[NameRegisteredEvent]].read(proto) case EventEnvelope(_, _, _, proto: NameChangedMessage) ⇒ implicitly[ProtobufReader[NameChangedEvent]].read(proto) case EventEnvelope(_, _, _, proto: SurnameChangedMessage) ⇒ implicitly[ProtobufReader[SurnameChangedEvent]].read(proto) }.testProbe { tp ⇒ tp.request(Int.MaxValue) tp.expectNext(NameRegisteredEvent("dennis", "vriend")) tp.expectNext(NameChangedEvent("jimi")) tp.expectNext(SurnameChangedEvent("hendrix")) tp.expectComplete() } } }
Example 120
Source File: AlbumTest.scala From akka-serialization-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend.domain

import java.time.Duration

import akka.pattern.ask
import akka.stream.scaladsl.{ Sink, Source }
import com.github.dnvriend.TestSpec
import com.github.dnvriend.domain.Music._
import com.github.dnvriend.repository.AlbumRepository

class AlbumTest extends TestSpec {

  "Album" should "register a title" in {
    val album = AlbumRepository.forId("album-1")
    val xs = List(ChangeAlbumTitle("Dark side of the Moon"))
    Source(xs).mapAsync(1)(album ? _).runWith(Sink.ignore).futureValue
    eventsForPersistenceIdSource("album-1").map(_.event).testProbe { tp ⇒
      tp.request(Int.MaxValue)
      tp.expectNextN(xs.map(cmd ⇒ TitleChanged(cmd.title)))
      tp.expectComplete()
    }
    killActors(album)
  }

  it should "update its title and year and songs" in {
    val album = AlbumRepository.forId("album-2")
    val xs = List(
      ChangeAlbumTitle("Dark side of the Moon"),
      ChangeAlbumYear(1973),
      AddSong(Song("Money", Duration.ofSeconds(390))),
      AddSong(Song("Redemption Song", Duration.ofSeconds(227))),
      RemoveSong(Song("Redemption Song", Duration.ofSeconds(227)))
    )
    val expectedEvents = xs.map {
      case ChangeAlbumTitle(title) ⇒ TitleChanged(title)
      case ChangeAlbumYear(year) ⇒ YearChanged(year)
      case AddSong(song) ⇒ SongAdded(song)
      case RemoveSong(song) ⇒ SongRemoved(song)
    }
    Source(xs).mapAsync(1)(album ? _).runWith(Sink.ignore).futureValue
    eventsForPersistenceIdSource("album-2").map(_.event).testProbe { tp ⇒
      tp.request(Int.MaxValue)
      tp.expectNextN(expectedEvents)
      tp.expectComplete()
    }
  }
}
Example 121
Source File: Client.scala From opencensus-scala with Apache License 2.0 | 5 votes |
package io.opencensus.scala.examples.akka.http import akka.actor.ActorSystem import akka.http.scaladsl.Http import akka.http.scaladsl.model.HttpRequest import akka.stream.scaladsl.{Sink, Source} import io.opencensus.scala.akka.http.TracingClient import org.slf4j.bridge.SLF4JBridgeHandler import scala.concurrent.duration._ import scala.concurrent.{Await, Future} import scala.util.{Failure, Success} object Client extends App { // Forward java.util.Logging to slf4j SLF4JBridgeHandler.removeHandlersForRootLogger() SLF4JBridgeHandler.install() implicit val system: ActorSystem = ActorSystem() import system.dispatcher def await[T](f: Future[T]) = Await.result(f, 3.seconds) // Request level client val pipeling = Http().singleRequest(_: HttpRequest) val r1 = await { TracingClient .traceRequest(pipeling)(HttpRequest(uri = "http://localhost:8080")) .flatMap(_.entity.toStrict(1.second)) .map(_.data.utf8String) } println(r1) // Host level client val pool = Http().cachedHostConnectionPool[Unit]("localhost", 8080) val hostFlow = TracingClient.traceRequestForPool(pool) val r2 = await { Source .single(HttpRequest(uri = "/")) .map((_, ())) .via(hostFlow) .map(_._1) .flatMapConcat { case Success(response) => response.entity.dataBytes case Failure(e) => throw e } .map(_.utf8String) .runWith(Sink.head) } println(r2) // Connection level client val connection = Http().outgoingConnection("localhost", 8080) val connectionFlow = TracingClient.traceRequestForConnection(connection) val r3 = await { Source .single(HttpRequest(uri = "/")) .via(connectionFlow) .flatMapConcat(_.entity.dataBytes) .map(_.utf8String) .runWith(Sink.head) } println(r3) }
Example 122
Source File: WsConnection.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.api.ws.connection import java.util.concurrent.ConcurrentLinkedQueue import akka.Done import akka.actor.{ActorRef, ActorSystem, Status} import akka.http.scaladsl.Http import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage, WebSocketRequest} import akka.stream.scaladsl.{Flow, Sink, Source} import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy} import com.wavesplatform.dex.api.ws.protocol.{WsClientMessage, WsMessage, WsPingOrPong, WsServerMessage} import com.wavesplatform.dex.domain.utils.ScorexLogging import play.api.libs.json.Json import scala.collection.JavaConverters._ import scala.concurrent.Future import scala.concurrent.duration._ import scala.util.{Failure, Success, Try} class WsConnection(uri: String, keepAlive: Boolean = true)(implicit system: ActorSystem, materializer: Materializer) extends ScorexLogging { log.info(s"""Connecting to Matcher WS API: | URI = $uri | Keep alive = $keepAlive""".stripMargin) import materializer.executionContext private val wsHandlerRef = system.actorOf(TestWsHandlerActor props keepAlive) protected def stringifyClientMessage(cm: WsClientMessage): TextMessage.Strict = WsMessage.toStrictTextMessage(cm)(WsClientMessage.wsClientMessageWrites) // From test to server private val source: Source[TextMessage.Strict, ActorRef] = { val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case akka.actor.Status.Success(_) => CompletionStrategy.draining } val failureMatcher: PartialFunction[Any, Throwable] = { case Status.Failure(cause) => cause } Source .actorRef[WsClientMessage](completionMatcher, failureMatcher, 10, OverflowStrategy.fail) .map(stringifyClientMessage) .mapMaterializedValue { source => wsHandlerRef.tell(TestWsHandlerActor.AssignSourceRef, source) source } } private val messagesBuffer: ConcurrentLinkedQueue[WsServerMessage] = new ConcurrentLinkedQueue[WsServerMessage]() // From server to test private val sink: Sink[Message, Future[Done]] = Sink.foreach { case tm: TextMessage => for { strictText <- tm.toStrict(1.second).map(_.getStrictText) clientMessage <- { log.trace(s"Got $strictText") Try { Json.parse(strictText).as[WsServerMessage] } match { case Failure(exception) => Future.failed(exception) case Success(x) => { messagesBuffer.add(x) if (keepAlive) x match { case value: WsPingOrPong => wsHandlerRef ! value case _ => } Future.successful(x) } } } } yield clientMessage case bm: BinaryMessage => bm.dataStream.runWith(Sink.ignore) Future.failed { new IllegalArgumentException("Binary messages are not supported") } } private val flow: Flow[Message, TextMessage.Strict, Future[Done]] = Flow.fromSinkAndSourceCoupled(sink, source).watchTermination() { case (_, f) => f.onComplete { case Success(_) => log.info(s"WebSocket connection to $uri successfully closed") case Failure(e) => log.error(s"WebSocket connection to $uri closed with an error", e) }(materializer.executionContext) f } val (connectionResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(uri), flow) val connectionOpenedTs: Long = System.currentTimeMillis val connectionClosedTs: Future[Long] = closed.map(_ => System.currentTimeMillis) val connectionLifetime: Future[FiniteDuration] = connectionClosedTs.map(cc => FiniteDuration(cc - connectionOpenedTs, MILLISECONDS)) def messages: List[WsServerMessage] = messagesBuffer.iterator().asScala.toList def clearMessages(): Unit = messagesBuffer.clear() def send(message: WsClientMessage): Unit = wsHandlerRef ! TestWsHandlerActor.SendToServer(message) def close(): Unit = if (!isClosed) wsHandlerRef ! TestWsHandlerActor.CloseConnection def isClosed: Boolean = closed.isCompleted }
Example 123
Source File: WsConnection.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.load.ws import akka.Done import akka.actor.{ActorRef, ActorSystem, Status} import akka.http.scaladsl.Http import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage, WebSocketRequest} import akka.stream.scaladsl.{Flow, Sink, Source} import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy} import com.wavesplatform.dex.api.ws.connection.TestWsHandlerActor import com.wavesplatform.dex.api.ws.protocol.{WsClientMessage, WsMessage, WsServerMessage} import com.wavesplatform.dex.domain.utils.ScorexLogging import play.api.libs.json.Json import scala.concurrent.Future import scala.concurrent.duration.DurationInt import scala.util.{Failure, Success, Try} class WsConnection(uri: String, receive: WsServerMessage => Option[WsClientMessage])(implicit system: ActorSystem) extends ScorexLogging { import system.dispatcher private implicit val materializer = Materializer(system) private val wsHandlerRef = system.actorOf(TestWsHandlerActor.props(keepAlive = true)) log.info(s"Connecting to Matcher WS API: $uri") protected def stringifyClientMessage(cm: WsClientMessage): TextMessage.Strict = WsMessage.toStrictTextMessage(cm)(WsClientMessage.wsClientMessageWrites) // To server private val source: Source[TextMessage.Strict, ActorRef] = { val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case akka.actor.Status.Success(_) => CompletionStrategy.draining } val failureMatcher: PartialFunction[Any, Throwable] = { case Status.Failure(cause) => cause } Source .actorRef[WsClientMessage](completionMatcher, failureMatcher, 10, OverflowStrategy.fail) .map(stringifyClientMessage) .mapMaterializedValue { source => wsHandlerRef.tell(TestWsHandlerActor.AssignSourceRef, source) source } } // To client private val sink: Sink[Message, Future[Done]] = Sink.foreach { case tm: TextMessage => // TODO move to tests for { strictText <- tm.toStrict(1.second).map(_.getStrictText) clientMessage <- { log.trace(s"Got $strictText") Try { Json.parse(strictText).as[WsServerMessage] } match { case Failure(exception) => Future.failed(exception) case Success(x) => Future.successful { receive(x).foreach(wsHandlerRef ! _) } } } } yield clientMessage case bm: BinaryMessage => bm.dataStream.runWith(Sink.ignore) Future.failed { new IllegalArgumentException("Binary messages are not supported") } } private val flow: Flow[Message, TextMessage.Strict, Future[Done]] = Flow.fromSinkAndSourceCoupled(sink, source).watchTermination() { case (_, f) => f.onComplete { case Success(_) => log.info(s"WebSocket connection to $uri successfully closed") case Failure(e) => log.error(s"WebSocket connection to $uri closed with an error", e) }(materializer.executionContext) f } val (connectionResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(uri), flow) def send(message: WsClientMessage): Unit = wsHandlerRef ! TestWsHandlerActor.SendToServer(message) def isClosed: Boolean = closed.isCompleted def close(): Future[Done] = { if (!isClosed) wsHandlerRef ! TestWsHandlerActor.CloseConnection closed } }
Example 124
Source File: ReadJournalDaoImpl.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.query.dao

import akka.NotUsed
import akka.actor.ActorSystem
import akka.persistence.PersistentRepr
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Source }
import com.github.j5ik2o.akka.persistence.dynamodb.config.QueryPluginConfig
import com.github.j5ik2o.akka.persistence.dynamodb.journal.dao.{ DaoSupport, JournalRowReadDriver }
import com.github.j5ik2o.akka.persistence.dynamodb.journal.JournalRow
import com.github.j5ik2o.akka.persistence.dynamodb.metrics.MetricsReporter
import com.github.j5ik2o.akka.persistence.dynamodb.model.{ PersistenceId, SequenceNumber }
import com.github.j5ik2o.akka.persistence.dynamodb.serialization.FlowPersistentReprSerializer

import scala.collection.immutable.Set
import scala.concurrent.ExecutionContext
import scala.util.Try

class ReadJournalDaoImpl(
    queryProcessor: QueryProcessor,
    override protected val journalRowDriver: JournalRowReadDriver,
    pluginConfig: QueryPluginConfig,
    override val serializer: FlowPersistentReprSerializer[JournalRow],
    override protected val metricsReporter: Option[MetricsReporter]
)(implicit val ec: ExecutionContext, system: ActorSystem)
    extends ReadJournalDao
    with DaoSupport {

  implicit val mat = ActorMaterializer()

  override def allPersistenceIds(max: Long): Source[PersistenceId, NotUsed] = queryProcessor.allPersistenceIds(max)

  private def perfectlyMatchTag(tag: String, separator: String): Flow[JournalRow, JournalRow, NotUsed] =
    Flow[JournalRow].filter(_.tags.exists(tags => tags.split(separator).contains(tag)))

  override def eventsByTag(
      tag: String,
      offset: Long,
      maxOffset: Long,
      max: Long
  ): Source[Try[(PersistentRepr, Set[String], Long)], NotUsed] =
    eventsByTagAsJournalRow(tag, offset, maxOffset, max)
      .via(perfectlyMatchTag(tag, pluginConfig.tagSeparator))
      .via(serializer.deserializeFlowAsTry)

  override def eventsByTagAsJournalRow(
      tag: String,
      offset: Long,
      maxOffset: Long,
      max: Long
  ): Source[JournalRow, NotUsed] = queryProcessor.eventsByTagAsJournalRow(tag, offset, maxOffset, max)

  override def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed] =
    queryProcessor.journalSequence(offset, limit)

  override def getMessagesAsJournalRow(
      persistenceId: PersistenceId,
      fromSequenceNr: SequenceNumber,
      toSequenceNr: SequenceNumber,
      max: Long,
      deleted: Option[Boolean]
  ): Source[JournalRow, NotUsed] =
    journalRowDriver.getJournalRows(persistenceId, fromSequenceNr, toSequenceNr, max, deleted)

  override def maxJournalSequence(): Source[Long, NotUsed] = Source.single(Long.MaxValue)
}
Example 125
Source File: QueryProcessor.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.query.dao

import akka.NotUsed
import akka.stream.Attributes
import akka.stream.scaladsl.{ Source, SourceUtils }
import com.github.j5ik2o.akka.persistence.dynamodb.journal.JournalRow
import com.github.j5ik2o.akka.persistence.dynamodb.model.PersistenceId

trait QueryProcessor {

  def allPersistenceIds(max: Long): Source[PersistenceId, NotUsed]

  def eventsByTagAsJournalRow(
      tag: String,
      offset: Long,
      maxOffset: Long,
      max: Long
  ): Source[JournalRow, NotUsed]

  def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed]

  protected val startTimeSource: Source[Long, NotUsed] =
    SourceUtils.lazySource(() => Source.single(System.nanoTime())).mapMaterializedValue(_ => NotUsed)

  protected val logLevels: Attributes = Attributes.logLevels(
    onElement = Attributes.LogLevels.Debug,
    onFailure = Attributes.LogLevels.Error,
    onFinish = Attributes.LogLevels.Debug
  )
}
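startTimeSource captures System.nanoTime() lazily, once per materialization, which is what makes it usable for per-run latency metrics. SourceUtils is internal to this plugin; a rough equivalent with stock Akka 2.6 operators would be:

// Sketch: lazily capture a start time and pair elements with elapsed millis.
val startTime: Source[Long, NotUsed] =
  Source.lazySource(() => Source.single(System.nanoTime())).mapMaterializedValue(_ => NotUsed)

val timed: Source[(Int, Long), NotUsed] =
  startTime.flatMapConcat { t0 =>
    Source(1 to 3).map(i => (i, (System.nanoTime() - t0) / 1000000)) // elapsed ms per element
  }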
Example 126
Source File: WriteJournalDao.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.journal.dao

import akka.NotUsed
import akka.actor.Scheduler
import akka.persistence.PersistentRepr
import akka.stream.scaladsl.Source
import com.github.j5ik2o.akka.persistence.dynamodb.journal.JournalRow
import com.github.j5ik2o.akka.persistence.dynamodb.model.{ PersistenceId, SequenceNumber }

import scala.concurrent.duration.FiniteDuration
import scala.util.Try

trait WriteJournalDao extends JournalDaoWithReadMessages {

  def deleteMessages(
      persistenceId: PersistenceId,
      toSequenceNr: SequenceNumber
  ): Source[Long, NotUsed]

  def highestSequenceNr(persistenceId: PersistenceId, fromSequenceNr: SequenceNumber): Source[Long, NotUsed]

  def putMessages(messages: Seq[JournalRow]): Source[Long, NotUsed]
}

trait JournalDaoWithUpdates extends WriteJournalDao {

  def updateMessage(journalRow: JournalRow): Source[Unit, NotUsed]
}

trait JournalDaoWithReadMessages {

  def getMessagesAsPersistentRepr(
      persistenceId: PersistenceId,
      fromSequenceNr: SequenceNumber,
      toSequenceNr: SequenceNumber,
      max: Long,
      deleted: Option[Boolean] = Some(false)
  ): Source[Try[PersistentRepr], NotUsed]

  def getMessagesAsPersistentReprWithBatch(
      persistenceId: String,
      fromSequenceNr: Long,
      toSequenceNr: Long,
      batchSize: Int,
      refreshInterval: Option[(FiniteDuration, Scheduler)]
  ): Source[Try[PersistentRepr], NotUsed]
}
Example 127
Source File: JournalRowDriver.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.journal.dao

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.Attributes
import akka.stream.scaladsl.{ Flow, Source, SourceUtils }
import com.github.j5ik2o.akka.persistence.dynamodb.journal.JournalRow
import com.github.j5ik2o.akka.persistence.dynamodb.model.{ PersistenceId, SequenceNumber }

trait JournalRowDriver {

  def system: ActorSystem

  protected val startTimeSource: Source[Long, NotUsed] =
    SourceUtils.lazySource(() => Source.single(System.nanoTime())).mapMaterializedValue(_ => NotUsed)

  protected val logLevels: Attributes = Attributes.logLevels(
    onElement = Attributes.LogLevels.Debug,
    onFailure = Attributes.LogLevels.Error,
    onFinish = Attributes.LogLevels.Debug
  )
}

trait JournalRowReadDriver extends JournalRowDriver {

  def getJournalRows(
      persistenceId: PersistenceId,
      toSequenceNr: SequenceNumber,
      deleted: Boolean
  ): Source[Seq[JournalRow], NotUsed]

  def getJournalRows(
      persistenceId: PersistenceId,
      fromSequenceNr: SequenceNumber,
      toSequenceNr: SequenceNumber,
      max: Long,
      deleted: Option[Boolean] = Some(false)
  ): Source[JournalRow, NotUsed]

  def highestSequenceNr(
      persistenceId: PersistenceId,
      fromSequenceNr: Option[SequenceNumber] = None,
      deleted: Option[Boolean] = None
  ): Source[Long, NotUsed]
}

trait JournalRowWriteDriver extends JournalRowReadDriver {
  def singlePutJournalRowFlow: Flow[JournalRow, Long, NotUsed]
  def multiPutJournalRowsFlow: Flow[Seq[JournalRow], Long, NotUsed]
  def updateMessage(journalRow: JournalRow): Source[Unit, NotUsed]
  def singleDeleteJournalRowFlow: Flow[PersistenceIdWithSeqNr, Long, NotUsed]
  def multiDeleteJournalRowsFlow: Flow[Seq[PersistenceIdWithSeqNr], Long, NotUsed]
}
Example 128
Source File: DynamoDBSnapshotStore.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.snapshot

import akka.actor.ExtendedActorSystem
import akka.persistence.snapshot.SnapshotStore
import akka.persistence.{ SelectedSnapshot, SnapshotMetadata, SnapshotSelectionCriteria }
import akka.serialization.SerializationExtension
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
import com.github.j5ik2o.akka.persistence.dynamodb.config.SnapshotPluginConfig
import com.github.j5ik2o.akka.persistence.dynamodb.model.{ PersistenceId, SequenceNumber }
import com.github.j5ik2o.akka.persistence.dynamodb.snapshot.dao.{ SnapshotDao, SnapshotDaoImpl }
import com.github.j5ik2o.akka.persistence.dynamodb.utils.V2DynamoDbClientBuilderUtils
import com.github.j5ik2o.reactive.aws.dynamodb.DynamoDbAsyncClient
import com.typesafe.config.Config
import software.amazon.awssdk.services.dynamodb.{ DynamoDbAsyncClient => JavaDynamoDbAsyncClient }

import scala.concurrent.{ ExecutionContext, Future }

object DynamoDBSnapshotStore {

  def toSelectedSnapshot(tupled: (SnapshotMetadata, Any)): SelectedSnapshot = tupled match {
    case (meta: SnapshotMetadata, snapshot: Any) => SelectedSnapshot(meta, snapshot)
  }
}

class DynamoDBSnapshotStore(config: Config) extends SnapshotStore {
  import DynamoDBSnapshotStore._

  implicit val ec: ExecutionContext        = context.dispatcher
  implicit val system: ExtendedActorSystem = context.system.asInstanceOf[ExtendedActorSystem]
  implicit val mat                         = ActorMaterializer()

  private val serialization = SerializationExtension(system)

  protected val pluginConfig: SnapshotPluginConfig = SnapshotPluginConfig.fromConfig(config)

  protected val javaClient: JavaDynamoDbAsyncClient =
    V2DynamoDbClientBuilderUtils.setupAsync(system.dynamicAccess, pluginConfig).build()
  protected val asyncClient: DynamoDbAsyncClient = DynamoDbAsyncClient(javaClient)

  protected val snapshotDao: SnapshotDao = new SnapshotDaoImpl(asyncClient, serialization, pluginConfig)

  override def loadAsync(
      persistenceId: String,
      criteria: SnapshotSelectionCriteria
  ): Future[Option[SelectedSnapshot]] = {
    val result = criteria match {
      case SnapshotSelectionCriteria(Long.MaxValue, Long.MaxValue, _, _) =>
        snapshotDao.latestSnapshot(PersistenceId(persistenceId))
      case SnapshotSelectionCriteria(Long.MaxValue, maxTimestamp, _, _) =>
        snapshotDao.snapshotForMaxTimestamp(PersistenceId(persistenceId), maxTimestamp)
      case SnapshotSelectionCriteria(maxSequenceNr, Long.MaxValue, _, _) =>
        snapshotDao.snapshotForMaxSequenceNr(PersistenceId(persistenceId), SequenceNumber(maxSequenceNr))
      case SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp, _, _) =>
        snapshotDao.snapshotForMaxSequenceNrAndMaxTimestamp(
          PersistenceId(persistenceId),
          SequenceNumber(maxSequenceNr),
          maxTimestamp
        )
      case _ => Source.empty
    }
    result.map(_.map(toSelectedSnapshot)).runWith(Sink.head)
  }

  override def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] =
    snapshotDao.save(metadata, snapshot).runWith(Sink.ignore).map(_ => ())

  override def deleteAsync(metadata: SnapshotMetadata): Future[Unit] =
    snapshotDao
      .delete(PersistenceId(metadata.persistenceId), SequenceNumber(metadata.sequenceNr)).map(_ => ()).runWith(
        Sink.ignore
      ).map(_ => ())

  override def deleteAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Unit] = {
    val pid = PersistenceId(persistenceId)
    criteria match {
      case SnapshotSelectionCriteria(Long.MaxValue, Long.MaxValue, _, _) =>
        snapshotDao.deleteAllSnapshots(pid).runWith(Sink.ignore).map(_ => ())
      case SnapshotSelectionCriteria(Long.MaxValue, maxTimestamp, _, _) =>
        snapshotDao.deleteUpToMaxTimestamp(pid, maxTimestamp).runWith(Sink.ignore).map(_ => ())
      case SnapshotSelectionCriteria(maxSequenceNr, Long.MaxValue, _, _) =>
        snapshotDao
          .deleteUpToMaxSequenceNr(pid, SequenceNumber(maxSequenceNr)).runWith(Sink.ignore).map(_ => ())
      case SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp, _, _) =>
        snapshotDao
          .deleteUpToMaxSequenceNrAndMaxTimestamp(pid, SequenceNumber(maxSequenceNr), maxTimestamp).runWith(
            Sink.ignore
          ).map(_ => ())
      case _ => Future.successful(())
    }
  }
}
Example 129
Source File: SemanticRepositorySpecs.scala From daf-semantics with Apache License 2.0 | 5 votes |
package specs

import org.junit.runner.RunWith

import scala.concurrent.{ Await, Future }
import scala.concurrent.duration.Duration

import play.api.test._
import play.api.http.Status
import play.api.Application
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.ws.WSResponse
import play.api.libs.ws.ahc.AhcWSClient

import org.specs2.runner.JUnitRunner
import org.specs2.mutable.Specification

import play.api.libs.json.Json
//import it.almawave.linkeddata.kb.utils.ConfigHelper

import scala.collection.JavaConversions._
import scala.collection.JavaConverters._

import play.twirl.api.Content
import play.api.test.Helpers._
import play.api.libs.json.JsObject
import java.io.File
import play.api.http.Writeable
import akka.stream.scaladsl.Source
import play.api.mvc.MultipartFormData
import play.api.libs.Files.TemporaryFile
import java.nio.file.Files
import org.asynchttpclient.AsyncHttpClient
import play.api.libs.ws.WS
import akka.util.ByteString
import play.api.mvc.MultipartFormData.DataPart
import play.api.mvc.MultipartFormData.FilePart
import akka.stream.scaladsl.FileIO
import play.api.libs.ws.WSClient

/*
 * TODO: REWRITE
 */
@RunWith(classOf[JUnitRunner])
class SemanticRepositorySpecs extends Specification {

  def application: Application = GuiceApplicationBuilder().build()

  "The semantic repository" should {

    "call kb/v1/contexts to obtain a list of contexts" in {
      new WithServer(app = application, port = 9999) {
        WsTestClient.withClient { implicit client =>
          val response: WSResponse = Await.result[WSResponse](
            client.url(s"http://localhost:${port}/kb/v1/contexts").execute,
            Duration.Inf)
          response.status must be equalTo Status.OK
          response.json.as[Seq[JsObject]].size must be equalTo 0
          // response.json.as[Seq[JsObject]].size must be greaterThan 0 // if pre-loaded ontologies!
        }
      }
    }

    "call kb/v1/contexts ensuring all contexts have triples" in {
      new WithServer(app = application, port = 9999) {
        WsTestClient.withClient { implicit client =>
          val response: WSResponse = Await.result[WSResponse](
            client.url(s"http://localhost:${port}/kb/v1/contexts").execute,
            Duration.Inf)
          val json_list = response.json.as[Seq[JsObject]]
          forall(json_list)((_) must not beNull)
          forall(json_list)(_.keys must contain("context", "triples"))
          forall(json_list)(item => (item \ "triples").get.as[Int] > 0)
        }
      }
    }
  }
}
Example 130
Source File: TestSpec.scala From intro-to-akka-streams with Apache License 2.0 | 5 votes |
package com.github.dnvriend.streams

import akka.NotUsed
import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.stream.testkit.TestSubscriber
import akka.stream.testkit.scaladsl.TestSink
import akka.testkit.TestProbe
import akka.util.Timeout
import com.github.dnvriend.streams.util.ClasspathResources
import org.scalatest._
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatestplus.play.guice.GuiceOneServerPerSuite
import play.api.inject.BindingKey
import play.api.libs.json.{ Format, Json }
import play.api.test.WsTestClient

import scala.collection.immutable._
import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
import scala.util.Try

object Person {
  implicit val format: Format[Person] = Json.format[Person]
}

final case class Person(firstName: String, age: Int)

class TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with OptionValues
    with TryValues
    with ScalaFutures
    with WsTestClient
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with ClasspathResources
    with GuiceOneServerPerSuite {

  def getComponent[A: ClassTag] = app.injector.instanceOf[A]

  def getNamedComponent[A](name: String)(implicit ct: ClassTag[A]): A =
    app.injector.instanceOf[A](BindingKey(ct.runtimeClass.asInstanceOf[Class[A]]).qualifiedWith(name))

  // set the port number of the HTTP server
  override lazy val port: Int = 8081
  implicit val timeout: Timeout = 1.second
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 30.seconds, interval = 300.millis)
  implicit val system: ActorSystem = getComponent[ActorSystem]
  implicit val ec: ExecutionContext = getComponent[ExecutionContext]
  implicit val mat: Materializer = getComponent[Materializer]
  val log: LoggingAdapter = Logging(system, this.getClass)

  // ================================== Supporting Operations ====================================
  def id: String = java.util.UUID.randomUUID().toString

  implicit class FutureToTry[T](f: Future[T]) {
    def toTry: Try[T] = Try(f.futureValue)
  }

  implicit class SourceOps[A](src: Source[A, NotUsed]) {
    def testProbe(f: TestSubscriber.Probe[A] ⇒ Unit): Unit =
      f(src.runWith(TestSink.probe(system)))
  }

  def withIterator[T](start: Int = 0)(f: Source[Int, NotUsed] ⇒ T): T =
    f(Source.fromIterator(() ⇒ Iterator from start))

  def fromCollection[A](xs: Iterable[A])(f: TestSubscriber.Probe[A] ⇒ Unit): Unit =
    f(Source(xs).runWith(TestSink.probe(system)))

  def killActors(refs: ActorRef*): Unit = {
    val tp = TestProbe()
    refs.foreach { ref ⇒
      tp watch ref
      tp.send(ref, PoisonPill)
      tp.expectTerminated(ref)
    }
  }
}
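These helpers keep each stream assertion down to a few lines. A sketch of how withIterator and the testProbe extension combine in a spec of this style:

// Sketch: a spec written against the helpers defined in TestSpec above.
class TakeTwoTest extends TestSpec {
  it should "emit the first two numbers" in withIterator(1) { src ⇒
    src.take(2).testProbe { tp ⇒
      tp.request(Long.MaxValue)
      tp.expectNext(1, 2)
      tp.expectComplete()
    }
  }
}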
Example 131
Source File: ActorRefWithAckTest.scala From intro-to-akka-streams with Apache License 2.0 | 5 votes |
package com.github.dnvriend.streams.sink

import akka.actor.{ Actor, ActorRef, Props }
import akka.stream.scaladsl.{ Sink, Source }
import akka.stream.testkit.TestPublisher
import akka.stream.testkit.scaladsl.TestSource
import akka.testkit.TestProbe
import com.github.dnvriend.streams.TestSpec

import scala.concurrent.duration._
import scala.reflect.ClassTag

// see: https://github.com/akka/akka/blob/4acc1cca6a27be0ff80f801de3640f91343dce94/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSinkSpec.scala
object ActorRefWithAckTest {
  final val InitMessage = "start"
  final val CompleteMessage = "done"
  final val AckMessage = "ack"

  class Forwarder(ref: ActorRef) extends Actor {
    def receive = {
      case msg @ `InitMessage` ⇒
        sender() ! AckMessage
        ref forward msg
      case msg @ `CompleteMessage` ⇒
        ref forward msg
      case msg ⇒
        sender() ! AckMessage
        ref forward msg
    }
  }
}

class ActorRefWithAckTest extends TestSpec {
  import ActorRefWithAckTest._

  def createActor[A: ClassTag](testProbeRef: ActorRef): ActorRef =
    system.actorOf(Props(implicitly[ClassTag[A]].runtimeClass, testProbeRef))

  def withForwarder(xs: Int*)(f: TestProbe ⇒ Unit): Unit = {
    val tp = TestProbe()
    val ref = createActor[Forwarder](tp.ref)
    Source(xs.toList).runWith(Sink.actorRefWithAck(ref, InitMessage, AckMessage, CompleteMessage))
    try f(tp) finally killActors(ref)
  }

  def withTestPublisher[A](f: (TestPublisher.Probe[A], TestProbe, ActorRef) ⇒ Unit): Unit = {
    val tp = TestProbe()
    val ref = createActor[Forwarder](tp.ref)
    val pub: TestPublisher.Probe[A] =
      TestSource.probe[A].to(Sink.actorRefWithAck(ref, InitMessage, AckMessage, CompleteMessage)).run()
    try f(pub, tp, ref) finally killActors(ref)
  }

  it should "send the elements to the ActorRef" in {
    // which means that the forwarder actor that acts as a sink
    // will initially receive an InitMessage
    // next it will receive each `payload` element, here 1, 2 and 3,
    // finally the forwarder will receive the CompletedMessage, stating that
    // the producer completes the stream because there are no more elements (a finite stream)
    withForwarder(1, 2, 3) { tp ⇒
      tp.expectMsg(InitMessage)
      tp.expectMsg(1)
      tp.expectMsg(2)
      tp.expectMsg(3)
      tp.expectMsg(CompleteMessage)
      tp.expectNoMsg(100.millis)
    }
  }

  it should "send the elements to the ActorRef manually 1, 2 and 3" in {
    withTestPublisher[Int] { (pub, tp, _) ⇒
      pub.sendNext(1)
      tp.expectMsg(InitMessage)
      tp.expectMsg(1)

      pub.sendNext(2)
      tp.expectMsg(2)

      pub.sendNext(3)
      tp.expectMsg(3)

      pub.sendComplete()
      tp.expectMsg(CompleteMessage)
      tp.expectNoMsg(100.millis)
    }
  }

  it should "cancel stream when actor terminates" in {
    withTestPublisher[Int] { (pub, tp, ref) ⇒
      pub.sendNext(1)
      tp.expectMsg(InitMessage)
      tp.expectMsg(1)
      killActors(ref)
      pub.expectCancellation()
    }
  }
}
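Outside a test, the same init/ack/complete protocol is what lets a slow actor backpressure the stream: the sink waits for the ack after every element before requesting the next one. A minimal non-test sketch (the actor and its message values are illustrative):

// Sketch: an actor consuming a stream with backpressure via acks.
class SlowWorker extends Actor {
  def receive = {
    case "start" ⇒ sender() ! "ack" // stream initialized
    case "done"  ⇒ context.stop(self) // stream completed
    case elem ⇒
      // ... do slow work with elem ...
      sender() ! "ack" // ask the stream for the next element
  }
}

val worker = system.actorOf(Props[SlowWorker])
Source(1 to 100).runWith(Sink.actorRefWithAck(worker, "start", "ack", "done"))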
Example 132
Source File: ActorSubscriberTest.scala From intro-to-akka-streams with Apache License 2.0 | 5 votes |
package com.github.dnvriend.streams.sink

import akka.Done
import akka.actor.Actor.Receive
import akka.actor.{ ActorRef, Props }
import akka.event.LoggingReceive
import akka.stream.actor.ActorSubscriberMessage.{ OnComplete, OnError, OnNext }
import akka.stream.actor.{ ActorSubscriber, OneByOneRequestStrategy, RequestStrategy }
import akka.stream.scaladsl.{ Sink, Source }
import akka.stream.testkit.TestPublisher
import akka.stream.testkit.scaladsl.TestSource
import akka.testkit.TestProbe
import com.github.dnvriend.streams.TestSpec
import com.github.dnvriend.streams.sink.ActorSubscriberTest.TestActorSubscriber

import scala.concurrent.Future
import scala.reflect.ClassTag

object ActorSubscriberTest {
  final val OnNextMessage = "onNext"
  final val OnCompleteMessage = "onComplete"
  final val OnErrorMessage = "onError"

  class TestActorSubscriber(ref: ActorRef) extends ActorSubscriber {
    override protected val requestStrategy: RequestStrategy = OneByOneRequestStrategy
    override def receive: Receive = LoggingReceive {
      case OnNext(msg)    ⇒ ref ! OnNextMessage
      case OnComplete     ⇒ ref ! OnCompleteMessage
      case OnError(cause) ⇒ ref ! OnErrorMessage
    }
  }
}

//class ActorSubscriberTest extends TestSpec {
//  def withForwarder(xs: Int*)(f: TestProbe ⇒ Unit): Unit = {
//    val tp = TestProbe()
//    val ref = new TestActorSubscriber(tp.ref)
//    Source(xs.toList).to(Sink.actorSubscriber(Props())).mapMaterializedValue(_ ⇒ Future.successful[Done]).run()
//    try f(tp) finally killActors(ref)
//  }
//
//}
Example 133
Source File: QueueSourceTest.scala From intro-to-akka-streams with Apache License 2.0 | 5 votes |
package com.github.dnvriend.streams.source

import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{ Keep, Sink, Source, SourceQueueWithComplete }
import com.github.dnvriend.streams.TestSpec

import scala.collection.immutable._
import scala.concurrent.Future

class QueueSourceTest extends TestSpec {
  it should "queue a b and c and return Seq(a, b, c)" in {
    val (queue: SourceQueueWithComplete[String], xs: Future[Seq[String]]) =
      Source.queue[String](Int.MaxValue, OverflowStrategy.backpressure).toMat(Sink.seq)(Keep.both).run()

    queue.offer("a").toTry should be a 'success // offer 'a' to stream
    queue.offer("b").toTry should be a 'success // b
    queue.offer("c").toTry should be a 'success // and c

    // complete the queue
    queue.complete()
    queue.watchCompletion().toTry should be a 'success

    // get the results of the stream
    xs.futureValue shouldEqual Seq("a", "b", "c")
    xs.futureValue should not equal Seq("c", "b", "a")
  }
}
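offer returns a Future[QueueOfferResult], so a producer can react to drops and closure explicitly instead of ignoring them. A small sketch (assumes an implicit materializer and execution context in scope):

// Sketch: inspecting QueueOfferResult when feeding a Source.queue.
import akka.stream.QueueOfferResult

val queue = Source.queue[Int](10, OverflowStrategy.dropNew)
  .to(Sink.foreach(println))
  .run()

queue.offer(42).foreach {
  case QueueOfferResult.Enqueued    => println("enqueued")
  case QueueOfferResult.Dropped     => println("dropped: buffer full")
  case QueueOfferResult.Failure(ex) => println(s"offer failed: $ex")
  case QueueOfferResult.QueueClosed => println("queue already completed")
}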
Example 134
Source File: FailedSource.scala From intro-to-akka-streams with Apache License 2.0 | 5 votes |
package com.github.dnvriend.streams.source

import akka.Done
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{ Keep, Merge, Sink, Source, SourceQueueWithComplete }
import com.github.dnvriend.streams.TestSpec

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.collection.immutable._

class FailedSource extends TestSpec {
  it should "fail the stream" in {
    Source.failed[Int](new RuntimeException("test error")).testProbe { tp ⇒
      tp.request(Long.MaxValue)
      tp.expectError()
    }
  }

  it should "complete a stream" in {
    val (queue: SourceQueueWithComplete[Int], done: Future[Done]) =
      Source.queue[Int](1, OverflowStrategy.dropNew).toMat(Sink.ignore)(Keep.both).run
    queue.complete()
    done.toTry should be a 'success
  }

  it should "complete a stream normally" in {
    // Sink.seq materializes a Future[Seq[String]], not Future[Done]
    val (queue: SourceQueueWithComplete[String], done: Future[Seq[String]]) =
      Source.queue[String](1, OverflowStrategy.dropNew).flatMapConcat {
        case "stop" ⇒ Source.failed(new RuntimeException("test error"))
        case str    ⇒ Source.single(str)
      }.toMat(Sink.seq)(Keep.both).run

    Thread.sleep(3000)
    queue.offer("foo").futureValue
    queue.offer("bar").futureValue
    queue.complete()
    done.futureValue shouldBe List("foo", "bar")
  }

  it should "force stop a stream with an error" in {
    val (queue: SourceQueueWithComplete[String], done: Future[Seq[String]]) =
      Source.queue[String](1, OverflowStrategy.dropNew).flatMapConcat {
        case "stop" ⇒ Source.failed(new RuntimeException("test error"))
        case str    ⇒ Source.single(str)
      }.toMat(Sink.seq)(Keep.both).run

    Thread.sleep(3000)
    queue.offer("stop").futureValue
    done.toTry should be a 'failure
  }
}
Example 135
Source File: FlatteningStagesTest.scala From intro-to-akka-streams with Apache License 2.0 | 5 votes |
package com.github.dnvriend.streams.nesting

import akka.stream.scaladsl.Source
import com.github.dnvriend.streams.TestSpec

import scala.concurrent.Future

class FlatteningStagesTest extends TestSpec {
  it should "flatten and concat all sub-streams and output the result" in withIterator(1) { src ⇒
    src.take(3).flatMapConcat { i ⇒
      Source.fromFuture(Future(i)).map(_ + 1)
    }.testProbe { tp ⇒
      tp.request(Long.MaxValue)
      tp.expectNext(2, 3, 4)
      tp.expectComplete()
    }
  }
}
Example 136
Source File: ClasspathResources.scala From intro-to-akka-streams with Apache License 2.0 | 5 votes |
package com.github.dnvriend.streams.util

import java.io.InputStream

import akka.NotUsed
import akka.stream.IOResult
import akka.stream.scaladsl.{ Source, StreamConverters }
import akka.util.ByteString

import scala.concurrent.Future
import scala.io.{ Source ⇒ ScalaIOSource }
import scala.util.Try
import scala.xml.pull.{ XMLEvent, XMLEventReader }

trait ClasspathResources {
  def withInputStream[T](fileName: String)(f: InputStream ⇒ T): T = {
    val is = fromClasspathAsStream(fileName)
    try {
      f(is)
    } finally {
      Try(is.close())
    }
  }

  def withXMLEventReader[T](fileName: String)(f: XMLEventReader ⇒ T): T =
    withInputStream(fileName) { is ⇒
      f(new XMLEventReader(ScalaIOSource.fromInputStream(is)))
    }

  def withXMLEventSource[T](fileName: String)(f: Source[XMLEvent, NotUsed] ⇒ T): T =
    withXMLEventReader(fileName) { reader ⇒
      f(Source.fromIterator(() ⇒ reader))
    }

  def withByteStringSource[T](fileName: String)(f: Source[ByteString, Future[IOResult]] ⇒ T): T =
    withInputStream(fileName) { inputStream ⇒
      f(StreamConverters.fromInputStream(() ⇒ inputStream))
    }

  def streamToString(is: InputStream): String =
    ScalaIOSource.fromInputStream(is).mkString

  def fromClasspathAsString(fileName: String): String =
    streamToString(fromClasspathAsStream(fileName))

  def fromClasspathAsStream(fileName: String): InputStream =
    getClass.getClassLoader.getResourceAsStream(fileName)
}
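The trait's withByteStringSource is the standard bridge from a blocking InputStream into a stream of ByteStrings; the materialized Future[IOResult] reports the byte count and completion status. Stand-alone, the same bridge looks like this (the resource name is a placeholder):

// Sketch: stream a classpath resource and fold its size.
val bytes: Source[ByteString, Future[IOResult]] =
  StreamConverters.fromInputStream(() => getClass.getClassLoader.getResourceAsStream("data.txt")) // placeholder

val totalBytes: Future[Long] = bytes.runFold(0L)((acc, bs) => acc + bs.size)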
Example 137
Source File: FileIOTest.scala From intro-to-akka-streams with Apache License 2.0 | 5 votes |
package com.github.dnvriend.streams.io

import akka.stream.scaladsl.{ Sink, Source }
import com.github.dnvriend.streams.TestSpec

class FileIOTest extends TestSpec {
  trait Foo
  case class ImportStarted(fileName: String, processId: String) extends Foo
  case class ImportFinished(a: String = "") extends Foo
  case class ImportFailed(t: Throwable) extends Foo
  case class NestedType2(a: String = "") extends Foo
  case class NestedType1(b: String = "") extends Foo
  case class RootType(c: String = "") extends Foo

  case class ImportFileCommand(processId: String = "abcdefg", fileName: String = "fileName.xml")

  it should "import" in {
    // import process
    def unmarshaller(fileName: String, processId: String) =
      Source(List(ImportStarted(fileName, processId), NestedType2(), NestedType1(), RootType(), ImportFinished()))

    Source(List.fill(1)(ImportFileCommand()))
      .flatMapConcat { cmd ⇒
        unmarshaller(cmd.fileName, cmd.processId)
          .map {
            // case _: NestedType2 ⇒ throw new RuntimeException("error")
            case e ⇒ e
          }
      }
      .recover {
        case t: Throwable ⇒ ImportFailed(t)
      }
      .runWith(Sink.seq).futureValue should matchPattern {
        case Seq(ImportStarted("fileName.xml", "abcdefg"), NestedType2(_), ImportFailed(_)) ⇒
        case Seq(ImportStarted("fileName.xml", "abcdefg"), NestedType2(_), NestedType1(_), RootType(_), ImportFinished(_)) ⇒
      }
  }
}
Example 138
Source File: MapAsyncUnorderedStageTest.scala From intro-to-akka-streams with Apache License 2.0 | 5 votes |
package com.github.dnvriend.streams.stage.async

import akka.stream.scaladsl.Source
import akka.stream.testkit.scaladsl.TestSink
import com.github.dnvriend.streams.TestSpec

import scala.concurrent.Future

class MapAsyncUnorderedStageTest extends TestSpec {
  "MapAsyncUnordered" should "transform the stream by applying the function to each element" in {
    withIterator() { src ⇒
      src.take(10)
        .mapAsyncUnordered(4)(num ⇒ Future(num * 2))
        .runWith(TestSink.probe[Int])
        .request(11)
        .expectNextUnordered(0, 2, 4, 6, 8, 10, 12, 14, 16, 18)
        .expectComplete()
    }
  }
}
Example 139
Source File: WriteAndReadFilteredAkkaApp.scala From parquet4s with MIT License | 5 votes |
package com.github.mjakubowski84.parquet4s.akka

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.{ActorMaterializer, Materializer}
import com.github.mjakubowski84.parquet4s.{Col, ParquetReader, ParquetStreams}
import com.google.common.io.Files

import scala.concurrent.Future
import scala.util.Random

object WriteAndReadFilteredAkkaApp extends App {

  object Dict {
    val A = "A"
    val B = "B"
    val C = "C"
    val D = "D"

    val values: List[String] = List(A, B, C, D)
    def random: String = values(Random.nextInt(values.length))
  }

  case class Data(id: Int, dict: String)

  val count = 100
  val data = (1 to count).map { i => Data(id = i, dict = Dict.random) }
  val path = Files.createTempDir().getAbsolutePath

  implicit val system: ActorSystem = ActorSystem()
  implicit val materializer: Materializer = ActorMaterializer()

  import system.dispatcher

  val options = ParquetReader.Options()
  val printingSink = Sink.foreach(println)

  for {
    // write
    _ <- Source(data).runWith(ParquetStreams.toParquetSingleFile(s"$path/data.parquet"))
    // read filtered
    _ <- Future(println("""dict == "A""""))
    _ <- ParquetStreams.fromParquet[Data](path, options = options, filter = Col("dict") === Dict.A).runWith(printingSink)
    _ <- Future(println("""id >= 20 && id < 40"""))
    _ <- ParquetStreams.fromParquet[Data](path, options = options, filter = Col("id") >= 20 && Col("id") < 40).runWith(printingSink)
    // finish
    _ <- system.terminate()
  } yield ()
}
Example 140
Source File: WriteAndReadCustomTypeAkkaApp.scala From parquet4s with MIT License | 5 votes |
package com.github.mjakubowski84.parquet4s.akka

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.{ActorMaterializer, Materializer}
import com.github.mjakubowski84.parquet4s.CustomType._
import com.github.mjakubowski84.parquet4s.ParquetStreams
import com.google.common.io.Files

object WriteAndReadCustomTypeAkkaApp extends App {

  object Data {
    def generate(count: Int): Iterator[Data] = Iterator.range(1, count).map { i => Data(id = i, dict = Dict.random) }
  }
  case class Data(id: Long, dict: Dict.Type)

  val data = () => Data.generate(count = 100)
  val path = Files.createTempDir().getAbsolutePath

  implicit val system: ActorSystem = ActorSystem()
  implicit val materializer: Materializer = ActorMaterializer()

  import system.dispatcher

  for {
    // write
    _ <- Source.fromIterator(data).runWith(ParquetStreams.toParquetSingleFile(s"$path/data.parquet"))
    // read
    // hint: you can filter by dict using string value, for example: filter = Col("dict") === "A"
    _ <- ParquetStreams.fromParquet[Data](path).runWith(Sink.foreach(println))
    // finish
    _ <- system.terminate()
  } yield ()
}
Example 141
Source File: WriteAndReadAkkaApp.scala From parquet4s with MIT License | 5 votes |
package com.github.mjakubowski84.parquet4s.akka

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.{ActorMaterializer, Materializer}
import com.github.mjakubowski84.parquet4s.ParquetStreams
import com.google.common.io.Files

import scala.util.Random

object WriteAndReadAkkaApp extends App {

  case class Data(id: Int, text: String)

  val count = 100
  val data = (1 to count).map { i => Data(id = i, text = Random.nextString(4)) }
  val path = Files.createTempDir().getAbsolutePath

  implicit val system: ActorSystem = ActorSystem()
  implicit val materializer: Materializer = ActorMaterializer()

  import system.dispatcher

  for {
    // write
    _ <- Source(data).runWith(ParquetStreams.toParquetSingleFile(s"$path/data.parquet"))
    // read
    _ <- ParquetStreams.fromParquet[Data](path).runWith(Sink.foreach(println))
    // finish
    _ <- system.terminate()
  } yield ()
}
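All three parquet4s apps rely on the same idiom: a for-comprehension over Futures sequences the write, the read, and the actor system shutdown. Desugared, the chain is just nested flatMaps:

// Sketch: the same sequencing written with explicit flatMap instead of `for`.
Source(data)
  .runWith(ParquetStreams.toParquetSingleFile(s"$path/data.parquet"))
  .flatMap(_ => ParquetStreams.fromParquet[Data](path).runWith(Sink.foreach(println)))
  .flatMap(_ => system.terminate())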
Example 142
Source File: QueryExecution.scala From daf with BSD 3-Clause "New" or "Revised" License | 5 votes |
package controllers

import akka.stream.scaladsl.Source
import cats.syntax.show.toShow
import daf.dataset._
import daf.dataset.query.jdbc.{ JdbcResult, QueryFragmentWriterSyntax, Writers }
import daf.dataset.query.Query
import daf.web._
import daf.filesystem._
import daf.instances.FileSystemInstance
import it.gov.daf.common.utils._
import org.apache.hadoop.fs.Path
import play.api.libs.json.JsValue

import scala.concurrent.Future
import scala.util.{ Failure, Success, Try }

trait QueryExecution { this: DatasetController with DatasetExport with FileSystemInstance =>

  private def extractDatabaseName(parent: String, params: FileDatasetParams) = parent.toLowerCase match {
    case "opendata" => params.extraParams.get("theme").map { s => s"opendata__${s.toLowerCase}" } getOrElse "opendata" // append __{theme} for opendata
    case other      => other // use the parent dir for other data
  }

  private def extractTableName(path: Path, params: FileDatasetParams): Try[String] = Try {
    s"${extractDatabaseName(path.getParent.getName, params)}.${path.getName.toLowerCase}"
  }

  private def extractTableName(params: DatasetParams, userId: String): Try[String] = params match {
    case kudu: KuduDatasetParams => (proxyUser as userId) { downloadService.tableInfo(kudu.table) } map { _ => kudu.table }
    case file: FileDatasetParams => (proxyUser as userId) { extractTableName(file.path.asHadoop.resolve, file) }
  }

  private def prepareQuery(params: DatasetParams, query: Query, userId: String) = for {
    tableName <- extractTableName(params, userId)
    fragment  <- Writers.sql(query, tableName).write
  } yield fragment.query[Unit].sql

  private def analyzeQuery(params: DatasetParams, query: Query, userId: String) = for {
    tableName <- extractTableName(params, userId)
    analysis  <- queryService.explain(query, tableName, userId)
  } yield analysis

  private def transform(jdbcResult: JdbcResult, targetFormat: FileDataFormat) = targetFormat match {
    case CsvFileFormat  => Try {
      Source[String](jdbcResult.toCsv).map { csv => s"$csv${System.lineSeparator}" }
    }
    case JsonFileFormat => Try {
      wrapJson { Source[JsValue](jdbcResult.toJson).map { _.toString } }
    }
    case _ => Failure { new IllegalArgumentException(s"Invalid target format [$targetFormat]; must be [csv | json]") }
  }

  // Web
  // Failure

  private def failQuickExec(params: DatasetParams, targetFormat: FileDataFormat) = Future.successful {
    TemporaryRedirect {
      s"${controllers.routes.DatasetController.queryDataset(params.catalogUri, targetFormat.show, "batch").url}"
    }
  }

  // Executions

  private def doBatchExec(params: DatasetParams, query: Query, targetFormat: FileDataFormat, userId: String) =
    prepareQuery(params, query, userId) match {
      case Success(sql)   => prepareQueryExport(sql, targetFormat).map { formatExport(_, targetFormat) }
      case Failure(error) => Future.failed { error }
    }

  private def doQuickExec(params: DatasetParams, query: Query, targetFormat: FileDataFormat, userId: String) = for {
    tableName  <- extractTableName(params, userId)
    jdbcResult <- queryService.exec(query, tableName, userId)
    data       <- transform(jdbcResult, targetFormat)
  } yield data

  // API

  protected def quickExec(params: DatasetParams, query: Query, targetFormat: FileDataFormat, userId: String) =
    analyzeQuery(params, query, userId) match {
      case Success(analysis) if analysis.memoryEstimation <= impalaConfig.memoryEstimationLimit =>
        doQuickExec(params, query, targetFormat, userId).~>[Future].map { respond(_, params.name, targetFormat) }
      case Success(_)     => failQuickExec(params, targetFormat)
      case Failure(error) => Future.failed { error }
    }

  protected def batchExec(params: DatasetParams, query: Query, targetFormat: FileDataFormat, userId: String) =
    doBatchExec(params, query, targetFormat, userId).map { respond(_, params.name, targetFormat) }

  protected def exec(params: DatasetParams, query: Query, userId: String, targetFormat: FileDataFormat, method: DownloadMethod) = method match {
    case QuickDownloadMethod => quickExec(params, query, targetFormat, userId)
    case BatchDownloadMethod => batchExec(params, query, targetFormat, userId)
  }
}
Example 143
Source File: DatasetExport.scala From daf with BSD 3-Clause "New" or "Revised" License | 5 votes |
package controllers

import akka.stream.scaladsl.{ Source, StreamConverters }
import cats.syntax.show.toShow
import daf.dataset.{ DatasetParams, ExtraParams }
import daf.filesystem.{ CsvFileFormat, FileDataFormat, JsonFileFormat, PathInfo, fileFormatShow }
import daf.web.contentType

import scala.concurrent.Future
import scala.util.{ Failure, Success }

trait DatasetExport { this: DatasetController =>

  protected def prepareDirect(params: DatasetParams, targetFormat: FileDataFormat, limit: Option[Int]) = targetFormat match {
    case JsonFileFormat => datasetService.jsonData(params, limit)
    case CsvFileFormat  => datasetService.csvData(params, limit)
    case _              => Failure { new IllegalArgumentException("Unable to prepare download; only CSV and JSON are permitted") }
  }

  protected def prepareFileExport(pathInfo: PathInfo, sourceFormat: FileDataFormat, targetFormat: FileDataFormat, extraParams: ExtraParams, limit: Option[Int] = None) =
    fileExportService.exportFile(pathInfo.path, sourceFormat, targetFormat, extraParams, limit).map { downloadService.openPath }.flatMap {
      case Success(stream) => Future.successful { StreamConverters.fromInputStream { () => stream } }
      case Failure(error)  => Future.failed { error }
    }

  protected def prepareTableExport(table: String, targetFormat: FileDataFormat, extraParams: ExtraParams, limit: Option[Int] = None) =
    fileExportService.exportTable(table, targetFormat, extraParams, limit).map { downloadService.openPath }.flatMap {
      case Success(stream) => Future.successful { StreamConverters.fromInputStream { () => stream } }
      case Failure(error)  => Future.failed { error }
    }

  protected def prepareQueryExport(query: String, targetFormat: FileDataFormat) =
    fileExportService.exportQuery(query, targetFormat).map { downloadService.openPath }.flatMap {
      case Success(stream) => Future.successful { StreamConverters.fromInputStream { () => stream } }
      case Failure(error)  => Future.failed { error }
    }

  protected def respond(data: Source[String, _], fileName: String, targetFormat: FileDataFormat) = Ok.chunked(data).withHeaders(
    CONTENT_DISPOSITION -> s"""attachment; filename="$fileName.${targetFormat.show}"""",
    CONTENT_TYPE        -> contentType(targetFormat)
  )
}
Example 144
Source File: DatasetService.scala From daf with BSD 3-Clause "New" or "Revised" License | 5 votes |
package daf.dataset

import akka.stream.scaladsl.Source
import com.typesafe.config.Config
import controllers.PhysicalDatasetController
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types.StructType

import scala.util.Try

class DatasetService(config: Config) {

  private val storageClient = PhysicalDatasetController(config)

  def schema(params: DatasetParams): Try[StructType] = storageClient.get(params, Some(1)).map { _.schema }

  def data(params: DatasetParams, limit: Option[Int]): Try[DataFrame] = storageClient.get(params, limit)

  def jsonData(params: DatasetParams, limit: Option[Int]) = data(params, limit).map { json }

  def json(dataFrame: DataFrame) = wrapJson {
    Source[String] { dataFrame.toJSON.collect().toVector }
  }

  // This code produces valid JSON but is inconsistent with Spark's JSON structure
  // def json(dataFrame: DataFrame) = Source[String] { "<start>" +: dataFrame.toJSON.collect().toVector :+ "<end>"}.sliding(2, 2).map {
  //   case Seq("<start>", "<end>") => "[]"
  //   case Seq("<start>", row)     => s"[${System.lineSeparator()}  $row"
  //   case Seq(row, "<end>")       => s",${System.lineSeparator()}  $row${System.lineSeparator()}]"
  //   case Seq("<end>")            => s"${System.lineSeparator()}]"
  //   case Seq(row1, row2)         => s",${System.lineSeparator()}  $row1,${System.lineSeparator}  $row2"
  //   case rows                    => rows.map { row => s",${System.lineSeparator()}  $row" }.mkString
  // }

  // TODO: split code without breaking Spark task serialization
  def csvData(params: DatasetParams, limit: Option[Int]) = data(params, limit).map { csv }

  def csv(dataFrame: DataFrame) = Source[String] {
    dataFrame.schema.fieldNames.map { h => s""""$h"""" }.mkString(",") +:
      dataFrame.rdd.map { _.toSeq.map { cleanCsv }.mkString(",") }.collect().toVector
  }.map { row => s"$row${System.lineSeparator}" }
}
Example 145
Source File: DirManager.scala From daf with BSD 3-Clause "New" or "Revised" License | 5 votes |
package it.gov.daf.catalogmanager.listeners

import java.net.URLEncoder

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ FileIO, Source }
import net.caoticode.dirwatcher.FSListener
import play.api.libs.ws.WSClient
import play.api.libs.ws.ahc.AhcWSClient
import play.api.mvc.MultipartFormData.FilePart
import play.Logger

import scala.concurrent.Future

class DirManager() extends FSListener {

  import java.nio.file.Path
  import scala.concurrent.ExecutionContext.Implicits.global

  val logger = Logger.underlying()

  override def onCreate(ref: Path): Unit = {
    implicit val system = ActorSystem()
    implicit val materializer = ActorMaterializer()
    val wsClient = AhcWSClient()

    val name = ref.getParent.getFileName.toString
    println(name)

    val uri: Option[String] = IngestionUtils.datasetsNameUri.get(name)
    val logicalUri = URLEncoder.encode(uri.get, "UTF-8")
    logger.debug("logicalUri: " + logicalUri)

    call(wsClient)
      .andThen { case _ => wsClient.close() }
      .andThen { case _ => system.terminate() }

    def call(wsClient: WSClient): Future[Unit] = {
      wsClient.url("http://localhost:9001/ingestion-manager/v1/add-datasets/" + logicalUri)
        //.withHeaders("content-type" -> "multipart/form-data")
        .post(Source(FilePart("upfile", name, None, FileIO.fromPath(ref)) :: List())).map { response =>
          val statusText: String = response.statusText
          logger.debug(s"Got a response $statusText")
        }
    }
    logger.debug(s"created $ref")
  }

  override def onDelete(ref: Path): Unit = println(s"deleted $ref")
  override def onModify(ref: Path): Unit = println(s"modified $ref")
}
Example 146
Source File: Ctx.scala From sangria-subscriptions-example with Apache License 2.0 | 5 votes |
import akka.NotUsed
import akka.util.Timeout
import schema.MutationError
import akka.actor.ActorRef
import generic.Event
import generic.MemoryEventStore._
import generic.View.{Get, GetMany}
import akka.pattern.ask
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Source
import org.reactivestreams.Publisher

import scala.concurrent.{ExecutionContext, Future}

case class Ctx(
  authors: ActorRef,
  articles: ActorRef,
  eventStore: ActorRef,
  eventStorePublisher: Publisher[Event],
  ec: ExecutionContext,
  to: Timeout
) extends Mutation {
  implicit def executionContext = ec
  implicit def timeout = to

  lazy val eventStream: Source[Event, NotUsed] =
    Source.fromPublisher(eventStorePublisher).buffer(100, OverflowStrategy.fail)

  def addEvent[T](view: ActorRef, event: Event) =
    (eventStore ? AddEvent(event)).flatMap {
      case EventAdded(_) ⇒
        (view ? Get(event.id, Some(event.version))).mapTo[Option[T]]
      case OverCapacity(_) ⇒
        throw MutationError("Service is overloaded.")
      case ConcurrentModification(_, latestVersion) ⇒
        throw MutationError(s"Concurrent Modification error for entity '${event.id}'. Latest entity version is '$latestVersion'.")
    }

  def addDeleteEvent(event: Event) =
    (eventStore ? AddEvent(event)).map {
      case EventAdded(e) ⇒ e
      case OverCapacity(_) ⇒
        throw MutationError("Service is overloaded.")
      case ConcurrentModification(_, latestVersion) ⇒
        throw MutationError(s"Concurrent Modification error for entity '${event.id}'. Latest entity version is '$latestVersion'.")
    }

  def loadLatestVersion(id: String, version: Long): Future[Long] =
    (eventStore ? LatestEventVersion(id)) map {
      case Some(latestVersion: Long) if version != latestVersion ⇒
        throw MutationError(s"Concurrent Modification error for entity '$id'. Latest entity version is '$latestVersion'.")
      case Some(version: Long) ⇒ version + 1
      case _ ⇒ throw MutationError(s"Entity with ID '$id' does not exist.")
    }

  def loadAuthors(ids: Seq[String]) =
    (authors ? GetMany(ids)).mapTo[Seq[Author]]
}
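Source.fromPublisher is the Reactive Streams bridge into Akka Streams; the explicit buffer(100, OverflowStrategy.fail) makes the overflow policy visible rather than relying on defaults. The same bridge in isolation (somePublisher is a stand-in for any org.reactivestreams.Publisher[Int]):

// Sketch: adapting a Reactive Streams Publisher into an Akka Streams Source.
val events: Source[Int, NotUsed] =
  Source.fromPublisher(somePublisher)
    .buffer(100, OverflowStrategy.fail) // fail fast if consumers fall behind

events.runWith(Sink.foreach(println))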
Example 147
Source File: ReliableHttpProxyFactory.scala From reliable-http-client with Apache License 2.0 | 5 votes |
package rhttpc.akkahttp.proxy

import akka.NotUsed
import akka.actor._
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpEntity, HttpRequest, HttpResponse}
import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Sink, Source}
import org.slf4j.LoggerFactory
import rhttpc.client.protocol.{Correlated, Request}
import rhttpc.client.proxy._

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

object ReliableHttpProxyFactory {

  private lazy val logger = LoggerFactory.getLogger(getClass)

  def send(successRecognizer: SuccessHttpResponseRecognizer, batchSize: Int, parallelConsumers: Int)
          (request: Request[HttpRequest])
          (implicit actorSystem: ActorSystem, materialize: Materializer): Future[HttpResponse] = {
    import actorSystem.dispatcher
    send(prepareHttpFlow(batchSize * parallelConsumers), successRecognizer)(request.correlated)
  }

  private def prepareHttpFlow(parallelism: Int)
                             (implicit actorSystem: ActorSystem, materialize: Materializer): Flow[(HttpRequest, String), HttpResponse, NotUsed] = {
    import actorSystem.dispatcher
    Http().superPool[String]().mapAsync(parallelism) {
      case (tryResponse, id) =>
        tryResponse match {
          case Success(response) => response.toStrict(1 minute)
          case Failure(ex)       => Future.failed(ex)
        }
    }
  }

  private def send(httpFlow: Flow[(HttpRequest, String), HttpResponse, Any], successRecognizer: SuccessHttpResponseRecognizer)
                  (corr: Correlated[HttpRequest])
                  (implicit ec: ExecutionContext, materialize: Materializer): Future[HttpResponse] = {
    import collection.JavaConverters._
    logger.debug(
      s"""Sending request for ${corr.correlationId} to ${corr.msg.getUri()}. Headers:
         |${corr.msg.getHeaders().asScala.toSeq.map(h => "  " + h.name() + ": " + h.value()).mkString("\n")}
         |Body:
         |${corr.msg.entity.asInstanceOf[HttpEntity.Strict].data.utf8String}""".stripMargin
    )
    val logResp = logResponse(corr) _
    val responseFuture = Source.single((corr.msg, corr.correlationId)).via(httpFlow).runWith(Sink.head)
    responseFuture.onComplete {
      case Failure(ex) => logger.error(s"Got failure for ${corr.correlationId} to ${corr.msg.getUri()}", ex)
      case Success(_)  =>
    }
    for {
      response <- responseFuture
      transformedToFailureIfNeed <- {
        if (successRecognizer.isSuccess(response)) {
          logResp(response, "success response")
          Future.successful(response)
        } else {
          logResp(response, "response recognized as non-success")
          Future.failed(NonSuccessResponse)
        }
      }
    } yield transformedToFailureIfNeed
  }

  private def logResponse(corr: Correlated[HttpRequest])
                         (response: HttpResponse, additionalInfo: String): Unit = {
    import collection.JavaConverters._
    logger.debug(
      s"""Got $additionalInfo for ${corr.correlationId} to ${corr.msg.getUri()}. Status: ${response.status.value}. Headers:
         |${response.getHeaders().asScala.toSeq.map(h => "  " + h.name() + ": " + h.value()).mkString("\n")}
         |Body:
         |${response.entity.asInstanceOf[HttpEntity.Strict].data.utf8String}""".stripMargin
    )
  }
}
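Http().superPool[T]() is a Flow from (HttpRequest, T) to (Try[HttpResponse], T), routed through per-host connection pools; the correlation value T travels with each request, which is exactly how send above recovers the correlationId. Minimal stand-alone use (URL and id are placeholders):

// Sketch: one request through superPool, correlated by a String id.
val pool = Http().superPool[String]()

val result: Future[(Try[HttpResponse], String)] =
  Source.single((HttpRequest(uri = "http://example.org/"), "req-1")) // placeholders
    .via(pool)
    .runWith(Sink.head)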
Example 148
Source File: HTTPResponseStream.scala From akka_streams_tutorial with MIT License | 5 votes |
package akkahttp

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.common.{EntityStreamingSupport, JsonEntityStreamingSupport}
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives.{complete, get, logRequestResult, path, _}
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Flow, Sink, Source}
import com.typesafe.config.ConfigFactory
import spray.json.DefaultJsonProtocol

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}

object HTTPResponseStream extends App with DefaultJsonProtocol with SprayJsonSupport {
  implicit val system = ActorSystem("HTTPResponseStream")
  implicit val executionContext = system.dispatcher

  // JSON Protocol and streaming support
  final case class ExamplePerson(name: String)
  implicit def examplePersonFormat = jsonFormat1(ExamplePerson.apply)
  implicit val jsonStreamingSupport: JsonEntityStreamingSupport = EntityStreamingSupport.json()

  val (address, port) = ("127.0.0.1", 8080)
  server(address, port)
  client(address, port)

  def client(address: String, port: Int): Unit = {
    val requestParallelism = ConfigFactory.load.getInt("akka.http.host-connection-pool.max-connections")

    val requests: Source[HttpRequest, NotUsed] = Source
      .fromIterator(() =>
        Range(0, requestParallelism).map(i => HttpRequest(uri = Uri(s"http://$address:$port/download/$i"))).iterator
      )

    // Run singleRequest and completely consume response elements
    def runRequestDownload(req: HttpRequest) =
      Http()
        .singleRequest(req)
        .flatMap { response =>
          val unmarshalled: Future[Source[ExamplePerson, NotUsed]] = Unmarshal(response).to[Source[ExamplePerson, NotUsed]]
          val source: Source[ExamplePerson, Future[NotUsed]] = Source.futureSource(unmarshalled)
          source.via(processorFlow).runWith(printSink)
        }

    requests
      .mapAsync(requestParallelism)(runRequestDownload)
      .runWith(Sink.ignore)
  }

  val printSink = Sink.foreach[ExamplePerson] { each: ExamplePerson => println(s"Client processed element: $each") }

  val processorFlow: Flow[ExamplePerson, ExamplePerson, NotUsed] = Flow[ExamplePerson].map { each: ExamplePerson =>
    //println(s"Process: $each")
    each
  }

  def server(address: String, port: Int): Unit = {

    def routes: Route = logRequestResult("httpecho") {
      path("download" / Segment) { id: String =>
        get {
          println(s"Server received request with id: $id, stream response...")
          extractRequest { r: HttpRequest =>
            val finishedWriting = r.discardEntityBytes().future
            onComplete(finishedWriting) { done =>
              // Limit response by appending eg .take(5)
              val responseStream: Stream[ExamplePerson] = Stream.continually(ExamplePerson(s"request:$id"))
              complete(Source(responseStream).throttle(1, 1.second, 1, ThrottleMode.shaping))
            }
          }
        }
      }
    }

    val bindingFuture = Http().bindAndHandle(routes, address, port)
    bindingFuture.onComplete {
      case Success(b) =>
        println("Server started, listening on: " + b.localAddress)
      case Failure(e) =>
        println(s"Server could not bind to: $address:$port. Exception message: ${e.getMessage}")
        system.terminate()
    }
  }
}
Example 149
Source File: PublishToSourceQueueFromMultipleThreads.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream

import akka.actor.ActorSystem
import akka.stream.Supervision.Decider
import akka.stream._
import akka.stream.scaladsl.{Flow, Sink, Source, SourceQueueWithComplete}
import akka.{Done, NotUsed}
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}

object PublishToSourceQueueFromMultipleThreads extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("PublishToSourceQueueFromMultipleThreads")
  implicit val ec = system.dispatcher

  val bufferSize = 100
  // As of akka 2.6.x there is a thread safe implementation for SourceQueue
  val maxConcurrentOffers = 1000
  val numberOfPublishingClients = 1000

  val slowSink: Sink[Seq[Int], NotUsed] =
    Flow[Seq[Int]]
      .delay(2.seconds, DelayOverflowStrategy.backpressure)
      .to(Sink.foreach(e => logger.info(s"Reached sink: $e")))

  val sourceQueue: SourceQueueWithComplete[Int] =
    Source
      .queue[Int](bufferSize, OverflowStrategy.backpressure, maxConcurrentOffers)
      .groupedWithin(10, 1.seconds)
      .to(slowSink)
      .run

  val doneConsuming: Future[Done] = sourceQueue.watchCompletion()
  signalWhen(doneConsuming, "consuming") // never completes

  simulatePublishingFromMultipleThreads()

  // Before 2.6.x a stream had to be used to throttle and control the backpressure
  //simulatePublishingClientsFromStream()

  // Decide on the stream level, because the OverflowStrategy.backpressure
  // on the sourceQueue causes an IllegalStateException
  // Handling this on the stream level allows to restart the stream
  private def simulatePublishingClientsFromStream() = {
    val decider: Decider = {
      case _: IllegalStateException => println("Got backpressure signal for offered element, restart..."); Supervision.Restart
      case _                        => Supervision.Stop
    }

    val donePublishing: Future[Done] = Source(1 to numberOfPublishingClients)
      .mapAsync(10)(offerToSourceQueue) // throttle
      .withAttributes(ActorAttributes.supervisionStrategy(decider))
      .runWith(Sink.ignore)
    signalWhen(donePublishing, "publishing")
  }

  private def simulatePublishingFromMultipleThreads() =
    (1 to numberOfPublishingClients).par.foreach(offerToSourceQueue)

  private def offerToSourceQueue(each: Int) = {
    sourceQueue.offer(each).map {
      case QueueOfferResult.Enqueued    => logger.info(s"enqueued $each")
      case QueueOfferResult.Dropped     => logger.info(s"dropped $each")
      case QueueOfferResult.Failure(ex) => logger.info(s"Offer failed: $ex")
      case QueueOfferResult.QueueClosed => logger.info("Source Queue closed")
    }
  }

  private def signalWhen(done: Future[Done], operation: String) = {
    done.onComplete {
      case Success(b) =>
        logger.info(s"Finished: $operation")
      case Failure(e) =>
        logger.info(s"Failure: $e About to terminate...")
        system.terminate()
    }
  }
}
Example 150
Source File: CalculateMedian.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream

import java.util.concurrent.ThreadLocalRandom

import akka.actor.ActorSystem
import akka.stream.ThrottleMode
import akka.stream.scaladsl.Source

import scala.annotation.tailrec
import scala.concurrent.duration._

//noinspection LanguageFeature
object CalculateMedian {
  implicit val system = ActorSystem("CalculateMedian")
  implicit val ec = system.dispatcher

  def main(args: Array[String]) = {
    val maxRandomNumber = 100
    val source = Source.fromIterator(() => Iterator.continually(ThreadLocalRandom.current().nextDouble(maxRandomNumber)))

    source
      .throttle(1, 10.millis, 1, ThrottleMode.shaping)
      .groupedWithin(100, 1.second)
      //.map{each => println(each); each}
      .map(each => medianOfMedians(each.toArray))
      .runForeach(result => println(s"Median of Median (grouped by 5) over the last 100 elements: $result"))
      .onComplete(_ => system.terminate())
  }

  @tailrec
  def findKMedian(arr: Array[Double], k: Int)(implicit choosePivot: Array[Double] => Double): Double = {
    val a = choosePivot(arr)
    val (s, b) = arr partition (a >)
    if (s.length == k) a
    // The following test is used to avoid infinite repetition
    else if (s.isEmpty) {
      val (s, b) = arr partition (a ==)
      if (s.length > k) a
      else findKMedian(b, k - s.length)
    } else if (s.length < k) findKMedian(b, k - s.length)
    else findKMedian(s, k)
  }

  def medianUpTo5(five: Array[Double]): Double = {
    def order2(a: Array[Double], i: Int, j: Int) = {
      if (a(i) > a(j)) { val t = a(i); a(i) = a(j); a(j) = t }
    }

    def pairs(a: Array[Double], i: Int, j: Int, k: Int, l: Int) = {
      if (a(i) < a(k)) { order2(a, j, k); a(j) }
      else { order2(a, i, l); a(i) }
    }

    if (five.length < 2) return five(0)
    order2(five, 0, 1)
    if (five.length < 4)
      return if (five.length == 2 || five(2) < five(0)) five(0)
      else if (five(2) > five(1)) five(1)
      else five(2)
    order2(five, 2, 3)
    if (five.length < 5) pairs(five, 0, 1, 2, 3)
    else if (five(0) < five(2)) { order2(five, 1, 4); pairs(five, 1, 4, 2, 3) }
    else { order2(five, 3, 4); pairs(five, 0, 1, 3, 4) }
  }

  def medianOfMedians(arr: Array[Double]): Double = {
    val medians = (arr grouped 5 map medianUpTo5).toArray
    if (medians.length <= 5) medianUpTo5(medians)
    else medianOfMedians(medians)
  }
}
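The median-of-medians step is what keeps the per-window work roughly linear; the price is that the result is an approximation of the true median. A quick check of the helper on fixed input:

// Sketch: medianOfMedians on a known array; the result approximates,
// but need not equal, the exact median (5.0 for this input).
val xs = Array(9.0, 1.0, 5.0, 3.0, 7.0, 2.0, 8.0, 4.0, 6.0)
println(CalculateMedian.medianOfMedians(xs))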
Example 151
Source File: PublishToBlockingResource.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream

import java.util.concurrent.{ArrayBlockingQueue, BlockingQueue}

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.DelayOverflowStrategy
import akka.stream.scaladsl.{Flow, Sink, Source}

import scala.concurrent.duration._
import scala.util.Failure

object PublishToBlockingResource extends App {
  implicit val system = ActorSystem("PublishToBlockingResource")
  implicit val ec = system.dispatcher

  val slowSink: Sink[Seq[Int], NotUsed] =
    Flow[Seq[Int]]
      .delay(1.seconds, DelayOverflowStrategy.backpressure)
      .to(Sink.foreach(e => println(s"Reached sink: $e")))

  val blockingResource: BlockingQueue[Int] = new ArrayBlockingQueue[Int](100)

  // Start a new `Source` from some (third party) blocking resource which can be opened, read and closed
  val source: Source[Int, NotUsed] =
    Source.unfoldResource[Int, BlockingQueue[Int]](
      () => blockingResource,                    // open
      (q: BlockingQueue[Int]) => Some(q.take()), // read
      (_: BlockingQueue[Int]) => {})             // close

  val done = source
    .groupedWithin(10, 2.seconds)
    .watchTermination()((_, done) => done.onComplete {
      case Failure(err) => println(s"Flow failed: $err")
      case each         => println(s"Server flow terminated: $each")
    })
    .runWith(slowSink)

  // simulate n processes that publish in blocking fashion to the queue
  (1 to 1000).par.foreach(value => blockingResource.put(value))
}
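Source.unfoldResource wraps any open/read/close triple; returning None from the read function completes the stream, which never happens above because a BlockingQueue has no natural end. A variant that does terminate, reading lines from a file (the file name is a placeholder):

// Sketch: unfoldResource over a BufferedReader; a null line becomes None and completes the stream.
import java.io.{BufferedReader, FileReader}

val lines: Source[String, NotUsed] =
  Source.unfoldResource[String, BufferedReader](
    () => new BufferedReader(new FileReader("input.txt")), // open (placeholder file)
    reader => Option(reader.readLine()),                   // read; null => None => complete
    reader => reader.close())                              // close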
Example 152
Source File: TweetExample.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream

import java.time.{Instant, ZoneId}

import akka.NotUsed
import akka.actor.{ActorSystem, Cancellable}
import akka.stream.DelayOverflowStrategy
import akka.stream.scaladsl.{Flow, MergePrioritized, Sink, Source}
import org.apache.commons.lang3.exception.ExceptionUtils
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.duration._
import scala.util.{Failure, Success}

object TweetExample extends App {
  implicit val system = ActorSystem("TweetExample")
  implicit val ec = system.dispatcher
  val logger: Logger = LoggerFactory.getLogger(this.getClass)

  final case class Author(handle: String)

  final case class Hashtag(name: String)

  final case class Tweet(author: Author, timestamp: Long, body: String) {
    def hashtags: Set[Hashtag] =
      body.split(" ").collect {
        case t if t.startsWith("#") => Hashtag(t)
      }.toSet

    override def toString = {
      val localDateTime = Instant.ofEpochMilli(timestamp).atZone(ZoneId.systemDefault()).toLocalDateTime
      s"$localDateTime - ${author.handle} tweeted: ${body.take(5)}..."
    }
  }

  val akkaTag = Hashtag("#akka")

  val tweetsLowPrio: Source[Tweet, Cancellable] =
    Source.tick(1.second, 200.millis, NotUsed).map(_ => Tweet(Author("LowPrio"), System.currentTimeMillis, "#other #akka aBody"))
  val tweetsHighPrio: Source[Tweet, Cancellable] =
    Source.tick(2.second, 1.second, NotUsed).map(_ => Tweet(Author("HighPrio"), System.currentTimeMillis, "#akka #other aBody"))
  val tweetsVeryHighPrio: Source[Tweet, Cancellable] =
    Source.tick(2.second, 1.second, NotUsed).map(_ => Tweet(Author("VeryHighPrio"), System.currentTimeMillis, "#akka #other aBody"))

  val limitedTweets: Source[Tweet, NotUsed] =
    Source.combine(tweetsLowPrio, tweetsHighPrio, tweetsVeryHighPrio)(_ => MergePrioritized(List(1, 10, 100))).take(20)

  val processingFlow = Flow[Tweet]
    .filter(_.hashtags.contains(akkaTag))
    .wireTap(each => logger.info(s"$each"))

  val slowDownstream =
    Flow[Tweet]
      .delay(5.seconds, DelayOverflowStrategy.backpressure)

  val processedTweets =
    limitedTweets
      .via(processingFlow)
      .via(slowDownstream)
      .runWith(Sink.seq)

  processedTweets.onComplete {
    case Success(results) =>
      logger.info(s"Successfully processed: ${results.size} tweets")
      system.terminate()
    case Failure(exception) =>
      logger.info(s"The stream failed with: ${ExceptionUtils.getRootCause(exception)}")
      system.terminate()
  }
}
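Source.combine with MergePrioritized(List(1, 10, 100)) draws from whichever inputs currently have elements, weighted 1:10:100, so the very-high-priority ticker wins most races. The same idea on trivial sources:

// Sketch: weighted merge; "hi" is picked roughly 9 times more often than "lo"
// whenever both sources have an element ready.
val lo = Source.repeat("lo")
val hi = Source.repeat("hi")

Source.combine(lo, hi)(_ => MergePrioritized(List(1, 9)))
  .take(20)
  .runForeach(println)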
Example 153
Source File: MergeHubWithDynamicSources.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.DelayOverflowStrategy
import akka.stream.scaladsl.{Flow, MergeHub, RunnableGraph, Sink, Source}

import scala.concurrent.duration._

object MergeHubWithDynamicSources {
  implicit val system = ActorSystem("MergeHubWithDynamicSources")
  implicit val ec = system.dispatcher

  def main(args: Array[String]): Unit = {
    val slowSink: Sink[Seq[String], NotUsed] =
      Flow[Seq[String]]
        .delay(1.seconds, DelayOverflowStrategy.backpressure)
        .to(Sink.foreach(e => println(s"Reached Sink: $e")))

    // Attach a MergeHub Source to the consumer. This will materialize to a corresponding Sink
    val runnableGraph: RunnableGraph[Sink[String, NotUsed]] =
      MergeHub.source[String](perProducerBufferSize = 16)
        .groupedWithin(10, 2.seconds)
        .to(slowSink)

    // By running/materializing the graph we get back a Sink, and hence now have access to feed elements into it
    // This Sink can then be materialized any number of times, and every element that enters the Sink will be consumed by our consumer
    val toConsumer: Sink[String, NotUsed] = runnableGraph.run()

    def fastSource(sourceId: Int, toConsumer: Sink[String, NotUsed]) = {
      Source(1 to 10)
        .map { each => println(s"Produced: $sourceId.$each"); s"$sourceId.$each" }
        .runWith(toConsumer)
    }

    // Add dynamic producer sources. If the consumer cannot keep up, then ALL of the producers are backpressured
    (1 to 10).par.foreach(each => fastSource(each, toConsumer))
  }
}
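Because the hub keeps running after `runnableGraph.run()`, the materialized `toConsumer` Sink can be handed to any later stream as well. A small usage sketch, reusing `toConsumer` from above:

// Any additional source can be attached to the same hub after materialization:
Source.single("extra-element").runWith(toConsumer)
Source.tick(0.seconds, 1.second, "tick").take(3).runWith(toConsumer)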
Example 154
Source File: PartitionHubWithDynamicSinks.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Keep, PartitionHub, RunnableGraph, Source}

import scala.concurrent.duration._

object PartitionHubWithDynamicSinks {
  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  def main(args: Array[String]): Unit = {
    val producer = Source.tick(1.second, 100.millis, "message").zipWith(Source(1 to 100))((a, b) => s"$a-$b")

    // A new instance of the partitioner function and its state is created for each materialization of the PartitionHub
    def partitionRoundRobin(): (PartitionHub.ConsumerInfo, String) => Long = {
      var i = -1L

      (info, elem) => {
        i += 1
        info.consumerIdByIdx((i % info.size).toInt)
      }
    }

    def partitionToFastestConsumer(): (PartitionHub.ConsumerInfo, String) => Long = {
      (info: PartitionHub.ConsumerInfo, each: String) => info.consumerIds.minBy(id => info.queueSize(id))
    }

    // Attach a PartitionHub Sink to the producer. This will materialize to a corresponding Source
    // We need to use toMat and Keep.right since by default the materialized value to the left is used
    val runnableGraph: RunnableGraph[Source[String, NotUsed]] =
      producer.toMat(PartitionHub.statefulSink(
        //Switch the partitioning function
        () => partitionRoundRobin(),
        //() => partitionToFastestConsumer(),
        startAfterNrOfConsumers = 1, bufferSize = 1))(Keep.right)

    // By running/materializing the producer, we get back a Source, which
    // gives us access to the elements published by the producer.
    val fromProducer: Source[String, NotUsed] = runnableGraph.run()

    // Attach three dynamic fan-out sinks to the PartitionHub
    fromProducer.runForeach(msg => println("fast consumer1 received: " + msg))
    fromProducer.throttle(100, 1.millis, 10, ThrottleMode.Shaping)
      .runForeach(msg => println("slow consumer2 received: " + msg))
    fromProducer.throttle(100, 2.millis, 10, ThrottleMode.Shaping)
      .runForeach(msg => println("really slow consumer3 received: " + msg))
  }
}
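When no per-materialization state is needed, the simpler stateless `PartitionHub.sink` is enough. A sketch reusing `producer` from above, assuming hashing the element is an acceptable routing key:

// Sketch: stateless partitioning by element hash (assumed routing strategy).
// PartitionHub.sink takes (numberOfConsumers, element) => consumer index.
val byHash: RunnableGraph[Source[String, NotUsed]] =
  producer.toMat(PartitionHub.sink(
    (size, elem) => math.abs(elem.hashCode % size),
    startAfterNrOfConsumers = 1, bufferSize = 1))(Keep.right)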
Example 155
Source File: AsyncExecution.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream

import akka.Done
import akka.actor.ActorSystem
import akka.stream.ActorAttributes
import akka.stream.scaladsl.{Flow, Sink, Source}
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future
import scala.util.{Failure, Success}

object AsyncExecution extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("AsyncExecution")
  implicit val ec = system.dispatcher

  def stage(name: String) =
    Flow[Int]
      .wireTap(index => logger.info(s"Stage $name processing element $index by ${Thread.currentThread().getName}"))

  def stageBlocking(name: String) =
    Flow[Int]
      .wireTap(index => logger.info(s"Stage $name processing element $index by ${Thread.currentThread().getName}"))
      .wireTap(_ => Thread.sleep(5000))
      .withAttributes(ActorAttributes.dispatcher("custom-dispatcher-for-blocking"))

  def sinkBlocking: Sink[Int, Future[Done]] =
    Sink.foreach { index: Int =>
      Thread.sleep(2000)
      logger.info(s"Slow sink processing element $index by ${Thread.currentThread().getName}")
    }
      //Adding a custom dispatcher creates an async boundary
      //see discussion in: https://discuss.lightbend.com/t/how-can-i-make-sure-that-fileio-frompath-is-picking-up-my-dispatcher/6528/4
      .withAttributes(ActorAttributes.dispatcher("custom-dispatcher-for-blocking"))

  val done = Source(1 to 10)
    .via(stage("A")).async
    //When activated instead of alsoTo(sinkBlocking): elements for stage C are held up by stage B
    //.via(stageBlocking("B")).async
    .alsoTo(sinkBlocking).async
    .via(stage("C")).async
    .runWith(Sink.ignore)

  //With alsoTo(sinkBlocking) the stages A and C signal "done" too early and thus would terminate the whole stream
  //The reason for this is the custom dispatcher in sinkBlocking
  //terminateWhen(done)

  def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
}
Example 156
Source File: BasicTransformation.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink, Source}

object BasicTransformation {
  implicit val system = ActorSystem("BasicTransformation")

  import system.dispatcher

  def main(args: Array[String]): Unit = {
    val text =
      """|Lorem Ipsum is simply dummy text of the printing and typesetting industry.
         |Lorem Ipsum has been the industry's standard dummy text ever since the 1500s,
         |when an unknown printer took a galley of type and scrambled it to make a type
         |specimen book.""".stripMargin

    val source = Source.fromIterator(() => text.split("\\s").iterator)
    val sink = Sink.foreach[String](println)
    val flow = Flow[String].map(x => x.toUpperCase)
    val result = source.via(flow).runWith(sink)
    result.onComplete(_ => system.terminate())
  }
}
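For a pipeline this small the separate Flow value is optional; the same transformation can be written inline, which reads shorter. A sketch reusing `text` and the implicits from above:

// The same pipeline with the flow fused into the source:
Source.fromIterator(() => text.split("\\s").iterator)
  .map(_.toUpperCase)
  .runForeach(println)
  .onComplete(_ => system.terminate())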
Example 157
Source File: TcpEcho.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Framing, Keep, Sink, Source, Tcp}
import akka.util.ByteString

import scala.concurrent.Future
import scala.util.{Failure, Success}

object TcpEcho extends App {
  val systemServer = ActorSystem("TcpEchoServer")
  val systemClient = ActorSystem("TcpEchoClient")

  var serverBinding: Future[Tcp.ServerBinding] = _

  if (args.isEmpty) {
    val (address, port) = ("127.0.0.1", 6000)
    serverBinding = server(systemServer, address, port)
    (1 to 10).par.foreach(each => client(each, systemClient, address, port))
  } else {
    val (address, port) =
      if (args.length == 3) (args(1), args(2).toInt)
      else ("127.0.0.1", 6000)
    if (args(0) == "server") {
      serverBinding = server(systemServer, address, port)
    } else if (args(0) == "client") {
      client(1, systemClient, address, port)
    }
  }

  def server(system: ActorSystem, address: String, port: Int): Future[Tcp.ServerBinding] = {
    implicit val sys = system
    implicit val ec = system.dispatcher

    val handler = Sink.foreach[Tcp.IncomingConnection] { connection =>
      // parse incoming commands and append !
      val commandParser = Flow[String].takeWhile(_ != "BYE").map(_ + "!")

      val welcomeMsg = s"Welcome to: ${connection.localAddress}, you are: ${connection.remoteAddress}!"
      val welcomeSource = Source.single(welcomeMsg)

      val serverEchoFlow = Flow[ByteString]
        .via(Framing.delimiter( //chunk the inputs up into actual lines of text
          ByteString("\n"),
          maximumFrameLength = 256,
          allowTruncation = true))
        .map(_.utf8String)
        .via(commandParser)
        .merge(welcomeSource) // merge the initial banner after parser
        .map(_ + "\n")
        .map(ByteString(_))
        .watchTermination()((_, done) => done.onComplete {
          case Failure(err) => println(s"Server flow failed: $err")
          case _ => println(s"Server flow terminated for client: ${connection.remoteAddress}")
        })
      connection.handleWith(serverEchoFlow)
    }

    val connections = Tcp().bind(interface = address, port = port)
    val binding = connections.watchTermination()(Keep.left).to(handler).run()

    binding.onComplete {
      case Success(b) =>
        println("Server started, listening on: " + b.localAddress)
      case Failure(e) =>
        println(s"Server could not bind to: $address:$port: ${e.getMessage}")
        system.terminate()
    }
    binding
  }

  def client(id: Int, system: ActorSystem, address: String, port: Int): Unit = {
    implicit val sys = system
    implicit val ec = system.dispatcher

    val connection: Flow[ByteString, ByteString, Future[Tcp.OutgoingConnection]] =
      Tcp().outgoingConnection(address, port)
    val testInput = ('a' to 'z').map(ByteString(_)) ++ Seq(ByteString("BYE"))
    val source = Source(testInput).via(connection)
    val closed = source.runForeach(each => println(s"Client: $id received echo: ${each.utf8String}"))
    closed.onComplete(each => println(s"Client: $id closed: $each"))
  }
}
Example 158
Source File: AlsoTo.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream_divert

import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.Attributes
import akka.stream.scaladsl.{Flow, Sink, Source}

object AlsoTo extends App {
  implicit val system = ActorSystem("AlsoTo")
  implicit val executionContext = system.dispatcher
  implicit val adapter = Logging(system, this.getClass)

  val source = Source(1 to 10)

  val sink = Sink.foreach { x: Int =>
    adapter.log(Logging.InfoLevel, s" --> Element: $x reached sink")
  }

  def sinkBlocking = Sink.foreach { x: Int =>
    Thread.sleep(1000)
    adapter.log(Logging.InfoLevel, s" --> Element: $x logged in alsoTo sinkBlocking by ${Thread.currentThread().getName}")
  }

  val flow = Flow[Int]
    .log("before alsoTo")
    .alsoTo(sinkBlocking)
    .log("after alsoTo")
    .withAttributes(
      Attributes.logLevels(
        onElement = Logging.InfoLevel,
        onFinish = Logging.InfoLevel,
        onFailure = Logging.DebugLevel
      ))

  val done = source.via(flow).runWith(sink)
  done.onComplete(_ => system.terminate())
}
Example 159
Source File: DivertTo.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream_divert

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink, Source}

import scala.concurrent.Future
import scala.util.{Failure, Success}

object DivertTo extends App {
  implicit val system = ActorSystem("DivertTo")
  implicit val executionContext = system.dispatcher

  val source = Source(1 to 10)

  val sink = Sink.foreach[Either[Valid[Int], Invalid[Int]]](each => println(s"Reached sink: ${each.left.get}"))

  val errorSink = Flow[Invalid[Int]]
    .map(each => println(s"Reached errorSink: $each"))
    .to(Sink.ignore)

  val flow: Flow[Int, Either[Valid[Int], Invalid[Int]], NotUsed] = Flow[Int]
    .map { x =>
      if (x % 2 == 0) Left(Valid(x))
      else Right(Invalid(x, Some(new Exception("Is odd"))))
    }
    .map {
      //Drawback of this approach: Pattern matching on all downstream operations
      case left@Left(_) => businessLogicOn(left)
      case right@Right(_) => right
    }
    .map {
      case left@Left(_) => left
      case right@Right(_) => right
    }
    //Divert invalid elements
    //contramap: apply "right.get" to each incoming upstream element *before* it is passed to the errorSink
    .divertTo(errorSink.contramap(_.right.get), _.isRight)

  private def businessLogicOn(left: Left[Valid[Int], Invalid[Int]]) = {
    if (left.value.payload > 5) left
    else Right(Invalid(left.value.payload, Some(new Exception("Is not greater than 5"))))
  }

  val done = source.via(flow).runWith(sink)
  terminateWhen(done)

  def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
}

case class Valid[T](payload: T)

case class Invalid[T](payload: T, cause: Option[Throwable])
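Note that this example keeps Valid on the Left, which is unusual; the conventional Either orientation treats Right as the success side. A sketch of the same divert with the usual orientation, reusing `errorSink` from above (an alternative, not the example's approach):

// Sketch: the same divert with the conventional orientation (Right = valid).
val conventional: Flow[Int, Either[Invalid[Int], Valid[Int]], NotUsed] = Flow[Int]
  .map { x =>
    if (x % 2 == 0) Right(Valid(x))
    else Left(Invalid(x, Some(new Exception("Is odd"))))
  }
  .divertTo(errorSink.contramap(_.left.get), _.isLeft)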
Example 160
Source File: SplitAfter.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream_shared_state

import java.time.{Instant, LocalDateTime, ZoneOffset}

import akka.Done
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.immutable._
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}

object SplitAfter extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("SplitAfter")
  implicit val executionContext = system.dispatcher

  private def hasSecondChanged: () => Seq[(Int, Instant)] => Iterable[(Instant, Boolean)] = {
    () => {
      slidingElements => {
        if (slidingElements.size == 2) {
          val current = slidingElements.head
          val next = slidingElements.tail.head
          val currentBucket = LocalDateTime.ofInstant(current._2, ZoneOffset.UTC).withNano(0)
          val nextBucket = LocalDateTime.ofInstant(next._2, ZoneOffset.UTC).withNano(0)
          List((current._2, currentBucket != nextBucket))
        } else {
          val current = slidingElements.head
          List((current._2, false))
        }
      }
    }
  }

  val done: Future[Done] = Source(1 to 100)
    .throttle(1, 100.millis)
    .map(elem => (elem, Instant.now()))
    .sliding(2) // allows to compare this element with the next element
    .statefulMapConcat(hasSecondChanged) // stateful decision
    .splitAfter(_._2) // split when second has changed
    .map(_._1) // proceed with payload
    .fold(0)((acc, _) => acc + 1) // count elements in group
    .mergeSubstreams
    .runWith(Sink.foreach(each => println(s"Elements in group: $each")))

  terminateWhen(done)

  def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
}
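Stripped of the time-bucket bookkeeping, the `splitAfter` mechanics look like this minimal sketch: the element for which the predicate returns true is the last element of its substream.

// Minimal splitAfter sketch: close a substream after every multiple of 3.
Source(1 to 9)
  .splitAfter(_ % 3 == 0)
  .fold(Vector.empty[Int])(_ :+ _)
  .mergeSubstreams
  .runForeach(group => println(s"Group: $group"))
// prints the three groups 1-3, 4-6 and 7-9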
Example 161
Source File: ParametrizedFlow.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream_shared_state

import akka.Done
import akka.actor.{ActorSystem, Cancellable}
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, Sink, Source, SourceQueueWithComplete, Zip}
import akka.stream.{FlowShape, OverflowStrategy}

import scala.collection.immutable
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}

object ParametrizedFlow extends App {
  val service = ParameterizedFlowService

  Thread.sleep(5000)
  service.update(1.0)

  Thread.sleep(2000)
  service.update(1.5)
  Thread.sleep(2000)
  service.cancel()
  Thread.sleep(2000)

  println(service.result())
}

object ParameterizedFlowService {
  implicit val system = ActorSystem("ParameterizedFlowService")
  implicit val executionContext = system.dispatcher

  def update(element: Double): Unit = flow._1._2.offer(element)

  def cancel(): Boolean = flow._1._1.cancel()

  def result(): Future[Seq[Double]] = flow._2

  val fun = (flowValue: Int, paramValue: Double) => flowValue * paramValue
  val flow: ((Cancellable, SourceQueueWithComplete[Double]), Future[immutable.Seq[Double]]) =
    Source.tick(0.seconds, 500.millis, 10)
      .viaMat(createParamFlow(1, OverflowStrategy.dropBuffer, 0.5)(fun))(Keep.both)
      .wireTap(x => println(x))
      .toMat(Sink.seq)(Keep.both)
      .run()

  val done: Future[Done] = flow._1._2.watchCompletion()
  terminateWhen(done)

  private def createParamFlow[A, P, O](bufferSize: Int, overflowStrategy: OverflowStrategy, initialParam: P)(fun: (A, P) => O) =
    Flow.fromGraph(GraphDSL.create(Source.queue[P](bufferSize, overflowStrategy)) { implicit builder =>
      queue =>
        import GraphDSL.Implicits._
        val zip = builder.add(Zip[A, P]())
        //Interesting use of the extrapolate operator
        //based on https://doc.akka.io/docs/akka/current/stream/stream-rate.html#understanding-extrapolate-and-expand
        val extra = builder.add(Flow[P].extrapolate(Iterator.continually(_), Some(initialParam)))
        val map = builder.add(Flow[(A, P)].map(r => fun(r._1, r._2)))

        queue ~> extra ~> zip.in1
        zip.out ~> map
        FlowShape(zip.in0, map.out)
    })

  private def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
}
Example 162
Source File: Blacklist.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream_shared_state

import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.stage._

import scala.concurrent.duration._

object Blacklist extends App {
  implicit val system = ActorSystem("Blacklist")

  val initBlacklist = Set.empty[String]

  val service: StateService[Set[String]] =
    Source.repeat("yes")
      .throttle(1, 1.second, 10, ThrottleMode.shaping)
      .viaMat(new ZipWithState(initBlacklist))(Keep.right)
      .filterNot { case (blacklist: Set[String], elem: String) => blacklist(elem) }
      .to(Sink.foreach(each => println(each._2)))
      .run()

  println("Starting with empty blacklist on a list of 'yes' elements -> elements are passing")

  Thread.sleep(2000)
  println("Inject new blacklist with value: 'yes' -> elements are filtered")
  service.update(Set("yes"))

  Thread.sleep(5000)
  println("Inject new blacklist with value: 'no' -> elements are passing again")
  service.update(Set("no"))
}

trait StateService[A] {
  def update(state: A): Unit
}

class StateServiceCallback[A](callback: AsyncCallback[A]) extends StateService[A] {
  override def update(state: A): Unit = callback.invoke(state)
}

class ZipWithState[S, I](initState: S) extends GraphStageWithMaterializedValue[FlowShape[I, (S, I)], StateService[S]] {
  val in = Inlet[I]("ZipWithState.in")
  val out = Outlet[(S, I)]("ZipWithState.out")

  override val shape: FlowShape[I, (S, I)] = FlowShape.of(in, out)

  override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, StateService[S]) = {
    val logic = new GraphStageLogic(shape) {
      private[this] var state: S = initState
      val updateStateCallback: AsyncCallback[S] =
        getAsyncCallback[S] {
          state = _
        }

      setHandler(in, new InHandler {
        override def onPush(): Unit = {
          push(out, (state, grab(in)))
        }
      })

      setHandler(out, new OutHandler {
        override def onPull(): Unit = {
          pull(in)
        }
      })
    }

    (logic, new StateServiceCallback(logic.updateStateCallback))
  }
}
Example 163
Source File: ConflateWithSeed.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream_shared_state

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import org.slf4j.{Logger, LoggerFactory}

import scala.collection._
import scala.concurrent.duration._
import scala.util.Random

object ConflateWithSeed extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("ConflateWithSeed")
  implicit val executionContext = system.dispatcher

  def seed(i: Int): mutable.LinkedHashMap[Int, Int] = mutable.LinkedHashMap[Int, Int](i -> 1)

  def aggregate(state: mutable.LinkedHashMap[Int, Int], i: Int): mutable.LinkedHashMap[Int, Int] = {
    logger.info(s"Got: $i")
    state.put(i, state.getOrElseUpdate(i, 0) + 1)
    state
  }

  // lazyFlow is not really needed here, but nice to know that it exists
  // conflateWithSeed invokes the seed method every time, so it
  // is safe to materialize this flow multiple times
  val lazyFlow = Flow.lazyFlow(() =>
    Flow[Int]
      .map(_ => Random.nextInt(100))
      .conflateWithSeed(seed)(aggregate)
  )

  Source(1 to 10)
    .via(lazyFlow)
    .throttle(1, 1.second) //simulate slow sink
    .runForeach(each => logger.info(s"1st reached sink: $each"))

  //  Source(1 to 10)
  //    .via(lazyFlow)
  //    .throttle(1, 1.second) //simulate slow sink
  //    .runForeach(each => logger.info(s"2nd reached sink: $each"))
}
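When the aggregate has the same type as the elements, the plain `conflate` operator is enough and no seed function is needed. A minimal sketch that sums bursts while a throttled sink is slow:

// Sketch: conflate sums incoming elements whenever downstream is slower than upstream.
Source(1 to 100)
  .conflate(_ + _)
  .throttle(1, 1.second)
  .runForeach(sum => println(s"Conflated sum: $sum"))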
Example 164
Source File: SplitWhen.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream_shared_state

import java.nio.file.Paths

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.IOResult
import akka.stream.scaladsl.{FileIO, Flow, Framing, Keep, Sink, Source}
import akka.util.ByteString
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future
import scala.util.{Failure, Success}

object SplitWhen extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("SplitWhen")
  implicit val executionContext = system.dispatcher

  val nonLinearCapacityFactor = 100 //raise to see how it scales
  val filename = "splitWhen.csv"

  def genResourceFile() = {
    logger.info(s"Writing resource file: $filename...")

    def fileSink(filename: String): Sink[String, Future[IOResult]] =
      Flow[String]
        .map(s => ByteString(s + "\n"))
        .toMat(FileIO.toPath(Paths.get(filename)))(Keep.right)

    Source.fromIterator(() => (1 to nonLinearCapacityFactor).toList.combinations(2))
      .map(each => s"${each.head},${each.last}")
      .runWith(fileSink(filename))
  }

  val sourceOfLines = FileIO.fromPath(Paths.get(filename))
    .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 1024, allowTruncation = true)
      .map(_.utf8String))

  val csvToRecord: Flow[String, Record, NotUsed] = Flow[String]
    .map(_.split(",").map(_.trim))
    .map(stringArrayToRecord)

  val terminationHook: Flow[Record, Record, Unit] = Flow[Record]
    .watchTermination() { (_, done) =>
      done.onComplete {
        case Failure(err) => logger.info(s"Flow failed: $err")
        case _ => system.terminate(); logger.info(s"Flow terminated")
      }
    }

  val printSink = Sink.foreach[Vector[Record]](each => println(s"Reached sink: $each"))

  private def stringArrayToRecord(cols: Array[String]) = Record(cols(0), cols(1))

  private def hasKeyChanged = {
    () => {
      var lastRecordKey: Option[String] = None

      currentRecord: Record =>
        lastRecordKey match {
          case Some(currentRecord.key) | None =>
            lastRecordKey = Some(currentRecord.key)
            List((currentRecord, false))
          case _ =>
            lastRecordKey = Some(currentRecord.key)
            List((currentRecord, true))
        }
    }
  }

  genResourceFile().onComplete {
    case Success(_) =>
      logger.info(s"Start processing...")
      sourceOfLines
        .via(csvToRecord)
        .via(terminationHook)
        .statefulMapConcat(hasKeyChanged) // stateful decision
        .splitWhen(_._2) // split when key has changed
        .map(_._1) // proceed with payload
        .fold(Vector.empty[Record])(_ :+ _) // collect the group's payload
        .mergeSubstreams // better performance, but why?
        .runWith(printSink)
    case Failure(exception) => logger.info(s"Exception: $exception")
  }

  case class Record(key: String, value: String)
}
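`statefulMapConcat` is the workhorse in both SplitAfter and SplitWhen: the outer function is invoked once per materialization and closes over the mutable state. A minimal standalone sketch of the pattern, tagging each element with whether it differs from its predecessor (imports as in the example above):

// Minimal statefulMapConcat sketch: one mutable state cell per materialization.
val changeDetector: Flow[String, (String, Boolean), NotUsed] =
  Flow[String].statefulMapConcat { () =>
    var last: Option[String] = None
    elem => {
      val changed = last.exists(_ != elem)
      last = Some(elem)
      List((elem, changed))
    }
  }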
Example 165
Source File: CustomCacheRunner.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream_actor.typed

import akka.actor.typed.ActorSystem
import akka.actor.typed.scaladsl.AskPattern._
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{RestartSource, Sink, Source}
import akka.util.Timeout
import sample.stream_actor.typed.CustomCache.{AddDevices, CacheRequests, CacheResponses, CachedDevices}

import scala.concurrent.Future
import scala.concurrent.duration._

object CustomCacheRunner extends App {
  // the system is also the top level actor ref
  implicit val cache = ActorSystem[CacheRequests](CustomCache.empty, "CustomCache")
  implicit val ec = cache.executionContext
  implicit val timeout: Timeout = 5.seconds

  RestartSource
    .withBackoff(
      minBackoff = 0.seconds,
      maxBackoff = 60.seconds,
      randomFactor = 0.1
    ) { () =>
      Source
        .tick(initialDelay = 0.seconds, interval = 2.seconds, tick = ())
        .mapAsync(parallelism = 1) { _ => cache.ref.ask(ref => CustomCache.Get("42", ref)) }
        .map((each: CacheResponses) =>
          each match {
            case cachedDevices: CachedDevices => cache.log.info(s"Current amount of cached devices: ${cachedDevices.devices.size}")
            case _ => cache.log.info("No devices")
          })
        .recover {
          case ex => cache.log.error("Failed to read cached devices: ", ex)
        }
    }
    .runWith(Sink.ignore)

  val sourceOfUUID = Source(Stream.continually(java.util.UUID.randomUUID.toString).take(100))

  sourceOfUUID
    .throttle(10, 1.second, 10, ThrottleMode.shaping)
    .mapAsync(parallelism = 10)(each => Future(cache ! AddDevices(List(DeviceId(each)))))
    .runWith(Sink.ignore)
}
Example 166
Source File: SimulateWindTurbines.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream_actor

import akka.actor.ActorSystem
import akka.pattern.{BackoffOpts, BackoffSupervisor}
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.duration._

object SimulateWindTurbines extends App {
  implicit val system = ActorSystem()

  val endpoint = "ws://127.0.0.1:8080"
  val numberOfTurbines = 5

  Source(1 to numberOfTurbines)
    .throttle(
      elements = 100, //number of elements to be taken from bucket
      per = 1.second,
      maximumBurst = 100, //capacity of bucket
      mode = ThrottleMode.shaping
    )
    .map { _ =>
      val id = java.util.UUID.randomUUID.toString

      val supervisor = BackoffSupervisor.props(
        BackoffOpts.onFailure(
          WindTurbineSimulator.props(id, endpoint),
          childName = id,
          minBackoff = 1.second,
          maxBackoff = 30.seconds,
          randomFactor = 0.2
        ))

      system.actorOf(supervisor, name = s"$id-backoff-supervisor")
    }
    .runWith(Sink.ignore)
}
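`ThrottleMode.shaping` delays elements to hold the configured rate; the alternative mode fails the stream instead of slowing it down. A minimal sketch of the difference:

// Sketch: ThrottleMode.enforcing fails the stream with a RateExceededException
// instead of backpressuring the producer.
Source(1 to 10)
  .throttle(elements = 1, per = 1.second, maximumBurst = 1, mode = ThrottleMode.enforcing)
  .runForeach(println)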
Example 167
Source File: WebSocketClient.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream_actor

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.ws._
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, Sink, Source}
import akka.stream.{FlowShape, SourceShape}
import sample.stream_actor.WindTurbineSimulator._

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

object WebSocketClient {
  def apply(id: String, endpoint: String, windTurbineSimulator: ActorRef)
           (implicit system: ActorSystem, executionContext: ExecutionContext) = {
    new WebSocketClient(id, endpoint, windTurbineSimulator)(system, executionContext)
  }
}

class WebSocketClient(id: String, endpoint: String, windTurbineSimulator: ActorRef)
                     (implicit system: ActorSystem, executionContext: ExecutionContext) {

  val webSocketFlow: Flow[Message, Message, Future[WebSocketUpgradeResponse]] = {
    val websocketUri = s"$endpoint/measurements/$id"
    Http().webSocketClientFlow(WebSocketRequest(websocketUri))
  }

  val outgoing = GraphDSL.create() { implicit builder =>
    val data = WindTurbineData(id)

    val flow = builder.add {
      Source.tick(1.second, 100.millis, ()) //valve for the WindTurbineData frequency
        .map(_ => TextMessage(data.getNext))
    }

    SourceShape(flow.out)
  }

  val incoming = GraphDSL.create() { implicit builder =>
    val flow = builder.add {
      Flow[Message]
        .collect {
          case TextMessage.Strict(text) =>
            Future.successful(text)
          case TextMessage.Streamed(textStream) =>
            textStream.runFold("")(_ + _)
              .flatMap(Future.successful)
        }
        .mapAsync(1)(identity)
        .map(each => println(s"Client received msg: $each"))
    }

    FlowShape(flow.in, flow.out)
  }

  val (upgradeResponse, closed) = Source.fromGraph(outgoing)
    .viaMat(webSocketFlow)(Keep.right) // keep the materialized Future[WebSocketUpgradeResponse]
    .via(incoming)
    .toMat(Sink.ignore)(Keep.both) // also keep the Future[Done]
    .run()

  val connected = upgradeResponse.map { upgrade =>
    upgrade.response.status match {
      case StatusCodes.SwitchingProtocols => windTurbineSimulator ! Upgraded
      case statusCode => windTurbineSimulator ! FailedUpgrade(statusCode)
    }
  }

  connected.onComplete {
    case Success(_) => windTurbineSimulator ! Connected
    case Failure(ex) => windTurbineSimulator ! ConnectionFailure(ex)
  }

  closed.onComplete {
    case Success(_) => windTurbineSimulator ! Terminated
    case Failure(ex) => windTurbineSimulator ! ConnectionFailure(ex)
  }
}
Example 168
Source File: PrintMoreNumbers.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream_actor_simple

import akka.actor.Actor
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{KillSwitches, UniqueKillSwitch}

import scala.concurrent.duration._

class PrintMoreNumbers extends Actor {
  implicit val system = context.system
  implicit val executionContext = context.system.dispatcher

  private val (killSwitch: UniqueKillSwitch, done) =
    Source.tick(0.seconds, 1.second, 1)
      .scan(0)(_ + _)
      .map(_.toString)
      .viaMat(KillSwitches.single)(Keep.right)
      .toMat(Sink.foreach(println))(Keep.both)
      .run()

  done.map(_ => self ! "done")

  override def receive: Receive = {
    //When the actor is stopped, it will also stop the stream
    case "stop" =>
      println("Stopping...")
      killSwitch.shutdown()
    case "done" =>
      println("Done")
      context.stop(self)
      context.system.terminate()
  }
}
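`KillSwitches.single` stops exactly one materialization. When several independent streams must be stopped together, a shared switch can be threaded through each of them. A sketch:

// Sketch: one shared switch stops every stream that runs through its flow.
val sharedKillSwitch = KillSwitches.shared("stop-all")

Source.tick(0.seconds, 1.second, "a").via(sharedKillSwitch.flow).runWith(Sink.foreach(println))
Source.tick(0.seconds, 1.second, "b").via(sharedKillSwitch.flow).runWith(Sink.foreach(println))

// later, from anywhere:
sharedKillSwitch.shutdown()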
Example 169
Source File: Hl7TcpClient.scala From akka_streams_tutorial with MIT License | 5 votes |
package alpakka.tcp_to_websockets.hl7mllp

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source, Tcp}
import akka.util.ByteString
import ca.uhn.hl7v2.AcknowledgmentCode
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future
import scala.concurrent.duration._

object Hl7TcpClient extends App with MllpProtocol {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  val system = ActorSystem("Hl7TcpClient")

  val (address, port) = ("127.0.0.1", 6160)

  //(1 to 1).par.foreach(each => localStreamingMessageClient(each, 1000, system, address, port))
  (1 to 1).par.foreach(each => localSingleMessageClient(each, 100, system, address, port))

  def localSingleMessageClient(clientname: Int, numberOfMessages: Int, system: ActorSystem, address: String, port: Int): Unit = {
    implicit val sys = system
    implicit val ec = system.dispatcher

    val connection = Tcp().outgoingConnection(address, port)

    def sendAndReceive(i: Int): Future[Int] = {
      val traceID = s"$clientname-${i.toString}"
      val source = Source.single(ByteString(encodeMllp(generateTestMessage(traceID)))).via(connection)
      val closed = source.runForeach(each =>
        if (isNACK(each)) {
          logger.info(s"Client: $clientname-$i received NACK: ${printable(each.utf8String)}")
          throw new RuntimeException("NACK")
        } else {
          logger.info(s"Client: $clientname-$i received ACK: ${printable(each.utf8String)}")
        }
      ).recoverWith {
        case _: RuntimeException =>
          logger.info(s"About to retry for: $clientname-$i...")
          sendAndReceive(i)
        case e: Throwable => Future.failed(e)
      }
      closed.onComplete(each => logger.debug(s"Client: $clientname-$i closed: $each"))
      Future(i)
    }

    Source(1 to numberOfMessages)
      .throttle(1, 1.second)
      .mapAsync(1)(i => sendAndReceive(i))
      .runWith(Sink.ignore)
  }

  def localStreamingMessageClient(id: Int, numberOfMessages: Int, system: ActorSystem, address: String, port: Int): Unit = {
    implicit val sys = system
    implicit val ec = system.dispatcher

    val connection = Tcp().outgoingConnection(address, port)

    val hl7MllpMessages = (1 to numberOfMessages).map(each => ByteString(encodeMllp(generateTestMessage(each.toString))))
    val source = Source(hl7MllpMessages).throttle(10, 1.second).via(connection)
    val closed = source.runForeach(each => logger.info(s"Client: $id received echo: ${printable(each.utf8String)}"))
    closed.onComplete(each => logger.info(s"Client: $id closed: $each"))
  }

  private def generateTestMessage(senderTraceID: String) = {
    //For now put the senderTraceID into the "sender lab" field to follow the messages across the workflow
    val message = new StringBuilder
    message ++= s"MSH|^~\\&|$senderTraceID|MCM|LABADT|MCM|198808181126|SECURITY|ADT^A01|1234|P|2.5.1|"
    message ++= CARRIAGE_RETURN
    message ++= "EVN|A01|198808181123||"
    message ++= CARRIAGE_RETURN
    message ++= "PID|||PATID1234^5^M11^ADT1^MR^MCM~123456789^^^USSSA^SS||EVERYMAN^ADAM^A^III||19610615|M||C|1200 N ELM STREET^^GREENSBORO^NC^27401-1020"
    message ++= CARRIAGE_RETURN
    message ++= "NK1|1|JONES^BARBARA^K|SPO^Spouse^HL70063|171 ZOBERLEIN^^ISHPEMING^MI^49849^|"
    message ++= CARRIAGE_RETURN
    message ++= "PV1|1|I|2000^2012^01||||004777^LEBAUER^SIDNEY^J.|||SUR||||9|A0|"
    message ++= CARRIAGE_RETURN
    message.toString()
  }

  private def isNACK(message: ByteString): Boolean = {
    message.utf8String.contains(AcknowledgmentCode.AE.name()) ||
    message.utf8String.contains(AcknowledgmentCode.AR.name()) ||
    message.utf8String.contains(AcknowledgmentCode.CE.name()) ||
    message.utf8String.contains(AcknowledgmentCode.CR.name())
  }
}
Example 170
Source File: WebsocketServer.scala From akka_streams_tutorial with MIT License | 5 votes |
package alpakka.env

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.directives.WebSocketDirectives
import akka.stream.scaladsl.{Flow, Sink, Source}

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{Failure, Success}

object WebsocketServer extends App with WebSocketDirectives {
  implicit val system = ActorSystem("WebsocketServer")
  implicit val executionContext = system.dispatcher

  val (address, port) = ("127.0.0.1", 6002)
  server(address, port)

  def server(address: String, port: Int) = {

    def echoFlow: Flow[Message, Message, Any] =
      Flow[Message].mapConcat {
        case tm: TextMessage =>
          println(s"Server received: $tm")
          TextMessage(Source.single("Echo: ") ++ tm.textStream) :: Nil
        case bm: BinaryMessage =>
          // ignore binary messages but drain content to avoid the stream being clogged
          bm.dataStream.runWith(Sink.ignore)
          Nil
      }

    val websocketRoute: Route =
      path("echo") {
        handleWebSocketMessages(echoFlow)
      }

    val bindingFuture = Http().bindAndHandle(websocketRoute, address, port)
    bindingFuture.onComplete {
      case Success(b) =>
        println("Server started, listening on: " + b.localAddress)
      case Failure(e) =>
        println(s"Server could not bind to $address:$port. Exception message: ${e.getMessage}")
        system.terminate()
    }

    sys.addShutdownHook {
      println("About to shutdown...")

      val fut = bindingFuture.map(serverBinding => serverBinding.terminate(hardDeadline = 3.seconds))
      println("Waiting for connections to terminate...")

      val onceAllConnectionsTerminated = Await.result(fut, 10.seconds)
      println("Connections terminated")

      onceAllConnectionsTerminated.flatMap { _ => system.terminate() }
    }
  }
}
Example 171
Source File: XmlProcessing.scala From akka_streams_tutorial with MIT License | 5 votes |
package alpakka.xml

import java.nio.file.Paths
import java.util.Base64

import akka.actor.ActorSystem
import akka.stream.alpakka.xml.scaladsl.XmlParsing
import akka.stream.alpakka.xml.{EndElement, ParseEvent, StartElement, TextEvent}
import akka.stream.scaladsl.{FileIO, Sink, Source}
import akka.util.ByteString

import scala.collection.immutable
import scala.concurrent.Future
import scala.util.{Failure, Success}

object XmlProcessing extends App {
  implicit val system = ActorSystem("XmlProcessing")
  implicit val executionContext = system.dispatcher

  val resultFileName = "testfile_result.jpg"

  val done = FileIO.fromPath(Paths.get("./src/main/resources/xml_with_base64_embedded.xml"))
    .via(XmlParsing.parser)
    .statefulMapConcat(() => {

      // state
      val stringBuilder: StringBuilder = StringBuilder.newBuilder
      var counter: Int = 0

      // aggregation function
      parseEvent: ParseEvent =>
        parseEvent match {
          case s: StartElement if s.attributes.contains("mediaType") =>
            stringBuilder.clear()
            val mediaType = s.attributes.head._2
            println("mediaType: " + mediaType)
            immutable.Seq(mediaType)
          case s: EndElement if s.localName == "embeddedDoc" =>
            val text = stringBuilder.toString
            println("File content: " + text) //large embedded files are read into memory
            Source.single(ByteString(text))
              .map(each => ByteString(Base64.getMimeDecoder.decode(each.toByteBuffer)))
              .runWith(FileIO.toPath(Paths.get(s"$counter-$resultFileName")))
            counter = counter + 1
            immutable.Seq(text)
          case t: TextEvent =>
            stringBuilder.append(t.text)
            immutable.Seq.empty
          case _ =>
            immutable.Seq.empty
        }
    })
    .runWith(Sink.ignore)

  terminateWhen(done)

  def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
}
Example 172
Source File: JMSTextMessageProducerClient.scala From akka_streams_tutorial with MIT License | 5 votes |
package alpakka.jms

import akka.Done
import akka.actor.ActorSystem
import akka.stream.ThrottleMode
import akka.stream.alpakka.jms.scaladsl.JmsProducer
import akka.stream.alpakka.jms.{JmsProducerSettings, JmsTextMessage}
import akka.stream.scaladsl.{Sink, Source}
import com.typesafe.config.Config
import javax.jms.ConnectionFactory
import org.apache.activemq.ActiveMQConnectionFactory
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future
import scala.concurrent.duration._

object JMSTextMessageProducerClient {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("JMSTextMessageProducerClient")
  implicit val ec = system.dispatcher

  //The "failover:" part in the brokerURL instructs ActiveMQ to reconnect on network failure
  //This does not interfere with the new 1.0-M2 implementation
  val connectionFactory: ConnectionFactory =
    new ActiveMQConnectionFactory("artemis", "simetraehcapa", "failover:tcp://127.0.0.1:21616")

  def main(args: Array[String]): Unit = {
    jmsTextMessageProducerClient(connectionFactory)
  }

  private def jmsTextMessageProducerClient(connectionFactory: ConnectionFactory) = {
    val producerConfig: Config = system.settings.config.getConfig(JmsProducerSettings.configPath)
    val jmsProducerSink: Sink[JmsTextMessage, Future[Done]] = JmsProducer.sink(
      JmsProducerSettings(producerConfig, connectionFactory).withQueue("test-queue")
    )

    Source(1 to 2000000)
      .throttle(1, 1.second, 1, ThrottleMode.shaping)
      .wireTap(number => logger.info(s"SEND Msg with TRACE_ID: $number"))
      .map { number =>
        JmsTextMessage(s"Payload: ${number.toString}")
          .withProperty("TRACE_ID", number)
      }
      .runWith(jmsProducerSink)
  }
}
Example 173
Source File: WordCountProducer.scala From akka_streams_tutorial with MIT License | 5 votes |
package alpakka.kafka

import java.util
import java.util.concurrent.ThreadLocalRandom

import akka.actor.ActorSystem
import akka.kafka.ProducerMessage.Message
import akka.kafka.ProducerSettings
import akka.kafka.scaladsl.Producer
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.{Done, NotUsed}
import org.apache.kafka.clients.producer.{Partitioner, ProducerRecord}
import org.apache.kafka.common.errors.{NetworkException, UnknownTopicOrPartitionException}
import org.apache.kafka.common.serialization.StringSerializer
import org.apache.kafka.common.{Cluster, PartitionInfo}

import scala.concurrent.Future
import scala.concurrent.duration._

class CustomPartitioner extends Partitioner {
  override def partition(topic: String, key: Any, keyBytes: Array[Byte], value: Any, valueBytes: Array[Byte], cluster: Cluster): Int = {
    val partitionInfoList: util.List[PartitionInfo] = cluster.availablePartitionsForTopic(topic)
    val partitionCount = partitionInfoList.size
    val fakeNewsPartition = 0

    //println("CustomPartitioner received key: " + key + " and value: " + value)

    if (value.toString.contains(WordCountProducer.fakeNewsKeyword)) {
      //println("CustomPartitioner send message: " + value + " to fakeNewsPartition")
      fakeNewsPartition
    }
    else ThreadLocalRandom.current.nextInt(1, partitionCount) //round robin
  }

  override def close(): Unit = {
    println("CustomPartitioner: " + Thread.currentThread + " received close")
  }

  override def configure(configs: util.Map[String, _]): Unit = {
    println("CustomPartitioner received configure with configuration: " + configs)
  }
}

object CustomPartitioner {
  private def deserialize[V](objectData: Array[Byte]): V =
    org.apache.commons.lang3.SerializationUtils.deserialize(objectData).asInstanceOf[V]
}
Example 174
Source File: IngestSocketFactory.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.services

import akka.NotUsed
import akka.actor.{ActorRef, ActorRefFactory, Props}
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Sink, Source}
import hydra.core.ingest.IngestionReport

trait IngestSocketFactory {
  def ingestFlow(): Flow[String, OutgoingMessage, NotUsed]
}

object IngestSocketFactory {

  def createSocket(fact: ActorRefFactory): IngestSocketFactory = { () =>
    {
      val socketActor = fact.actorOf(Props[IngestionSocketActor])

      def actorSink =
        Sink.actorRefWithBackpressure(
          socketActor,
          onInitMessage = SocketInit,
          ackMessage = SocketAck,
          onCompleteMessage = SocketEnded,
          onFailureMessage = SocketFailed.apply
        )

      val in =
        Flow[String]
          .map(IncomingMessage)
          .to(actorSink)

      val out =
        Source
          .actorRefWithBackpressure[OutgoingMessage](
            SocketAck,
            PartialFunction.empty,
            PartialFunction.empty
          )
          .mapMaterializedValue(socketActor ! SocketStarted(_))

      Flow.fromSinkAndSourceCoupled(in, out)
    }
  }
}

sealed trait SocketEvent

case object SocketInit extends SocketEvent

case class SocketStarted(ref: ActorRef) extends SocketEvent

case object SocketEnded extends SocketEvent

case object SocketAck extends SocketEvent

case class IncomingMessage(message: String) extends SocketEvent

case class SocketFailed(ex: Throwable)

sealed trait OutgoingMessage extends SocketEvent

case class SimpleOutgoingMessage(status: Int, message: String) extends OutgoingMessage

case class IngestionOutgoingMessage(report: IngestionReport) extends OutgoingMessage
Example 175
Source File: ProgressSource.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.adapter.stream

import java.util.concurrent.TimeUnit

import akka.NotUsed
import akka.actor._
import akka.pattern.ask
import akka.stream._
import akka.stream.scaladsl.Source
import akka.util.Timeout

import com.rbmhtechnology.eventuate.ReplicationProtocol._
import com.typesafe.config.Config

import scala.concurrent.Future
import scala.concurrent.duration._

private class ProgressSourceSettings(config: Config) {
  val readTimeout =
    config.getDuration("eventuate.log.read-timeout", TimeUnit.MILLISECONDS).millis
}

object ProgressSource {
  def apply(sourceLogId: String, targetLog: ActorRef)(implicit system: ActorSystem): Graph[SourceShape[Long], NotUsed] = {
    implicit val timeout = Timeout(new ProgressSourceSettings(system.settings.config).readTimeout)
    import system.dispatcher

    Source.fromFuture(targetLog.ask(GetReplicationProgress(sourceLogId)).flatMap {
      case GetReplicationProgressSuccess(_, progress, _) => Future.successful(progress)
      case GetReplicationProgressFailure(cause)          => Future.failed(cause)
    })
  }
}
Example 176
Source File: DurableEventSourceIntegrationSpec.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.adapter.stream

import akka.actor._
import akka.stream._
import akka.stream.scaladsl.Source
import akka.stream.testkit.scaladsl._
import akka.testkit._

import com.rbmhtechnology.eventuate._
import com.rbmhtechnology.eventuate.log.EventLogWriter
import com.rbmhtechnology.eventuate.utilities._

import org.scalatest._

class DurableEventSourceIntegrationSpec extends TestKit(ActorSystem("test")) with WordSpecLike with Matchers with SingleLocationSpecLeveldb {
  implicit val materializer: Materializer = ActorMaterializer()

  var writer: EventLogWriter = _
  var writerA: EventLogWriter = _
  var writerB: EventLogWriter = _

  override def beforeEach(): Unit = {
    super.beforeEach()

    writer = new EventLogWriter("writer", log)
    writerA = new EventLogWriter("writerA", log, Some("a"))
    writerB = new EventLogWriter("writerB", log, Some("b"))
  }

  override def afterEach(): Unit = {
    super.afterEach()

    writer.stop()
    writerA.stop()
    writerB.stop()
  }

  "A DurableEventSource" must {
    "emit events for all aggregate ids" in {
      val source = Source.fromGraph(DurableEventSource(log))
      val probe = source.take(4).runWith(TestSink.probe)

      val abc = writer.write(List("a", "b", "c")).await
      probe.request(3).expectNextN(abc)

      val xyz = writerA.write(List("x", "y", "z")).await
      probe.request(3).expectNext(xyz.head).expectComplete()
    }
    "emit events for given aggregateId" in {
      val source1 = Source.fromGraph(DurableEventSource(log, aggregateId = Some("a")))
      val source2 = Source.fromGraph(DurableEventSource(log, aggregateId = Some("b")))

      val probe1 = source1.take(1).runWith(TestSink.probe)
      val probe2 = source2.take(1).runWith(TestSink.probe)

      val x = writer.write(List("x")).await
      val y = writerA.write(List("y")).await
      val z = writerB.write(List("z")).await

      probe1.request(1).expectNext(y.head).expectComplete()
      probe2.request(1).expectNext(z.head).expectComplete()
    }
    "emit events from a given sequence number" in {
      val source = Source.fromGraph(DurableEventSource(log, fromSequenceNr = 2))
      val probe = source.take(2).runWith(TestSink.probe)

      val abc = writer.write(List("a", "b", "c")).await
      probe.request(2).expectNextN(abc.drop(1)).expectComplete()
    }
  }
}
Example 177
Source File: DurableEventWriterExample.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.example.stream

//# durable-event-writer
import akka.stream.scaladsl.Source

import com.rbmhtechnology.eventuate.DurableEvent
import com.rbmhtechnology.eventuate.adapter.stream.DurableEventWriter

//#

object DurableEventWriterExample extends App with DurableEventLogs {
  //# durable-event-writer
  val writerId = "writer-1"

  Source(List("a", "b", "c"))
    .map(DurableEvent(_))
    .via(DurableEventWriter(writerId, logA))
    .map(event => (event.payload, event.localSequenceNr))
    .runForeach(println)

  // prints (on first run):
  // (a,1)
  // (b,2)
  // (c,3)
  //#
}
Example 178
Source File: DurableEventSourceExample.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.example.stream

//# durable-event-source-1
import akka.stream.scaladsl.Source

import com.rbmhtechnology.eventuate.adapter.stream.DurableEventSource

//#

object DurableEventSourceExample extends DurableEventLogs {
  //# durable-event-source-1
  val source1 = Source.fromGraph(DurableEventSource(logA))
  //#

  //# durable-event-source-2
  val source2 = Source.fromGraph(DurableEventSource(
    logA, fromSequenceNr = 12414, aggregateId = Some("user-17")))
  //#
}
Example 179
Source File: ProgressSourceExample.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.example.stream

import akka.stream.scaladsl.Source

import com.rbmhtechnology.eventuate.ReplicationProtocol.SetReplicationProgress
import com.rbmhtechnology.eventuate.adapter.stream.DurableEventSource

//# progress-source
import com.rbmhtechnology.eventuate.adapter.stream.ProgressSource

//#

object ProgressSourceExample extends App with DurableEventLogs {
  logC ! SetReplicationProgress(logAId, 17)
  logC ! SetReplicationProgress(logBId, 22)

  Thread.sleep(1000)

  //# progress-source
  val progressSourceA = Source.fromGraph(ProgressSource(logAId, logC))
  val progressSourceB = Source.fromGraph(ProgressSource(logBId, logC))

  val sourceA = progressSourceA.flatMapConcat { progress =>
    Source.fromGraph(DurableEventSource(logA, fromSequenceNr = progress))
  }

  val sourceB = progressSourceB.flatMapConcat { progress =>
    Source.fromGraph(DurableEventSource(logB, fromSequenceNr = progress))
  }
  //#
}
Example 180
Source File: KinesisSinkGraphStageIntegrationSpec.scala From reactive-kinesis with Apache License 2.0 | 5 votes |
package com.weightwatchers.reactive.kinesis.stream

import akka.stream.scaladsl.Source
import com.weightwatchers.reactive.kinesis.common.{
  AkkaUnitTestLike,
  KinesisConfiguration,
  KinesisSuite
}
import com.weightwatchers.reactive.kinesis.models.ProducerEvent
import org.scalatest.{FreeSpec, Matchers}

import scala.concurrent.duration._

class KinesisSinkGraphStageIntegrationSpec
    extends FreeSpec
    with KinesisSuite
    with KinesisConfiguration
    with AkkaUnitTestLike
    with Matchers {

  "KinesisSinkGraph" - {

    "produced messages are written to the stream" in new withKinesisConfForApp("sink_produce") {
      val messageCount = 100
      val elements = 1.to(messageCount).map(_.toString)
      Source(elements)
        .map(num => ProducerEvent(num, num))
        .runWith(Kinesis.sink(producerConf()))
        .futureValue
      val list = testConsumer.retrieveRecords(TestStreamName, messageCount)
      list should contain allElementsOf elements
      testConsumer.shutdown()
    }

    "upstream fail should fail the materialized value of the sink" in new withKinesisConfForApp("sink_fail") {
      Source
        .failed(new IllegalStateException("Boom"))
        .runWith(Kinesis.sink(producerConf()))
        .failed
        .futureValue shouldBe a[IllegalStateException]
    }
  }

  // do not create messages in setup, we will create messages inside the test
  override def TestStreamNrOfMessagesPerShard: Long = 0

  override implicit def patienceConfig: PatienceConfig = PatienceConfig(60.seconds, 1.second)
}
Example 181
Source File: UsersHandler.scala From akka-api-gateway-example with MIT License | 5 votes |
package jp.co.dzl.example.akka.api.handler.v1.github

import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
import jp.co.dzl.example.akka.api.handler.Handler
import jp.co.dzl.example.akka.api.service.GitHub

import scala.util.{ Failure, Success }

class UsersHandler(
    actorSystem: ActorSystem,
    github:      GitHub
) extends Handler {
  implicit val system = actorSystem
  implicit val executor = system.dispatcher
  implicit val materializer = ActorMaterializer()

  def routes = pathPrefix("v1" / "github") {
    path("users" / """^[a-zA-Z0-9\-]+$""".r) { login =>
      get {
        extractRequest { req =>
          val result = Source.single(HttpRequest(HttpMethods.GET, s"/users/$login"))
            .via(github.from(req))
            .via(github.send)
            .runWith(Sink.head)

          onComplete(result) {
            case Success(response) => complete(response)
            case Failure(error)    => complete(StatusCodes.ServiceUnavailable -> error.toString)
          }
        }
      }
    }
  }
}
Example 182
Source File: GitHubSpec.scala From akka-api-gateway-example with MIT License | 5 votes |
package jp.co.dzl.example.akka.api.service

import akka.actor.ActorSystem
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.model.{ HttpMethods, HttpRequest, HttpResponse }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Source }
import akka.stream.testkit.scaladsl.TestSink
import org.scalamock.scalatest.MockFactory
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, Matchers }

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class GitHubSpec extends FlatSpec with Matchers with ScalaFutures with BeforeAndAfterAll with MockFactory {
  implicit val system = ActorSystem("github-spec")
  implicit val executor = system.dispatcher
  implicit val materializer = ActorMaterializer()

  override protected def afterAll: Unit = {
    Await.result(system.terminate(), Duration.Inf)
  }

  "#from" should "merge original headers to github request" in {
    val github = new GitHubImpl("127.0.0.1", 8000, 5, mock[HttpClient])
    val request = HttpRequest(HttpMethods.GET, "/")
      .addHeader(RawHeader("host", "dummy"))
      .addHeader(RawHeader("timeout-access", "dummy"))

    val result = Source.single(HttpRequest(HttpMethods.GET, "/v1/github/users/xxxxxx"))
      .via(github.from(request))
      .runWith(TestSink.probe[HttpRequest])
      .request(1)
      .expectNext()

    result.headers.filter(_.lowercaseName() == "host") shouldBe empty
    result.headers.filter(_.lowercaseName() == "timeout-access") shouldBe empty
    result.headers.filter(_.lowercaseName() == "x-forwarded-host") shouldNot be(empty)
  }

  "#send" should "connect using http client" in {
    val httpResponse = HttpResponse()
    val httpClient = mock[HttpClient]
    (httpClient.connectionHttps _).expects(*, *, *).returning(Flow[HttpRequest].map(_ => httpResponse))

    val github = new GitHubImpl("127.0.0.1", 8000, 5, httpClient)
    val result = Source.single(HttpRequest(HttpMethods.GET, "/"))
      .via(github.send)
      .runWith(TestSink.probe[HttpResponse])
      .request(1)
      .expectNext()

    result shouldBe httpResponse
  }
}
Example 183
Source File: ClickhouseQueryExecutor.scala From clickhouse-scala-client with GNU Lesser General Public License v3.0 | 5 votes |
package com.crobox.clickhouse.dsl.execution

import akka.stream.scaladsl.Source
import com.crobox.clickhouse.ClickhouseClient
import com.crobox.clickhouse.dsl.language.{ClickhouseTokenizerModule, TokenizerModule}
import com.crobox.clickhouse.dsl.{Query, Table}
import com.crobox.clickhouse.internal.QuerySettings
import com.crobox.clickhouse.internal.progress.QueryProgress.QueryProgress
import spray.json.{JsonReader, _}

import scala.concurrent.{ExecutionContext, Future}

trait ClickhouseQueryExecutor extends QueryExecutor { self: TokenizerModule =>
  implicit val client: ClickhouseClient

  def execute[V: JsonReader](query: Query)(implicit executionContext: ExecutionContext,
                                           settings: QuerySettings = QuerySettings()): Future[QueryResult[V]] = {
    import QueryResult._
    val queryResult = client.query(toSql(query.internalQuery))
    queryResult.map(_.parseJson.convertTo[QueryResult[V]])
  }

  def executeWithProgress[V: JsonReader](
      query: Query
  )(implicit executionContext: ExecutionContext,
    settings: QuerySettings = QuerySettings()): Source[QueryProgress, Future[QueryResult[V]]] = {
    import QueryResult._
    val queryResult = client.queryWithProgress(toSql(query.internalQuery))
    queryResult.mapMaterializedValue(_.map(_.parseJson.convertTo[QueryResult[V]]))
  }

  override def insert[V: JsonWriter](
      table: Table,
      values: Seq[V]
  )(implicit executionContext: ExecutionContext, settings: QuerySettings = QuerySettings()): Future[String] =
    Future {
      values.map(_.toJson.compactPrint).mkString("\n") + "\n"
    }.flatMap(entity => client.execute(s"INSERT INTO ${table.quoted} FORMAT JSONEachRow", entity))
}

object ClickhouseQueryExecutor {

  def default(clickhouseClient: ClickhouseClient): ClickhouseQueryExecutor =
    new DefaultClickhouseQueryExecutor(clickhouseClient)
}

class DefaultClickhouseQueryExecutor(override val client: ClickhouseClient)
    extends ClickhouseQueryExecutor
    with ClickhouseTokenizerModule
Example 184
Source File: SourceAugmenter.scala From HAT2.0 with GNU Affero General Public License v3.0 | 5 votes |
package org.hatdex.hat.utils

import akka.NotUsed
import akka.stream.scaladsl.{ GraphDSL, Source }
import akka.stream.stage.GraphStage
import akka.stream.{ FanInShape2, SourceShape }

class SourceAugmenter {
  def augment[T, U](source: Source[T, NotUsed], extrasSource: Source[U, NotUsed], augmentFunction: (T, U) ⇒ Either[T, U]): Source[T, NotUsed] = {
    augmentSource(new AugmentWith(augmentFunction), source, extrasSource) { (_, _) => NotUsed }
  }

  private def augmentSource[T, U, MatIn0, MatIn1, Mat](
    combinator: GraphStage[FanInShape2[T, U, T]],
    s0: Source[T, MatIn0],
    s1: Source[U, MatIn1])(combineMat: (MatIn0, MatIn1) => Mat): Source[T, Mat] =
    Source.fromGraph(GraphDSL.create(s0, s1)(combineMat) { implicit builder => (s0, s1) =>
      import GraphDSL.Implicits._
      val merge = builder.add(combinator)
      s0 ~> merge.in0
      s1 ~> merge.in1
      SourceShape(merge.out)
    })
}
Example 185
Source File: SourceMergeSorter.scala From HAT2.0 with GNU Affero General Public License v3.0 | 5 votes |
package org.hatdex.hat.utils

import akka.NotUsed
import akka.stream.scaladsl.{ GraphDSL, MergeSorted, Source }
import akka.stream.stage.GraphStage
import akka.stream.{ FanInShape2, SourceShape }

import scala.annotation.tailrec

class SourceMergeSorter {
  def mergeWithSorter[A](originSources: Seq[Source[A, NotUsed]])(implicit ordering: Ordering[A]): Source[A, NotUsed] =
    merge(originSources, sorter[A])

  private def merge[A](originSources: Seq[Source[A, NotUsed]], f: (Source[A, NotUsed], Source[A, NotUsed]) => Source[A, NotUsed]): Source[A, NotUsed] =
    originSources match {
      case Nil =>
        Source.empty[A]

      case sources =>
        @tailrec
        def reducePairs(sources: Seq[Source[A, NotUsed]]): Source[A, NotUsed] =
          sources match {
            case Seq(s) =>
              s

            case _ =>
              reducePairs(sources.grouped(2).map {
                case Seq(a)    => a
                case Seq(a, b) => f(a, b)
              }.toSeq)
          }

        reducePairs(sources)
    }

  private def sorter[A](s1: Source[A, NotUsed], s2: Source[A, NotUsed])(implicit ord: Ordering[A]): Source[A, NotUsed] =
    combineSources(new MergeSorted[A], s1, s2) { (_, _) => NotUsed }

  private def combineSources[A, MatIn0, MatIn1, Mat](
    combinator: GraphStage[FanInShape2[A, A, A]],
    s0: Source[A, MatIn0],
    s1: Source[A, MatIn1])(combineMat: (MatIn0, MatIn1) => Mat): Source[A, Mat] =
    Source.fromGraph(GraphDSL.create(s0, s1)(combineMat) { implicit builder => (s0, s1) =>
      import GraphDSL.Implicits._
      val merge = builder.add(combinator)
      s0 ~> merge.in0
      s1 ~> merge.in1
      SourceShape(merge.out)
    })
}
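`MergeSorted` only produces a globally ordered output if each input source is already sorted; the class above reduces pairs of sources until one remains. A usage sketch, assuming an implicit ActorSystem is in scope to run the stream:

// Sketch: merging three pre-sorted sources into one ordered stream.
val sorted: Source[Int, NotUsed] =
  new SourceMergeSorter().mergeWithSorter(Seq(
    Source(List(1, 4, 7)),
    Source(List(2, 5, 8)),
    Source(List(3, 6, 9))))

sorted.runForeach(println) // emits 1, 2, 3, ..., 9 because each input is already sorted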
Example 186
Source File: AkkaHttpLambdaHandler.scala From scala-server-lambda with MIT License | 5 votes |
package io.github.howardjohn.lambda.akka

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpHeader.ParsingResult
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import io.github.howardjohn.lambda.ProxyEncoding._
import io.github.howardjohn.lambda.{LambdaHandler, ProxyEncoding}

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

class AkkaHttpLambdaHandler(route: Route)(
  implicit system: ActorSystem,
  materializer: ActorMaterializer,
  ec: ExecutionContext
) extends LambdaHandler {
  import AkkaHttpLambdaHandler._

  override def handleRequest(request: ProxyRequest): ProxyResponse =
    Await.result(runRequest(proxyToAkkaRequest(request)), Duration.Inf)

  private def runRequest(request: HttpRequest): Future[ProxyResponse] = {
    val source = Source.single(request)
    val sink = Sink.head[HttpResponse]
    source
      .via(route)
      .toMat(sink)(Keep.right)
      .run()
      .flatMap(asProxyResponse)
  }

  private def proxyToAkkaRequest(request: ProxyRequest): HttpRequest =
    new HttpRequest(
      method = parseHttpMethod(request.httpMethod),
      uri = Uri(ProxyEncoding.reconstructPath(request)),
      headers = parseRequestHeaders(request.headers.getOrElse(Map.empty)),
      entity = parseEntity(request.headers.getOrElse(Map.empty), request.body),
      protocol = HttpProtocols.`HTTP/1.1`
    )

  private def parseEntity(headers: Map[String, String], body: Option[String]): MessageEntity = {
    val defaultContentType = ContentTypes.`text/plain(UTF-8)`
    val contentType = ContentType
      .parse(headers.getOrElse("Content-Type", defaultContentType.value))
      .getOrElse(defaultContentType)

    body match {
      case Some(b) => HttpEntity(contentType, b.getBytes)
      case None => HttpEntity.empty(contentType)
    }
  }

  private def asProxyResponse(resp: HttpResponse): Future[ProxyResponse] =
    Unmarshal(resp.entity)
      .to[String]
      .map { body =>
        ProxyResponse(
          resp.status.intValue(),
          resp.headers.map(h => h.name -> h.value).toMap,
          body
        )
      }
}

private object AkkaHttpLambdaHandler {
  private def parseRequestHeaders(headers: Map[String, String]): List[HttpHeader] =
    headers.map {
      case (k, v) =>
        HttpHeader.parse(k, v) match {
          case ParsingResult.Ok(header, _) => header
          case ParsingResult.Error(err) => throw new RuntimeException(s"Failed to parse header $k:$v with error $err.")
        }
    }.toList

  private def parseHttpMethod(method: String) = method.toUpperCase match {
    case "CONNECT" => HttpMethods.CONNECT
    case "DELETE" => HttpMethods.DELETE
    case "GET" => HttpMethods.GET
    case "HEAD" => HttpMethods.HEAD
    case "OPTIONS" => HttpMethods.OPTIONS
    case "PATCH" => HttpMethods.PATCH
    case "POST" => HttpMethods.POST
    case "PUT" => HttpMethods.PUT
    case "TRACE" => HttpMethods.TRACE
    case other => HttpMethod.custom(other)
  }
}
Example 187
Source File: ScanAndScrollSourceTest.scala From elasticsearch-client with Apache License 2.0 | 5 votes |
package com.sumologic.elasticsearch.akkahelpers

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import com.sumologic.elasticsearch.restlastic.RestlasticSearchClient.ReturnTypes._
import com.sumologic.elasticsearch.restlastic.ScrollClient
import com.sumologic.elasticsearch.restlastic.dsl.Dsl
import com.sumologic.elasticsearch.restlastic.dsl.Dsl._
import org.json4s.Extraction._
import org.json4s._
import org.junit.runner.RunWith
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{Matchers, WordSpec}
import org.scalatestplus.junit.JUnitRunner

import scala.concurrent.{ExecutionContext, Future}

@RunWith(classOf[JUnitRunner])
class ScanAndScrollSourceTest extends WordSpec with Matchers with ScalaFutures {
  val resultMaps: List[Map[String, AnyRef]] = List(Map("a" -> "1"), Map("a" -> "2"), Map("a" -> "3"))

  implicit val formats = org.json4s.DefaultFormats
  implicit val system = ActorSystem("test")
  implicit val materializer = ActorMaterializer()

  def searchResponseFromMap(map: Map[String, AnyRef]) = {
    val raw = RawSearchResponse(
      Hits(
        List(
          ElasticJsonDocument(
            "index",
            "type",
            "id",
            Some(0.1f),
            decompose(map).asInstanceOf[JObject],
            highlight = None,
            inner_hits = None)),
        1))
    SearchResponse(raw, "{}")
  }

  "ScanAndScrollSource" should {
    val index = Index("index")
    val tpe = Type("tpe")
    val queryRoot = new QueryRoot(MatchAll)

    "Read to the end of a source" in {
      val searchResponses = resultMaps.map(searchResponseFromMap)
      val client = new MockScrollClient(searchResponses)
      val source = Source.actorPublisher[SearchResponse](
        ScanAndScrollSource.props(index, tpe, queryRoot, client, sizeOpt = Some(5)))
      val fut = source
        .map(_.sourceAsMap)
        .grouped(10)
        .runWith(Sink.head)
      whenReady(fut) { resp =>
        resp.flatten should be(resultMaps)
      }
    }
  }
}

class MockScrollClient(results: List[SearchResponse]) extends ScrollClient {
  var id = 1
  var started = false
  var resultsQueue = results

  override val indexExecutionCtx: ExecutionContext = ExecutionContext.Implicits.global

  override def startScrollRequestIndices(
      indices: Seq[Dsl.Index],
      tpe: Dsl.Type,
      query: Dsl.QueryRoot,
      resultWindowOpt: Option[String] = None,
      fromOpt: Option[Int] = None,
      sizeOpt: Option[Int] = None,
      preference: Option[String] = None): Future[(ScrollId, SearchResponse)] = {
    if (!started) {
      started = true
      processRequest()
    } else {
      Future.failed(new RuntimeException("Scroll already started"))
    }
  }

  override def scroll(scrollId: ScrollId, resultWindowOpt: Option[String] = None): Future[(ScrollId, SearchResponse)] = {
    if (scrollId.id.toInt == id) {
      processRequest()
    } else {
      Future.failed(new RuntimeException("Invalid id"))
    }
  }

  private def processRequest(): Future[(ScrollId, SearchResponse)] = {
    id += 1
    resultsQueue match {
      case head :: rest =>
        resultsQueue = rest
        Future.successful((ScrollId(id.toString), head))
      case Nil =>
        Future.successful((ScrollId(id.toString), SearchResponse.empty))
    }
  }
}
Example 188
Source File: TagViewSequenceNumberScanner.scala From akka-persistence-cassandra with Apache License 2.0 | 5 votes |
package akka.persistence.cassandra.query

import java.lang.{ Long => JLong }
import java.util.UUID

import akka.NotUsed
import akka.annotation.InternalApi
import akka.event.{ Logging, LoggingAdapter }
import akka.persistence.cassandra.journal.CassandraJournal._
import akka.persistence.cassandra.journal.TimeBucket
import akka.persistence.cassandra.formatOffset
import akka.persistence.cassandra.query.TagViewSequenceNumberScanner.Session
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.datastax.oss.driver.api.core.cql.{ PreparedStatement, Row }

import scala.concurrent.duration.{ Deadline, FiniteDuration }
import scala.concurrent.{ ExecutionContext, Future }

import akka.persistence.cassandra.BucketSize
import akka.stream.alpakka.cassandra.scaladsl.CassandraSession

// NOTE: the enclosing class declaration was truncated in the original listing;
// a minimal wrapper (constructor parameters assumed) is added so the snippet compiles.
@InternalApi
private[akka] class TagViewSequenceNumberScanner(session: Session, log: LoggingAdapter)(
    implicit materializer: Materializer,
    ec: ExecutionContext) {

  private[akka] def scan(
      tag: String,
      fromOffset: UUID,
      toOffset: UUID,
      bucketSize: BucketSize,
      scanningPeriod: FiniteDuration,
      whichToKeep: (TagPidSequenceNr, TagPidSequenceNr) => TagPidSequenceNr)
      : Future[Map[PersistenceId, (TagPidSequenceNr, UUID)]] = {
    val deadline: Deadline = Deadline.now + scanningPeriod

    def doIt(): Future[Map[PersistenceId, (TagPidSequenceNr, UUID)]] = {
      // How many buckets is this spread across?
      val startBucket = TimeBucket(fromOffset, bucketSize)
      val endBucket = TimeBucket(toOffset, bucketSize)
      require(startBucket <= endBucket)

      if (log.isDebugEnabled) {
        log.debug(
          s"Scanning tag: $tag from: {}, to: {}. Bucket {} to {}",
          formatOffset(fromOffset),
          formatOffset(toOffset),
          startBucket,
          endBucket)
      }

      Source
        .unfold(startBucket) { current =>
          if (current <= endBucket) {
            Some((current.next(), current))
          } else {
            None
          }
        }
        .flatMapConcat { bucket =>
          log.debug("Scanning bucket {}", bucket)
          session.selectTagSequenceNrs(tag, bucket, fromOffset, toOffset)
        }
        .map(row =>
          (row.getString("persistence_id"), row.getLong("tag_pid_sequence_nr"), row.getUuid("timestamp")))
        // accumulator keyed by persistence id, matching the declared return type
        .runFold(Map.empty[PersistenceId, (TagPidSequenceNr, UUID)]) {
          case (acc, (pid, tagPidSequenceNr, timestamp)) =>
            val (newTagPidSequenceNr, newTimestamp) = acc.get(pid) match {
              case None =>
                (tagPidSequenceNr, timestamp)
              case Some((currentTagPidSequenceNr, currentTimestamp)) =>
                if (whichToKeep(tagPidSequenceNr, currentTagPidSequenceNr) == tagPidSequenceNr)
                  (tagPidSequenceNr, timestamp)
                else
                  (currentTagPidSequenceNr, currentTimestamp)
            }
            acc + (pid -> ((newTagPidSequenceNr, newTimestamp)))
        }
        .flatMap { result =>
          if (deadline.hasTimeLeft()) {
            doIt()
          } else {
            Future.successful(result)
          }
        }
    }
    doIt()
  }
}
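The bucket traversal above relies on Source.unfold to walk an inclusive range; a standalone sketch of the same idiom over plain Ints (example values assumed):

import akka.stream.scaladsl.Source

// unfold emits the current state and advances it until the function returns None
val buckets = Source.unfold(0) { current =>
  if (current <= 3) Some((current + 1, current)) else None
}
// emits 0, 1, 2, 3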
Example 189
Source File: BuildTagViewForPersistenceId.scala From akka-persistence-cassandra with Apache License 2.0 | 5 votes |
package akka.persistence.cassandra.reconciler

import akka.actor.ActorSystem
import akka.persistence.cassandra.PluginSettings
import akka.Done
import akka.persistence.cassandra.journal.TagWriter._

import scala.concurrent.duration._
import scala.concurrent.Future

import akka.stream.scaladsl.Source
import akka.actor.ExtendedActorSystem
import akka.persistence.query.PersistenceQuery
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.event.Logging
import akka.persistence.cassandra.journal.CassandraTagRecovery
import akka.persistence.cassandra.Extractors
import akka.util.Timeout
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Sink
import akka.annotation.InternalApi
import akka.serialization.SerializationExtension

@InternalApi
private[akka] final class BuildTagViewForPersistenceId(
    persistenceId: String,
    system: ActorSystem,
    recovery: CassandraTagRecovery,
    settings: PluginSettings) {

  import system.dispatcher

  private implicit val sys = system
  private val log = Logging(system, classOf[BuildTagViewForPersistenceId])
  private val serialization = SerializationExtension(system)

  private val queries: CassandraReadJournal =
    PersistenceQuery(system.asInstanceOf[ExtendedActorSystem])
      .readJournalFor[CassandraReadJournal]("akka.persistence.cassandra.query")

  private implicit val flushTimeout = Timeout(30.seconds)

  def reconcile(flushEvery: Int = 1000): Future[Done] = {
    val recoveryPrep = for {
      tp <- recovery.lookupTagProgress(persistenceId)
      _ <- recovery.setTagProgress(persistenceId, tp)
    } yield tp

    Source
      .futureSource(recoveryPrep.map { (tp: Map[String, TagProgress]) =>
        log.debug("[{}] Rebuilding tag view table from: [{}]", persistenceId, tp)
        queries
          .eventsByPersistenceId(
            persistenceId,
            0,
            Long.MaxValue,
            Long.MaxValue,
            None,
            settings.journalSettings.readProfile,
            "BuildTagViewForPersistenceId",
            extractor = Extractors.rawEvent(settings.eventsByTagSettings.bucketSize, serialization, system))
          .map(recovery.sendMissingTagWriteRaw(tp, actorRunning = false))
          .buffer(flushEvery, OverflowStrategy.backpressure)
          .mapAsync(1)(_ => recovery.flush(flushTimeout))
      })
      .runWith(Sink.ignore)
  }
}
Example 190
Source File: AllTags.scala From akka-persistence-cassandra with Apache License 2.0 | 5 votes |
package akka.persistence.cassandra.reconciler

import akka.annotation.InternalApi
import akka.stream.scaladsl.Source
import akka.NotUsed

@InternalApi
private[akka] final class AllTags(session: ReconciliationSession) {

  def execute(): Source[String, NotUsed] = {
    session
      .selectAllTagProgress()
      .map(_.getString("tag"))
      .statefulMapConcat { () =>
        // per-materialization state: emit each tag only the first time it is seen
        var seen = Set.empty[String]
        tag =>
          if (!seen.contains(tag)) {
            seen += tag
            List(tag)
          } else {
            Nil
          }
      }
  }
}
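The statefulMapConcat call above is in effect a stream-local "distinct"; the same idiom works on any source. A minimal standalone sketch (example data assumed):

import akka.stream.scaladsl.Source

// keeps the first occurrence of each element, drops later duplicates
val distinctTags = Source(List("blue", "green", "blue", "red"))
  .statefulMapConcat { () =>
    var seen = Set.empty[String]
    tag => if (seen.contains(tag)) Nil else { seen += tag; tag :: Nil }
  }
// emits "blue", "green", "red"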
Example 191
Source File: EventsByTagCrashSpec.scala From akka-persistence-cassandra with Apache License 2.0 | 5 votes |
package akka.persistence.cassandra

import akka.NotUsed
import akka.persistence.cassandra.TestTaggingActor.{ Ack, Crash }
import akka.persistence.query.{ EventEnvelope, NoOffset }
import akka.stream.scaladsl.Source
import akka.stream.testkit.scaladsl.TestSink

import scala.concurrent.duration._

class EventsByTagCrashSpec extends CassandraSpec(EventsByTagRestartSpec.config) {

  val waitTime = 100.milliseconds

  "EventsByTag" must {

    "handle crashes of the persistent actor" in {
      // crash the actor many times, persist 5 events each time
      val crashEvery = 5
      val crashNr = 20
      val msgs = crashEvery * crashNr
      val p2 = system.actorOf(TestTaggingActor.props("p2", Set("blue")))
      (1 to msgs).foreach { cn =>
        if (cn % crashEvery == 0) {
          p2 ! Crash
        }
        val msg = s"msg $cn"
        p2 ! msg
        expectMsg(Ack)
      }

      val blueTags: Source[EventEnvelope, NotUsed] = queryJournal.eventsByTag(tag = "blue", offset = NoOffset)
      val tagProbe = blueTags.runWith(TestSink.probe[EventEnvelope](system))

      (1L to msgs).foreach { m =>
        val expected = s"msg $m"
        tagProbe.request(1)
        tagProbe.expectNext().event shouldEqual expected
      }
      tagProbe.expectNoMessage(250.millis)
      tagProbe.cancel()
    }
  }
}
Example 192
Source File: Gateway.scala From reactive-microservices with MIT License | 5 votes |
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.client.RequestBuilding
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.FlowMaterializer
import akka.stream.scaladsl.{Sink, Source}
import java.io.IOException
import scala.concurrent.{ExecutionContext, Future}

case class InternalLoginRequest(identityId: Long, authMethod: String = "codecard")
case class InternalReloginRequest(tokenValue: String, authMethod: String = "codecard")

class Gateway(implicit actorSystem: ActorSystem, materializer: FlowMaterializer, ec: ExecutionContext)
  extends JsonProtocols with Config {

  private val identityManagerConnectionFlow = Http().outgoingConnection(identityManagerHost, identityManagerPort)
  private val tokenManagerConnectionFlow = Http().outgoingConnection(tokenManagerHost, tokenManagerPort)

  private def requestIdentityManager(request: HttpRequest): Future[HttpResponse] = {
    Source.single(request).via(identityManagerConnectionFlow).runWith(Sink.head)
  }

  private def requestTokenManager(request: HttpRequest): Future[HttpResponse] = {
    Source.single(request).via(tokenManagerConnectionFlow).runWith(Sink.head)
  }

  def requestToken(tokenValue: String): Future[Either[String, Token]] = {
    requestTokenManager(RequestBuilding.Get(s"/tokens/$tokenValue")).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token].map(Right(_))
        case NotFound => Future.successful(Left("Token expired or not found"))
        case _ => Future.failed(new IOException(s"Token request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestNewIdentity(): Future[Identity] = {
    requestIdentityManager(RequestBuilding.Post("/identities")).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Identity]
        case _ => Future.failed(new IOException(s"Identity request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestLogin(identityId: Long): Future[Token] = {
    val loginRequest = InternalLoginRequest(identityId)
    requestTokenManager(RequestBuilding.Post("/tokens", loginRequest)).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token]
        case _ => Future.failed(new IOException(s"Login request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestRelogin(tokenValue: String): Future[Option[Token]] = {
    requestTokenManager(RequestBuilding.Patch("/tokens", InternalReloginRequest(tokenValue))).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token].map(Option(_))
        case NotFound => Future.successful(None)
        case _ => Future.failed(new IOException(s"Relogin request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }
}
Example 193
Source File: Gateway.scala From reactive-microservices with MIT License | 5 votes |
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.client.RequestBuilding
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.FlowMaterializer
import akka.stream.scaladsl.{Sink, Source}
import java.io.IOException
import scala.concurrent.{ExecutionContext, Future}

case class InternalLoginRequest(identityId: Long, authMethod: String = "password")
case class InternalReloginRequest(tokenValue: String, authMethod: String = "password")

class Gateway(implicit actorSystem: ActorSystem, materializer: FlowMaterializer, ec: ExecutionContext)
  extends JsonProtocols with Config {

  private val identityManagerConnectionFlow = Http().outgoingConnection(identityManagerHost, identityManagerPort)
  private val tokenManagerConnectionFlow = Http().outgoingConnection(tokenManagerHost, tokenManagerPort)

  private def requestIdentityManager(request: HttpRequest): Future[HttpResponse] = {
    Source.single(request).via(identityManagerConnectionFlow).runWith(Sink.head)
  }

  private def requestTokenManager(request: HttpRequest): Future[HttpResponse] = {
    Source.single(request).via(tokenManagerConnectionFlow).runWith(Sink.head)
  }

  def requestToken(tokenValue: String): Future[Either[String, Token]] = {
    requestTokenManager(RequestBuilding.Get(s"/tokens/$tokenValue")).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token].map(Right(_))
        case NotFound => Future.successful(Left("Token expired or not found"))
        case _ => Future.failed(new IOException(s"Token request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestNewIdentity(): Future[Identity] = {
    requestIdentityManager(RequestBuilding.Post("/identities")).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Identity]
        case _ => Future.failed(new IOException(s"Identity request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestLogin(identityId: Long): Future[Token] = {
    val loginRequest = InternalLoginRequest(identityId)
    requestTokenManager(RequestBuilding.Post("/tokens", loginRequest)).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token]
        case _ => Future.failed(new IOException(s"Login request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestRelogin(tokenValue: String): Future[Option[Token]] = {
    requestTokenManager(RequestBuilding.Patch("/tokens", InternalReloginRequest(tokenValue))).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token].map(Option(_))
        case NotFound => Future.successful(None)
        case _ => Future.failed(new IOException(s"Relogin request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }
}
Example 194
Source File: SessionManager.scala From reactive-microservices with MIT License | 5 votes |
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.client.RequestBuilding
import akka.http.scaladsl.model.{HttpResponse, HttpRequest}
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorFlowMaterializer
import akka.stream.scaladsl.{Sink, Source}
import com.typesafe.config.ConfigFactory
import scala.concurrent.Future

object SessionManager extends App {
  val config = ConfigFactory.load()
  val interface = config.getString("http.interface")
  val port = config.getInt("http.port")
  val tokenManagerHost = config.getString("services.token-manager.host")
  val tokenManagerPort = config.getInt("services.token-manager.port")

  implicit val actorSystem = ActorSystem()
  implicit val materializer = ActorFlowMaterializer()
  implicit val dispatcher = actorSystem.dispatcher

  val tokenManagerConnectionFlow = Http().outgoingConnection(tokenManagerHost, tokenManagerPort)

  def requestTokenManager(request: HttpRequest): Future[HttpResponse] = {
    Source.single(request).via(tokenManagerConnectionFlow).runWith(Sink.head)
  }

  Http().bindAndHandle(interface = interface, port = port, handler = {
    logRequestResult("session-manager") {
      path("session") {
        headerValueByName("Auth-Token") { tokenValue =>
          pathEndOrSingleSlash {
            get {
              complete {
                requestTokenManager(RequestBuilding.Get(s"/tokens/$tokenValue"))
              }
            } ~
            delete {
              complete {
                requestTokenManager(RequestBuilding.Delete(s"/tokens/$tokenValue"))
              }
            }
          }
        }
      }
    }
  })
}
Example 195
Source File: Gateway.scala From reactive-microservices with MIT License | 5 votes |
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.client.RequestBuilding
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.FlowMaterializer
import akka.stream.scaladsl.{Sink, Source}
import com.restfb.DefaultFacebookClient
import com.restfb.types.User
import java.io.IOException
import scala.concurrent.{blocking, ExecutionContext, Future}
import scala.util.Try

case class InternalLoginRequest(identityId: Long, authMethod: String = "fb")
case class InternalReloginRequest(tokenValue: String, authMethod: String = "fb")

class Gateway(implicit actorSystem: ActorSystem, materializer: FlowMaterializer, ec: ExecutionContext)
  extends JsonProtocols with Config {

  private val identityManagerConnectionFlow = Http().outgoingConnection(identityManagerHost, identityManagerPort)
  private val tokenManagerConnectionFlow = Http().outgoingConnection(tokenManagerHost, tokenManagerPort)

  private def requestIdentityManager(request: HttpRequest): Future[HttpResponse] = {
    Source.single(request).via(identityManagerConnectionFlow).runWith(Sink.head)
  }

  private def requestTokenManager(request: HttpRequest): Future[HttpResponse] = {
    Source.single(request).via(tokenManagerConnectionFlow).runWith(Sink.head)
  }

  def requestToken(tokenValue: String): Future[Either[String, Token]] = {
    requestTokenManager(RequestBuilding.Get(s"/tokens/$tokenValue")).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token].map(Right(_))
        case NotFound => Future.successful(Left("Token expired or not found"))
        case _ => Future.failed(new IOException(s"Token request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestNewIdentity(): Future[Identity] = {
    requestIdentityManager(RequestBuilding.Post("/identities")).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Identity]
        case _ => Future.failed(new IOException(s"Identity request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestLogin(identityId: Long): Future[Token] = {
    val loginRequest = InternalLoginRequest(identityId)
    requestTokenManager(RequestBuilding.Post("/tokens", loginRequest)).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token]
        case _ => Future.failed(new IOException(s"Login request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestRelogin(tokenValue: String): Future[Option[Token]] = {
    requestTokenManager(RequestBuilding.Patch("/tokens", InternalReloginRequest(tokenValue))).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token].map(Option(_))
        case NotFound => Future.successful(None)
        case _ => Future.failed(new IOException(s"Relogin request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def getFbUserDetails(accessToken: String): Try[User] = {
    Try {
      // fetchObject performs a blocking HTTP call to the Facebook Graph API
      blocking {
        val client = new DefaultFacebookClient(accessToken)
        client.fetchObject("me", classOf[User])
      }
    }
  }
}
Example 196
Source File: AkkaStreamsPartitionHubApp.scala From Scala-Reactive-Programming with MIT License | 5 votes |
package com.packt.publishing.dynamic.akka.streams

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, PartitionHub, RunnableGraph, Source}

import scala.concurrent.duration._

object AkkaStreamsPartitionHubApp extends App {

  implicit val actorSystem = ActorSystem("PartitionHubSystem")
  implicit val materializer = ActorMaterializer()

  val producer = Source.tick(1.second, 1.second, "message")
    .zipWith(Source(1 to 10))((a, b) ⇒ s"$a-$b")

  val runnableGraph: RunnableGraph[Source[String, NotUsed]] =
    producer.toMat(
      PartitionHub.sink(
        (size, elem) ⇒ math.abs(elem.hashCode) % size,
        startAfterNrOfConsumers = 2,
        bufferSize = 256))(Keep.right)

  val fromProducer: Source[String, NotUsed] = runnableGraph.run()

  fromProducer.runForeach(msg ⇒ println("consumer1: " + msg))
  fromProducer.runForeach(msg ⇒ println("consumer2: " + msg))

  Thread.sleep(5000)
  actorSystem.terminate
}
Example 197
Source File: AkkaStreamsMergeHubApp.scala From Scala-Reactive-Programming with MIT License | 5 votes |
package com.packt.publishing.dynamic.akka.streams

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{MergeHub, RunnableGraph, Sink, Source}

object AkkaStreamsMergeHubApp extends App {

  implicit val actorSystem = ActorSystem("MergeHubSystem")
  implicit val materializer = ActorMaterializer()

  val consumer = Sink.foreach(println)
  val mergeHub = MergeHub.source[String](perProducerBufferSize = 16)
  val runnableGraph: RunnableGraph[Sink[String, NotUsed]] = mergeHub.to(consumer)

  val toConsumer: Sink[String, NotUsed] = runnableGraph.run()

  Source.single("Hello!").runWith(toConsumer)
  Source.single("MergeHub!").runWith(toConsumer)
  Source.single("World!").runWith(toConsumer)

  Thread.sleep(500)
  actorSystem.terminate
}
Example 198
Source File: ChatController.scala From Scala-Reactive-Programming with MIT License | 5 votes |
package controllers

import java.net.URI
import javax.inject._

import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.Materializer
import akka.stream.scaladsl.{BroadcastHub, Flow, Keep, MergeHub, Source}
import play.api.Logger
import play.api.mvc._

import scala.concurrent.{ExecutionContext, Future}

@Singleton
class ChatController @Inject()(cc: ControllerComponents)
                              (implicit actorSystem: ActorSystem,
                               mat: Materializer,
                               executionContext: ExecutionContext,
                               webJarsUtil: org.webjars.play.WebJarsUtil)
  extends AbstractController(cc) with RequestMarkerContext {

  private type WSMessage = String

  private val logger = Logger(getClass)

  private implicit val logging = Logging(actorSystem.eventStream, logger.underlyingLogger.getName)

  private val (chatSink, chatSource) = {
    val source = MergeHub.source[WSMessage]
      .log("source")
      .recoverWithRetries(-1, { case _: Exception ⇒ Source.empty })

    val sink = BroadcastHub.sink[WSMessage]
    source.toMat(sink)(Keep.both).run()
  }

  private val userFlow: Flow[WSMessage, WSMessage, _] =
    Flow.fromSinkAndSource(chatSink, chatSource)

  def index: Action[AnyContent] = Action { implicit request: RequestHeader =>
    val webSocketUrl = routes.ChatController.chat().webSocketURL()
    logger.info("index: ")
    Ok(views.html.index(webSocketUrl))
  }

  def chat(): WebSocket = {
    WebSocket.acceptOrResult[WSMessage, WSMessage] {
      case rh if sameOriginCheck(rh) =>
        Future.successful(userFlow).map { flow =>
          Right(flow)
        }.recover {
          case e: Exception =>
            val msg = "Cannot create websocket"
            logger.error(msg, e)
            val result = InternalServerError(msg)
            Left(result)
        }

      case rejected =>
        logger.error(s"Request ${rejected} failed same origin check")
        Future.successful {
          Left(Forbidden("forbidden"))
        }
    }
  }

  private def sameOriginCheck(implicit rh: RequestHeader): Boolean = {
    logger.debug("Checking the ORIGIN ")

    rh.headers.get("Origin") match {
      case Some(originValue) if originMatches(originValue) =>
        logger.debug(s"originCheck: originValue = $originValue")
        true

      case Some(badOrigin) =>
        logger.error(s"originCheck: rejecting request because Origin header value ${badOrigin} is not in the same origin")
        false

      case None =>
        logger.error("originCheck: rejecting request because no Origin header found")
        false
    }
  }

  private def originMatches(origin: String): Boolean = {
    try {
      val url = new URI(origin)
      url.getHost == "localhost" &&
        (url.getPort match { case 9000 | 19001 => true; case _ => false })
    } catch {
      case e: Exception => false
    }
  }
}
Example 199
Source File: AkkaStreamsHelloWorldApp3.scala From Scala-Reactive-Programming with MIT License | 5 votes |
package com.packt.publishing.akka.streams.hello

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Keep, RunnableGraph, Sink, Source}
import akka.{Done, NotUsed}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Failure, Success}

object AkkaStreamsHelloWorldApp3 extends App {

  implicit val actorSystem = ActorSystem("HelloWorldSystem")
  implicit val materializer = ActorMaterializer()

  val helloWorldSource: Source[String, NotUsed] = Source.single("Akka Streams Hello World")
  val helloWorldSink: Sink[String, Future[Done]] = Sink.foreach(println)
  val helloWorldFlow: Flow[String, String, NotUsed] = Flow[String].map(str => str.toUpperCase)

  val helloWorldGraph: RunnableGraph[NotUsed] =
    helloWorldSource
      .via(helloWorldFlow)
      .to(helloWorldSink)

  val helloWorldGraph2: RunnableGraph[Future[Done]] =
    helloWorldSource
      .via(helloWorldFlow)
      .toMat(helloWorldSink)(Keep.right)

  helloWorldGraph.run

  val helloWorldMaterializedValue: Future[Done] = helloWorldGraph2.run
  helloWorldMaterializedValue.onComplete {
    case Success(Done) => println("HelloWorld Stream ran successfully.")
    case Failure(exception) => println(s"HelloWorld Stream ran into an issue: ${exception}.")
  }

  actorSystem.terminate
}
Example 200
Source File: MapInitAndLastTests.scala From CM-Well with Apache License 2.0 | 5 votes |
package cmwell.util.streams.test

import akka.stream._
import akka.stream.scaladsl.{GraphDSL, RunnableGraph, Source}
import akka.stream.testkit.scaladsl.{TestSink, TestSource}
import akka.stream.testkit.TestPublisher.{Probe => SrcProbe}
import akka.stream.testkit.TestSubscriber.{Probe => SnkProbe}
import cmwell.util.stream.MapInitAndLast

import scala.concurrent.duration.DurationInt

class MapInitAndLastTests extends StreamSpec {

  def generateGraph[In](): (SrcProbe[In], SnkProbe[(In, Boolean)]) = {
    val src = TestSource.probe[In]
    val snk = TestSink.probe[(In, Boolean)]
    RunnableGraph.fromGraph(GraphDSL.create(src, snk)((a, b) => (a, b)) {
      implicit b => (s1, s2) =>
        import GraphDSL.Implicits._
        val mial = b.add(new MapInitAndLast[In, (In, Boolean)](_ -> false, _ -> true))
        s1 ~> mial ~> s2
        ClosedShape
    }).run()
  }

  describe("MapInitAndLast Stage") {
    it("should buffer a single element") {
      val (src, snk) = generateGraph[Int]()
      snk.request(99)
      src.sendNext(1)
      snk.expectNoMessage(300.millis)
      src.sendComplete()
      snk.expectNext((1, true))
      snk.expectComplete()
    }

    it("should treat last element differently") {
      val (src, snk) = generateGraph[Int]()
      snk.request(99)
      src.sendNext(1)
      snk.expectNoMessage(300.millis)
      src.sendNext(2)
      snk.expectNext((1, false))
      src.sendNext(3)
      snk.expectNext((2, false))
      src.sendComplete()
      snk.expectNext((3, true))
      snk.expectComplete()
    }

    it("should propagate back-pressure") {
      val (src, snk) = generateGraph[Int]()
      snk.ensureSubscription()
      src.sendNext(1)
      snk.expectNoMessage(300.millis)
      src.sendNext(1)
      snk.expectNoMessage(300.millis)
      src.sendComplete()
      snk.expectNoMessage(300.millis)
      snk.request(1)
      snk.expectNext((1, false))
      snk.request(1)
      snk.expectNext((1, true))
      snk.expectComplete()
    }
  }
}
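A usage sketch outside the test harness (assumed, relying on the same MapInitAndLast constructor the test exercises and on the stage having a flow shape): the stage maps every element with the first function and only the final element with the second, which is how the (value, isLast) pairs above are produced.

import akka.stream.scaladsl.{ Flow, Source }
import cmwell.util.stream.MapInitAndLast

// tags each element with false, except the last, which gets true
val taggedLast = Source(1 to 4)
  .via(Flow.fromGraph(new MapInitAndLast[Int, (Int, Boolean)](_ -> false, _ -> true)))
// emits (1,false), (2,false), (3,false), (4,true)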