akka.stream.scaladsl.Keep Scala Examples

The following examples show how to use akka.stream.scaladsl.Keep. They are drawn from open source projects; the source file, project, and license are noted above each example.
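Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what Keep does: when two stages are combined with viaMat or toMat, the combiners Keep.left, Keep.right, Keep.both and Keep.none choose which of the two materialized values the combined graph exposes. The object and system names below (KeepDemo, "keep-demo") are arbitrary choices for the sketch.

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.{Done, NotUsed}

import scala.concurrent.Future

object KeepDemo extends App {
  implicit val system: ActorSystem = ActorSystem("keep-demo")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  val source: Source[Int, NotUsed] = Source(1 to 3)
  val sink: Sink[Int, Future[Done]] = Sink.foreach(println)

  // Keep.left exposes the source's materialized value (NotUsed),
  // Keep.right the sink's Future[Done], Keep.both the pair of the two.
  val left: NotUsed = source.toMat(sink)(Keep.left).run()
  val right: Future[Done] = source.toMat(sink)(Keep.right).run()
  val both: (NotUsed, Future[Done]) = source.toMat(sink)(Keep.both).run()

  import system.dispatcher
  both._2.onComplete(_ => system.terminate())
}

Note that Keep.left, Keep.right, Keep.both and Keep.none are plain functions of type (Mat1, Mat2) => Out, which is why Example 12 below can also pass Keep.right as the combining function of a fold.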
Example 1
Source File: AkkaStreamProcess.scala    From aecor   with MIT License
package aecor.example.process

import aecor.util.effect._
import akka.stream.scaladsl.{ Keep, Sink, Source }
import akka.stream.{ KillSwitches, Materializer }
import cats.effect.Async
import cats.implicits._

object AkkaStreamProcess {
  final class Builder[F[_]] {
    def apply[M](source: Source[Unit, M],
                 materializer: Materializer)(implicit F: Async[F]): F[Unit] =
      F.bracket(
        F.delay(
          source
            .viaMat(KillSwitches.single)(Keep.right)
            .toMat(Sink.ignore)(Keep.both)
            .run()(materializer)
        )
      )(x => F.fromFuture(x._2).void)(x => F.delay(x._1.shutdown()))

  }
  def apply[F[_]]: Builder[F] = new Builder[F]
} 
Example 2
Source File: KillSwitchMatStream.scala    From squbs   with Apache License 2.0
package org.squbs.stream

import java.util.concurrent.atomic.AtomicLong

import akka.stream.{ClosedShape, KillSwitch, KillSwitches}
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object KillSwitchMatStream {
  val genCount = new AtomicLong(0L)
}

class KillSwitchMatStream extends PerpetualStream[(KillSwitch, Future[Long])] {
  import KillSwitchMatStream._
  import org.squbs.unicomplex.Timeouts._

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0){ p => if (p == Int.MaxValue) 0 else p + 1 } map { v =>
    genCount.incrementAndGet()
    v
  }

  val source = Source.fromIterator(generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(KillSwitches.single[Int], counter)((_, _)) {
    implicit builder =>
      (kill, sink) =>
        import GraphDSL.Implicits._
        source ~> kill ~> throttle ~> sink
        ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>

      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! matValue._2
  }
} 
Example 3
Source File: UnicomplexActorPublisherSpec.scala    From squbs   with Apache License 2.0
package org.squbs.stream

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Keep
import akka.stream.testkit.scaladsl.{TestSink, TestSource}
import akka.testkit.TestKit
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest._
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex._

import scala.concurrent.duration._

object UnicomplexActorPublisherSpec {
  val myConfig: Config = ConfigFactory.parseString(
    """
      | squbs.actorsystem-name = UnicomplexActorPublisherSpec
    """.stripMargin)
  val boot = UnicomplexBoot(myConfig).createUsing((name, config) => ActorSystem(name, config))
    .scanResources("/")
    .initExtensions
    .start()
}

final class UnicomplexActorPublisherSpec extends TestKit(UnicomplexActorPublisherSpec.boot.actorSystem)
    with FlatSpecLike with Matchers with BeforeAndAfterAll {

  implicit val materializer = ActorMaterializer()
  val duration = 10.second

  val in = TestSource.probe[String]

  // expose probe port(s)
  val ((pubIn, pubTrigger), sub) = LifecycleManaged().source(in).toMat(TestSink.probe[String](system))(Keep.both).run()

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "UnicomplexTrigger" should "activate flow by unicomplex" in {
    // send 2 elements to in
    pubIn.sendNext("1")
    pubIn.sendNext("2")
    sub.request(2)
    sub.expectNext(duration, "1")
    sub.expectNext("2")

    // re-send Active to unicomplex trigger, flow continues
    sub.request(2)
    sub.expectNoMessage(remainingOrDefault)
    pubTrigger ! SystemState
    pubIn.sendNext("3")
    pubIn.sendNext("4")
    sub.expectNext("3", "4")
  }
} 
Example 4
Source File: KillSwitchWithChildActorStream.scala    From squbs   with Apache License 2.0
package org.squbs.stream
import java.util.concurrent.atomic.AtomicLong

import akka.actor.{Actor, Props}
import akka.stream.ClosedShape
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object KillSwitchWithChildActorStream {
  val genCount = new AtomicLong(0L)
}

class DummyChildActor extends Actor {
  def receive = PartialFunction.empty
}

class KillSwitchWithChildActorStream extends PerpetualStream[Future[Long]] {
  import KillSwitchWithChildActorStream._
  import org.squbs.unicomplex.Timeouts._

  val dummyChildActor = context.actorOf(Props[DummyChildActor])

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0){ p => if (p == Int.MaxValue) 0 else p + 1 } map { v =>
    genCount.incrementAndGet()
    v
  }

  val source = Source.fromIterator(generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(counter) {
    implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        source ~> killSwitch.flow[Int] ~> throttle ~> sink
        ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>

      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! matValue
  }

  override def shutdown() = {
    val f = super.shutdown()
    defaultMidActorStop(Seq(dummyChildActor))
    f
  }
} 
Example 5
Source File: KillSwitchStream.scala    From squbs   with Apache License 2.0
package org.squbs.stream
import java.util.concurrent.atomic.AtomicLong

import akka.stream.ClosedShape
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object KillSwitchStream {
  val genCount = new AtomicLong(0L)
}

class KillSwitchStream extends PerpetualStream[Future[Long]] {
  import KillSwitchStream._
  import org.squbs.unicomplex.Timeouts._

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0){ p => if (p == Int.MaxValue) 0 else p + 1 } map { v =>
    genCount.incrementAndGet()
    v
  }

  val source = Source.fromIterator(generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(counter) {
    implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        source ~> killSwitch.flow[Int] ~> throttle ~> sink
        ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>

      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! matValue
  }
} 
Example 6
Source File: ProperShutdownStream.scala    From squbs   with Apache License 2.0
package org.squbs.stream
import java.util.concurrent.atomic.AtomicLong

import akka.Done
import akka.actor.ActorRef
import akka.stream.ClosedShape
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object ProperShutdownStream {
  val genCount = new AtomicLong(0L)
}

class ProperShutdownStream extends PerpetualStream[(ActorRef, Future[Long])] {
  import ProperShutdownStream._
  import org.squbs.unicomplex.Timeouts._

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0){ p => if (p == Int.MaxValue) 0 else p + 1 } map { v =>
    genCount.incrementAndGet()
    v
  }

  val managedSource = LifecycleManaged().source(Source fromIterator generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(managedSource, counter)((a, b) => (a._2, b)) {
    implicit builder =>
    (source, sink) =>
      import GraphDSL.Implicits._
      source ~> throttle ~> sink
      ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>
      val (_, fCount) = matValue

      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! fCount
  }

  override def shutdown() = {
    super.shutdown()
    import context.dispatcher
    val (actorRef, fCount) = matValue
    val fStopped = gracefulStop(actorRef, awaitMax)
    for { _ <- fCount; _ <- fStopped } yield Done
  }
} 
Example 7
Source File: TestAvroConsumer.scala    From asura   with MIT License
package asura.kafka.consumer

import akka.actor.ActorSystem
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink}
import asura.kafka.avro.SampleAvroClass
import com.typesafe.scalalogging.StrictLogging
import io.confluent.kafka.serializers.{AbstractKafkaAvroSerDeConfig, KafkaAvroDeserializer, KafkaAvroDeserializerConfig}
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization._

import scala.collection.JavaConverters._

object TestAvroConsumer extends StrictLogging {

  def main(args: Array[String]): Unit = {

    implicit val system = ActorSystem("consumer")
    implicit val materializer = ActorMaterializer()
    implicit val ec = system.dispatcher

    val schemaRegistryUrl = ""
    val bootstrapServers = ""
    val topic = ""
    val group = ""

    val kafkaAvroSerDeConfig = Map[String, Any](
      AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG -> schemaRegistryUrl,
      KafkaAvroDeserializerConfig.SPECIFIC_AVRO_READER_CONFIG -> true.toString
    )
    val consumerSettings: ConsumerSettings[String, SampleAvroClass] = {
      val kafkaAvroDeserializer = new KafkaAvroDeserializer()
      kafkaAvroDeserializer.configure(kafkaAvroSerDeConfig.asJava, false)
      val deserializer = kafkaAvroDeserializer.asInstanceOf[Deserializer[SampleAvroClass]]

      ConsumerSettings(system, new StringDeserializer, deserializer)
        .withBootstrapServers(bootstrapServers)
        .withGroupId(group)
        .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
    }

    val samples = (1 to 3)
    val (control, result) = Consumer
      .plainSource(consumerSettings, Subscriptions.topics(topic))
      .take(samples.size.toLong)
      .map(_.value())
      .toMat(Sink.seq)(Keep.both)
      .run()

    control.shutdown()
    result.map(records => records.foreach(record => logger.info(s"${record}")))
  }
} 
Example 8
Source File: ChatClient.scala    From akka-http-scala-js-websocket-chat   with MIT License
package example.akkawschat.cli

import scala.concurrent.Future

import akka.actor.ActorSystem

import akka.stream.scaladsl.{ Keep, Source, Sink, Flow }

import akka.http.scaladsl.Http
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.model.ws._

import upickle.default._

import shared.Protocol

object ChatClient {
  def connect[T](endpoint: Uri, handler: Flow[Protocol.Message, String, T])(implicit system: ActorSystem): Future[T] = {
    val wsFlow: Flow[Message, Message, T] =
      Flow[Message]
        .collect {
          case TextMessage.Strict(msg) ⇒ read[Protocol.Message](msg)
        }
        .viaMat(handler)(Keep.right)
        .map(TextMessage(_))

    val (fut, t) = Http().singleWebSocketRequest(WebSocketRequest(endpoint), wsFlow)
    fut.map {
      case v: ValidUpgrade                         ⇒ t
      case InvalidUpgradeResponse(response, cause) ⇒ throw new RuntimeException(s"Connection to chat at $endpoint failed with $cause")
    }(system.dispatcher)
  }

  def connect[T](endpoint: Uri, in: Sink[Protocol.Message, Any], out: Source[String, Any])(implicit system: ActorSystem): Future[Unit] =
    connect(endpoint, Flow.fromSinkAndSource(in, out)).map(_ ⇒ ())(system.dispatcher)

  def connect[T](endpoint: Uri, onMessage: Protocol.Message ⇒ Unit, out: Source[String, Any])(implicit system: ActorSystem): Future[Unit] =
    connect(endpoint, Sink.foreach(onMessage), out)
} 
Example 9
Source File: ReloadableSchemaProvider.scala    From graphql-gateway   with Apache License 2.0
package sangria.gateway.schema

import java.util.concurrent.atomic.AtomicReference

import akka.actor.ActorSystem
import akka.stream.{Materializer, OverflowStrategy}
import akka.stream.scaladsl.{BroadcastHub, Keep, RunnableGraph, Source}
import better.files.File
import sangria.gateway.AppConfig
import sangria.gateway.file.FileMonitorActor
import sangria.gateway.http.client.HttpClient
import sangria.gateway.schema.materializer.{GatewayContext, GatewayMaterializer}
import sangria.gateway.util.Logging

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

// TODO: on a timer reload all external schemas and check for changes
class ReloadableSchemaProvider(config: AppConfig, client: HttpClient, mat: GatewayMaterializer)(implicit system: ActorSystem, ec: ExecutionContext, amat: Materializer) extends SchemaProvider[GatewayContext, Any] with Logging {
  val loader = new SchemaLoader(config, client, mat)
  val schemaRef = new AtomicReference[Option[SchemaInfo[GatewayContext, Any]]](None)

  system.actorOf(FileMonitorActor.props(config.watch.allFiles, config.watch.threshold, config.watch.allGlobs, reloadSchema))

  private val producer = Source.actorRef[Boolean](100, OverflowStrategy.dropTail)
  private val runnableGraph = producer.toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both)
  private val (changesPublisher, changesSource) = runnableGraph.run()

  val schemaChanges = Some(changesSource)

  def schemaInfo =
    schemaRef.get() match {
      case v @ Some(_) ⇒ Future.successful(v)
      case None ⇒ reloadSchema
    }

  def reloadSchema(files: Vector[File]): Unit = {
    logger.info(s"Schema files are changed: ${files mkString ", "}. Reloading schema")

    reloadSchema
  }

  def reloadSchema: Future[Option[SchemaInfo[GatewayContext, Any]]] =
    loader.loadSchema.andThen {
      case Success(Some(newSchema)) ⇒
        schemaRef.get() match {
          case Some(currentSchema) ⇒
            val changes = newSchema.schema.compare(currentSchema.schema)
            val renderedChanges =
              if (changes.nonEmpty)
                " with following changes:\n" + changes.map(c ⇒ "  * " + c.description + (if (c.breakingChange) " (breaking)" else "")).mkString("\n")
              else
                " without any changes."

            changesPublisher ! true
            logger.info(s"Schema successfully reloaded$renderedChanges")
          case None ⇒
            logger.info(s"Schema successfully loaded from files:\n${newSchema.files.map(f ⇒ "  * " + f).mkString("\n")}")
        }

        schemaRef.set(Some(newSchema))
      case Failure(error) ⇒
        logger.error("Failed to load the schema", error)
    }
} 
Example 10
Source File: LogStreamsSpec.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist.master.logging

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.testkit.TestKit
import io.hydrosphere.mist.core.MockitoSugar
import io.hydrosphere.mist.core.logging.LogEvent
import io.hydrosphere.mist.master.FilteredException
import org.mockito.Mockito.verify
import org.scalatest.{FunSpecLike, Matchers}

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

class LogStreamsSpec extends TestKit(ActorSystem("log-service-test"))
  with FunSpecLike
  with MockitoSugar
  with Matchers {

  implicit val materializer = ActorMaterializer()

  it("should store events") {
    val writer = mock[LogsWriter]
    when(writer.write(any[String], any[Seq[LogEvent]]))
      .thenReturn(Future.successful(LogUpdate("jobId", Seq.empty, 1)))

    val out = Source.single(LogEvent.mkDebug("id", "message"))
      .via(LogStreams.storeFlow(writer))
      .take(1)
      .toMat(Sink.seq)(Keep.right).run()

    val updates = Await.result(out, Duration.Inf)

    updates.size shouldBe 1
    verify(writer).write(any[String], any[Seq[LogEvent]])
  }

  it("should ignore errors") {
    val event = LogEvent.mkDebug("id", "message")
    val writer = mock[LogsWriter]
    when(writer.write(any[String], any[Seq[LogEvent]]))
      .thenSuccess(LogUpdate("id", Seq(event), 1))
      .thenFailure(FilteredException())
      .thenSuccess(LogUpdate("id", Seq(event), 1))
      .thenFailure(FilteredException())
      .thenSuccess(LogUpdate("id", Seq(event), 1))

    val in = (1 to 5).map(i => LogEvent.mkDebug(s"job-$i", "message"))
    val future = Source(in)
      .via(LogStreams.storeFlow(writer))
      .take(3)
      .toMat(Sink.seq)(Keep.right).run()


    val updates = Await.result(future, Duration.Inf)
    updates.flatMap(_.events).size shouldBe 3
  }

} 
Example 11
Source File: AkkaStreamProcess.scala    From aecor   with MIT License
package aecor.distributedprocessing

import aecor.distributedprocessing.DistributedProcessing._
import aecor.util.effect._
import akka.stream.scaladsl.{ Keep, Sink, Source }
import akka.stream.{ KillSwitches, Materializer }
import cats.effect.Async
import cats.implicits._

object AkkaStreamProcess {
  final class Builder[F[_]] {
    def apply[M](source: Source[Unit, M])(implicit F: Async[F],
                                          materializer: Materializer): Process[F] =
      Process(run = F.delay {
        val (killSwitch, terminated) = source
          .viaMat(KillSwitches.single)(Keep.right)
          .toMat(Sink.ignore)(Keep.both)
          .run()
        RunningProcess(F.fromFuture(terminated).void, F.delay(killSwitch.shutdown()))
      })
  }
  def apply[F[_]]: Builder[F] = new Builder[F]
} 
Example 12
Source File: DefaultScheduleEventJournal.scala    From aecor   with MIT License
package aecor.schedule.process

import java.util.UUID

import aecor.data.{ Committable, ConsumerId, EntityEvent, EventTag }
import aecor.runtime.akkapersistence.readside.CommittableEventJournalQuery
import aecor.schedule.{ ScheduleBucketId, ScheduleEvent }
import aecor.util.effect._
import akka.stream.Materializer
import akka.stream.scaladsl.{ Keep, Sink }
import cats.effect.Effect
import cats.implicits._

object DefaultScheduleEventJournal {
  def apply[F[_]: Effect](
    consumerId: ConsumerId,
    parallelism: Int,
    aggregateJournal: CommittableEventJournalQuery[F, UUID, ScheduleBucketId, ScheduleEvent],
    eventTag: EventTag
  )(implicit materializer: Materializer): DefaultScheduleEventJournal[F] =
    new DefaultScheduleEventJournal(consumerId, parallelism, aggregateJournal, eventTag)
}

final class DefaultScheduleEventJournal[F[_]: Effect](
  consumerId: ConsumerId,
  parallelism: Int,
  aggregateJournal: CommittableEventJournalQuery[F, UUID, ScheduleBucketId, ScheduleEvent],
  eventTag: EventTag
)(implicit materializer: Materializer)
    extends ScheduleEventJournal[F] {
  override def processNewEvents(
    f: EntityEvent[ScheduleBucketId, ScheduleEvent] => F[Unit]
  ): F[Unit] =
    Effect[F].fromFuture {
      aggregateJournal
        .currentEventsByTag(eventTag, consumerId)
        .mapAsync(parallelism)(_.map(_.event).traverse(f).unsafeToFuture())
        .fold(Committable.unit[F])(Keep.right)
        .mapAsync(1)(_.commit.unsafeToFuture())
        .runWith(Sink.ignore)
    }.void
} 
Example 13
Source File: Fs2AkkaStreamInterop.scala    From aecor   with MIT License 5 votes vote down vote up
package aecor.example.common

import akka.stream.Materializer
import akka.stream.scaladsl.{ Keep, Sink, Source }
import cats.effect.ConcurrentEffect
import fs2.Stream
import fs2.interop.reactivestreams._
import cats.implicits._

object Fs2AkkaStreamInterop {
  implicit final class SourceToStream[A, Mat](val self: Source[A, Mat]) extends AnyVal {
    def materializeToStream[F[_]](
      materializer: Materializer
    )(implicit F: ConcurrentEffect[F]): F[(Mat, Stream[F, A])] = F.delay {
      val (mat, publisher) = self.toMat(Sink.asPublisher(false))(Keep.both).run()(materializer)
      (mat, publisher.toStream[F])
    }
    def toStream[F[_]](materializer: Materializer)(implicit F: ConcurrentEffect[F]): Stream[F, A] =
      Stream.force(materializeToStream[F](materializer).map(_._2))
  }
} 
Example 14
Source File: JavaFlowSvcSpec.scala    From squbs   with Apache License 2.0
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpEntity.Chunked
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, StatusCodes}
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.{AsyncFlatSpecLike, BeforeAndAfterAll, Matchers}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.Timeouts._

object JavaFlowSvcSpec {

  val classPaths = Array(getClass.getClassLoader.getResource("classpaths/JavaFlowSvc").getPath)

  val config = ConfigFactory.parseString(
    s"""
       |default-listener.bind-port = 0
       |squbs {
       |  actorsystem-name = JavaFlowSvcSpec
       |  ${JMX.prefixConfig} = true
       |}
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()
}

class JavaFlowSvcSpec extends TestKit(
  JavaFlowSvcSpec.boot.actorSystem) with AsyncFlatSpecLike with BeforeAndAfterAll with Matchers {

  implicit val am = ActorMaterializer()

  val portBindingsF = (Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]]
  val portF = portBindingsF map { bindings => bindings("default-listener") }

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  it should "handle a normal request" in {
    for {
      port <- portF
      response <- entityAsString(s"http://127.0.0.1:$port/javaflowsvc/ping")
    } yield {
      response shouldBe "pong"
    }
  }

  it should "handle a chunked request and be able to provide a chunked response" in {
    val requestChunks = Source.single("Hi this is a test")
      .mapConcat { s => s.split(' ').toList }
      .map(HttpEntity.ChunkStreamPart(_))

    for {
      port <- portF
      response <- post(s"http://127.0.0.1:$port/javaflowsvc/chunks",
                       Chunked(ContentTypes.`text/plain(UTF-8)`, requestChunks))
      responseString <- response.entity.dataBytes.map(_.utf8String).toMat(Sink.fold("") { _ + _})(Keep.right).run()
    } yield {
      response.entity shouldBe 'chunked
      responseString should be("Received 5 chunks and 13 bytes.\r\nThis is the last chunk!")
    }
  }

  it should "get an InternalServerError with blank response if Flow collapses" in {
    for {
      port <- portF
      errResp <- get(s"http://127.0.0.1:$port/javaflowsvc/throwit")
      respEntity <- errResp.entity.toStrict(awaitMax)
    } yield {
      errResp.status shouldBe StatusCodes.InternalServerError
      respEntity.data.utf8String shouldBe 'empty
    }
  }
} 
Example 15
Source File: ThroughputMeasurementFlowTest.scala    From akka-viz   with MIT License
package akkaviz.events

import akka.actor.{ActorRef, ActorSystem}
import akka.pattern
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.testkit.{TestActorRef, TestKit}
import akkaviz.events.types.{BackendEvent, ReceivedWithId, ThroughputMeasurement}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{Matchers, WordSpecLike}

import scala.concurrent.Future

class ThroughputMeasurementFlowTest extends TestKit(ActorSystem("FlowTestSystem"))
    with WordSpecLike with Matchers with ScalaFutures {

  import scala.concurrent.duration._

  implicit val materializer = ActorMaterializer()(system)

  val firstRef = TestActorRef[SomeActor](new SomeActor, "first")
  val secondRef = TestActorRef[SomeActor](new SomeActor, "second")

  override implicit val patienceConfig = PatienceConfig(timeout = 5.seconds)

  "ThroughputMeasurementFlow" should {

    "not emit any measurements if there are no Received events" in {
      val src = Source.empty[BackendEvent]
      val sink: Sink[BackendEvent, Future[List[BackendEvent]]] = Sink.fold(List.empty[BackendEvent])((list, ev) => ev :: list)

      val materialized = ThroughputMeasurementFlow(1.second).runWith(src, sink)._2

      whenReady(materialized) { r =>
        r should be('empty)
      }
    }

    "emit proper measured value for one message" in {
      val src = Source.single(ReceivedWithId(1, ActorRef.noSender, firstRef, "sup", true))
      val mat = src.via(ThroughputMeasurementFlow(1.second))
        .toMat(Sink.head[ThroughputMeasurement])(Keep.right).run()

      whenReady(mat) { measurement =>
        measurement.actorRef should equal(firstRef)
        measurement.msgsPerSecond should equal(1.0)
      }
    }

    "emit measured value for one message and 0 for actors which didn't receive anything" in {
      import system.dispatcher
      val src = Source(List(
        ReceivedWithId(1, ActorRef.noSender, firstRef, "sup", true),
        ReceivedWithId(2, ActorRef.noSender, secondRef, "sup", true)
      )).concat(Source.fromFuture(pattern.after(2.seconds, system.scheduler) {
        Future.successful(ReceivedWithId(3, ActorRef.noSender, firstRef, "sup", true))
      }))

      val mat = src.via(ThroughputMeasurementFlow(1.second))
        .toMat(Sink.fold(List.empty[ThroughputMeasurement]) { (list, ev) => ev :: list })(Keep.right).run()

      whenReady(mat) { measurements =>
        val measurementsFor = measurements.groupBy(_.actorRef)
        measurementsFor(firstRef).map(_.msgsPerSecond) should not contain 0.0
        measurementsFor(secondRef).sortBy(_.timestamp).map(_.msgsPerSecond) should contain inOrder (1.0, 0.0)
      }
    }
  }
} 
Example 16
Source File: LagSim.scala    From kafka-lag-exporter   with Apache License 2.0
package com.lightbend.kafkalagexporter.integration

import akka.actor.Cancellable
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{Behavior, PostStop}
import akka.kafka.{CommitterSettings, Subscriptions}
import akka.kafka.scaladsl.{Committer, Consumer}
import akka.kafka.testkit.scaladsl.KafkaSpec
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Keep
import akka.stream.testkit.scaladsl.TestSink
import org.scalatest.concurrent.ScalaFutures

import scala.concurrent.Await
import scala.concurrent.duration._

trait LagSim extends KafkaSpec with ScalaFutures {
  private implicit val patience: PatienceConfig = PatienceConfig(30.seconds, 1.second)

  class LagSimulator(topic: String, group: String) {
    private var offset: Int = 0
    private val committerSettings = CommitterSettings(system).withMaxBatch(1).withParallelism(1)

    private lazy val (consumerControl, consumerProbe) = Consumer
      .committableSource(consumerDefaults.withGroupId(group), Subscriptions.topics(topic))
      .buffer(size = 1, OverflowStrategy.backpressure)
      .map { elem =>
        log.debug("Committing elem with offset: {}", elem.committableOffset.partitionOffset)
        elem.committableOffset.commitScaladsl()
      }
      .toMat(TestSink.probe)(Keep.both)
      .run()

    def produceElements(num: Int): Unit = {
      Await.result(produce(topic, offset to (offset + num)), remainingOrDefault)
      offset += num + 1
    }

    // TODO: Replace this with regular Kafka Consumer for more fine-grained control over committing
    def consumeElements(num: Int): Unit = {
      consumerProbe
        .request(num)
        .expectNextN(num)
    }

    def shutdown(): Unit = {
      consumerControl.shutdown().futureValue
      consumerProbe.cancel()
    }
  }

  sealed trait Simulator
  case class Tick(produce: Int, consume: Int) extends Simulator

  def lagSimActor(simulator: LagSimulator,
                  scheduledTick: Cancellable = Cancellable.alreadyCancelled): Behavior[Simulator] =
    Behaviors.receive[Simulator] {
      case (context, tick @ Tick(produce, consume)) =>
        simulator.produceElements(produce)
        simulator.consumeElements(consume)
        lagSimActor(simulator, context.scheduleOnce(1 second, context.self, tick))
    } receiveSignal {
      case (_, PostStop) =>
        simulator.shutdown()
        scheduledTick.cancel()
        Behaviors.same
    }

} 
Example 17
Source File: TsvRetrieverFromFile.scala    From CM-Well   with Apache License 2.0
package cmwell.dc.stream

import java.io.{BufferedWriter, File, FileWriter}

import akka.actor.ActorSystem
import akka.stream.{KillSwitch, KillSwitches, Materializer}
import akka.stream.Supervision.Decider
import akka.stream.contrib.SourceGen
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import akka.util.ByteString
import cmwell.dc.LazyLogging
import cmwell.dc.stream.MessagesTypesAndExceptions.{DcInfo, InfotonData}
import cmwell.dc.stream.TsvRetriever.{logger, TsvFlowOutput}
import cmwell.util.resource._

import scala.concurrent.Future
import scala.util.{Failure, Success}
import scala.concurrent.ExecutionContext.Implicits.global


object TsvRetrieverFromFile extends LazyLogging {

  def apply(dcInfo: DcInfo)(implicit mat: Materializer,
                            system: ActorSystem): Source[InfotonData, (KillSwitch, Future[Seq[Option[String]]])] = {
    val persistFile = dcInfo.tsvFile.get + ".persist"

    def appendToPersistFile(str: String): Unit = {
      val bw = new BufferedWriter(new FileWriter(persistFile, true))
      bw.write(str)
      bw.close()
    }

    val linesToDrop = dcInfo.positionKey.fold {
      if (!new File(persistFile).exists) 0L
      else using(scala.io.Source.fromFile(persistFile))(_.getLines.toList.last.toLong)
    }(pos => pos.toLong)
    val positionKeySink = Flow[InfotonData]
      .recover {
        case e: Throwable => InfotonData(null, null, -1)
      }
      .scan(linesToDrop) {
        case (count, InfotonData(null, null, -1)) => {
          appendToPersistFile("crash at: " + count + "\n" + count.toString + "\n")
          count
        }
        case (count, _) => {
          val newCount = count + 1
          if (newCount % 10000 == 0) appendToPersistFile(newCount.toString + "\n")
          newCount
        }
      }
      .toMat(Sink.last)(
        (_, right) =>
          right.map { count =>
            appendToPersistFile(count.toString + "\n")
            Seq.fill(2)(Option(count.toString))
        }
      )

    Source
      .fromIterator(() => scala.io.Source.fromFile(dcInfo.tsvFile.get).getLines())
      .drop {
        logger.info(s"Dropping $linesToDrop initial lines from file ${dcInfo.tsvFile.get} for sync ${dcInfo.key}")
        linesToDrop
      }
      .viaMat(KillSwitches.single)(Keep.right)
      .map(line => TsvRetriever.parseTSVAndCreateInfotonDataFromIt(ByteString(line)))
      .alsoToMat(positionKeySink)(Keep.both)
  }
} 
Example 18
Source File: GroupChunkerSpec.scala    From CM-Well   with Apache License 2.0
package cmwell.tools.data.utils.chunkers

import akka.stream.scaladsl.Keep
import akka.stream.testkit.scaladsl.{TestSink, TestSource}
import akka.util.ByteString
import cmwell.tools.data.helpers.BaseStreamSpec

import scala.concurrent.duration._

class GroupSpecAutoFusingOn  extends { val autoFusing = true  } with GroupChunkerSpec
class GroupSpecAutoFusingOff extends { val autoFusing = false } with GroupChunkerSpec

trait GroupChunkerSpec extends BaseStreamSpec {
  "GroupChunker" should "emit elements when new group has arrived" in {
    val (pub, sub) = TestSource.probe[String]
      .map(x => ByteString(x.toString))
      .via(GroupChunker(b => ByteString(b.size), 2.seconds)) // group byte-strings by size
      .map(_.map(_.utf8String))
      .toMat(TestSink.probe[Seq[String]])(Keep.both)
      .run()

    sub.request(100)
    pub.sendNext("hello")
    pub.sendNext("world")
    pub.sendNext("nba")
    pub.sendNext("ibm")
    pub.sendNext("what")
    pub.sendNext("is")
    pub.sendNext("life")
    pub.sendComplete()
    sub.expectNext(Seq("hello", "world"))
    sub.expectNext(Seq("nba", "ibm"))
    sub.expectNext(Seq("what"))
    sub.expectNext(Seq("is"))
    sub.expectNext(Seq("life"))
    sub.expectComplete()
  }

  it should "emit elements when time threshold has reached" in {
    val (pub, sub) = TestSource.probe[String]
      .map(x => ByteString(x.toString))
      .via(GroupChunker(b => ByteString(b.size), 2.seconds)) // group byte-strings by size
      .map(_.map(_.utf8String))
      .toMat(TestSink.probe[Seq[String]])(Keep.both)
      .run()

    sub.request(4)

    pub.sendNext("one")
    sub.expectNext(Seq("one"))

    pub.sendNext("two")
    sub.expectNext(Seq("two"))

    pub.sendNext("four")
    pub.sendNext("five")
    pub.sendComplete()
    sub.expectNext(Seq("four","five"))
    sub.expectComplete()
  }
} 
Example 19
Source File: AkkaStreamsHelloWorldApp3.scala    From Scala-Reactive-Programming   with MIT License
package com.packt.publishing.akka.streams.hello

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Keep, RunnableGraph, Sink, Source}
import akka.{Done, NotUsed}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Failure, Success}

object AkkaStreamsHelloWorldApp3 extends App{

  implicit val actorSystem = ActorSystem("HelloWorldSystem")
  implicit val materializer = ActorMaterializer()

  val helloWorldSource:Source[String,NotUsed] = Source.single("Akka Streams Hello World")
  val helloWorldSink: Sink[String,Future[Done]] = Sink.foreach(println)
  val helloWorldFlow:Flow[String,String,NotUsed] = Flow[String].map(str => str.toUpperCase)

  val helloWorldGraph:RunnableGraph[NotUsed] = helloWorldSource
                                                  .via(helloWorldFlow)
                                                  .to(helloWorldSink)

  val helloWorldGraph2:RunnableGraph[Future[Done]] = helloWorldSource
                                                  .via(helloWorldFlow)
                                                  .toMat(helloWorldSink)(Keep.right)

  helloWorldGraph.run

  val helloWorldMaterializedValue: Future[Done] = helloWorldGraph2.run
  helloWorldMaterializedValue.onComplete{
    case Success(Done) =>
      println("HelloWorld Stream ran succssfully.")
    case Failure(exception) =>
      println(s"HelloWorld Stream ran into an issue: ${exception}.")
  }

  actorSystem.terminate
} 
Example 20
Source File: ChatController.scala    From Scala-Reactive-Programming   with MIT License
package controllers

import java.net.URI
import javax.inject._
import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.Materializer
import akka.stream.scaladsl.{BroadcastHub, Flow, Keep, MergeHub, Source}
import play.api.Logger
import play.api.mvc._
import scala.concurrent.{ExecutionContext, Future}

@Singleton
class ChatController @Inject()(cc: ControllerComponents)
                              (implicit actorSystem: ActorSystem,
                               mat: Materializer,
                               executionContext: ExecutionContext,
                               webJarsUtil: org.webjars.play.WebJarsUtil) 
                               extends AbstractController(cc) with RequestMarkerContext {

  private type WSMessage = String

  private val logger = Logger(getClass)

  private implicit val logging = Logging(actorSystem.eventStream, logger.underlyingLogger.getName)

  private val (chatSink, chatSource) = {
    val source = MergeHub.source[WSMessage]
      .log("source")
      .recoverWithRetries(-1, { case _: Exception ⇒ Source.empty })

    val sink = BroadcastHub.sink[WSMessage]
    source.toMat(sink)(Keep.both).run()
  }

  private val userFlow: Flow[WSMessage, WSMessage, _] = {
     Flow.fromSinkAndSource(chatSink, chatSource)
  }

  def index: Action[AnyContent] = Action { implicit request: RequestHeader =>
    val webSocketUrl = routes.ChatController.chat().webSocketURL()
    logger.info(s"index: ")
    Ok(views.html.index(webSocketUrl))
  }

  def chat(): WebSocket = {
    WebSocket.acceptOrResult[WSMessage, WSMessage] {
      case rh if sameOriginCheck(rh) =>
        Future.successful(userFlow).map { flow =>
          Right(flow)
        }.recover {
          case e: Exception =>
            val msg = "Cannot create websocket"
            logger.error(msg, e)
            val result = InternalServerError(msg)
            Left(result)
        }

      case rejected =>
        logger.error(s"Request ${rejected} failed same origin check")
        Future.successful {
          Left(Forbidden("forbidden"))
        }
    }
  }

  private def sameOriginCheck(implicit rh: RequestHeader): Boolean = {
    logger.debug("Checking the ORIGIN ")
    
    rh.headers.get("Origin") match {
      case Some(originValue) if originMatches(originValue) =>
        logger.debug(s"originCheck: originValue = $originValue")
        true

      case Some(badOrigin) =>
        logger.error(s"originCheck: rejecting request because Origin header value ${badOrigin} is not in the same origin")
        false

      case None =>
        logger.error("originCheck: rejecting request because no Origin header found")
        false
    }
  }

  private def originMatches(origin: String): Boolean = {
    try {
      val url = new URI(origin)
      url.getHost == "localhost" &&
        (url.getPort match { case 9000 | 19001 => true; case _ => false })
    } catch {
      case e: Exception => false
    }
  }

} 
Example 21
Source File: AkkaStreamsPartitionHubApp.scala    From Scala-Reactive-Programming   with MIT License
package com.packt.publishing.dynamic.akka.streams

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, PartitionHub, RunnableGraph, Source}

import scala.concurrent.duration._

object AkkaStreamsPartitionHubApp extends App {
    implicit val actorSystem = ActorSystem("PartitionHubSystem")
    implicit val materializer = ActorMaterializer()

    val producer = Source.tick(1.second, 1.second, "message")
                         .zipWith(Source(1 to 10))((a, b) ⇒ s"$a-$b")

    val runnableGraph: RunnableGraph[Source[String, NotUsed]] =
        producer.toMat(PartitionHub.sink(
            (size, elem) ⇒ math.abs(elem.hashCode) % size,
            startAfterNrOfConsumers = 2, bufferSize = 256))(Keep.right)

    val fromProducer: Source[String, NotUsed] = runnableGraph.run()

    fromProducer.runForeach(msg ⇒ println("consumer1: " + msg))
    fromProducer.runForeach(msg ⇒ println("consumer2: " + msg))

    Thread.sleep(5000)
    actorSystem.terminate
} 
Example 22
Source File: EventsByPersistenceIdFastForwardSpec.scala    From akka-persistence-cassandra   with Apache License 2.0
package akka.persistence.cassandra.query

import java.util.UUID

import akka.persistence.PersistentRepr
import akka.persistence.cassandra.{ CassandraLifecycle, CassandraSpec }
import akka.stream.scaladsl.Keep
import akka.stream.testkit.scaladsl.TestSink
import com.typesafe.config.ConfigFactory
import org.scalatest.time.{ Milliseconds, Seconds, Span }

object EventsByPersistenceIdFastForwardSpec {

  // separate from EventsByPersistenceIdWithControlSpec since it needs the refreshing enabled
  val config = ConfigFactory.parseString(s"""
    akka.persistence.cassandra.journal.keyspace=EventsByPersistenceIdFastForwardSpec
    akka.persistence.cassandra.query.refresh-interval = 250ms
    akka.persistence.cassandra.query.max-result-size-query = 2
    akka.persistence.cassandra.journal.target-partition-size = 15
    """).withFallback(CassandraLifecycle.config)
}

class EventsByPersistenceIdFastForwardSpec
    extends CassandraSpec(EventsByPersistenceIdFastForwardSpec.config)
    with DirectWriting {

  override implicit val patience = PatienceConfig(timeout = Span(5, Seconds), interval = Span(100, Milliseconds))

  "be able to fast forward when currently looking for missing sequence number" in {
    val w1 = UUID.randomUUID().toString
    val evt1 = PersistentRepr("e-1", 1L, "f", "", writerUuid = w1)
    writeTestEvent(evt1)

    val src = queries.eventsByPersistenceIdWithControl("f", 0L, Long.MaxValue)
    val (futureControl, probe) = src.map(_.event).toMat(TestSink.probe[Any])(Keep.both).run()
    val control = futureControl.futureValue
    probe.request(5)

    val evt3 = PersistentRepr("e-3", 3L, "f", "", writerUuid = w1)
    writeTestEvent(evt3)

    probe.expectNext("e-1")

    system.log.debug("Sleeping for query to go into look-for-missing-seqnr-mode")
    Thread.sleep(2000)

    // then we fast forward past the gap
    control.fastForward(3L)
    probe.expectNext("e-3")

    val evt2 = PersistentRepr("e-2", 2L, "f", "", writerUuid = w1)
    val evt4 = PersistentRepr("e-4", 4L, "f", "", writerUuid = w1)
    writeTestEvent(evt2)
    writeTestEvent(evt4)
    probe.expectNext("e-4")

    probe.cancel()
  }
} 
Example 23
Source File: RequestRunner.scala    From aws-spi-akka-http   with Apache License 2.0
package com.github.matsluni.akkahttpspi

import java.util.concurrent.CompletableFuture

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.stream.Materializer
import akka.stream.scaladsl.{Keep, Sink}
import org.slf4j.LoggerFactory
import software.amazon.awssdk.http.SdkHttpFullResponse
import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler

import scala.compat.java8.FutureConverters
import scala.concurrent.ExecutionContext
import scala.collection.JavaConverters._

class RequestRunner(connectionPoolSettings: ConnectionPoolSettings)(implicit sys: ActorSystem,
                                                          ec: ExecutionContext,
                                                          mat: Materializer) {
  val logger = LoggerFactory.getLogger(this.getClass)

  def run(httpRequest: HttpRequest,
          handler: SdkAsyncHttpResponseHandler): CompletableFuture[Void] = {
    val result = Http()
      .singleRequest(httpRequest, settings = connectionPoolSettings)
      .flatMap { response =>
        val sdkResponse = SdkHttpFullResponse.builder()
          .headers(response.headers.groupBy(_.name()).map{ case (k, v) => k -> v.map(_.value()).asJava }.asJava)
          .statusCode(response.status.intValue())
          .statusText(response.status.reason)
          .build

        handler.onHeaders(sdkResponse)

        val (complete, publisher) = response
          .entity
          .dataBytes
          .map(_.asByteBuffer)
          .alsoToMat(Sink.ignore)(Keep.right)
          .toMat(Sink.asPublisher(fanout = false))(Keep.both)
          .run()

        handler.onStream(publisher)

        complete
      }

    result.failed.foreach(handler.onError)
    FutureConverters.toJava(result.map(_ => null: Void)).toCompletableFuture
  }
} 
Example 24
Source File: AkkaHttpLambdaHandler.scala    From scala-server-lambda   with MIT License
package io.github.howardjohn.lambda.akka

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpHeader.ParsingResult
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import io.github.howardjohn.lambda.ProxyEncoding._
import io.github.howardjohn.lambda.{LambdaHandler, ProxyEncoding}

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

class AkkaHttpLambdaHandler(route: Route)(
  implicit system: ActorSystem,
  materializer: ActorMaterializer,
  ec: ExecutionContext
) extends LambdaHandler {
  import AkkaHttpLambdaHandler._

  override def handleRequest(request: ProxyRequest): ProxyResponse =
    Await.result(runRequest(proxyToAkkaRequest(request)), Duration.Inf)

  private def runRequest(request: HttpRequest): Future[ProxyResponse] = {
    val source = Source.single(request)
    val sink = Sink.head[HttpResponse]
    source
      .via(route)
      .toMat(sink)(Keep.right)
      .run()
      .flatMap(asProxyResponse)
  }

  private def proxyToAkkaRequest(request: ProxyRequest): HttpRequest =
    new HttpRequest(
      method = parseHttpMethod(request.httpMethod),
      uri = Uri(ProxyEncoding.reconstructPath(request)),
      headers = parseRequestHeaders(request.headers.getOrElse(Map.empty)),
      entity = parseEntity(request.headers.getOrElse(Map.empty), request.body),
      protocol = HttpProtocols.`HTTP/1.1`
    )

  private def parseEntity(headers: Map[String, String], body: Option[String]): MessageEntity = {
    val defaultContentType = ContentTypes.`text/plain(UTF-8)`
    val contentType = ContentType
      .parse(headers.getOrElse("Content-Type", defaultContentType.value))
      .getOrElse(defaultContentType)

    body match {
      case Some(b) => HttpEntity(contentType, b.getBytes)
      case None => HttpEntity.empty(contentType)
    }
  }

  private def asProxyResponse(resp: HttpResponse): Future[ProxyResponse] =
    Unmarshal(resp.entity)
      .to[String]
      .map { body =>
        ProxyResponse(
          resp.status.intValue(),
          resp.headers.map(h => h.name -> h.value).toMap,
          body
        )
      }
}

private object AkkaHttpLambdaHandler {
  private def parseRequestHeaders(headers: Map[String, String]): List[HttpHeader] =
    headers.map {
      case (k, v) =>
        HttpHeader.parse(k, v) match {
          case ParsingResult.Ok(header, _) => header
          case ParsingResult.Error(err) => throw new RuntimeException(s"Failed to parse header $k:$v with error $err.")
        }
    }.toList

  private def parseHttpMethod(method: String) = method.toUpperCase match {
    case "CONNECT" => HttpMethods.CONNECT
    case "DELETE" => HttpMethods.DELETE
    case "GET" => HttpMethods.GET
    case "HEAD" => HttpMethods.HEAD
    case "OPTIONS" => HttpMethods.OPTIONS
    case "PATCH" => HttpMethods.PATCH
    case "POST" => HttpMethods.POST
    case "PUT" => HttpMethods.PUT
    case "TRACE" => HttpMethods.TRACE
    case other => HttpMethod.custom(other)
  }
} 
Example 25
Source File: RecordProcessorFactoryImpl.scala    From kinesis-stream   with MIT License
package px.kinesis.stream.consumer

import akka.NotUsed
import akka.event.LoggingAdapter
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{KillSwitch, Materializer, OverflowStrategy}
import px.kinesis.stream.consumer.checkpoint.CheckpointTracker
import software.amazon.kinesis.processor.{ShardRecordProcessor, ShardRecordProcessorFactory}

import scala.collection.immutable.Seq
import scala.concurrent.ExecutionContext

class RecordProcessorFactoryImpl(
  sink: Sink[Record, NotUsed],
  workerId: String,
  checkpointTracker: CheckpointTracker,
  killSwitch: KillSwitch
)(implicit am: Materializer, ec: ExecutionContext, logging: LoggingAdapter) extends ShardRecordProcessorFactory {
  override def shardRecordProcessor(): ShardRecordProcessor = {
    val queue = Source
      .queue[Seq[Record]](0, OverflowStrategy.backpressure)
      .mapConcat(identity)
      .toMat(sink)(Keep.left)
      .run()

    new RecordProcessorImpl(queue, checkpointTracker, killSwitch, workerId)
  }
} 
Example 26
Source File: CommandRegistration.scala    From AckCord   with MIT License
package ackcord.commands

import scala.concurrent.Future

import akka.Done
import akka.stream.scaladsl.{Keep, RunnableGraph, Sink, Source}
import akka.stream.{KillSwitches, UniqueKillSwitch}

case class CommandRegistration[Mat](materialized: Mat, onDone: Future[Done], killSwitch: UniqueKillSwitch) {

  def stop(): Unit = killSwitch.shutdown()
}
object CommandRegistration {
  def toSink[A, M](source: Source[A, M]): RunnableGraph[CommandRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).toMat(Sink.ignore) {
      case ((m, killSwitch), done) => CommandRegistration(m, done, killSwitch)
    }

  def withRegistration[A, M](source: Source[A, M]): Source[A, CommandRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).watchTermination() {
      case ((m, killSwitch), done) => CommandRegistration(m, done, killSwitch)
    }
} 
Example 27
Source File: AkkaStreamOps.scala    From phobos   with Apache License 2.0
package ru.tinkoff.phobos.ops

import akka.NotUsed
import akka.stream.scaladsl.{Flow, Keep, Sink}
import javax.xml.stream.XMLStreamConstants
import ru.tinkoff.phobos.decoding._
import scala.concurrent.Future

private[phobos] trait AkkaStreamOps {

  
  def decodingFlowUnsafe[A: XmlDecoder](charset: String = "UTF-8"): Flow[Array[Byte], A, NotUsed] =
    decodingFlow(charset).map(_.fold(throw _, identity))

  def decodingSink[A: XmlDecoder](charset: String = "UTF-8"): Sink[Array[Byte], Future[Either[DecodingError, A]]] =
    decodingFlow(charset).toMat(Sink.head)(Keep.right)

  def decodingSinkUnsafe[A: XmlDecoder](charset: String = "UTF-8"): Sink[Array[Byte], Future[A]] =
    decodingFlowUnsafe(charset).toMat(Sink.head)(Keep.right)
}

private[phobos] case class SinkDecoderState[A](
    xmlStreamReader: XmlStreamReader,
    cursor: Cursor,
    elementDecoder: ElementDecoder[A]
) {
  def withEncoder(that: ElementDecoder[A]): SinkDecoderState[A] = copy(elementDecoder = that)
}

private[phobos] object SinkDecoderState {

  def initial[A](xmlDecoder: XmlDecoder[A], charset: String): SinkDecoderState[A] = {
    val sr: XmlStreamReader = XmlDecoder.createStreamReader(charset)
    val cursor              = new Cursor(sr)
    SinkDecoderState(
      xmlStreamReader = sr,
      cursor = cursor,
      elementDecoder = xmlDecoder.elementdecoder
    )
  }
} 
Example 28
Source File: LocalFilePersistService.scala    From iep-apps   with Apache License 2.0
package com.netflix.atlas.persistence

import akka.Done
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Keep
import akka.stream.scaladsl.RestartFlow
import akka.stream.scaladsl.Sink
import com.netflix.atlas.akka.StreamOps
import com.netflix.atlas.akka.StreamOps.SourceQueue
import com.netflix.atlas.core.model.Datapoint
import com.netflix.iep.service.AbstractService
import com.netflix.spectator.api.Registry
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging
import javax.inject.Inject
import javax.inject.Singleton

import scala.concurrent.Await
import scala.concurrent.Future
import scala.concurrent.duration.Duration

@Singleton
class LocalFilePersistService @Inject()(
  val config: Config,
  val registry: Registry,
  // S3CopyService is actually NOT used by this service, it is here just to guarantee that the
  // shutdown callback (stopImpl) of this service is invoked before S3CopyService's
  val s3CopyService: S3CopyService,
  implicit val system: ActorSystem
) extends AbstractService
    with StrictLogging {
  implicit val ec = scala.concurrent.ExecutionContext.global
  implicit val mat = ActorMaterializer()

  private val queueSize = config.getInt("atlas.persistence.queue-size")

  private val fileConfig = config.getConfig("atlas.persistence.local-file")
  private val dataDir = fileConfig.getString("data-dir")
  private val maxRecords = fileConfig.getLong("max-records")
  private val maxDurationMs = fileConfig.getDuration("max-duration").toMillis
  private val maxLateDurationMs = fileConfig.getDuration("max-late-duration").toMillis
  private val rollingConf = RollingConfig(maxRecords, maxDurationMs, maxLateDurationMs)

  require(queueSize > 0)
  require(maxRecords > 0)
  require(maxDurationMs > 0)

  private var queue: SourceQueue[Datapoint] = _
  private var flowComplete: Future[Done] = _

  override def startImpl(): Unit = {
    logger.info("Starting service")
    val (q, f) = StreamOps
      .blockingQueue[Datapoint](registry, "LocalFilePersistService", queueSize)
      .via(getRollingFileFlow)
      .toMat(Sink.ignore)(Keep.both)
      .run
    queue = q
    flowComplete = f
  }

  private def getRollingFileFlow(): Flow[Datapoint, NotUsed, NotUsed] = {
    import scala.concurrent.duration._
    RestartFlow.withBackoff(
      minBackoff = 1.second,
      maxBackoff = 3.seconds,
      randomFactor = 0,
      maxRestarts = -1
    ) { () =>
      Flow.fromGraph(
        new RollingFileFlow(dataDir, rollingConf, registry)
      )
    }
  }

  // This service should stop the Akka flow when application is shutdown gracefully, and let
  // S3CopyService do the cleanup. It should trigger:
  //   1. stop taking more data points (monitor droppedQueueClosed)
  //   2. close current file writer so that last file is ready to copy to s3
  override def stopImpl(): Unit = {
    logger.info("Stopping service")
    queue.complete()
    Await.result(flowComplete, Duration.Inf)
    logger.info("Stopped service")
  }

  def persist(dp: Datapoint): Unit = {
    queue.offer(dp)
  }
} 
Example 29
Source File: S3CopyService.scala    From iep-apps   with Apache License 2.0
package com.netflix.atlas.persistence

import java.io.File
import java.nio.file.Files
import java.nio.file.Paths

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.KillSwitch
import akka.stream.KillSwitches
import akka.stream.scaladsl.Keep
import akka.stream.scaladsl.Source
import com.netflix.atlas.core.util.Streams
import com.netflix.iep.service.AbstractService
import com.netflix.spectator.api.Registry
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging
import javax.inject.Inject
import javax.inject.Singleton

import scala.concurrent.duration._

@Singleton
class S3CopyService @Inject()(
  val config: Config,
  val registry: Registry,
  implicit val system: ActorSystem
) extends AbstractService
    with StrictLogging {

  private val dataDir = config.getString("atlas.persistence.local-file.data-dir")

  private implicit val mat = ActorMaterializer()

  private var killSwitch: KillSwitch = _
  private val s3Config = config.getConfig("atlas.persistence.s3")

  private val cleanupTimeoutMs = s3Config.getDuration("cleanup-timeout").toMillis
  private val maxInactiveMs = s3Config.getDuration("max-inactive-duration").toMillis
  private val maxFileDurationMs =
    config.getDuration("atlas.persistence.local-file.max-duration").toMillis

  require(
    maxInactiveMs > maxFileDurationMs,
    "`max-inactive-duration` MUST be longer than `max-duration`, otherwise file may be renamed before normal write competes"
  )

  override def startImpl(): Unit = {
    logger.info("Starting service")
    killSwitch = Source
      .tick(1.second, 5.seconds, NotUsed)
      .viaMat(KillSwitches.single)(Keep.right)
      .flatMapMerge(Int.MaxValue, _ => Source(FileUtil.listFiles(new File(dataDir))))
      .toMat(new S3CopySink(s3Config, registry, system))(Keep.left)
      .run()
  }

  override def stopImpl(): Unit = {
    logger.info("Stopping service")
    waitForCleanup()
    if (killSwitch != null) killSwitch.shutdown()
  }

  private def waitForCleanup(): Unit = {
    logger.info("Waiting for cleanup")
    val start = System.currentTimeMillis
    while (hasMoreFiles) {
      if (System.currentTimeMillis() > start + cleanupTimeoutMs) {
        logger.error("Cleanup timeout")
        return
      }
      Thread.sleep(1000)
    }
    logger.info("Cleanup done")
  }

  private def hasMoreFiles: Boolean = {
    try {
      Streams.scope(Files.list(Paths.get(dataDir))) { dir =>
        dir.anyMatch(f => Files.isRegularFile(f))
      }
    } catch {
      case e: Exception => {
        logger.error(s"Error checking hasMoreFiles in $dataDir", e)
        true // Assume there are more files on error, so the check is retried
      }
    }
  }
} 
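
The startImpl above keeps only the KillSwitch by combining Keep.right (for the switch) with Keep.left (to drop the sink's value). A standalone sketch of the same shape, with invented names and timings:

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, KillSwitch, KillSwitches}
import akka.stream.scaladsl.{Keep, Sink, Source}

import scala.concurrent.duration._

object TickKillSwitchSketch extends App {
  implicit val system = ActorSystem("TickKillSwitchSketch")
  implicit val mat = ActorMaterializer()

  // viaMat(...)(Keep.right) keeps the KillSwitch instead of the tick source's Cancellable;
  // toMat(...)(Keep.left) then discards the sink's materialized value.
  val killSwitch: KillSwitch = Source
    .tick(1.second, 1.second, NotUsed)
    .viaMat(KillSwitches.single)(Keep.right)
    .toMat(Sink.foreach[NotUsed](_ => println("tick")))(Keep.left)
    .run()

  Thread.sleep(5000)
  killSwitch.shutdown()
  system.terminate()
}
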
Example 30
Source File: CacheStreams.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord

import scala.collection.mutable

import ackcord.cachehandlers.CacheSnapshotBuilder
import ackcord.gateway.GatewayEvent.ReadyData
import ackcord.gateway.GatewayMessage
import ackcord.requests.SupervisionStreams
import akka.NotUsed
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{BroadcastHub, Flow, Keep, MergeHub, Sink, Source}
import org.slf4j.Logger

object CacheStreams {

  
  def cacheUpdater(
      cacheProcessor: MemoryCacheSnapshot.CacheProcessor
  )(implicit system: ActorSystem[Nothing]): Flow[CacheEvent, (CacheEvent, CacheState), NotUsed] =
    Flow[CacheEvent].statefulMapConcat { () =>
      var state: CacheState    = null
      implicit val log: Logger = system.log

      //We only handle events once we are ready, i.e. after the ready event has been received.
      def isReady: Boolean = state != null

      {
        case readyEvent @ APIMessageCacheUpdate(_: ReadyData, _, _, _, _) =>
          val builder = new CacheSnapshotBuilder(
            0,
            null, //The event will populate this,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            cacheProcessor
          )

          readyEvent.process(builder)

          val snapshot = builder.toImmutable
          state = CacheState(snapshot, snapshot)
          List(readyEvent -> state)
        case handlerEvent: CacheEvent if isReady =>
          val builder = CacheSnapshotBuilder(state.current)
          handlerEvent.process(builder)

          state = state.update(builder.toImmutable)
          List(handlerEvent -> state)
        case _ if !isReady =>
          log.error("Received event before ready")
          Nil
      }
    }
} 
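
cacheUpdater relies on statefulMapConcat: the outer function runs once per materialization and owns the mutable state, the inner function runs per element. A stripped-down sketch, unrelated to AckCord:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source

object StatefulMapConcatSketch extends App {
  implicit val system = ActorSystem("StatefulMapConcatSketch")
  implicit val mat = ActorMaterializer()

  // The outer function is invoked per materialization and holds the running state;
  // the inner function is invoked per element and emits zero or more outputs.
  Source(1 to 5)
    .statefulMapConcat { () =>
      var runningSum = 0
      (elem: Int) => {
        runningSum += elem
        List(elem -> runningSum)
      }
    }
    .runForeach(println) // (1,1) (2,3) (3,6) (4,10) (5,15)
    .onComplete(_ => system.terminate())(system.dispatcher)
}
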
Example 31
Source File: EventRegistration.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord

import scala.concurrent.Future

import akka.Done
import akka.stream.{KillSwitches, UniqueKillSwitch}
import akka.stream.scaladsl.{Keep, RunnableGraph, Sink, Source}

case class EventRegistration[Mat](materialized: Mat, onDone: Future[Done], killSwitch: UniqueKillSwitch) {

  def stop(): Unit = killSwitch.shutdown()
}
object EventRegistration {
  def toSink[A, M](source: Source[A, M]): RunnableGraph[EventRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).toMat(Sink.ignore) {
      case ((m, killSwitch), done) => EventRegistration(m, done, killSwitch)
    }

  def withRegistration[A, M](source: Source[A, M]): Source[A, EventRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).watchTermination() {
      case ((m, killSwitch), done) => EventRegistration(m, done, killSwitch)
    }
} 
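
A hypothetical way to run the helper above (source, timings and names invented for illustration), assuming a classic ActorSystem and materializer are available:

import ackcord.EventRegistration
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source

import scala.concurrent.duration._

object EventRegistrationSketch extends App {
  implicit val system = ActorSystem("EventRegistrationSketch")
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  // toSink wires the source into Sink.ignore and exposes the kill switch plus the completion future
  val registration = EventRegistration.toSink(Source.tick(0.seconds, 1.second, "event")).run()

  registration.onDone.foreach(_ => println("stream finished"))

  Thread.sleep(3000)
  registration.stop() // shuts the stream down through the UniqueKillSwitch
}
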
Example 32
Source File: MovedMonitor.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.lavaplayer

import ackcord.{APIMessage, Cache}
import akka.actor.typed._
import akka.actor.typed.scaladsl._
import akka.stream.scaladsl.Keep
import akka.stream.typed.scaladsl.ActorSink
import akka.stream.{KillSwitches, UniqueKillSwitch}

private[lavaplayer] object MovedMonitor {

  private[lavaplayer] def apply(
      cache: Cache,
      handler: ActorRef[LavaplayerHandler.Command]
  ): Behavior[Command] =
    Behaviors.setup { ctx =>
      implicit val system: ActorSystem[Nothing] = ctx.system

      val killSwitch = cache.subscribeAPI
        .collectType[APIMessage.VoiceStateUpdate]
        .viaMat(KillSwitches.single)(Keep.right)
        .to(
          ActorSink
            .actorRefWithBackpressure(ctx.self, ReceivedEvent, InitSink, AckSink, CompletedSink, _ => CompletedSink)
        )
        .run()

      running(ctx, killSwitch, handler)
    }

  private def running(
      ctx: ActorContext[Command],
      killSwitch: UniqueKillSwitch,
      handler: ActorRef[LavaplayerHandler.Command]
  ): Behavior[Command] =
    Behaviors.receiveMessage {
      case Stop =>
        killSwitch.shutdown()
        Behaviors.same

      case InitSink(ackTo) =>
        ackTo ! AckSink
        Behaviors.same

      case ReceivedEvent(ackTo, APIMessage.VoiceStateUpdate(state, c)) if state.userId == c.current.botUser.id =>
        handler ! LavaplayerHandler.VoiceChannelMoved(state.channelId)
        ackTo ! AckSink
        Behaviors.same

      case ReceivedEvent(ackTo, _) =>
        ackTo ! AckSink
        Behaviors.same

      case CompletedSink =>
        Behaviors.stopped
    }

  private case object AckSink

  sealed trait Command
  case object Stop extends Command

  private case class InitSink(ackTo: ActorRef[AckSink.type])                                            extends Command
  private case class ReceivedEvent(ackTo: ActorRef[AckSink.type], message: APIMessage.VoiceStateUpdate) extends Command
  private case object CompletedSink                                                                     extends Command
} 
Example 33
Source File: Commands.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.oldcommands

import scala.concurrent.Future

import ackcord.CacheSnapshot
import ackcord.requests.{Requests, SupervisionStreams}
import akka.stream.scaladsl.{Keep, Source}
import akka.{Done, NotUsed}


  def subscribe[A, Mat, Mat2](
      factory: ParsedCmdFactory[A, Mat]
  )(combine: (Future[Done], Mat) => Mat2): Mat2 =
    SupervisionStreams
      .addLogAndContinueFunction(
        subscribeCmdParsed(factory.refiner)(factory.parser)
          .via(CmdHelper.addErrorHandlingParsed(requests))
          .watchTermination()(Keep.right)
          .toMat(factory.sink(requests))(combine)
          .addAttributes
      )
      .run()
} 
Example 34
Source File: CmdStreams.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.oldcommands

import ackcord._
import akka.NotUsed
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{BroadcastHub, Keep, Source}
import akka.stream.{ActorAttributes, Supervision}

object CmdStreams {

  
  def cmdStreams[A](
      settings: AbstractCommandSettings,
      apiMessages: Source[APIMessage, A]
  )(implicit system: ActorSystem[Nothing]): (A, Source[RawCmdMessage, NotUsed]) = {
    apiMessages
      .collect {
        case APIMessage.MessageCreate(msg, c) =>
          implicit val cache: MemoryCacheSnapshot = c.current

          CmdHelper.isValidCommand(settings.needMention(msg), msg).map { args =>
            if (args == Nil) NoCmd(msg, c.current)
            else {
              settings
                .getPrefix(args, msg)
                .fold[RawCmdMessage](NoCmdPrefix(msg, args.head, args.tail, cache)) {
                  case (prefix, remaining) => RawCmd(msg, prefix, remaining.head, remaining.tail.toList, c.current)
                }
            }
          }
      }
      .mapConcat(_.toList)
      .toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both)
      .addAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))
      .run()
  }

} 
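
cmdStreams keeps both the upstream materialized value and the broadcast Source handed out by BroadcastHub. A minimal BroadcastHub sketch, independent of AckCord:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{BroadcastHub, Keep, Sink, Source}

object BroadcastHubSketch extends App {
  implicit val system = ActorSystem("BroadcastHubSketch")
  implicit val mat = ActorMaterializer()

  // Keep.both yields (NotUsed, Source[Int, NotUsed]): the upstream's value and the hub's Source.
  val (_, hubSource) =
    Source(1 to 10)
      .toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both)
      .run()

  // Every run of hubSource attaches a new subscriber to the same upstream.
  hubSource.runWith(Sink.foreach(i => println(s"subscriber A: $i")))
  hubSource.runWith(Sink.foreach(i => println(s"subscriber B: $i")))
}
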
Example 35
Source File: VoiceUDPFlow.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.voice

import java.net.InetSocketAddress
import java.nio.ByteOrder

import scala.concurrent.{Future, Promise}

import ackcord.data.{RawSnowflake, UserId}
import ackcord.util.UdpConnectedFlow
import akka.NotUsed
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{BidiFlow, Concat, Flow, GraphDSL, Keep, Source}
import akka.stream.{BidiShape, OverflowStrategy}
import akka.util.ByteString

object VoiceUDPFlow {

  val silence = ByteString(0xF8, 0xFF, 0xFE)

  val SampleRate = 48000
  val FrameSize  = 960
  val FrameTime  = 20

  def flow[Mat](
      remoteAddress: InetSocketAddress,
      ssrc: Int,
      serverId: RawSnowflake,
      userId: UserId,
      secretKeys: Source[Option[ByteString], Mat]
  )(implicit system: ActorSystem[Nothing]): Flow[ByteString, AudioAPIMessage.ReceivedData, (Mat, Future[FoundIP])] =
    NaclBidiFlow
      .bidiFlow(ssrc, serverId, userId, secretKeys)
      .atopMat(voiceBidi(ssrc).reversed)(Keep.both)
      .async
      .join(Flow[ByteString].buffer(32, OverflowStrategy.backpressure).via(UdpConnectedFlow.flow(remoteAddress)))

  def voiceBidi(ssrc: Int): BidiFlow[ByteString, ByteString, ByteString, ByteString, Future[FoundIP]] = {
    implicit val byteOrder: ByteOrder = ByteOrder.BIG_ENDIAN
    val ipDiscoveryPacket = {
      val byteBuilder = ByteString.createBuilder
      byteBuilder.sizeHint(74)
      byteBuilder.putShort(0x1).putShort(70).putInt(ssrc)

      byteBuilder.putBytes(new Array[Byte](66))

      byteBuilder.result()
    }

    val valvePromise = Promise[Unit]
    val valve        = Source.future(valvePromise.future).drop(1).asInstanceOf[Source[ByteString, NotUsed]]

    val ipDiscoveryFlow = Flow[ByteString]
      .viaMat(new IPDiscoveryFlow(() => valvePromise.success(())))(Keep.right)

    BidiFlow
      .fromGraph(GraphDSL.create(ipDiscoveryFlow) { implicit b => ipDiscovery =>
        import GraphDSL.Implicits._

        val voiceIn = b.add(Flow[ByteString])

        val ipDiscoverySource           = b.add(Source.single(ipDiscoveryPacket) ++ valve)
        val ipDiscoveryAndThenVoiceData = b.add(Concat[ByteString]())

        ipDiscoverySource ~> ipDiscoveryAndThenVoiceData
        voiceIn ~> ipDiscoveryAndThenVoiceData

        BidiShape(
          ipDiscovery.in,
          ipDiscovery.out,
          voiceIn.in,
          ipDiscoveryAndThenVoiceData.out
        )
      })
  }

  
  case class FoundIP(address: String, port: Int)
} 
Example 36
Source File: VoiceUDPHandler.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.voice

import java.net.InetSocketAddress

import scala.concurrent.duration._
import scala.util.{Failure, Success}

import ackcord.data.{RawSnowflake, UserId}
import akka.NotUsed
import akka.actor.typed._
import akka.actor.typed.scaladsl._
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Keep, Sink, Source, SourceQueueWithComplete}
import akka.util.ByteString
import org.slf4j.Logger

object VoiceUDPHandler {

  def apply(
      address: String,
      port: Int,
      ssrc: Int,
      serverId: RawSnowflake,
      userId: UserId,
      soundProducer: Source[ByteString, NotUsed],
      soundConsumer: Sink[AudioAPIMessage, NotUsed],
      parent: ActorRef[VoiceHandler.Command]
  ): Behavior[Command] =
    Behaviors
      .supervise(
        Behaviors.setup[Command] { ctx =>
          implicit val system: ActorSystem[Nothing] = ctx.system

          val ((queue, futIp), watchDone) = soundProducer
            .viaMat(
              VoiceUDPFlow
                .flow(
                  new InetSocketAddress(address, port),
                  ssrc,
                  serverId,
                  userId,
                  Source.queue[Option[ByteString]](0, OverflowStrategy.dropBuffer)
                )
                .watchTermination()(Keep.both)
            )(Keep.right)
            .to(soundConsumer)
            .run()

          ctx.pipeToSelf(futIp) {
            case Success(value) => IPDiscoveryResult(value)
            case Failure(e)     => SendException(e)
          }
          ctx.pipeToSelf(watchDone)(_ => ConnectionDied)

          handle(ctx, ctx.log, ssrc, queue, parent)
        }
      )
      .onFailure(
        SupervisorStrategy
          .restartWithBackoff(100.millis, 5.seconds, 1D)
          .withResetBackoffAfter(10.seconds)
          .withMaxRestarts(5)
      )

  def handle(
      ctx: ActorContext[Command],
      log: Logger,
      ssrc: Int,
      queue: SourceQueueWithComplete[Option[ByteString]],
      parent: ActorRef[VoiceHandler.Command]
  ): Behavior[Command] = Behaviors.receiveMessage {
    case SendException(e) => throw e
    case ConnectionDied  => Behaviors.stopped
    case Shutdown =>
      queue.complete()
      Behaviors.same
    case IPDiscoveryResult(VoiceUDPFlow.FoundIP(localAddress, localPort)) =>
      parent ! VoiceHandler.GotLocalIP(localAddress, localPort)
      Behaviors.same
    case SetSecretKey(key) =>
      queue.offer(key)
      Behaviors.same
  }

  sealed trait Command

  case object Shutdown extends Command

  private case class SendException(e: Throwable)                      extends Command
  private case object ConnectionDied                                  extends Command
  private case class IPDiscoveryResult(foundIP: VoiceUDPFlow.FoundIP) extends Command
  private[voice] case class SetSecretKey(key: Option[ByteString])     extends Command
} 
Example 37
Source File: MusicCommands.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.examplecore.music

import ackcord._
import ackcord.commands.{CommandBuilder, CommandController, NamedCommand, VoiceGuildMemberCommandMessage}
import ackcord.data.{GuildId, TextChannel}
import ackcord.examplecore.music.MusicHandler.{NextTrack, QueueUrl, StopMusic, TogglePause}
import akka.NotUsed
import akka.actor.typed.scaladsl.AskPattern._
import akka.actor.typed.{ActorRef, ActorSystem}
import akka.stream.scaladsl.{Flow, Keep, Sink}
import akka.stream.typed.scaladsl.ActorFlow
import akka.util.Timeout

class MusicCommands(requests: Requests, guildId: GuildId, musicHandler: ActorRef[MusicHandler.Command])(
    implicit timeout: Timeout,
    system: ActorSystem[Nothing]
) extends CommandController(requests) {

  val VoiceCommand: CommandBuilder[VoiceGuildMemberCommandMessage, NotUsed] =
    GuildVoiceCommand.andThen(CommandBuilder.inOneGuild(guildId))

  val queue: NamedCommand[String] =
    VoiceCommand.named("&", Seq("q", "queue")).parsing[String].withSideEffects { m =>
      musicHandler.ask[MusicHandler.CommandAck.type](QueueUrl(m.parsed, m.textChannel, m.voiceChannel.id, _))
    }

  private def simpleCommand(
      aliases: Seq[String],
      mapper: (TextChannel, ActorRef[MusicHandler.CommandAck.type]) => MusicHandler.MusicHandlerEvents
  ): NamedCommand[NotUsed] = {
    VoiceCommand.andThen(CommandBuilder.inOneGuild(guildId)).named("&", aliases, mustMention = true).toSink {
      Flow[VoiceGuildMemberCommandMessage[NotUsed]]
        .map(_.textChannel)
        .via(ActorFlow.ask(requests.parallelism)(musicHandler)(mapper))
        .toMat(Sink.ignore)(Keep.none)
    }
  }

  val stop: NamedCommand[NotUsed] = simpleCommand(Seq("s", "stop"), StopMusic.apply)

  val next: NamedCommand[NotUsed] = simpleCommand(Seq("n", "next"), NextTrack.apply)

  val pause: NamedCommand[NotUsed] = simpleCommand(Seq("p", "pause"), TogglePause.apply)
} 
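
simpleCommand uses Keep.none, which discards both materialized values and yields NotUsed. A tiny standalone sketch:

import akka.NotUsed
import akka.stream.scaladsl.{Flow, Keep, Sink}

object KeepNoneSketch {
  // Keep.none discards both materialized values, so the resulting Sink materializes NotUsed.
  val fireAndForget: Sink[Int, NotUsed] =
    Flow[Int].map(_ * 2).toMat(Sink.ignore)(Keep.none)
}
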
Example 38
Source File: Main.scala    From kinesis-stream   with MIT License 5 votes vote down vote up
import akka.Done
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, RunnableGraph, Sink}
import px.kinesis.stream.consumer

import scala.concurrent.Future

object Main extends App {

  implicit val system = ActorSystem("kinesis-source")
  implicit val ec = system.dispatcher
  implicit val mat = ActorMaterializer()

  // A simple consumer that will print to the console for now
  val console = Sink.foreach[String](println)

  val runnableGraph: RunnableGraph[Future[Done]] =
    consumer
      .source("test-stream", "test-app")
      .via(consumer.commitFlow(parallelism = 2))
      .map(r => r.data.utf8String)
      .toMat(console)(Keep.left)

  val done = runnableGraph.run()
  done.onComplete(_ => {
    println("Shutdown completed")
    system.terminate()
  })

} 
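
Keep.left here retains the consumer source's Future[Done] and drops the console sink's. A small sketch, unrelated to Kinesis, contrasting the three common combiners:

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}

import scala.concurrent.Future

object KeepCombinersSketch extends App {
  implicit val system = ActorSystem("KeepCombinersSketch")
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  val numbers = Source(1 to 3)                  // materializes NotUsed
  val sum     = Sink.fold[Int, Int](0)(_ + _)   // materializes Future[Int]

  val left:  NotUsed                = numbers.toMat(sum)(Keep.left).run()
  val right: Future[Int]            = numbers.toMat(sum)(Keep.right).run()
  val both:  (NotUsed, Future[Int]) = numbers.toMat(sum)(Keep.both).run()

  right.foreach(total => println(s"sum = $total"))
}
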
Example 39
Source File: MaterializeValue.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.graph

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Keep, RunnableGraph, Sink, Source, Tcp }
import akka.util.ByteString

import scala.concurrent.{ Future, Promise }

object MaterializeValue {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  case class MyClass(private val p: Promise[Option[Int]], conn: Tcp.OutgoingConnection) extends AutoCloseable {
    override def close(): Unit = p.trySuccess(None)
  }

  // Materializes to Promise[Option[Int]]
  val source: Source[Int, Promise[Option[Int]]] = Source.maybe[Int]

  // Materializes to NotUsed
  val flow1: Flow[Int, Int, NotUsed] = Flow[Int].take(100)

  // Materializes to Promise[Int]
  val nestedSource: Source[Int, Promise[Option[Int]]] =
    source.viaMat(flow1)(Keep.left).named("nestedSource") // via(flow) === viaMat(flow)(Keep.left)
  //  val nestedSource2: Source[Int, NotUsed] = source.viaMat(flow1)(Keep.right)

  // Materializes to NotUsed
  val flow2: Flow[Int, ByteString, NotUsed] =
    Flow[Int].map(i => ByteString(i.toString))

  // Materializes to Future[Tcp.OutgoingConnection]   (Keep.right)
  val flow3: Flow[ByteString, ByteString, Future[Tcp.OutgoingConnection]] =
    Tcp().outgoingConnection("localhost", 8080)

  val nestedFlow: Flow[Int, ByteString, Future[Tcp.OutgoingConnection]] =
    flow2.viaMat(flow3)(Keep.right)

  val nestedFlow2: Flow[Int, ByteString, NotUsed] =
    flow2.viaMat(flow3)(Keep.left) // flow2.via(flow3)
  val nestedFlow3: Flow[Int, ByteString, (NotUsed, Future[Tcp.OutgoingConnection])] =
    flow2.viaMat(flow3)(Keep.both)

  // Materializes to Future[String]   (Keep.right)
  val sink: Sink[ByteString, Future[String]] =
    Sink.fold[String, ByteString]("")(_ + _.utf8String)

  val nestedSink: Sink[Int, (Future[Tcp.OutgoingConnection], Future[String])] =
    nestedFlow.toMat(sink)(Keep.both)

  def f(p: Promise[Option[Int]], rest: (Future[Tcp.OutgoingConnection], Future[String])): Future[MyClass] = {
    val connFuture = rest._1
    connFuture.map(outConn => MyClass(p, outConn))
  }

  // Materializes to Future[MyClass]
  val runnableGraph: RunnableGraph[Future[MyClass]] =
    nestedSource.toMat(nestedSink)(f)

  val r: RunnableGraph[Promise[Option[Int]]] =
    nestedSource.toMat(nestedSink)(Keep.left)

  val r2: RunnableGraph[(Future[Tcp.OutgoingConnection], Future[String])] =
    nestedSource.toMat(nestedSink)(Keep.right)
} 
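
If a custom combiner like f feels awkward, the same result can be obtained with Keep.both followed by mapMaterializedValue. A sketch that would sit inside the MaterializeValue object above:

  // Same materialized type as `runnableGraph`, built from Keep.both plus a post-processing step.
  val runnableGraph2: RunnableGraph[Future[MyClass]] =
    nestedSource
      .toMat(nestedSink)(Keep.both)
      .mapMaterializedValue { case (p, rest) => f(p, rest) }
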
Example 40
Source File: PartialGraph.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.graph

import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Balance, Broadcast, Flow, GraphDSL, Keep, Merge, RunnableGraph, Sink, Source }
import akka.stream.{ ActorMaterializer, FlowShape, SourceShape }

import scala.concurrent.Future
import scala.io.StdIn

object PartialGraph extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  def partial =
    GraphDSL
      .create() { implicit b =>
        import GraphDSL.Implicits._

        val B = b.add(Broadcast[Int](2))
        val C = b.add(Merge[Int](2))
        val D = Flow[Int].map(_ + 1)
        val E = b.add(Balance[Int](2))
        val F = b.add(Merge[Int](2))

        C <~ F
        B ~> C ~> F
        B ~> D ~> E ~> F

        FlowShape(B.in, E.out(1))
      }
      .named("partial")

  // Convert partial from a FlowShape into a Flow so the flow DSL (e.g. the .filter() method) becomes available
  val flow = Flow.fromGraph(partial)

  val source = Source.fromGraph(GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._
    val merge = b.add(Merge[Int](2))
    Source.single(0) ~> merge
    Source(List(2, 3, 4)) ~> merge
    SourceShape(merge.out)
  })

  val sink: Sink[Int, Future[Int]] = Flow[Int].map(_ * 2).drop(10).named("nestedFlow").toMat(Sink.head)(Keep.right)

  val closed: RunnableGraph[Future[Int]] =
    source.via(flow.filter(_ > 1)).toMat(sink)(Keep.right)

  closed.run().foreach(println)

  StdIn.readLine()
  system.terminate()
} 
Example 41
Source File: SimplePublishSubscribe.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.dynamichub

import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, KillSwitches, UniqueKillSwitch }
import akka.stream.scaladsl.{ BroadcastHub, Flow, Keep, MergeHub, Sink, Source }
import com.typesafe.scalalogging.StrictLogging

import scala.io.StdIn
import scala.concurrent.duration._

object SimplePublishSubscribe extends App with StrictLogging {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  val (sink, source) =
    MergeHub.source[String](perProducerBufferSize = 16).toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both).run()

  source.runWith(Sink.ignore)

  val busFlow: Flow[String, String, UniqueKillSwitch] = Flow
    .fromSinkAndSource(sink, source)
    .joinMat(KillSwitches.singleBidi[String, String])(Keep.right)
    .backpressureTimeout(3.seconds)

  val switch: UniqueKillSwitch =
    Source.repeat("Hello world!").viaMat(busFlow)(Keep.right).to(Sink.foreach(v => logger.info(s"switch: $v"))).run()

  Thread.sleep(200)
  switch.shutdown()

  StdIn.readLine()
  system.terminate()
} 
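
Because the materialized sink and source are reusable, further producers and consumers can be attached at any time. For example, the following lines could be added to the App above before the final StdIn.readLine():

  // extra producer: every element offered to `sink` reaches all subscribers of `source`
  Source(List("one", "two", "three")).runWith(sink)

  // extra consumer: attaches a second subscriber to the broadcast side
  source.runWith(Sink.foreach(v => logger.info(s"subscriber 2: $v")))
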
Example 42
Source File: FullStream.scala    From elastic-indexer4s   with MIT License 5 votes vote down vote up
package com.yannick_cw.elastic_indexer4s.indexing_logic

import akka.NotUsed
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import com.yannick_cw.elastic_indexer4s.Index_results.{IndexError, StageSucceeded, StageSuccess}
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

object FullStream extends LazyLogging {

  private def countAndLogSink[A](logPer: FiniteDuration): Sink[A, Future[Int]] =
    Flow[A]
      .groupedWithin(Int.MaxValue, logPer)
      .map(_.length)
      .map { elementsPerTime =>
        logger.info(s"Indexed $elementsPerTime elements last $logPer")
        elementsPerTime
      }
      .toMat(Sink.reduce[Int](_ + _))(Keep.right)

  def run[A](source: Source[A, NotUsed], sink: Sink[A, Future[Unit]], logSpeedInterval: FiniteDuration)(
      implicit materializer: ActorMaterializer,
      ex: ExecutionContext): Future[Either[IndexError, StageSucceeded]] =
    (for {
      count <- source
        .alsoToMat(countAndLogSink(logSpeedInterval))(Keep.right)
        .toMat(sink)(Keep.both)
        .mapMaterializedValue { case (fCount, fDone) => fDone.flatMap(_ => fCount) }
        .run()
    } yield Right(StageSuccess(s"Indexed $count documents successfully")))
      .recover {
        case NonFatal(t) =>
          Left(IndexError("Writing documents failed.", Some(t)))
      }
} 
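
The alsoToMat/Keep.both combination in run can be reduced to a few lines. A generic sketch, with invented names, that counts elements on a side channel while delivering them to the main sink:

import akka.Done
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future

object AlsoToMatSketch extends App {
  implicit val system = ActorSystem("AlsoToMatSketch")
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  // Left: the element count from the side channel; right: completion of the main sink.
  val graph: RunnableGraph[(Future[Int], Future[Done])] =
    Source(1 to 10)
      .alsoToMat(Sink.fold[Int, Int](0)((acc, _) => acc + 1))(Keep.right)
      .toMat(Sink.foreach[Int](println))(Keep.both)

  val (countF, doneF) = graph.run()
  countF.foreach(count => println(s"counted $count elements"))
  doneF.foreach(_ => system.terminate())
}
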
Example 43
Source File: XMLParsingStopSpec.scala    From akka-xml-parser   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.akka.xml

import akka.stream.scaladsl.{Keep, Source}
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.mock.MockitoSugar
import org.scalatest.time.{Millis, Seconds, Span}

class XMLParsingStopSpec extends FlatSpec
  with Matchers
  with ScalaFutures
  with MockitoSugar
  with Eventually
  with XMLParserFixtures {

  val f = fixtures
  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(5, Millis))

  import f._

  it should "Stop parsing when the passed in xPath is encountered" in {

    val source = Source(ParserTestHelpers.getBrokenMessage(ParserTestHelpers.sa100.toString, 100))

    val paths = Seq[XMLInstruction](
      XMLExtract(Seq("GovTalkMessage", "Header", "MessageDetails", "Class")),
      XMLExtract(Seq("GovTalkMessage", "Header", "MessageDetails", "Qualifier")),
      XMLExtract(Seq("GovTalkMessage", "Header", "MessageDetails", "Function")),
      XMLExtract(Seq("GovTalkMessage", "Body", "IRenvelope", "MTR", "SA100", "YourPersonalDetails", "NationalInsuranceNumber")), //This is in the body, will not be parsed
      XMLStopParsing(Seq("GovTalkMessage", "Body"))
    )

    val expected = Set(
      XMLElement(List("GovTalkMessage", "Header", "MessageDetails", "Class"), Map(), Some("HMRC-SA-SA100")),
      XMLElement(List("GovTalkMessage", "Header", "MessageDetails", "Function"), Map(), Some("submit")),
      XMLElement(List("GovTalkMessage", "Header", "MessageDetails", "Qualifier"), Map(), Some("request"))
    )

    whenReady(source.runWith(parseToXMLElements(paths))) { r =>
      r.filterNot(a => a.value == Some(FastParsingStage.STREAM_SIZE)) shouldBe expected
    }

    whenReady(source.runWith(parseToByteString(paths))) { r =>
      whenReady(source.toMat(collectByteString)(Keep.right).run()) { t =>
        r shouldBe t
      }
    }
  }

  it should "Notify if the payload exceeded the maximum allowed size" in {
    val source = Source(ParserTestHelpers.getBrokenMessage(ParserTestHelpers.sa100.toString, 100))

    val paths = Seq[XMLInstruction](XMLExtract(Seq("GovTalkMessage", "Header", "MessageDetails", "Class")))
    val expected = Set(
      XMLElement(List("GovTalkMessage", "Header", "MessageDetails", "Class"), Map(), Some("HMRC-SA-SA100")),
      XMLElement(List(), Map(), Some("Stream max size"))
    )

    whenReady(source.runWith(parseToXMLElements(paths, Some(200)))) { r =>
      r.filterNot(a => a.value == Some(FastParsingStage.STREAM_SIZE)) shouldBe expected
    }

    whenReady(source.runWith(parseToByteString(paths))) { r =>
      whenReady(source.toMat(collectByteString)(Keep.right).run()) { t =>
        r shouldBe t
      }
    }
  }


} 
Example 44
Source File: XMLParserXMLExtractNamespaceSpec.scala    From akka-xml-parser   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.akka.xml

import akka.stream.scaladsl.{Keep, Source}
import akka.util.ByteString
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.mock.MockitoSugar
import org.scalatest.time.{Millis, Seconds, Span}

class XMLParserXMLExtractNamespaceSpec extends FlatSpec
  with Matchers
  with ScalaFutures
  with MockitoSugar
  with Eventually
  with XMLParserFixtures {

  val f = fixtures
  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(5, Millis))

  import f._

  behavior of "CompleteChunkStage#parser"


  it should "Parse and extract several non-default namespaces" in {

    val testXMLX =
      <ns5:GovTalkMessage
      xmlns:ns0="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/13-14/2"
      xmlns:ns2="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/15-16/1"
      xmlns:ns5="http://www.govtalk.gov.uk/CM/envelope"
      xmlns:ns1="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/14-15/1"
      xmlns:ns3="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/16-17/1"
      xmlns:ns4="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/17-18/1"
      xmlns="">
        <ns5:EnvelopeVersion>2.0</ns5:EnvelopeVersion>
        <ns5:Header></ns5:Header>
        <ns5:GovTalkDetails></ns5:GovTalkDetails>
      </ns5:GovTalkMessage>

    val source = Source(List(ByteString(testXMLX.toString())))


    val paths = Seq[XMLInstruction](
      XMLExtract(Seq("GovTalkMessage"), Map("xmlns:ns2" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/15-16/1")),
      XMLExtract(Seq("GovTalkMessage"), Map("xmlns:BLABLA" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/13-14/2")),
      XMLExtract(Seq("GovTalkMessage"), Map("xmlns" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/17-18/1")),
      XMLExtract(Seq("GovTalkMessage"), Map("xmlns" -> "http://www.govtalk.gov.uk/CM/envelope"))
    )

    val expected = Set(
      XMLElement(List("GovTalkMessage"), Map("xmlns:ns2" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/15-16/1"), Some("")),
      XMLElement(List("GovTalkMessage"), Map("xmlns:ns0" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/13-14/2"), Some("")),
      XMLElement(List("GovTalkMessage"), Map("xmlns:ns4" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/17-18/1"), Some("")),
      XMLElement(List("GovTalkMessage"), Map("xmlns:ns5" -> "http://www.govtalk.gov.uk/CM/envelope"), Some("")),
      XMLElement(List(), Map(CompleteChunkStage.STREAM_SIZE -> "681"), Some(CompleteChunkStage.STREAM_SIZE))
    )

    whenReady(source.runWith(parseToXMLElements(paths))) { r =>
      r shouldBe expected
    }

    whenReady(source.runWith(parseToByteString(paths))) { r =>
      whenReady(source.toMat(collectByteString)(Keep.right).run()) { t =>
        r shouldBe t
      }
    }
  }
} 
Example 45
Source File: OutputXMLMatchesInputXMLSpec.scala    From akka-xml-parser   with Apache License 2.0 5 votes vote down vote up
import akka.stream.scaladsl.{Keep, Source}
import akka.util.ByteString
import org.scalatest
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{BeforeAndAfterEach, Matchers}
import uk.gov.hmrc.akka.xml._
import uk.gov.hmrc.play.test.UnitSpec

import scala.concurrent.ExecutionContext.Implicits.global

class OutputXMLMatchesInputXMLSpec extends UnitSpec with BeforeAndAfterEach with Matchers with ScalaFutures with MockitoSugar with Eventually with XMLParserFixtures {

  val inputXml                        = "<Address xmlns=\"http://www.govtalk.gov.uk/CM/address\"><Line>Line 1</Line><Line>Line 2</Line><PostCode>Tf3 4NT</PostCode></Address>"
  val inputXmlWithSelfClosingElement  = "<Address xmlns=\"http://www.govtalk.gov.uk/CM/address\"><Line>Line 1</Line><Line>Line 2</Line><Line/><PostCode>Tf3 4NT</PostCode></Address>"
  val inputXmlWithBlankElement        = "<Address xmlns=\"http://www.govtalk.gov.uk/CM/address\"><Line>Line 1</Line><Line>Line 2</Line><Line></Line><PostCode>Tf3 4NT</PostCode></Address>"

  val f = fixtures

  def xpathValue(xmlElements: Set[XMLElement], xPath: Seq[String]): Option[String] = xmlElements.collectFirst { case XMLElement(`xPath`, _, Some(xpathValue)) => xpathValue }

  def parseAndCompare(inputXml: String): scalatest.Assertion = {
    val inputXmlSource: Source[ByteString, _] = Source.single(ByteString(inputXml))

    await(
      for {
        parsedXmlElements <- inputXmlSource
          .via(CompleteChunkStage.parser())
          .via(ParsingStage.parser(Seq(XMLExtract(Seq("Address"), Map.empty, true))))
          .via(f.flowXMLElements)
          .toMat(f.collectXMLElements)(Keep.right)
          .run()(f.mat)

        parsedXml = xpathValue(parsedXmlElements, Seq("Address"))
      } yield {

        val outputXml = parsedXml.get

        println(s"INPUT  XML = $inputXml")
        println(s"OUTPUT XML = $outputXml")
        println()

        outputXml shouldBe inputXml
      }
    )
  }


  "The output XML" should {
    "match the input XML" when {
      "blank elements *** ARE *** present"            in parseAndCompare(inputXmlWithBlankElement)
      "self closing elements are *** NOT *** present" in parseAndCompare(inputXml)
      "self closing elements *** ARE *** present"     in parseAndCompare(inputXmlWithSelfClosingElement)
    }
  }


} 
Example 46
Source File: ActorSourceSpec.scala    From akka-ddd-cqrs-es-example   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.bank.infrastrucuture.akka

import akka.actor.{ ActorSystem, Props }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Keep
import akka.stream.testkit.scaladsl.TestSink
import akka.testkit.TestKit
import com.github.j5ik2o.bank.infrastrucuture.akka.dsl.ActorSource
import org.scalatest.FreeSpecLike
import org.scalatest.concurrent.ScalaFutures

class ActorSourceSpec extends TestKit(ActorSystem("ActorSourceSpec")) with FreeSpecLike with ScalaFutures {

  implicit val mat = ActorMaterializer()

  "ActorSource" - {
    "should be able to send message via stream" in {
      val props = Props(SourceActor[String]({ case (subscriber, msg) => subscriber ! msg }))

      val (sourceRefFuture, sinkProbe) = ActorSource[String](props).toMat(TestSink.probe)(Keep.both).run()

      sourceRefFuture.futureValue ! "TEST"
      sinkProbe.request(1).expectNext("TEST")
    }
    "should be able to error handling" in {
      val props = Props(SourceActor[String]({ case (_, x) => throw new Exception(s"message = $x") }))

      val (sourceRefFuture, sinkProbe) = ActorSource[String](props).toMat(TestSink.probe)(Keep.both).run()

      sourceRefFuture.futureValue ! "TEST"
      sinkProbe.request(1).expectError()
    }
  }
} 
Example 47
Source File: BankAccountReadModelUseCase.scala    From akka-ddd-cqrs-es-example   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.bank.useCase

import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Flow, Keep, Sink, Source, SourceQueueWithComplete }
import akka.stream.{ ActorMaterializer, OverflowStrategy }
import akka.{ Done, NotUsed }
import com.github.j5ik2o.bank.domain.model._
import com.github.j5ik2o.bank.useCase.BankAccountAggregateUseCase.Protocol.{
  ResolveBankAccountEventsRequest,
  ResolveBankAccountEventsResponse
}
import com.github.j5ik2o.bank.useCase.port.{ BankAccountReadModelFlows, JournalReader }
import pureconfig._

import scala.concurrent.{ ExecutionContext, ExecutionContextExecutor, Future, Promise }

class BankAccountReadModelUseCase(bankAccountReadModelFlows: BankAccountReadModelFlows, journalReader: JournalReader)(
    implicit val system: ActorSystem
) extends UseCaseSupport {

  import UseCaseSupport._

  private val config = loadConfigOrThrow[BankAccountAggregateUseCaseConfig]("bank.use-case.bank-account-use-case")

  private val bufferSize: Int = config.bufferSize

  private implicit val mat: ActorMaterializer       = ActorMaterializer()
  private implicit val ec: ExecutionContextExecutor = system.dispatcher

  def resolveBankAccountEventsById(
      request: ResolveBankAccountEventsRequest
  )(implicit ec: ExecutionContext): Future[ResolveBankAccountEventsResponse] =
    offerToQueue(resolveBankAccountEventQueue)(request, Promise())

  private lazy val resolveBankAccountEventQueue
    : SourceQueueWithComplete[(ResolveBankAccountEventsRequest, Promise[ResolveBankAccountEventsResponse])] =
    Source
      .queue[(ResolveBankAccountEventsRequest, Promise[ResolveBankAccountEventsResponse])](bufferSize,
                                                                                           OverflowStrategy.dropNew)
      .via(bankAccountReadModelFlows.resolveBankAccountEventByIdFlow.zipPromise)
      .toMat(completePromiseSink)(Keep.left)
      .run()

  private val projectionFlow: Flow[(BankAccountEvent, Long), Int, NotUsed] =
    Flow[(BankAccountEvent, Long)].flatMapConcat {
      case (event: BankAccountOpened, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.name.value, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.openBankAccountFlow)
      case (event: BankAccountEventUpdated, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.name.value, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.updateAccountFlow)
      case (event: BankAccountDeposited, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.deposit, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.depositBankAccountFlow)
      case (event: BankAccountWithdrawn, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.withdraw, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.withdrawBankAccountFlow)
      case (event: BankAccountClosed, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.closeBankAccountFlow)
    }

  def execute(): Future[Done] = {
    bankAccountReadModelFlows.resolveLastSeqNrSource
      .flatMapConcat { lastSeqNr =>
        journalReader.eventsByTagSource(classOf[BankAccountEvent].getName, lastSeqNr + 1)
      }
      .map { eventBody =>
        (eventBody.event.asInstanceOf[BankAccountEvent], eventBody.sequenceNr)
      }
      .via(projectionFlow)
      .toMat(Sink.ignore)(Keep.right)
      .run()

  }
} 
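
The queue-plus-promise pattern behind resolveBankAccountEventQueue can be illustrated with a toy handler. A sketch only, with a made-up request type and an inline responder in place of the read-model flows:

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, OverflowStrategy}
import akka.stream.scaladsl.{Keep, Sink, Source}

import scala.concurrent.Promise

object RequestQueueSketch extends App {
  implicit val system = ActorSystem("RequestQueueSketch")
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  // Keep.left retains the queue; each element pairs a request with the promise to fulfil.
  val requestQueue =
    Source.queue[(Int, Promise[Int])](16, OverflowStrategy.dropNew)
      .toMat(Sink.foreach[(Int, Promise[Int])] { case (request, promise) => promise.success(request * 2) })(Keep.left)
      .run()

  val promise = Promise[Int]()
  requestQueue.offer((21, promise))
  promise.future.foreach(response => println(s"response: $response"))
}
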
Example 48
Source File: SttpBackendStubAkkaTests.scala    From sttp   with Apache License 2.0 5 votes vote down vote up
package sttp.client.akkahttp

import akka.actor.ActorSystem
import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import sttp.client._
import sttp.model.Headers

import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

class SttpBackendStubAkkaTests extends AnyFlatSpec with Matchers with ScalaFutures with BeforeAndAfterAll {

  implicit val system: ActorSystem = ActorSystem()

  override protected def afterAll(): Unit = {
    Await.result(system.terminate().map(_ => ()), 5.seconds)
  }

  "backend stub" should "cycle through responses using a single sent request" in {
    // given
    implicit val backend = AkkaHttpBackend.stub
      .whenRequestMatches(_ => true)
      .thenRespondCyclic("a", "b", "c")

    // when
    def r = basicRequest.get(uri"http://example.org/a/b/c").send().futureValue

    // then
    r.body shouldBe Right("a")
    r.body shouldBe Right("b")
    r.body shouldBe Right("c")
    r.body shouldBe Right("a")
  }

  it should "use given flow as web socket handler" in {
    // This test is an example of how we can test a client flow.
    // We check the behavior of the client when it is connected to an echo server.
    // The client's responsibility is to send two messages to the server and collect the received messages.
    val useHandler: Flow[Message, Message, Future[Seq[Message]]] => Future[Seq[Message]] = clientFlow => {
      val ((outQueue, clientReceivedMessages), inQueue) = Source
        .queue(1, OverflowStrategy.fail)
        .viaMat(clientFlow)(Keep.both)
        .toMat(Sink.queue())(Keep.both)
        .run()

      def echoMsg(): Future[Unit] =
        inQueue.pull().flatMap {
          case None =>
            echoMsg()
          case Some(msg) =>
            outQueue.offer(TextMessage(s"echo: " + msg.asTextMessage.getStrictText)).map(_ => ())
        }

      (for {
        _ <- outQueue.offer(TextMessage("Hi!"))
        _ <- echoMsg()
        _ <- echoMsg()
        _ = outQueue.complete()
        _ <- outQueue.watchCompletion()
      } yield ()).flatMap(_ => clientReceivedMessages)
    }

    val clientFlow: Flow[Message, Message, Future[Seq[Message]]] = {
      Flow.fromSinkAndSourceMat(
        Sink.seq[Message],
        Source((1 to 2).map(i => TextMessage(s"test$i")))
      )(Keep.left)
    }

    implicit val b = AkkaHttpBackend.stub
      .whenRequestMatches(_ => true)
      .thenHandleOpenWebSocket(Headers(List.empty), useHandler)

    val receivedMessages = basicRequest
      .get(uri"wss://echo.websocket.org")
      .openWebsocket(clientFlow)
      .flatMap(_.result)
      .futureValue
      .toList

    receivedMessages shouldBe List("Hi!", "echo: test1", "echo: test2").map(TextMessage(_))
  }
} 
Example 49
Source File: BoundedOrderingSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.streams

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{FlatSpec, Matchers}

class BoundedOrderingSpec extends FlatSpec with Matchers with ScalaFutures {

  implicit val system = ActorSystem("OrderingStateSpec")
  implicit val mat = ActorMaterializer()

  it should "require waitFor > 0" in {
    an [IllegalArgumentException] should be thrownBy BoundedOrdering[Int, Int](maxBounded = 0, 1, _ + 1, identity)
  }

  it should "retain order of a stream" in {
    val boundedOrdering = BoundedOrdering[Int, Int](maxBounded = 5, 1, _ + 1, identity)
    val input = List(1, 2, 3, 4, 5)
    val output = Source(input).via(boundedOrdering).toMat(Sink.seq)(Keep.right).run()
    output.futureValue should contain theSameElementsInOrderAs input
  }

  it should "re-order the stream completely within the ordering range" in {
    val boundedOrdering = BoundedOrdering[Int, Int](maxBounded = 5, 1, _ + 1, identity)
    val input = List(2, 3, 4, 1, 5, 7, 8, 6, 9, 10)
    val output = Source(input).via(boundedOrdering).toMat(Sink.seq)(Keep.right).run()
    output.futureValue should contain theSameElementsInOrderAs input.sorted
  }

  it should "re-order the stream incompletely outside of the ordering range" in {
    val boundedOrdering = BoundedOrdering[Int, Int](maxBounded = 5, 1, _ + 1, identity)
    val input = List(1, 3, 4, 5, 6, 7, 8, 9, 2, 10)
    val output = Source(input).via(boundedOrdering).toMat(Sink.seq)(Keep.right).run()
    output.futureValue should contain theSameElementsInOrderAs input
  }

  it should "ignore the missing element and keep the stream moving" in {
    val boundedOrdering = BoundedOrdering[Int, Int](maxBounded = 5, 1, _ + 1, identity)
    val input = List(1, 3, 4, 5, 6, 7, 8, 9, 10, 11)
    val output = Source(input).via(boundedOrdering).toMat(Sink.seq)(Keep.right).run()
    output.futureValue should contain theSameElementsInOrderAs input
  }

  it should "re-order the stream with identifier type different from message type" in {
    case class Element(id: Long, content: String)
    val boundedOrdering = BoundedOrdering[Element, Long](maxBounded = 5, 1L, _ + 1L, _.id)
    val input = List(Element(1, "one"), Element(3, "three"), Element(5, "five"), Element(2, "two"), Element(6, "six"),
      Element(7, "seven"), Element(8, "eight"), Element(9, "nine"), Element(10, "ten"), Element(4, "four"))
    val wisb = List(Element(1, "one"), Element(2, "two"), Element(3, "three"), Element(5, "five"), Element(6, "six"),
      Element(7, "seven"), Element(8, "eight"), Element(9, "nine"), Element(10, "ten"), Element(4, "four"))

    val output = Source(input).via(boundedOrdering).toMat(Sink.seq)(Keep.right).run()
    output.futureValue should contain theSameElementsInOrderAs wisb
  }

  it should "re-order the stream using custom id ordering" in {
    case class Element(id: String, content: String)
    implicit val order: Ordering[String] = Ordering.by(_.toInt)
    val boundedOrdering = BoundedOrdering[Element, String](maxBounded = 5, "2", s => (s.toInt + 2).toString, _.id)
    val input = List(Element("2", "one"), Element("6", "three"), Element("10", "five"), Element("4", "two"),
      Element("12", "six"), Element("14", "seven"), Element("16", "eight"), Element("18", "nine"),
      Element("20", "ten"), Element("8", "four"))
    val wisb = List(Element("2", "one"), Element("4", "two"), Element("6", "three"), Element("10", "five"),
      Element("12", "six"), Element("14", "seven"), Element("16", "eight"), Element("18", "nine"),
      Element("20", "ten"), Element("8", "four"))

    val output = Source(input).via(boundedOrdering).toMat(Sink.seq)(Keep.right).run()
    output.futureValue should contain theSameElementsInOrderAs wisb
  }
} 
Example 50
Source File: UnorderedParallelParquetSink.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s

import java.util.UUID

import akka.Done
import akka.stream.scaladsl.{Flow, Keep, Sink}
import org.apache.hadoop.fs.Path
import org.apache.parquet.schema.MessageType
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future

private[parquet4s] object UnorderedParallelParquetSink extends IOOps {

  protected val logger: Logger = LoggerFactory.getLogger(this.getClass)

  def apply[T: ParquetRecordEncoder : ParquetSchemaResolver](path: Path,
                                                             parallelism: Int,
                                                             options: ParquetWriter.Options = ParquetWriter.Options()
                                                            ): Sink[T, Future[Done]] = {
    val schema = ParquetSchemaResolver.resolveSchema[T]
    val valueCodecConfiguration = options.toValueCodecConfiguration

    validateWritePath(path, options)

    def encode(data: T): RowParquetRecord = ParquetRecordEncoder.encode[T](data, valueCodecConfiguration)

    Flow[T]
      .zipWithIndex
      .groupBy(parallelism, elemAndIndex => Math.floorMod(elemAndIndex._2, parallelism))
      .map(elemAndIndex => encode(elemAndIndex._1))
      .fold(UnorderedChunk(path, schema, options))(_.write(_))
      .map(_.close())
      .async
      .mergeSubstreamsWithParallelism(parallelism)
      .toMat(Sink.ignore)(Keep.right)
  }

  private trait UnorderedChunk {

    def write(record: RowParquetRecord): UnorderedChunk

    def close(): Unit

  }

  private object UnorderedChunk {

    def apply(basePath: Path,
              schema: MessageType,
              options: ParquetWriter.Options): UnorderedChunk = new PendingUnorderedChunk(basePath, schema, options)

    private[UnorderedChunk] class PendingUnorderedChunk(basePath: Path,
                                        schema: MessageType,
                                        options: ParquetWriter.Options) extends UnorderedChunk {
      override def write(record: RowParquetRecord): UnorderedChunk = {
        val chunkPath = Path.mergePaths(basePath, new Path(s"/part-${UUID.randomUUID()}.parquet"))
        val writer = ParquetWriter.internalWriter(chunkPath, schema, options)
        writer.write(record)
        new StartedUnorderedChunk(chunkPath, writer, acc = 1)
      }

      override def close(): Unit = ()
    }

    private[UnorderedChunk] class StartedUnorderedChunk(chunkPath: Path,
                                        writer: ParquetWriter.InternalWriter,
                                        acc: Long
                                       ) extends UnorderedChunk {
      override def write(record: RowParquetRecord): UnorderedChunk = {
        writer.write(record)
        new StartedUnorderedChunk(chunkPath, writer, acc = acc + 1)
      }

      override def close(): Unit = {
        if (logger.isDebugEnabled) logger.debug(s"$acc records were successfully written to $chunkPath")
        writer.close()
      }
    }
  }

} 
Example 51
Source File: WordCountProducer.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package alpakka.kafka

import java.util
import java.util.concurrent.ThreadLocalRandom

import akka.actor.ActorSystem
import akka.kafka.ProducerMessage.Message
import akka.kafka.ProducerSettings
import akka.kafka.scaladsl.Producer
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.{Done, NotUsed}
import org.apache.kafka.clients.producer.{Partitioner, ProducerRecord}
import org.apache.kafka.common.errors.{NetworkException, UnknownTopicOrPartitionException}
import org.apache.kafka.common.serialization.StringSerializer
import org.apache.kafka.common.{Cluster, PartitionInfo}

import scala.concurrent.Future
import scala.concurrent.duration._


class CustomPartitioner extends Partitioner {
  override def partition(topic: String, key: Any, keyBytes: Array[Byte], value: Any, valueBytes: Array[Byte], cluster: Cluster): Int = {
    val partitionInfoList: util.List[PartitionInfo] = cluster.availablePartitionsForTopic(topic)
    val partitionCount = partitionInfoList.size
    val fakeNewsPartition = 0

    //println("CustomPartitioner received key: " + key + " and value: " + value)

    if (value.toString.contains(WordCountProducer.fakeNewsKeyword)) {
      //println("CustomPartitioner send message: " + value + " to fakeNewsPartition")
      fakeNewsPartition
    }
    else ThreadLocalRandom.current.nextInt(1, partitionCount) // pick a random partition, excluding the fakeNews partition 0
  }

  override def close(): Unit = {
    println("CustomPartitioner: " + Thread.currentThread + " received close")
  }

  override def configure(configs: util.Map[String, _]): Unit = {
    println("CustomPartitioner received configure with configuration: " + configs)
  }
}

object CustomPartitioner {
  private def deserialize[V](objectData: Array[Byte]): V = org.apache.commons.lang3.SerializationUtils.deserialize(objectData).asInstanceOf[V]
} 
Example 52
Source File: PrintMoreNumbers.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream_actor_simple

import akka.actor.Actor
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{KillSwitches, UniqueKillSwitch}

import scala.concurrent.duration._

class PrintMoreNumbers extends Actor {
  implicit val system = context.system
  implicit val executionContext = context.system.dispatcher

  private val (killSwitch: UniqueKillSwitch, done) =
    Source.tick(0.seconds, 1.second, 1)
      .scan(0)(_ + _)
      .map(_.toString)
      .viaMat(KillSwitches.single)(Keep.right)
      .toMat(Sink.foreach(println))(Keep.both)
      .run()

  done.map(_ => self ! "done")

  override def receive: Receive = {
    //When the actor is stopped, it will also stop the stream
    case "stop" =>
      println("Stopping...")
      killSwitch.shutdown()
    case "done" =>
      println("Done")
      context.stop(self)
      context.system.terminate()
  }
} 
Example 53
Source File: WebSocketClient.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream_actor

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.ws._
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, Sink, Source}
import akka.stream.{FlowShape, SourceShape}
import sample.stream_actor.WindTurbineSimulator._

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

object WebSocketClient {
  def apply(id: String, endpoint: String, windTurbineSimulator: ActorRef)
           (implicit
            system: ActorSystem,
            executionContext: ExecutionContext) = {
    new WebSocketClient(id, endpoint, windTurbineSimulator)(system, executionContext)
  }
}

class WebSocketClient(id: String, endpoint: String, windTurbineSimulator: ActorRef)
                     (implicit
                      system: ActorSystem,
                      executionContext: ExecutionContext) {


  val webSocketFlow: Flow[Message, Message, Future[WebSocketUpgradeResponse]] = {
    val websocketUri = s"$endpoint/measurements/$id"
    Http().webSocketClientFlow(WebSocketRequest(websocketUri))
  }

  val outgoing = GraphDSL.create() { implicit builder =>
    val data = WindTurbineData(id)

    val flow = builder.add {
      Source.tick(1.second, 100.millis, ()) // valve for the WindTurbineData frequency
        .map(_ => TextMessage(data.getNext))
    }

    SourceShape(flow.out)
  }

  val incoming = GraphDSL.create() { implicit builder =>
    val flow = builder.add {
      Flow[Message]
        .collect {
          case TextMessage.Strict(text) =>
            Future.successful(text)
          case TextMessage.Streamed(textStream) =>
            textStream.runFold("")(_ + _)
              .flatMap(Future.successful)
        }
        .mapAsync(1)(identity)
        .map(each => println(s"Client received msg: $each"))
    }

    FlowShape(flow.in, flow.out)
  }

  val (upgradeResponse, closed) = Source.fromGraph(outgoing)
    .viaMat(webSocketFlow)(Keep.right) // keep the materialized Future[WebSocketUpgradeResponse]
    .via(incoming)
    .toMat(Sink.ignore)(Keep.both) // also keep the Future[Done]
    .run()


  val connected =
    upgradeResponse.map { upgrade =>
      upgrade.response.status match {
        case StatusCodes.SwitchingProtocols => windTurbineSimulator ! Upgraded
        case statusCode => windTurbineSimulator ! FailedUpgrade(statusCode)
      }
    }

  connected.onComplete {
    case Success(_) => windTurbineSimulator ! Connected
    case Failure(ex) => windTurbineSimulator ! ConnectionFailure(ex)
  }

  closed.map { _ =>
    windTurbineSimulator ! Terminated
  }
  closed.onComplete {
    case Success(_)  => windTurbineSimulator ! Connected
    case Failure(ex) => windTurbineSimulator ! ConnectionFailure(ex)
  }
} 
Example 54
Source File: SplitWhen.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream_shared_state

import java.nio.file.Paths

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.IOResult
import akka.stream.scaladsl.{FileIO, Flow, Framing, Keep, Sink, Source}
import akka.util.ByteString
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future
import scala.util.{Failure, Success}


object SplitWhen extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("SplitWhen")
  implicit val executionContext = system.dispatcher

  val nonLinearCapacityFactor = 100 //raise to see how it scales
  val filename = "splitWhen.csv"

  def genResourceFile() = {
    logger.info(s"Writing resource file: $filename...")

    def fileSink(filename: String): Sink[String, Future[IOResult]] =
      Flow[String]
        .map(s => ByteString(s + "\n"))
        .toMat(FileIO.toPath(Paths.get(filename)))(Keep.right)

    Source.fromIterator(() => (1 to nonLinearCapacityFactor).toList.combinations(2))
      .map(each => s"${each.head},${each.last}")
      .runWith(fileSink(filename))
  }

  val sourceOfLines = FileIO.fromPath(Paths.get(filename))
    .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 1024, allowTruncation = true)
      .map(_.utf8String))

  val csvToRecord: Flow[String, Record, NotUsed] = Flow[String]
    .map(_.split(",").map(_.trim))
    .map(stringArrayToRecord)

  val terminationHook: Flow[Record, Record, Unit] = Flow[Record]
    .watchTermination() { (_, done) =>
      done.onComplete {
        case Failure(err) => logger.info(s"Flow failed: $err")
        case _ => system.terminate(); logger.info(s"Flow terminated")
      }
    }

  val printSink = Sink.foreach[Vector[Record]](each => println(s"Reached sink: $each"))

  private def stringArrayToRecord(cols: Array[String]) = Record(cols(0), cols(1))

  private def hasKeyChanged = {
    () => {
      var lastRecordKey: Option[String] = None
      currentRecord: Record =>
        lastRecordKey match {
          case Some(currentRecord.key) | None =>
            lastRecordKey = Some(currentRecord.key)
            List((currentRecord, false))
          case _ =>
            lastRecordKey = Some(currentRecord.key)
            List((currentRecord, true))
        }
    }
  }

  genResourceFile().onComplete {
    case Success(_) =>
      logger.info(s"Start processing...")
      sourceOfLines
        .via(csvToRecord)
        .via(terminationHook)
        .statefulMapConcat(hasKeyChanged)   // stateful decision
        .splitWhen(_._2)                    // split when key has changed
        .map(_._1)                          // proceed with payload
        .fold(Vector.empty[Record])(_ :+ _) // sum payload
        .mergeSubstreams                    // better performance, but why?
        .runWith(printSink)
    case Failure(exception) => logger.info(s"Exception: $exception")
  }

  case class Record(key: String, value: String)
} 
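
The stateful split is easier to see with plain integers. A compact sketch of the same statefulMapConcat + splitWhen idea, with invented values:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source

object SplitWhenSketch extends App {
  implicit val system = ActorSystem("SplitWhenSketch")
  implicit val mat = ActorMaterializer()

  Source(List(1, 1, 2, 2, 2, 3))
    .statefulMapConcat { () =>
      var last: Option[Int] = None
      (elem: Int) => {
        val keyChanged = last.exists(_ != elem)
        last = Some(elem)
        List((elem, keyChanged))
      }
    }
    .splitWhen(_._2)                 // start a new substream whenever the key changes
    .map(_._1)
    .fold(Vector.empty[Int])(_ :+ _) // collect each substream into a Vector
    .mergeSubstreams
    .runForeach(println)             // Vector(1, 1), Vector(2, 2, 2), Vector(3)
}
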
Example 55
Source File: Blacklist.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream_shared_state

import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.stage._

import scala.concurrent.duration._



object Blacklist extends App {
  implicit val system = ActorSystem("Blacklist")

  val initBlacklist = Set.empty[String]

  val service: StateService[Set[String]] =
    Source.repeat("yes")
      .throttle(1, 1.second, 10, ThrottleMode.shaping)
      .viaMat(new ZipWithState(initBlacklist))(Keep.right)
      .filterNot { case (blacklist: Set[String], elem: String) => blacklist(elem) }
      .to(Sink.foreach(each => println(each._2)))
      .run()

  println("Starting with empty blacklist on a list of 'yes' elements -> elements are passing")

  Thread.sleep(2000)
  println("Inject new blacklist with value: 'yes' -> elements are filtered")
  service.update(Set("yes"))

  Thread.sleep(5000)
  println("Inject new blacklist with value: 'no' -> elements are passing again")
  service.update(Set("no"))
}


trait StateService[A] {
  def update(state: A): Unit
}

class StateServiceCallback[A](callback: AsyncCallback[A]) extends StateService[A] {
  override def update(state: A): Unit = callback.invoke(state)
}

class ZipWithState[S, I](initState: S) extends GraphStageWithMaterializedValue[FlowShape[I, (S, I)], StateService[S]] {
  val in = Inlet[I]("ZipWithState.in")
  val out = Outlet[(S, I)]("ZipWithState.out")

  override val shape: FlowShape[I, (S, I)] = FlowShape.of(in, out)

  override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, StateService[S]) = {
    val logic = new GraphStageLogic(shape) {
      private[this] var state: S = initState
      val updateStateCallback: AsyncCallback[S] =
        getAsyncCallback[S] {
          state = _
        }

      setHandler(in, new InHandler {
        override def onPush(): Unit = {
          push(out, (state, grab(in)))
        }
      })

      setHandler(out, new OutHandler {
        override def onPull(): Unit = {
          pull(in)
        }
      })
    }

    (logic, new StateServiceCallback(logic.updateStateCallback))
  }
} 
Example 56
Source File: ParametrizedFlow.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream_shared_state

import akka.Done
import akka.actor.{ActorSystem, Cancellable}
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, Sink, Source, SourceQueueWithComplete, Zip}
import akka.stream.{FlowShape, OverflowStrategy}

import scala.collection.immutable
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}



object ParametrizedFlow extends App {
  val service = ParameterizedFlowService

  Thread.sleep(5000)
  service.update(1.0)

  Thread.sleep(2000)
  service.update(1.5)
  Thread.sleep(2000)
  service.cancel()
  Thread.sleep(2000)

  println(service.result())
}

object ParameterizedFlowService {
  implicit val system = ActorSystem("ParameterizedFlowService")
  implicit val executionContext = system.dispatcher

  def update(element: Double): Unit = flow._1._2.offer(element)

  def cancel(): Boolean = flow._1._1.cancel()

  def result(): Future[Seq[Double]] = flow._2

  val fun = (flowValue: Int, paramValue: Double) => flowValue * paramValue
  val flow: ((Cancellable, SourceQueueWithComplete[Double]), Future[immutable.Seq[Double]]) =
    Source.tick(0.seconds, 500.millis, 10)
      .viaMat(createParamFlow(1, OverflowStrategy.dropBuffer, 0.5)(fun))(Keep.both)
      .wireTap(x => println(x))
      .toMat(Sink.seq)(Keep.both)
      .run()

  val done: Future[Done] = flow._1._2.watchCompletion()
  terminateWhen(done)

  private def createParamFlow[A, P, O](bufferSize: Int, overflowStrategy: OverflowStrategy, initialParam: P)(fun: (A, P) => O) =
    Flow.fromGraph(GraphDSL.create(Source.queue[P](bufferSize, overflowStrategy)) { implicit builder =>
      queue =>
        import GraphDSL.Implicits._
        val zip = builder.add(Zip[A, P]())
        //Interesting use of the extrapolate operator
        //based on https://doc.akka.io/docs/akka/current/stream/stream-rate.html#understanding-extrapolate-and-expand
        val extra = builder.add(Flow[P].extrapolate(Iterator.continually(_), Some(initialParam)))
        val map = builder.add(Flow[(A, P)].map(r => fun(r._1, r._2)))

        queue ~> extra ~> zip.in1
        zip.out ~> map
        FlowShape(zip.in0, map.out)
    })

  private def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
} 
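
Using Keep.both twice, as above, nests the materialized values into ((Cancellable, SourceQueueWithComplete[Double]), Future[Seq[Double]]), which is why the service navigates with flow._1._2. A small sketch (hypothetical names, mergeMat instead of the parametrized flow) showing the same nesting destructured directly:

import akka.actor.ActorSystem
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Keep, Sink, Source}

import scala.concurrent.duration._

object NestedMatSketch extends App {
  implicit val system: ActorSystem = ActorSystem("NestedMatSketch")
  import system.dispatcher

  // Two Keep.both combinations nest the materialized values:
  // ((Cancellable, SourceQueueWithComplete[Int]), Future[Seq[Int]])
  val ((tick, queue), result) =
    Source.tick(0.seconds, 100.millis, -1)
      .mergeMat(Source.queue[Int](8, OverflowStrategy.dropHead))(Keep.both)
      .take(20)
      .toMat(Sink.seq)(Keep.both)
      .run()

  queue.offer(42) // offer result is ignored here; this only illustrates the queue handle

  result.foreach { xs =>
    println(xs)
    tick.cancel()
    system.terminate()
  }
}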
Example 57
Source File: TcpEcho.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Framing, Keep, Sink, Source, Tcp}
import akka.util.ByteString

import scala.concurrent.Future
import scala.util.{Failure, Success}


object TcpEcho extends App {
  val systemServer = ActorSystem("TcpEchoServer")
  val systemClient = ActorSystem("TcpEchoClient")

  var serverBinding: Future[Tcp.ServerBinding] = _

  if (args.isEmpty) {
    val (address, port) = ("127.0.0.1", 6000)
    serverBinding = server(systemServer, address, port)
    (1 to 10).par.foreach(each => client(each, systemClient, address, port))
  } else {
    val (address, port) =
      if (args.length == 3) (args(1), args(2).toInt)
      else ("127.0.0.1", 6000)
    if (args(0) == "server") {
      serverBinding = server(systemServer, address, port)
    } else if (args(0) == "client") {
      client(1, systemClient, address, port)
    }
  }

  def server(system: ActorSystem, address: String, port: Int): Future[Tcp.ServerBinding] = {
    implicit val sys = system
    implicit val ec = system.dispatcher

    val handler = Sink.foreach[Tcp.IncomingConnection] { connection =>

      // parse incoming commands and append !
      val commandParser = Flow[String].takeWhile(_ != "BYE").map(_ + "!")

      val welcomeMsg = s"Welcome to: ${connection.localAddress}, you are: ${connection.remoteAddress}!"
      val welcomeSource = Source.single(welcomeMsg)

      val serverEchoFlow = Flow[ByteString]
        .via(Framing.delimiter( //chunk the inputs up into actual lines of text
          ByteString("\n"),
          maximumFrameLength = 256,
          allowTruncation = true))
        .map(_.utf8String)
        .via(commandParser)
        .merge(welcomeSource) // merge the initial banner after parser
        .map(_ + "\n")
        .map(ByteString(_))
        .watchTermination()((_, done) => done.onComplete {
        case Failure(err) =>
          println(s"Server flow failed: $err")
        case _ => println(s"Server flow terminated for client: ${connection.remoteAddress}")
      })
      connection.handleWith(serverEchoFlow)
    }
    
    val connections = Tcp().bind(interface = address, port = port)
    val binding = connections.watchTermination()(Keep.left).to(handler).run()

    binding.onComplete {
      case Success(b) =>
        println("Server started, listening on: " + b.localAddress)
      case Failure(e) =>
        println(s"Server could not bind to: $address:$port: ${e.getMessage}")
        system.terminate()
    }

    binding
  }

  def client(id: Int, system: ActorSystem, address: String, port: Int): Unit = {
    implicit val sys = system
    implicit val ec = system.dispatcher

    val connection: Flow[ByteString, ByteString, Future[Tcp.OutgoingConnection]] = Tcp().outgoingConnection(address, port)
    val testInput = ('a' to 'z').map(ByteString(_)) ++ Seq(ByteString("BYE"))
    val source =  Source(testInput).via(connection)
    val closed = source.runForeach(each => println(s"Client: $id received echo: ${each.utf8String}"))
    closed.onComplete(each => println(s"Client: $id closed: $each"))
  }
} 
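
The server above relies on watchTermination()(Keep.left) so that run() still yields the Future[Tcp.ServerBinding] from Tcp().bind rather than the handler's materialized value. A stripped-down sketch of that Keep.left decision, assuming an illustrative port and a plain echo handler:

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Keep, Sink, Tcp}
import akka.util.ByteString

import scala.concurrent.Future

object KeepLeftBindingSketch extends App {
  implicit val system: ActorSystem = ActorSystem("KeepLeftBindingSketch")
  import system.dispatcher

  // Keep.left: run() returns the Future[ServerBinding] of Tcp().bind, not the sink's value
  val binding: Future[Tcp.ServerBinding] =
    Tcp()
      .bind("127.0.0.1", 6001)
      .toMat(Sink.foreach(connection => connection.handleWith(Flow[ByteString])))(Keep.left)
      .run()

  binding.foreach(b => println(s"Bound to ${b.localAddress}"))
}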
Example 58
Source File: PartitionHubWithDynamicSinks.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Keep, PartitionHub, RunnableGraph, Source}

import scala.concurrent.duration._


object PartitionHubWithDynamicSinks {
  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  def main(args: Array[String]): Unit = {

    val producer = Source.tick(1.second, 100.millis, "message").zipWith(Source(1 to 100))((a, b) => s"$a-$b")

    // A new instance of the partitioner function and its state is created for each materialization of the PartitionHub
    def partitionRoundRobin(): (PartitionHub.ConsumerInfo, String) => Long = {
      var i = -1L

      (info, elem) => {
        i += 1
        info.consumerIdByIdx((i % info.size).toInt)
      }
    }

    def partitionToFastestConsumer(): (PartitionHub.ConsumerInfo, String) => Long = {
      (info: PartitionHub.ConsumerInfo, each:String) => info.consumerIds.minBy(id => info.queueSize(id))
    }

    // Attach a PartitionHub Sink to the producer. This will materialize to a corresponding Source
    // We need to use toMat and Keep.right since by default the materialized value to the left is used
    val runnableGraph: RunnableGraph[Source[String, NotUsed]] =
    producer.toMat(PartitionHub.statefulSink(
      //Switch the partitioning function
      () => partitionRoundRobin(),
      //() => partitionToFastestConsumer(),
      startAfterNrOfConsumers = 1, bufferSize = 1))(Keep.right)

    // By running/materializing the producer, we get back a Source, which
    // gives us access to the elements published by the producer.
    val fromProducer: Source[String, NotUsed] = runnableGraph.run()

    // Attach three dynamic fan-out sinks to the PartitionHub
    fromProducer.runForeach(msg => println("fast consumer1 received: " + msg))
    fromProducer.throttle(100, 1.millis, 10, ThrottleMode.Shaping)
      .runForeach(msg => println("slow consumer2 received: " + msg))
    fromProducer.throttle(100, 2.millis, 10, ThrottleMode.Shaping)
      .runForeach(msg => println("really slow consumer3 received: " + msg))
  }
} 
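
As the comments above note, to(...) would keep the left (producer) value by default, so Keep.right is what hands back the hub's Source. A compact sketch of the same decision using BroadcastHub as a stand-in (not part of the example above):

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{BroadcastHub, Keep, Source}

import scala.concurrent.duration._

object KeepRightHubSketch extends App {
  implicit val system: ActorSystem = ActorSystem("KeepRightHubSketch")

  // With the default Keep.left (producer.to(BroadcastHub.sink).run()) the hub's
  // Source would be lost; Keep.right makes it the materialized value.
  val fromHub: Source[Int, NotUsed] =
    Source(1 to 10)
      .throttle(1, 100.millis)
      .toMat(BroadcastHub.sink[Int])(Keep.right)
      .run()

  fromHub.runForeach(n => println(s"consumer received: $n"))
}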
Example 59
Source File: SequentialFileSplittingParquetSink.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s

import akka.Done
import akka.stream.scaladsl.{Flow, Keep, Sink}
import org.apache.hadoop.fs.Path
import org.apache.parquet.schema.MessageType
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future

private[parquet4s] object SequentialFileSplittingParquetSink extends IOOps {

  protected val logger: Logger = LoggerFactory.getLogger(this.getClass)

  def apply[T: ParquetRecordEncoder : ParquetSchemaResolver](path: Path,
                                                             maxRecordsPerFile: Long,
                                                             options: ParquetWriter.Options = ParquetWriter.Options()
                                                            ): Sink[T, Future[Done]] = {
    val schema = ParquetSchemaResolver.resolveSchema[T]
    val valueCodecConfiguration = options.toValueCodecConfiguration

    validateWritePath(path, options)

    def encode(data: T): RowParquetRecord = ParquetRecordEncoder.encode[T](data, valueCodecConfiguration)

    Flow[T]
      .zipWithIndex
      .map { case (elem, index) => OrderedChunkElem(encode(elem), index) }
      .fold(OrderedChunk(path, schema, maxRecordsPerFile, options))(_.write(_))
      .map(_.close())
      .toMat(Sink.ignore)(Keep.right)
  }

  private case class OrderedChunkElem(record: RowParquetRecord, index: Long) {
    def isSplit(maxRecordsPerFile: Long): Boolean = index % maxRecordsPerFile == 0
  }

  private trait OrderedChunk {
    def write(elem: OrderedChunkElem): OrderedChunk
    def close(): Unit
  }

  private object OrderedChunk {

    def apply(basePath: Path,
              schema: MessageType,
              maxRecordsPerFile: Long,
              options: ParquetWriter.Options): OrderedChunk = new PendingOrderedChunk(basePath, schema, maxRecordsPerFile, options)


    private[OrderedChunk] class PendingOrderedChunk(basePath: Path,
                                                    schema: MessageType,
                                                    maxRecordsPerFile: Long,
                                                    options: ParquetWriter.Options) extends OrderedChunk {
      override def write(elem: OrderedChunkElem): OrderedChunk = {
        val chunkNumber: Int = Math.floorDiv(elem.index, maxRecordsPerFile).toInt
        val chunkPath = Path.mergePaths(basePath, new Path(chunkFileName(chunkNumber)))
        val writer = ParquetWriter.internalWriter(chunkPath, schema, options)
        writer.write(elem.record)
        new StartedOrderedChunk(basePath, schema, maxRecordsPerFile, options, chunkPath, writer, acc = 1)
      }

      override def close(): Unit = ()

      private def chunkFileName(chunkNumber: Int): String = f"/part-$chunkNumber%05d.parquet"
    }

    private[OrderedChunk] class StartedOrderedChunk(basePath: Path,
                                                    schema: MessageType,
                                                    maxRecordsPerFile: Long,
                                                    options: ParquetWriter.Options,
                                                    chunkPath: Path,
                                                    writer: ParquetWriter.InternalWriter,
                                                    acc: Long) extends OrderedChunk {
      override def write(elem: OrderedChunkElem): OrderedChunk = {
        if (elem.isSplit(maxRecordsPerFile)) {
          this.close()
          new PendingOrderedChunk(basePath, schema, maxRecordsPerFile, options).write(elem)
        } else {
          writer.write(elem.record)
          new StartedOrderedChunk(basePath, schema, maxRecordsPerFile, options, chunkPath, writer, acc = acc + 1)
        }
      }

      override def close(): Unit = {
        if (logger.isDebugEnabled) logger.debug(s"$acc records were successfully written to $chunkPath")
        writer.close()
      }
    }
  }

} 
Example 60
Source File: SingleFileParquetSink.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s

import akka.Done
import akka.stream.scaladsl.{Flow, Keep, Sink}
import org.apache.hadoop.fs.Path
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future

private[parquet4s] object SingleFileParquetSink {

  protected val logger: Logger = LoggerFactory.getLogger(this.getClass)

  def apply[T: ParquetRecordEncoder : ParquetSchemaResolver](path: Path,
                                                             options: ParquetWriter.Options = ParquetWriter.Options()
                                                            ): Sink[T, Future[Done]] = {
    val schema = ParquetSchemaResolver.resolveSchema[T]
    val writer = ParquetWriter.internalWriter(path, schema, options)
    val valueCodecConfiguration = options.toValueCodecConfiguration
    val isDebugEnabled = logger.isDebugEnabled

    def encode(data: T): RowParquetRecord = ParquetRecordEncoder.encode[T](data, valueCodecConfiguration)

    Flow[T]
      .map(encode)
      .fold(0) { case (acc, record) => writer.write(record); acc + 1}
      .map { count =>
        if (isDebugEnabled) logger.debug(s"$count records were successfully written to $path")
        writer.close()
      }
      .toMat(Sink.ignore)(Keep.right)
  }

} 
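
Both parquet sinks above share one Keep pattern: a side-effecting Flow is terminated with toMat(Sink.ignore)(Keep.right), producing a Sink[T, Future[Done]] whose materialized value signals completion. A hedged sketch of that shape, with a println standing in for the parquet writer:

import akka.Done
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}

import scala.concurrent.Future

object CompletionFutureSinkSketch extends App {
  implicit val system: ActorSystem = ActorSystem("CompletionFutureSinkSketch")
  import system.dispatcher

  // Keep.right exposes Sink.ignore's Future[Done] as the sink's materialized value
  def writingSink[T]: Sink[T, Future[Done]] =
    Flow[T]
      .map { elem => println(s"writing $elem"); elem } // stand-in for the real write
      .toMat(Sink.ignore)(Keep.right)

  Source(1 to 3)
    .runWith(writingSink[Int])
    .foreach(_ => system.terminate())
}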
Example 61
Source File: IndefiniteStreamParquetSink.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s
import akka.stream.FlowShape
import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, Keep, Sink, ZipWith}
import com.github.mjakubowski84.parquet4s.ParquetWriter.ParquetWriterFactory
import org.apache.hadoop.fs.Path
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.duration.FiniteDuration


private[parquet4s] object IndefiniteStreamParquetSink extends IOOps {

  protected val logger: Logger = LoggerFactory.getLogger(this.getClass)

  def apply[In, ToWrite: ParquetWriterFactory, Mat](path: Path,
                                                    maxChunkSize: Int,
                                                    chunkWriteTimeWindow: FiniteDuration,
                                                    buildChunkPath: ChunkPathBuilder[In] = ChunkPathBuilder.default,
                                                    preWriteTransformation: In => ToWrite = identity[In] _,
                                                    postWriteSink: Sink[Seq[In], Mat] = Sink.ignore,
                                                    options: ParquetWriter.Options = ParquetWriter.Options()
                                            ): Sink[In, Mat] = {
    validateWritePath(path, options)

    val internalFlow = Flow.fromGraph(GraphDSL.create() { implicit b =>
      import GraphDSL.Implicits._
    
      val inChunkFlow = b.add(Flow[In].groupedWithin(maxChunkSize, chunkWriteTimeWindow))
      val broadcastChunks = b.add(Broadcast[Seq[In]](outputPorts = 2))
      val writeFlow = Flow[Seq[In]].map { chunk =>
        val toWrite = chunk.map(preWriteTransformation)
        val chunkPath = buildChunkPath(path, chunk)
        if (logger.isDebugEnabled()) logger.debug(s"Writing ${toWrite.size} records to $chunkPath")
        ParquetWriter.writeAndClose(chunkPath.toString, toWrite, options)
      }
      val zip = b.add(ZipWith[Seq[In], Unit, Seq[In]]((chunk, _) => chunk))
      
      inChunkFlow ~> broadcastChunks ~> writeFlow ~> zip.in1
                     broadcastChunks ~> zip.in0

      FlowShape(inChunkFlow.in, zip.out)               
    })

    internalFlow.toMat(postWriteSink)(Keep.right)
  }

} 
Example 62
Source File: BatchWriteStageSpec.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.adapter.stream

import akka.actor._
import akka.stream._
import akka.stream.scaladsl.Keep
import akka.stream.testkit._
import akka.stream.testkit.scaladsl.{ TestSink, TestSource }
import akka.pattern
import akka.testkit._

import com.rbmhtechnology.eventuate.DurableEvent

import org.scalatest._

import scala.collection.immutable.Seq
import scala.concurrent._
import scala.concurrent.duration._
import scala.util.Random

class BatchWriteStageSpec extends TestKit(ActorSystem("test")) with WordSpecLike with Matchers with BeforeAndAfterAll with BeforeAndAfterEach {
  import BatchWriteStage.BatchWriter

  private val settings: DurableEventWriterSettings =
    new DurableEventWriterSettings(system.settings.config)

  implicit val materializer: Materializer =
    ActorMaterializer()

  private var src: TestPublisher.Probe[Seq[DurableEvent]] = _
  private var snk: TestSubscriber.Probe[Seq[DurableEvent]] = _

  override def beforeEach(): Unit = {
    val probes = TestSource.probe[Seq[DurableEvent]]
      .via(new BatchWriteStage(ec => writer(ec)))
      .toMat(TestSink.probe[Seq[DurableEvent]])(Keep.both)
      .run()

    src = probes._1
    snk = probes._2
  }

  override def afterEach(): Unit = {
    snk.cancel()
  }

  override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }

  private def random: Int =
    Random.nextInt(100)

  private def writer(implicit ec: ExecutionContext): BatchWriter = events =>
    if (events.exists(_.payload == "boom")) Future(throw TestException)
    else pattern.after(random.millis, system.scheduler)(Future(events))

  "A BatchWriterStage" must {
    "write batches sequentially" in {
      val b1 = Seq("a", "b", "c").map(DurableEvent(_))
      val b2 = Seq("d", "e", "f").map(DurableEvent(_))
      val b3 = Seq("g", "h", "i").map(DurableEvent(_))

      snk.request(3)
      src.sendNext(b1)
      src.sendNext(b2)
      src.sendNext(b3)
      snk.expectNext() should be(b1)
      snk.expectNext() should be(b2)
      snk.expectNext() should be(b3)
    }
    "fail if the batch writer fails" in {
      val b = Seq("a", "boom", "c").map(DurableEvent(_))

      snk.request(3)
      src.sendNext(b)
      snk.expectError(TestException)
    }
  }
} 
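
The Keep.both in beforeEach is what returns both test probes: the TestPublisher.Probe from TestSource and the TestSubscriber.Probe from TestSink. A self-contained sketch of that pairing (the doubling flow and assertions are illustrative only):

import akka.actor.ActorSystem
import akka.stream.scaladsl.Keep
import akka.stream.testkit.scaladsl.{TestSink, TestSource}

object ProbePairSketch extends App {
  implicit val system: ActorSystem = ActorSystem("ProbePairSketch")

  // Keep.both materializes (TestPublisher.Probe[Int], TestSubscriber.Probe[Int])
  val (pub, sub) = TestSource.probe[Int]
    .map(_ * 2)
    .toMat(TestSink.probe[Int])(Keep.both)
    .run()

  sub.request(1)
  pub.sendNext(21)
  sub.expectNext(42)
  pub.sendComplete()
  sub.expectComplete()

  system.terminate()
}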
Example 63
Source File: MessageSink.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s.indefinite

import java.sql.Timestamp
import java.util.UUID

import akka.Done
import akka.kafka.CommitterSettings
import akka.kafka.ConsumerMessage.CommittableOffsetBatch
import akka.kafka.scaladsl.Committer
import akka.stream.scaladsl.{Flow, Keep, Sink}
import com.github.mjakubowski84.parquet4s.{ChunkPathBuilder, ParquetStreams, ParquetWriter}
import com.google.common.io.Files
import org.apache.hadoop.fs.Path
import org.apache.parquet.hadoop.metadata.CompressionCodecName

import scala.concurrent.Future
import scala.concurrent.duration._

object MessageSink {

  case class Data(timestamp: Timestamp, word: String)

  val MaxChunkSize: Int = 128
  val ChunkWriteTimeWindow: FiniteDuration = 10.seconds
  val WriteDirectoryName: String = "messages"

}

trait MessageSink {

  this: Akka =>

  import MessageSink._
  import MessageSource._

  protected val baseWritePath: String = new Path(Files.createTempDir().getAbsolutePath, WriteDirectoryName).toString

  private val writerOptions = ParquetWriter.Options(compressionCodecName = CompressionCodecName.SNAPPY)

  private lazy val committerSink = Flow.apply[Seq[Message]].map { messages =>
    CommittableOffsetBatch(messages.map(_.committableOffset))
  }.toMat(Committer.sink(CommitterSettings(system)))(Keep.right)

  def chunkPath: ChunkPathBuilder[Message] = {
    case (basePath, chunk) =>
      val lastElementDateTime = new Timestamp(chunk.last.record.timestamp()).toLocalDateTime
      val year = lastElementDateTime.getYear
      val month = lastElementDateTime.getMonthValue
      val day = lastElementDateTime.getDayOfMonth
      val uuid = UUID.randomUUID()

      basePath.suffix(s"/$year/$month/$day/part-$uuid.parquet")
  }

  lazy val messageSink: Sink[Message, Future[Done]] = ParquetStreams.toParquetIndefinite(
    path = baseWritePath,
    maxChunkSize = MaxChunkSize,
    chunkWriteTimeWindow = ChunkWriteTimeWindow,
    buildChunkPath = chunkPath,
    preWriteTransformation = { message: Message =>
      Data(
        timestamp = new Timestamp(message.record.timestamp()),
        word = message.record.value()
      )
    },
    postWriteSink = committerSink,
    options = writerOptions
  )

} 
Example 64
Source File: ExampleApp.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s.indefinite

import akka.Done
import akka.kafka.scaladsl.Consumer.DrainingControl
import akka.stream.scaladsl.Keep

import scala.concurrent.Await
import scala.concurrent.duration._

object ExampleApp
  extends App
    with Logger
    with Akka
    with Kafka
    with RandomDataProducer
    with MessageSource
    with MessageSink {


  startKafka()
  startDataProducer()

  logger.info(s"Starting stream that reads messages from Kafka and writes them to $baseWritePath...")
  val streamControl: DrainingControl[Done] = messageSource
    .toMat(messageSink)(Keep.both)
    .mapMaterializedValue(DrainingControl.apply)
    .run()

  def stopStream(): Unit = {
    logger.info("Stopping stream...")
    Await.ready(streamControl.drainAndShutdown(), 10.second)
  }

  sys.addShutdownHook {
    stopDataProducer()
    stopStream()
    stopAkka()
    stopKafka()
    logger.info("Exiting...")
  } 
} 
Example 65
Source File: FailedSource.scala    From intro-to-akka-streams   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.streams.source

import akka.Done
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{ Keep, Merge, Sink, Source, SourceQueueWithComplete }
import com.github.dnvriend.streams.TestSpec

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.collection.immutable._

class FailedSource extends TestSpec {
  it should "fail the stream" in {
    Source.failed[Int](new RuntimeException("test error")).testProbe { tp ⇒
      tp.request(Long.MaxValue)
      tp.expectError()
    }
  }

  it should "complete a stream" in {
    val (queue: SourceQueueWithComplete[Int], done: Future[Done]) = Source.queue[Int](1, OverflowStrategy.dropNew)
      .toMat(Sink.ignore)(Keep.both).run
    queue.complete()
    done.toTry should be a 'success
  }

  it should "complete a stream normally" in {
    val (queue: SourceQueueWithComplete[String], done: Future[Done]) = Source.queue[String](1, OverflowStrategy.dropNew).flatMapConcat {
      case "stop" ⇒ Source.failed(new RuntimeException("test error"))
      case str    ⇒ Source.single(str)
    }.toMat(Sink.seq)(Keep.both).run

    Thread.sleep(3000)
    queue.offer("foo").futureValue
    queue.offer("bar").futureValue
    queue.complete()
    done.futureValue shouldBe List("foo", "bar")
  }

  it should "force stop a stream with an error" in {
    val (queue: SourceQueueWithComplete[String], done: Future[Done]) = Source.queue[String](1, OverflowStrategy.dropNew).flatMapConcat {
      case "stop" ⇒ Source.failed(new RuntimeException("test error"))
      case str    ⇒ Source.single(str)
    }.toMat(Sink.seq)(Keep.both).run

    Thread.sleep(3000)
    queue.offer("stop").futureValue
    done.toTry should be a 'failure
  }

} 
Example 66
Source File: QueueSourceTest.scala    From intro-to-akka-streams   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.streams.source

import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{ Keep, Sink, Source, SourceQueueWithComplete }
import com.github.dnvriend.streams.TestSpec

import scala.collection.immutable._
import scala.concurrent.Future

class QueueSourceTest extends TestSpec {
  it should "queue a b and c and return Seq(a, b, c)" in {
    val (queue: SourceQueueWithComplete[String], xs: Future[Seq[String]]) =
      Source.queue[String](Int.MaxValue, OverflowStrategy.backpressure).toMat(Sink.seq)(Keep.both).run()

    queue.offer("a").toTry should be a 'success // offer 'a' to stream
    queue.offer("b").toTry should be a 'success // b
    queue.offer("c").toTry should be a 'success // and c

    // complete the queue
    queue.complete()
    queue.watchCompletion().toTry should be a 'success

    // get the results of the stream
    xs.futureValue shouldEqual Seq("a", "b", "c")
    xs.futureValue should not equal Seq("c", "b", "a")
  }
} 
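
Outside a test, the same Keep.both pairing of queue handle and result future looks like this (a minimal sketch with illustrative names):

import akka.actor.ActorSystem
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Keep, Sink, Source}

object QueueKeepBothSketch extends App {
  implicit val system: ActorSystem = ActorSystem("QueueKeepBothSketch")
  import system.dispatcher

  // Keep.both: (SourceQueueWithComplete[String], Future[Seq[String]])
  val (queue, result) =
    Source.queue[String](16, OverflowStrategy.backpressure)
      .toMat(Sink.seq)(Keep.both)
      .run()

  queue.offer("a")
  queue.offer("b")
  queue.complete()

  result.foreach { xs =>
    println(xs) // Vector(a, b)
    system.terminate()
  }
}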
Example 67
Source File: TransferTransformFile.scala    From streams-tests   with Apache License 2.0 5 votes vote down vote up
package com.softwaremill.streams

import java.io.File

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.io.Framing
import akka.stream.scaladsl.{FileIO, Keep}
import akka.util.ByteString
import com.softwaremill.streams.util.TestFiles
import com.softwaremill.streams.util.Timed._

import scala.concurrent.{Await, Future}
import scalaz.stream.{io, text}
import scala.concurrent.duration._

trait TransferTransformFile {
  
  def run(from: File, to: File): Long
}

object AkkaStreamsTransferTransformFile extends TransferTransformFile {
  private lazy implicit val system = ActorSystem()

  override def run(from: File, to: File) = {
    implicit val mat = ActorMaterializer()

    val r: Future[Long] = FileIO.fromFile(from)
      .via(Framing.delimiter(ByteString("\n"), 1048576))
      .map(_.utf8String)
      .filter(!_.contains("#!@"))
      .map(_.replace("*", "0"))
      .intersperse("\n")
      .map(ByteString(_))
      .toMat(FileIO.toFile(to))(Keep.right)
      .run()

    Await.result(r, 1.hour)
  }

  def shutdown() = {
    system.terminate()
  }
}

object ScalazStreamsTransferTransformFile extends TransferTransformFile {
  override def run(from: File, to: File) = {
    io.linesR(from.getAbsolutePath)
      .filter(!_.contains("#!@"))
      .map(_.replace("*", "0"))
      .intersperse("\n")
      .pipe(text.utf8Encode)
      .to(io.fileChunkW(to.getAbsolutePath))
      .run
      .run

    to.length()
  }
}

object TransferTransformFileRunner extends App {
  def runTransfer(ttf: TransferTransformFile, sizeMB: Int): String = {
    val output = File.createTempFile("fft", "txt")
    try {
      ttf.run(TestFiles.testFile(sizeMB), output).toString
    } finally output.delete()
  }

  val tests = List(
    (ScalazStreamsTransferTransformFile, 10),
    (ScalazStreamsTransferTransformFile, 100),
    (ScalazStreamsTransferTransformFile, 500),
    (AkkaStreamsTransferTransformFile, 10),
    (AkkaStreamsTransferTransformFile, 100),
    (AkkaStreamsTransferTransformFile, 500)
  )

  runTests(tests.map { case (ttf, sizeMB) =>
    (s"${if (ttf == ScalazStreamsTransferTransformFile) "scalaz" else "akka"}, $sizeMB MB",
      () => runTransfer(ttf, sizeMB))
  }, 3)

  AkkaStreamsTransferTransformFile.shutdown()
} 
Example 68
Source File: AkkaResourceOwnerSpec.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.resources.akka

import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.Materializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.{Done, NotUsed}
import com.daml.resources.ResourceOwner
import org.scalatest.{AsyncWordSpec, Matchers}

import scala.concurrent.{Future, Promise}

class AkkaResourceOwnerSpec extends AsyncWordSpec with Matchers {
  "a function returning an ActorSystem" should {
    "convert to a ResourceOwner" in {
      val testPromise = Promise[Int]()
      class TestActor extends Actor {
        @SuppressWarnings(Array("org.wartremover.warts.Any"))
        override def receive: Receive = {
          case value: Int => testPromise.success(value)
          case value => testPromise.failure(new IllegalArgumentException(s"$value"))
        }
      }

      val resource = for {
        actorSystem <- AkkaResourceOwner
          .forActorSystem(() => ActorSystem("TestActorSystem"))
          .acquire()
        actor <- ResourceOwner
          .successful(actorSystem.actorOf(Props(new TestActor)))
          .acquire()
      } yield (actorSystem, actor)

      for {
        resourceFuture <- resource.asFuture
        (actorSystem, actor) = resourceFuture
        _ = actor ! 7
        result <- testPromise.future
        _ <- resource.release()
      } yield {
        result should be(7)
        an[IllegalStateException] should be thrownBy actorSystem.actorOf(Props(new TestActor))
      }
    }
  }

  "a function returning a Materializer" should {
    "convert to a ResourceOwner" in {
      val resource = for {
        actorSystem <- AkkaResourceOwner
          .forActorSystem(() => ActorSystem("TestActorSystem"))
          .acquire()
        materializer <- AkkaResourceOwner.forMaterializer(() => Materializer(actorSystem)).acquire()
      } yield materializer

      for {
        materializer <- resource.asFuture
        numbers <- Source(1 to 10)
          .toMat(Sink.seq)(Keep.right[NotUsed, Future[Seq[Int]]])
          .run()(materializer)
        _ <- resource.release()
      } yield {
        numbers should be(1 to 10)
        an[IllegalStateException] should be thrownBy Source
          .single(0)
          .toMat(Sink.ignore)(Keep.right[NotUsed, Future[Done]])
          .run()(materializer)
      }
    }
  }
} 
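
The explicit type parameters on Keep.right above (Keep.right[NotUsed, Future[Seq[Int]]]) are only there to guide inference; each Keep combinator is an ordinary function over the two materialized values, as this small sketch illustrates:

import akka.NotUsed
import akka.stream.scaladsl.Keep

object KeepAsFunctionsSketch {
  // Each Keep combinator is just a function (left, right) => chosen value
  val keepLeft:  (Int, String) => Int           = Keep.left
  val keepRight: (Int, String) => String        = Keep.right
  val keepBoth:  (Int, String) => (Int, String) = Keep.both
  val keepNone:  (Int, String) => NotUsed       = Keep.none
}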
Example 69
Source File: DispatcherTest.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.akkastreams.dispatcher

import java.util.concurrent.atomic.AtomicReference

import akka.NotUsed
import akka.stream.scaladsl.{Keep, Sink}
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import com.daml.platform.akkastreams.dispatcher.SubSource.OneAfterAnother
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.{ExecutionContextExecutor, Future}

//TODO: merge/review the tests we have around the Dispatcher!
class DispatcherTest extends WordSpec with AkkaBeforeAndAfterAll with Matchers with ScalaFutures {

  override implicit def patienceConfig: PatienceConfig =
    PatienceConfig(scaled(Span(10, Seconds)), scaled(Span(250, Milliseconds)))

  "A Dispatcher" should {
    "not race when creating new subscriptions" in {
      // The test setup here is a little different from the above tests,
      // because we wanted to be specific about emitted pairs and use of Thread.sleep.

      implicit val ec: ExecutionContextExecutor = materializer.executionContext

      val elements = new AtomicReference(Map.empty[Int, Int])
      def readElement(i: Int): Future[Int] = Future {
        Thread.sleep(10) // In a previous version of Dispatcher, this sleep caused a race condition.
        elements.get()(i)
      }
      def readSuccessor(i: Int): Int = i + 1

      // compromise between catching flakes and not taking too long
      0 until 25 foreach { _ =>
        val d = Dispatcher("test", 0, 0)

        // Verify that the results are what we expected
        val subscriptions = 1 until 10 map { i =>
          elements.updateAndGet(m => m + (i -> i))
          d.signalNewHead(i)
          d.startingAt(i - 1, OneAfterAnother(readSuccessor, readElement))
            .toMat(Sink.collection)(Keep.right[NotUsed, Future[Seq[(Int, Int)]]])
            .run()
        }

        d.close()

        subscriptions.zip(1 until 10) foreach {
          case (f, i) =>
            whenReady(f) { vals =>
              vals.map(_._1) should contain theSameElementsAs (i to 9)
              vals.map(_._2) should contain theSameElementsAs (i until 10)
            }
        }
      }
    }
  }
} 
Example 70
Source File: TrackerImplTest.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services.tracking

import akka.NotUsed
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Keep, Source, SourceQueueWithComplete}
import akka.stream.testkit.TestSubscriber
import akka.stream.testkit.scaladsl.TestSink
import com.daml.ledger.api.testing.utils.{
  AkkaBeforeAndAfterAll,
  IsStatusException,
  TestingException
}
import com.daml.ledger.api.v1.command_service.SubmitAndWaitRequest
import com.daml.ledger.api.v1.commands.Commands
import com.daml.ledger.api.v1.completion.Completion
import com.daml.dec.DirectExecutionContext
import com.google.rpc.status.{Status => RpcStatus}
import io.grpc.Status
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterEach, Matchers, Succeeded, WordSpec}

import scala.concurrent.ExecutionContext.Implicits.global

class TrackerImplTest
    extends WordSpec
    with Matchers
    with BeforeAndAfterEach
    with ScalaFutures
    with AkkaBeforeAndAfterAll {

  private var sut: Tracker = _
  private var consumer: TestSubscriber.Probe[NotUsed] = _
  private var queue: SourceQueueWithComplete[TrackerImpl.QueueInput] = _

  private def input(cid: Int) = SubmitAndWaitRequest(Some(Commands(commandId = cid.toString)))

  override protected def beforeEach(): Unit = {
    val (q, sink) = Source
      .queue[TrackerImpl.QueueInput](1, OverflowStrategy.dropNew)
      .map { in =>
        in.context.success(Completion(in.value.getCommands.commandId, Some(RpcStatus())))
        NotUsed
      }
      .toMat(TestSink.probe[NotUsed])(Keep.both)
      .run()
    queue = q
    sut = new TrackerImpl(q)
    consumer = sink
  }

  override protected def afterEach(): Unit = {
    consumer.cancel()
    queue.complete()
  }

  "Tracker Implementation" when {

    "input is submitted, and the queue is available" should {

      "work successfully" in {

        val resultF1 = sut.track(input(1))
        consumer.requestNext()
        val resultF = resultF1.flatMap(_ => sut.track(input(2)))(DirectExecutionContext)
        consumer.requestNext()
        whenReady(resultF)(_ => Succeeded)
      }
    }

    "input is submitted, and the queue is backpressuring" should {

      "return a RESOURCE_EXHAUSTED error" in {

        sut.track(input(1))
        whenReady(sut.track(input(2)).failed)(IsStatusException(Status.RESOURCE_EXHAUSTED))
      }
    }

    "input is submitted, and the queue has been completed" should {

      "return an ABORTED error" in {

        queue.complete()
        whenReady(sut.track(input(2)).failed)(IsStatusException(Status.ABORTED))
      }
    }

    "input is submitted, and the queue has failed" should {

      "return an ABORTED error" in {

        queue.fail(TestingException("The queue fails with this error."))
        whenReady(sut.track(input(2)).failed)(IsStatusException(Status.ABORTED))
      }
    }
  }
} 
Example 71
Source File: LedgerEntriesSpec.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.stores.ledger.inmemory

import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import com.daml.ledger.participant.state.v1.Offset
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import org.scalatest.{AsyncWordSpec, Inspectors, Matchers}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.Random

class LedgerEntriesSpec
    extends AsyncWordSpec
    with Matchers
    with AkkaBeforeAndAfterAll
    with Inspectors {

  case class Error(msg: String)

  case class Transaction(content: String)

  val NO_OF_MESSAGES = 10000
  val NO_OF_SUBSCRIBERS = 50

  private def genTransactions() = (1 to NO_OF_MESSAGES).map { i =>
    if (Random.nextBoolean())
      Right(Transaction(i.toString))
    else
      Left(Error(i.toString))
  }

  "LedgerEntries" should {

    "store new blocks and a late subscriber can read them" in {
      val ledger = new LedgerEntries[Either[Error, Transaction]](_.toString)
      val transactions = genTransactions()

      transactions.foreach(t => ledger.publish(t))

      val sink =
        Flow[(Offset, Either[Error, Transaction])]
          .take(NO_OF_MESSAGES.toLong)
          .toMat(Sink.seq)(Keep.right)

      val blocksF = ledger.getSource(None, None).runWith(sink)

      blocksF.map { blocks =>
        val readTransactions = blocks.collect { case (_, transaction) => transaction }
        readTransactions shouldEqual transactions
      }
    }

    "store new blocks while multiple subscribers are reading them with different pace" in {
      val transactions = genTransactions()

      val ledger = new LedgerEntries[Either[Error, Transaction]](_.toString)

      val publishRate = NO_OF_MESSAGES / 10

      val blocksInStream =
        Source(transactions)
          .throttle(publishRate, 100.milliseconds, publishRate, ThrottleMode.shaping)
          .to(Sink.foreach { t =>
            ledger.publish(t)
            ()
          })

      def subscribe() = {
        val subscribeRate = NO_OF_MESSAGES / (Random.nextInt(100) + 1)
        ledger
          .getSource(None, None)
          .runWith(
            Flow[(Offset, Either[Error, Transaction])]
              .throttle(subscribeRate, 100.milliseconds, subscribeRate, ThrottleMode.shaping)
              .take(NO_OF_MESSAGES.toLong)
              .toMat(Sink.seq)(Keep.right)
          )
      }

      val readBlocksF = Future.sequence((1 to NO_OF_SUBSCRIBERS).map(_ => subscribe()))
      blocksInStream.run()

      readBlocksF.map { readBlocksForAll =>
        forAll(readBlocksForAll) { readBlocks =>
          val readTransactions = readBlocks.collect { case (_, transaction) => transaction }
          readTransactions shouldEqual transactions
        }
      }
    }
  }
} 
Example 72
Source File: ExtractSingleMaterializedValueTest.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.util.akkastreams

import akka.stream.scaladsl.{Keep, Sink, Source}
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{Matchers, WordSpec}

import scala.util.Random

class ExtractSingleMaterializedValueTest
    extends WordSpec
    with Matchers
    with ScalaFutures
    with AkkaBeforeAndAfterAll {

  private val discriminator = { i: Int =>
    if (i < 0) Some(i) else None
  }

  private val elemsThatPassThrough = 0.to(10).toVector

  ExtractMaterializedValue.getClass.getSimpleName when {

    "there's a single valid value" should {
      "extract it" in {
        val elemToExtract = -1

        val elements = elemToExtract +: elemsThatPassThrough
        val (extractedF, restF) = processElements(Random.shuffle(elements))

        whenReady(extractedF)(_ shouldEqual elemToExtract)
        whenReady(restF)(_ should contain theSameElementsAs elements)
      }
    }

    "there are multiple valid values" should {
      "extract the first matching element" in {
        val elemToExtract = -1
        val otherCandidateShuffledIn = -2

        val elements = elemToExtract +: Random.shuffle(
          otherCandidateShuffledIn +: elemsThatPassThrough)
        val (extractedF, restF) = processElements(elements)

        whenReady(extractedF)(_ shouldEqual elemToExtract)
        whenReady(restF)(_ should contain theSameElementsAs elements)
      }
    }

    "there are no valid values" should {
      "fail the materialized future, but let the stream continue otherwise" in {

        val (extractedF, restF) =
          processElements(Random.shuffle(elemsThatPassThrough))

        whenReady(extractedF.failed)(_ shouldBe a[RuntimeException])
        whenReady(restF)(_.sorted shouldEqual elemsThatPassThrough)
      }
    }

  }

  private def processElements(elements: Iterable[Int]) = {
    Source
      .fromIterator(() => elements.iterator)
      .viaMat(ExtractMaterializedValue(discriminator))(Keep.right)
      .toMat(Sink.seq)(Keep.both)
      .run()
  }
} 
Example 73
Source File: ActiveContractSetClient.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.acs

import akka.NotUsed
import akka.stream.scaladsl.{Keep, Source}
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.v1.active_contracts_service.ActiveContractsServiceGrpc.ActiveContractsServiceStub
import com.daml.ledger.api.v1.active_contracts_service.{
  GetActiveContractsRequest,
  GetActiveContractsResponse
}
import com.daml.ledger.api.v1.transaction_filter.TransactionFilter
import com.daml.ledger.client.LedgerClient
import com.daml.util.akkastreams.ExtractMaterializedValue
import scalaz.syntax.tag._

import scala.concurrent.Future

object ActiveContractSetClient {

  private val extractOffset =
    new ExtractMaterializedValue[GetActiveContractsResponse, String](r =>
      if (r.offset.nonEmpty) Some(r.offset) else None)

}

final class ActiveContractSetClient(ledgerId: LedgerId, service: ActiveContractsServiceStub)(
    implicit esf: ExecutionSequencerFactory) {

  import ActiveContractSetClient.extractOffset

  private def request(filter: TransactionFilter, verbose: Boolean) =
    GetActiveContractsRequest(ledgerId.unwrap, Some(filter), verbose)

  private def activeContractSource(
      request: GetActiveContractsRequest,
      token: Option[String]): Source[GetActiveContractsResponse, NotUsed] =
    ClientAdapter.serverStreaming(request, LedgerClient.stub(service, token).getActiveContracts)

  
  def getActiveContracts(
      filter: TransactionFilter,
      verbose: Boolean = false,
      token: Option[String] = None): Source[GetActiveContractsResponse, Future[String]] =
    activeContractSource(request(filter, verbose), token).viaMat(extractOffset)(Keep.right)

} 
Example 74
Source File: MemoryAttachmentStore.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.database.memory

import akka.actor.ActorSystem
import akka.http.scaladsl.model.ContentType
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.util.{ByteString, ByteStringBuilder}
import org.apache.openwhisk.common.LoggingMarkers.{
  DATABASE_ATTS_DELETE,
  DATABASE_ATT_DELETE,
  DATABASE_ATT_GET,
  DATABASE_ATT_SAVE
}
import org.apache.openwhisk.common.{Logging, TransactionId}
import org.apache.openwhisk.core.database.StoreUtils._
import org.apache.openwhisk.core.database._
import org.apache.openwhisk.core.entity.DocId

import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.ClassTag

object MemoryAttachmentStoreProvider extends AttachmentStoreProvider {
  override def makeStore[D <: DocumentSerializer: ClassTag]()(implicit actorSystem: ActorSystem,
                                                              logging: Logging,
                                                              materializer: ActorMaterializer): AttachmentStore =
    new MemoryAttachmentStore(implicitly[ClassTag[D]].runtimeClass.getSimpleName.toLowerCase)
}


  override protected[core] def readAttachment[T](docId: DocId, name: String, sink: Sink[ByteString, Future[T]])(
    implicit transid: TransactionId): Future[T] = {

    val start =
      transid.started(
        this,
        DATABASE_ATT_GET,
        s"[ATT_GET] '$dbName' finding attachment '$name' of document 'id: $docId'")

    val f = attachments.get(attachmentKey(docId, name)) match {
      case Some(Attachment(bytes)) =>
        val r = Source.single(bytes).toMat(sink)(Keep.right).run
        r.map(t => {
          transid.finished(this, start, s"[ATT_GET] '$dbName' completed: found attachment '$name' of document '$docId'")
          t
        })
      case None =>
        transid.finished(
          this,
          start,
          s"[ATT_GET] '$dbName', retrieving attachment '$name' of document '$docId'; not found.")
        Future.failed(NoDocumentException("Not found on 'readAttachment'."))
    }
    reportFailure(
      f,
      start,
      failure => s"[ATT_GET] '$dbName' internal error, name: '$name', doc: '$docId', failure: '${failure.getMessage}'")
  }

  override protected[core] def deleteAttachments(docId: DocId)(implicit transid: TransactionId): Future[Boolean] = {
    val start = transid.started(this, DATABASE_ATTS_DELETE, s"[ATTS_DELETE] deleting attachments of document '$docId'")

    val prefix = docId + "/"
    attachments --= attachments.keySet.filter(_.startsWith(prefix))
    transid.finished(this, start, s"[ATTS_DELETE] completed: delete attachment of document '$docId'")
    Future.successful(true)
  }

  override protected[core] def deleteAttachment(docId: DocId, name: String)(
    implicit transid: TransactionId): Future[Boolean] = {
    val start = transid.started(this, DATABASE_ATT_DELETE, s"[ATT_DELETE] deleting attachment '$name' of document '$docId'")
    attachments.remove(attachmentKey(docId, name))
    transid.finished(this, start, s"[ATT_DELETE] completed: delete attachment of document '$docId'")
    Future.successful(true)
  }

  def attachmentCount: Int = attachments.size

  def isClosed = closed

  override def shutdown(): Unit = {
    closed = true
  }

  private def attachmentKey(docId: DocId, name: String) = s"${docId.id}/$name"
} 
Example 75
Source File: CrdtsClient.scala    From cloudstate   with Apache License 2.0 5 votes vote down vote up
package io.cloudstate.samples

import akka.actor.ActorSystem
import akka.grpc.GrpcClientSettings
import akka.stream.scaladsl.{Keep, Sink}
import akka.stream.{ActorMaterializer, KillSwitches}
import com.example.crdts.crdt_example._

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}


class CrdtsClient(hostname: String, port: Int, hostnameOverride: Option[String], sys: ActorSystem) {
  def this(hostname: String, port: Int, hostnameOverride: Option[String] = None) =
    this(hostname, port, hostnameOverride, ActorSystem())
  private implicit val system = sys
  private implicit val materializer = ActorMaterializer()
  import sys.dispatcher

  val settings = {
    val s = GrpcClientSettings.connectToServiceAt(hostname, port).withTls(false)
    hostnameOverride.fold(s)(host => s.withChannelBuilderOverrides(_.overrideAuthority(host)))
  }
  println(s"Connecting to $hostname:$port")
  val service = CrdtExampleClient(settings)

  def shutdown(): Unit = {
    await(service.close())
    await(system.terminate())
  }

  def await[T](future: Future[T]): T = Await.result(future, 10.seconds)

  def getGCounter(id: String) = await(service.getGCounter(Get(id))).value

  def incrementGCounter(id: String, value: Long) = await(service.incrementGCounter(UpdateCounter(id, value))).value

  def getPNCounter(id: String) = await(service.getPNCounter(Get(id))).value

  def updatePNCounter(id: String, value: Long) = await(service.updatePNCounter(UpdateCounter(id, value))).value

  def getGSet(id: String) = await(service.getGSet(Get(id))).items

  def mutateGSet(id: String, values: Seq[SomeValue]) = await(service.mutateGSet(MutateSet(add = values))).size

  def getORSet(id: String) = await(service.getORSet(Get(id))).items

  def mutateORSet(id: String, add: Seq[SomeValue] = Nil, remove: Seq[SomeValue] = Nil, clear: Boolean = false) =
    await(service.mutateORSet(MutateSet(key = id, add = add, remove = remove, clear = clear))).size

  def connect(id: String) =
    service.connect(User(id)).viaMat(KillSwitches.single)(Keep.right).to(Sink.ignore).run()

  def monitor(monitorId: String, id: String) =
    service
      .monitor(User(id))
      .viaMat(KillSwitches.single)(Keep.right)
      .to(
        Sink.foreach(
          status =>
            println(
              s"Monitor $monitorId saw user $id go " + (if (status.online) "online"
                                                        else "offline")
            )
        )
      )
      .run()
} 
Example 76
Source File: Watcher.scala    From cloudstate   with Apache License 2.0 5 votes vote down vote up
package io.cloudstate.operator

import akka.{Done, NotUsed}
import akka.stream.{KillSwitch, KillSwitches, Materializer}
import akka.stream.scaladsl.{Flow, Keep, RestartSource, Sink, Source}
import play.api.libs.json.Format
import skuber.{ListResource, ObjectResource, ResourceDefinition}
import skuber.api.client.{EventType, KubernetesClient, WatchEvent}

import scala.concurrent.duration._
import skuber.json.format._

import scala.concurrent.ExecutionContext

object Watcher {

  private implicit def listResourceFormat[Resource <: ObjectResource: Format]: Format[ListResource[Resource]] =
    ListResourceFormat(implicitly[Format[Resource]])

  def watch[Resource <: ObjectResource: Format: ResourceDefinition](
      client: KubernetesClient,
      handler: Flow[WatchEvent[Resource], _, _]
  )(implicit ec: ExecutionContext, mat: Materializer): KillSwitch =
    // Summary of what we want our event loop to look like:
    // * We start by listing all the resources, and process them.
    // * Then we start watching from the resourceVersion that we got in our list, so we get all updates.
    // * But we also want to periodically recheck all resources, since sometimes there are race conditions
    //   between operators handling dependent resources (eg, if you deploy a journal and a service that uses
    //   it at the same time), so we only run the watch for a maximum of that time (eg, 5 minutes), before
    //   restarting.
    // * Also, if errors are encountered, we don't want to continually restart in a hot loop, so we use the
    //   RestartSource to restart with backoff.
    RestartSource
      .onFailuresWithBackoff(2.seconds, 20.seconds, 0.2) { () =>
        val source = Source
          .repeat(NotUsed)
          .flatMapConcat { _ =>
            Source
              .fromFutureSource(
                client
                  .list[ListResource[Resource]]()
                  .map { resources =>
                    val watch = client
                      .watchAllContinuously[Resource](sinceResourceVersion = Some(resources.resourceVersion))

                    Source(resources)
                      .map(WatchEvent(EventType.MODIFIED, _))
                      .concat(watch)
                  }
              )
              .takeWithin(5.minutes)
          }

        source.via(handler)
      }
      .viaMat(KillSwitches.single)(Keep.right)
      .to(Sink.ignore)
      .run()

  def watchSingle[Resource <: ObjectResource: Format: ResourceDefinition](
      client: KubernetesClient,
      resourceName: String,
      handler: Flow[WatchEvent[Resource], _, _]
  )(implicit ec: ExecutionContext, mat: Materializer): KillSwitch =
    RestartSource
      .onFailuresWithBackoff(2.seconds, 20.seconds, 0.2) { () =>
        val source = Source
          .repeat(NotUsed)
          .flatMapConcat { _ =>
            Source
              .fromFutureSource(
                client.getOption[Resource](resourceName).map {
                  case Some(resource) =>
                    val watch =
                      client.watchContinuously[Resource](resourceName,
                                                         sinceResourceVersion = Some(resource.resourceVersion))
                    Source
                      .single(resource)
                      .map(WatchEvent(EventType.MODIFIED, _))
                      .concat(watch)
                  case None =>
                    throw new RuntimeException(
                      s"Resource $resourceName not found in namespace ${client.namespaceName}!"
                    )
                }
              )
              .takeWithin(5.minutes)
          }

        source.via(handler)
      }
      .viaMat(KillSwitches.single)(Keep.right)
      .to(Sink.ignore)
      .run()
} 
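
Both watch variants end with .viaMat(KillSwitches.single)(Keep.right).to(Sink.ignore): Keep.right selects the kill switch materialized by KillSwitches.single, and the trailing to(...) keeps it because Keep.left is the default there. A minimal sketch of the same wiring (tick source and sleep are illustrative):

import akka.actor.ActorSystem
import akka.stream.{KillSwitches, UniqueKillSwitch}
import akka.stream.scaladsl.{Keep, Sink, Source}

import scala.concurrent.duration._

object KillSwitchKeepRightSketch extends App {
  implicit val system: ActorSystem = ActorSystem("KillSwitchKeepRightSketch")

  // Keep.right keeps the UniqueKillSwitch; to(Sink...) then keeps it by default
  val switch: UniqueKillSwitch =
    Source.tick(0.seconds, 200.millis, "event")
      .viaMat(KillSwitches.single)(Keep.right)
      .to(Sink.foreach(println))
      .run()

  Thread.sleep(1000)
  switch.shutdown() // completes the stream without an error
  system.terminate()
}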
Example 77
Source File: AttributesComputation.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.storage.attributes

import java.nio.file.{Files, Path}
import java.security.MessageDigest

import akka.http.scaladsl.model.HttpCharsets.`UTF-8`
import akka.http.scaladsl.model.MediaTypes.{`application/octet-stream`, `application/x-tar`}
import akka.http.scaladsl.model.{ContentType, MediaType, MediaTypes}
import akka.stream.Materializer
import akka.stream.scaladsl.{Keep, Sink}
import akka.util.ByteString
import cats.effect.Effect
import cats.implicits._
import ch.epfl.bluebrain.nexus.storage.File.{Digest, FileAttributes}
import ch.epfl.bluebrain.nexus.storage.StorageError.InternalError
import ch.epfl.bluebrain.nexus.storage._
import org.apache.commons.io.FilenameUtils

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

trait AttributesComputation[F[_], Source] {

  
  implicit def akkaAttributes[F[_]](implicit
      ec: ExecutionContext,
      mt: Materializer,
      F: Effect[F]
  ): AttributesComputation[F, AkkaSource] =
    (path: Path, algorithm: String) => {
      if (!Files.exists(path)) F.raiseError(InternalError(s"Path not found '$path'"))
      else
        Try(MessageDigest.getInstance(algorithm)) match {
          case Success(msgDigest) =>
            val isDir  = Files.isDirectory(path)
            val source = if (isDir) folderSource(path) else fileSource(path)
            source
              .alsoToMat(sinkSize)(Keep.right)
              .toMat(sinkDigest(msgDigest)) { (bytesF, digestF) =>
                (bytesF, digestF).mapN {
                  case (bytes, digest) => FileAttributes(path.toAkkaUri, bytes, digest, detectMediaType(path, isDir))
                }
              }
              .run()
              .to[F]
          case Failure(_)         => F.raiseError(InternalError(s"Invalid algorithm '$algorithm'."))
        }

    }
} 
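
Here the two materialized futures (size and digest) are combined with a custom function passed to toMat rather than one of the Keep combinators. A simplified sketch of that technique, with plain folds standing in for sinkSize and sinkDigest:

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.util.ByteString

import scala.concurrent.Future

object CustomMatCombineSketch extends App {
  implicit val system: ActorSystem = ActorSystem("CustomMatCombineSketch")
  import system.dispatcher

  val bytes = Source(List("ab", "cde", "f").map(ByteString(_)))

  // alsoToMat + Keep.right keeps the side sink's Future; toMat then combines
  // both futures with an arbitrary function instead of Keep.both
  val sizeAndChunks: Future[(Long, Int)] =
    bytes
      .alsoToMat(Sink.fold(0L)((acc, bs: ByteString) => acc + bs.size))(Keep.right)
      .toMat(Sink.fold(0)((acc, _: ByteString) => acc + 1)) { (sizeF, countF) =>
        sizeF.zip(countF)
      }
      .run()

  sizeAndChunks.foreach { case (size, chunks) =>
    println(s"$size bytes in $chunks chunks")
    system.terminate()
  }
}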
Example 78
Source File: CleaningPipelineSpec.scala    From tap   with Apache License 2.0 5 votes vote down vote up
package io.heta.tap.pipelines

import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import io.heta.tap.UnitSpec

import scala.concurrent.Await
import scala.concurrent.duration._



class CleaningPipelineSpec extends UnitSpec {

  import io.heta.tap.pipelines.materialize.PipelineContext._

  val cleaning = new Cleaning


  def testSource(input:String) = Source.single(input)
  val testSink = Flow[String].toMat(Sink.head[String])(Keep.right)

  "revealInvisible" should "replace whitespace characters with visible characters" in {

    import cleaning.White._
    val input = s"1${sp}2${nb}3${nl}4${cr}5\u001e6\u00807"
    val future = testSource(input) via cleaning.Pipeline.revealInvisible runWith testSink
    val result = Await.result(future, 3 seconds)
    assert(result=="1·2·3¬4¬5�6�7")
  }

  "simplify" should "replace quotes and hyphens with single byte versions" in {

    import cleaning.Quote._
    val input = s"1${singleCurlyLeft}2${singleCurlyRight}3${doubleCurlyLeft}4${doubleCurlyRight}5${cleaning.Hyphen.rgx_hyphens}6"
    val future = testSource(input) via cleaning.Pipeline.simplify runWith testSink
    val result = Await.result(future, 3 seconds)
    assert(result=="1'2'3\"4\"5-|-|-|-|-|-|-|-6")
  }

  "lengthPreserve" should "replace control characters while preserving length" in {
    import cleaning.White._
    val input = s"1${sp}2${nb}3${nl}4${cr}5\u001e6\u00807"
    val future = testSource(input) via cleaning.Pipeline.lengthPreserve runWith testSink
    val result = Await.result(future, 3 seconds)
    assert(result=="1 2 3\n4\n5�6�7" && result.length==input.length)
  }

  "utfMinimal" should "strip control characters, and reduce whitespace" in {
    import cleaning.White._
    val input = s"1${sp}${nb}3${nl}${cr}5\u001e6\u00807"
    val future = testSource(input) via cleaning.Pipeline.utfMinimal runWith testSink
    val result = Await.result(future, 3 seconds)
    assert(result=="1 3\n567")
  }

  "utfSimplify" should "replace hyphens and quotes, strip controls and reduce whitespace" in {
    import cleaning.Quote._
    import cleaning.White._
    val input = s"1${sp}${nb}3${nl}${cr}5\u001e6\u00807${singleCurlyLeft}8${singleCurlyRight}9${doubleCurlyLeft}10${doubleCurlyRight}11${cleaning.Hyphen.rgx_hyphens}12"
    val future = testSource(input) via cleaning.Pipeline.utfSimplify runWith testSink
    val result = Await.result(future, 3 seconds)
    assert(result=="1 3\n567'8'9\"10\"11-|-|-|-|-|-|-|-12")
  }

//  "asciiOnly" should "replace or strip all non-ascii characters" in {
//    import cleaning.Quote._
//    import cleaning.White._
//    val input = s"1${sp}${nb}3${nl}${cr}56\u00807${singleCurlyLeft}8${singleCurlyRight}9${doubleCurlyLeft}10${doubleCurlyRight}11${cleaning.Hyphen.rgx_hyphens}12"
//    val future = testSource(input) via cleaning.Pipeline.asciiOnly runWith testSink
//    val result = Await.result(future, 3 seconds)
//    assert(result=="1 3\r\n567891011|||||||12")
//  }

} 
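The testSink above is a reusable sink built from a Flow: Keep.right exposes Sink.head's Future[String] as the sink's materialized value, which is what the specs await. A minimal standalone sketch of the same idea follows; the trimming flow and names are illustrative assumptions.

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Keep, Sink, Source }

import scala.concurrent.duration._
import scala.concurrent.{ Await, Future }

object HeadSinkSketch extends App {
  implicit val system: ActorSystem = ActorSystem("head-sink-sketch")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  // Keep.right keeps Sink.head's Future[String] rather than the Flow's NotUsed.
  val firstElement: Sink[String, Future[String]] =
    Flow[String].map(_.trim).toMat(Sink.head[String])(Keep.right)

  val future = Source.single("  hello  ").runWith(firstElement)
  println(Await.result(future, 3.seconds)) // hello
  system.terminate()
}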
Example 79
Source File: PubSubSinkIT.scala    From akka-cloudpubsub   with Apache License 2.0
package com.qubit.pubsub.akka

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Keep, Sink}
import akka.stream.testkit.scaladsl.TestSource
import akka.stream.{ActorMaterializer, Attributes, Graph, SinkShape}
import com.google.common.base.Charsets
import com.qubit.pubsub.PubSubIntegrationTest
import com.qubit.pubsub.akka.attributes.{
  PubSubClientAttribute,
  PubSubStageBufferSizeAttribute
}
import com.qubit.pubsub.client.PubSubMessage
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try

class PubSubSinkIT
    extends FunSuite
    with Matchers
    with BeforeAndAfterAll
    with PubSubIntegrationTest {

  implicit val actorSystem = ActorSystem("pubsub-stream-test")
  implicit val materializer = ActorMaterializer()

  override def testName = "pubsubsink"

  override def beforeAll(): Unit = {
    Await.ready(client.createTopic(testTopic), timeout)
    Await
      .ready(client.createSubscription(testSubscription, testTopic), timeout)
  }

  override def afterAll(): Unit = {
    actorSystem.terminate()
    Await.ready(client.deleteSubscription(testSubscription), timeout)
    Await.ready(client.deleteTopic(testTopic), timeout)
  }

  test("PubSubSink success") {
    val sinkGraph: Graph[SinkShape[PubSubMessage], NotUsed] =
      new PubSubSink(testTopic, 1.second)
    val sinkAttributes = Attributes(
      List(PubSubClientAttribute(client), PubSubStageBufferSizeAttribute(30)))
    val pubsubSink = Sink.fromGraph(sinkGraph).withAttributes(sinkAttributes)

    val (pub, _) = TestSource
      .probe[Array[Byte]]
      .map(PubSubMessage(_))
      .toMat(pubsubSink)(Keep.both)
      .run()

    Range(0, 100)
      .map(i => s"xxx$i".getBytes(Charsets.UTF_8))
      .foreach(pub.sendNext)
    pub.sendComplete()

    // wait for buffers to flush
    Try(Thread.sleep(1000))

    val output = Await.result(client.pull(testSubscription, 100), timeout)
    client.ack(testSubscription, output.map(m => m.ackId))

    output should not be (null)
    output should have size (100)
    output
      .map(m => new String(m.payload.payload, Charsets.UTF_8))
      .forall(_.startsWith("xxx")) should be(true)
  }
} 
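The Keep.both at the heart of this test pairs the TestSource probe with whatever the sink materializes, so the test can drive the stream and observe its result. Below is a stripped-down sketch of the pattern using Sink.ignore; it assumes akka-stream-testkit is on the classpath and the names are illustrative.

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Keep, Sink }
import akka.stream.testkit.scaladsl.TestSource

import scala.concurrent.Await
import scala.concurrent.duration._

object ProbeAndFutureSketch extends App {
  implicit val system: ActorSystem = ActorSystem("probe-and-future-sketch")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  // Keep.both materializes (TestPublisher.Probe[Int], Future[Done]).
  val (probe, done) =
    TestSource.probe[Int]
      .toMat(Sink.ignore)(Keep.both)
      .run()

  probe.sendNext(1).sendNext(2).sendComplete()
  Await.result(done, 3.seconds) // completes once the sink has consumed the stream
  system.terminate()
}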
Example 80
Source File: StreamSpec.scala    From akka-stream-eventsourcing   with Apache License 2.0
package com.github.krasserm.ases

import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Keep}
import akka.stream.testkit.scaladsl.{TestSink, TestSource}
import akka.stream.testkit.{TestPublisher, TestSubscriber}
import akka.testkit.TestKit
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.collection.immutable.Seq

trait StreamSpec extends BeforeAndAfterAll { this: TestKit with Suite =>
  implicit val materializer = ActorMaterializer()

  val emitterId = "emitter"

  override def afterAll(): Unit = {
    materializer.shutdown()
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }

  def probes[I, O, M](flow: Flow[I, O, M]): (TestPublisher.Probe[I], TestSubscriber.Probe[O]) =
    TestSource.probe[I].viaMat(flow)(Keep.left).toMat(TestSink.probe[O])(Keep.both).run()

  def durables[A](emitted: Seq[Emitted[A]], offset: Int = 0): Seq[Durable[A]] =
    emitted.zipWithIndex.map { case (e, i) => e.durable(i + offset) }
} 
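The probes helper shows both combinators in one expression: Keep.left discards the flow-under-test's materialized value so the upstream probe survives, and Keep.both then pairs it with the downstream probe. A self-contained sketch around a trivial doubling flow follows; it assumes akka-stream-testkit is on the classpath, and the flow itself is an illustrative assumption.

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Keep }
import akka.stream.testkit.scaladsl.{ TestSink, TestSource }

object ProbesAroundFlowSketch extends App {
  implicit val system: ActorSystem = ActorSystem("probes-around-flow-sketch")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  val doubler = Flow[Int].map(_ * 2)

  // Keep.left keeps the upstream probe, Keep.both pairs it with the sink probe.
  val (pub, sub) =
    TestSource.probe[Int]
      .viaMat(doubler)(Keep.left)
      .toMat(TestSink.probe[Int])(Keep.both)
      .run()

  sub.request(2)
  pub.sendNext(1).sendNext(2).sendComplete()
  sub.expectNext(2)
  sub.expectNext(4)
  sub.expectComplete()
  system.terminate()
}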
Example 81
Source File: LoadTest.scala    From ws_to_kafka   with MIT License
package com.pkinsky

import java.util.concurrent.atomic.AtomicInteger

import akka.http.scaladsl.model.ws.{InvalidUpgradeResponse, WebsocketUpgradeResponse, WebsocketRequest, TextMessage}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.Uri
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Keep, Sink, RunnableGraph, Source}
import play.api.libs.json.Json

import scala.concurrent.{Future, Await}
import scala.concurrent.duration._
import scala.language.postfixOps

object LoadTest extends App with AppContext {
  val clients = 256
  val eventsPerClient = 256

  val eventsSent = new AtomicInteger(0)

  def testData(clientId: String): Source[Event, Unit] =
    Source.unfoldInf(1) { n =>
      val event = Event(s"msg number $n", clientId, System.currentTimeMillis())
      (n + 1, event)
    }.take(eventsPerClient).throttle(1, 100 millis, 1, ThrottleMode.Shaping)

  def wsClient(clientId: String): RunnableGraph[Future[WebsocketUpgradeResponse]] =
    testData(clientId).map(e => TextMessage.Strict(Json.toJson(e).toString))
      .map { x => eventsSent.incrementAndGet(); x }
      .viaMat(Http().websocketClientFlow(WebsocketRequest(Uri(s"ws://localhost:$port/ws"))))(Keep.right).to(Sink.ignore)

  //set up websocket connections
  (1 to clients).foreach { id =>
    wsClient(s"client $id").run()
  }

  //watch kafka for messages sent via websocket
  val kafkaConsumerGraph: RunnableGraph[Future[Seq[Event]]] =
    kafka.consume[Event](eventTopic, "group_new")
      .take(clients * eventsPerClient).takeWithin(2 minutes)
      .toMat(Sink.seq)(Keep.right)

  val res = Await.result(kafkaConsumerGraph.run, 5 minutes)
  println(s"sent ${eventsSent.get()} events total")
  println(s"res size: ${res.length}")
} 
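Both graphs in this load test end with Keep.right so the caller gets the sink's materialized future: the upgrade response for the websocket client and the collected events for the Kafka consumer. Below is a minimal sketch of the Sink.seq variant; it is standalone and involves no Kafka or websockets, and the numbers are illustrative.

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Keep, RunnableGraph, Sink, Source }

import scala.collection.immutable
import scala.concurrent.duration._
import scala.concurrent.{ Await, Future }

object CollectSeqSketch extends App {
  implicit val system: ActorSystem = ActorSystem("collect-seq-sketch")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  // Keep.right makes Sink.seq's Future[Seq[Int]] the graph's materialized value.
  val graph: RunnableGraph[Future[immutable.Seq[Int]]] =
    Source(1 to 5)
      .map(_ * 2)
      .toMat(Sink.seq[Int])(Keep.right)

  println(Await.result(graph.run(), 3.seconds)) // Vector(2, 4, 6, 8, 10)
  system.terminate()
}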
Example 82
Source File: InternalSubscriberStub.scala    From lagom   with Apache License 2.0
package com.lightbend.lagom.internal.testkit

import akka.Done
import akka.actor.ActorRef
import akka.stream.Materializer
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Keep
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source

import scala.concurrent.Future
import scala.language.higherKinds

private[lagom] class InternalSubscriberStub[Payload, Message[_]](
    groupId: String,
    topicBuffer: ActorRef
)(implicit materializer: Materializer) {
  def mostOnceSource: Source[Message[Payload], _] = {
    Source
      .actorRef[Message[Payload]](1024, OverflowStrategy.fail)
      .prependMat(Source.empty)(subscribeToBuffer)
  }

  def leastOnce(flow: Flow[Message[Payload], Done, _]): Future[Done] = {
    mostOnceSource
      .via(flow)
      .toMat(Sink.ignore)(Keep.right[Any, Future[Done]])
      .run()
  }

  private def subscribeToBuffer[R](ref: ActorRef, t: R) = {
    topicBuffer.tell(TopicBufferActor.SubscribeToBuffer(groupId, ref), ActorRef.noSender)
    t
  }
} 
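Keep.right is usually left to type inference; the Lagom stub above writes Keep.right[Any, Future[Done]] explicitly because the upstream source's materialized type is unknown (Source[Message[Payload], _]). A small sketch of the same trick against a deliberately widened source type follows; the setup is an illustrative assumption.

import akka.Done
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Keep, Sink, Source }

import scala.concurrent.duration._
import scala.concurrent.{ Await, Future }

object ExplicitKeepRightSketch extends App {
  implicit val system: ActorSystem = ActorSystem("explicit-keep-right-sketch")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  // The materialized type is hidden behind an existential, as in the Lagom stub.
  val source: Source[Int, _] = Source(1 to 3)

  // Explicit type parameters pin the combine function to (Any, Future[Done]) => Future[Done].
  val done: Future[Done] =
    source.toMat(Sink.ignore)(Keep.right[Any, Future[Done]]).run()

  Await.result(done, 3.seconds)
  system.terminate()
}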
Example 83
Source File: LogJson.scala    From 006877   with MIT License
package aia.stream

import java.nio.file.{ Files, Path }
import java.io.File
import java.time.ZonedDateTime

import scala.concurrent.duration._
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.util.{ Success, Failure }

import akka.Done
import akka.actor._
import akka.util.ByteString

import akka.stream.{ ActorAttributes, ActorMaterializer, IOResult }
import akka.stream.scaladsl.JsonFraming
import akka.stream.scaladsl.{ FileIO, BidiFlow, Flow, Framing, Keep, Sink, Source }

import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import spray.json._

object LogJson extends EventMarshalling 
    with NotificationMarshalling 
    with MetricMarshalling {
  def textInFlow(maxLine: Int) = {
    Framing.delimiter(ByteString("\n"), maxLine)
    .map(_.decodeString("UTF8"))
    .map(LogStreamProcessor.parseLineEx)
    .collect { case Some(e) => e }
  }

  def jsonInFlow(maxJsonObject: Int) = {
    JsonFraming.objectScanner(maxJsonObject) 
      .map(_.decodeString("UTF8").parseJson.convertTo[Event])
  }

  def jsonFramed(maxJsonObject: Int) =
    JsonFraming.objectScanner(maxJsonObject) 

  val jsonOutFlow = Flow[Event].map { event => 
    ByteString(event.toJson.compactPrint)
  }

  val notifyOutFlow = Flow[Summary].map { ws => 
    ByteString(ws.toJson.compactPrint)
  }

  val metricOutFlow = Flow[Metric].map { m => 
    ByteString(m.toJson.compactPrint)
  }

  val textOutFlow = Flow[Event].map{ event => 
    ByteString(LogStreamProcessor.logLine(event))
  }

  def logToJson(maxLine: Int) = {
    BidiFlow.fromFlows(textInFlow(maxLine), jsonOutFlow)
  }

  def jsonToLog(maxJsonObject: Int) = {
    BidiFlow.fromFlows(jsonInFlow(maxJsonObject), textOutFlow)
  }

  def logToJsonFlow(maxLine: Int) = {
    logToJson(maxLine).join(Flow[Event])
  }

  def jsonToLogFlow(maxJsonObject: Int) = {
    jsonToLog(maxJsonObject).join(Flow[Event])
  }
} 
Example 84
Source File: TracedFlowSpec.scala    From money   with Apache License 2.0
package com.comcast.money.akka.acceptance.stream

import akka.stream.Attributes
import akka.stream.scaladsl.{ Keep, Sink, Source }
import akka.stream.stage.{ InHandler, OutHandler }
import com.comcast.money.akka.Blocking.RichFuture
import com.comcast.money.akka.SpanHandlerMatchers.{ haveSomeSpanNames, maybeCollectingSpanHandler }
import com.comcast.money.akka.stream.{ TracedFlow, TracedFlowLogic }
import com.comcast.money.akka.{ AkkaMoneyScope, MoneyExtension, SpanContextWithStack }
import org.scalatest.Ignore

class TracedFlowSpec extends AkkaMoneyScope {

  "MoneyExtension should pass a span through an Akka Stream" in {
    implicit val moneyExtension: MoneyExtension = MoneyExtension(actorSystem)
    implicit val spanContextWithStack: SpanContextWithStack = new SpanContextWithStack

    testStream().get()

    maybeCollectingSpanHandler should haveSomeSpanNames(testSpanNames)
  }

  "MoneyExtension should pass a span through an asynchronous Akka Stream" in {
    implicit val moneyExtension: MoneyExtension = MoneyExtension(actorSystem)
    implicit val spanContextWithStack: SpanContextWithStack = new SpanContextWithStack

    multithreadedTestStream().get()

    maybeCollectingSpanHandler should haveSomeSpanNames(testSpanNames)
  }

  val testSpanNames = Seq("flow-3", "flow-2", "flow-1")

  def testStream()(implicit spanContextWithStack: SpanContextWithStack, moneyExtension: MoneyExtension) =
    Source[(String, SpanContextWithStack)](List(("", spanContextWithStack)))
      .via(new TestFlowShape("flow-1"))
      .via(new TestFlowShape("flow-2"))
      .via(new TestFlowShape("flow-3", isFinalFlow = true))
      .runWith(Sink.seq)

  def multithreadedTestStream()(implicit spanContextWithStack: SpanContextWithStack, moneyExtension: MoneyExtension) =
    Source[(String, SpanContextWithStack)](List(("", spanContextWithStack)))
      .via(new TestFlowShape("flow-1").async)
      .via(new TestFlowShape("flow-2").async)
      .via(new TestFlowShape("flow-3", isFinalFlow = true).async)
      .runWith(Sink.seq)

  class TestFlowShape(id: String, isFinalFlow: Boolean = false)(implicit moneyExtension: MoneyExtension) extends TracedFlow[String, String] {

    override val inletName: String = "testin"
    override val outletName: String = "testout"

    override def createLogic(inheritedAttributes: Attributes) =
      new TracedFlowLogic {
        setHandler(in, new InHandler {
          override def onPush(): Unit = {
            val logic = (msg: String) => s"$msg$id"
            if (isFinalFlow) stopTracePush(key = id, stageLogic = logic)
            else tracedPush(id, logic)
          }
        })

        setHandler(out, new OutHandler {
          override def onPull(): Unit =
            if (isClosed(in)) completeStage()
            else pull(in)
        })
      }
  }

} 
Example 85
Source File: KafkaEventProducer.scala    From openwhisk   with Apache License 2.0
package org.apache.openwhisk.core.database.cosmosdb.cache

import akka.Done
import akka.actor.ActorSystem
import akka.kafka.scaladsl.Producer
import akka.kafka.{ProducerMessage, ProducerSettings}
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{ActorMaterializer, OverflowStrategy, QueueOfferResult}
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.openwhisk.connector.kafka.KamonMetricsReporter

import scala.collection.immutable.Seq
import scala.concurrent.{ExecutionContext, Future, Promise}

case class KafkaEventProducer(
  settings: ProducerSettings[String, String],
  topic: String,
  eventProducerConfig: EventProducerConfig)(implicit system: ActorSystem, materializer: ActorMaterializer)
    extends EventProducer {
  private implicit val executionContext: ExecutionContext = system.dispatcher

  private val queue = Source
    .queue[(Seq[String], Promise[Done])](eventProducerConfig.bufferSize, OverflowStrategy.dropNew) //TODO Use backpressure
    .map {
      case (msgs, p) =>
        ProducerMessage.multi(msgs.map(newRecord), p)
    }
    .via(Producer.flexiFlow(producerSettings))
    .map {
      case ProducerMessage.MultiResult(_, passThrough) =>
        passThrough.success(Done)
      case _ => // As we use multi mode only, other modes need not be handled
    }
    .toMat(Sink.ignore)(Keep.left)
    .run

  override def send(msg: Seq[String]): Future[Done] = {
    val promise = Promise[Done]
    queue.offer(msg -> promise).flatMap {
      case QueueOfferResult.Enqueued    => promise.future
      case QueueOfferResult.Dropped     => Future.failed(new Exception("Kafka request queue is full."))
      case QueueOfferResult.QueueClosed => Future.failed(new Exception("Kafka request queue was closed."))
      case QueueOfferResult.Failure(f)  => Future.failed(f)
    }
  }

  def close(): Future[Done] = {
    queue.complete()
    queue.watchCompletion()
  }

  private def newRecord(msg: String) = new ProducerRecord[String, String](topic, "messages", msg)

  private def producerSettings =
    settings.withProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, KamonMetricsReporter.name)
} 
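Here Keep.left does the opposite of most examples: the interesting materialized value is the upstream SourceQueueWithComplete, so the producer keeps it and drops Sink.ignore's future. A minimal standalone sketch of that pattern follows; the buffer size, printing sink and messages are illustrative assumptions.

import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Keep, Sink, Source }
import akka.stream.{ ActorMaterializer, OverflowStrategy }

import scala.concurrent.Await
import scala.concurrent.duration._

object QueueKeepLeftSketch extends App {
  implicit val system: ActorSystem = ActorSystem("queue-keep-left-sketch")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  // Keep.left keeps the queue handle; the sink's Future[Done] is dropped.
  val queue =
    Source
      .queue[String](16, OverflowStrategy.dropNew)
      .toMat(Sink.foreach[String](msg => println(s"processing $msg")))(Keep.left)
      .run()

  queue.offer("hello") // returns a Future[QueueOfferResult]
  queue.complete()
  Await.result(queue.watchCompletion(), 3.seconds)
  system.terminate()
}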
Example 86
Source File: Timed.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.metrics

import java.util.concurrent.CompletionStage

import akka.Done
import akka.stream.scaladsl.{Keep, Source}
import com.codahale.metrics.{Counter, Timer}
import com.daml.dec.DirectExecutionContext

import scala.concurrent.Future

object Timed {

  def value[T](timer: Timer, value: => T): T =
    timer.time(() => value)

  def completionStage[T](timer: Timer, future: => CompletionStage[T]): CompletionStage[T] = {
    val ctx = timer.time()
    future.whenComplete { (_, _) =>
      ctx.stop()
      ()
    }
  }

  def future[T](timer: Timer, future: => Future[T]): Future[T] = {
    val ctx = timer.time()
    val result = future
    result.onComplete(_ => ctx.stop())(DirectExecutionContext)
    result
  }

  def trackedFuture[T](counter: Counter, future: => Future[T]): Future[T] = {
    counter.inc()
    future.andThen { case _ => counter.dec() }(DirectExecutionContext)
  }

  def timedAndTrackedFuture[T](timer: Timer, counter: Counter, future: => Future[T]): Future[T] = {
    Timed.future(timer, trackedFuture(counter, future))
  }

  def source[Out, Mat](timer: Timer, source: => Source[Out, Mat]): Source[Out, Mat] = {
    val ctx = timer.time()
    source
      .watchTermination()(Keep.both[Mat, Future[Done]])
      .mapMaterializedValue {
        case (mat, done) =>
          done.onComplete(_ => ctx.stop())(DirectExecutionContext)
          mat
      }
  }

} 
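Timed.source relies on watchTermination with Keep.both: the original materialized value is paired with a Future[Done], the timing callback hooks onto that future, and mapMaterializedValue hands the original value back to the caller. Below is a sketch of the same wrapping under assumed names; the println-based timing stands in for the metrics code and is not the daml implementation.

import akka.Done
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Keep, Sink, Source }

import scala.concurrent.Future

object WatchTerminationSketch extends App {
  implicit val system: ActorSystem = ActorSystem("watch-termination-sketch")
  implicit val mat: ActorMaterializer = ActorMaterializer()
  import system.dispatcher

  // Pair the original materialized value with a termination future, observe the
  // future, and return only the original value to the caller.
  def timed[Out, Mat](source: Source[Out, Mat]): Source[Out, Mat] =
    source
      .watchTermination()(Keep.both[Mat, Future[Done]])
      .mapMaterializedValue {
        case (mat, done) =>
          val startNanos = System.nanoTime()
          done.onComplete(_ => println(s"stream ran for ${(System.nanoTime() - startNanos) / 1e6} ms"))
          mat
      }

  timed(Source(1 to 3)).runWith(Sink.foreach[Int](println))
  Thread.sleep(500)
  system.terminate()
}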
Example 87
Source File: ActiveMqTestSpec.scala    From reactive-activemq   with Apache License 2.0
package akka.stream.integration
package activemq

import akka.NotUsed
import akka.actor.ActorRef
import akka.stream.integration.PersonDomain.Person
import akka.stream.scaladsl.{ Flow, Keep }
import akka.stream.testkit.scaladsl.{ TestSink, TestSource }
import akka.stream.testkit.{ TestPublisher, TestSubscriber }
import akka.testkit.TestActor.AutoPilot
import akka.testkit.TestProbe
import JsonCamelMessageExtractor._
import JsonCamelMessageBuilder._

import scala.util.{ Failure, Success, Try }

  // Note: the enclosing test-spec class declaration is elided from this excerpt;
  // fail(...) below comes from that surrounding spec.
  implicit def function1ToAutoPilot[S, T](f: S => T): AutoPilot = new AutoPilot {
    override def run(sender: ActorRef, msg: Any): AutoPilot = msg match {
      case s: S =>
        val tryT: Try[T] = Try(f(s))
        tryT match {
          case Success(t) =>
            sender ! t
            function1ToAutoPilot(f)
          case Failure(f) =>
            fail(s"Failed to apply supplied function to received message: $s", f)
        }
      case _ =>
        fail(s"Received message is not of the required type: $msg")
    }
  }
} 
Example 88
Source File: ActiveMqProducerTest.scala    From reactive-activemq   with Apache License 2.0
package akka.stream.integration
package activemq

import akka.stream.integration.PersonDomain.Person
import akka.stream.scaladsl.{ Keep, Source }

import scala.concurrent.Promise
import scala.concurrent.duration._

class ActiveMqProducerTest extends TestSpec {
  it should "produce messages to a queue" in {
    withTestTopicSubscriber() { sub =>
      withTestTopicPublisher() { pub =>
        pub.sendNext(testPerson1)
        pub.sendComplete()

        sub.request(1)
        sub.expectNextPF {
          case (p: Promise[Unit], `testPerson1`) => p.success(())
        }

        sub.expectNoMsg(500.millis)
        sub.cancel()
      }
    }
  }

  it should "produce multiple messages to a queue" in {
    withTestTopicSubscriber() { sub =>
      withTestTopicPublisher() { pub =>

        (0 to 10).foreach { _ =>
          pub.sendNext(testPerson1)
          sub.request(1)
          sub.expectNextPF {
            case (p: Promise[Unit], `testPerson1`) => p.success(())
          }
        }
        pub.sendComplete()
        sub.cancel()
      }
    }
  }

  it should "send 250 messages to the queue" in {
    import PersonDomain._
    val numberOfPersons = 250
    Source.repeat(testPerson1).take(numberOfPersons).runWith(ActiveMqProducer("PersonProducer")).toTry should be a 'success
  }

  it should "send and receive 250 messages from the queue" in {
    val numberOfPersons = 250
    Source.repeat(testPerson1).take(numberOfPersons).runWith(ActiveMqProducer[Person]("PersonProducer")).toTry should be a 'success
    val (ref, fxs) = ActiveMqConsumer[Person]("PersonConsumer").take(numberOfPersons).toMat(AckSink.seq)(Keep.both).run()
    fxs.toTry should be a 'success
    terminateEndpoint(ref)
  }
} 
Example 89
Source File: AckSink.scala    From reactive-activemq   with Apache License 2.0
package akka.stream.integration
package activemq

import akka.Done
import akka.stream.scaladsl.{ Flow, Keep, Sink }

import scala.concurrent.Future

object AckSink {
  
  def complete[A]: Sink[AckTup[A, A], Future[Done]] = {
    Flow[AckTup[A, A]].map {
      case (p, a) =>
        try {
          if (!p.isCompleted) p.success(a)
        } catch {
          case cause: Throwable =>
            if (!p.isCompleted) p.failure(cause)
        }
    }.toMat(Sink.ignore)(Keep.right).named("completeAckSink")
  }
} 
Example 90
Source File: AckActiveMqProducer.scala    From reactive-activemq   with Apache License 2.0
package akka.stream.integration
package activemq

import akka.Done
import akka.actor.ActorSystem
import akka.camel.{ CamelExtension, CamelMessage }
import akka.stream.integration.activemq.extension.ActiveMqExtension
import akka.stream.scaladsl.{ Flow, Keep, Sink }

import scala.collection.JavaConversions._
import scala.concurrent.{ ExecutionContext, Future }

object AckActiveMqProducer {
  def apply[A](producerName: String, qos: Int = 8)(implicit ec: ExecutionContext, system: ActorSystem, builder: MessageBuilder[A, CamelMessage]): Sink[AckUTup[A], Future[Done]] =
    sink(producerName, qos)

  def sink[A](producerName: String, qos: Int = 8)(implicit ec: ExecutionContext, system: ActorSystem, builder: MessageBuilder[A, CamelMessage]): Sink[AckUTup[A], Future[Done]] =
    flow(producerName, qos).toMat(Sink.ignore)(Keep.right)

  def flow[A](producerName: String, qos: Int = 8)(implicit ec: ExecutionContext, system: ActorSystem, builder: MessageBuilder[A, CamelMessage]) = {
    val template = CamelExtension(system).template
    Flow[AckUTup[A]].mapAsync(qos) {
      case (p, payload) =>
        Future {
          val camelMessage = builder.build(payload)
          val uri = ActiveMqExtension(system).producerEndpointUri(producerName)
          template.sendBodyAndHeaders(uri, camelMessage.body, camelMessage.headers.mapValues(_.asInstanceOf[AnyRef]))
        }.map { _ => if (!p.isCompleted) p.success(()) }.recover { case cause: Throwable => if (!p.isCompleted) p.failure(cause) }
    }
  }
} 
Example 91
Source File: ActiveMqProducer.scala    From reactive-activemq   with Apache License 2.0
package akka.stream.integration
package activemq

import akka.actor.ActorSystem
import akka.camel.{ CamelExtension, CamelMessage }
import akka.stream.integration.activemq.extension.ActiveMqExtension
import akka.stream.scaladsl.{ Flow, Keep, Sink }
import akka.{ Done, NotUsed }
import org.apache.camel.ProducerTemplate

import scala.collection.JavaConversions._
import scala.concurrent.{ ExecutionContext, Future }

object ActiveMqProducer {

  private def send[A: CamelMessageBuilder](payload: A, producerName: String, endpointUri: String, producer: ProducerTemplate)(implicit ec: ExecutionContext): Future[A] = Future {
    val msg: CamelMessage = implicitly[CamelMessageBuilder[A]].build(payload)
    producer.sendBodyAndHeaders(endpointUri, msg.body, msg.headers.mapValues(_.asInstanceOf[AnyRef]))
    payload
  }

  
  // Note: the sink and flow builder methods that apply delegates to are elided from this excerpt.
  def apply[A: CamelMessageBuilder](producerName: String, qos: Int = 8)(implicit ec: ExecutionContext, system: ActorSystem): Sink[A, Future[Done]] =
    sink(producerName, qos)
} 
Example 92
Source File: CarbonClient.scala    From akka-http-metrics   with Apache License 2.0
package fr.davit.akka.http.metrics.graphite

import java.time.{Clock, Instant}

import akka.NotUsed
import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.scaladsl.{Flow, Keep, RestartFlow, Sink, Source, Tcp}
import akka.stream.{OverflowStrategy, QueueOfferResult}
import akka.util.ByteString
import fr.davit.akka.http.metrics.core.Dimension

import scala.concurrent.Await
import scala.concurrent.duration.{Duration, _}

object CarbonClient {

  def apply(host: String, port: Int)(implicit system: ActorSystem): CarbonClient = new CarbonClient(host, port)
}

class CarbonClient(host: String, port: Int)(implicit system: ActorSystem) extends AutoCloseable {

  private val logger         = Logging(system.eventStream, classOf[CarbonClient])
  protected val clock: Clock = Clock.systemUTC()

  private def serialize[T](name: String, value: T, dimensions: Seq[Dimension], ts: Instant): ByteString = {
    val tags         = dimensions.map(d => d.key + "=" + d.value).toList
    val taggedMetric = (name :: tags).mkString(";")
    ByteString(s"$taggedMetric $value ${ts.getEpochSecond}\n")
  }

  // TODO read backoff from config
  private def connection: Flow[ByteString, ByteString, NotUsed] =
    RestartFlow.withBackoff(
      minBackoff = 3.seconds,
      maxBackoff = 30.seconds,
      randomFactor = 0.2, // adds 20% "noise" to vary the intervals slightly
      maxRestarts = -1 // keep retrying forever
    )(() => Tcp().outgoingConnection(host, port))

  private val queue = Source
    .queue[ByteString](19, OverflowStrategy.dropHead)
    .via(connection)
    .toMat(Sink.ignore)(Keep.left)
    .run()

  def publish[T](
      name: String,
      value: T,
      dimensions: Seq[Dimension] = Seq.empty,
      ts: Instant = Instant
        .now(clock)
  ): Unit = {
    // it's reasonable to block until the message is enqueued
    Await.result(queue.offer(serialize(name, value, dimensions, ts)), Duration.Inf) match {
      case QueueOfferResult.Enqueued    => logger.debug("Metric {} enqueued", name)
      case QueueOfferResult.Dropped     => logger.debug("Metric {} dropped", name)
      case QueueOfferResult.Failure(e)  => logger.error(e, s"Failed publishing metric $name")
      case QueueOfferResult.QueueClosed => throw new Exception("Failed publishing metric to closed carbon client")
    }
  }

  override def close(): Unit = {
    queue.complete()
    Await.result(queue.watchCompletion(), Duration.Inf)
  }
} 
Example 93
Source File: SqsAckSinkShapeSpec.scala    From akka-stream-sqs   with Apache License 2.0
package me.snov.akka.sqs.shape

import akka.Done
import akka.stream.scaladsl.{Keep, Sink}
import akka.stream.testkit.scaladsl.TestSource
import com.amazonaws.handlers.AsyncHandler
import com.amazonaws.services.sqs.model._
import me.snov.akka.sqs._
import me.snov.akka.sqs.client.SqsClient
import org.mockito.Mockito._
import org.mockito.ArgumentMatchers._
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.scalatest.mockito.MockitoSugar.mock
import org.scalatest.{FlatSpec, Matchers}

import scala.concurrent.Await
import scala.concurrent.duration._

class SqsAckSinkShapeSpec extends FlatSpec with Matchers with DefaultTestContext {
  it should "delete messages on Ack" in {

    val sqsClient = mock[SqsClient]
    when(sqsClient.deleteAsync(any(), any())).thenAnswer(
      new Answer[Object] {
        override def answer(invocation: InvocationOnMock): Object = {
          val receiptHandle = invocation.getArgument[String](0)
          val callback = invocation.getArgument[AsyncHandler[DeleteMessageRequest, DeleteMessageResult]](1)
          callback.onSuccess(
            new DeleteMessageRequest().withReceiptHandle(receiptHandle),
            new DeleteMessageResult
          )
          None
        }
      }
    )

    val (probe, future) = TestSource.probe[MessageActionPair]
      .toMat(Sink.fromGraph(SqsAckSinkShape(sqsClient)))(Keep.both)
      .run()

    probe
      .sendNext((new Message().withReceiptHandle("123"), Ack()))
      .sendComplete()

    Await.result(future, 1.second) shouldBe Done
    verify(sqsClient, times(1)).deleteAsync(any(), any())
  }

  it should "requeue messages on RequeueWithDelay" in {

    val sqsClient = mock[SqsClient]
    when(sqsClient.sendWithDelayAsync(any[String], any[Int], any())).thenAnswer(
      new Answer[Object] {
        override def answer(invocation: InvocationOnMock): Object = {
          val body = invocation.getArgument[String](0)
          val delay = invocation.getArgument[Int](1)
          val callback = invocation.getArgument[AsyncHandler[SendMessageRequest, SendMessageResult]](2)
          callback.onSuccess(
            new SendMessageRequest().withMessageBody(body).withDelaySeconds(delay),
            new SendMessageResult().withMessageId("12345")
          )
          None
        }
      }
    )

    val (probe, future) = TestSource.probe[MessageActionPair]
      .toMat(Sink.fromGraph(SqsAckSinkShape(sqsClient)))(Keep.both)
      .run()

    probe
      .sendNext((new Message().withBody("foo"), RequeueWithDelay(9)))
      .sendComplete()

    Await.result(future, 100.second) shouldBe Done
    verify(sqsClient, times(1)).sendWithDelayAsync(any(), any(), any())
  }
} 
Example 94
Source File: ClickhouseClientTest.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0
package com.crobox.clickhouse

import akka.stream.scaladsl.{Keep, Sink}
import com.crobox.clickhouse.internal.progress.QueryProgress.{Progress, QueryAccepted, QueryFinished, QueryProgress}
import com.typesafe.config.ConfigFactory


class ClickhouseClientTest extends ClickhouseClientAsyncSpec {

  val client: ClickhouseClient = new ClickhouseClient(Some(config))

  "Clickhouse client" should "select" in {
    client
      .query("select 1 + 2")
      .map { f =>
        f.trim.toInt should be(3)
      }
      .flatMap(
        _ =>
          client.query("select currentDatabase()").map { f =>
            f.trim should be("default")
        }
      )
  }

  it should "support compression" in {
    val client: ClickhouseClient = new ClickhouseClient(
      Some(config.resolveWith(ConfigFactory.parseString("crobox.clickhouse.client.http-compression = true")))
    )
    client.query("select count(*) from system.tables").map { f =>
      f.trim.toInt > 10 should be(true)
    }
  }

  it should "decline execute SELECT query" in {
    client.execute("select 1 + 2").map(_ => fail()).recover {
      case _: IllegalArgumentException => succeed
    }
  }

  "Query progress" should "publish query progress messages" in {
    client
      .queryWithProgress("select 1 + 2")
      .runWith(Sink.seq[QueryProgress])
      .map(progress => progress should contain theSameElementsAs Seq(QueryAccepted, QueryFinished))
  }

  it should "materialize progress source with the query result" in {
    client
      .queryWithProgress("select 1 + 2")
      .toMat(Sink.ignore)(Keep.left)
      .run()
      .map(result => result.shouldBe("3\n"))
  }

  it should "send full progress messages" in {
    client
      .queryWithProgress("select sum(number) FROM (select number from system.numbers limit 100000000)")
      .runWith(Sink.seq[QueryProgress])
      .map(progress => {
//        println(progress)
        progress collect {
          case qp: Progress => qp
        } should not be empty
      })
  }

} 
Example 95
Source File: ClusterConnectionFlowTest.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0
package com.crobox.clickhouse.balancing.discovery.cluster

import akka.http.scaladsl.model.Uri
import akka.stream.scaladsl.{Keep, Sink}
import com.crobox.clickhouse.ClickhouseClientAsyncSpec
import com.crobox.clickhouse.internal.ClickhouseHostBuilder

import scala.concurrent._
import scala.concurrent.duration._

class ClusterConnectionFlowTest extends ClickhouseClientAsyncSpec {

  private val clickhouseUri: Uri = ClickhouseHostBuilder.toHost("localhost", Some(8123))
  it should "select cluster hosts" in {
    val (_, futureResult) = ClusterConnectionFlow
      .clusterConnectionsFlow(Future.successful(clickhouseUri), 2 seconds, "test_shard_localhost")
      .toMat(Sink.head)(Keep.both)
      .run()
    futureResult.map(result => {
      result.hosts should contain only ClickhouseHostBuilder.toHost("127.0.0.1", Some(8123))
    })
  }

  it should "fail for non existing cluster" in {
    val (_, futureResult) = ClusterConnectionFlow
      .clusterConnectionsFlow(Future.successful(clickhouseUri), 2 seconds, "cluster")
      .toMat(Sink.head)(Keep.both)
      .run()
    futureResult
      .map(_ => {
        fail("Returned answer for non existing clsuter")
      })
      .recover {
        case _: IllegalArgumentException => succeed
      }
  }

} 
Example 96
Source File: QueryProgress.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0
package com.crobox.clickhouse.internal.progress
import akka.NotUsed
import akka.stream.scaladsl.{BroadcastHub, Keep, RunnableGraph, Source, SourceQueueWithComplete}
import akka.stream.{ActorAttributes, OverflowStrategy, Supervision}
import com.typesafe.scalalogging.LazyLogging
import spray.json._
import spray.json.DefaultJsonProtocol._
import scala.util.{Failure, Success, Try}

object QueryProgress extends LazyLogging {

  sealed trait QueryProgress
  case object QueryAccepted                                 extends QueryProgress
  case object QueryFinished                                 extends QueryProgress
  case object QueryRejected                                 extends QueryProgress
  case class QueryFailed(cause: Throwable)                  extends QueryProgress
  case class QueryRetry(cause: Throwable, retryNumber: Int) extends QueryProgress

  case class ClickhouseQueryProgress(identifier: String, progress: QueryProgress)
  case class Progress(rowsRead: Long, bytesRead: Long, rowsWritten: Long, bytesWritten: Long, totalRows: Long) extends QueryProgress

  def queryProgressStream: RunnableGraph[(SourceQueueWithComplete[String], Source[ClickhouseQueryProgress, NotUsed])] =
    Source
      .queue[String](1000, OverflowStrategy.dropHead)
      .map[Option[ClickhouseQueryProgress]](queryAndProgress => {
        queryAndProgress.split("\n", 2).toList match {
          case queryId :: ProgressHeadersAsEventsStage.AcceptedMark :: Nil =>
            Some(ClickhouseQueryProgress(queryId, QueryAccepted))
          case queryId :: progressJson :: Nil =>
            Try {
              progressJson.parseJson match {
                case JsObject(fields) if fields.size == 3 =>
                  ClickhouseQueryProgress(
                    queryId,
                    Progress(
                        fields("read_rows").convertTo[String].toLong,
                        fields("read_bytes").convertTo[String].toLong,
                        0,
                        0,
                        fields("total_rows").convertTo[String].toLong
                    )
                  )
                case JsObject(fields) if fields.size == 5 =>
                  ClickhouseQueryProgress(
                    queryId,
                    Progress(
                      fields("read_rows").convertTo[String].toLong,
                      fields("read_bytes").convertTo[String].toLong,
                      fields("written_rows").convertTo[String].toLong,
                      fields("written_bytes").convertTo[String].toLong,
                      fields("total_rows_to_read").convertTo[String].toLong
                    )
                  )
                case _ => throw new IllegalArgumentException(s"Cannot extract progress from $progressJson")
              }
            } match {
              case Success(value) => Some(value)
              case Failure(exception) =>
                logger.warn(s"Failed to parse json $progressJson", exception)
                None
            }
          case other @ _ =>
            logger.warn(s"Could not get progress from $other")
            None

        }
      })
      .collect {
        case Some(progress) => progress
      }
      .withAttributes(ActorAttributes.supervisionStrategy({
        case ex @ _ =>
          logger.warn("Detected failure in the query progress stream, resuming operation.", ex)
          Supervision.Resume
      }))
      .toMat(BroadcastHub.sink)(Keep.both)
} 
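The progress stream above ends with toMat(BroadcastHub.sink)(Keep.both), so materializing it yields both the input queue and a source that any number of subscribers can attach to. A small standalone sketch of that queue-plus-hub shape follows; the buffer sizes, consumer sinks and messages are illustrative assumptions.

import akka.actor.ActorSystem
import akka.stream.scaladsl.{ BroadcastHub, Keep, Sink, Source }
import akka.stream.{ ActorMaterializer, OverflowStrategy }

object BroadcastHubKeepBothSketch extends App {
  implicit val system: ActorSystem = ActorSystem("broadcast-hub-sketch")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  // Keep.both materializes (SourceQueueWithComplete[String], Source[String, NotUsed]).
  val (queue, broadcast) =
    Source
      .queue[String](64, OverflowStrategy.dropHead)
      .toMat(BroadcastHub.sink)(Keep.both)
      .run()

  // Every consumer that runs the hub source gets its own copy of the elements.
  broadcast.runWith(Sink.foreach[String](msg => println(s"consumer A: $msg")))
  broadcast.runWith(Sink.foreach[String](msg => println(s"consumer B: $msg")))

  queue.offer("progress event")
  Thread.sleep(500)
  queue.complete()
  system.terminate()
}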
Example 97
Source File: ClickhouseSink.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0
package com.crobox.clickhouse.stream

import akka.Done
import akka.stream.scaladsl.{Flow, Keep, Sink}
import com.crobox.clickhouse.ClickhouseClient
import com.crobox.clickhouse.internal.QuerySettings
import com.typesafe.config.Config
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

case class ClickhouseIndexingException(msg: String, cause: Throwable, payload: Seq[String], table: String)
    extends RuntimeException(msg, cause)
case class Insert(table: String, jsonRow: String)

object ClickhouseSink extends LazyLogging {

  def insertSink(config: Config, client: ClickhouseClient, indexerName: Option[String] = None)(
      implicit ec: ExecutionContext,
      settings: QuerySettings = QuerySettings()
  ): Sink[Insert, Future[Done]] = {
    val indexerGeneralConfig = config.getConfig("crobox.clickhouse.indexer")
    val mergedIndexerConfig = indexerName
      .flatMap(
        theIndexName =>
          if (indexerGeneralConfig.hasPath(theIndexName))
            Some(indexerGeneralConfig.getConfig(theIndexName).withFallback(indexerGeneralConfig))
          else None
      )
      .getOrElse(indexerGeneralConfig)
    Flow[Insert]
      .groupBy(Int.MaxValue, _.table)
      .groupedWithin(mergedIndexerConfig.getInt("batch-size"),
                     mergedIndexerConfig.getDuration("flush-interval").getSeconds.seconds)
      .mapAsyncUnordered(mergedIndexerConfig.getInt("concurrent-requests"))(inserts => {
        val table       = inserts.head.table
        val insertQuery = s"INSERT INTO $table FORMAT JSONEachRow"
        val payload     = inserts.map(_.jsonRow)
        val payloadSql  = payload.mkString("\n")
        client.execute(insertQuery, payloadSql) recover {
          case ex =>
            throw ClickhouseIndexingException("failed to index", ex, payload, table)
        } map (_ => inserts)
      })
      .mergeSubstreams
      .toMat(Sink.ignore)(Keep.right)
  }
}