akka.stream.scaladsl.Flow Scala Examples

The following examples show how to use akka.stream.scaladsl.Flow. Each example is taken from an open-source project; the source file, project, and license are noted above it.
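Before the project examples, here is a minimal self-contained sketch of the Flow API itself (assuming Akka 2.6+, where an implicit ActorSystem supplies the materializer): a Flow is a reusable processing stage with exactly one input and one output; attaching it to a Source and a Sink yields a runnable stream.

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink, Source}

object FlowBasics extends App {
  implicit val system: ActorSystem = ActorSystem("flow-basics")

  // A reusable stage: one input, one output.
  val double = Flow[Int].map(_ * 2)

  Source(1 to 3)
    .via(double)
    .runWith(Sink.foreach(println)) // prints 2, 4, 6
    .onComplete(_ => system.terminate())(system.dispatcher)
}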
Example 1
Source File: ExecuteAfterResponse.scala    From opencensus-scala    with Apache License 2.0
package io.opencensus.scala.akka.http.utils

import akka.NotUsed
import akka.http.scaladsl.model.{HttpEntity, HttpResponse}
import akka.stream.scaladsl.Flow
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import akka.stream.{Attributes, FlowShape, Inlet, Outlet}

object ExecuteAfterResponse {

  private class AfterResponseFlow[Element](
      onFinish: () => Unit,
      onFailure: Throwable => Unit
  ) extends GraphStage[FlowShape[Element, Element]] {
    private val in  = Inlet[Element]("in")
    private val out = Outlet[Element]("out")

    override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
      new GraphStageLogic(shape) with InHandler with OutHandler {
        def onPush(): Unit = push(out, grab(in))
        def onPull(): Unit = pull(in)

        setHandler(in, this)
        setHandler(out, this)

        override def onUpstreamFinish(): Unit = {
          onFinish()
          super.onUpstreamFinish()
        }
        override def onUpstreamFailure(ex: Throwable): Unit = {
          onFailure(ex)
          super.onUpstreamFailure(ex)
        }
      }

    override val shape = FlowShape(in, out)
  }

  private object AfterResponseFlow {
    def apply[Element](
        onFinish: () => Unit,
        onFailure: Throwable => Unit
    ): Flow[Element, Element, NotUsed] =
      Flow.fromGraph(new AfterResponseFlow(onFinish, onFailure))
  }

  def onComplete(
      response: HttpResponse,
      onFinish: () => Unit,
      onFailure: Throwable => Unit
  ): HttpResponse = {

    response.copy(
      entity = if (response.status.allowsEntity) {
        response.entity.transformDataBytes(
          AfterResponseFlow(onFinish, onFailure)
        )
      } else {
        onFinish()
        HttpEntity.Empty
      }
    )
  }
} 
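A minimal usage sketch for the utility above; the response, callbacks, and object name are illustrative placeholders, not part of the original project. onComplete attaches the stage to the response entity so a callback fires once the entity stream terminates.

import akka.http.scaladsl.model.{HttpEntity, HttpResponse, StatusCodes}
import io.opencensus.scala.akka.http.utils.ExecuteAfterResponse

object ExecuteAfterResponseSketch {
  val response = HttpResponse(StatusCodes.OK, entity = HttpEntity("payload"))

  val instrumented: HttpResponse = ExecuteAfterResponse.onComplete(
    response,
    onFinish = () => println("entity stream completed"),
    onFailure = ex => println(s"entity stream failed: ${ex.getMessage}")
  )
}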
Example 2
Source File: CommandSubmissionFlow.scala    From daml    with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.commands

import akka.NotUsed
import akka.stream.scaladsl.Flow
import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.v1.command_submission_service.SubmitRequest
import com.daml.util.Ctx
import com.google.protobuf.empty.Empty

import scala.concurrent.Future
import scala.util.{Success, Try}

object CommandSubmissionFlow {

  def apply[Context](
      submit: SubmitRequest => Future[Empty],
      maxInFlight: Int): Flow[Ctx[Context, SubmitRequest], Ctx[Context, Try[Empty]], NotUsed] = {
    Flow[Ctx[Context, SubmitRequest]]
      .log("submission at client", _.value.commands.fold("")(_.commandId))
      .mapAsyncUnordered(maxInFlight) {
        case Ctx(context, request) =>
          submit(request)
            .transform { tryResponse =>
              Success(
                Ctx(
                  context,
                  tryResponse
                ))
            }(DirectExecutionContext)
      }
  }

} 
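A hedged wiring sketch for the flow above, using a stubbed submission function (the object name, context value, and default-constructed SubmitRequest are illustrative assumptions). Ctx simply carries a correlation value alongside each request and response.

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import com.daml.ledger.api.v1.command_submission_service.SubmitRequest
import com.daml.ledger.client.services.commands.CommandSubmissionFlow
import com.daml.util.Ctx
import com.google.protobuf.empty.Empty

import scala.concurrent.Future

object SubmissionSketch extends App {
  implicit val system: ActorSystem = ActorSystem("submissions")

  // Stub that acknowledges every request; a real client would call the ledger.
  val stubSubmit: SubmitRequest => Future[Empty] = _ => Future.successful(Empty())

  Source.single(Ctx("request-1", SubmitRequest()))
    .via(CommandSubmissionFlow(stubSubmit, maxInFlight = 16))
    .runWith(Sink.foreach(ctx => println(s"${ctx.context}: ${ctx.value}")))
}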
Example 3
Source File: StaticTime.scala    From daml    with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.testing.time

import java.time.Instant
import java.util.concurrent.atomic.AtomicReference

import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, RunnableGraph, Sink}
import akka.stream.{ClosedShape, KillSwitches, Materializer, UniqueKillSwitch}
import com.daml.api.util.{TimeProvider, TimestampConversion}
import com.daml.api.util.TimestampConversion._
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.v1.testing.time_service.{GetTimeRequest, SetTimeRequest}
import com.daml.ledger.api.v1.testing.time_service.TimeServiceGrpc.{TimeService, TimeServiceStub}
import com.daml.ledger.client.LedgerClient

import scala.concurrent.{ExecutionContext, Future}

class StaticTime(
    timeService: TimeService,
    clock: AtomicReference[Instant],
    killSwitch: UniqueKillSwitch,
    ledgerId: String)
    extends TimeProvider
    with AutoCloseable {

  def getCurrentTime: Instant = clock.get

  def timeRequest(instant: Instant) =
    SetTimeRequest(
      ledgerId,
      Some(TimestampConversion.fromInstant(getCurrentTime)),
      Some(TimestampConversion.fromInstant(instant)))

  def setTime(instant: Instant)(implicit ec: ExecutionContext): Future[Unit] = {
    timeService.setTime(timeRequest(instant)).map { _ =>
      val _ = StaticTime.advanceClock(clock, instant)
    }
  }

  override def close(): Unit = killSwitch.shutdown()
}

object StaticTime {
  def advanceClock(clock: AtomicReference[Instant], instant: Instant): Instant = {
    clock.updateAndGet {
      case current if instant isAfter current => instant
      case current => current
    }
  }

  def updatedVia(timeService: TimeServiceStub, ledgerId: String, token: Option[String] = None)(
      implicit m: Materializer,
      esf: ExecutionSequencerFactory): Future[StaticTime] = {
    val clockRef = new AtomicReference[Instant](Instant.EPOCH)
    val killSwitchExternal = KillSwitches.single[Instant]
    val sinkExternal = Sink.head[Instant]

    RunnableGraph
      .fromGraph {
        GraphDSL.create(killSwitchExternal, sinkExternal) {
          case (killSwitch, futureOfFirstElem) =>
            // We serve this in a future which completes when the first element has passed through.
            // Thus we make sure that the object we serve already received time data from the ledger.
            futureOfFirstElem.map(_ => new StaticTime(timeService, clockRef, killSwitch, ledgerId))(
              DirectExecutionContext)
        } { implicit b => (killSwitch, sinkHead) =>
          import GraphDSL.Implicits._
          val instantSource = b.add(
            ClientAdapter
              .serverStreaming(
                GetTimeRequest(ledgerId),
                LedgerClient.stub(timeService, token).getTime)
              .map(r => toInstant(r.getCurrentTime)))

          val updateClock = b.add(Flow[Instant].map { i =>
            advanceClock(clockRef, i)
            i
          })

          val broadcastTimes = b.add(Broadcast[Instant](2))

          val ignore = b.add(Sink.ignore)

          // format: OFF
          instantSource ~> killSwitch ~> updateClock ~> broadcastTimes.in
                                                        broadcastTimes.out(0) ~> sinkHead
                                                        broadcastTimes.out(1) ~> ignore
          // format: ON

          ClosedShape
        }
      }
      .run()
  }

} 
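The GraphDSL wiring above, reduced to a self-contained sketch (assuming Akka 2.6+ for the implicit-system materializer): one source fans out through a Broadcast to a materialized Sink.head and a draining Sink.ignore, mirroring how StaticTime serves its first element while keeping the stream running.

import akka.actor.ActorSystem
import akka.stream.ClosedShape
import akka.stream.scaladsl.{Broadcast, GraphDSL, RunnableGraph, Sink, Source}

object BroadcastSketch extends App {
  implicit val system: ActorSystem = ActorSystem("broadcast-sketch")

  val graph = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Int]) { implicit b => sinkHead =>
    import GraphDSL.Implicits._
    val broadcast = b.add(Broadcast[Int](2))
    Source(1 to 10) ~> broadcast.in
    broadcast.out(0) ~> sinkHead   // materialized: the first element
    broadcast.out(1) ~> Sink.ignore // keep the rest of the stream flowing
    ClosedShape
  })

  graph.run().foreach(first => println(s"first element: $first"))(system.dispatcher)
}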
Example 4
Source File: ExtractMaterializedValue.scala    From daml    with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.util.akkastreams

import akka.stream.scaladsl.Flow
import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
import akka.stream.stage.{GraphStageLogic, GraphStageWithMaterializedValue, InHandler, OutHandler}

import scala.concurrent.{Future, Promise}


class ExtractMaterializedValue[T, Mat](toMaterialized: T => Option[Mat])
    extends GraphStageWithMaterializedValue[FlowShape[T, T], Future[Mat]] {

  val inlet: Inlet[T] = Inlet[T]("in")
  val outlet: Outlet[T] = Outlet[T]("out")

  override def createLogicAndMaterializedValue(
      inheritedAttributes: Attributes): (GraphStageLogic, Future[Mat]) = {
    val promise = Promise[Mat]()

    val logic = new GraphStageLogic(shape) {

      setHandler(
        inlet,
        new InHandler {
          override def onPush(): Unit = {
            val input = grab(inlet)
            push(outlet, input)
            toMaterialized(input).foreach { materialized =>
              promise.trySuccess(materialized)
              setSimplerHandler()
            }
          }

          private def setSimplerHandler(): Unit = {
            setHandler(inlet, new InHandler {
              override def onPush(): Unit =
                push(outlet, grab(inlet))
            })
          }

          override def onUpstreamFailure(ex: Throwable): Unit = {
            promise.tryFailure(ex)
            super.onUpstreamFailure(ex)
          }

          override def onUpstreamFinish(): Unit = {
            promise.tryFailure(
              new RuntimeException("Upstream completed before matching element arrived."))
            super.onUpstreamFinish()
          }
        }
      )

      setHandler(
        outlet,
        new OutHandler {
          override def onPull(): Unit = pull(inlet)

          override def onDownstreamFinish(cause: Throwable): Unit = {
            promise.tryFailure(
              new RuntimeException("Downstream completed before matching element arrived."))
            super.onDownstreamFinish(cause)
          }
        }
      )

    }

    logic -> promise.future
  }

  override def shape: FlowShape[T, T] = FlowShape(inlet, outlet)
}

object ExtractMaterializedValue {
  def apply[T, Mat](toOutputOrMaterialized: T => Option[Mat]): Flow[T, T, Future[Mat]] =
    Flow.fromGraph(new ExtractMaterializedValue[T, Mat](toOutputOrMaterialized))
} 
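A usage sketch (object name and predicate are illustrative): materialize the first element that satisfies a predicate while every element passes through the stream untouched.

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Keep, Sink, Source}
import com.daml.util.akkastreams.ExtractMaterializedValue

object ExtractSketch extends App {
  implicit val system: ActorSystem = ActorSystem("extract-mat")

  val (firstBig, done) =
    Source(1 to 100)
      .viaMat(ExtractMaterializedValue[Int, Int](i => if (i > 42) Some(i) else None))(Keep.right)
      .toMat(Sink.ignore)(Keep.both)
      .run()

  firstBig.foreach(i => println(s"first element above 42: $i"))(system.dispatcher)
}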
Example 5
Source File: LedgerEntriesSpec.scala    From daml    with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.stores.ledger.inmemory

import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import com.daml.ledger.participant.state.v1.Offset
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import org.scalatest.{AsyncWordSpec, Inspectors, Matchers}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.Random

class LedgerEntriesSpec
    extends AsyncWordSpec
    with Matchers
    with AkkaBeforeAndAfterAll
    with Inspectors {

  case class Error(msg: String)

  case class Transaction(content: String)

  val NO_OF_MESSAGES = 10000
  val NO_OF_SUBSCRIBERS = 50

  private def genTransactions() = (1 to NO_OF_MESSAGES).map { i =>
    if (Random.nextBoolean())
      Right(Transaction(i.toString))
    else
      Left(Error(i.toString))
  }

  "LedgerEntries" should {

    "store new blocks and a late subscriber can read them" in {
      val ledger = new LedgerEntries[Either[Error, Transaction]](_.toString)
      val transactions = genTransactions()

      transactions.foreach(t => ledger.publish(t))

      val sink =
        Flow[(Offset, Either[Error, Transaction])]
          .take(NO_OF_MESSAGES.toLong)
          .toMat(Sink.seq)(Keep.right)

      val blocksF = ledger.getSource(None, None).runWith(sink)

      blocksF.map { blocks =>
        val readTransactions = blocks.collect { case (_, transaction) => transaction }
        readTransactions shouldEqual transactions
      }
    }

    "store new blocks while multiple subscribers are reading them with different pace" in {
      val transactions = genTransactions()

      val ledger = new LedgerEntries[Either[Error, Transaction]](_.toString)

      val publishRate = NO_OF_MESSAGES / 10

      val blocksInStream =
        Source(transactions)
          .throttle(publishRate, 100.milliseconds, publishRate, ThrottleMode.shaping)
          .to(Sink.foreach { t =>
            ledger.publish(t)
            ()
          })

      def subscribe() = {
        val subscribeRate = NO_OF_MESSAGES / (Random.nextInt(100) + 1)
        ledger
          .getSource(None, None)
          .runWith(
            Flow[(Offset, Either[Error, Transaction])]
              .throttle(subscribeRate, 100.milliseconds, subscribeRate, ThrottleMode.shaping)
              .take(NO_OF_MESSAGES.toLong)
              .toMat(Sink.seq)(Keep.right)
          )
      }

      val readBlocksF = Future.sequence((1 to NO_OF_SUBSCRIBERS).map(_ => subscribe()))
      blocksInStream.run()

      readBlocksF.map { readBlocksForAll =>
        forAll(readBlocksForAll) { readBlocks =>
          val readTransactions = readBlocks.collect { case (_, transaction) => transaction }
          readTransactions shouldEqual transactions
        }
      }
    }
  }
} 
Example 6
Source File: FlowUtil.scala    From daml    with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.http.util

import akka.NotUsed
import akka.stream.scaladsl.Flow
import scalaz.{-\/, \/}

object FlowUtil {
  def allowOnlyFirstInput[E, A](error: => E): Flow[E \/ A, E \/ A, NotUsed] =
    Flow[E \/ A]
      .scan(Option.empty[E \/ A]) { (s0, x) =>
        s0 match {
          case Some(_) =>
            Some(-\/(error))
          case None =>
            Some(x)
        }
      }
      .collect {
        case Some(x) => x
      }
} 
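A quick sketch of the behaviour (scalaz's \/ is the Either-like type used above; the object name and error string are illustrative): the first element passes through unchanged, and every later element is replaced by the supplied error.

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import com.daml.http.util.FlowUtil
import scalaz.{\/, \/-}

object FlowUtilSketch extends App {
  implicit val system: ActorSystem = ActorSystem("flow-util-sketch")

  val input: List[String \/ Int] = List(\/-(1), \/-(2), \/-(3))

  Source(input)
    .via(FlowUtil.allowOnlyFirstInput[String, Int]("more than one input"))
    .runWith(Sink.foreach(println)) // \/-(1), then -\/("more than one input") twice
}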
Example 7
Source File: AkkaImplementation.scala    From daml    with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter.utils.implementations

import java.util.concurrent.atomic.AtomicInteger

import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Source}
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.server.akka.ServerAdapter
import com.daml.grpc.sampleservice.Responding
import com.daml.platform.hello.HelloServiceGrpc.HelloService
import com.daml.platform.hello.{HelloRequest, HelloResponse, HelloServiceGrpc}
import io.grpc.stub.StreamObserver
import io.grpc.{BindableService, ServerServiceDefinition}

import scala.concurrent.ExecutionContext.Implicits.global

class AkkaImplementation(
    implicit executionSequencerFactory: ExecutionSequencerFactory,
    materializer: Materializer,
) extends HelloService
    with Responding
    with BindableService {

  private val serverStreamingCalls = new AtomicInteger()

  def getServerStreamingCalls: Int = serverStreamingCalls.get()

  override def bindService(): ServerServiceDefinition =
    HelloServiceGrpc.bindService(this, global)

  override def serverStreaming(
      request: HelloRequest,
      responseObserver: StreamObserver[HelloResponse],
  ): Unit =
    Source
      .single(request)
      .via(Flow[HelloRequest].mapConcat(responses))
      .runWith(ServerAdapter.toSink(responseObserver))
      .onComplete(_ => serverStreamingCalls.incrementAndGet())

} 
Example 8
Source File: Jwt.scala    From daml    with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.engine.trigger.test

import akka.stream.scaladsl.Flow
import com.daml.lf.data.Ref._
import com.daml.platform.sandbox.services.SandboxFixtureWithAuth
import com.daml.ledger.api.testing.utils.SuiteResourceManagementAroundAll
import com.daml.ledger.api.v1.commands._
import com.daml.ledger.api.v1.commands.CreateCommand
import com.daml.ledger.api.v1.{value => LedgerApi}
import org.scalatest._

import com.daml.lf.engine.trigger.TriggerMsg

class Jwt
    extends AsyncWordSpec
    with AbstractTriggerTest
    with SandboxFixtureWithAuth
    with Matchers
    with SuiteResourceManagementAroundAll
    with TryValues {
  self: Suite =>

  override protected def ledgerClientConfiguration = super.ledgerClientConfiguration.copy(
    token = Some(toHeader(readWriteToken(party)))
  )

  private val party = "AliceAuth"

  "Jwt" can {
    // We just need something simple to test the connection.
    val assetId = LedgerApi.Identifier(packageId, "ACS", "Asset")
    val assetMirrorId = LedgerApi.Identifier(packageId, "ACS", "AssetMirror")
    def asset(party: String): CreateCommand =
      CreateCommand(
        templateId = Some(assetId),
        createArguments = Some(LedgerApi.Record(
          fields = Seq(LedgerApi.RecordField("issuer", Some(LedgerApi.Value().withParty(party)))))))
    "1 create" in {
      for {
        client <- ledgerClient()
        runner = getRunner(client, QualifiedName.assertFromString("ACS:test"), party)
        (acs, offset) <- runner.queryACS()
        // Start the future here
        finalStateF = runner.runWithACS(acs, offset, msgFlow = Flow[TriggerMsg].take(6))._2
        // Execute commands
        contractId <- create(client, party, asset(party))
        // Wait for the trigger to terminate
        _ <- finalStateF
        acs <- queryACS(client, party)
      } yield {
        assert(acs(assetId).size == 1)
        assert(acs(assetMirrorId).size == 1)
      }
    }
  }
} 
Example 9
Source File: Tls.scala    From daml    with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.engine.trigger.test

import akka.stream.scaladsl.Flow
import com.daml.bazeltools.BazelRunfiles._
import com.daml.lf.data.Ref._
import com.daml.ledger.api.testing.utils.SuiteResourceManagementAroundAll
import com.daml.ledger.api.tls.TlsConfiguration
import com.daml.ledger.api.v1.commands._
import com.daml.ledger.api.v1.commands.CreateCommand
import com.daml.ledger.api.v1.{value => LedgerApi}
import java.io.File
import org.scalatest._

import com.daml.lf.engine.trigger.TriggerMsg

class Tls
    extends AsyncWordSpec
    with AbstractTriggerTest
    with Matchers
    with SuiteResourceManagementAroundAll
    with TryValues {
  self: Suite =>

  val List(serverCrt, serverPem, caCrt, clientCrt, clientPem) = {
    List("server.crt", "server.pem", "ca.crt", "client.crt", "client.pem").map { src =>
      Some(new File(rlocation("ledger/test-common/test-certificates/" + src)))
    }
  }

  override protected def config =
    super.config
      .copy(tlsConfig = Some(TlsConfiguration(enabled = true, serverCrt, serverPem, caCrt)))

  override protected def ledgerClientConfiguration =
    super.ledgerClientConfiguration
      .copy(sslContext = TlsConfiguration(enabled = true, clientCrt, clientPem, caCrt).client)

  "TLS" can {
    // We just need something simple to test the connection.
    val assetId = LedgerApi.Identifier(packageId, "ACS", "Asset")
    val assetMirrorId = LedgerApi.Identifier(packageId, "ACS", "AssetMirror")
    def asset(party: String): CreateCommand =
      CreateCommand(
        templateId = Some(assetId),
        createArguments = Some(LedgerApi.Record(
          fields = Seq(LedgerApi.RecordField("issuer", Some(LedgerApi.Value().withParty(party)))))))
    "1 create" in {
      for {
        client <- ledgerClient()
        party <- allocateParty(client)
        runner = getRunner(client, QualifiedName.assertFromString("ACS:test"), party)
        (acs, offset) <- runner.queryACS()
        // Start the future here
        finalStateF = runner.runWithACS(acs, offset, msgFlow = Flow[TriggerMsg].take(6))._2
        // Execute commands
        contractId <- create(client, party, asset(party))
        // Wait for the trigger to terminate
        _ <- finalStateF
        acs <- queryACS(client, party)
      } yield {
        assert(acs(assetId).size == 1)
        assert(acs(assetMirrorId).size == 1)
      }
    }
  }
} 
Example 10
Source File: ContextualizedLogger.scala    From daml    with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.logging

import akka.NotUsed
import akka.stream.scaladsl.Flow
import com.daml.grpc.GrpcException
import io.grpc.Status
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.concurrent.TrieMap
import scala.util.{Failure, Try}
import scala.util.control.NonFatal

object ContextualizedLogger {

  // Caches loggers to prevent them from needlessly wasting memory
  // Replicates the behavior of the underlying Slf4j logger factory
  private[this] val cache = TrieMap.empty[String, ContextualizedLogger]

  // Allows a logger to be passed in explicitly; intended for testing only
  private[logging] def createFor(withoutContext: Logger): ContextualizedLogger =
    new ContextualizedLogger(withoutContext)

  // Slf4j handles the caching of the underlying logger itself
  private[logging] def createFor(name: String): ContextualizedLogger =
    createFor(LoggerFactory.getLogger(name))

  def get(clazz: Class[_]): ContextualizedLogger = {
    val name = clazz.getName.stripSuffix("$")
    cache.getOrElseUpdate(name, createFor(name))
  }

}

final class ContextualizedLogger private (val withoutContext: Logger) {

  val trace = new LeveledLogger.Trace(withoutContext)
  val debug = new LeveledLogger.Debug(withoutContext)
  val info = new LeveledLogger.Info(withoutContext)
  val warn = new LeveledLogger.Warn(withoutContext)
  val error = new LeveledLogger.Error(withoutContext)

  private def internalOrUnknown(code: Status.Code): Boolean =
    code == Status.Code.INTERNAL || code == Status.Code.UNKNOWN

  private def logError(t: Throwable)(implicit logCtx: LoggingContext): Unit =
    error("Unhandled internal error", t)

  def logErrorsOnCall[Out](implicit logCtx: LoggingContext): PartialFunction[Try[Out], Unit] = {
    case Failure(e @ GrpcException(s, _)) =>
      if (internalOrUnknown(s.getCode)) {
        logError(e)
      }
    case Failure(NonFatal(e)) =>
      logError(e)
  }

  def logErrorsOnStream[Out](implicit logCtx: LoggingContext): Flow[Out, Out, NotUsed] =
    Flow[Out].mapError {
      case e @ GrpcException(s, _) =>
        if (internalOrUnknown(s.getCode)) {
          logError(e)
        }
        e
      case NonFatal(e) =>
        logError(e)
        e
    }

} 
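The mapError idiom above in isolation, as a minimal sketch (object and method names are illustrative): observe a failure as it propagates through the stream, then rethrow it unchanged.

import akka.NotUsed
import akka.stream.scaladsl.Flow

import scala.util.control.NonFatal

object MapErrorSketch {
  def logErrors[Out](log: Throwable => Unit): Flow[Out, Out, NotUsed] =
    Flow[Out].mapError {
      case NonFatal(e) =>
        log(e)
        e // mapError must return the (possibly transformed) failure to rethrow
    }
}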
Example 11
Source File: WebsocketController.scala    From gbf-raidfinder    with MIT License
package walfie.gbf.raidfinder.server.controller

import akka.actor._
import akka.stream.scaladsl.Flow
import akka.stream.{Materializer, OverflowStrategy}
import monix.execution.Scheduler
import play.api.http.websocket.Message
import play.api.libs.streams._
import play.api.mvc._
import play.api.mvc.WebSocket.MessageFlowTransformer
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.Future
import walfie.gbf.raidfinder.domain._
import walfie.gbf.raidfinder.protocol._
import walfie.gbf.raidfinder.RaidFinder
import walfie.gbf.raidfinder.server.actor.WebsocketRaidsHandler
import walfie.gbf.raidfinder.server.util.MessageFlowTransformerUtil
import walfie.gbf.raidfinder.server.{BossNameTranslator, MetricsCollector}

class WebsocketController(
  raidFinder:        RaidFinder[BinaryProtobuf],
  translator:        BossNameTranslator,
  keepAliveInterval: FiniteDuration,
  metricsCollector:  MetricsCollector
)(implicit system: ActorSystem, materializer: Materializer, scheduler: Scheduler) extends Controller {
  private val jsonTransformer = MessageFlowTransformerUtil.protobufJsonMessageFlowTransformer
  private val binaryTransformer = MessageFlowTransformerUtil.protobufBinaryMessageFlowTransformer
  private val defaultTransformer = jsonTransformer

  // NOTE: the original listing is truncated at this point; the method opening
  // below is a hedged reconstruction inferred from the fragment that follows,
  // not the verbatim source.
  def raids: WebSocket = WebSocket.acceptOrResult[Message, Message] { request =>
    val requestedProtocols = request.headers.getAll("Sec-WebSocket-Protocol")

    val result = requestedProtocols.collectFirst {
      case "binary" => binaryTransformer
      case "json" => jsonTransformer
    }.orElse(if (requestedProtocols.isEmpty) Some(defaultTransformer) else None) match {
      case Some(transformer) => Right {
        // Per-connection handler props; the factory's argument list is assumed.
        val props = (out: ActorRef) =>
          WebsocketRaidsHandler.props(out, raidFinder, translator, keepAliveInterval, metricsCollector)
        val flow = ActorFlow.actorRef(props = props)
        transformer.transform(flow)
      }
      case None => Left {
        val unsupportedProtocols = requestedProtocols.mkString("[", ", ", "]")
        Results.BadRequest("Unsupported websocket subprotocols " + unsupportedProtocols)
      }
    }

    Future.successful(result)
  }
} 
Example 12
Source File: WsConnection.scala    From matcher    with MIT License
package com.wavesplatform.dex.api.ws.connection

import java.util.concurrent.ConcurrentLinkedQueue

import akka.Done
import akka.actor.{ActorRef, ActorSystem, Status}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage, WebSocketRequest}
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy}
import com.wavesplatform.dex.api.ws.protocol.{WsClientMessage, WsMessage, WsPingOrPong, WsServerMessage}
import com.wavesplatform.dex.domain.utils.ScorexLogging
import play.api.libs.json.Json

import scala.collection.JavaConverters._
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

class WsConnection(uri: String, keepAlive: Boolean = true)(implicit system: ActorSystem, materializer: Materializer) extends ScorexLogging {

  log.info(s"""Connecting to Matcher WS API:
            |         URI = $uri
            |  Keep alive = $keepAlive""".stripMargin)

  import materializer.executionContext

  private val wsHandlerRef = system.actorOf(TestWsHandlerActor props keepAlive)

  protected def stringifyClientMessage(cm: WsClientMessage): TextMessage.Strict =
    WsMessage.toStrictTextMessage(cm)(WsClientMessage.wsClientMessageWrites)

  // From test to server
  private val source: Source[TextMessage.Strict, ActorRef] = {
    val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case akka.actor.Status.Success(_) => CompletionStrategy.draining }
    val failureMatcher: PartialFunction[Any, Throwable]             = { case Status.Failure(cause)        => cause }

    Source
      .actorRef[WsClientMessage](completionMatcher, failureMatcher, 10, OverflowStrategy.fail)
      .map(stringifyClientMessage)
      .mapMaterializedValue { source =>
        wsHandlerRef.tell(TestWsHandlerActor.AssignSourceRef, source)
        source
      }
  }

  private val messagesBuffer: ConcurrentLinkedQueue[WsServerMessage] = new ConcurrentLinkedQueue[WsServerMessage]()

  // From server to test
  private val sink: Sink[Message, Future[Done]] = Sink.foreach {
    case tm: TextMessage =>
      for {
        strictText <- tm.toStrict(1.second).map(_.getStrictText)
        clientMessage <- {
          log.trace(s"Got $strictText")
          Try { Json.parse(strictText).as[WsServerMessage] } match {
            case Failure(exception) => Future.failed(exception)
            case Success(x) => {
              messagesBuffer.add(x)
              if (keepAlive) x match {
                case value: WsPingOrPong => wsHandlerRef ! value
                case _                   =>
              }
              Future.successful(x)
            }
          }
        }
      } yield clientMessage

    case bm: BinaryMessage =>
      bm.dataStream.runWith(Sink.ignore)
      Future.failed { new IllegalArgumentException("Binary messages are not supported") }
  }

  private val flow: Flow[Message, TextMessage.Strict, Future[Done]] = Flow.fromSinkAndSourceCoupled(sink, source).watchTermination() {
    case (_, f) =>
      f.onComplete {
        case Success(_) => log.info(s"WebSocket connection to $uri successfully closed")
        case Failure(e) => log.error(s"WebSocket connection to $uri closed with an error", e)
      }(materializer.executionContext)
      f
  }

  val (connectionResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(uri), flow)

  val connectionOpenedTs: Long                   = System.currentTimeMillis
  val connectionClosedTs: Future[Long]           = closed.map(_ => System.currentTimeMillis)
  val connectionLifetime: Future[FiniteDuration] = connectionClosedTs.map(cc => FiniteDuration(cc - connectionOpenedTs, MILLISECONDS))

  def messages: List[WsServerMessage] = messagesBuffer.iterator().asScala.toList
  def clearMessages(): Unit           = messagesBuffer.clear()

  def send(message: WsClientMessage): Unit = wsHandlerRef ! TestWsHandlerActor.SendToServer(message)

  def close(): Unit     = if (!isClosed) wsHandlerRef ! TestWsHandlerActor.CloseConnection
  def isClosed: Boolean = closed.isCompleted
} 
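The central idiom of this example, reduced to a sketch (names are illustrative): Flow.fromSinkAndSourceCoupledMat joins an inbound sink and an outbound source so that completion or failure of either side tears down both, which is exactly what a WebSocket client needs.

import akka.{Done, NotUsed}
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}

import scala.concurrent.Future

object CoupledFlowSketch {
  val inbound: Sink[String, Future[Done]] = Sink.foreach(msg => println(s"got: $msg"))
  val outbound: Source[String, NotUsed] = Source(List("hello", "world"))

  // Keep the sink's completion future, much as the example above observes
  // termination via watchTermination.
  val coupled: Flow[String, String, Future[Done]] =
    Flow.fromSinkAndSourceCoupledMat(inbound, outbound)(Keep.left)
}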
Example 13
Source File: WsConnection.scala    From matcher    with MIT License
package com.wavesplatform.dex.load.ws

import akka.Done
import akka.actor.{ActorRef, ActorSystem, Status}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage, WebSocketRequest}
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy}
import com.wavesplatform.dex.api.ws.connection.TestWsHandlerActor
import com.wavesplatform.dex.api.ws.protocol.{WsClientMessage, WsMessage, WsServerMessage}
import com.wavesplatform.dex.domain.utils.ScorexLogging
import play.api.libs.json.Json

import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import scala.util.{Failure, Success, Try}

class WsConnection(uri: String, receive: WsServerMessage => Option[WsClientMessage])(implicit system: ActorSystem) extends ScorexLogging {

  import system.dispatcher
  private implicit val materializer = Materializer(system)
  private val wsHandlerRef          = system.actorOf(TestWsHandlerActor.props(keepAlive = true))

  log.info(s"Connecting to Matcher WS API: $uri")

  protected def stringifyClientMessage(cm: WsClientMessage): TextMessage.Strict =
    WsMessage.toStrictTextMessage(cm)(WsClientMessage.wsClientMessageWrites)

  // To server
  private val source: Source[TextMessage.Strict, ActorRef] = {
    val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case akka.actor.Status.Success(_) => CompletionStrategy.draining }
    val failureMatcher: PartialFunction[Any, Throwable]             = { case Status.Failure(cause)        => cause }

    Source
      .actorRef[WsClientMessage](completionMatcher, failureMatcher, 10, OverflowStrategy.fail)
      .map(stringifyClientMessage)
      .mapMaterializedValue { source =>
        wsHandlerRef.tell(TestWsHandlerActor.AssignSourceRef, source)
        source
      }
  }

  // To client
  private val sink: Sink[Message, Future[Done]] = Sink.foreach {
    case tm: TextMessage => // TODO move to tests
      for {
        strictText <- tm.toStrict(1.second).map(_.getStrictText)
        clientMessage <- {
          log.trace(s"Got $strictText")
          Try { Json.parse(strictText).as[WsServerMessage] } match {
            case Failure(exception) => Future.failed(exception)
            case Success(x)         => Future.successful { receive(x).foreach(wsHandlerRef ! _) }
          }
        }
      } yield clientMessage

    case bm: BinaryMessage =>
      bm.dataStream.runWith(Sink.ignore)
      Future.failed { new IllegalArgumentException("Binary messages are not supported") }
  }

  private val flow: Flow[Message, TextMessage.Strict, Future[Done]] = Flow.fromSinkAndSourceCoupled(sink, source).watchTermination() {
    case (_, f) =>
      f.onComplete {
        case Success(_) => log.info(s"WebSocket connection to $uri successfully closed")
        case Failure(e) => log.error(s"WebSocket connection to $uri closed with an error", e)
      }(materializer.executionContext)
      f
  }

  val (connectionResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(uri), flow)

  def send(message: WsClientMessage): Unit = wsHandlerRef ! TestWsHandlerActor.SendToServer(message)

  def isClosed: Boolean = closed.isCompleted
  def close(): Future[Done] = {
    if (!isClosed) wsHandlerRef ! TestWsHandlerActor.CloseConnection
    closed
  }
} 
Example 14
Source File: DispatcherUtils.scala    From akka-persistence-dynamodb    with Apache License 2.0
package com.github.j5ik2o.akka.persistence.dynamodb.utils

import akka.NotUsed
import akka.stream.ActorAttributes
import akka.stream.scaladsl.Flow
import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig
import com.github.j5ik2o.akka.persistence.dynamodb.config.client.ClientVersion

object DispatcherUtils extends LoggingSupport {

  def applyV1Dispatcher[A, B](pluginConfig: PluginConfig, flow: Flow[A, B, NotUsed]): Flow[A, B, NotUsed] = {
    (if (pluginConfig.clientConfig.clientVersion == ClientVersion.V1)
       pluginConfig.clientConfig.v1ClientConfig.dispatcherName
     else
       pluginConfig.clientConfig.v1DaxClientConfig.dispatcherName)
      .fold {
        flow
      } { name => flow.withAttributes(ActorAttributes.dispatcher(name)) }
  }

  def applyV2Dispatcher[A, B](pluginConfig: PluginConfig, flow: Flow[A, B, NotUsed]): Flow[A, B, NotUsed] = {
    pluginConfig.clientConfig.v2ClientConfig.dispatcherName.fold(flow) { name =>
      flow.withAttributes(ActorAttributes.dispatcher(name))
    }
  }

} 
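A sketch of the underlying mechanism: pinning a flow to a named dispatcher via stream attributes. The dispatcher name must exist in the ActorSystem's configuration; the one below is assumed to be the blocking-IO dispatcher from Akka Streams' reference configuration, which is worth verifying for your Akka version.

import akka.NotUsed
import akka.stream.ActorAttributes
import akka.stream.scaladsl.Flow

object DispatcherSketch {
  val blockingFlow: Flow[Int, Int, NotUsed] =
    Flow[Int]
      .map(identity) // stand-in for blocking work
      .withAttributes(ActorAttributes.dispatcher("akka.stream.default-blocking-io-dispatcher"))
}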
Example 15
Source File: ReadJournalDaoImpl.scala    From akka-persistence-dynamodb    with Apache License 2.0
package com.github.j5ik2o.akka.persistence.dynamodb.query.dao

import akka.NotUsed
import akka.actor.ActorSystem
import akka.persistence.PersistentRepr
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Source }
import com.github.j5ik2o.akka.persistence.dynamodb.config.QueryPluginConfig
import com.github.j5ik2o.akka.persistence.dynamodb.journal.dao.{ DaoSupport, JournalRowReadDriver }
import com.github.j5ik2o.akka.persistence.dynamodb.journal.JournalRow
import com.github.j5ik2o.akka.persistence.dynamodb.metrics.MetricsReporter
import com.github.j5ik2o.akka.persistence.dynamodb.model.{ PersistenceId, SequenceNumber }
import com.github.j5ik2o.akka.persistence.dynamodb.serialization.FlowPersistentReprSerializer

import scala.collection.immutable.Set
import scala.concurrent.ExecutionContext
import scala.util.Try

class ReadJournalDaoImpl(
    queryProcessor: QueryProcessor,
    override protected val journalRowDriver: JournalRowReadDriver,
    pluginConfig: QueryPluginConfig,
    override val serializer: FlowPersistentReprSerializer[JournalRow],
    override protected val metricsReporter: Option[MetricsReporter]
)(implicit val ec: ExecutionContext, system: ActorSystem)
    extends ReadJournalDao
    with DaoSupport {

  implicit val mat = ActorMaterializer()

  override def allPersistenceIds(max: Long): Source[PersistenceId, NotUsed] = queryProcessor.allPersistenceIds(max)

  private def perfectlyMatchTag(tag: String, separator: String): Flow[JournalRow, JournalRow, NotUsed] =
    Flow[JournalRow].filter(_.tags.exists(tags => tags.split(separator).contains(tag)))

  override def eventsByTag(
      tag: String,
      offset: Long,
      maxOffset: Long,
      max: Long
  ): Source[Try[(PersistentRepr, Set[String], Long)], NotUsed] =
    eventsByTagAsJournalRow(tag, offset, maxOffset, max)
      .via(perfectlyMatchTag(tag, pluginConfig.tagSeparator))
      .via(serializer.deserializeFlowAsTry)

  override def eventsByTagAsJournalRow(
      tag: String,
      offset: Long,
      maxOffset: Long,
      max: Long
  ): Source[JournalRow, NotUsed] = queryProcessor.eventsByTagAsJournalRow(tag, offset, maxOffset, max)

  override def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed] =
    queryProcessor.journalSequence(offset, limit)

  override def getMessagesAsJournalRow(
      persistenceId: PersistenceId,
      fromSequenceNr: SequenceNumber,
      toSequenceNr: SequenceNumber,
      max: Long,
      deleted: Option[Boolean]
  ): Source[JournalRow, NotUsed] =
    journalRowDriver.getJournalRows(persistenceId, fromSequenceNr, toSequenceNr, max, deleted)

  override def maxJournalSequence(): Source[Long, NotUsed] = {
    Source.single(Long.MaxValue)
  }

} 
Example 16
Source File: FlowPersistentReprSerializer.scala    From akka-persistence-dynamodb    with Apache License 2.0
package com.github.j5ik2o.akka.persistence.dynamodb.serialization

import akka.NotUsed
import akka.persistence.PersistentRepr
import akka.stream.scaladsl.Flow

import scala.util.{ Failure, Success, Try }

trait FlowPersistentReprSerializer[T] extends PersistentReprSerializer[T] {

  def deserializeFlow: Flow[T, (PersistentRepr, Set[String], Long), NotUsed] = {
    Flow[T].map(deserialize).map {
      case Right(r) => r
      case Left(ex) => throw ex
    }
  }

  def deserializeFlowWithoutTags: Flow[T, PersistentRepr, NotUsed] = {
    deserializeFlow.map(keepPersistentRepr)
  }

  // ---

  def deserializeFlowAsEither: Flow[T, Either[Throwable, (PersistentRepr, Set[String], Long)], NotUsed] = {
    Flow[T].map(deserialize)
  }

  def deserializeFlowWithoutTagsAsEither: Flow[T, Either[Throwable, PersistentRepr], NotUsed] = {
    deserializeFlowAsEither.map {
      case Right(v) => Right(keepPersistentRepr(v))
      case Left(ex) => Left(ex)
    }
  }

  // ---

  def deserializeFlowAsTry: Flow[T, Try[(PersistentRepr, Set[String], Long)], NotUsed] = {
    Flow[T].map(deserialize).map {
      case Right(v) => Success(v)
      case Left(ex) => Failure(ex)
    }
  }

  def deserializeFlowWithoutTagsAsTry: Flow[T, Try[PersistentRepr], NotUsed] = {
    deserializeFlowAsTry.map(_.map(keepPersistentRepr))
  }

  private def keepPersistentRepr(tup: (PersistentRepr, Set[String], Long)): PersistentRepr = tup match {
    case (repr, _, _) => repr
  }

} 
Example 17
Source File: JournalRowDriver.scala    From akka-persistence-dynamodb    with Apache License 2.0
package com.github.j5ik2o.akka.persistence.dynamodb.journal.dao

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.Attributes
import akka.stream.scaladsl.{ Flow, Source, SourceUtils }
import com.github.j5ik2o.akka.persistence.dynamodb.journal.JournalRow
import com.github.j5ik2o.akka.persistence.dynamodb.model.{ PersistenceId, SequenceNumber }

trait JournalRowDriver {

  def system: ActorSystem

  protected val startTimeSource: Source[Long, NotUsed] =
    SourceUtils
      .lazySource(() => Source.single(System.nanoTime())).mapMaterializedValue(_ => NotUsed)

  protected val logLevels: Attributes = Attributes.logLevels(
    onElement = Attributes.LogLevels.Debug,
    onFailure = Attributes.LogLevels.Error,
    onFinish = Attributes.LogLevels.Debug
  )

}

trait JournalRowReadDriver extends JournalRowDriver {

  def getJournalRows(
      persistenceId: PersistenceId,
      toSequenceNr: SequenceNumber,
      deleted: Boolean
  ): Source[Seq[JournalRow], NotUsed]

  def getJournalRows(
      persistenceId: PersistenceId,
      fromSequenceNr: SequenceNumber,
      toSequenceNr: SequenceNumber,
      max: Long,
      deleted: Option[Boolean] = Some(false)
  ): Source[JournalRow, NotUsed]

  def highestSequenceNr(
      persistenceId: PersistenceId,
      fromSequenceNr: Option[SequenceNumber] = None,
      deleted: Option[Boolean] = None
  ): Source[Long, NotUsed]
}

trait JournalRowWriteDriver extends JournalRowReadDriver {

  def singlePutJournalRowFlow: Flow[JournalRow, Long, NotUsed]
  def multiPutJournalRowsFlow: Flow[Seq[JournalRow], Long, NotUsed]

  def updateMessage(journalRow: JournalRow): Source[Unit, NotUsed]

  def singleDeleteJournalRowFlow: Flow[PersistenceIdWithSeqNr, Long, NotUsed]
  def multiDeleteJournalRowsFlow: Flow[Seq[PersistenceIdWithSeqNr], Long, NotUsed]

} 
Example 18
Source File: MessageSink.scala    From parquet4s    with MIT License
package com.github.mjakubowski84.parquet4s.indefinite

import java.sql.Timestamp
import java.util.UUID

import akka.Done
import akka.kafka.CommitterSettings
import akka.kafka.ConsumerMessage.CommittableOffsetBatch
import akka.kafka.scaladsl.Committer
import akka.stream.scaladsl.{Flow, Keep, Sink}
import com.github.mjakubowski84.parquet4s.{ChunkPathBuilder, ParquetStreams, ParquetWriter}
import com.google.common.io.Files
import org.apache.hadoop.fs.Path
import org.apache.parquet.hadoop.metadata.CompressionCodecName

import scala.concurrent.Future
import scala.concurrent.duration._

object MessageSink {

  case class Data(timestamp: Timestamp, word: String)

  val MaxChunkSize: Int = 128
  val ChunkWriteTimeWindow: FiniteDuration = 10.seconds
  val WriteDirectoryName: String = "messages"

}

trait MessageSink {

  this: Akka =>

  import MessageSink._
  import MessageSource._

  protected val baseWritePath: String = new Path(Files.createTempDir().getAbsolutePath, WriteDirectoryName).toString

  private val writerOptions = ParquetWriter.Options(compressionCodecName = CompressionCodecName.SNAPPY)

  private lazy val committerSink = Flow.apply[Seq[Message]].map { messages =>
    CommittableOffsetBatch(messages.map(_.committableOffset))
  }.toMat(Committer.sink(CommitterSettings(system)))(Keep.right)

  def chunkPath: ChunkPathBuilder[Message] = {
    case (basePath, chunk) =>
      val lastElementDateTime = new Timestamp(chunk.last.record.timestamp()).toLocalDateTime
      val year = lastElementDateTime.getYear
      val month = lastElementDateTime.getMonthValue
      val day = lastElementDateTime.getDayOfMonth
      val uuid = UUID.randomUUID()

      basePath.suffix(s"/$year/$month/$day/part-$uuid.parquet")
  }

  lazy val messageSink: Sink[Message, Future[Done]] = ParquetStreams.toParquetIndefinite(
    path = baseWritePath,
    maxChunkSize = MaxChunkSize,
    chunkWriteTimeWindow = ChunkWriteTimeWindow,
    buildChunkPath = chunkPath,
    preWriteTransformation = { message: Message =>
      Data(
        timestamp = new Timestamp(message.record.timestamp()),
        word = message.record.value()
      )
    },
    postWriteSink = committerSink,
    options = writerOptions
  )

} 
Example 19
Source File: UnorderedParallelParquetSink.scala    From parquet4s    with MIT License
package com.github.mjakubowski84.parquet4s

import java.util.UUID

import akka.Done
import akka.stream.scaladsl.{Flow, Keep, Sink}
import org.apache.hadoop.fs.Path
import org.apache.parquet.schema.MessageType
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future

private[parquet4s] object UnorderedParallelParquetSink extends IOOps {

  protected val logger: Logger = LoggerFactory.getLogger(this.getClass)

  def apply[T: ParquetRecordEncoder : ParquetSchemaResolver](path: Path,
                                                             parallelism: Int,
                                                             options: ParquetWriter.Options = ParquetWriter.Options()
                                                            ): Sink[T, Future[Done]] = {
    val schema = ParquetSchemaResolver.resolveSchema[T]
    val valueCodecConfiguration = options.toValueCodecConfiguration

    validateWritePath(path, options)

    def encode(data: T): RowParquetRecord = ParquetRecordEncoder.encode[T](data, valueCodecConfiguration)

    Flow[T]
      .zipWithIndex
      .groupBy(parallelism, elemAndIndex => Math.floorMod(elemAndIndex._2, parallelism))
      .map(elemAndIndex => encode(elemAndIndex._1))
      .fold(UnorderedChunk(path, schema, options))(_.write(_))
      .map(_.close())
      .async
      .mergeSubstreamsWithParallelism(parallelism)
      .toMat(Sink.ignore)(Keep.right)
  }

  private trait UnorderedChunk {

    def write(record: RowParquetRecord): UnorderedChunk

    def close(): Unit

  }

  private object UnorderedChunk {

    def apply(basePath: Path,
              schema: MessageType,
              options: ParquetWriter.Options): UnorderedChunk = new PendingUnorderedChunk(basePath, schema, options)

    private[UnorderedChunk] class PendingUnorderedChunk(basePath: Path,
                                        schema: MessageType,
                                        options: ParquetWriter.Options) extends UnorderedChunk {
      override def write(record: RowParquetRecord): UnorderedChunk = {
        val chunkPath = Path.mergePaths(basePath, new Path(s"/part-${UUID.randomUUID()}.parquet"))
        val writer = ParquetWriter.internalWriter(chunkPath, schema, options)
        writer.write(record)
        new StartedUnorderedChunk(chunkPath, writer, acc = 1)
      }

      override def close(): Unit = ()
    }

    private[UnorderedChunk] class StartedUnorderedChunk(chunkPath: Path,
                                        writer: ParquetWriter.InternalWriter,
                                        acc: Long
                                       ) extends UnorderedChunk {
      override def write(record: RowParquetRecord): UnorderedChunk = {
        writer.write(record)
        new StartedUnorderedChunk(chunkPath, writer, acc = acc + 1)
      }

      override def close(): Unit = {
        if (logger.isDebugEnabled) logger.debug(s"$acc records were successfully written to $chunkPath")
        writer.close()
      }
    }
  }

} 
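The fan-out idiom used above, in isolation: partition elements into a fixed number of substreams by index, do per-substream work asynchronously, then merge the substreams back together. The object name and workload are illustrative.

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}

object GroupBySketch extends App {
  implicit val system: ActorSystem = ActorSystem("group-by-sketch")
  val parallelism = 4

  Source(1 to 16)
    .zipWithIndex
    .groupBy(parallelism, { case (_, idx) => Math.floorMod(idx, parallelism) })
    .map { case (elem, _) => elem * 10 } // per-substream work
    .async
    .mergeSubstreamsWithParallelism(parallelism)
    .runWith(Sink.foreach(println))
}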
Example 20
Source File: IndefiniteStreamParquetSink.scala    From parquet4s    with MIT License
package com.github.mjakubowski84.parquet4s
import akka.stream.FlowShape
import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, Keep, Sink, ZipWith}
import com.github.mjakubowski84.parquet4s.ParquetWriter.ParquetWriterFactory
import org.apache.hadoop.fs.Path
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.duration.FiniteDuration


private[parquet4s] object IndefiniteStreamParquetSink extends IOOps {

  protected val logger: Logger = LoggerFactory.getLogger(this.getClass)

  def apply[In, ToWrite: ParquetWriterFactory, Mat](path: Path,
                                                    maxChunkSize: Int,
                                                    chunkWriteTimeWindow: FiniteDuration,
                                                    buildChunkPath: ChunkPathBuilder[In] = ChunkPathBuilder.default,
                                                    preWriteTransformation: In => ToWrite = identity[In] _,
                                                    postWriteSink: Sink[Seq[In], Mat] = Sink.ignore,
                                                    options: ParquetWriter.Options = ParquetWriter.Options()
                                            ): Sink[In, Mat] = {
    validateWritePath(path, options)

    val internalFlow = Flow.fromGraph(GraphDSL.create() { implicit b =>
      import GraphDSL.Implicits._
    
      val inChunkFlow = b.add(Flow[In].groupedWithin(maxChunkSize, chunkWriteTimeWindow))
      val broadcastChunks = b.add(Broadcast[Seq[In]](outputPorts = 2))
      val writeFlow = Flow[Seq[In]].map { chunk =>
        val toWrite = chunk.map(preWriteTransformation)
        val chunkPath = buildChunkPath(path, chunk)
        if (logger.isDebugEnabled()) logger.debug(s"Writing ${toWrite.size} records to $chunkPath")
        ParquetWriter.writeAndClose(chunkPath.toString, toWrite, options)
      }
      val zip = b.add(ZipWith[Seq[In], Unit, Seq[In]]((chunk, _) => chunk))
      
      inChunkFlow ~> broadcastChunks ~> writeFlow ~> zip.in1
                     broadcastChunks ~> zip.in0

      FlowShape(inChunkFlow.in, zip.out)               
    })

    internalFlow.toMat(postWriteSink)(Keep.right)
  }

} 
Example 21
Source File: SingleFileParquetSink.scala    From parquet4s    with MIT License
package com.github.mjakubowski84.parquet4s

import akka.Done
import akka.stream.scaladsl.{Flow, Keep, Sink}
import org.apache.hadoop.fs.Path
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future

private[parquet4s] object SingleFileParquetSink {

  protected val logger: Logger = LoggerFactory.getLogger(this.getClass)

  def apply[T: ParquetRecordEncoder : ParquetSchemaResolver](path: Path,
                                                             options: ParquetWriter.Options = ParquetWriter.Options()
                                                            ): Sink[T, Future[Done]] = {
    val schema = ParquetSchemaResolver.resolveSchema[T]
    val writer = ParquetWriter.internalWriter(path, schema, options)
    val valueCodecConfiguration = options.toValueCodecConfiguration
    val isDebugEnabled = logger.isDebugEnabled

    def encode(data: T): RowParquetRecord = ParquetRecordEncoder.encode[T](data, valueCodecConfiguration)

    Flow[T]
      .map(encode)
      .fold(0) { case (acc, record) => writer.write(record); acc + 1}
      .map { count =>
        if (isDebugEnabled) logger.debug(s"$count records were successfully written to $path")
        writer.close()
      }
      .toMat(Sink.ignore)(Keep.right)
  }

} 
Example 22
Source File: SequentialFileSplittingParquetSink.scala    From parquet4s    with MIT License
package com.github.mjakubowski84.parquet4s

import akka.Done
import akka.stream.scaladsl.{Flow, Keep, Sink}
import org.apache.hadoop.fs.Path
import org.apache.parquet.schema.MessageType
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future

private[parquet4s] object SequentialFileSplittingParquetSink extends IOOps {

  protected val logger: Logger = LoggerFactory.getLogger(this.getClass)

  def apply[T: ParquetRecordEncoder : ParquetSchemaResolver](path: Path,
                                                             maxRecordsPerFile: Long,
                                                             options: ParquetWriter.Options = ParquetWriter.Options()
                                                            ): Sink[T, Future[Done]] = {
    val schema = ParquetSchemaResolver.resolveSchema[T]
    val valueCodecConfiguration = options.toValueCodecConfiguration

    validateWritePath(path, options)

    def encode(data: T): RowParquetRecord = ParquetRecordEncoder.encode[T](data, valueCodecConfiguration)

    Flow[T]
      .zipWithIndex
      .map { case (elem, index) => OrderedChunkElem(encode(elem), index) }
      .fold(OrderedChunk(path, schema, maxRecordsPerFile, options))(_.write(_))
      .map(_.close())
      .toMat(Sink.ignore)(Keep.right)
  }

  private case class OrderedChunkElem(record: RowParquetRecord, index: Long) {
    def isSplit(maxRecordsPerFile: Long): Boolean = index % maxRecordsPerFile == 0
  }

  private trait OrderedChunk {
    def write(elem: OrderedChunkElem): OrderedChunk
    def close(): Unit
  }

  private object OrderedChunk {

    def apply(basePath: Path,
              schema: MessageType,
              maxRecordsPerFile: Long,
              options: ParquetWriter.Options): OrderedChunk = new PendingOrderedChunk(basePath, schema, maxRecordsPerFile, options)


    private[OrderedChunk] class PendingOrderedChunk(basePath: Path,
                                                    schema: MessageType,
                                                    maxRecordsPerFile: Long,
                                                    options: ParquetWriter.Options) extends OrderedChunk {
      override def write(elem: OrderedChunkElem): OrderedChunk = {
        val chunkNumber: Int = Math.floorDiv(elem.index, maxRecordsPerFile).toInt
        val chunkPath = Path.mergePaths(basePath, new Path(chunkFileName(chunkNumber)))
        val writer = ParquetWriter.internalWriter(chunkPath, schema, options)
        writer.write(elem.record)
        new StartedOrderedChunk(basePath, schema, maxRecordsPerFile, options, chunkPath, writer, acc = 1)
      }

      override def close(): Unit = ()

      private def chunkFileName(chunkNumber: Int): String = f"/part-$chunkNumber%05d.parquet"
    }

    private[OrderedChunk] class StartedOrderedChunk(basePath: Path,
                                                    schema: MessageType,
                                                    maxRecordsPerFile: Long,
                                                    options: ParquetWriter.Options,
                                                    chunkPath: Path,
                                                    writer: ParquetWriter.InternalWriter,
                                                    acc: Long) extends OrderedChunk {
      override def write(elem: OrderedChunkElem): OrderedChunk = {
        if (elem.isSplit(maxRecordsPerFile)) {
          this.close()
          new PendingOrderedChunk(basePath, schema, maxRecordsPerFile, options).write(elem)
        } else {
          writer.write(elem.record)
          new StartedOrderedChunk(basePath, schema, maxRecordsPerFile, options, chunkPath, writer, acc = acc + 1)
        }
      }

      override def close(): Unit = {
        if (logger.isDebugEnabled) logger.debug(s"$acc records were successfully written to $chunkPath")
        writer.close()
      }
    }
  }

} 
Example 23
Source File: ReliableHttpProxyFactory.scala    From reliable-http-client    with Apache License 2.0
package rhttpc.akkahttp.proxy

import akka.NotUsed
import akka.actor._
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpEntity, HttpRequest, HttpResponse}
import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Sink, Source}
import org.slf4j.LoggerFactory
import rhttpc.client.protocol.{Correlated, Request}
import rhttpc.client.proxy._

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

object ReliableHttpProxyFactory {

  private lazy val logger = LoggerFactory.getLogger(getClass)

  def send(successRecognizer: SuccessHttpResponseRecognizer, batchSize: Int, parallelConsumers: Int)
          (request: Request[HttpRequest])
          (implicit actorSystem: ActorSystem, materialize: Materializer): Future[HttpResponse] = {
    import actorSystem.dispatcher
    send(prepareHttpFlow(batchSize * parallelConsumers), successRecognizer)(request.correlated)
  }

  private def prepareHttpFlow(parallelism: Int)
                             (implicit actorSystem: ActorSystem, materialize: Materializer):
    Flow[(HttpRequest, String), HttpResponse, NotUsed] = {

    import actorSystem.dispatcher
    Http().superPool[String]().mapAsync(parallelism) {
      case (tryResponse, id) =>
        tryResponse match {
          case Success(response) =>
            response.toStrict(1 minute)
          case Failure(ex) =>
            Future.failed(ex)
        }
    }
  }

  private def send(httpFlow: Flow[(HttpRequest, String), HttpResponse, Any], successRecognizer: SuccessHttpResponseRecognizer)
                  (corr: Correlated[HttpRequest])
                  (implicit ec: ExecutionContext, materialize: Materializer): Future[HttpResponse] = {
    import collection.JavaConverters._
    logger.debug(
      s"""Sending request for ${corr.correlationId} to ${corr.msg.getUri()}. Headers:
         |${corr.msg.getHeaders().asScala.toSeq.map(h => "  " + h.name() + ": " + h.value()).mkString("\n")}
         |Body:
         |${corr.msg.entity.asInstanceOf[HttpEntity.Strict].data.utf8String}""".stripMargin
    )
    val logResp = logResponse(corr) _
    val responseFuture = Source.single((corr.msg, corr.correlationId)).via(httpFlow).runWith(Sink.head)
    responseFuture.onComplete {
      case Failure(ex) =>
        logger.error(s"Got failure for ${corr.correlationId} to ${corr.msg.getUri()}", ex)
      case Success(_) =>
    }
    for {
      response <- responseFuture
      transformedToFailureIfNeed <- {
        if (successRecognizer.isSuccess(response)) {
          logResp(response, "success response")
          Future.successful(response)
        } else {
          logResp(response, "response recognized as non-success")
          Future.failed(NonSuccessResponse)
        }
      }
    } yield transformedToFailureIfNeed
  }

  private def logResponse(corr: Correlated[HttpRequest])
                         (response: HttpResponse, additionalInfo: String): Unit = {
    import collection.JavaConverters._
    logger.debug(
      s"""Got $additionalInfo for ${corr.correlationId} to ${corr.msg.getUri()}. Status: ${response.status.value}. Headers:
         |${response.getHeaders().asScala.toSeq.map(h => "  " + h.name() + ": " + h.value()).mkString("\n")}
         |Body:
         |${response.entity.asInstanceOf[HttpEntity.Strict].data.utf8String}""".stripMargin
    )
  }

} 
Example 24
Source File: HTTPResponseStream.scala    From akka_streams_tutorial    with MIT License
package akkahttp

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.common.{EntityStreamingSupport, JsonEntityStreamingSupport}
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives.{complete, get, logRequestResult, path, _}
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Flow, Sink, Source}
import com.typesafe.config.ConfigFactory
import spray.json.DefaultJsonProtocol

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}


object HTTPResponseStream extends App with DefaultJsonProtocol with SprayJsonSupport {
  implicit val system = ActorSystem("HTTPResponseStream")
  implicit val executionContext = system.dispatcher

  //JSON Protocol and streaming support
  final case class ExamplePerson(name: String)

  implicit def examplePersonFormat = jsonFormat1(ExamplePerson.apply)

  implicit val jsonStreamingSupport: JsonEntityStreamingSupport = EntityStreamingSupport.json()

  val (address, port) = ("127.0.0.1", 8080)
  server(address, port)
  client(address, port)

  def client(address: String, port: Int): Unit = {
    val requestParallelism = ConfigFactory.load.getInt("akka.http.host-connection-pool.max-connections")

    val requests: Source[HttpRequest, NotUsed] = Source
      .fromIterator(() =>
        Range(0, requestParallelism).map(i => HttpRequest(uri = Uri(s"http://$address:$port/download/$i"))).iterator
      )

    // Run singleRequest and completely consume response elements
    def runRequestDownload(req: HttpRequest) =
      Http()
        .singleRequest(req)
        .flatMap { response =>
          val unmarshalled: Future[Source[ExamplePerson, NotUsed]] = Unmarshal(response).to[Source[ExamplePerson, NotUsed]]
          val source: Source[ExamplePerson, Future[NotUsed]] = Source.futureSource(unmarshalled)
          source.via(processorFlow).runWith(printSink)
        }

    requests
      .mapAsync(requestParallelism)(runRequestDownload)
      .runWith(Sink.ignore)
  }


  val printSink = Sink.foreach[ExamplePerson] { each: ExamplePerson => println(s"Client processed element: $each") }

  val processorFlow: Flow[ExamplePerson, ExamplePerson, NotUsed] = Flow[ExamplePerson].map {
    each: ExamplePerson => {
      //println(s"Process: $each")
      each
    }
  }


  def server(address: String, port: Int): Unit = {

    def routes: Route = logRequestResult("httpecho") {
      path("download" / Segment) { id: String =>
        get {
          println(s"Server received request with id: $id, stream response...")
          extractRequest { r: HttpRequest =>
            val finishedWriting = r.discardEntityBytes().future
            onComplete(finishedWriting) { done =>
              //Limit response by appending eg .take(5)
              val responseStream: Stream[ExamplePerson] = Stream.continually(ExamplePerson(s"request:$id"))
              complete(Source(responseStream).throttle(1, 1.second, 1, ThrottleMode.shaping))
            }
          }
        }
      }
    }

    val bindingFuture = Http().bindAndHandle(routes, address, port)
    bindingFuture.onComplete {
      case Success(b) =>
        println("Server started, listening on: " + b.localAddress)
      case Failure(e) =>
        println(s"Server could not bind to: $address:$port. Exception message: ${e.getMessage}")
        system.terminate()
    }
  }
} 
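The key move in runRequestDownload above is flattening a Future[Source[...]] (the result of unmarshalling) into a plain Source with Source.futureSource. A stripped-down sketch of just that step (Akka 2.6 API assumed):

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.Future

object FutureSourceSketch extends App {
  implicit val system: ActorSystem = ActorSystem("FutureSourceSketch")

  // a Source that only becomes available asynchronously, e.g. after unmarshalling
  val eventualSource: Future[Source[Int, NotUsed]] = Future.successful(Source(1 to 3))

  // futureSource flattens Future[Source[...]] into a plain Source
  Source.futureSource(eventualSource).runWith(Sink.foreach(println)) // prints 1 2 3
}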
Example 25
Source File: PublishToSourceQueueFromMultipleThreads.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream

import akka.actor.ActorSystem
import akka.stream.Supervision.Decider
import akka.stream._
import akka.stream.scaladsl.{Flow, Sink, Source, SourceQueueWithComplete}
import akka.{Done, NotUsed}
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}


object PublishToSourceQueueFromMultipleThreads extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("PublishToSourceQueueFromMultipleThreads")
  implicit val ec = system.dispatcher

  val bufferSize = 100
  // As of akka 2.6.x there is a thread safe implementation for SourceQueue
  val maxConcurrentOffers = 1000
  val numberOfPublishingClients = 1000

  val slowSink: Sink[Seq[Int], NotUsed] =
    Flow[Seq[Int]]
      .delay(2.seconds, DelayOverflowStrategy.backpressure)
      .to(Sink.foreach(e => logger.info(s"Reached sink: $e")))

  val sourceQueue: SourceQueueWithComplete[Int] = Source
    .queue[Int](bufferSize, OverflowStrategy.backpressure, maxConcurrentOffers)
    .groupedWithin(10, 1.seconds)
    .to(slowSink)
    .run

  val doneConsuming: Future[Done] = sourceQueue.watchCompletion()
  signalWhen(doneConsuming, "consuming") //never completes

  simulatePublishingFromMultipleThreads()

  // Before 2.6.x a stream had to be used to throttle and control the backpressure
  //simulatePublishingClientsFromStream()

  // Decide on the stream level, because the OverflowStrategy.backpressure
  // on the sourceQueue causes an IllegalStateException
  // Handling this on the stream level allows to restart the stream
  private def simulatePublishingClientsFromStream() = {

    val decider: Decider = {
      case _: IllegalStateException => println("Got backpressure signal for offered element, restart..."); Supervision.Restart
      case _ => Supervision.Stop
    }

    val donePublishing: Future[Done] = Source(1 to numberOfPublishingClients)
      .mapAsync(10)(offerToSourceQueue) //throttle
      .withAttributes(ActorAttributes.supervisionStrategy(decider))
      .runWith(Sink.ignore)
    signalWhen(donePublishing, "publishing")
  }

  private def simulatePublishingFromMultipleThreads() = (1 to numberOfPublishingClients).par.foreach(offerToSourceQueue)

  private def offerToSourceQueue(each: Int) = {
    sourceQueue.offer(each).map {
      case QueueOfferResult.Enqueued => logger.info(s"enqueued $each")
      case QueueOfferResult.Dropped => logger.info(s"dropped $each")
      case QueueOfferResult.Failure(ex) => logger.info(s"Offer failed: $ex")
      case QueueOfferResult.QueueClosed => logger.info("Source Queue closed")
    }
  }

  private def signalWhen(done: Future[Done], operation: String) = {
    done.onComplete {
      case Success(_) =>
        logger.info(s"Finished: $operation")
      case Failure(e) =>
        logger.info(s"Failure: $e About to terminate...")
        system.terminate()
    }
  }
} 
Example 26
Source File: PublishToBlockingResource.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream

import java.util.concurrent.{ArrayBlockingQueue, BlockingQueue}

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.DelayOverflowStrategy
import akka.stream.scaladsl.{Flow, Sink, Source}

import scala.concurrent.duration._
import scala.util.Failure



object PublishToBlockingResource extends App {
  implicit val system = ActorSystem("PublishToBlockingResource")
  implicit val ec = system.dispatcher

  val slowSink: Sink[Seq[Int], NotUsed] =
    Flow[Seq[Int]]
      .delay(1.seconds, DelayOverflowStrategy.backpressure)
      .to(Sink.foreach(e => println(s"Reached sink: $e")))

  val blockingResource: BlockingQueue[Int] = new ArrayBlockingQueue[Int](100)

  //Start a new `Source` from some (third party) blocking resource which can be opened, read and closed
  val source: Source[Int, NotUsed] =
    Source.unfoldResource[Int, BlockingQueue[Int]](
      () => blockingResource,                   //open
      (q: BlockingQueue[Int]) => Some(q.take()),//read
      (_: BlockingQueue[Int]) => {})            //close

  val done = source
    .groupedWithin(10, 2.seconds)
    .watchTermination()((_, done) => done.onComplete {
      case Failure(err) =>
        println(s"Flow failed: $err")
      case each => println(s"Server flow terminated: $each")
    })
    .runWith(slowSink)

  //simulate n process that publish in blocking fashion to the queue
  (1 to 1000).par.foreach(value => blockingResource.put(value))
} 
Example 27
Source File: TweetExample.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream

import java.time.{Instant, ZoneId}

import akka.NotUsed
import akka.actor.{ActorSystem, Cancellable}
import akka.stream.DelayOverflowStrategy
import akka.stream.scaladsl.{Flow, MergePrioritized, Sink, Source}
import org.apache.commons.lang3.exception.ExceptionUtils
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.duration._
import scala.util.{Failure, Success}



object TweetExample extends App {
  implicit val system = ActorSystem("TweetExample")
  implicit val ec = system.dispatcher
  val logger: Logger = LoggerFactory.getLogger(this.getClass)

  final case class Author(handle: String)

  final case class Hashtag(name: String)

  final case class Tweet(author: Author, timestamp: Long, body: String) {
    def hashtags: Set[Hashtag] =
      body.split(" ").collect { case t if t.startsWith("#") => Hashtag(t) }.toSet

    override def toString = {
      val localDateTime = Instant.ofEpochMilli(timestamp).atZone(ZoneId.systemDefault()).toLocalDateTime
      s"$localDateTime - ${author.handle} tweeted: ${body.take(5)}..."
    }
  }

  val akkaTag = Hashtag("#akka")

  val tweetsLowPrio: Source[Tweet, Cancellable] = Source.tick(1.second, 200.millis, NotUsed).map(_ => Tweet(Author("LowPrio"), System.currentTimeMillis, "#other #akka aBody"))
  val tweetsHighPrio: Source[Tweet, Cancellable] = Source.tick(2.second, 1.second, NotUsed).map(_ => Tweet(Author("HighPrio"), System.currentTimeMillis, "#akka #other aBody"))
  val tweetsVeryHighPrio: Source[Tweet, Cancellable] = Source.tick(2.second, 1.second, NotUsed).map(_ => Tweet(Author("VeryHighPrio"), System.currentTimeMillis, "#akka #other aBody"))

  val limitedTweets: Source[Tweet, NotUsed] = Source.combine(tweetsLowPrio, tweetsHighPrio, tweetsVeryHighPrio)(_ => MergePrioritized(List(1, 10, 100))).take(20)

  val processingFlow = Flow[Tweet]
    .filter(_.hashtags.contains(akkaTag))
    .wireTap(each => logger.info(s"$each"))

  val slowDownstream  =
    Flow[Tweet]
      .delay(5.seconds, DelayOverflowStrategy.backpressure)

  val processedTweets =
    limitedTweets
      .via(processingFlow)
      .via(slowDownstream)
      .runWith(Sink.seq)

  processedTweets.onComplete {
    case Success(results) =>
      logger.info(s"Successfully processed: ${results.size} tweets")
      system.terminate()
    case Failure(exception) =>
      logger.info(s"The stream failed with: ${ExceptionUtils.getRootCause(exception)}")
      system.terminate()
  }
} 
Example 28
Source File: MergeHubWithDynamicSources.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.DelayOverflowStrategy
import akka.stream.scaladsl.{Flow, MergeHub, RunnableGraph, Sink, Source}

import scala.concurrent.duration._


object MergeHubWithDynamicSources {
  implicit val system = ActorSystem("MergeHubWithDynamicSources")
  implicit val ec = system.dispatcher

  def main(args: Array[String]): Unit = {

    val slowSink: Sink[Seq[String], NotUsed] =
      Flow[Seq[String]]
        .delay(1.seconds, DelayOverflowStrategy.backpressure)
        .to(Sink.foreach(e => println(s"Reached Sink: $e")))

    // Attach a MergeHub Source to the consumer. This will materialize to a corresponding Sink
    val runnableGraph: RunnableGraph[Sink[String, NotUsed]] =
      MergeHub.source[String](perProducerBufferSize = 16)
        .groupedWithin(10, 2.seconds)
        .to(slowSink)

    // By running/materializing the graph we get back a Sink, and hence now have access to feed elements into it
    // This Sink can then be materialized any number of times, and every element that enters the Sink will be consumed by our consumer
    val toConsumer: Sink[String, NotUsed] = runnableGraph.run()

    def fastSource(sourceId: Int, toConsumer: Sink[String, NotUsed]) = {
      Source(1 to 10)
        .map{each => println(s"Produced: $sourceId.$each"); s"$sourceId.$each"}
        .runWith(toConsumer)
    }

    // Add dynamic producer sources. If the consumer cannot keep up, then ALL of the producers are backpressured
    (1 to 10).par.foreach(each => fastSource(each, toConsumer))
  }
} 
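MergeHub handles dynamic producers; its counterpart for dynamic consumers is BroadcastHub, where materializing once yields a Source that any number of consumers can attach to. A minimal companion sketch (not part of the original example):

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{BroadcastHub, Keep, Sink, Source}

object BroadcastHubSketch extends App {
  implicit val system: ActorSystem = ActorSystem("BroadcastHubSketch")

  // run the producer once; the materialized value is a Source usable many times
  val fromProducer: Source[String, NotUsed] =
    Source(1 to 20).map(i => s"element $i")
      .toMat(BroadcastHub.sink(bufferSize = 16))(Keep.right)
      .run()

  fromProducer.runWith(Sink.foreach(e => println(s"consumer1: $e")))
  fromProducer.runWith(Sink.foreach(e => println(s"consumer2: $e")))
}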
Example 29
Source File: AsyncExecution.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream

import akka.Done
import akka.actor.ActorSystem
import akka.stream.ActorAttributes
import akka.stream.scaladsl.{Flow, Sink, Source}
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future
import scala.util.{Failure, Success}


object AsyncExecution extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("AsyncExecution")
  implicit val ec = system.dispatcher

  def stage(name: String) =
    Flow[Int]
      .wireTap(index => logger.info(s"Stage $name processing element $index by ${Thread.currentThread().getName}"))

  def stageBlocking(name: String) =
    Flow[Int]
      .wireTap(index => logger.info(s"Stage $name processing element $index by ${Thread.currentThread().getName}"))
      .wireTap(_ => Thread.sleep(5000))
      .withAttributes(ActorAttributes.dispatcher("custom-dispatcher-for-blocking"))

  def sinkBlocking: Sink[Int, Future[Done]] =
    Sink.foreach { index: Int =>
      Thread.sleep(2000)
      logger.info(s"Slow sink processing element $index by ${Thread.currentThread().getName}")
    }
      //Adding a custom dispatcher creates an async boundary
      //see discussion in: https://discuss.lightbend.com/t/how-can-i-make-sure-that-fileio-frompath-is-picking-up-my-dispatcher/6528/4
      .withAttributes(ActorAttributes.dispatcher("custom-dispatcher-for-blocking"))


  val done = Source(1 to 10)
    .via(stage("A")).async
    //When activated instead of alsoTo(sinkBlocking): elements for stage C are held up by stage B
    //.via(stageBlocking("B")).async
    .alsoTo(sinkBlocking).async
    .via(stage("C")).async
    .runWith(Sink.ignore)

  //With alsoTo(sinkBlocking) the stages A and C signal "done" too early and thus would terminate the whole stream
  //The reason for this is the custom dispatcher in sinkBlocking
  //terminateWhen(done)

  def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
} 
Example 30
Source File: BasicTransformation.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink, Source}


object BasicTransformation {
  implicit val system = ActorSystem("BasicTransformation")
  import system.dispatcher

  def main(args: Array[String]): Unit = {
    val text =
      """|Lorem Ipsum is simply dummy text of the printing and typesetting industry.
         |Lorem Ipsum has been the industry's standard dummy text ever since the 1500s,
         |when an unknown printer took a galley of type and scrambled it to make a type
         |specimen book.""".stripMargin

    val source = Source.fromIterator(() => text.split("\\s").iterator)
    val sink = Sink.foreach[String](println)
    val flow = Flow[String].map(x => x.toUpperCase)
    val result = source.via(flow).runWith(sink)
    result.onComplete(_ => system.terminate())
  }
} 
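The named Flow value keeps the transformation reusable; for a one-off pipeline the inline operator shorthand reads shorter. A sketch of the same behavior (the word list is illustrative):

import akka.actor.ActorSystem
import akka.stream.scaladsl.Source

object BasicTransformationInline extends App {
  implicit val system: ActorSystem = ActorSystem("BasicTransformationInline")
  import system.dispatcher

  // map is applied inline instead of going through a separate Flow value
  Source(List("lorem", "ipsum"))
    .map(_.toUpperCase)
    .runForeach(println)
    .onComplete(_ => system.terminate())
}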
Example 31
Source File: TcpEcho.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Framing, Keep, Sink, Source, Tcp}
import akka.util.ByteString

import scala.concurrent.Future
import scala.util.{Failure, Success}


object TcpEcho extends App {
  val systemServer = ActorSystem("TcpEchoServer")
  val systemClient = ActorSystem("TcpEchoClient")

  var serverBinding: Future[Tcp.ServerBinding] = _

  if (args.isEmpty) {
    val (address, port) = ("127.0.0.1", 6000)
    serverBinding = server(systemServer, address, port)
    (1 to 10).par.foreach(each => client(each, systemClient, address, port))
  } else {
    val (address, port) =
      if (args.length == 3) (args(1), args(2).toInt)
      else ("127.0.0.1", 6000)
    if (args(0) == "server") {
      serverBinding = server(systemServer, address, port)
    } else if (args(0) == "client") {
      client(1, systemClient, address, port)
    }
  }

  def server(system: ActorSystem, address: String, port: Int): Future[Tcp.ServerBinding] = {
    implicit val sys = system
    implicit val ec = system.dispatcher

    val handler = Sink.foreach[Tcp.IncomingConnection] { connection =>

      // parse incoming commands and append !
      val commandParser = Flow[String].takeWhile(_ != "BYE").map(_ + "!")

      val welcomeMsg = s"Welcome to: ${connection.localAddress}, you are: ${connection.remoteAddress}!"
      val welcomeSource = Source.single(welcomeMsg)

      val serverEchoFlow = Flow[ByteString]
        .via(Framing.delimiter( //chunk the inputs up into actual lines of text
          ByteString("\n"),
          maximumFrameLength = 256,
          allowTruncation = true))
        .map(_.utf8String)
        .via(commandParser)
        .merge(welcomeSource) // merge the initial banner after parser
        .map(_ + "\n")
        .map(ByteString(_))
        .watchTermination()((_, done) => done.onComplete {
          case Failure(err) =>
            println(s"Server flow failed: $err")
          case _ => println(s"Server flow terminated for client: ${connection.remoteAddress}")
        })
      connection.handleWith(serverEchoFlow)
    }
    
    val connections = Tcp().bind(interface = address, port = port)
    val binding = connections.watchTermination()(Keep.left).to(handler).run()

    binding.onComplete {
      case Success(b) =>
        println("Server started, listening on: " + b.localAddress)
      case Failure(e) =>
        println(s"Server could not bind to: $address:$port: ${e.getMessage}")
        system.terminate()
    }

    binding
  }

  def client(id: Int, system: ActorSystem, address: String, port: Int): Unit = {
    implicit val sys = system
    implicit val ec = system.dispatcher

    val connection: Flow[ByteString, ByteString, Future[Tcp.OutgoingConnection]] = Tcp().outgoingConnection(address, port)
    val testInput = ('a' to 'z').map(ByteString(_)) ++ Seq(ByteString("BYE"))
    val source =  Source(testInput).via(connection)
    val closed = source.runForeach(each => println(s"Client: $id received echo: ${each.utf8String}"))
    closed.onComplete(each => println(s"Client: $id closed: $each"))
  }
} 
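Framing.delimiter does the heavy lifting in serverEchoFlow: it re-chunks arbitrary ByteString slices into whole lines. In isolation (a sketch, input string is illustrative):

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Framing, Sink, Source}
import akka.util.ByteString

object FramingSketch extends App {
  implicit val system: ActorSystem = ActorSystem("FramingSketch")

  // one incoming chunk is re-framed into three lines: a, bb, ccc
  Source.single(ByteString("a\nbb\nccc\n"))
    .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 16, allowTruncation = true))
    .map(_.utf8String)
    .runWith(Sink.foreach(println))
}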
Example 32
Source File: AlsoTo.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream_divert

import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.Attributes
import akka.stream.scaladsl.{Flow, Sink, Source}



object AlsoTo extends App {
  implicit val system = ActorSystem("AlsoTo")
  implicit val executionContext = system.dispatcher
  implicit val adapter = Logging(system, this.getClass)

  val source = Source(1 to 10)

  val sink = Sink.foreach { x: Int => adapter.log(Logging.InfoLevel, s" --> Element: $x reached sink") }

  def sinkBlocking = Sink.foreach { x: Int =>
    Thread.sleep(1000)
    adapter.log(Logging.InfoLevel, s" --> Element: $x logged in alsoTo sinkBlocking by ${Thread.currentThread().getName}")
  }

  val flow = Flow[Int]
    .log("before alsoTo")
    .alsoTo(sinkBlocking)
    .log("after alsoTo")
    .withAttributes(
      Attributes.logLevels(
        onElement = Logging.InfoLevel,
        onFinish = Logging.InfoLevel,
        onFailure = Logging.DebugLevel
      ))

  val done = source.via(flow).runWith(sink)
  done.onComplete(_ => system.terminate())
} 
Example 33
Source File: DivertTo.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream_divert

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink, Source}

import scala.concurrent.Future
import scala.util.{Failure, Success}


object DivertTo extends App {
  implicit val system = ActorSystem("DivertTo")
  implicit val executionContext = system.dispatcher

  val source = Source(1 to 10)

  val sink = Sink.foreach[Either[Valid[Int], Invalid[Int]]](each => println(s"Reached sink: ${each.left.get}"))

  val errorSink = Flow[Invalid[Int]]
    .map(each => println(s"Reached errorSink: $each"))
    .to(Sink.ignore)

  val flow: Flow[Int, Either[Valid[Int], Invalid[Int]], NotUsed] = Flow[Int]
    .map { x =>
      if (x % 2 == 0) Left(Valid(x))
      else Right(Invalid(x, Some(new Exception("Is odd"))))
    }
    .map {
      //Drawback of this approach: Pattern matching on all downstream operations
      case left@Left(_) => businessLogicOn(left)
      case right@Right(_) => right
    }
    .map {
      case left@Left(_) => left
      case right@Right(_) => right
    }
    //Divert invalid elements
    //contramap: apply "right.get" to each incoming upstream element *before* it is passed to the errorSink
    .divertTo(errorSink.contramap(_.right.get), _.isRight)

  private def businessLogicOn(left: Left[Valid[Int], Invalid[Int]]) = {
    if (left.value.payload > 5) left
    else Right(Invalid(left.value.payload, Some(new Exception("Is smaller than 5"))))
  }

  val done = source.via(flow).runWith(sink)
  terminateWhen(done)


  def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
}

case class Valid[T](payload: T)

case class Invalid[T](payload: T, cause: Option[Throwable]) 
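The Either wrapping above is what forces the downstream pattern matching the comments complain about. When no error payload is needed, divertTo works on a bare predicate, which keeps the main line free of wrappers. A minimal sketch:

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}

object DivertToSketch extends App {
  implicit val system: ActorSystem = ActorSystem("DivertToSketch")

  val oddSink = Sink.foreach[Int](n => println(s"diverted odd: $n"))

  // elements matching the predicate are routed to oddSink, the rest continue downstream
  Source(1 to 10)
    .divertTo(oddSink, _ % 2 != 0)
    .runWith(Sink.foreach(n => println(s"main: $n")))
}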
Example 34
Source File: ParametrizedFlow.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream_shared_state

import akka.Done
import akka.actor.{ActorSystem, Cancellable}
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, Sink, Source, SourceQueueWithComplete, Zip}
import akka.stream.{FlowShape, OverflowStrategy}

import scala.collection.immutable
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}



object ParametrizedFlow extends App {
  val service = ParameterizedFlowService

  Thread.sleep(5000)
  service.update(1.0)

  Thread.sleep(2000)
  service.update(1.5)
  Thread.sleep(2000)
  service.cancel()
  Thread.sleep(2000)

  println(service.result())
}

object ParameterizedFlowService {
  implicit val system = ActorSystem("ParameterizedFlowService")
  implicit val executionContext = system.dispatcher

  def update(element: Double): Unit = flow._1._2.offer(element)

  def cancel(): Boolean = flow._1._1.cancel()

  def result(): Future[Seq[Double]] = flow._2

  val fun = (flowValue: Int, paramValue: Double) => flowValue * paramValue
  val flow: ((Cancellable, SourceQueueWithComplete[Double]), Future[immutable.Seq[Double]]) =
    Source.tick(0.seconds, 500.millis, 10)
      .viaMat(createParamFlow(1, OverflowStrategy.dropBuffer, 0.5)(fun))(Keep.both)
      .wireTap(x => println(x))
      .toMat(Sink.seq)(Keep.both)
      .run()

  val done: Future[Done] = flow._1._2.watchCompletion()
  terminateWhen(done)

  private def createParamFlow[A, P, O](bufferSize: Int, overflowStrategy: OverflowStrategy, initialParam: P)(fun: (A, P) => O) =
    Flow.fromGraph(GraphDSL.create(Source.queue[P](bufferSize, overflowStrategy)) { implicit builder =>
      queue =>
        import GraphDSL.Implicits._
        val zip = builder.add(Zip[A, P]())
        //Interesting use of the extrapolate operator
        //based on https://doc.akka.io/docs/akka/current/stream/stream-rate.html#understanding-extrapolate-and-expand
        val extra = builder.add(Flow[P].extrapolate(Iterator.continually(_), Some(initialParam)))
        val map = builder.add(Flow[(A, P)].map(r => fun(r._1, r._2)))

        queue ~> extra ~> zip.in1
        zip.out ~> map
        FlowShape(zip.in0, map.out)
    })

  private def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
} 
Example 35
Source File: ConflateWithSeed.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream_shared_state

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import org.slf4j.{Logger, LoggerFactory}

import scala.collection._
import scala.concurrent.duration._
import scala.util.Random


object ConflateWithSeed extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("ConflateWithSeed")
  implicit val executionContext = system.dispatcher

  def seed(i: Int): mutable.LinkedHashMap[Int, Int] = mutable.LinkedHashMap[Int, Int](i -> 1)

  def aggregate(state: mutable.LinkedHashMap[Int, Int], i: Int): mutable.LinkedHashMap[Int, Int] = {
    logger.info(s"Got: $i")
    state.put(i, state.getOrElseUpdate(i, 0) + 1)
    state
  }

  // lazyFlow is not really needed here, but nice to know that it exists
  // conflateWithSeed invokes the seed method every time, so it
  // is safe to materialize this flow multiple times
  val lazyFlow = Flow.lazyFlow(() =>
    Flow[Int]
      .map(_ => Random.nextInt(100))
      .conflateWithSeed(seed)(aggregate)
  )
  Source(1 to 10)
    .via(lazyFlow)
    .throttle(1, 1.second) //simulate slow sink
    .runForeach(each => logger.info(s"1st reached sink: $each"))

//  Source(1 to 10)
//    .via(lazyFlow)
//    .throttle(1, 1.second) //simulate slow sink
//    .runForeach(each => logger.info(s"2nd reached sink: $each"))
} 
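conflateWithSeed is needed above because the aggregate (a map of counts) has a different type than the elements. When aggregate and element types coincide, plain conflate suffices. A sketch:

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.duration._

object ConflateSketch extends App {
  implicit val system: ActorSystem = ActorSystem("ConflateSketch")

  Source(1 to 50)
    .conflate(_ + _)         // sum whatever accumulates while downstream is busy
    .throttle(1, 200.millis) // simulate the slow sink
    .runWith(Sink.foreach(sum => println(s"batch sum: $sum")))
}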
Example 36
Source File: SplitWhen.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream_shared_state

import java.nio.file.Paths

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.IOResult
import akka.stream.scaladsl.{FileIO, Flow, Framing, Keep, Sink, Source}
import akka.util.ByteString
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future
import scala.util.{Failure, Success}


object SplitWhen extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("SplitWhen")
  implicit val executionContext = system.dispatcher

  val nonLinearCapacityFactor = 100 //raise to see how it scales
  val filename = "splitWhen.csv"

  def genResourceFile() = {
    logger.info(s"Writing resource file: $filename...")

    def fileSink(filename: String): Sink[String, Future[IOResult]] =
      Flow[String]
        .map(s => ByteString(s + "\n"))
        .toMat(FileIO.toPath(Paths.get(filename)))(Keep.right)

    Source.fromIterator(() => (1 to nonLinearCapacityFactor).toList.combinations(2))
      .map(each => s"${each.head},${each.last}")
      .runWith(fileSink(filename))
  }

  val sourceOfLines = FileIO.fromPath(Paths.get(filename))
    .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 1024, allowTruncation = true))
    .map(_.utf8String)

  val csvToRecord: Flow[String, Record, NotUsed] = Flow[String]
    .map(_.split(",").map(_.trim))
    .map(stringArrayToRecord)

  val terminationHook: Flow[Record, Record, Unit] = Flow[Record]
    .watchTermination() { (_, done) =>
      done.onComplete {
        case Failure(err) => logger.info(s"Flow failed: $err")
        case _ => system.terminate(); logger.info(s"Flow terminated")
      }
    }

  val printSink = Sink.foreach[Vector[Record]](each => println(s"Reached sink: $each"))

  private def stringArrayToRecord(cols: Array[String]) = Record(cols(0), cols(1))

  private def hasKeyChanged = {
    () => {
      var lastRecordKey: Option[String] = None
      currentRecord: Record =>
        lastRecordKey match {
          case Some(currentRecord.key) | None =>
            lastRecordKey = Some(currentRecord.key)
            List((currentRecord, false))
          case _ =>
            lastRecordKey = Some(currentRecord.key)
            List((currentRecord, true))
        }
    }
  }

  genResourceFile().onComplete {
    case Success(_) =>
      logger.info(s"Start processing...")
      sourceOfLines
        .via(csvToRecord)
        .via(terminationHook)
        .statefulMapConcat(hasKeyChanged)   // stateful decision
        .splitWhen(_._2)                    // split when key has changed
        .map(_._1)                          // proceed with payload
        .fold(Vector.empty[Record])(_ :+ _) // sum payload
        .mergeSubstreams                    // better performance, but why?
        .runWith(printSink)
    case Failure(exception) => logger.info(s"Exception: $exception")
  }

  case class Record(key: String, value: String)
} 
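hasKeyChanged relies on statefulMapConcat giving each materialization its own mutable state. The same pattern in its simplest form, deduplicating consecutive elements (a sketch, input list is illustrative):

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}

object StatefulMapConcatSketch extends App {
  implicit val system: ActorSystem = ActorSystem("StatefulMapConcatSketch")

  Source(List("a", "a", "b", "b", "b", "a"))
    .statefulMapConcat { () =>
      var last: Option[String] = None // fresh state per materialization
      elem: String =>
        val out = if (last.contains(elem)) Nil else List(elem)
        last = Some(elem)
        out
    }
    .runWith(Sink.foreach(println)) // prints a, b, a
}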
Example 37
Source File: WebSocketClient.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream_actor

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.ws._
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, Sink, Source}
import akka.stream.{FlowShape, SourceShape}
import sample.stream_actor.WindTurbineSimulator._

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

object WebSocketClient {
  def apply(id: String, endpoint: String, windTurbineSimulator: ActorRef)
           (implicit
            system: ActorSystem,
            executionContext: ExecutionContext) = {
    new WebSocketClient(id, endpoint, windTurbineSimulator)(system, executionContext)
  }
}

class WebSocketClient(id: String, endpoint: String, windTurbineSimulator: ActorRef)
                     (implicit
                      system: ActorSystem,
                      executionContext: ExecutionContext) {


  val webSocketFlow: Flow[Message, Message, Future[WebSocketUpgradeResponse]] = {
    val websocketUri = s"$endpoint/measurements/$id"
    Http().webSocketClientFlow(WebSocketRequest(websocketUri))
  }

  val outgoing = GraphDSL.create() { implicit builder =>
    val data = WindTurbineData(id)

    val flow = builder.add {
      Source.tick(1.second, 100.millis, ()) //valve for the WindTurbineData frequency
        .map(_ => TextMessage(data.getNext))
    }

    SourceShape(flow.out)
  }

  val incoming = GraphDSL.create() { implicit builder =>
    val flow = builder.add {
      Flow[Message]
        .collect {
          case TextMessage.Strict(text) =>
            Future.successful(text)
          case TextMessage.Streamed(textStream) =>
            textStream.runFold("")(_ + _)
              .flatMap(Future.successful)
        }
        .mapAsync(1)(identity)
        .map(each => println(s"Client received msg: $each"))
    }

    FlowShape(flow.in, flow.out)
  }

  val (upgradeResponse, closed) = Source.fromGraph(outgoing)
    .viaMat(webSocketFlow)(Keep.right) // keep the materialized Future[WebSocketUpgradeResponse]
    .via(incoming)
    .toMat(Sink.ignore)(Keep.both) // also keep the Future[Done]
    .run()


  val connected =
    upgradeResponse.map { upgrade =>
      upgrade.response.status match {
        case StatusCodes.SwitchingProtocols => windTurbineSimulator ! Upgraded
        case statusCode => windTurbineSimulator ! FailedUpgrade(statusCode)
      }
    }

  connected.onComplete {
    case Success(_) => windTurbineSimulator ! Connected
    case Failure(ex) => windTurbineSimulator ! ConnectionFailure(ex)
  }

  closed.map { _ =>
    windTurbineSimulator ! Terminated
  }
} 
Example 38
Source File: WebsocketServer.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package alpakka.env

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.directives.WebSocketDirectives
import akka.stream.scaladsl.{Flow, Sink, Source}

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{Failure, Success}


object WebsocketServer extends App with WebSocketDirectives {
  implicit val system = ActorSystem("WebsocketServer")
  implicit val executionContext = system.dispatcher

  val (address, port) = ("127.0.0.1", 6002)
  server(address, port)

  def server(address: String, port: Int) = {

    def echoFlow: Flow[Message, Message, Any] =
      Flow[Message].mapConcat {
        case tm: TextMessage =>
          println(s"Server received: $tm")
          TextMessage(Source.single("Echo: ") ++ tm.textStream) :: Nil
        case bm: BinaryMessage =>
          // ignore binary messages but drain content to avoid the stream being clogged
          bm.dataStream.runWith(Sink.ignore)
          Nil
      }

    val websocketRoute: Route =
      path("echo") {
        handleWebSocketMessages(echoFlow)
      }

    val bindingFuture = Http().bindAndHandle(websocketRoute, address, port)
    bindingFuture.onComplete {
      case Success(b) =>
        println("Server started, listening on: " + b.localAddress)
      case Failure(e) =>
        println(s"Server could not bind to $address:$port. Exception message: ${e.getMessage}")
        system.terminate()
    }

    sys.addShutdownHook {
      println("About to shutdown...")
      val fut = bindingFuture.map(serverBinding => serverBinding.terminate(hardDeadline = 3.seconds))
      println("Waiting for connections to terminate...")
      val onceAllConnectionsTerminated = Await.result(fut, 10.seconds)
      println("Connections terminated")
      onceAllConnectionsTerminated.flatMap(_ => system.terminate())
    }
  }
} 
Example 39
Source File: IngestSocketFactory.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.services

import akka.NotUsed
import akka.actor.{ActorRef, ActorRefFactory, Props}
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Sink, Source}
import hydra.core.ingest.IngestionReport

trait IngestSocketFactory {
  def ingestFlow(): Flow[String, OutgoingMessage, NotUsed]
}

object IngestSocketFactory {

  def createSocket(fact: ActorRefFactory): IngestSocketFactory = { () =>
    {

      val socketActor = fact.actorOf(Props[IngestionSocketActor])

      def actorSink =
        Sink.actorRefWithBackpressure(
          socketActor,
          onInitMessage = SocketInit,
          ackMessage = SocketAck,
          onCompleteMessage = SocketEnded,
          onFailureMessage = SocketFailed.apply
        )

      val in =
        Flow[String]
          .map(IncomingMessage)
          .to(actorSink)

      val out =
        Source
          .actorRefWithBackpressure[OutgoingMessage](
            SocketAck,
            PartialFunction.empty,
            PartialFunction.empty
          )
          .mapMaterializedValue(socketActor ! SocketStarted(_))

      Flow.fromSinkAndSourceCoupled(in, out)

    }
  }
}

sealed trait SocketEvent

case object SocketInit extends SocketEvent

case class SocketStarted(ref: ActorRef) extends SocketEvent

case object SocketEnded extends SocketEvent

case object SocketAck extends SocketEvent

case class IncomingMessage(message: String) extends SocketEvent

case class SocketFailed(ex: Throwable)

sealed trait OutgoingMessage extends SocketEvent

case class SimpleOutgoingMessage(status: Int, message: String)
    extends OutgoingMessage

case class IngestionOutgoingMessage(report: IngestionReport)
    extends OutgoingMessage 
Example 40
Source File: GitHub.scala    From akka-api-gateway-example   with MIT License 5 votes vote down vote up
package jp.co.dzl.example.akka.api.service

import akka.NotUsed
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
import akka.stream.scaladsl.Flow

trait GitHub {
  def from(original: HttpRequest): Flow[HttpRequest, HttpRequest, NotUsed]
  def send: Flow[HttpRequest, HttpResponse, NotUsed]
}

class GitHubImpl(
    host:       String,
    port:       Int,
    timeout:    Int,
    httpClient: HttpClient
) extends GitHub {
  def from(original: HttpRequest): Flow[HttpRequest, HttpRequest, NotUsed] = Flow[HttpRequest].map { req =>
    val xForwardedHost = original.headers.find(_.is("host")).map(_.value()).getOrElse(s"$host:$port")
    val modifiedHeader = original.addHeader(RawHeader("X-Forwarded-Host", xForwardedHost))
      .headers
      .filterNot(_.lowercaseName() == "host")
      .filterNot(_.lowercaseName() == "timeout-access")

    req.withHeaders(modifiedHeader)
  }

  def send: Flow[HttpRequest, HttpResponse, NotUsed] =
    Flow[HttpRequest].via(httpClient.connectionHttps(host, port, timeout))
} 
Example 41
Source File: HttpClientSpec.scala    From akka-api-gateway-example   with MIT License 5 votes vote down vote up
package jp.co.dzl.example.akka.api.service

import akka.actor.ActorSystem
import akka.stream.scaladsl.Flow
import org.scalatest.{ BeforeAndAfterAll, Matchers, FlatSpec }

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class HttpClientSpec extends FlatSpec with Matchers with BeforeAndAfterAll {
  implicit val system = ActorSystem("http-client-spec")
  implicit val executor = system.dispatcher

  override protected def afterAll: Unit = {
    Await.result(system.terminate(), Duration.Inf)
  }

  "#conectionHttps" should "return outgoing connection flow" in {
    val httpClient = new HttpClientImpl(system)
    val connection = httpClient.connectionHttps("127.0.0.1", 8000, 5)

    connection shouldBe a[Flow[_, _, _]]
  }
} 
Example 42
Source File: GitHubSpec.scala    From akka-api-gateway-example   with MIT License 5 votes vote down vote up
package jp.co.dzl.example.akka.api.service

import akka.actor.ActorSystem
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.model.{ HttpMethods, HttpRequest, HttpResponse }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Source }
import akka.stream.testkit.scaladsl.TestSink
import org.scalamock.scalatest.MockFactory
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, Matchers }

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class GitHubSpec extends FlatSpec with Matchers with ScalaFutures with BeforeAndAfterAll with MockFactory {
  implicit val system = ActorSystem("github-spec")
  implicit val executor = system.dispatcher
  implicit val materializer = ActorMaterializer()

  override protected def afterAll: Unit = {
    Await.result(system.terminate(), Duration.Inf)
  }

  "#from" should "merge original headers to github request" in {
    val github = new GitHubImpl("127.0.0.1", 8000, 5, mock[HttpClient])
    val request = HttpRequest(HttpMethods.GET, "/")
      .addHeader(RawHeader("host", "dummy"))
      .addHeader(RawHeader("timeout-access", "dummy"))

    val result = Source.single(HttpRequest(HttpMethods.GET, "/v1/github/users/xxxxxx"))
      .via(github.from(request))
      .runWith(TestSink.probe[HttpRequest])
      .request(1)
      .expectNext()

    result.headers.filter(_.lowercaseName() == "host") shouldBe empty
    result.headers.filter(_.lowercaseName() == "timeout-access") shouldBe empty
    result.headers.filter(_.lowercaseName() == "x-forwarded-host") shouldNot be(empty)
  }

  "#send" should "connect using http client" in {
    val httpResponse = HttpResponse()
    val httpClient = mock[HttpClient]
    (httpClient.connectionHttps _).expects(*, *, *).returning(Flow[HttpRequest].map(_ => httpResponse))

    val github = new GitHubImpl("127.0.0.1", 8000, 5, httpClient)
    val result = Source.single(HttpRequest(HttpMethods.GET, "/"))
      .via(github.send)
      .runWith(TestSink.probe[HttpResponse])
      .request(1)
      .expectNext()

    result shouldBe httpResponse
  }
} 
Example 43
Source File: ClickhouseSink.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse.stream

import akka.Done
import akka.stream.scaladsl.{Flow, Keep, Sink}
import com.crobox.clickhouse.ClickhouseClient
import com.crobox.clickhouse.internal.QuerySettings
import com.typesafe.config.Config
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

case class ClickhouseIndexingException(msg: String, cause: Throwable, payload: Seq[String], table: String)
    extends RuntimeException(msg, cause)
case class Insert(table: String, jsonRow: String)

object ClickhouseSink extends LazyLogging {

  def insertSink(config: Config, client: ClickhouseClient, indexerName: Option[String] = None)(
      implicit ec: ExecutionContext,
      settings: QuerySettings = QuerySettings()
  ): Sink[Insert, Future[Done]] = {
    val indexerGeneralConfig = config.getConfig("crobox.clickhouse.indexer")
    val mergedIndexerConfig = indexerName
      .flatMap(
        theIndexName =>
          if (indexerGeneralConfig.hasPath(theIndexName))
            Some(indexerGeneralConfig.getConfig(theIndexName).withFallback(indexerGeneralConfig))
          else None
      )
      .getOrElse(indexerGeneralConfig)
    Flow[Insert]
      .groupBy(Int.MaxValue, _.table)
      .groupedWithin(mergedIndexerConfig.getInt("batch-size"),
                     mergedIndexerConfig.getDuration("flush-interval").getSeconds.seconds)
      .mapAsyncUnordered(mergedIndexerConfig.getInt("concurrent-requests"))(inserts => {
        val table       = inserts.head.table
        val insertQuery = s"INSERT INTO $table FORMAT JSONEachRow"
        val payload     = inserts.map(_.jsonRow)
        val payloadSql  = payload.mkString("\n")
        client.execute(insertQuery, payloadSql) recover {
          case ex =>
            throw ClickhouseIndexingException("failed to index", ex, payload, table)
        } map (_ => inserts)
      })
      .mergeSubstreams
      .toMat(Sink.ignore)(Keep.right)
  }
} 
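Stripped of the ClickHouse specifics, the sink above follows a generic per-key batching skeleton: split the stream per table, batch by size or time, run the inserts concurrently, then merge the substreams back. A sketch with illustrative (table, row) pairs:

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._

object BatchPerKeySketch extends App {
  implicit val system: ActorSystem = ActorSystem("BatchPerKeySketch")
  import system.dispatcher

  Source(List("t1" -> "rowA", "t2" -> "rowB", "t1" -> "rowC"))
    .groupBy(Int.MaxValue, _._1)        // one substream per table
    .groupedWithin(100, 1.second)       // batch by size or time, whichever first
    .mapAsyncUnordered(2)(batch => Future {
      println(s"insert into ${batch.head._1}: ${batch.map(_._2)}")
    })
    .mergeSubstreams
    .runWith(Sink.ignore)
}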
Example 44
Source File: ClickhouseHostHealth.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse.balancing.discovery.health

import akka.NotUsed
import akka.actor.{ActorSystem, Cancellable}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.http.scaladsl.unmarshalling.Unmarshaller
import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Source}
import com.crobox.clickhouse.internal.ClickhouseResponseParser

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

object ClickhouseHostHealth extends ClickhouseResponseParser {

  sealed trait ClickhouseHostStatus {
    val host: Uri
    val code: String
  }

  case class Alive(host: Uri) extends ClickhouseHostStatus { override val code: String = "ok" }

  case class Dead(host: Uri, reason: Throwable) extends ClickhouseHostStatus { override val code: String = "nok" }

  
  def healthFlow(host: Uri)(
      implicit system: ActorSystem,
      materializer: Materializer,
      executionContext: ExecutionContext
  ): Source[ClickhouseHostStatus, Cancellable] = {
    val healthCheckInterval: FiniteDuration =
      system.settings.config
        .getDuration("connection.health-check.interval")
        .getSeconds.seconds
    val healthCheckTimeout: FiniteDuration =
      system.settings.config
        .getDuration("connection.health-check.timeout")
        .getSeconds.seconds

    val healthCachedPool = Http(system).cachedHostConnectionPool[Int](
      host.authority.host.address(),
      host.effectivePort,
      settings = ConnectionPoolSettings(system)
        .withMaxConnections(1)
        .withMinConnections(1)
        .withMaxOpenRequests(2)
        .withMaxRetries(3)
        .withUpdatedConnectionSettings(
          _.withIdleTimeout(healthCheckTimeout + healthCheckInterval).withConnectingTimeout(healthCheckTimeout)
        )
    )
    Source
      .tick(0.milliseconds, healthCheckInterval, 0)
      .map(tick => {
        (HttpRequest(method = HttpMethods.GET, uri = host), tick)
      })
      .via(healthCachedPool)
      .via(parsingFlow(host))
  }

  private[health] def parsingFlow[T](
      host: Uri
  )(implicit ec: ExecutionContext, mat: Materializer): Flow[(Try[HttpResponse], T), ClickhouseHostStatus, NotUsed] =
    Flow[(Try[HttpResponse], T)].mapAsync(1) {
      case (Success(response @ akka.http.scaladsl.model.HttpResponse(StatusCodes.OK, _, _, _)), _) =>
        Unmarshaller.stringUnmarshaller(decodeResponse(response).entity)
          .map(splitResponse)
          .map(
            stringResponse =>
              if (stringResponse.equals(Seq("Ok."))) {
                Alive(host)
              } else {
                Dead(host, new IllegalArgumentException(s"Got wrong result $stringResponse"))
            }
          )
      case (Success(response), _) =>
        Future.successful(Dead(host, new IllegalArgumentException(s"Got response with status code ${response.status}")))
      case (Failure(ex), _) =>
        Future.successful(Dead(host, ex))
    }

} 
Example 45
Source File: FilterRecursiveListBucketHandler.scala    From rokku   with Apache License 2.0 5 votes vote down vote up
package com.ing.wbaa.rokku.proxy.handler

import java.net.URLDecoder

import akka.NotUsed
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
import akka.stream.alpakka.xml.scaladsl.{ XmlParsing, XmlWriting }
import akka.stream.alpakka.xml.{ EndElement, ParseEvent, StartElement, TextEvent }
import akka.stream.scaladsl.Flow
import akka.util.ByteString
import com.ing.wbaa.rokku.proxy.data.{ Read, RequestId, S3Request, User }

import scala.collection.immutable
import scala.collection.mutable.ListBuffer


trait FilterRecursiveListBucketHandler {

  // authorization check used by the filter below; this abstract declaration is
  // reconstructed from its usage, since the original trait header was lost
  protected[this] def isUserAuthorizedForRequest(request: S3Request, user: User)(implicit id: RequestId): Boolean

  protected[this] def filterRecursiveListObjects(user: User, requestS3: S3Request)(implicit id: RequestId): Flow[ByteString, ByteString, NotUsed] = {
    def elementResult(allContentsElements: ListBuffer[ParseEvent], isContentsTag: Boolean, element: ParseEvent): immutable.Seq[ParseEvent] = {
      if (isContentsTag) {
        allContentsElements += element
        immutable.Seq.empty
      } else {
        immutable.Seq(element)
      }
    }

    def isPathOkInRangerPolicy(path: String)(implicit id: RequestId): Boolean = {
      val pathToCheck = normalizePath(path)
      val isUserAuthorized = isUserAuthorizedForRequest(requestS3.copy(s3BucketPath = Some(pathToCheck)), user)
      isUserAuthorized
    }

    def normalizePath(path: String): String = {
      val delimiter = "/"
      val decodedPath = URLDecoder.decode(path, "UTF-8")
      val delimiterIndex = decodedPath.lastIndexOf(delimiter)
      val pathToCheckWithoutLastSlash = if (delimiterIndex > 0) delimiter + decodedPath.substring(0, delimiterIndex) else ""
      val s3BucketName = requestS3.s3BucketPath.getOrElse(delimiter)
      val s3pathWithoutLastDelimiter = if (s3BucketName.length > 1 && s3BucketName.endsWith(delimiter)) s3BucketName.substring(0, s3BucketName.length - 1) else s3BucketName
      s3pathWithoutLastDelimiter + pathToCheckWithoutLastSlash
    }

    Flow[ByteString].via(XmlParsing.parser)
      .statefulMapConcat(() => {
        // state
        val keyTagValue = StringBuilder.newBuilder
        val allContentsElements = new ListBuffer[ParseEvent]
        var isContentsTag = false
        var isKeyTag = false

        // aggregation function
        parseEvent =>
          parseEvent match {
            //catch <Contents> to start collecting elements
            case element: StartElement if element.localName == "Contents" =>
              isContentsTag = true
              allContentsElements.clear()
              allContentsElements += element
              immutable.Seq.empty
            //catch end </Contents> to validate the path in ranger
            case element: EndElement if element.localName == "Contents" =>
              isContentsTag = false
              allContentsElements += element
              if (isPathOkInRangerPolicy(keyTagValue.stripMargin)) {
                allContentsElements.toList
              } else {
                immutable.Seq.empty
              }
            // catch <Key> where is the patch name to match in ranger
            case element: StartElement if element.localName == "Key" =>
              keyTagValue.clear()
              isKeyTag = true
              elementResult(allContentsElements, isContentsTag, element)
            //catch end </Key>
            case element: EndElement if element.localName == "Key" =>
              isKeyTag = false
              elementResult(allContentsElements, isContentsTag, element)
            //catch all element text <..>text<\..> but only set the text from <Key>
            case element: TextEvent =>
              if (isKeyTag) keyTagValue.append(element.text)
              elementResult(allContentsElements, isContentsTag, element)
            //just past through the rest of elements
            case element =>
              elementResult(allContentsElements, isContentsTag, element)
          }
      })
      .via(XmlWriting.writer)
  }

} 
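The filter above is built on Alpakka's streaming XML parser: XmlParsing.parser turns raw bytes into a stream of ParseEvents that can be inspected one at a time. In isolation, using the same event classes (input XML is illustrative):

import akka.actor.ActorSystem
import akka.stream.alpakka.xml.scaladsl.XmlParsing
import akka.stream.alpakka.xml.{EndElement, StartElement, TextEvent}
import akka.stream.scaladsl.{Sink, Source}
import akka.util.ByteString

object XmlParsingSketch extends App {
  implicit val system: ActorSystem = ActorSystem("XmlParsingSketch")

  Source.single(ByteString("<Contents><Key>dir/file.txt</Key></Contents>"))
    .via(XmlParsing.parser)
    .collect {
      case s: StartElement => s"start: ${s.localName}"
      case t: TextEvent    => s"text:  ${t.text}"
      case e: EndElement   => s"end:   ${e.localName}"
    }
    .runWith(Sink.foreach(println))
}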
Example 46
Source File: Client.scala    From akka-http-oauth2-client   with Apache License 2.0 5 votes vote down vote up
package com.github.dakatsuka.akka.http.oauth2.client

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse, Uri }
import akka.stream.Materializer
import akka.stream.scaladsl.{ Flow, Sink }
import com.github.dakatsuka.akka.http.oauth2.client.Error.UnauthorizedException
import com.github.dakatsuka.akka.http.oauth2.client.strategy.Strategy

import scala.concurrent.{ ExecutionContext, Future }

class Client(config: ConfigLike, connection: Option[Flow[HttpRequest, HttpResponse, _]] = None)(implicit system: ActorSystem)
    extends ClientLike {
  def getAuthorizeUrl[A <: GrantType](grant: A, params: Map[String, String] = Map.empty)(implicit s: Strategy[A]): Option[Uri] =
    s.getAuthorizeUrl(config, params)

  def getAccessToken[A <: GrantType](
      grant: A,
      params: Map[String, String] = Map.empty
  )(implicit s: Strategy[A], ec: ExecutionContext, mat: Materializer): Future[Either[Throwable, AccessToken]] = {
    val source = s.getAccessTokenSource(config, params)

    source
      .via(connection.getOrElse(defaultConnection))
      .mapAsync(1)(handleError)
      .mapAsync(1)(AccessToken.apply)
      .runWith(Sink.head)
      .map(Right.apply)
      .recover {
        case ex => Left(ex)
      }
  }

  def getConnectionWithAccessToken(accessToken: AccessToken): Flow[HttpRequest, HttpResponse, _] =
    Flow[HttpRequest]
      .map(_.addCredentials(OAuth2BearerToken(accessToken.accessToken)))
      .via(connection.getOrElse(defaultConnection))

  private def defaultConnection: Flow[HttpRequest, HttpResponse, _] =
    config.site.getScheme match {
      case "http"  => Http().outgoingConnection(config.getHost, config.getPort)
      case "https" => Http().outgoingConnectionHttps(config.getHost, config.getPort)
    }

  private def handleError(response: HttpResponse)(implicit ec: ExecutionContext, mat: Materializer): Future[HttpResponse] = {
    if (response.status.isFailure()) UnauthorizedException.fromHttpResponse(response).flatMap(Future.failed(_))
    else Future.successful(response)
  }
}

object Client {
  def apply(config: ConfigLike)(implicit system: ActorSystem): Client =
    new Client(config)

  def apply(config: ConfigLike, connection: Flow[HttpRequest, HttpResponse, _])(implicit system: ActorSystem): Client =
    new Client(config, Some(connection))
} 
Example 47
Source File: CarbonClient.scala    From akka-http-metrics   with Apache License 2.0 5 votes vote down vote up
package fr.davit.akka.http.metrics.graphite

import java.time.{Clock, Instant}

import akka.NotUsed
import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.scaladsl.{Flow, Keep, RestartFlow, Sink, Source, Tcp}
import akka.stream.{OverflowStrategy, QueueOfferResult}
import akka.util.ByteString
import fr.davit.akka.http.metrics.core.Dimension

import scala.concurrent.Await
import scala.concurrent.duration.{Duration, _}

object CarbonClient {

  def apply(host: String, port: Int)(implicit system: ActorSystem): CarbonClient = new CarbonClient(host, port)
}

class CarbonClient(host: String, port: Int)(implicit system: ActorSystem) extends AutoCloseable {

  private val logger         = Logging(system.eventStream, classOf[CarbonClient])
  protected val clock: Clock = Clock.systemUTC()

  private def serialize[T](name: String, value: T, dimensions: Seq[Dimension], ts: Instant): ByteString = {
    val tags         = dimensions.map(d => d.key + "=" + d.value).toList
    val taggedMetric = (name :: tags).mkString(";")
    ByteString(s"$taggedMetric $value ${ts.getEpochSecond}\n")
  }

  // TODO read backoff from config
  private def connection: Flow[ByteString, ByteString, NotUsed] =
    RestartFlow.withBackoff(
      minBackoff = 3.seconds,
      maxBackoff = 30.seconds,
      randomFactor = 0.2, // adds 20% "noise" to vary the intervals slightly
      maxRestarts = -1 // keep retrying forever
    )(() => Tcp().outgoingConnection(host, port))

  private val queue = Source
    .queue[ByteString](19, OverflowStrategy.dropHead)
    .via(connection)
    .toMat(Sink.ignore)(Keep.left)
    .run()

  def publish[T](
      name: String,
      value: T,
      dimensions: Seq[Dimension] = Seq.empty,
      ts: Instant = Instant.now(clock)
  ): Unit = {
    // it's reasonable to block until the message is enqueued
    Await.result(queue.offer(serialize(name, value, dimensions, ts)), Duration.Inf) match {
      case QueueOfferResult.Enqueued    => logger.debug("Metric {} enqueued", name)
      case QueueOfferResult.Dropped     => logger.debug("Metric {} dropped", name)
      case QueueOfferResult.Failure(e)  => logger.error(e, s"Failed publishing metric $name")
      case QueueOfferResult.QueueClosed => throw new Exception("Failed publishing metric to closed carbon client")
    }
  }

  override def close(): Unit = {
    queue.complete()
    Await.result(queue.watchCompletion(), Duration.Inf)
  }
} 
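A hypothetical usage sketch. It assumes a plaintext Carbon/Graphite agent listening on localhost:2003 (the host and port are assumptions, not from the source); without a listener the RestartFlow retries forever and close() would block:

import akka.actor.ActorSystem

object CarbonClientUsage extends App {
  implicit val system: ActorSystem = ActorSystem("CarbonClientUsage")

  // hypothetical endpoint: a plaintext Carbon listener on localhost:2003
  val client = CarbonClient("localhost", 2003)
  client.publish("http.requests.count", 42) // sends "http.requests.count 42 <epoch>\n"
  client.close()
}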
Example 48
Source File: HttpMetricsRoute.scala    From akka-http-metrics   with Apache License 2.0 5 votes vote down vote up
package fr.davit.akka.http.metrics.core.scaladsl.server

import akka.NotUsed
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.http.scaladsl.server._
import akka.http.scaladsl.settings.{ParserSettings, RoutingSettings}
import akka.stream.Materializer
import akka.stream.scaladsl.Flow
import fr.davit.akka.http.metrics.core.HttpMetricsHandler
import fr.davit.akka.http.metrics.core.scaladsl.model.PathLabelHeader

import scala.concurrent.{ExecutionContextExecutor, Future}

object HttpMetricsRoute {

  implicit def apply(route: Route): HttpMetricsRoute = new HttpMetricsRoute(route)

}


final class HttpMetricsRoute private (route: Route) extends HttpMetricsDirectives {

  private def markUnhandled(inner: Route): Route = {
    Directives.mapResponse(markUnhandled).tapply(_ => inner)
  }

  private def markUnhandled(response: HttpResponse): HttpResponse = {
    response.addHeader(PathLabelHeader.Unhandled)
  }

  def recordMetrics(metricsHandler: HttpMetricsHandler)(
      implicit
      routingSettings: RoutingSettings,
      parserSettings: ParserSettings,
      materializer: Materializer,
      routingLog: RoutingLog,
      executionContext: ExecutionContextExecutor = null,
      rejectionHandler: RejectionHandler = RejectionHandler.default,
      exceptionHandler: ExceptionHandler = null
  ): Flow[HttpRequest, HttpResponse, NotUsed] = {
    val effectiveEC = if (executionContext ne null) executionContext else materializer.executionContext

    {
      // override the execution context passed as parameter
      implicit val executionContext: ExecutionContextExecutor = effectiveEC
      Flow[HttpRequest]
        .mapAsync(1)(recordMetricsAsync(metricsHandler))
        .watchTermination() {
          case (mat, completion) =>
            // every connection materializes a stream.
            metricsHandler.onConnection(completion)
            mat
        }
    }
  }

  def recordMetricsAsync(metricsHandler: HttpMetricsHandler)(
      implicit
      routingSettings: RoutingSettings,
      parserSettings: ParserSettings,
      materializer: Materializer,
      routingLog: RoutingLog,
      executionContext: ExecutionContextExecutor = null,
      rejectionHandler: RejectionHandler = RejectionHandler.default,
      exceptionHandler: ExceptionHandler = null
  ): HttpRequest => Future[HttpResponse] = {
    val effectiveEC               = if (executionContext ne null) executionContext else materializer.executionContext
    val effectiveRejectionHandler = rejectionHandler.mapRejectionResponse(markUnhandled)
    val effectiveExceptionHandler = ExceptionHandler.seal(exceptionHandler).andThen(markUnhandled(_))

    {
      // override the execution context passed as parameter, rejection and error handler
      implicit val executionContext: ExecutionContextExecutor = effectiveEC
      implicit val rejectionHandler: RejectionHandler         = effectiveRejectionHandler
      implicit val exceptionHandler: ExceptionHandler         = effectiveExceptionHandler

      request =>
        val response = Route.asyncHandler(route).apply(request)
        metricsHandler.onRequest(request, response)
        response
    }
  }
} 
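A minimal binding sketch for the flow above, assuming a classic akka-http server and some HttpMetricsHandler implementation supplied by a metrics registry (both assumptions, not part of the file):

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import fr.davit.akka.http.metrics.core.HttpMetricsHandler
import fr.davit.akka.http.metrics.core.scaladsl.server.HttpMetricsRoute._

object MetricsServerSketch extends App {
  implicit val system: ActorSystem = ActorSystem("metrics-demo")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  val route = path("hello") { complete("world") }
  val handler: HttpMetricsHandler = ???  // provided by a registry implementation (assumed)

  // The implicit conversion above turns `route` into an HttpMetricsRoute,
  // so recordMetrics yields a Flow that bindAndHandle can serve.
  Http().bindAndHandle(route.recordMetrics(handler), "0.0.0.0", 8080)
}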
Example 49
Source File: ConsumerStream.scala    From reactive-kafka-microservice-template   with Apache License 2.0    5 votes
package com.omearac.consumers

import akka.actor.{ActorRef, ActorSystem}
import akka.kafka.ConsumerMessage.CommittableOffsetBatch
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerMessage, ConsumerSettings, Subscriptions}
import akka.stream.scaladsl.{Flow, Sink}
import com.omearac.shared.EventMessages.FailedMessageConversion
import com.omearac.shared.JsonMessageConversion.Conversion
import com.omearac.shared.{AkkaStreams, EventSourcing}
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer}

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.Future



trait ConsumerStream extends AkkaStreams with EventSourcing {
    implicit val system: ActorSystem
    def self: ActorRef


    def createStreamSink(consumerActorSink : ActorRef) = {
        Sink.actorRefWithAck(consumerActorSink, "STREAM_INIT", "OK", "STREAM_DONE")
    }

    def createStreamSource(consumerProperties: Map[String,String])  = {
        val kafkaMBAddress = consumerProperties("bootstrap-servers")
        val groupID = consumerProperties("groupId")
        val topicSubscription = consumerProperties("subscription-topic")
        val consumerSettings = ConsumerSettings(system, new ByteArrayDeserializer, new StringDeserializer)
            .withBootstrapServers(kafkaMBAddress)
            .withGroupId(groupID)
            .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

        Consumer.committableSource(consumerSettings, Subscriptions.topics(topicSubscription))
    }

    def createStreamFlow[msgType: Conversion] = {
        Flow[ConsumerMessage.CommittableMessage[Array[Byte], String]]
            .map(msg => (msg.committableOffset, Conversion[msgType].convertFromJson(msg.record.value)))
            //Publish the conversion error event messages returned from the JSONConversion
            .map (tuple => publishConversionErrors[msgType](tuple))
            .filter(result => result.isRight)
            .map(result => result.right.get)
            //Group the commit offsets and correctly converted messages for more efficient Kafka commits
            .batch(max = 20, tuple => (CommittableOffsetBatch.empty.updated(tuple._1), ArrayBuffer[msgType](tuple._2))) {
                (tupleOfCommitOffsetAndMsgs, tuple) =>
                    (tupleOfCommitOffsetAndMsgs._1.updated(tuple._1), tupleOfCommitOffsetAndMsgs._2 :+ tuple._2)
            }
            //Take the first element of the tuple (set of commit numbers) to add to kafka commit log and then return the collection of grouped case class messages
            .mapAsync(4)(tupleOfCommitOffsetAndMsgs => commitOffsetsToKafka[msgType](tupleOfCommitOffsetAndMsgs))
            .map(msgGroup => msgGroup._2)
    }

    def commitOffsetsToKafka[msgType](tupleOfCommitOffsetAndMsgs: (ConsumerMessage.CommittableOffsetBatch, ArrayBuffer[msgType])) = Future {
        (tupleOfCommitOffsetAndMsgs._1.commitScaladsl(), tupleOfCommitOffsetAndMsgs._2)
    }

    def publishConversionErrors[msgType](tupleOfCommitOffsetAndConversionResults: (ConsumerMessage.CommittableOffset, Either[FailedMessageConversion,msgType]))
    : Either[Unit,(ConsumerMessage.CommittableOffset,msgType)] = {

        if (tupleOfCommitOffsetAndConversionResults._2.isLeft) {

            //Publish a local event that there was a failure in conversion
            publishLocalEvent(tupleOfCommitOffsetAndConversionResults._2.left.get)

            //Commit the Kafka Offset to acknowledge that the message was consumed
            Left(tupleOfCommitOffsetAndConversionResults._1.commitScaladsl())
        }
        else
            Right(tupleOfCommitOffsetAndConversionResults._1,tupleOfCommitOffsetAndConversionResults._2.right.get)
    }
} 
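A wiring sketch for the trait above. The event type, its JSON Conversion instance, and the consuming actor are assumptions:

// Inside a class that mixes in ConsumerStream (a materializer is expected to
// be in scope via the AkkaStreams trait). A case class MyEvent with an
// implicit Conversion[MyEvent] is assumed.
val consumerProps = Map(
    "bootstrap-servers"  -> "localhost:9092",
    "groupId"            -> "my-group",
    "subscription-topic" -> "my-topic")

createStreamSource(consumerProps)
    .via(createStreamFlow[MyEvent])
    .runWith(createStreamSink(consumingActor))  // consumingActor: ActorRef, assumed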
Example 50
Source File: ProducerStream.scala    From reactive-kafka-microservice-template   with Apache License 2.0    5 votes
package com.omearac.producers

import akka.actor.{ActorRef, ActorSystem}
import akka.kafka.ProducerSettings
import akka.kafka.scaladsl.Producer
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Source}
import com.omearac.shared.JsonMessageConversion.Conversion
import com.omearac.shared.{AkkaStreams, EventSourcing}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer}



trait ProducerStream extends AkkaStreams with EventSourcing {
    implicit val system: ActorSystem
    def self: ActorRef

    def createStreamSource[msgType] = {
        Source.queue[msgType](Int.MaxValue,OverflowStrategy.backpressure)
    }

    def createStreamSink(producerProperties: Map[String, String]) = {
        val kafkaMBAddress = producerProperties("bootstrap-servers")
        val producerSettings = ProducerSettings(system, new ByteArraySerializer, new StringSerializer).withBootstrapServers(kafkaMBAddress)

        Producer.plainSink(producerSettings)
    }

    def createStreamFlow[msgType: Conversion](producerProperties: Map[String, String]) = {
        val numberOfPartitions = producerProperties("num.partitions").toInt - 1
        val topicToPublish = producerProperties("publish-topic")
        val rand = new scala.util.Random
        val range = 0 to numberOfPartitions

        Flow[msgType].map { msg =>
            val partition = range(rand.nextInt(range.length))
            val stringJSONMessage = Conversion[msgType].convertToJson(msg)
            new ProducerRecord[Array[Byte], String](topicToPublish, partition, null, stringJSONMessage)
        }
    }
} 
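A matching wiring sketch for the producer side (again, MyEvent and its Conversion instance are assumptions):

// Inside a class that mixes in ProducerStream:
val producerProps = Map(
    "bootstrap-servers" -> "localhost:9092",
    "num.partitions"    -> "3",
    "publish-topic"     -> "my-topic")

val offerQueue = createStreamSource[MyEvent]
    .via(createStreamFlow[MyEvent](producerProps))
    .to(createStreamSink(producerProps))
    .run()

offerQueue.offer(someEvent)  // someEvent: MyEvent, assumed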
Example 51
Source File: AckFlow.scala    From reactive-activemq   with Apache License 2.0    5 votes
package akka.stream.integration
package activemq

import akka.NotUsed
import akka.stream.scaladsl.Flow

import scala.concurrent.{ ExecutionContext, Future }

object AckFlow {

  
  def filter[A](predicate: A => Boolean): Flow[AckUTup[A], AckUTup[A], NotUsed] = Flow[AckUTup[A]].filter {
    case (p, a) =>
      try {
        val bool = predicate(a)
        if (!p.isCompleted) p.success(())
        bool
      } catch {
        case t: Throwable =>
          if (!p.isCompleted) p.failure(t)
          throw t
      }
  }
} 
Example 52
Source File: AckBidiFlow.scala    From reactive-activemq   with Apache License 2.0    5 votes
package akka.stream.integration
package activemq

import akka.NotUsed
import akka.stream._
import akka.stream.scaladsl.{ BidiFlow, Flow, GraphDSL, Unzip, Zip }

object AckBidiFlow {

  
  def apply[R, S, T](
    bufferSize: Int = 10,
    overflowStrategy: OverflowStrategy = OverflowStrategy.backpressure
  ): BidiFlow[(R, S), S, T, (R, T), NotUsed] = {

    BidiFlow.fromGraph(GraphDSL.create() { implicit b =>
      import GraphDSL.Implicits._

      val unzip = b.add(Unzip[R, S])
      val zip = b.add(Zip[R, T])

      if (bufferSize > 0) {
        val buffer = b.add(Flow[R].buffer(bufferSize, overflowStrategy))
        unzip.out0 ~> buffer ~> zip.in0
      } else
        unzip.out0 ~> zip.in0

      BidiShape(unzip.in, unzip.out1, zip.in1, zip.out)
    })
  }
} 
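A note on the shape above: the acknowledgement token R of each incoming (R, S) pair bypasses whatever flow is later joined to this bidi flow (through a buffer when bufferSize > 0) and is re-zipped with that flow's T output. The pairing is only correct when the joined flow emits exactly one output element per input, in order, which is why the tests further below exercise it with an identity flow.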
Example 53
Source File: ActiveMqProducer.scala    From reactive-activemq   with Apache License 2.0    5 votes
package akka.stream.integration
package activemq

import akka.actor.ActorSystem
import akka.camel.{ CamelExtension, CamelMessage }
import akka.stream.integration.activemq.extension.ActiveMqExtension
import akka.stream.scaladsl.{ Flow, Keep, Sink }
import akka.{ Done, NotUsed }
import org.apache.camel.ProducerTemplate

import scala.collection.JavaConversions._
import scala.concurrent.{ ExecutionContext, Future }

object ActiveMqProducer {

  private def send[A: CamelMessageBuilder](payload: A, producerName: String, endpointUri: String, producer: ProducerTemplate)(implicit ec: ExecutionContext): Future[A] = Future {
    val msg: CamelMessage = implicitly[CamelMessageBuilder[A]].build(payload)
    producer.sendBodyAndHeaders(endpointUri, msg.body, msg.headers.mapValues(_.asInstanceOf[AnyRef]))
    payload
  }

  // NOTE: the original scaladoc and the `flow`/`sink` helpers were stripped by
  // the example extractor; the definitions below are an assumed reconstruction,
  // mirroring the AckActiveMqProducer companion later in this document.
  def flow[A: CamelMessageBuilder](producerName: String, qos: Int = 8)(implicit ec: ExecutionContext, system: ActorSystem): Flow[A, A, NotUsed] = {
    val template: ProducerTemplate = CamelExtension(system).template
    val endpointUri = ActiveMqExtension(system).producerEndpointUri(producerName)
    Flow[A].mapAsync(qos)(payload => send(payload, producerName, endpointUri, template))
  }

  def sink[A: CamelMessageBuilder](producerName: String, qos: Int = 8)(implicit ec: ExecutionContext, system: ActorSystem): Sink[A, Future[Done]] =
    flow(producerName, qos).toMat(Sink.ignore)(Keep.right)

  
  def apply[A: CamelMessageBuilder](producerName: String, qos: Int = 8)(implicit ec: ExecutionContext, system: ActorSystem): Sink[A, Future[Done]] =
    sink(producerName, qos)
} 
Example 54
Source File: AckActiveMqProducer.scala    From reactive-activemq   with Apache License 2.0    5 votes
package akka.stream.integration
package activemq

import akka.Done
import akka.actor.ActorSystem
import akka.camel.{ CamelExtension, CamelMessage }
import akka.stream.integration.activemq.extension.ActiveMqExtension
import akka.stream.scaladsl.{ Flow, Keep, Sink }

import scala.collection.JavaConversions._
import scala.concurrent.{ ExecutionContext, Future }

object AckActiveMqProducer {
  def apply[A](producerName: String, qos: Int = 8)(implicit ec: ExecutionContext, system: ActorSystem, builder: MessageBuilder[A, CamelMessage]): Sink[AckUTup[A], Future[Done]] =
    sink(producerName, qos)

  def sink[A](producerName: String, qos: Int = 8)(implicit ec: ExecutionContext, system: ActorSystem, builder: MessageBuilder[A, CamelMessage]): Sink[AckUTup[A], Future[Done]] =
    flow(producerName, qos).toMat(Sink.ignore)(Keep.right)

  def flow[A](producerName: String, qos: Int = 8)(implicit ec: ExecutionContext, system: ActorSystem, builder: MessageBuilder[A, CamelMessage]) = {
    val template = CamelExtension(system).template
    Flow[AckUTup[A]].mapAsync(qos) {
      case (p, payload) =>
        Future {
          val camelMessage = builder.build(payload)
          val uri = ActiveMqExtension(system).producerEndpointUri(producerName)
          template.sendBodyAndHeaders(uri, camelMessage.body, camelMessage.headers.mapValues(_.asInstanceOf[AnyRef]))
        }.map { _ => if (!p.isCompleted) p.success(()) }.recover { case cause: Throwable => if (!p.isCompleted) p.failure(cause) }
    }
  }
} 
Example 55
Source File: AckSink.scala    From reactive-activemq   with Apache License 2.0    5 votes
package akka.stream.integration
package activemq

import akka.Done
import akka.stream.scaladsl.{ Flow, Keep, Sink }

import scala.concurrent.Future

object AckSink {
  
  def complete[A]: Sink[AckTup[A, A], Future[Done]] = {
    Flow[AckTup[A, A]].map {
      case (p, a) =>
        try {
          if (!p.isCompleted) p.success(a)
        } catch {
          case cause: Throwable =>
            if (!p.isCompleted) p.failure(cause)
        }
    }.toMat(Sink.ignore)(Keep.right).named("completeAckSink")
  }
} 
Example 56
Source File: ActiveMqFlowTest.scala    From reactive-activemq   with Apache License 2.0    5 votes
package akka.stream.integration
package activemq

import akka.stream.integration.PersonDomain.Person
import akka.stream.scaladsl.Flow

import scala.concurrent.Promise

class ActiveMqFlowTest extends ActiveMqTestSpec {

  behavior of "ActiveMqFlow"

  it should "propagate messages from input to output unmodified, if mediated by the identity flow" in {
    withTestTopicPublisher("AckBidiFlowTestInput") { pub =>
      withTestTopicSubscriber("AckBidiFlowTestOutput") { sub =>
        withActiveMqBidiFlow("AckBidiFlowTestInput", "AckBidiFlowTestOutput") { flow =>

          val identityFlow = Flow[Person].map(identity)
          val ref = flow.join(identityFlow).run()

          pub.sendNext(testPerson1)

          sub.request(1)
          sub.expectNextPF {
            case (p: Promise[Unit], `testPerson1`) => p.success(())
          }

          pub.sendNext(testPerson2)

          sub.request(1)
          sub.expectNextPF {
            case (p: Promise[Unit], `testPerson2`) => p.success(())
          }

          pub.sendComplete()
          sub.cancel()
          ref
        }
      }
    }
  }
} 
Example 57
Source File: ActiveMqTestSpec.scala    From reactive-activemq   with Apache License 2.0    5 votes
package akka.stream.integration
package activemq

import akka.NotUsed
import akka.actor.ActorRef
import akka.stream.integration.PersonDomain.Person
import akka.stream.scaladsl.{ Flow, Keep }
import akka.stream.testkit.scaladsl.{ TestSink, TestSource }
import akka.stream.testkit.{ TestPublisher, TestSubscriber }
import akka.testkit.TestActor.AutoPilot
import akka.testkit.TestProbe
import JsonCamelMessageExtractor._
import JsonCamelMessageBuilder._

import scala.util.{ Failure, Success, Try }

// NOTE: the class declaration was stripped by the example extractor; this
// header is an assumed reconstruction (the parent spec type `TestSpec` is a
// guess based on the project's other tests).
abstract class ActiveMqTestSpec extends TestSpec {

  implicit def function1ToAutoPilot[S, T](f: S => T): AutoPilot = new AutoPilot {
    override def run(sender: ActorRef, msg: Any): AutoPilot = msg match {
      case s: S =>
        val tryT: Try[T] = Try(f(s))
        tryT match {
          case Success(t) =>
            sender ! t
            function1ToAutoPilot(f)
          case Failure(f) =>
            fail(s"Failed to apply supplied function to received message: $s", f)
        }
      case _ =>
        fail(s"Received message is not of the required type: $msg")
    }
  }
} 
Example 58
Source File: PoolingRestClient.scala    From openwhisk   with Apache License 2.0    5 votes
package org.apache.openwhisk.http

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.marshalling._
import akka.http.scaladsl.model._
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.http.scaladsl.unmarshalling._
import akka.stream.{ActorMaterializer, OverflowStrategy, QueueOfferResult}
import akka.stream.scaladsl.{Flow, _}
import spray.json._
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

// NOTE: the scaladoc, class declaration, and request-queue plumbing were
// stripped by the example extractor; everything from here down to
// `requestJson` is an assumed reconstruction, inferred from this file's
// imports and the methods that follow.
class PoolingRestClient(
  protocol: String,
  host: String,
  port: Int,
  queueSize: Int)(implicit system: ActorSystem) {
  require(protocol == "http" || protocol == "https", "Protocol must be one of { http, https }.")

  protected implicit val context: ExecutionContext = system.dispatcher
  protected implicit val materializer: ActorMaterializer = ActorMaterializer()

  // A cached connection pool for the target host; requests are keyed by the
  // promise that will receive their response.
  private val pool =
    if (protocol == "http") Http().cachedHostConnectionPool[Promise[HttpResponse]](host, port)
    else Http().cachedHostConnectionPoolHttps[Promise[HttpResponse]](host, port)

  // Requests enter through a bounded queue, which throttles the pool under load.
  private val requestQueue = Source
    .queue[(HttpRequest, Promise[HttpResponse])](queueSize, OverflowStrategy.dropNew)
    .via(pool)
    .to(Sink.foreach {
      case (Success(response), p) => p.success(response)
      case (Failure(error), p)    => p.failure(error)
    })
    .run()

  def request(futureRequest: Future[HttpRequest]): Future[HttpResponse] =
    futureRequest.flatMap { request =>
      val promise = Promise[HttpResponse]()
      requestQueue.offer(request -> promise).flatMap {
        case QueueOfferResult.Enqueued => promise.future
        case _                         => Future.failed(new Exception("Request queue is full."))
      }
    }


  def requestJson[T: RootJsonReader](futureRequest: Future[HttpRequest]): Future[Either[StatusCode, T]] =
    request(futureRequest).flatMap { response =>
      if (response.status.isSuccess) {
        Unmarshal(response.entity.withoutSizeLimit).to[T].map(Right.apply)
      } else {
        Unmarshal(response.entity).to[String].flatMap { body =>
          val statusCode = response.status
          val reason =
            if (body.nonEmpty) s"${statusCode.reason} (details: $body)" else statusCode.reason
          val customStatusCode = StatusCodes
            .custom(intValue = statusCode.intValue, reason = reason, defaultMessage = statusCode.defaultMessage)
          // This is important, as it drains the entity stream.
          // Otherwise the connection stays open and the pool dries up.
          response.discardEntityBytes().future.map(_ => Left(customStatusCode))
        }
      }
    }

  def shutdown(): Future[Unit] = Future.successful(materializer.shutdown())
}

object PoolingRestClient {

  def mkRequest(method: HttpMethod,
                uri: Uri,
                body: Future[MessageEntity] = Future.successful(HttpEntity.Empty),
                headers: List[HttpHeader] = List.empty)(implicit ec: ExecutionContext): Future[HttpRequest] = {
    body.map { b =>
      HttpRequest(method, uri, headers, b)
    }
  }

  def mkJsonRequest(method: HttpMethod, uri: Uri, body: JsValue, headers: List[HttpHeader] = List.empty)(
    implicit ec: ExecutionContext): Future[HttpRequest] = {
    val b = Marshal(body).to[MessageEntity]
    mkRequest(method, uri, b, headers)
  }
} 
Example 59
Source File: TransformStream.scala    From mleap   with Apache License 2.0    5 votes
package ml.combust.mleap.executor.stream

import akka.NotUsed
import akka.stream.scaladsl.Flow
import ml.combust.mleap.executor._
import ml.combust.mleap.runtime.frame.{DefaultLeapFrame, Row, RowTransformer, Transformer}

import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try

// NOTE: the enclosing declaration was stripped by the example extractor;
// `object TransformStream {` is inferred from the file name and the otherwise
// unmatched closing brace at the end of this listing.
object TransformStream {


  def row[Tag](rowTransformer: RowTransformer)
              (implicit ec: ExecutionContext,
               parallelism: Parallelism): Flow[(StreamTransformRowRequest, Tag), (Try[Option[Row]], Tag), NotUsed] = {
    Flow[(StreamTransformRowRequest, Tag)].mapAsyncUnordered(parallelism) {
      case (request, tag) =>
        Future {
          val result = request.row.map(rowTransformer.transformOption)

          (result, tag)
        }
    }
  }
} 
Example 60
Source File: Flows.scala    From BusFloatingData   with Apache License 2.0    5 votes
package de.nierbeck.floating.data.server

import akka.actor.{ActorRef, Props}
import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.stream.FlowShape
import akka.stream.scaladsl.{Flow, GraphDSL, Merge, Source}
import de.nierbeck.floating.data.domain.Vehicle
import GraphDSL.Implicits._
import de.nierbeck.floating.data.server._
import de.nierbeck.floating.data.server.actors.websocket._


object Flows {

  def graphFlowWithStats(router: ActorRef): Flow[Message, Message, _] = {
    Flow.fromGraph(GraphDSL.create() { implicit builder =>


      // create an actor source
      val source = Source.actorPublisher[String](VehiclePublisher.props(router))

      // Graph elements we'll use
      val merge = builder.add(Merge[String](2))
      val filter = builder.add(Flow[String].filter(_ => false))

      // get BBox from request and send it to route, return nothing ...
      val mapMsgToString = builder.add(Flow[Message].map[String] {
        case TextMessage.Strict(msg) => {
          println(s"received message: $msg")
          if (msg.contains("close")) {
            router ! msg
          } else if (msg.contains("spark")) {
            router ! SPARK
          } else if (msg.contains("flink")) {
            router ! FLINK
          } else {
            val bbox = toBoundingBox(msg)
            println(s"transformedt to bbox: $bbox")
            router ! bbox
          }
          ""
        }
      })
      //outgoing message ...
      val mapStringToMsg = builder.add(Flow[String].map[Message](x => TextMessage.Strict(x)))

      //add source to flow
      val vehiclesSource = builder.add(source)

      // connect the graph
      mapMsgToString ~> filter ~> merge // this part of the merge will never provide msgs
      vehiclesSource ~> merge ~> mapStringToMsg

      // expose ports
      FlowShape(mapMsgToString.in, mapStringToMsg.out)
    })
  }

} 
Example 61
Source File: bakerServiceImpl.scala    From Learn-Scala-Programming   with MIT License    5 votes
package ch15

import akka.NotUsed
import akka.stream.{Attributes, DelayOverflowStrategy}
import akka.stream.scaladsl.{BidiFlow, Flow, Source}
import ch15.model._
import com.lightbend.lagom.scaladsl.api._

import scala.concurrent.duration._
import scala.concurrent.Future

import play.api.Logger

class BakerServiceImpl extends BakerService {

  private val logger = Logger("Baker")

  override def bake: ServiceCall[Source[RawCookies, NotUsed], Source[ReadyCookies, NotUsed]] = ServiceCall { dough =>
    logger.info(s"Baking: $dough")
    Future.successful(dough.via(bakerFlow))
  }

  private val bakerFlow: Flow[RawCookies, ReadyCookies, NotUsed] =
    Baker.bakeFlow.join(Oven.bakeFlow)
}

object Baker {
  private val logger = Logger("BakerFlow")

  def bakeFlow: BidiFlow[RawCookies, RawCookies, ReadyCookies, ReadyCookies, NotUsed] = BidiFlow.fromFlows(inFlow, outFlow)

  private val inFlow = Flow[RawCookies]
    .flatMapConcat(extractFromBox)
    .grouped(Oven.ovenSize)
    .map(_.reduce(_ + _))

  private def outFlow = Flow[ReadyCookies].map { c =>
    logger.info(s"Sending to manager: $c")
    c
  }

  private def extractFromBox(c: RawCookies) = {
    logger.info(s"Extracting: $c")
    Source(List.fill(c.count)(RawCookies(1)))
  }
}

object Oven {
  private val logger = Logger("Oven")

  val ovenSize = 12
  private val bakingTime = 2.seconds

  def bakeFlow: Flow[RawCookies, ReadyCookies, NotUsed] =
    Flow[RawCookies]
      .map(bake)
      .delay(bakingTime, DelayOverflowStrategy.backpressure)
      .addAttributes(Attributes.inputBuffer(1, 1))

  private def bake(c: RawCookies): ReadyCookies = {
    logger.info(s"Baked: $c")
    assert(c.count == ovenSize)
    ReadyCookies(c.count)
  }
} 
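One detail worth noting: Oven.bake asserts it always receives exactly ovenSize cookies, and that holds only because Baker's inbound flow regroups the stream into dozens (grouped(Oven.ovenSize) followed by the reduce). A stream that completes on a partial batch would still emit the remainder group and trip the assertion.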
Example 62
Source File: Balancer.scala    From Learn-Scala-Programming   with MIT License    5 votes
package ch13

import akka.NotUsed
import akka.stream.FlowShape
import akka.stream.scaladsl.{Balance, Flow, GraphDSL, Merge}


object Balancer {
  def apply[In, Out](subFlow: Flow[In, Out, Any],
                     count: Int): Flow[In, Out, NotUsed] = {

    Flow.fromGraph(createGraph(subFlow, count))
  }

  import akka.stream.scaladsl.GraphDSL
  import GraphDSL.Implicits._

  def createGraph[Out, In](subFlow: Flow[In, Out, Any], count: Int) = {
    val balanceBlock  = Balance[In](count, waitForAllDownstreams = false)
    val mergeBlock = Merge[Out](count, eagerComplete = false)
    GraphDSL.create() { implicit builder ⇒
      val balancer = builder.add(balanceBlock)
      val merge = builder.add(mergeBlock)

      for (_ ← 1 to count) balancer ~> subFlow ~> merge

      FlowShape(balancer.in, merge.out)
    }
  }
} 
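A usage sketch for the Balancer (the demo harness and its names are assumptions, not part of the book's listing). Marking the sub-flow async gives each copy its own actor, so the slow step actually runs in parallel:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Sink, Source}

object BalancerDemo extends App {
  implicit val system: ActorSystem = ActorSystem("balancer-demo")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  // Each element sleeps 100 ms; four async copies keep the stream moving.
  val slowDouble = Flow[Int].map { i => Thread.sleep(100); i * 2 }.async
  Source(1 to 20).via(Balancer(slowDouble, count = 4)).runWith(Sink.foreach(println))
}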
Example 63
Source File: AkkaConnectionBackend.scala    From drunk   with Apache License 2.0    5 votes
package com.github.jarlakxen.drunk.backend

import akka.actor.ActorSystem
import akka.http.scaladsl.Http.OutgoingConnection
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Sink, Source}

import scala.collection.immutable
import scala.concurrent.{ExecutionContext, Future}

class AkkaConnectionBackend  private[AkkaConnectionBackend] (
  uri: Uri,
  flow: Flow[HttpRequest, HttpResponse, Future[OutgoingConnection]],
  headers: immutable.Seq[HttpHeader]
)(override implicit val as: ActorSystem, override implicit val mat: ActorMaterializer)
   extends AkkaBackend {

  def send(body: String): Future[(Int, String)] = {
    implicit val ec: ExecutionContext = as.dispatcher

    val req = HttpRequest(
      method = HttpMethods.POST,
      uri = uri,
      headers = headers,
      entity = HttpEntity(ContentTypes.`application/json`, body)
    )

    val res = Source.single(req).via(flow).runWith(Sink.head)

    res.flatMap { hr =>
      val code = hr.status.intValue()

      val charsetFromHeaders = encodingFromContentType(hr.entity.contentType.toString).getOrElse("utf-8")
      val decodedResponse = decodeResponse(hr)
      val stringBody = bodyToString(decodedResponse, charsetFromHeaders)

      if (code >= 200 && code < 300) {
        stringBody.map { body =>
          hr.discardEntityBytes()
          (code, body)
        }
      } else {
        stringBody.flatMap { body =>
          hr.discardEntityBytes()
          Future.failed(new RuntimeException(s"${uri.toString} returned $code with body: $body"))
        }
      }
    }
  }

}

object AkkaConnectionBackend {

  def apply(uri: Uri,
             flow: Flow[HttpRequest, HttpResponse, Future[OutgoingConnection]],
             headers: immutable.Seq[HttpHeader] = Nil
           )( implicit  as: ActorSystem,  mat: ActorMaterializer): AkkaConnectionBackend =
    new AkkaConnectionBackend(uri, flow, headers)

} 
Example 64
Source File: LowLevelServer.scala    From akka-http-test   with Apache License 2.0    5 votes
package com.github.dnvriend.component.lowlevelserver

import akka.NotUsed
import akka.actor.{ ActorSystem, Props }
import akka.event.{ Logging, LoggingAdapter }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.pattern.ask
import akka.stream.scaladsl.{ Flow, Sink, Source }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.util.Timeout
import com.github.dnvriend.component.lowlevelserver.dto.{ Person, PersonWithId }
import com.github.dnvriend.component.lowlevelserver.marshaller.Marshaller
import com.github.dnvriend.component.lowlevelserver.repository.PersonRepository
import spray.json.{ DefaultJsonProtocol, _ }

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }

class LowLevelServer(implicit val system: ActorSystem, mat: Materializer, ec: ExecutionContext, log: LoggingAdapter, timeout: Timeout) extends DefaultJsonProtocol with Marshaller {
  val personDb = system.actorOf(Props[PersonRepository])

  def debug(t: Any)(implicit log: LoggingAdapter = null): Unit =
    if (Option(log).isEmpty) println(t) else log.debug(t.toString)

  def http200Okay(req: HttpRequest): HttpResponse =
    HttpResponse(StatusCodes.OK)

  def http200AsyncOkay(req: HttpRequest): Future[HttpResponse] =
    Future(http200Okay(req))

  val http200OkayFlow: Flow[HttpRequest, HttpResponse, NotUsed] = Flow[HttpRequest].map { req =>
    HttpResponse(StatusCodes.OK)
  }

  val serverSource: Source[Http.IncomingConnection, Future[Http.ServerBinding]] =
    Http().bind(interface = "localhost", port = 8080)

  val binding: Future[Http.ServerBinding] = serverSource.to(Sink.foreach { conn =>
    //    conn.handleWith(http200OkayFlow)
    //    conn.handleWithSyncHandler(http200Okay)
    //    conn.handleWithAsyncHandler(http200AsyncOkay, 8)
    conn.handleWithAsyncHandler(personRequestHandler)
  }).run()

  def personRequestHandler(req: HttpRequest): Future[HttpResponse] = req match {
    case HttpRequest(HttpMethods.GET, Uri.Path("/api/person"), _, _, _) => for {
      xs <- (personDb ? "findAll").mapTo[List[PersonWithId]]
      entity = HttpEntity(ContentTypes.`application/json`, xs.toJson.compactPrint)
    } yield HttpResponse(StatusCodes.OK, entity = entity)
    case HttpRequest(HttpMethods.POST, Uri.Path("/api/person"), _, ent, _) => for {
      strictEntity <- ent.toStrict(1.second)
      person <- (personDb ? strictEntity.data.utf8String.parseJson.convertTo[Person]).mapTo[PersonWithId]
    } yield HttpResponse(StatusCodes.OK, entity = person.toJson.compactPrint)
    case req =>
      req.discardEntityBytes()
      Future.successful(HttpResponse(StatusCodes.NotFound))
  }
}

object LowLevelServerLauncher extends App with DefaultJsonProtocol {
  // setting up some machinery
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val timeout: Timeout = Timeout(10.seconds)

  new LowLevelServer()
} 
Example 65
Source File: PostcodeClient.scala    From akka-http-test   with Apache License 2.0    5 votes
package com.github.dnvriend.component.webservices.postcode

import akka.NotUsed
import akka.actor.ActorSystem
import akka.event.LoggingAdapter
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
import akka.stream.Materializer
import akka.stream.scaladsl.Flow
import com.github.dnvriend.component.webservices.generic.HttpClient
import spray.json.DefaultJsonProtocol

import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try
import scala.util.matching.Regex

case class Address(
  street: String,
  houseNumber: Int,
  houseNumberAddition: String,
  postcode: String,
  city: String,
  municipality: String,
  province: String,
  rdX: Option[Int],
  rdY: Option[Int],
  latitude: Double,
  longitude: Double,
  bagNumberDesignationId: String,
  bagAddressableObjectId: String,
  addressType: String,
  purposes: Option[List[String]],
  surfaceArea: Int,
  houseNumberAdditions: List[String]
)

trait Marshallers extends DefaultJsonProtocol {
  implicit val addressJsonFormat = jsonFormat17(Address)
}

case class GetAddressRequest(zip: String, houseNumber: String)

trait PostcodeClient {
  def address(postcode: String, houseNumber: Int): Future[Option[Address]]

  def address[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Flow[(GetAddressRequest, T), (Option[Address], T), NotUsed]
}

object PostcodeClient {
  import spray.json._
  val ZipcodeWithoutSpacePattern: Regex = """([1-9][0-9]{3})([A-Za-z]{2})""".r
  val ZipcodeWithSpacePattern: Regex = """([1-9][0-9]{3})[\s]([A-Za-z]{2})""".r

  def mapToAddress(json: String)(implicit reader: JsonReader[Address]): Option[Address] =
    Try(json.parseJson.convertTo[Address]).toOption

  def responseToString(resp: HttpResponse)(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Future[String] =
    HttpClient.responseToString(resp)

  def getAddressRequestFlow[T]: Flow[(GetAddressRequest, T), (HttpRequest, T), NotUsed] =
    Flow[(GetAddressRequest, T)].map { case (request, id) => (HttpClient.mkGetRequest(s"/rest/addresses/${request.zip}/${request.houseNumber}/"), id) }

  def mapResponseToAddressFlow[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext, reader: JsonReader[Address]): Flow[(Try[HttpResponse], T), (Option[Address], T), NotUsed] =
    HttpClient.responseToString[T].map { case (json, id) => (mapToAddress(json), id) }
  
  def normalizeZipcode(zipcode: String): Option[String] = zipcode.toUpperCase match {
    case ZipcodeWithoutSpacePattern(numbers, letters) => Option(s"$numbers$letters")
    case ZipcodeWithSpacePattern(numbers, letters)    => Option(s"$numbers$letters")
    case _                                            => None
  }

  def apply()(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext, log: LoggingAdapter) = new PostcodeClientImpl
}

class PostcodeClientImpl()(implicit val system: ActorSystem, val mat: Materializer, val ec: ExecutionContext, val log: LoggingAdapter) extends PostcodeClient with Marshallers {
  import PostcodeClient._
  private val client = HttpClient("postcode")

  override def address(postcode: String, houseNumber: Int): Future[Option[Address]] =
    normalizeZipcode(postcode) match {
      case Some(zip) => client.get(s"/rest/addresses/$zip/$houseNumber/")
        .flatMap(responseToString).map(mapToAddress)
      case None => Future.successful(None)
    }

  override def address[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Flow[(GetAddressRequest, T), (Option[Address], T), NotUsed] =
    getAddressRequestFlow[T]
      .via(client.cachedHostConnectionFlow[T])
      .via(mapResponseToAddressFlow[T])
} 
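A usage sketch for the non-streaming variant (endpoint configuration for the "postcode" HttpClient is assumed to come from the application config):

import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.ActorMaterializer
import scala.concurrent.ExecutionContext

object PostcodeDemo extends App {
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: ActorMaterializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val log = Logging(system, this.getClass)

  PostcodeClient().address("1234AB", 10).foreach {
    case Some(address) => println(s"${address.street} ${address.houseNumber}, ${address.city}")
    case None          => println("no address found")
  }
}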
Example 66
Source File: WeatherClient.scala    From akka-http-test   with Apache License 2.0    5 votes
package com.github.dnvriend.component.webservices.weather

import akka.NotUsed
import akka.actor.ActorSystem
import akka.event.LoggingAdapter
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
import akka.stream.Materializer
import akka.stream.scaladsl.Flow
import com.github.dnvriend.component.webservices.generic.HttpClient
import spray.json.DefaultJsonProtocol

import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try

case class Wind(speed: Double, deg: Double)
case class Main(temp: Double, temp_min: Double, temp_max: Double, pressure: Double, sea_level: Option[Double], grnd_level: Option[Double], humidity: Int)
case class Cloud(all: Int)
case class Weather(id: Int, main: String, description: String, icon: String)
case class Sys(message: Double, country: String, sunrise: Long, sunset: Long)
case class Coord(lon: Double, lat: Double)
case class WeatherResult(coord: Coord, sys: Sys, weather: List[Weather], base: String, main: Main, wind: Wind, clouds: Cloud, dt: Long, id: Int, name: String, cod: Int)

trait Marshallers extends DefaultJsonProtocol {
  implicit val windJsonFormat = jsonFormat2(Wind)
  implicit val mainJsonFormat = jsonFormat7(Main)
  implicit val cloudJsonFormat = jsonFormat1(Cloud)
  implicit val weatherJsonFormat = jsonFormat4(Weather)
  implicit val sysJsonFormat = jsonFormat4(Sys)
  implicit val coordJsonFormat = jsonFormat2(Coord)
  implicit val weatherResultJsonFormat = jsonFormat11(WeatherResult)
}

case class GetWeatherRequest(zip: String, country: String)

trait OpenWeatherApi {
  def getWeather(zip: String, country: String): Future[Option[WeatherResult]]

  def getWeather[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Flow[(GetWeatherRequest, T), (Option[WeatherResult], T), NotUsed]
}

object OpenWeatherApi {
  import spray.json._
  def apply()(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext, log: LoggingAdapter) = new OpenWeatherApiImpl

  def mapResponseToWeatherResult(json: String)(implicit reader: JsonReader[WeatherResult]): Option[WeatherResult] =
    Try(json.parseJson.convertTo[WeatherResult]).toOption

  def responseToString(resp: HttpResponse)(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Future[String] =
    HttpClient.responseToString(resp)

  def getWeatherRequestFlow[T]: Flow[(GetWeatherRequest, T), (HttpRequest, T), NotUsed] =
    Flow[(GetWeatherRequest, T)].map { case (request, id) => (HttpClient.mkGetRequest(s"/data/2.5/weather?zip=${request.zip},${request.country}"), id) }

  def mapResponseToWeatherResultFlow[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext, reader: JsonReader[WeatherResult]): Flow[(Try[HttpResponse], T), (Option[WeatherResult], T), NotUsed] =
    HttpClient.responseToString[T].map { case (json, id) => (mapResponseToWeatherResult(json), id) }
}

class OpenWeatherApiImpl()(implicit val system: ActorSystem, val ec: ExecutionContext, val mat: Materializer, val log: LoggingAdapter) extends OpenWeatherApi with Marshallers {
  import OpenWeatherApi._

  private val client = HttpClient("weather")

  override def getWeather(zip: String, country: String): Future[Option[WeatherResult]] =
    client.get(s"/data/2.5/weather?zip=$zip,$country").
      flatMap(responseToString)
      .map(mapResponseToWeatherResult)

  override def getWeather[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Flow[(GetWeatherRequest, T), (Option[WeatherResult], T), NotUsed] =
    getWeatherRequestFlow[T]
      .via(client.cachedHostConnectionFlow[T])
      .via(mapResponseToWeatherResultFlow[T])
} 
Example 67
Source File: JsonStreamingRoute.scala    From akka-http-test   with Apache License 2.0    5 votes
package com.github.dnvriend.component.simpleserver.route

import akka.event.LoggingAdapter
import akka.http.scaladsl.common.{ EntityStreamingSupport, JsonEntityStreamingSupport }
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.server.{ Directives, Route }
import akka.stream.Materializer
import akka.stream.scaladsl.Flow
import akka.util.ByteString
import com.github.dnvriend.component.repository.PersonRepository
import com.github.dnvriend.component.simpleserver.dto.http.Person
import com.github.dnvriend.component.simpleserver.marshaller.Marshallers

import scala.concurrent.ExecutionContext

object JsonStreamingRoute extends Directives with SprayJsonSupport with Marshallers {
  val start = ByteString.empty
  val sep = ByteString("\n")
  val end = ByteString.empty

  implicit val jsonStreamingSupport: JsonEntityStreamingSupport = EntityStreamingSupport.json()
    .withFramingRenderer(Flow[ByteString].intersperse(start, sep, end))
    .withParallelMarshalling(parallelism = 8, unordered = true)

  def route(dao: PersonRepository)(implicit mat: Materializer, ec: ExecutionContext): Route =
    path("stream" / IntNumber) { numberOfPersons =>
      (get & pathEnd) {
        complete(dao.people(numberOfPersons))
      }
    } ~
      (post & path("stream") & entity(asSourceOf[Person])) { people =>
        val total = people.log("people").runFold(0) { case (c, _) => c + 1 }
        complete(total.map(n => s"Received $n number of person"))
      }
} 
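With this streaming support in scope, the GET endpoint renders its Person source as newline-delimited JSON (empty start and end markers, a newline separator), marshalling up to 8 elements in parallel without preserving order, while the POST endpoint consumes an incoming stream of Person JSON and folds it into a count.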
Example 68
Source File: EndpointsSettings.scala    From endpoints4s   with MIT License    5 votes
package endpoints4s.akkahttp.client

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpEntity, HttpRequest, HttpResponse, Uri}
import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.Try

final case class EndpointsSettings(
    requestExecutor: AkkaHttpRequestExecutor,
    baseUri: Uri = Uri("/"),
    toStrictTimeout: FiniteDuration = 2.seconds,
    stringContentExtractor: HttpEntity.Strict => String = _.data.utf8String
)

trait AkkaHttpRequestExecutor {
  def apply(request: HttpRequest): Future[HttpResponse]
}

object AkkaHttpRequestExecutor {
  def cachedHostConnectionPool(host: String, port: Int)(implicit
      system: ActorSystem,
      materializer: Materializer
  ): AkkaHttpRequestExecutor =
    default(Http().cachedHostConnectionPool[Int](host, port))

  def default(
      poolClientFlow: Flow[
        (HttpRequest, Int),
        (Try[HttpResponse], Int),
        Http.HostConnectionPool
      ]
  )(implicit materializer: Materializer): AkkaHttpRequestExecutor =
    new AkkaHttpRequestExecutor {
      override def apply(request: HttpRequest): Future[HttpResponse] =
        Source
          .single(request -> 1)
          .via(poolClientFlow)
          .map(_._1.get)
          .runWith(Sink.head)
    }
} 
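A construction sketch (host, port, and base path are assumptions):

import akka.actor.ActorSystem
import akka.http.scaladsl.model.Uri
import akka.stream.ActorMaterializer

object EndpointsSettingsSketch {
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: ActorMaterializer = ActorMaterializer()

  // Requests are executed through a cached host connection pool for example.com.
  val settings = EndpointsSettings(
    requestExecutor = AkkaHttpRequestExecutor.cachedHostConnectionPool("example.com", 80),
    baseUri = Uri("/api"))
}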
Example 69
Source File: TracedFlowUnorderedAsyncImplicits.scala    From money   with Apache License 2.0    5 votes
package com.comcast.money.akka.stream

import akka.stream.scaladsl.Flow
import com.comcast.money.akka.TraceContext
import com.comcast.money.akka.stream.DefaultStreamSpanKeyCreators.DefaultFlowSpanKeyCreator

import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag



    def tracedMapAsyncUnordered(parallelism: Int)(f: In => Future[Out]): Flow[TracedIn, TracedOut, _] =
      Flow[TracedIn].mapAsyncUnordered[TracedOut](parallelism) {
        (tuple: TracedIn) =>
          val (in, traceContext) = tuple
          traceContext.tracer.startSpan(fskc.flowToKey(flow))
          f(in) map {
            out =>
              traceContext.tracer.stopSpan()
              (out, traceContext)
          }
      }
  }

} 
Example 70
Source File: Test15.scala    From incubator-retired-gearpump   with Apache License 2.0    5 votes
package org.apache.gearpump.akkastream.example

import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl.{Balance, Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source}
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption}
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._


object Test15 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override val options: Array[(String, CLIOption[Any])] = Array(
    "gearpump" -> CLIOption[Boolean]("<boolean>", required = false, defaultValue = Some(false))
  )

  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val config = parse(args)
    implicit val system = ActorSystem("Test15", akkaConf)
    implicit val materializer: ActorMaterializer =
      if (config.getBoolean("gearpump")) GearpumpMaterializer()
      else ActorMaterializer(ActorMaterializerSettings(system).withAutoFusing(false))
    import akka.stream.scaladsl.GraphDSL.Implicits._
    RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
      val A = builder.add(Source.single(0)).out
      val B = builder.add(Broadcast[Int](2))
      val C = builder.add(Merge[Int](2).named("C"))
      val D = builder.add(Flow[Int].map(_ + 1).named("D"))
      val E = builder.add(Balance[Int](2).named("E"))
      val F = builder.add(Merge[Int](2).named("F"))
      val G = builder.add(Sink.foreach(println).named("G")).in

      C <~ F
      A ~> B ~> C ~> F
      B ~> D ~> E ~> F
      E ~> G

      ClosedShape
    }).run()

    Await.result(system.whenTerminated, 60.minutes)
  }
  // scalastyle:on println
} 
Example 71
Source File: LogJson.scala    From 006877   with MIT License    5 votes
package aia.stream

import java.nio.file.{ Files, Path }
import java.io.File
import java.time.ZonedDateTime

import scala.concurrent.duration._
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.util.{ Success, Failure }

import akka.Done
import akka.actor._
import akka.util.ByteString

import akka.stream.{ ActorAttributes, ActorMaterializer, IOResult }
import akka.stream.scaladsl.JsonFraming
import akka.stream.scaladsl.{ FileIO, BidiFlow, Flow, Framing, Keep, Sink, Source }

import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import spray.json._

object LogJson extends EventMarshalling 
    with NotificationMarshalling 
    with MetricMarshalling {
  def textInFlow(maxLine: Int) = {
    Framing.delimiter(ByteString("\n"), maxLine)
    .map(_.decodeString("UTF8"))
    .map(LogStreamProcessor.parseLineEx)
    .collect { case Some(e) => e }
  }

  def jsonInFlow(maxJsonObject: Int) = {
    JsonFraming.objectScanner(maxJsonObject) 
      .map(_.decodeString("UTF8").parseJson.convertTo[Event])
  }

  def jsonFramed(maxJsonObject: Int) =
    JsonFraming.objectScanner(maxJsonObject) 

  val jsonOutFlow = Flow[Event].map { event => 
    ByteString(event.toJson.compactPrint)
  }

  val notifyOutFlow = Flow[Summary].map { ws => 
    ByteString(ws.toJson.compactPrint)
  }

  val metricOutFlow = Flow[Metric].map { m => 
    ByteString(m.toJson.compactPrint)
  }

  val textOutFlow = Flow[Event].map{ event => 
    ByteString(LogStreamProcessor.logLine(event))
  }

  def logToJson(maxLine: Int) = {
    BidiFlow.fromFlows(textInFlow(maxLine), jsonOutFlow)
  }

  def jsonToLog(maxJsonObject: Int) = {
    BidiFlow.fromFlows(jsonInFlow(maxJsonObject), textOutFlow)
  }

  def logToJsonFlow(maxLine: Int) = {
    logToJson(maxLine).join(Flow[Event])
  }

  def jsonToLogFlow(maxJsonObject: Int) = {
    jsonToLog(maxJsonObject).join(Flow[Event])
  }
} 
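A usage sketch for the joined bidi flows above (the paths and the size limit are assumptions; an implicit materializer is expected in scope, and FileIO is already imported in this file):

import java.nio.file.Paths

// JSON events on disk -> plain text log lines on disk.
FileIO.fromPath(Paths.get("/tmp/events.json"))
  .via(LogJson.jsonToLogFlow(maxJsonObject = 1024 * 1024))
  .runWith(FileIO.toPath(Paths.get("/tmp/events.log")))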
Example 72
Source File: GrpcAkkaStreamsClientCalls.scala    From grpcakkastream   with MIT License    5 votes
package grpc.akkastreams

import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}

import akka.NotUsed
import akka.stream.scaladsl.{Flow, Source}
import com.trueaccord.scalapb.grpc.Grpc
import io.grpc.{ClientCall, Metadata, Status}
import io.grpc.stub._

object GrpcAkkaStreamsClientCalls {

  def unaryFlow[I, O](call: ClientCall[I, O]): Flow[I, O, NotUsed] =
    Flow[I].flatMapConcat(request =>
      Source.fromFuture(
        Grpc.guavaFuture2ScalaFuture(
          ClientCalls.futureUnaryCall(call, request)
        )
      )
    )

  def serverStreamingFlow[I, O](call: ClientCall[I, O]): Flow[I, O, NotUsed] =
    Flow.fromGraph(
      new GrpcGraphStage[I, O](outputObserver => {
        val out = outputObserver.asInstanceOf[ClientResponseObserver[I, O]]
        val in = new ClientCallStreamObserver[I] {
          val halfClosed = new AtomicBoolean(false)
          val onReadyHandler = new AtomicReference[Option[Runnable]](None)
          val listener = new ClientCall.Listener[O] {
            override def onClose(status: Status, trailers: Metadata): Unit =
              status.getCode match {
                case Status.Code.OK => out.onCompleted()
                case _ => out.onError(status.asException(trailers))
              }
            override def onMessage(message: O): Unit =
              out.onNext(message)
            override def onReady(): Unit =
              onReadyHandler.get().foreach(_.run())
          }
          call.start(listener, new Metadata())

          override def cancel(message: String, cause: Throwable): Unit =
            call.cancel(message, cause)
          override def setOnReadyHandler(onReadyHandler: Runnable): Unit =
            this.onReadyHandler.set(Some(onReadyHandler))
          override def request(count: Int): Unit = call.request(count)
          override def disableAutoInboundFlowControl(): Unit = ()
          override def isReady: Boolean = !halfClosed.get() || call.isReady
          override def setMessageCompression(enable: Boolean): Unit =
            call.setMessageCompression(enable)
          override def onError(t: Throwable): Unit =
            call.cancel("Cancelled by client with StreamObserver.onError()", t)
          override def onCompleted(): Unit = ()
          override def onNext(request: I): Unit = {
            call.sendMessage(request)
            halfClosed.set(true)
            call.halfClose()
          }
        }
        out.beforeStart(in)
        in
      })
    )

  def clientStreamingFlow[I, O](call: ClientCall[I, O]): Flow[I, O, NotUsed] =
    Flow.fromGraph(new GrpcGraphStage[I, O](ClientCalls.asyncClientStreamingCall(call, _)))

  def bidiStreamingFlow[I, O](call: ClientCall[I, O]): Flow[I, O, NotUsed] =
    Flow.fromGraph(new GrpcGraphStage[I, O](ClientCalls.asyncBidiStreamingCall(call, _)))
} 
Example 73
Source File: GrpcAkkaStreamsServerCalls.scala    From grpcakkastream   with MIT License    5 votes
package grpc.akkastreams

import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Sink, Source}
import io.grpc.ServerCallHandler
import io.grpc.stub.{CallStreamObserver, ServerCalls, StreamObserver}

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success}

object GrpcAkkaStreamsServerCalls {

  def unaryCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] = ServerCalls.asyncUnaryCall(
    new ServerCalls.UnaryMethod[I, O] {
      override def invoke(request: I, responseObserver: StreamObserver[O]) =
        Source
          .single(request)
          .via(service)
          .runForeach(responseObserver.onNext)
          .onComplete {
            case Success(_) => responseObserver.onCompleted()
            case Failure(t) => responseObserver.onError(t)
          }(mat.executionContext)
    }
  )

  def serverStreamingCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] =
    ServerCalls.asyncServerStreamingCall(
      new ServerCalls.ServerStreamingMethod[I, O] {
        override def invoke(request: I, responseObserver: StreamObserver[O]) =
          Source
            .single(request)
            .via(service)
            .runWith(Sink.fromGraph(new GrpcSinkStage[O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            )))
      }
    )

  def clientStreamingCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] = ServerCalls.asyncClientStreamingCall(
    new ServerCalls.ClientStreamingMethod[I, O] {
      override def invoke(responseObserver: StreamObserver[O]): StreamObserver[I] =
      // blocks until the GraphStage is fully initialized
        Await.result(
          Source
            .fromGraph(new GrpcSourceStage[I, O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))
            .via(service)
            .to(Sink.fromGraph(new GrpcSinkStage[O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))).run(),
          Duration.Inf
        )
    }
  )

  def bidiStreamingCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] = ServerCalls.asyncBidiStreamingCall(
    new ServerCalls.BidiStreamingMethod[I, O] {
      override def invoke(responseObserver: StreamObserver[O]): StreamObserver[I] =
      // blocks until the GraphStage is fully initialized
        Await.result(
          Source
            .fromGraph(new GrpcSourceStage[I, O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))
            .via(service)
            .to(Sink.fromGraph(new GrpcSinkStage[O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))).run(),
          Duration.Inf
        )
    }
  )
} 
Example 74
Source File: ReverseProxy.scala    From introduction-to-akkahttp   with Apache License 2.0    5 votes
package com.shashank.akkahttp.basic.serving

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.{Host, `Access-Control-Allow-Origin`}
import akka.stream.scaladsl.Flow
import akka.stream.{ActorMaterializer, Materializer}


object ReverseProxy {

  def main(args: Array[String]) {
    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat:Materializer = ActorMaterializer()

    val redirectHost = "localhost"
    val redirectPort = 8090

    val requestFlow = Flow.fromFunction[HttpRequest, HttpRequest]( request => {
      request
        .withUri(request.uri.withAuthority(redirectHost, redirectPort))
        .mapHeaders(headers => headers.filterNot(_.lowercaseName() == Host.lowercaseName))
        .addHeader(Host(redirectHost, redirectPort))
    })

    val outgoingConnection = Http().outgoingConnection(redirectHost, redirectPort)

    val responseFlow = Flow.fromFunction[HttpResponse, HttpResponse]( response => {
      response.withHeaders(`Access-Control-Allow-Origin`.*)
    })

    Http().bindAndHandle(requestFlow via outgoingConnection via responseFlow, "localhost", 8080)
  }

} 
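A design note: Http().outgoingConnection returns a flow that opens a fresh TCP connection each time it is materialized, so every incoming client connection gets its own upstream connection here. That keeps the demo simple; a production proxy would more likely go through Http().cachedHostConnectionPool so upstream connections are reused.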
Example 75
Source File: WorkingWithGraphsApplication.scala    From Akka-Cookbook   with MIT License    5 votes
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl.{Balance, Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source}
import scala.concurrent.duration._
import scala.util.Random


object WorkingWithGraphsApplication extends App {

  implicit val actorSystem = ActorSystem("WorkingWithGraphs")
  implicit val actorMaterializer = ActorMaterializer()

  trait MobileMsg {
    def id = Random.nextInt(1000)
    def toGenMsg(origin: String) = GenericMsg(id, origin)
  }
  class AndroidMsg extends MobileMsg
  class IosMsg extends MobileMsg
  case class GenericMsg(id: Int, origin: String)

  val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
    import GraphDSL.Implicits._

    //Sources
    val androidNotification = Source.tick(2 seconds, 500 millis, new AndroidMsg)
    val iOSNotification = Source.tick(700 millis, 600 millis, new IosMsg)

    //Flow
    val groupAndroid = Flow[AndroidMsg].map(_.toGenMsg("ANDROID")).groupedWithin(5, 5 seconds).async
    val groupIos = Flow[IosMsg].map(_.toGenMsg("IOS")).groupedWithin(5, 5 seconds).async
    def counter = Flow[Seq[GenericMsg]].via(new StatefulCounterFlow())
    def mapper = Flow[Seq[GenericMsg]].mapConcat(_.toList)

    //Junctions
    val aBroadcast = builder.add(Broadcast[Seq[GenericMsg]](2))
    val iBroadcast = builder.add(Broadcast[Seq[GenericMsg]](2))
    val balancer = builder.add(Balance[Seq[GenericMsg]](2))
    val notificationMerge = builder.add(Merge[Seq[GenericMsg]](2))
    val genericNotificationMerge = builder.add(Merge[GenericMsg](2))

    def counterSink(s: String) = Sink.foreach[Int](x => println(s"$s: [$x]"))

    //Graph
    androidNotification ~> groupAndroid ~> aBroadcast ~> counter ~> counterSink("Android")
                                           aBroadcast ~> notificationMerge
                                           iBroadcast ~> notificationMerge
    iOSNotification     ~> groupIos     ~> iBroadcast ~> counter ~> counterSink("Ios")

    notificationMerge ~> balancer ~> mapper.async ~> genericNotificationMerge
                         balancer ~> mapper.async ~> genericNotificationMerge

    genericNotificationMerge ~> Sink.foreach(println)

    ClosedShape
  })

  graph.run()
} 
Example 76
Source File: ProcessingKafkaApplication.scala    From Akka-Cookbook   with MIT License    5 votes
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ConsumerSettings, ProducerSettings, Subscriptions}
import akka.stream.{ActorMaterializer, ClosedShape}
import akka.stream.scaladsl.{Flow, GraphDSL, RunnableGraph, Sink, Source}
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer, StringDeserializer, StringSerializer}

import scala.concurrent.duration._

object ProcessingKafkaApplication extends App {
  implicit val actorSystem = ActorSystem("SimpleStream")
  implicit val actorMaterializer = ActorMaterializer()

  val bootstrapServers = "localhost:9092"
  val kafkaTopic = "akka_streams_topic"
  val partition = 0
  val subscription = Subscriptions.assignment(new TopicPartition(kafkaTopic, partition))

  val consumerSettings = ConsumerSettings(actorSystem, new ByteArrayDeserializer, new StringDeserializer)
    .withBootstrapServers(bootstrapServers)
    .withGroupId("akka_streams_group")
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

  val producerSettings = ProducerSettings(actorSystem, new ByteArraySerializer, new StringSerializer)
    .withBootstrapServers(bootstrapServers)

  val runnableGraph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
    import GraphDSL.Implicits._

    val tickSource = Source.tick(0 seconds, 5 seconds, "Hello from Akka Streams using Kafka!")
    val kafkaSource = Consumer.plainSource(consumerSettings, subscription)
    val kafkaSink = Producer.plainSink(producerSettings)
    val printlnSink = Sink.foreach(println)

    val mapToProducerRecord = Flow[String].map(elem => new ProducerRecord[Array[Byte], String](kafkaTopic, elem))
    val mapFromConsumerRecord = Flow[ConsumerRecord[Array[Byte], String]].map(record => record.value())

    tickSource  ~> mapToProducerRecord   ~> kafkaSink
    kafkaSource ~> mapFromConsumerRecord ~> printlnSink

    ClosedShape
  })

  runnableGraph.run()
} 
Example 77
Source File: PipeliningParallelizing.scala    From Akka-Cookbook   with MIT License    5 votes
package com.packt.chapter8

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, FlowShape}
import akka.stream.scaladsl.{Balance, Flow, GraphDSL, Merge, Sink, Source}

import scala.util.Random

trait PipeliningParallelizing extends App {

  implicit val actorSystem = ActorSystem("PipeliningParallelizing")
  implicit val actorMaterializer = ActorMaterializer()

  case class Wash(id: Int)
  case class Dry(id: Int)
  case class Done(id: Int)

  val tasks = (1 to 5).map(Wash)

  def washStage = Flow[Wash].map(wash => {
    val sleepTime = Random.nextInt(3) * 1000
    println(s"Washing ${wash.id}. It will take $sleepTime milliseconds.")
    Thread.sleep(sleepTime)
    Dry(wash.id)
  })

  def dryStage = Flow[Dry].map(dry => {
    val sleepTime = Random.nextInt(3) * 1000
    println(s"Drying ${dry.id}. It will take $sleepTime milliseconds.")
    Thread.sleep(sleepTime)
    Done(dry.id)
  })

  val parallelStage = Flow.fromGraph(GraphDSL.create() { implicit builder =>
    import GraphDSL.Implicits._

    val dispatchLaundry = builder.add(Balance[Wash](3))
    val mergeLaundry = builder.add(Merge[Done](3))

    dispatchLaundry.out(0) ~> washStage.async ~> dryStage.async ~> mergeLaundry.in(0)
    dispatchLaundry.out(1) ~> washStage.async ~> dryStage.async ~> mergeLaundry.in(1)
    dispatchLaundry.out(2) ~> washStage.async ~> dryStage.async ~> mergeLaundry.in(2)

    FlowShape(dispatchLaundry.in, mergeLaundry.out)
  })

  def runGraph(testingFlow: Flow[Wash, Done, NotUsed]) = Source(tasks).via(testingFlow).to(Sink.foreach(println)).run()
} 
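Two plausible runners for the trait above (the cookbook wires it up with concrete objects along these lines; the names here are assumptions):

object PipeliningApplication extends PipeliningParallelizing {
  // Sequential fusion: wash then dry in a single pipelined flow.
  runGraph(washStage.via(dryStage))
}

object ParallelizingApplication extends PipeliningParallelizing {
  // Fan out over three parallel wash+dry lanes.
  runGraph(parallelStage)
}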
Example 78
Source File: StoppedReadSideProcessor.scala    From lagom   with Apache License 2.0    5 votes
package com.example.helloworld.impl.readsides

import java.util.concurrent.ConcurrentHashMap

import akka.Done
import akka.NotUsed
import akka.persistence.query.Offset
import akka.stream.scaladsl.Flow
import com.example.helloworld.impl.GreetingMessageChanged
import com.example.helloworld.impl.HelloWorldEvent
import com.lightbend.lagom.scaladsl.persistence.AggregateEventTag
import com.lightbend.lagom.scaladsl.persistence.EventStreamElement
import com.lightbend.lagom.scaladsl.persistence.ReadSide
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor.ReadSideHandler

import scala.concurrent.Future

// -------- Started instance of the processor --------
object StartedReadSideProcessor {
  val Name = "StartedProcessor"
  private val greetings = new ConcurrentHashMap[String, String]()
}

class StartedReadSideProcessor(readSide: ReadSide)
    extends AbstractReadSideProcessor(
      readSide,
      StartedReadSideProcessor.Name,
      StartedReadSideProcessor.greetings
    )

// -------- Stopped instance of the processor --------
object StoppedReadSideProcessor {
  val Name = "StoppedProcessor"
  private val greetings = new ConcurrentHashMap[String, String]()
}

class StoppedReadSideProcessor(readSide: ReadSide)
    extends AbstractReadSideProcessor(
      readSide,
      StoppedReadSideProcessor.Name,
      StoppedReadSideProcessor.greetings
    )

// -------- Abstract processor --------
class AbstractReadSideProcessor(private val readSide: ReadSide,
                                processorName: String,
                                inMemoryView: ConcurrentHashMap[String, String])
    extends ReadSideProcessor[HelloWorldEvent] {

  override def readSideName: String = processorName

  override def aggregateTags: Set[AggregateEventTag[HelloWorldEvent]] =
    HelloWorldEvent.Tag.allTags

  def getLastMessage(id: String): String =
    inMemoryView.getOrDefault(id, "default-projected-message")

  override def buildHandler()
    : ReadSideProcessor.ReadSideHandler[HelloWorldEvent] = {
    new ReadSideHandler[HelloWorldEvent] {

      val completedDone = Future.successful(Done)
      override def globalPrepare(): Future[Done] = completedDone

      override def prepare(
        tag: AggregateEventTag[HelloWorldEvent]
      ): Future[Offset] =
        Future.successful(Offset.noOffset)

      override def handle()
        : Flow[EventStreamElement[HelloWorldEvent], Done, NotUsed] = {
        Flow[EventStreamElement[HelloWorldEvent]]
          .mapAsync(1) { streamElement =>
            streamElement.event match {
              case GreetingMessageChanged(id, message) =>
                inMemoryView.put(id, message)
                completedDone
            }
          }
      }
    }
  }

} 
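
To have Lagom actually run one of these processors, it must be registered with the ReadSide component, typically from the application loader. A minimal sketch:

// Registration sketch: Lagom manages the processor's lifecycle after this call.
readSide.register[HelloWorldEvent](new StartedReadSideProcessor(readSide))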
Example 79
Source File: AnotherServiceImpl.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package docs.scaladsl.mb

import akka.Done
import akka.NotUsed
import akka.stream.FlowShape
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.GraphDSL
import akka.stream.scaladsl.GraphDSL.Implicits._
import akka.stream.scaladsl.Merge
import akka.stream.scaladsl.Partition
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.api.broker.Message

//#inject-service
class AnotherServiceImpl(helloService: HelloService) extends AnotherService {
  //#inject-service

  //#subscribe-to-topic
  helloService
    .greetingsTopic()
    .subscribe // <-- you get back a Subscriber instance
    .atLeastOnce(
      Flow.fromFunction(doSomethingWithTheMessage)
    )
  //#subscribe-to-topic

  var lastObservedMessage: String = _

  private def doSomethingWithTheMessage(greetingMessage: GreetingMessage): Done = {
    lastObservedMessage = greetingMessage.message
    Done
  }

  import scala.concurrent.ExecutionContext.Implicits.global

  override def foo: ServiceCall[NotUsed, String] = ServiceCall { req =>
    scala.concurrent.Future.successful(lastObservedMessage)
  }

  def subscribeWithMetadata = {
    //#subscribe-to-topic-with-metadata
    import com.lightbend.lagom.scaladsl.api.broker.Message
    import com.lightbend.lagom.scaladsl.broker.kafka.KafkaMetadataKeys

    helloService
      .greetingsTopic()
      .subscribe
      .withMetadata
      .atLeastOnce(
        Flow[Message[GreetingMessage]].map { msg =>
          val greetingMessage = msg.payload
          val messageKey      = msg.messageKeyAsString
          val kafkaHeaders    = msg.get(KafkaMetadataKeys.Headers)
          println(s"Message: $greetingMessage Key: $messageKey Headers: $kafkaHeaders")
          Done
        }
      )
    //#subscribe-to-topic-with-metadata
  }

  def skipMessages = {
    //#subscribe-to-topic-skip-messages
    helloService
      .greetingsTopic()
      .subscribe
      .atLeastOnce(
        Flow[GreetingMessage].map {
          case msg @ GreetingMessage("Kia ora") => doSomethingWithTheMessage(msg)
          case _                                => Done // Skip all messages where the message is not "Kia ora".
        }
      )
    //#subscribe-to-topic-skip-messages
  }
} 
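
The Partition, Merge and GraphDSL imports above suggest a fan-out variant of the subscriber flow. A sketch of such a flow, assuming (hypothetically) that empty messages should be skipped without invoking the handler:

def subscribeWithPartition = {
  val handlerFlow = Flow.fromGraph(GraphDSL.create() { implicit builder =>
    // Route empty messages to a no-op branch, everything else to the handler.
    val partition = builder.add(Partition[GreetingMessage](2, msg => if (msg.message.isEmpty) 1 else 0))
    val merge     = builder.add(Merge[Done](2))

    partition.out(0) ~> Flow[GreetingMessage].map(doSomethingWithTheMessage) ~> merge.in(0)
    partition.out(1) ~> Flow[GreetingMessage].map(_ => Done)                 ~> merge.in(1)

    FlowShape(partition.in, merge.out)
  })

  helloService.greetingsTopic().subscribe.atLeastOnce(handlerFlow)
}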
Example 80
Source File: BlogEventProcessor.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package docs.home.scaladsl.persistence

import scala.concurrent.Future

import akka.Done
import akka.NotUsed
import akka.persistence.query._
import akka.stream.scaladsl.Flow
import com.lightbend.lagom.scaladsl.persistence.AggregateEventTag
import com.lightbend.lagom.scaladsl.persistence.EventStreamElement
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor.ReadSideHandler

//#my-database
trait MyDatabase {
  def createTables(): Future[Done]
  def loadOffset(tag: AggregateEventTag[BlogEvent]): Future[Offset]
  def handleEvent(event: BlogEvent, offset: Offset): Future[Done]
}
//#my-database

object MyDatabase extends MyDatabase {
  def createTables(): Future[Done]                                  = Future.successful(Done)
  def loadOffset(tag: AggregateEventTag[BlogEvent]): Future[Offset] = Future.successful(NoOffset)
  def handleEvent(event: BlogEvent, offset: Offset): Future[Done]   = Future.successful(Done)
}

class BlogEventProcessor(myDatabase: MyDatabase) extends ReadSideProcessor[BlogEvent] {
  //#tag
  override def aggregateTags: Set[AggregateEventTag[BlogEvent]] =
    BlogEvent.Tag.allTags
  //#tag

  //#build-handler
  override def buildHandler(): ReadSideProcessor.ReadSideHandler[BlogEvent] = {
    new ReadSideHandler[BlogEvent] {
      override def globalPrepare(): Future[Done] =
        myDatabase.createTables()

      override def prepare(tag: AggregateEventTag[BlogEvent]): Future[Offset] =
        myDatabase.loadOffset(tag)

      override def handle(): Flow[EventStreamElement[BlogEvent], Done, NotUsed] = {
        Flow[EventStreamElement[BlogEvent]]
          .mapAsync(1) { eventElement =>
            myDatabase.handleEvent(eventElement.event, eventElement.offset)
          }
      }
    }
  }
  //#build-handler
} 
Example 81
Source File: CassandraReadSideHandler.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.scaladsl.persistence.cassandra

import akka.persistence.query.Offset
import akka.stream.ActorAttributes
import akka.stream.scaladsl.Flow
import akka.Done
import akka.NotUsed
import com.datastax.driver.core.BatchStatement
import com.datastax.driver.core.BoundStatement
import com.lightbend.lagom.internal.persistence.cassandra.CassandraOffsetDao
import com.lightbend.lagom.internal.persistence.cassandra.CassandraOffsetStore
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor.ReadSideHandler
import com.lightbend.lagom.scaladsl.persistence._
import com.lightbend.lagom.scaladsl.persistence.cassandra.CassandraSession
import org.slf4j.LoggerFactory

import scala.collection.immutable
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.collection.JavaConverters._


private[cassandra] final class CassandraAutoReadSideHandler[Event <: AggregateEvent[Event]](
    session: CassandraSession,
    offsetStore: CassandraOffsetStore,
    handlers: Map[Class[_ <: Event], CassandraAutoReadSideHandler.Handler[Event]],
    globalPrepareCallback: () => Future[Done],
    prepareCallback: AggregateEventTag[Event] => Future[Done],
    readProcessorId: String,
    dispatcher: String
)(implicit ec: ExecutionContext)
    extends CassandraReadSideHandler[Event, CassandraAutoReadSideHandler.Handler[Event]](
      session,
      handlers,
      dispatcher
    ) {
  import CassandraAutoReadSideHandler.Handler

  @volatile
  private var offsetDao: CassandraOffsetDao = _

  protected override def invoke(
      handler: Handler[Event],
      element: EventStreamElement[Event]
  ): Future[immutable.Seq[BoundStatement]] = {
    for {
      statements <- handler
        .asInstanceOf[EventStreamElement[Event] => Future[immutable.Seq[BoundStatement]]]
        .apply(element)
    } yield statements :+ offsetDao.bindSaveOffset(element.offset)
  }

  protected def offsetStatement(offset: Offset): immutable.Seq[BoundStatement] =
    immutable.Seq(offsetDao.bindSaveOffset(offset))

  override def globalPrepare(): Future[Done] = {
    globalPrepareCallback.apply()
  }

  override def prepare(tag: AggregateEventTag[Event]): Future[Offset] = {
    for {
      _   <- prepareCallback.apply(tag)
      dao <- offsetStore.prepare(readProcessorId, tag.tag)
    } yield {
      offsetDao = dao
      dao.loadedOffset
    }
  }
} 
Example 82
Source File: ProducerStubFactory.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.scaladsl.testkit

import java.util.concurrent.ConcurrentHashMap
import java.util.function.{ Function => JFunction }

import akka.Done
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Props
import akka.stream.Materializer
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Source
import com.lightbend.lagom.internal.testkit.InternalSubscriberStub
import com.lightbend.lagom.internal.testkit.TopicBufferActor
import com.lightbend.lagom.scaladsl.api.broker.Topic.TopicId
import com.lightbend.lagom.scaladsl.api.broker.Message
import com.lightbend.lagom.scaladsl.api.broker.Subscriber
import com.lightbend.lagom.scaladsl.api.broker.Topic

import scala.concurrent.Future


  // (fragment: the enclosing producer stub class definition is elided in this listing)
  def send(message: Message[T]): Unit = bufferActor.tell(message, ActorRef.noSender)
}

private[lagom] class TopicStub[T](val topicId: Topic.TopicId, topicBuffer: ActorRef)(
    implicit materializer: Materializer
) extends Topic[T] {
  def subscribe = new SubscriberStub[T, T]("default", topicBuffer, _.payload)

  class SubscriberStub[Payload, SubscriberPayload](
      groupId: String,
      topicBuffer: ActorRef,
      transform: Message[Payload] => SubscriberPayload
  )(implicit materializer: Materializer)
      extends InternalSubscriberStub[Payload, Message](groupId, topicBuffer)
      with Subscriber[SubscriberPayload] {
    override def withMetadata: Subscriber[Message[SubscriberPayload]] =
      new SubscriberStub[Payload, Message[SubscriberPayload]](
        groupId,
        topicBuffer,
        msg => msg.withPayload(transform(msg))
      )

    override def withGroupId(groupId: String): Subscriber[SubscriberPayload] =
      new SubscriberStub[Payload, SubscriberPayload](groupId, topicBuffer, transform)

    override def atMostOnceSource: Source[SubscriberPayload, _] = super.mostOnceSource.map(transform)

    override def atLeastOnce(flow: Flow[SubscriberPayload, Done, _]): Future[Done] =
      super.leastOnce(Flow[Message[Payload]].map(transform).via(flow))
  }
} 
Example 83
Source File: InternalSubscriberStub.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.testkit

import akka.Done
import akka.actor.ActorRef
import akka.stream.Materializer
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Keep
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source

import scala.concurrent.Future
import scala.language.higherKinds

private[lagom] class InternalSubscriberStub[Payload, Message[_]](
    groupId: String,
    topicBuffer: ActorRef
)(implicit materializer: Materializer) {
  def mostOnceSource: Source[Message[Payload], _] = {
    Source
      .actorRef[Message[Payload]](1024, OverflowStrategy.fail)
      .prependMat(Source.empty)(subscribeToBuffer)
  }

  def leastOnce(flow: Flow[Message[Payload], Done, _]): Future[Done] = {
    mostOnceSource
      .via(flow)
      .toMat(Sink.ignore)(Keep.right[Any, Future[Done]])
      .run()
  }

  private def subscribeToBuffer[R](ref: ActorRef, t: R) = {
    topicBuffer.tell(TopicBufferActor.SubscribeToBuffer(groupId, ref), ActorRef.noSender)
    t
  }
} 
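
Driving these stubs from a test — a sketch with hypothetical wiring, assuming TopicBufferActor exposes a props factory on its companion:

// Hypothetical test wiring for the stubs above.
val buffer = system.actorOf(TopicBufferActor.props())
val topic  = new TopicStub[String](Topic.TopicId("greetings"), buffer)

topic.subscribe.atLeastOnce(Flow[String].map { message =>
  println(s"received: $message")
  Done
})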
Example 84
Source File: WebSocketWorker.scala    From chordial   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.tristanpenman.chordial.demo

import akka.actor._
import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.scaladsl.{Flow, Source}
import ch.megard.akka.http.cors.scaladsl.CorsDirectives._

import scala.concurrent.ExecutionContext

final class WebSocketWorker(governor: ActorRef, eventsSource: Source[TextMessage, _])(implicit val ec: ExecutionContext)
    extends WebService {

  def route =
    // scalafmt: { indentOperator = spray }
    cors() {
      routes(governor) ~
      pathPrefix("eventstream")(getFromResourceDirectory("webapp")) ~
      handleWebSocketMessages(Flow[Message].take(0).prepend(eventsSource))
    }
}

object WebSocketWorker {
  def apply(nodeRef: ActorRef, eventsSource: Source[TextMessage, _])(
      implicit ec: ExecutionContext): Route =
    new WebSocketWorker(nodeRef, eventsSource).route
} 
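
The Flow[Message].take(0).prepend(eventsSource) handler above cancels the client's incoming messages and pushes only server events. A roughly equivalent push-only flow, assuming Sink is imported, drains incoming messages instead of cancelling them:

handleWebSocketMessages(Flow.fromSinkAndSource(Sink.ignore, eventsSource))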
Example 85
Source File: KafkaService.scala    From ws_to_kafka   with MIT License 5 votes vote down vote up
package com.pkinsky


import akka.actor.ActorSystem
import akka.stream.scaladsl.{Source, Flow, Sink}
import com.softwaremill.react.kafka.{ConsumerProperties, ProducerProperties, ProducerMessage, ReactiveKafka}
import org.apache.kafka.common.serialization.{Deserializer, Serializer}
import play.api.libs.json.{Json, Reads, Writes}

case class KafkaServiceConf(bootstrapServers: String)

class KafkaService(kafkaClient: ReactiveKafka, conf: KafkaServiceConf) {
  
  def consume[T](topic: String, groupId: String)(implicit writes: Reads[T], actorSystem: ActorSystem): Source[T, Unit] =
    Source.fromPublisher(kafkaClient.consume(
      ConsumerProperties(
        bootstrapServers = conf.bootstrapServers, // IP and port of local Kafka instance
        topic = topic, // topic to consume messages from
        groupId = groupId, // consumer group
        valueDeserializer = KafkaService.deserializer[T]
      )
    )).map(_.value())
}


object KafkaService {
  def serializer[T: Writes] = new Serializer[T] {
    override def serialize(topic: String, data: T): Array[Byte] = {
      val js = Json.toJson(data)
      js.toString().getBytes("UTF-8")
    }

    override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = ()
    override def close(): Unit = ()
  }

  def deserializer[T: Reads] = new Deserializer[T] {
    override def deserialize(topic: String, data: Array[Byte]): T = {
      val s = new String(data, "UTF-8")
      Json.fromJson(Json.parse(s)).get //throw exception on error ¯\_(ツ)_/¯ (consider returning JsResult[T])
    }

    override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = ()
    override def close(): Unit = ()
  }
} 
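
Consuming JSON events through this service — a sketch, assuming a hypothetical Event case class plus an implicit ActorSystem and Materializer in scope:

case class Event(id: String, payload: String)
object Event {
  implicit val reads: Reads[Event] = Json.reads[Event]
}

val kafka = new KafkaService(new ReactiveKafka, KafkaServiceConf("localhost:9092"))

// runForeach materializes the stream and needs an implicit Materializer.
kafka.consume[Event](topic = "events", groupId = "demo-group")
  .runForeach(event => println(event))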
Example 86
Source File: CouchbaseReadSideHandler.scala    From akka-persistence-couchbase   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.scaladsl.persistence.couchbase

import akka.persistence.query.Offset
import akka.stream.ActorAttributes
import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession
import akka.stream.scaladsl.Flow
import akka.{Done, NotUsed}
import com.lightbend.lagom.internal.persistence.couchbase.{CouchbaseOffsetDao, CouchbaseOffsetStore}
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor.ReadSideHandler
import com.lightbend.lagom.scaladsl.persistence._
import org.slf4j.LoggerFactory

import scala.concurrent.{ExecutionContext, Future}


private[couchbase] final class CouchbaseReadSideHandler[Event <: AggregateEvent[Event]](
    couchbase: CouchbaseSession,
    offsetStore: CouchbaseOffsetStore,
    handlers: Map[Class[_ <: Event], CouchbaseReadSideHandler.Handler[Event]],
    globalPrepareCallback: CouchbaseSession => Future[Done],
    prepareCallback: (CouchbaseSession, AggregateEventTag[Event]) => Future[Done],
    readProcessorId: String,
    dispatcher: String
)(implicit ec: ExecutionContext)
    extends ReadSideHandler[Event] {
  import CouchbaseReadSideHandler.Handler

  private val log = LoggerFactory.getLogger(this.getClass)

  @volatile
  private var offsetDao: CouchbaseOffsetDao = _

  protected def invoke(handler: Handler[Event], element: EventStreamElement[Event]): Future[Done] =
    handler
      .apply(couchbase, element)
      .flatMap(_ => offsetDao.bindSaveOffset(element.offset).execute(couchbase, ec))

  override def globalPrepare(): Future[Done] = globalPrepareCallback(couchbase)

  override def prepare(tag: AggregateEventTag[Event]): Future[Offset] =
    for {
      _ <- prepareCallback.apply(couchbase, tag)
      dao <- offsetStore.prepare(readProcessorId, tag.tag)
    } yield {
      offsetDao = dao
      dao.loadedOffset
    }

  override def handle(): Flow[EventStreamElement[Event], Done, NotUsed] =
    Flow[EventStreamElement[Event]]
      .mapAsync(parallelism = 1) { elem =>
        val eventClass = elem.event.getClass

        val handler =
          handlers.getOrElse(
            // lookup handler
            eventClass,
            // fallback to empty handler if none
            {
              if (log.isDebugEnabled()) log.debug("Unhandled event [{}]", eventClass.getName)
              CouchbaseReadSideHandler.emptyHandler.asInstanceOf[Handler[Event]]
            }
          )

        invoke(handler, elem)
      }
      .withAttributes(ActorAttributes.dispatcher(dispatcher))
} 
Example 87
Source File: RequestRoutingSpec.scala    From akka-stream-eventsourcing   with Apache License 2.0 5 votes vote down vote up
package com.github.krasserm.ases

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.testkit.TestKit
import com.github.krasserm.ases.log.AkkaPersistenceEventLog
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{Matchers, WordSpecLike}
import scala.collection.immutable.Seq

object RequestRoutingSpec {
  import EventSourcing._

  sealed trait Request {
    def aggregateId: String
  }
  case class GetState(aggregateId: String) extends Request              // Query
  case class Increment(aggregateId: String, delta: Int) extends Request // Command
  case class Incremented(aggregateId: String, delta: Int)               // Event
  case class Response(aggregateId: String, state: Int)

  val requestHandler: RequestHandler[Int, Incremented, Request, Response] = {
    case (s, GetState(aggregateId))     => respond(Response(aggregateId, s))
    case (_, Increment(aggregateId, d)) => emit(Seq(Incremented(aggregateId, d)), Response(aggregateId, _))
  }

  val eventHandler: EventHandler[Int, Incremented] =
    (s, e) => s + e.delta
}

class RequestRoutingSpec extends TestKit(ActorSystem("test")) with WordSpecLike with Matchers with ScalaFutures with StreamSpec {
  import RequestRoutingSpec._

  val akkaPersistenceEventLog: AkkaPersistenceEventLog =
    new log.AkkaPersistenceEventLog(journalId = "akka.persistence.journal.inmem")

  def processor(aggregateId: String): Flow[Request, Response, NotUsed] =
    EventSourcing(aggregateId, 0, requestHandler, eventHandler).join(akkaPersistenceEventLog.flow(aggregateId))

  def router: Flow[Request, Response, NotUsed] =
    Router(_.aggregateId, processor)

  "A request router" when {
    "configured to route based on aggregate id" must {
      "dynamically create a request processor for each aggregate id" in {
        val aggregateId1 = "a1"
        val aggregateId2 = "a2"

        val (pub, sub) = probes(router)

        pub.sendNext(Increment(aggregateId1, 3))
        sub.requestNext(Response(aggregateId1, 3))

        pub.sendNext(Increment(aggregateId2, 1))
        sub.requestNext(Response(aggregateId2, 1))

        pub.sendNext(Increment(aggregateId1, 2))
        sub.requestNext(Response(aggregateId1, 5))

        pub.sendNext(Increment(aggregateId2, -4))
        sub.requestNext(Response(aggregateId2, -3))
      }
      "handle single command using Source.single" in {
        val request = Increment("a3", 3)
        val expected = Response("a3", 3)
        Source.single(request)
          .via(router)
          .runWith(Sink.head)
          .futureValue should be(expected)
      }
      "handle single command using Source.apply(Seq)" in {
        val request = Increment("a4", 3)
        val expected = Response("a4", 3)
        Source(Seq(request))
          .via(router)
          .runWith(Sink.head)
          .futureValue should be(expected)
      }
      "handle multiple commands" in {
        Source(Seq(Increment("a5", 1), Increment("a5", 2), Increment("a5", 3)))
          .via(router)
          .runWith(Sink.seq)
          .futureValue should be(Seq(Response("a5", 1), Response("a5", 3), Response("a5", 6)))
      }
    }
  }
} 
Example 88
Source File: EventCollaborationSpec.scala    From akka-stream-eventsourcing   with Apache License 2.0 5 votes vote down vote up
package com.github.krasserm.ases

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink}
import akka.testkit.TestKit
import com.github.krasserm.ases.log.{KafkaEventLog, KafkaSpec}
import org.apache.kafka.common.TopicPartition
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{Matchers, WordSpecLike}

import scala.collection.immutable.Seq

class EventCollaborationSpec extends TestKit(ActorSystem("test")) with WordSpecLike with Matchers with ScalaFutures with StreamSpec with KafkaSpec {
  import EventSourcingSpec._

  implicit val pc = PatienceConfig(timeout = Span(5, Seconds), interval = Span(10, Millis))

  val emitterId1 = "processor1"
  val emitterId2 = "processor2"

  val kafkaEventLog: KafkaEventLog =
    new log.KafkaEventLog(host, port)

  def processor(emitterId: String, topicPartition: TopicPartition): Flow[Request, Response, NotUsed] =
    EventSourcing(emitterId, 0, requestHandler, eventHandler).join(kafkaEventLog.flow(topicPartition))

  "A group of EventSourcing stages" when {
    "joined with a shared event log" can {
      "collaborate via publish-subscribe" in {
        val topicPartition = new TopicPartition("p-1", 0)    // shared topic partition
        val (pub1, sub1) = probes(processor(emitterId1, topicPartition)) // processor 1
        val (pub2, sub2) = probes(processor(emitterId2, topicPartition)) // processor 2

        pub1.sendNext(Increment(3))
        // Both processors receive event but
        // only processor 1 creates response
        sub1.requestNext(Response(3))

        pub2.sendNext(Increment(-4))
        // Both processors receive event but
        // only processor 2 creates response
        sub2.requestNext(Response(-1))

        // consume and verify events emitted by both processors
        kafkaEventLog.source[Incremented](topicPartition).via(log.replayed).map {
          case Durable(event, eid, _, sequenceNr) => (event, eid, sequenceNr)
        }.runWith(Sink.seq).futureValue should be(Seq(
          (Incremented(3), emitterId1, 0L),
          (Incremented(-4), emitterId2, 1L)
        ))
      }
    }
  }
} 
Example 89
Source File: StreamSpec.scala    From akka-stream-eventsourcing   with Apache License 2.0 5 votes vote down vote up
package com.github.krasserm.ases

import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Keep}
import akka.stream.testkit.scaladsl.{TestSink, TestSource}
import akka.stream.testkit.{TestPublisher, TestSubscriber}
import akka.testkit.TestKit
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.collection.immutable.Seq

trait StreamSpec extends BeforeAndAfterAll { this: TestKit with Suite =>
  implicit val materializer = ActorMaterializer()

  val emitterId = "emitter"

  override def afterAll(): Unit = {
    materializer.shutdown()
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }

  def probes[I, O, M](flow: Flow[I, O, M]): (TestPublisher.Probe[I], TestSubscriber.Probe[O]) =
    TestSource.probe[I].viaMat(flow)(Keep.left).toMat(TestSink.probe[O])(Keep.both).run()

  def durables[A](emitted: Seq[Emitted[A]], offset: Int = 0): Seq[Durable[A]] =
    emitted.zipWithIndex.map { case (e, i) => e.durable(i + offset) }
} 
Example 90
Source File: Pipe.scala    From tap   with Apache License 2.0 5 votes vote down vote up
package io.heta.tap.pipelines

import akka.NotUsed
import akka.stream.scaladsl.Flow
import io.heta.tap.data.doc.expression.affect.AffectThresholds
import io.heta.tap.pipelines.materialize.FilePipeline.File
import org.clulab.processors.Document


object Pipe {

  val annotatedSentences: Flow[Document, File, NotUsed] =
    Segment.Document_SentencesBatchResult via
      Segment.AnalyticsResult_File

  val vocabulary: Flow[Document, File, NotUsed] =
    Segment.Document_SentencesBatchResult via
      Segment.Sentences_Vocabulary via
        Segment.AnalyticsResult_File

  val metrics: Flow[Document, File, NotUsed] =
    Segment.Document_SentencesBatchResult via
      Segment.Sentences_Metrics via
        Segment.AnalyticsResult_File

  val posStats: Flow[Document, File, NotUsed] =
    Segment.Document_SentencesBatchResult via
      Segment.Sentences_PosStats via
        Segment.AnalyticsResult_File

  val syllables: Flow[Document, File, NotUsed] =
    Segment.Document_SentencesBatchResult via
      Segment.Sentences_Syllables via
        Segment.AnalyticsResult_File

  val spelling: Flow[Document, File, NotUsed] =
    Segment.Document_SentencesBatchResult via
      Segment.Sentences_Spelling via
        Segment.AnalyticsResult_File

  val expressions: Flow[Document, File, NotUsed] =
    Segment.Document_SentencesBatchResult via
      Segment.Sentences_Expressions via
        Segment.AnalyticsResult_File

  val affectExpressions: Flow[Document, File, NotUsed] =
    Segment.Document_SentencesBatchResult via
      Segment.Sentences_AffectExpressions(Some(AffectThresholds(arousal=0.0,valence = 0.0,dominance = 0.0))) via
        Segment.AnalyticsResult_File

  val reflectExpressions: Flow[Document, File, NotUsed] =
    Segment.Document_ReflectiveExpressionsBatchResult via
      Segment.AnalyticsResult_File

} 
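
Each pipeline is a Flow[Document, File, NotUsed], so running one is a matter of attaching a source and a sink. A sketch, assuming an annotated Document, an implicit materializer, and Source/Sink imports:

Source.single(document)
  .via(Pipe.metrics)
  .runWith(Sink.foreach(file => println(file)))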
Example 91
Source File: AnnotatingTypes.scala    From tap   with Apache License 2.0 5 votes vote down vote up
package io.heta.tap.pipelines

import akka.NotUsed
import akka.stream.scaladsl.Flow
import io.nlytx.expressions.data.ReflectiveExpressions
import io.nlytx.nlp.api.DocumentModel.{Document, Section}
import org.clulab.processors
import io.heta.tap.data._
import io.heta.tap.data.doc.expression.reflect.ReflectExpressions
import io.heta.tap.data.doc.spell.Spelling
import io.heta.tap.data.doc.{Metrics, PosStats, Sentence, Syllables} // scalastyle:ignore


object AnnotatingTypes {

  type TapSentences = Vector[Sentence]
  type Sections = Vector[Section]

  //type CluDocumentFlow = Flow[String,processors.Document, NotUsed]
  //type CluSentencesFlow = Flow[processors.Document, TapSentences, NotUsed]
  type DocumentFlow = Flow[String, Document, NotUsed]
  //type SentencesFlow = Flow[Document, TapSentences, NotUsed]
  //type VocabFlow = Flow[Document, TapVocab, NotUsed]
  type MetricsFlow = Flow[Document, Metrics, NotUsed]
  //type ExpressionsFlow = Flow[Document, Vector[Expressions], NotUsed]
  type SyllablesFlow = Flow[Document, Vector[Syllables],NotUsed]
  type SpellingFlow = Flow[Document, Vector[Spelling],NotUsed]
  type PosStatsFlow = Flow[Document, PosStats, NotUsed]
  type ReflectExpressionFlow = Flow[Document, ReflectExpressions, NotUsed]
  //type AffectExpressionFlow = Flow[processors.Document, Vector[AffectExpressions], NotUsed]


} 
Example 92
Source File: CleaningPipelineSpec.scala    From tap   with Apache License 2.0 5 votes vote down vote up
package io.heta.tap.pipelines

import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import io.heta.tap.UnitSpec

import scala.concurrent.Await
import scala.concurrent.duration._



class CleaningPipelineSpec extends UnitSpec {

  import io.heta.tap.pipelines.materialize.PipelineContext._

  val cleaning = new Cleaning


  def testSource(input:String) = Source.single(input)
  val testSink = Flow[String].toMat(Sink.head[String])(Keep.right)

  "revealInvisible" should "replace whitespace characters with visible characters" in {

    import cleaning.White._
    val input = s"1${sp}2${nb}3${nl}4${cr}5\u001e6\u00807"
    val future = testSource(input) via cleaning.Pipeline.revealInvisible runWith testSink
    val result = Await.result(future, 3 seconds)
    assert(result=="1·2·3¬4¬5�6�7")
  }

  "simplify" should "replace quotes and hyphens with single byte versions" in {

    import cleaning.Quote._
    val input = s"1${singleCurlyLeft}2${singleCurlyRight}3${doubleCurlyLeft}4${doubleCurlyRight}5${cleaning.Hyphen.rgx_hyphens}6"
    val future = testSource(input) via cleaning.Pipeline.simplify runWith testSink
    val result = Await.result(future, 3 seconds)
    assert(result=="1'2'3\"4\"5-|-|-|-|-|-|-|-6")
  }

  "lengthPreserve" should "replace control characters while preserving length" in {
    import cleaning.White._
    val input = s"1${sp}2${nb}3${nl}4${cr}5\u001e6\u00807"
    val future = testSource(input) via cleaning.Pipeline.lengthPreserve runWith testSink
    val result = Await.result(future, 3 seconds)
    assert(result=="1 2 3\n4\n5�6�7" && result.length==input.length)
  }

  "utfMinimal" should "strip control characters, and reduce whitespace" in {
    import cleaning.White._
    val input = s"1${sp}${nb}3${nl}${cr}5\u001e6\u00807"
    val future = testSource(input) via cleaning.Pipeline.utfMinimal runWith testSink
    val result = Await.result(future, 3 seconds)
    assert(result=="1 3\n567")
  }

  "utfSimplify" should "replace hyphens and quotes, strip controls and reduce whitespace" in {
    import cleaning.Quote._
    import cleaning.White._
    val input = s"1${sp}${nb}3${nl}${cr}5\u001e6\u00807${singleCurlyLeft}8${singleCurlyRight}9${doubleCurlyLeft}10${doubleCurlyRight}11${cleaning.Hyphen.rgx_hyphens}12"
    val future = testSource(input) via cleaning.Pipeline.utfSimplify runWith testSink
    val result = Await.result(future, 3 seconds)
    assert(result=="1 3\n567'8'9\"10\"11-|-|-|-|-|-|-|-12")
  }

//  "asciiOnly" should "replace or strip all non-ascii characters" in {
//    import cleaning.Quote._
//    import cleaning.White._
//    val input = s"1${sp}${nb}3${nl}${cr}56\u00807${singleCurlyLeft}8${singleCurlyRight}9${doubleCurlyLeft}10${doubleCurlyRight}11${cleaning.Hyphen.rgx_hyphens}12"
//    val future = testSource(input) via cleaning.Pipeline.asciiOnly runWith testSink
//    val result = Await.result(future, 3 seconds)
//    assert(result=="1 3\r\n567891011|||||||12")
//  }

} 
Example 93
Source File: WebService.scala    From heimdallr   with Apache License 2.0 5 votes vote down vote up
package chat

import scala.concurrent.ExecutionContext.Implicits._
import scala.util.{Failure,Success}
import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.{ ServerBinding }
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse, Uri }
import akka.stream.scaladsl.{ Flow, Sink, Source }
import org.slf4j.LoggerFactory

trait WebService {
  val log = LoggerFactory.getLogger("total")
  private var binding: scala.concurrent.Future[ServerBinding] = null

  def serviceBind(serviceName: String, bindRoute: Flow[HttpRequest, HttpResponse, Any], bindPort: Int)
                 (implicit actorSystem: ActorSystem, materializer: Materializer): Unit = {
    binding = Http().bindAndHandle(bindRoute,"0.0.0.0", bindPort)

    // the rest of the sample code will go here
    binding.onComplete {
      //binding success check
      case Success(binding) =>
        val localAddress = binding.localAddress
        log.info(s"${serviceName} is listening on ${localAddress.getAddress}:${localAddress.getPort}")

      case Failure(e) =>
        log.error(s"${serviceName} Binding failed with ${e.getMessage}")
    }
  }

  def serviceUnbind(serviceName: String) = {
    if( binding != null )
    {
      binding
        .flatMap(_.unbind())
        .onComplete(_ =>
          log.info(s"${serviceName} listening port unbinding ... ")
        )
    }
    else
      log.info( s"${serviceName} Unbinding Failed !" )
  }
} 
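
A concrete service mixes in the trait and hands serviceBind a handler flow; a sketch that converts a minimal Route via Route.handlerFlow (Routes are also implicitly convertible in the Akka HTTP versions this code targets):

object HealthService extends WebService {
  import akka.http.scaladsl.server.Directives._
  import akka.http.scaladsl.server.Route

  def start()(implicit system: ActorSystem, materializer: Materializer): Unit = {
    val route = path("health") { complete("OK") }
    serviceBind("health-service", Route.handlerFlow(route), 8080)
  }
}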
Example 94
Source File: StorageIndexer.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.indexing

import java.time.Instant

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.iam.auth.AccessToken
import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, StorageCache}
import ch.epfl.bluebrain.nexus.kg.config.KgConfig.StorageConfig
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.kg.storage.Storage
import ch.epfl.bluebrain.nexus.service.config.ServiceConfig
import ch.epfl.bluebrain.nexus.service.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem}
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
object StorageIndexer {

  implicit private val log = Logger[StorageIndexer.type]

  def start[F[_]: Timer](storages: Storages[F], storageCache: StorageCache[F])(implicit
      projectCache: ProjectCache[F],
      F: Effect[F],
      as: ActorSystem,
      projectInitializer: ProjectInitializer[F],
      adminClient: AdminClient[F],
      config: ServiceConfig
  ): StreamSupervisor[F, Unit] = {

    implicit val authToken: Option[AccessToken] = config.serviceAccount.credentials
    implicit val indexing: IndexingConfig       = config.kg.keyValueStore.indexing
    implicit val ec: ExecutionContext           = as.dispatcher
    implicit val tm: Timeout                    = Timeout(config.kg.keyValueStore.askTimeout)
    implicit val storageConfig: StorageConfig   = config.kg.storage
    val name                                    = "storage-indexer"

    def toStorage(event: Event): F[Option[(Storage, Instant)]] =
      fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project =>
        storages.fetchStorage(event.id).value.map {
          case Left(err)           =>
            log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'")
            None
          case Right(timedStorage) => Some(timedStorage)
        }
      }

    val source: Source[PairMsg[Any], _]   = cassandraSource(s"type=${nxv.Storage.value.show}", name)
    val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any]
      .collectCast[Event]
      .groupedWithin(indexing.batch, indexing.batchTimeout)
      .distinct()
      .mergeEmit()
      .mapAsync(toStorage)
      .collectSome[(Storage, Instant)]
      .runAsync { case (storage, instant) => storageCache.put(storage)(instant) }()
      .flow
      .map(_ => ())

    StreamSupervisor.startSingleton(F.delay(source.via(flow)), name)
  }
}
// $COVERAGE-ON$ 
Example 95
Source File: ResolverIndexer.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.indexing

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.iam.auth.AccessToken
import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, ResolverCache}
import ch.epfl.bluebrain.nexus.kg.resolve.Resolver
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.service.config.ServiceConfig
import ch.epfl.bluebrain.nexus.service.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem}
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
object ResolverIndexer {

  implicit private val log = Logger[ResolverIndexer.type]

  
  final def start[F[_]: Timer](resolvers: Resolvers[F], resolverCache: ResolverCache[F])(implicit
      projectCache: ProjectCache[F],
      as: ActorSystem,
      F: Effect[F],
      projectInitializer: ProjectInitializer[F],
      adminClient: AdminClient[F],
      config: ServiceConfig
  ): StreamSupervisor[F, Unit] = {
    implicit val authToken: Option[AccessToken] = config.serviceAccount.credentials
    implicit val indexing: IndexingConfig       = config.kg.keyValueStore.indexing
    implicit val ec: ExecutionContext           = as.dispatcher
    implicit val tm: Timeout                    = Timeout(config.kg.keyValueStore.askTimeout)

    val name = "resolver-indexer"

    def toResolver(event: Event): F[Option[Resolver]] =
      fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project =>
        resolvers.fetchResolver(event.id).value.map {
          case Left(err)       =>
            log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'")
            None
          case Right(resolver) => Some(resolver)
        }
      }

    val source: Source[PairMsg[Any], _]   = cassandraSource(s"type=${nxv.Resolver.value.show}", name)
    val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any]
      .collectCast[Event]
      .groupedWithin(indexing.batch, indexing.batchTimeout)
      .distinct()
      .mergeEmit()
      .mapAsync(toResolver)
      .collectSome[Resolver]
      .runAsync(resolverCache.put)()
      .flow
      .map(_ => ())

    StreamSupervisor.startSingleton(F.delay(source.via(flow)), name)
  }
}
// $COVERAGE-ON$ 
Example 96
Source File: ViewIndexer.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.indexing

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.iam.auth.AccessToken
import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, ViewCache}
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.service.config.ServiceConfig
import ch.epfl.bluebrain.nexus.service.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem}
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
object ViewIndexer {

  implicit private val log = Logger[ViewIndexer.type]

  def start[F[_]: Timer](views: Views[F], viewCache: ViewCache[F])(implicit
      projectCache: ProjectCache[F],
      F: Effect[F],
      as: ActorSystem,
      projectInitializer: ProjectInitializer[F],
      adminClient: AdminClient[F],
      config: ServiceConfig
  ): StreamSupervisor[F, Unit] = {

    implicit val authToken: Option[AccessToken] = config.serviceAccount.credentials
    implicit val indexing: IndexingConfig       = config.kg.keyValueStore.indexing
    implicit val ec: ExecutionContext           = as.dispatcher
    implicit val tm: Timeout                    = Timeout(config.kg.keyValueStore.askTimeout)
    val name                                    = "view-indexer"

    def toView(event: Event): F[Option[View]] =
      fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project =>
        views.fetchView(event.id).value.map {
          case Left(err)   =>
            log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'")
            None
          case Right(view) => Some(view)
        }
      }

    val source: Source[PairMsg[Any], _]   = cassandraSource(s"type=${nxv.View.value.show}", name)
    val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any]
      .collectCast[Event]
      .groupedWithin(indexing.batch, indexing.batchTimeout)
      .distinct()
      .mergeEmit()
      .mapAsync(toView)
      .collectSome[View]
      .runAsync(viewCache.put)()
      .flow
      .map(_ => ())

    StreamSupervisor.startSingleton(F.delay(source.via(flow)), name)
  }
}
// $COVERAGE-ON$ 
Example 97
Source File: TarFlow.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.storage
import java.nio.charset.StandardCharsets.UTF_8
import java.nio.file.{Files, Path}

import akka.NotUsed
import akka.stream.scaladsl.{FileIO, Flow, Source}
import akka.util.ByteString
import org.apache.commons.compress.archivers.tar.{TarArchiveEntry, TarConstants}


object TarFlow {

  // Note: headerBytes, padToBoundary and terminalChunk are helper members
  // elided from this listing.
  def writer(basePath: Path): Flow[Path, ByteString, NotUsed] =
    Flow[Path]
      .flatMapConcat {
        case path if Files.isRegularFile(path) =>
          val headerSource  = Source.single(headerBytes(basePath, path))
          val paddingSource = Source.single(padToBoundary(path))
          headerSource.concat(FileIO.fromPath(path)).concat(paddingSource)
        case path                              =>
          Source.single(headerBytes(basePath, path))
      }
      .concat(Source.single(terminalChunk))
} 
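
Producing a tarball from a directory tree with the writer — a sketch, assuming an implicit materializer and that Files.walk's Java stream is converted with JavaConverters:

import java.nio.file.Paths
import scala.collection.JavaConverters._

val base = Paths.get("/tmp/data")

Source.fromIterator(() => Files.walk(base).iterator().asScala)
  .via(TarFlow.writer(base))
  .runWith(FileIO.toPath(Paths.get("/tmp/data.tar")))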
Example 98
Source File: Demo.scala    From toketi-iothubreact   with MIT License 5 votes vote down vote up
// Copyright (c) Microsoft. All rights reserved.

package D_Throttling

import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Flow, Sink}
import com.microsoft.azure.iot.iothubreact.MessageFromDevice
import com.microsoft.azure.iot.iothubreact.scaladsl._
import com.microsoft.azure.iot.iothubreact.ResumeOnError._

import scala.concurrent.duration._
import scala.language.postfixOps

object Demo extends App {

  val maxSpeed = 100

  // Sink combining throttling and monitoring
  lazy val throttleAndMonitor = Flow[MessageFromDevice]
    .alsoTo(throttler)
    .to(monitor)

  // Stream throttling sink
  val throttler = Flow[MessageFromDevice]
    .throttle(maxSpeed, 1.second, maxSpeed / 10, ThrottleMode.Shaping)
    .to(Sink.ignore)

  // Messages throughput monitoring sink
  val monitor = Sink.foreach[MessageFromDevice] {
    m ⇒ {
      Monitoring.total += 1
      Monitoring.totals(m.runtimeInfo.partitionInfo.partitionNumber.get) += 1
    }
  }

  println(s"Streaming messages at ${maxSpeed} msg/sec")

  IoTHub().source
    .to(throttleAndMonitor)
    .run()

  // Print statistics at some interval
  Monitoring.printStatisticsWithFrequency(1 second)
} 
Example 99
Source File: Demo.scala    From toketi-iothubreact   with MIT License 5 votes vote down vote up
// Copyright (c) Microsoft. All rights reserved.

package F_SendMessageToDevice

import akka.stream.scaladsl.Flow
import com.microsoft.azure.iot.iothubreact.MessageToDevice
import com.microsoft.azure.iot.iothubreact.ResumeOnError._
import com.microsoft.azure.iot.iothubreact.filters.MessageSchema
import com.microsoft.azure.iot.iothubreact.scaladsl._

import scala.language.{implicitConversions, postfixOps}

object Demo extends App with Deserialize {

  val turnFanOn  = MessageToDevice("Turn fan ON")
  val turnFanOff = MessageToDevice("Turn fan OFF")

  val hub = IoTHub()

  // Source
  val temperatures = hub
    .source()
    .filter(MessageSchema("temperature"))
    .map(deserialize)

  // Too cold sink
  val tooColdWorkflow = Flow[Temperature]
    .filter(_.value < 65)
    .map(t ⇒ turnFanOff.to(t.deviceId))
    .to(hub.sink())

  // Too warm sink
  val tooWarmWorkflow = Flow[Temperature]
    .filter(_.value > 85)
    .map(t ⇒ turnFanOn.to(t.deviceId))
    .to(hub.sink())

  temperatures
    .alsoTo(tooColdWorkflow)
    .to(tooWarmWorkflow)
    .run()

  
} 
Example 100
Source File: Watcher.scala    From cloudstate   with Apache License 2.0 5 votes vote down vote up
package io.cloudstate.operator

import akka.{Done, NotUsed}
import akka.stream.{KillSwitch, KillSwitches, Materializer}
import akka.stream.scaladsl.{Flow, Keep, RestartSource, Sink, Source}
import play.api.libs.json.Format
import skuber.{ListResource, ObjectResource, ResourceDefinition}
import skuber.api.client.{EventType, KubernetesClient, WatchEvent}

import scala.concurrent.duration._
import skuber.json.format._

import scala.concurrent.ExecutionContext

object Watcher {

  private implicit def listResourceFormat[Resource <: ObjectResource: Format]: Format[ListResource[Resource]] =
    ListResourceFormat(implicitly[Format[Resource]])

  def watch[Resource <: ObjectResource: Format: ResourceDefinition](
      client: KubernetesClient,
      handler: Flow[WatchEvent[Resource], _, _]
  )(implicit ec: ExecutionContext, mat: Materializer): KillSwitch =
    // Summary of what we want our event loop to look like:
    // * We start by listing all the resources, and process them.
    // * Then we start watching from the resourceVersion that we got in our list, so we get all updates.
    // * But we also want to periodically recheck all resources, since sometimes there are race conditions
    //   between operators handling dependent resources (eg, if you deploy a journal and a service that uses
    //   it at the same time), so we only run the watch for a maximum of that time (eg, 5 minutes), before
    //   restarting.
    // * Also, if errors are encountered, we don't want to continually restart in a hot loop, so we use the
    //   RestartSource to restart with backoff.
    RestartSource
      .onFailuresWithBackoff(2.seconds, 20.seconds, 0.2) { () =>
        val source = Source
          .repeat(NotUsed)
          .flatMapConcat { _ =>
            Source
              .fromFutureSource(
                client
                  .list[ListResource[Resource]]()
                  .map { resources =>
                    val watch = client
                      .watchAllContinuously[Resource](sinceResourceVersion = Some(resources.resourceVersion))

                    Source(resources)
                      .map(WatchEvent(EventType.MODIFIED, _))
                      .concat(watch)
                  }
              )
              .takeWithin(5.minutes)
          }

        source.via(handler)
      }
      .viaMat(KillSwitches.single)(Keep.right)
      .to(Sink.ignore)
      .run()

  def watchSingle[Resource <: ObjectResource: Format: ResourceDefinition](
      client: KubernetesClient,
      resourceName: String,
      handler: Flow[WatchEvent[Resource], _, _]
  )(implicit ec: ExecutionContext, mat: Materializer): KillSwitch =
    RestartSource
      .onFailuresWithBackoff(2.seconds, 20.seconds, 0.2) { () =>
        val source = Source
          .repeat(NotUsed)
          .flatMapConcat { _ =>
            Source
              .fromFutureSource(
                client.getOption[Resource](resourceName).map {
                  case Some(resource) =>
                    val watch =
                      client.watchContinuously[Resource](resourceName,
                                                         sinceResourceVersion = Some(resource.resourceVersion))
                    Source
                      .single(resource)
                      .map(WatchEvent(EventType.MODIFIED, _))
                      .concat(watch)
                  case None =>
                    throw new RuntimeException(
                      s"Resource $resourceName not found in namespace ${client.namespaceName}!"
                    )
                }
              )
              .takeWithin(5.minutes)
          }

        source.via(handler)
      }
      .viaMat(KillSwitches.single)(Keep.right)
      .to(Sink.ignore)
      .run()
} 
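
Invoking the watcher with a simple logging handler — a sketch, assuming a skuber KubernetesClient plus the standard Pod format and resource definition in scope:

val killSwitch = Watcher.watch[skuber.Pod](
  client,
  Flow[WatchEvent[skuber.Pod]].map(event => println(s"${event._type}: ${event._object.name}"))
)

// Stop watching when done.
killSwitch.shutdown()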
Example 101
Source File: EventSourcedSupportFactory.scala    From cloudstate   with Apache License 2.0 5 votes vote down vote up
package io.cloudstate.proxy.eventsourced

import akka.NotUsed
import akka.actor.{ActorRef, ActorSystem}
import akka.cluster.sharding.ShardRegion.HashCodeMessageExtractor
import akka.cluster.sharding.{ClusterSharding, ClusterShardingSettings}
import akka.event.Logging
import akka.grpc.GrpcClientSettings
import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import com.google.protobuf.Descriptors.ServiceDescriptor
import io.cloudstate.protocol.entity.{Entity, Metadata}
import io.cloudstate.protocol.event_sourced.EventSourcedClient
import io.cloudstate.proxy._
import io.cloudstate.proxy.entity.{EntityCommand, UserFunctionReply}

import scala.concurrent.{ExecutionContext, Future}
import scala.collection.JavaConverters._

class EventSourcedSupportFactory(system: ActorSystem,
                                 config: EntityDiscoveryManager.Configuration,
                                 grpcClientSettings: GrpcClientSettings,
                                 concurrencyEnforcer: ActorRef,
                                 statsCollector: ActorRef)(implicit ec: ExecutionContext, mat: Materializer)
    extends EntityTypeSupportFactory {

  private final val log = Logging.getLogger(system, this.getClass)

  private val eventSourcedClient = EventSourcedClient(grpcClientSettings)(system)

  override def buildEntityTypeSupport(entity: Entity,
                                      serviceDescriptor: ServiceDescriptor,
                                      methodDescriptors: Map[String, EntityMethodDescriptor]): EntityTypeSupport = {
    validate(serviceDescriptor, methodDescriptors)

    val stateManagerConfig = EventSourcedEntity.Configuration(entity.serviceName,
                                                              entity.persistenceId,
                                                              config.passivationTimeout,
                                                              config.relayOutputBufferSize)

    log.debug("Starting EventSourcedEntity for {}", entity.persistenceId)
    val clusterSharding = ClusterSharding(system)
    val clusterShardingSettings = ClusterShardingSettings(system)
    val eventSourcedEntity = clusterSharding.start(
      typeName = entity.persistenceId,
      entityProps =
        EventSourcedEntitySupervisor.props(eventSourcedClient, stateManagerConfig, concurrencyEnforcer, statsCollector),
      settings = clusterShardingSettings,
      messageExtractor = new EntityIdExtractor(config.numberOfShards),
      allocationStrategy = new DynamicLeastShardAllocationStrategy(1, 10, 2, 0.0),
      handOffStopMessage = EventSourcedEntity.Stop
    )

    new EventSourcedSupport(eventSourcedEntity, config.proxyParallelism, config.relayTimeout)
  }

  private def validate(serviceDescriptor: ServiceDescriptor,
                       methodDescriptors: Map[String, EntityMethodDescriptor]): Unit = {
    val streamedMethods =
      methodDescriptors.values.filter(m => m.method.toProto.getClientStreaming || m.method.toProto.getServerStreaming)
    if (streamedMethods.nonEmpty) {
      val offendingMethods = streamedMethods.map(_.method.getName).mkString(",")
      throw EntityDiscoveryException(
        s"Event sourced entities do not support streamed methods, but ${serviceDescriptor.getFullName} has the following streamed methods: ${offendingMethods}"
      )
    }
    val methodsWithoutKeys = methodDescriptors.values.filter(_.keyFieldsCount < 1)
    if (methodsWithoutKeys.nonEmpty) {
      val offendingMethods = methodsWithoutKeys.map(_.method.getName).mkString(",")
      throw new EntityDiscoveryException(
        s"Event sourced entities do not support methods whose parameters do not have at least one field marked as entity_key, " +
        "but ${serviceDescriptor.getFullName} has the following methods without keys: ${offendingMethods}"
      )
    }
  }
}

private class EventSourcedSupport(eventSourcedEntity: ActorRef,
                                  parallelism: Int,
                                  private implicit val relayTimeout: Timeout)
    extends EntityTypeSupport {
  import akka.pattern.ask

  override def handler(method: EntityMethodDescriptor,
                       metadata: Metadata): Flow[EntityCommand, UserFunctionReply, NotUsed] =
    Flow[EntityCommand].mapAsync(parallelism)(
      command =>
        (eventSourcedEntity ? EntityTypeSupport.mergeStreamLevelMetadata(metadata, command))
          .mapTo[UserFunctionReply]
    )

  override def handleUnary(command: EntityCommand): Future[UserFunctionReply] =
    (eventSourcedEntity ? command).mapTo[UserFunctionReply]
}

private final class EntityIdExtractor(shards: Int) extends HashCodeMessageExtractor(shards) {
  override final def entityId(message: Any): String = message match {
    case command: EntityCommand => command.entityId
  }
} 
Example 102
Source File: HttpClientProvider.scala    From reactive-nakadi   with MIT License 5 votes vote down vote up
package org.zalando.react.nakadi.client.providers

import java.security.SecureRandom
import java.security.cert.X509Certificate
import javax.net.ssl.{SSLContext, TrustManager, X509TrustManager}

import akka.actor.ActorContext
import akka.http.scaladsl.Http.OutgoingConnection
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.http.scaladsl.settings.ClientConnectionSettings
import akka.http.scaladsl.{Http, HttpsConnectionContext}
import akka.stream.scaladsl.Flow

import scala.concurrent.Future
import scala.concurrent.duration._


class HttpClientProvider(actorContext: ActorContext,
                         server: String, port: Int,
                         isConnectionSSL: Boolean = false,
                         acceptAnyCertificate: Boolean = false,
                         connectionTimeout: FiniteDuration) {

  val http = Http(actorContext.system)

  private val settings = {
    ClientConnectionSettings
      .apply(actorContext.system)
      .withConnectingTimeout(connectionTimeout)
      .withIdleTimeout(Duration.Inf)
  }

  val connection: Flow[HttpRequest, HttpResponse, Future[OutgoingConnection]] = {

    isConnectionSSL match {
      case true =>
        val sslContext = if (!acceptAnyCertificate) SSLContext.getDefault else {

          val permissiveTrustManager: TrustManager = new X509TrustManager() {
            override def checkClientTrusted(chain: Array[X509Certificate], authType: String): Unit = {}
            override def checkServerTrusted(chain: Array[X509Certificate], authType: String): Unit = {}
            override def getAcceptedIssuers(): Array[X509Certificate] = Array.empty
          }

          val ctx = SSLContext.getInstance("TLS")
          ctx.init(Array.empty, Array(permissiveTrustManager), new SecureRandom())
          ctx
        }
        http.outgoingConnectionHttps(server, port, new HttpsConnectionContext(sslContext), settings = settings)
      case false =>
        http.outgoingConnection(server, port, settings = settings)
    }
  }

} 
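
Pushing a single request through the materialized connection flow — a sketch, assuming some ActorContext value `context`, an implicit materializer, and Source/Sink imports:

val provider = new HttpClientProvider(context, "nakadi.example.org", 443,
  isConnectionSSL = true, connectionTimeout = 5.seconds)

val response: Future[HttpResponse] =
  Source.single(HttpRequest(uri = "/event-types"))
    .via(provider.connection)
    .runWith(Sink.head)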
Example 103
Source File: WFConsumerServiceImpl.scala    From Scala-Reactive-Programming   with MIT License 5 votes vote down vote up
package com.packt.publishing.wf.consumer.impl

import akka.stream.scaladsl.Flow
import akka.{Done, NotUsed}
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.persistence.PersistentEntityRegistry
import com.packt.publishing.wf.api.WFService
import com.packt.publishing.wf.api.model.WFMessage
import com.packt.publishing.wf.consumer.api.WFConsumerService
import com.packt.publishing.wf.consumer.impl.repositories.WFRepository
import com.packt.publishing.wf.consumer.api.models.WeatherForcasting

class WFConsumerServiceImpl(registry: PersistentEntityRegistry, wfService: WFService, wfRepository: WFRepository)
  extends WFConsumerService {

  wfService.wfTopic
    .subscribe
    .atLeastOnce(
      Flow[WFMessage].map { wf =>
        putWFMessage(wf)
        Done
      }
    )

  var lastObservedMessage: WeatherForcasting = _

  private def putWFMessage(wfMessage: WFMessage) = {
    entityRef(wfMessage.city.toString, wfMessage.temperature.toString).ask(SaveNewWF(wfMessage.city, wfMessage.temperature))
    lastObservedMessage = WeatherForcasting(wfMessage.city,wfMessage.temperature)
  }

  override def findTopTenWFData(): ServiceCall[NotUsed, Seq[WeatherForcasting]] = {
    ServiceCall {
      req => wfRepository.fetchWFData(10)
    }
  }

  override def findOneWFData(): ServiceCall[NotUsed, WeatherForcasting] = {
    ServiceCall {
      req => wfRepository.fetchOneWFData
    }
  }

  override def latestWF(): ServiceCall[NotUsed, WeatherForcasting] = {
    ServiceCall {
      req => scala.concurrent.Future.successful(lastObservedMessage)
    }
  }

  private def entityRef(city: String, temperature:String) = registry.refFor[WFEntity](city)
} 
Example 104
Source File: ChatController.scala    From Scala-Reactive-Programming   with MIT License 5 votes vote down vote up
package controllers

import java.net.URI
import javax.inject._
import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.Materializer
import akka.stream.scaladsl.{BroadcastHub, Flow, Keep, MergeHub, Source}
import play.api.Logger
import play.api.mvc._
import scala.concurrent.{ExecutionContext, Future}

@Singleton
class ChatController @Inject()(cc: ControllerComponents)
                              (implicit actorSystem: ActorSystem,
                               mat: Materializer,
                               executionContext: ExecutionContext,
                               webJarsUtil: org.webjars.play.WebJarsUtil) 
                               extends AbstractController(cc) with RequestMarkerContext {

  private type WSMessage = String

  private val logger = Logger(getClass)

  private implicit val logging = Logging(actorSystem.eventStream, logger.underlyingLogger.getName)

  private val (chatSink, chatSource) = {
    val source = MergeHub.source[WSMessage]
      .log("source")
      .recoverWithRetries(-1, { case _: Exception ⇒ Source.empty })

    val sink = BroadcastHub.sink[WSMessage]
    source.toMat(sink)(Keep.both).run()
  }

  private val userFlow: Flow[WSMessage, WSMessage, _] = {
     Flow.fromSinkAndSource(chatSink, chatSource)
  }

  def index: Action[AnyContent] = Action { implicit request: RequestHeader =>
    val webSocketUrl = routes.ChatController.chat().webSocketURL()
    logger.info("index: ")
    Ok(views.html.index(webSocketUrl))
  }

  def chat(): WebSocket = {
    WebSocket.acceptOrResult[WSMessage, WSMessage] {
      case rh if sameOriginCheck(rh) =>
        Future.successful(userFlow).map { flow =>
          Right(flow)
        }.recover {
          case e: Exception =>
            val msg = "Cannot create websocket"
            logger.error(msg, e)
            val result = InternalServerError(msg)
            Left(result)
        }

      case rejected =>
        logger.error(s"Request ${rejected} failed same origin check")
        Future.successful {
          Left(Forbidden("forbidden"))
        }
    }
  }

  private def sameOriginCheck(implicit rh: RequestHeader): Boolean = {
    logger.debug("Checking the ORIGIN ")
    
    rh.headers.get("Origin") match {
      case Some(originValue) if originMatches(originValue) =>
        logger.debug(s"originCheck: originValue = $originValue")
        true

      case Some(badOrigin) =>
        logger.error(s"originCheck: rejecting request because Origin header value ${badOrigin} is not in the same origin")
        false

      case None =>
        logger.error("originCheck: rejecting request because no Origin header found")
        false
    }
  }

  private def originMatches(origin: String): Boolean = {
    try {
      val url = new URI(origin)
      url.getHost == "localhost" &&
        (url.getPort match { case 9000 | 19001 => true; case _ => false })
    } catch {
      case e: Exception => false
    }
  }

} 
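
The MergeHub/BroadcastHub pair is the heart of this controller; below is a self-contained sketch of the same bus pattern outside Play (object name and messages are invented):

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{BroadcastHub, Flow, Keep, MergeHub, Sink, Source}

object ChatBusSketch extends App {
  implicit val system = ActorSystem("bus")
  implicit val mat = ActorMaterializer()

  // one shared sink/source pair; every materialization of `bus` attaches to it,
  // so each connected client sees every other client's messages
  val (sink, source) = MergeHub.source[String](perProducerBufferSize = 16)
    .toMat(BroadcastHub.sink[String](bufferSize = 256))(Keep.both)
    .run()

  val bus: Flow[String, String, _] = Flow.fromSinkAndSource(sink, source)

  source.runForeach(msg => println(s"bus saw: $msg"))
  Source.single("hello").via(bus).runWith(Sink.ignore)
}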
Example 105
Source File: AkkaStreamsHelloWorldApp3.scala    From Scala-Reactive-Programming   with MIT License 5 votes vote down vote up
package com.packt.publishing.akka.streams.hello

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Keep, RunnableGraph, Sink, Source}
import akka.{Done, NotUsed}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Failure, Success}

object AkkaStreamsHelloWorldApp3 extends App{

  implicit val actorSystem = ActorSystem("HelloWorldSystem")
  implicit val materializer = ActorMaterializer()

  val helloWorldSource:Source[String,NotUsed] = Source.single("Akka Streams Hello World")
  val helloWorldSink: Sink[String,Future[Done]] = Sink.foreach(println)
  val helloWorldFlow:Flow[String,String,NotUsed] = Flow[String].map(str => str.toUpperCase)

  val helloWorldGraph:RunnableGraph[NotUsed] = helloWorldSource
                                                  .via(helloWorldFlow)
                                                  .to(helloWorldSink)

  val helloWorldGraph2:RunnableGraph[Future[Done]] = helloWorldSource
                                                  .via(helloWorldFlow)
                                                  .toMat(helloWorldSink)(Keep.right)

  helloWorldGraph.run

  val helloWorldMaterializedValue: Future[Done] = helloWorldGraph2.run
  helloWorldMaterializedValue.onComplete { result =>
    result match {
      case Success(Done) =>
        println("HelloWorld Stream ran successfully.")
      case Failure(exception) =>
        println(s"HelloWorld Stream ran into an issue: ${exception}.")
    }
    // terminate only after the stream has finished, otherwise the
    // callback above may never get a chance to run
    actorSystem.terminate()
  }
} 
Example 107
Source File: TsvRetrieverFromFile.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.dc.stream

import java.io.{BufferedWriter, File, FileWriter}

import akka.actor.ActorSystem
import akka.stream.{KillSwitch, KillSwitches, Materializer}
import akka.stream.Supervision.Decider
import akka.stream.contrib.SourceGen
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import akka.util.ByteString
import cmwell.dc.LazyLogging
import cmwell.dc.stream.MessagesTypesAndExceptions.{DcInfo, InfotonData}
import cmwell.dc.stream.TsvRetriever.{logger, TsvFlowOutput}
import cmwell.util.resource._

import scala.concurrent.Future
import scala.util.{Failure, Success}
import scala.concurrent.ExecutionContext.Implicits.global


object TsvRetrieverFromFile extends LazyLogging {

  def apply(dcInfo: DcInfo)(implicit mat: Materializer,
                            system: ActorSystem): Source[InfotonData, (KillSwitch, Future[Seq[Option[String]]])] = {
    val persistFile = dcInfo.tsvFile.get + ".persist"

    def appendToPersistFile(str: String): Unit = {
      val bw = new BufferedWriter(new FileWriter(persistFile, true))
      bw.write(str)
      bw.close()
    }

    val linesToDrop = dcInfo.positionKey.fold {
      if (!new File(persistFile).exists) 0L
      else using(scala.io.Source.fromFile(persistFile))(_.getLines.toList.last.toLong)
    }(pos => pos.toLong)
    val positionKeySink = Flow[InfotonData]
      .recover {
        case e: Throwable => InfotonData(null, null, -1)
      }
      .scan(linesToDrop) {
        case (count, InfotonData(null, null, -1)) => {
          appendToPersistFile("crash at: " + count + "\n" + count.toString + "\n")
          count
        }
        case (count, _) => {
          val newCount = count + 1
          if (newCount % 10000 == 0) appendToPersistFile(newCount.toString + "\n")
          newCount
        }
      }
      .toMat(Sink.last)(
        (_, right) =>
          right.map { count =>
            appendToPersistFile(count.toString + "\n")
            Seq.fill(2)(Option(count.toString))
        }
      )

    Source
      .fromIterator(() => scala.io.Source.fromFile(dcInfo.tsvFile.get).getLines())
      .drop {
        logger.info(s"Dropping $linesToDrop initial lines from file ${dcInfo.tsvFile.get} for sync ${dcInfo.key}")
        linesToDrop
      }
      .viaMat(KillSwitches.single)(Keep.right)
      .map(line => TsvRetriever.parseTSVAndCreateInfotonDataFromIt(ByteString(line)))
      .alsoToMat(positionKeySink)(Keep.both)
  }
} 
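
The interesting trick above is materializing both a KillSwitch and a side-channel Future from a single stream. A stripped-down sketch of that shape, with the file bookkeeping replaced by a simple fold:

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, KillSwitches, UniqueKillSwitch}
import akka.stream.scaladsl.{Keep, Sink, Source}

import scala.concurrent.Future

object KillSwitchSketch extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  val (switch: UniqueKillSwitch, count: Future[Int]) =
    Source(1 to 1000000)
      .viaMat(KillSwitches.single)(Keep.right)
      .alsoToMat(Sink.fold[Int, Int](0)((acc, _) => acc + 1))(Keep.both)
      .to(Sink.ignore)
      .run()

  // stops the stream early; `count` then completes with the elements seen so far
  switch.shutdown()
}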
Example 108
Source File: ConcurrentFlow.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.dc.stream.akkautils

import akka.NotUsed
import akka.stream.{FlowShape, Graph}
import akka.stream.scaladsl.{Balance, Flow, GraphDSL, Merge}


object ConcurrentFlow {

  def apply[I, O](parallelism: Int)(flow: Graph[FlowShape[I, O], NotUsed]): Graph[FlowShape[I, O], NotUsed] =
    GraphDSL.create() { implicit builder =>
      import GraphDSL.Implicits._
      val balancer = builder.add(Balance[I](parallelism))
      val merger = builder.add(Merge[O](parallelism))
      for (i <- 0 until parallelism) {
        balancer.out(i) ~> flow.async ~> merger.in(i)
      }
      FlowShape(balancer.in, merger.out)
    }
} 
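
A possible way to exercise ConcurrentFlow (the slow stage and the parallelism are arbitrary); note that the Balance/Merge pair does not preserve upstream order:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Sink, Source}
import cmwell.dc.stream.akkautils.ConcurrentFlow

object ConcurrentFlowSketch extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  // four asynchronous copies of this stage run in parallel
  val slowDouble = Flow[Int].map { i => Thread.sleep(10); i * 2 }

  Source(1 to 100)
    .via(ConcurrentFlow(4)(slowDouble))
    .runWith(Sink.foreach(println))
}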
Example 109
Source File: AlgoFlow.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.dc.stream.algo

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Flow
import cmwell.dc.stream.MessagesTypesAndExceptions.{BaseInfotonData, DcInfo, InfotonData}
import cmwell.dc.stream.Util
import cmwell.dc.{LazyLogging, stream}
import cmwell.util.loading.ChildFirstURLClassLoader
import ddpc.data.{AlgoFlow, IndentityFlow}

import scala.concurrent.ExecutionContext

object AlgoFlow extends LazyLogging {

  def algoFlow(dcInfo: DcInfo)
              (implicit ec:ExecutionContext,  mat:ActorMaterializer, system:ActorSystem) = {
    val algoFlow = Util.extractDcType(dcInfo.key.id) match {
      case "remote" =>
        new IndentityFlow().runAlgo(Map.empty[String, String])
      case _ =>
        val algoInfo = dcInfo.dcAlgoData.get
        val algoFlowInstance = ChildFirstURLClassLoader.loadClassFromJar[AlgoFlow](algoInfo.algoClass, algoInfo.algoJarUrl
          , "ddpc.data", Seq("scala", "akka", "org.slf4j"))
        algoFlowInstance.runAlgo(algoInfo.algoParams)
    }
    Flow[InfotonData]
      .map(_.base.data)
      .via(algoFlow)
      .map(rdf => BaseInfotonData(rdf.takeWhile(_ != stream.space).utf8String, rdf))

  }
}
//TODO:
//persist position per bulk 
Example 110
Source File: RefsEnricher.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.bg.imp

import akka.NotUsed
import akka.stream.FlowShape
import akka.stream.contrib.PartitionWith
import akka.stream.scaladsl.{Flow, GraphDSL, Merge, Partition}
import cmwell.bg.BGMetrics
import cmwell.common.formats.BGMessage
import cmwell.common._
import cmwell.zstore.ZStore
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.ExecutionContext

object RefsEnricher extends LazyLogging {

  def toSingle(bgm: BGMetrics, irwReadConcurrency: Int, zStore: ZStore)
              (implicit ec: ExecutionContext): Flow[BGMessage[Command], BGMessage[SingleCommand], NotUsed] = {

    Flow.fromGraph(GraphDSL.create() { implicit b =>
      import GraphDSL.Implicits._

      // CommandRef goes left, all rest go right
      // update metrics for each type of command
      val commandsPartitioner = b.add(PartitionWith[BGMessage[Command], BGMessage[CommandRef], BGMessage[Command]] {
        case bgm @ BGMessage(_, CommandRef(_)) => Left(bgm.asInstanceOf[BGMessage[CommandRef]])
        case bgm => Right(bgm)
      })

      val commandRefsFetcher = Flow[BGMessage[CommandRef]].mapAsync(irwReadConcurrency) {
        case bgMessage @ BGMessage(_, CommandRef(ref)) => {
          zStore.get(ref).map { payload =>
            bgMessage.copy(message = CommandSerializer.decode(payload))
          }
        }
      }

      val singleCommandsMerge = b.add(Merge[BGMessage[Command]](2))

      commandsPartitioner.out0 ~> commandRefsFetcher ~> singleCommandsMerge.in(0)

      commandsPartitioner.out1 ~> singleCommandsMerge.in(1)

      FlowShape(commandsPartitioner.in,singleCommandsMerge.out.map {
        bgMessage => {
          // cast to SingleCommand while updating metrics
          bgMessage.message match {
            case wc: WriteCommand           => bgm.writeCommandsCounter += 1
                                               bgm.infotonCommandWeightHist += wc.infoton.weight
            case oc: OverwriteCommand       => bgm.overrideCommandCounter += 1
                                               bgm.infotonCommandWeightHist += oc.infoton.weight
            case _: UpdatePathCommand       => bgm.updatePathCommandsCounter += 1
            case _: DeletePathCommand       => bgm.deletePathCommandsCounter += 1
            case _: DeleteAttributesCommand => bgm.deleteAttributesCommandsCounter += 1
            case unknown                    => logger.error(s"unknown command [$unknown]")
          }
          bgm.commandMeter.mark()
          bgMessage.copy(message = bgMessage.message.asInstanceOf[SingleCommand])
        }
      }.outlet)
    })
  }
} 
Example 111
Source File: DynamoDbStreamsAkkaClient.scala    From reactive-aws-clients   with MIT License 5 votes vote down vote up
// Auto-Generated
package com.github.j5ik2o.reactive.aws.dynamodb.streams.akka

import akka.NotUsed
import akka.stream.scaladsl.{ Flow, Source }
import com.github.j5ik2o.reactive.aws.dynamodb.streams.DynamoDbStreamsAsyncClient
import software.amazon.awssdk.services.dynamodb.model._

object DynamoDbStreamsAkkaClient {

  def apply(asyncClient: DynamoDbStreamsAsyncClient): DynamoDbStreamsAkkaClient = new DynamoDbStreamsAkkaClient {
    override val underlying: DynamoDbStreamsAsyncClient = asyncClient
  }

  val DefaultParallelism: Int = 1

}

trait DynamoDbStreamsAkkaClient {

  import DynamoDbStreamsAkkaClient._

  val underlying: DynamoDbStreamsAsyncClient

  def describeStreamSource(
      describeStreamRequest: DescribeStreamRequest,
      parallelism: Int = DefaultParallelism
  ): Source[DescribeStreamResponse, NotUsed] =
    Source.single(describeStreamRequest).via(describeStreamFlow(parallelism))

  def describeStreamFlow(
      parallelism: Int = DefaultParallelism
  ): Flow[DescribeStreamRequest, DescribeStreamResponse, NotUsed] =
    Flow[DescribeStreamRequest].mapAsync(parallelism) { describeStreamRequest =>
      underlying.describeStream(describeStreamRequest)
    }

  def describeStreamPaginatorFlow: Flow[DescribeStreamRequest, DescribeStreamResponse, NotUsed] =
    Flow[DescribeStreamRequest].flatMapConcat { request =>
      Source.fromPublisher(underlying.describeStreamPaginator(request))
    }

  def getRecordsSource(
      getRecordsRequest: GetRecordsRequest,
      parallelism: Int = DefaultParallelism
  ): Source[GetRecordsResponse, NotUsed] =
    Source.single(getRecordsRequest).via(getRecordsFlow(parallelism))

  def getRecordsFlow(parallelism: Int = DefaultParallelism): Flow[GetRecordsRequest, GetRecordsResponse, NotUsed] =
    Flow[GetRecordsRequest].mapAsync(parallelism) { getRecordsRequest =>
      underlying.getRecords(getRecordsRequest)
    }

  def getShardIteratorSource(
      getShardIteratorRequest: GetShardIteratorRequest,
      parallelism: Int = DefaultParallelism
  ): Source[GetShardIteratorResponse, NotUsed] =
    Source.single(getShardIteratorRequest).via(getShardIteratorFlow(parallelism))

  def getShardIteratorFlow(
      parallelism: Int = DefaultParallelism
  ): Flow[GetShardIteratorRequest, GetShardIteratorResponse, NotUsed] =
    Flow[GetShardIteratorRequest].mapAsync(parallelism) { getShardIteratorRequest =>
      underlying.getShardIterator(getShardIteratorRequest)
    }

  def listStreamsSource(
      listStreamsRequest: ListStreamsRequest,
      parallelism: Int = DefaultParallelism
  ): Source[ListStreamsResponse, NotUsed] =
    Source.single(listStreamsRequest).via(listStreamsFlow(parallelism))

  def listStreamsFlow(parallelism: Int = DefaultParallelism): Flow[ListStreamsRequest, ListStreamsResponse, NotUsed] =
    Flow[ListStreamsRequest].mapAsync(parallelism) { listStreamsRequest =>
      underlying.listStreams(listStreamsRequest)
    }

  def listStreamsSource(): Source[ListStreamsResponse, NotUsed] =
    Source.fromFuture(underlying.listStreams())

  def listStreamsPaginatorSource: Source[ListStreamsResponse, NotUsed] =
    Source.fromPublisher(underlying.listStreamsPaginator())

  def listStreamsPaginatorFlow: Flow[ListStreamsRequest, ListStreamsResponse, NotUsed] =
    Flow[ListStreamsRequest].flatMapConcat { request =>
      Source.fromPublisher(underlying.listStreamsPaginator(request))
    }

} 
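
A sketch of calling one of the generated sources, assuming an already-configured DynamoDbStreamsAsyncClient wrapper and a known stream ARN (both are placeholders here):

import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import com.github.j5ik2o.reactive.aws.dynamodb.streams.DynamoDbStreamsAsyncClient
import com.github.j5ik2o.reactive.aws.dynamodb.streams.akka.DynamoDbStreamsAkkaClient
import software.amazon.awssdk.services.dynamodb.model.{DescribeStreamRequest, DescribeStreamResponse}

import scala.concurrent.Future

object DescribeSketch {
  def describeArn(asyncClient: DynamoDbStreamsAsyncClient, streamArn: String)(
      implicit mat: ActorMaterializer): Future[DescribeStreamResponse] = {
    val client  = DynamoDbStreamsAkkaClient(asyncClient)
    val request = DescribeStreamRequest.builder().streamArn(streamArn).build()
    client.describeStreamSource(request).runWith(Sink.head)
  }
}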
Example 112
Source File: Graph1.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.basic

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, ClosedShape }
import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source }

import scala.collection.immutable
import scala.io.StdIn

object Graph1 extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  val graph = g(1 to 2)

  graph.run()

  StdIn.readLine()
  system.terminate()

  def g(data: immutable.Iterable[Int]) =
    RunnableGraph.fromGraph(GraphDSL.create() { implicit b: GraphDSL.Builder[NotUsed] =>
      import GraphDSL.Implicits._
      val in = Source(data)
      val out = Sink.foreach(println)

      val bcast = b.add(Broadcast[Int](2))
      val merge = b.add(Merge[Int](2))

      val f1, f2, f3, f4 = Flow[Int].map(_ + 10)

      in ~> f1 ~> bcast ~> f2 ~> merge ~> f3 ~> out
      bcast ~> f4 ~> merge

      ClosedShape
    })
} 
Example 113
Source File: MusicCommands.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.examplecore.music

import ackcord._
import ackcord.commands.{CommandBuilder, CommandController, NamedCommand, VoiceGuildMemberCommandMessage}
import ackcord.data.{GuildId, TextChannel}
import ackcord.examplecore.music.MusicHandler.{NextTrack, QueueUrl, StopMusic, TogglePause}
import akka.NotUsed
import akka.actor.typed.scaladsl.AskPattern._
import akka.actor.typed.{ActorRef, ActorSystem}
import akka.stream.scaladsl.{Flow, Keep, Sink}
import akka.stream.typed.scaladsl.ActorFlow
import akka.util.Timeout

class MusicCommands(requests: Requests, guildId: GuildId, musicHandler: ActorRef[MusicHandler.Command])(
    implicit timeout: Timeout,
    system: ActorSystem[Nothing]
) extends CommandController(requests) {

  val VoiceCommand: CommandBuilder[VoiceGuildMemberCommandMessage, NotUsed] =
    GuildVoiceCommand.andThen(CommandBuilder.inOneGuild(guildId))

  val queue: NamedCommand[String] =
    VoiceCommand.named("&", Seq("q", "queue")).parsing[String].withSideEffects { m =>
      musicHandler.ask[MusicHandler.CommandAck.type](QueueUrl(m.parsed, m.textChannel, m.voiceChannel.id, _))
    }

  private def simpleCommand(
      aliases: Seq[String],
      mapper: (TextChannel, ActorRef[MusicHandler.CommandAck.type]) => MusicHandler.MusicHandlerEvents
  ): NamedCommand[NotUsed] = {
    VoiceCommand.andThen(CommandBuilder.inOneGuild(guildId)).named("&", aliases, mustMention = true).toSink {
      Flow[VoiceGuildMemberCommandMessage[NotUsed]]
        .map(_.textChannel)
        .via(ActorFlow.ask(requests.parallelism)(musicHandler)(mapper))
        .toMat(Sink.ignore)(Keep.none)
    }
  }

  val stop: NamedCommand[NotUsed] = simpleCommand(Seq("s", "stop"), StopMusic.apply)

  val next: NamedCommand[NotUsed] = simpleCommand(Seq("n", "next"), NextTrack.apply)

  val pause: NamedCommand[NotUsed] = simpleCommand(Seq("p", "pause"), TogglePause.apply)
} 
Example 114
Source File: ViewIndexer.scala    From nexus-kg   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.indexing

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, ViewCache}
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.AppConfig._
import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem}
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
object ViewIndexer {

  private implicit val log = Logger[ViewIndexer.type]

  def start[F[_]: Timer](views: Views[F], viewCache: ViewCache[F])(
      implicit projectCache: ProjectCache[F],
      F: Effect[F],
      as: ActorSystem,
      projectInitializer: ProjectInitializer[F],
      adminClient: AdminClient[F],
      config: AppConfig
  ): StreamSupervisor[F, Unit] = {

    implicit val authToken                = config.iam.serviceAccountToken
    implicit val indexing: IndexingConfig = config.keyValueStore.indexing
    implicit val ec: ExecutionContext     = as.dispatcher
    implicit val tm: Timeout              = Timeout(config.keyValueStore.askTimeout)
    val name                              = "view-indexer"

    def toView(event: Event): F[Option[View]] =
      fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project =>
        views.fetchView(event.id).value.map {
          case Left(err) =>
            log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'")
            None
          case Right(view) => Some(view)
        }
      }

    val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.View.value.show}", name)
    val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any]
      .collectCast[Event]
      .groupedWithin(indexing.batch, indexing.batchTimeout)
      .distinct()
      .mergeEmit()
      .mapAsync(toView)
      .collectSome[View]
      .runAsync(viewCache.put)()
      .flow
      .map(_ => ())

    StreamSupervisor.startSingleton(F.delay(source.via(flow)), name)
  }
}
// $COVERAGE-ON$ 
Example 115
Source File: ResolverIndexer.scala    From nexus-kg   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.indexing

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, ResolverCache}
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.AppConfig._
import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.kg.resolve.Resolver
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem}
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
object ResolverIndexer {

  private implicit val log = Logger[ResolverIndexer.type]

  
  final def start[F[_]: Timer](resolvers: Resolvers[F], resolverCache: ResolverCache[F])(
      implicit
      projectCache: ProjectCache[F],
      as: ActorSystem,
      F: Effect[F],
      projectInitializer: ProjectInitializer[F],
      adminClient: AdminClient[F],
      config: AppConfig
  ): StreamSupervisor[F, Unit] = {
    implicit val authToken                = config.iam.serviceAccountToken
    implicit val indexing: IndexingConfig = config.keyValueStore.indexing
    implicit val ec: ExecutionContext     = as.dispatcher
    implicit val tm: Timeout              = Timeout(config.keyValueStore.askTimeout)

    val name = "resolver-indexer"

    def toResolver(event: Event): F[Option[Resolver]] =
      fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project =>
        resolvers.fetchResolver(event.id).value.map {
          case Left(err) =>
            log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'")
            None
          case Right(resolver) => Some(resolver)
        }
      }

    val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.Resolver.value.show}", name)
    val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any]
      .collectCast[Event]
      .groupedWithin(indexing.batch, indexing.batchTimeout)
      .distinct()
      .mergeEmit()
      .mapAsync(toResolver)
      .collectSome[Resolver]
      .runAsync(resolverCache.put)()
      .flow
      .map(_ => ())

    StreamSupervisor.startSingleton(F.delay(source.via(flow)), name)
  }
}
// $COVERAGE-ON$ 
Example 116
Source File: StorageIndexer.scala    From nexus-kg   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.indexing

import java.time.Instant

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, StorageCache}
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.AppConfig._
import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.kg.storage.Storage
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem}
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
object StorageIndexer {

  private implicit val log = Logger[StorageIndexer.type]

  def start[F[_]: Timer](storages: Storages[F], storageCache: StorageCache[F])(
      implicit projectCache: ProjectCache[F],
      F: Effect[F],
      as: ActorSystem,
      projectInitializer: ProjectInitializer[F],
      adminClient: AdminClient[F],
      config: AppConfig
  ): StreamSupervisor[F, Unit] = {

    implicit val authToken                = config.iam.serviceAccountToken
    implicit val indexing: IndexingConfig = config.keyValueStore.indexing
    implicit val ec: ExecutionContext     = as.dispatcher
    implicit val tm: Timeout              = Timeout(config.keyValueStore.askTimeout)
    val name                              = "storage-indexer"

    def toStorage(event: Event): F[Option[(Storage, Instant)]] =
      fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project =>
        storages.fetchStorage(event.id).value.map {
          case Left(err) =>
            log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'")
            None
          case Right(timedStorage) => Some(timedStorage)
        }
      }

    val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.Storage.value.show}", name)
    val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any]
      .collectCast[Event]
      .groupedWithin(indexing.batch, indexing.batchTimeout)
      .distinct()
      .mergeEmit()
      .mapAsync(toStorage)
      .collectSome[(Storage, Instant)]
      .runAsync { case (storage, instant) => storageCache.put(storage)(instant) }()
      .flow
      .map(_ => ())

    StreamSupervisor.startSingleton(F.delay(source.via(flow)), name)
  }
}
// $COVERAGE-ON$ 
Example 117
Source File: JdbcFlow.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package mass.connector.sql

import java.nio.charset.{ Charset, StandardCharsets }
import java.sql.ResultSet

import akka.NotUsed
import akka.stream.scaladsl.Flow
import akka.util.ByteString
import fusion.jdbc.util.JdbcUtils

import scala.collection.immutable

case class JdbcResultSet(rs: ResultSet, values: immutable.IndexedSeq[AnyRef])

object JdbcFlow {
  def flowToText(valueSeparator: Char = ','): Flow[immutable.IndexedSeq[AnyRef], String, NotUsed] =
    Flow[immutable.IndexedSeq[AnyRef]].map { values =>
      val builder = new java.lang.StringBuilder()
      var i = 0
      while (i < values.length) {
        builder.append(values(i).toString)
        i += 1
        if (i < values.length) {
          builder.append(valueSeparator)
        }
      }
      builder.toString
    }

  def flowToSeq: Flow[ResultSet, immutable.IndexedSeq[AnyRef], NotUsed] =
    Flow[ResultSet].map { rs =>
      val metaData = rs.getMetaData
      (1 to rs.getMetaData.getColumnCount).map { i =>
        val typ = metaData.getColumnType(i)
        if (JdbcUtils.isString(typ)) {
          rs.getString(i)
        } else
          rs.getObject(i)
      }
    }

  def flowToByteString(
      valueSeparator: Char = ',',
      charset: Charset = StandardCharsets.UTF_8): Flow[immutable.IndexedSeq[AnyRef], ByteString, NotUsed] =
    Flow[immutable.IndexedSeq[AnyRef]].map { values =>
      val builder = ByteString.newBuilder
      var i = 0
      while (i < values.length) {
        builder.putBytes(values(i).toString.getBytes(charset))
        i += 1
        if (i < values.length) {
          builder.putByte(valueSeparator.toByte)
        }
      }
      builder.result()
    }

  def flowJdbcResultSet: Flow[ResultSet, JdbcResultSet, NotUsed] =
    Flow[ResultSet].map { rs =>
      val metaData = rs.getMetaData
      JdbcResultSet(rs, (1 to metaData.getColumnCount).map(i => rs.getObject(i)))
    }
} 
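
A small sketch feeding hand-built rows (the kind flowToSeq would produce) through flowToText; the names and values are invented:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import mass.connector.sql.JdbcFlow

import scala.collection.immutable

object JdbcFlowSketch extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  val rows: List[immutable.IndexedSeq[AnyRef]] =
    List(Vector("alice", Integer.valueOf(30)), Vector("bob", Integer.valueOf(25)))

  // prints "alice|30" and "bob|25"
  Source(rows).via(JdbcFlow.flowToText('|')).runWith(Sink.foreach(println))
}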
Example 118
Source File: MaterializeValue.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.graph

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Keep, RunnableGraph, Sink, Source, Tcp }
import akka.util.ByteString

import scala.concurrent.{ Future, Promise }

object MaterializeValue {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  case class MyClass(private val p: Promise[Option[Int]], conn: Tcp.OutgoingConnection) extends AutoCloseable {
    override def close(): Unit = p.trySuccess(None)
  }

  // Materializes to Promise[Option[Int]]
  val source: Source[Int, Promise[Option[Int]]] = Source.maybe[Int]

  // Materializes to NotUsed
  val flow1: Flow[Int, Int, NotUsed] = Flow[Int].take(100)

  // Materializes to Promise[Int]
  val nestedSource
      : Source[Int, Promise[Option[Int]]] = source.viaMat(flow1)(Keep.left).named("nestedSource") // viaMat === via()(Keep.left)
  //  val nestedSource2: Source[Int, NotUsed] = source.viaMat(flow1)(Keep.right)

  // Materializes to NotUsed
  val flow2: Flow[Int, ByteString, NotUsed] =
    Flow[Int].map(i => ByteString(i.toString))

  // Materializes to Future[Tcp.OutgoingConnection]   (Keep.right)
  val flow3: Flow[ByteString, ByteString, Future[Tcp.OutgoingConnection]] =
    Tcp().outgoingConnection("localhost", 8080)

  val nestedFlow: Flow[Int, ByteString, Future[Tcp.OutgoingConnection]] =
    flow2.viaMat(flow3)(Keep.right)

  val nestedFlow2: Flow[Int, ByteString, NotUsed] =
    flow2.viaMat(flow3)(Keep.left) // flow2.via(flow3)
  val nestedFlow3: Flow[Int, ByteString, (NotUsed, Future[Tcp.OutgoingConnection])] =
    flow2.viaMat(flow3)(Keep.both)

  // Materializes to Future[String]   (Keep.right)
  val sink: Sink[ByteString, Future[String]] =
    Sink.fold[String, ByteString]("")(_ + _.utf8String)

  val nestedSink: Sink[Int, (Future[Tcp.OutgoingConnection], Future[String])] =
    nestedFlow.toMat(sink)(Keep.both)

  def f(p: Promise[Option[Int]], rest: (Future[Tcp.OutgoingConnection], Future[String])): Future[MyClass] = {
    val connFuture = rest._1
    connFuture.map(outConn => MyClass(p, outConn))
  }

  // Materializes to Future[MyClass]
  val runnableGraph: RunnableGraph[Future[MyClass]] =
    nestedSource.toMat(nestedSink)(f)

  val r: RunnableGraph[Promise[Option[Int]]] =
    nestedSource.toMat(nestedSink)(Keep.left)

  val r2: RunnableGraph[(Future[Tcp.OutgoingConnection], Future[String])] =
    nestedSource.toMat(nestedSink)(Keep.right)
} 
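
The Keep combinators used throughout this example pick which side's materialized value survives composition; a tiny self-contained illustration:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.{Done, NotUsed}

import scala.concurrent.Future

object KeepSketch extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  val src: Source[Int, NotUsed]    = Source(1 to 3)
  val snk: Sink[Int, Future[Done]] = Sink.foreach(println)

  val left: NotUsed                 = src.toMat(snk)(Keep.left).run()
  val right: Future[Done]           = src.toMat(snk)(Keep.right).run()
  val both: (NotUsed, Future[Done]) = src.toMat(snk)(Keep.both).run()
}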
Example 119
Source File: PartialGraph.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.graph

import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Balance, Broadcast, Flow, GraphDSL, Keep, Merge, RunnableGraph, Sink, Source }
import akka.stream.{ ActorMaterializer, FlowShape, SourceShape }

import scala.concurrent.Future
import scala.io.StdIn

object PartialGraph extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  def partial =
    GraphDSL
      .create() { implicit b =>
        import GraphDSL.Implicits._

        val B = b.add(Broadcast[Int](2))
        val C = b.add(Merge[Int](2))
        val D = Flow[Int].map(_ + 1)
        val E = b.add(Balance[Int](2))
        val F = b.add(Merge[Int](2))

        C <~ F
        B ~> C ~> F
        B ~> D ~> E ~> F

        FlowShape(B.in, E.out(1))
      }
      .named("partial")

  // Convert partial from a FlowShape into a Flow, which gives access to the fluent stream DSL (e.g. the .filter() method)
  val flow = Flow.fromGraph(partial)

  val source = Source.fromGraph(GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._
    val merge = b.add(Merge[Int](2))
    Source.single(0) ~> merge
    Source(List(2, 3, 4)) ~> merge
    SourceShape(merge.out)
  })

  val sink: Sink[Int, Future[Int]] = Flow[Int].map(_ * 2).drop(10).named("nestedFlow").toMat(Sink.head)(Keep.right)

  val closed: RunnableGraph[Future[Int]] =
    source.via(flow.filter(_ > 1)).toMat(sink)(Keep.right)

  closed.run().foreach(println)

  StdIn.readLine()
  system.terminate()
} 
Example 120
Source File: EchoDemo.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.streamio

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Framing, Sink, Source, Tcp }
import akka.util.ByteString
import example.akkastream.streamio.EchoServer.system

import scala.concurrent.Future
import scala.io.StdIn

object EchoServer extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  val connections = Tcp().bind("localhost", 8888)
  connections.runForeach { connection =>
    println(s"New connection from: ${connection.remoteAddress}")

    val echo: Flow[ByteString, ByteString, NotUsed] = Flow[ByteString]
      .via(Framing.delimiter(ByteString("\n"), 256, true))
      .map(_.utf8String)
      .map(_ + "!!!\n")
      .map(ByteString(_))

    connection.handleWith(echo)
  }

  StdIn.readLine()
  system.terminate()
}

object EchoClient extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  val connection = Tcp().outgoingConnection("localhost", 8888)

  val replParser =
    Flow[String].takeWhile(_ != "q").concat(Source.single("BYE")).map { elem =>
      println(s"send msg: $elem")
      ByteString(s"$elem\n")
    }

  val repl = Flow[ByteString]
    .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 256, allowTruncation = true))
    .map(_.utf8String)
    .map(text => println("Server: " + text))
    .map(_ => StdIn.readLine("> "))
    .via(replParser)

  val connected: Future[Tcp.OutgoingConnection] = connection.join(repl).run()

  //  StdIn.readLine()
  //  system.terminate()
}

object EchoDemo {} 
Example 121
Source File: IPDiscoveryFlow.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.voice

import java.nio.ByteOrder

import scala.concurrent.{Future, Promise}

import akka.stream.scaladsl.Flow
import akka.stream.stage._
import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
import akka.util.ByteString

class IPDiscoveryFlow(openValve: () => Unit)
    extends GraphStageWithMaterializedValue[FlowShape[ByteString, ByteString], Future[VoiceUDPFlow.FoundIP]] {

  val in: Inlet[ByteString]   = Inlet("IPDiscoveryFlow.in")
  val out: Outlet[ByteString] = Outlet("IPDiscoveryFlow.out")

  override def shape: FlowShape[ByteString, ByteString] = FlowShape(in, out)

  override def createLogicAndMaterializedValue(
      inheritedAttributes: Attributes
  ): (GraphStageLogic, Future[VoiceUDPFlow.FoundIP]) = {
    val promise = Promise[VoiceUDPFlow.FoundIP]
    val logic = new GraphStageLogicWithLogging(shape) with InHandler with OutHandler {

      override def onPush(): Unit = {
        val data = grab(in)
        log.debug(s"Grabbing data for IP discovery $data")
        val byteBuf = data.asByteBuffer.order(ByteOrder.BIG_ENDIAN)
        val tpe     = byteBuf.getShort

        require(tpe == 0x2, s"Was expecting IP discovery result, got $tpe")

        byteBuf.getShort //Length
        byteBuf.getInt   //SSRC
        val nullTermString = new Array[Byte](64)
        byteBuf.get(nullTermString)
        val address = new String(nullTermString, 0, nullTermString.iterator.takeWhile(_ != 0).length)
        val port    = byteBuf.getChar.toInt //Char is unsigned short

        promise.success(VoiceUDPFlow.FoundIP(address, port))
        log.debug("Success doing IP discovery")

        setHandler(
          in,
          new InHandler {
            override def onPush(): Unit = push(out, grab(in))
          }
        )

        openValve()
      }

      override def onPull(): Unit = pull(in)

      override def onUpstreamFailure(ex: Throwable): Unit = {
        promise.tryFailure(new Exception("Connection failed.", ex))
        super.onUpstreamFailure(ex)
      }

      setHandlers(in, out, this)
    }

    (logic, promise.future)
  }
}
object IPDiscoveryFlow {
  def flow(openValve: () => Unit): Flow[ByteString, ByteString, Future[VoiceUDPFlow.FoundIP]] =
    Flow.fromGraph(new IPDiscoveryFlow(openValve))
} 
Example 122
Source File: GraphComponent.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.basic

import akka.actor.ActorSystem
import akka.stream.FanInShape.{ Init, Name }
import akka.stream._
import akka.stream.scaladsl.{ Balance, Flow, GraphDSL, Merge, MergePreferred, RunnableGraph, Sink, Source }

import scala.collection.immutable
import scala.io.StdIn

case class PriorityWorkerPoolShape[In, Out](jobsIn: Inlet[In], priorityJobsIn: Inlet[In], resultsOut: Outlet[Out])
    extends Shape {
  override def inlets: immutable.Seq[Inlet[_]] = jobsIn :: priorityJobsIn :: Nil

  override def outlets: immutable.Seq[Outlet[_]] = resultsOut :: Nil

  override def deepCopy(): Shape =
    PriorityWorkerPoolShape(jobsIn.carbonCopy(), priorityJobsIn.carbonCopy(), resultsOut.carbonCopy())
}

case class PriorityWorkerPoolShape2[In, Out](_init: Init[Out] = Name("PriorityWorkerPoolShape2"))
    extends FanInShape[Out](_init) {
  override protected def construct(init: Init[Out]): FanInShape[Out] =
    PriorityWorkerPoolShape2(init)

  val jobsIn: Inlet[In] = newInlet[In]("jobsIn")
  val priorityJobsIn: Inlet[In] = newInlet[In]("priorityJobsIn")
  // An Outlet[Out] named "out" is created automatically
}

object PriorityWorkerPool {
  def apply[In, Out](worker: Flow[In, Out, Any], workerCount: Int) =
    GraphDSL.create() { implicit b =>
      import GraphDSL.Implicits._

      val priorityMerge = b.add(MergePreferred[In](1))
      val balance = b.add(Balance[In](workerCount))
      val resultsMerge = b.add(Merge[Out](workerCount))

      for (i <- 0 until workerCount)
        balance.out(i) ~> worker ~> resultsMerge.in(i)

      // after merging the priority and ordinary jobs, send them on to the balancer
      priorityMerge ~> balance

      PriorityWorkerPoolShape(priorityMerge.in(0), priorityMerge.preferred, resultsMerge.out)
    }
}

object GraphComponent extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  val worker1 = Flow[String].map("step 1 " + _)
  val worker2 = Flow[String].map("step 2 " + _)

  val g = RunnableGraph.fromGraph(GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._

    val priorityPool1 = b.add(PriorityWorkerPool(worker1, 4))
    val priorityPool2 = b.add(PriorityWorkerPool(worker2, 2))

    Source(1 to 10).map("job: " + _) ~> priorityPool1.jobsIn
    Source(1 to 10).map("priority job: " + _) ~> priorityPool1.priorityJobsIn

    priorityPool1.resultsOut ~> priorityPool2.jobsIn
    Source(1 to 10).map("one-step, priority " + _) ~> priorityPool2.priorityJobsIn

    priorityPool2.resultsOut ~> Sink.foreach(println)
    ClosedShape
  })

  g.run()

  StdIn.readLine()
  system.terminate()
} 
Example 123
Source File: PartialGraph2.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.basic

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Sink, Source, Zip }
import akka.stream.{ ActorMaterializer, FlowShape, SourceShape }

import scala.io.StdIn

object PartialGraph2 extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  val pairs: Source[(Int, Int), NotUsed] = Source.fromGraph(GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._

    // prepare graph elements
    val zip = b.add(Zip[Int, Int]())

    def ints = Source.fromIterator(() => Iterator.from(1))

    // connect the graph
    ints.filter(_ % 2 != 0) ~> zip.in0
    ints.filter(_ % 2 == 0) ~> zip.in1

    // expose port
    SourceShape(zip.out)
  })

  val firstPair = pairs.runWith(Sink.head)
  firstPair.foreach(println)

  val pairUpWithToString = Flow.fromGraph(GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._
    val broadcast = b.add(Broadcast[Int](2))
    val zip = b.add(Zip[Int, String]())

    broadcast.out(0)                 ~> zip.in0
    broadcast.out(1).map(_.toString) ~> zip.in1

    FlowShape(broadcast.in, zip.out)
  })

  Source(List(1)).via(pairUpWithToString).runWith(Sink.head).foreach(println)

  StdIn.readLine()
  system.terminate()
} 
Example 124
Source File: Graph2.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.basic

import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, ClosedShape }
import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, RunnableGraph, Sink, Source }

import scala.io.StdIn

object Graph2 extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  val topHeadSink = Sink.head[Int]
  val bottomHeadSink = Sink.head[Int]
  val sharedDoubler = Flow[Int].map(_ * 2)

  val g = RunnableGraph.fromGraph(GraphDSL.create(topHeadSink, bottomHeadSink)((_, _)) {
    implicit builder => (topHS, bottomHS) =>
      import GraphDSL.Implicits._

      val broadcast = builder.add(Broadcast[Int](2))
      Source.single(1) ~> broadcast.in

      broadcast ~> sharedDoubler ~> topHS.in
      broadcast ~> sharedDoubler ~> bottomHS.in

      ClosedShape
  })

  val (topF, bottomF) = g.run()
  topF.foreach(v => println(s"top is $v"))
  bottomF.foreach(v => println(s"bottom is $v"))

  StdIn.readLine()
  system.terminate()
} 
Example 125
Source File: ExtrapolateExpand.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.buffer

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Source }

import scala.io.StdIn

object ExtrapolateExpand extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  //  val lastFlow = Flow[Double].extrapolate(Iterator.continually(_))
  //  Source((1 to 10).map(_.toDouble)).via(lastFlow).runWith(Sink.foreach(println))

  //  val initial = 2.0
  //  val seedFlow = Flow[Double].extrapolate(Iterator.continually(_), Some(initial))
  //  Source((1 to 10).map(_.toDouble)).via(seedFlow).runWith(Sink.foreach(println))

  //  val driftFlow = Flow[Double].map(_ -> 0).extrapolate[(Double, Int)] { case (i, _) => Iterator.from(1).map(i -> _) }
  //  Source((1 to 10).map(_.toDouble)).via(driftFlow).runForeach(println)

  val driftFlow = Flow[Double].expand(i => Iterator.from(0).map(i -> _))
  Source((1 to 10).map(_.toDouble)).via(driftFlow).runForeach(println)

  StdIn.readLine()
  system.terminate()
} 
Example 126
Source File: SimplePublishSubscribe.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.dynamichub

import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, KillSwitches, UniqueKillSwitch }
import akka.stream.scaladsl.{ BroadcastHub, Flow, Keep, MergeHub, Sink, Source }
import com.typesafe.scalalogging.StrictLogging

import scala.io.StdIn
import scala.concurrent.duration._

object SimplePublishSubscribe extends App with StrictLogging {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  val (sink, source) =
    MergeHub.source[String](perProducerBufferSize = 16).toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both).run()

  source.runWith(Sink.ignore)

  val busFlow: Flow[String, String, UniqueKillSwitch] = Flow
    .fromSinkAndSource(sink, source)
    .joinMat(KillSwitches.singleBidi[String, String])(Keep.right)
    .backpressureTimeout(3.seconds)

  val switch: UniqueKillSwitch =
    Source.repeat("Hello world!").viaMat(busFlow)(Keep.right).to(Sink.foreach(v => logger.info(s"switch: $v"))).run()

  Thread.sleep(200)
  switch.shutdown()

  StdIn.readLine()
  system.terminate()
} 
Example 127
Source File: WebSocketRoute.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.http_api.ws

import java.util.UUID

import akka.actor.PoisonPill
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.ws.{ Message, TextMessage }
import akka.http.scaladsl.server.Route
import akka.stream._
import akka.stream.scaladsl.{ Flow, Sink, Source }
import akka.util.Timeout
import io.vamp.common.akka.IoC._
import io.vamp.common.http.{ HttpApiDirectives, HttpApiHandlers, TerminateFlowStage }
import io.vamp.common.{ Config, Namespace }
import io.vamp.http_api.ws.WebSocketActor.{ SessionClosed, SessionEvent, SessionOpened, SessionRequest }
import io.vamp.http_api.{ AbstractRoute, LogDirective }

import scala.concurrent.Future

trait WebSocketRoute extends AbstractRoute with WebSocketMarshaller with HttpApiHandlers {
  this: HttpApiDirectives with LogDirective ⇒

  implicit def materializer: Materializer

  private lazy val limit = Config.int("vamp.http-api.websocket.stream-limit")

  protected def websocketApiHandler(implicit namespace: Namespace, timeout: Timeout): Route

  def websocketRoutes(implicit namespace: Namespace, timeout: Timeout) = {
    pathEndOrSingleSlash {
      get {
        extractRequest { request ⇒
          handleWebSocketMessages {
            websocket(request)
          }
        }
      }
    }
  }

  protected def filterWebSocketOutput(message: AnyRef)(implicit namespace: Namespace, timeout: Timeout): Future[Boolean] = Future.successful(true)

  private def apiHandler(implicit namespace: Namespace, timeout: Timeout) = Route.asyncHandler(log {
    websocketApiHandler
  })

  private def websocket(origin: HttpRequest)(implicit namespace: Namespace, timeout: Timeout): Flow[AnyRef, Message, Any] = {
    val id = UUID.randomUUID()

    val in = Flow[AnyRef].collect {
      case TextMessage.Strict(message)  ⇒ Future.successful(message)
      case TextMessage.Streamed(stream) ⇒ stream.limit(limit()).completionTimeout(timeout.duration).runFold("")(_ + _)
    }.mapAsync(parallelism = 3)(identity)
      .mapConcat(unmarshall)
      .map(SessionRequest(apiHandler, id, origin, _))
      .to(Sink.actorRef[SessionEvent](actorFor[WebSocketActor], SessionClosed(id)))

    val out = Source.actorRef[AnyRef](16, OverflowStrategy.dropHead)
      .mapMaterializedValue(actorFor[WebSocketActor] ! SessionOpened(id, _))
      .via(new TerminateFlowStage[AnyRef](_ == PoisonPill))
      .mapAsync(parallelism = 3)(message ⇒ filterWebSocketOutput(message).map(f ⇒ f → message))
      .collect { case (true, m) ⇒ m }
      .map(message ⇒ TextMessage.Strict(marshall(message)))

    Flow.fromSinkAndSource(in, out)
  }
} 
Example 128
Source File: FullStream.scala    From elastic-indexer4s   with MIT License 5 votes vote down vote up
package com.yannick_cw.elastic_indexer4s.indexing_logic

import akka.NotUsed
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import com.yannick_cw.elastic_indexer4s.Index_results.{IndexError, StageSucceeded, StageSuccess}
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

object FullStream extends LazyLogging {

  private def countAndLogSink[A](logPer: FiniteDuration): Sink[A, Future[Int]] =
    Flow[A]
      .groupedWithin(Int.MaxValue, logPer)
      .map(_.length)
      .map { elementsPerTime =>
        logger.info(s"Indexed $elementsPerTime elements last $logPer")
        elementsPerTime
      }
      .toMat(Sink.reduce[Int](_ + _))(Keep.right)

  def run[A](source: Source[A, NotUsed], sink: Sink[A, Future[Unit]], logSpeedInterval: FiniteDuration)(
      implicit materializer: ActorMaterializer,
      ex: ExecutionContext): Future[Either[IndexError, StageSucceeded]] =
    (for {
      count <- source
        .alsoToMat(countAndLogSink(logSpeedInterval))(Keep.right)
        .toMat(sink)(Keep.both)
        .mapMaterializedValue { case (fCount, fDone) => fDone.flatMap(_ => fCount) }
        .run()
    } yield Right(StageSuccess(s"Indexed $count documents successfully")))
      .recover {
        case NonFatal(t) =>
          Left(IndexError("Writing documents failed.", Some(t)))
      }
} 
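
FullStream.run expects a sink that materializes Future[Unit], so an ordinary Sink.ignore needs a small adaptation; a possible invocation (source, interval and adaptation are illustrative):

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import com.yannick_cw.elastic_indexer4s.indexing_logic.FullStream

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

object FullStreamSketch extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  val source: Source[Int, NotUsed] = Source(1 to 10000)
  // adapt Sink.ignore's Future[Done] to the Future[Unit] the signature asks for
  val sink = Sink.ignore.mapMaterializedValue(_.map(_ => ()))

  FullStream.run(source, sink, logSpeedInterval = 5.seconds).foreach(println)
}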
Example 129
Source File: MinimumChunk.scala    From akka-xml-parser   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.akka.xml

import akka.NotUsed
import akka.stream.scaladsl.Flow
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
import akka.util.ByteString


@deprecated("Use FastParsingStage instead","akka-xml-parser 1.0.0")
object MinimumChunk {

  def parser(minimumChunkSize: Int):
  Flow[ByteString, ByteString, NotUsed] = {
    Flow.fromGraph(new StreamingXmlParser(minimumChunkSize))
  }

  private class StreamingXmlParser(minimumChunkSize: Int)
    extends GraphStage[FlowShape[ByteString, ByteString]]
      with StreamHelper
      with ParsingDataFunctions {

    val in: Inlet[ByteString] = Inlet("Chunking.in")
    val out: Outlet[ByteString] = Outlet("Chunking.out")
    override val shape: FlowShape[ByteString, ByteString] = FlowShape(in, out)

    override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
      new GraphStageLogic(shape) {
        private var buffer = ByteString.empty

        setHandler(in, new InHandler {
          override def onPush(): Unit = {
            val elem = grab(in)
            buffer ++= elem
            emitChunk()
          }

          override def onUpstreamFinish(): Unit = {
            emit(out, buffer)
            completeStage()
          }
        })

        setHandler(out, new OutHandler {
          override def onPull(): Unit = {
            pull(in)
          }
        })

        private def emitChunk(): Unit = {
          if (buffer.size > minimumChunkSize) {
            push(out, buffer)
            buffer = ByteString.empty
          } else {
            pull(in)
          }
        }

      }
  }

} 
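
Despite the deprecation, the stage is easy to try out: small chunks go in, buffers larger than the threshold come out, and any remainder is flushed when upstream finishes. A short sketch with made-up input:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import akka.util.ByteString
import uk.gov.hmrc.akka.xml.MinimumChunk

object MinimumChunkSketch extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  // emits "abcd", "efgh" and finally the leftover "ij"
  Source(List("ab", "cd", "ef", "gh", "ij").map(ByteString(_)))
    .via(MinimumChunk.parser(3))
    .runWith(Sink.foreach(bs => println(bs.utf8String)))
}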
Example 130
Source File: VoiceUDPFlow.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.voice

import java.net.InetSocketAddress
import java.nio.ByteOrder

import scala.concurrent.{Future, Promise}

import ackcord.data.{RawSnowflake, UserId}
import ackcord.util.UdpConnectedFlow
import akka.NotUsed
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{BidiFlow, Concat, Flow, GraphDSL, Keep, Source}
import akka.stream.{BidiShape, OverflowStrategy}
import akka.util.ByteString

object VoiceUDPFlow {

  val silence = ByteString(0xF8, 0xFF, 0xFE)

  val SampleRate = 48000
  val FrameSize  = 960
  val FrameTime  = 20

  def flow[Mat](
      remoteAddress: InetSocketAddress,
      ssrc: Int,
      serverId: RawSnowflake,
      userId: UserId,
      secretKeys: Source[Option[ByteString], Mat]
  )(implicit system: ActorSystem[Nothing]): Flow[ByteString, AudioAPIMessage.ReceivedData, (Mat, Future[FoundIP])] =
    NaclBidiFlow
      .bidiFlow(ssrc, serverId, userId, secretKeys)
      .atopMat(voiceBidi(ssrc).reversed)(Keep.both)
      .async
      .join(Flow[ByteString].buffer(32, OverflowStrategy.backpressure).via(UdpConnectedFlow.flow(remoteAddress)))

  def voiceBidi(ssrc: Int): BidiFlow[ByteString, ByteString, ByteString, ByteString, Future[FoundIP]] = {
    implicit val byteOrder: ByteOrder = ByteOrder.BIG_ENDIAN
    val ipDiscoveryPacket = {
      val byteBuilder = ByteString.createBuilder
      byteBuilder.sizeHint(74)
      byteBuilder.putShort(0x1).putShort(70).putInt(ssrc)

      byteBuilder.putBytes(new Array[Byte](66))

      byteBuilder.result()
    }

    val valvePromise = Promise[Unit]
    val valve        = Source.future(valvePromise.future).drop(1).asInstanceOf[Source[ByteString, NotUsed]]

    val ipDiscoveryFlow = Flow[ByteString]
      .viaMat(new IPDiscoveryFlow(() => valvePromise.success(())))(Keep.right)

    BidiFlow
      .fromGraph(GraphDSL.create(ipDiscoveryFlow) { implicit b => ipDiscovery =>
        import GraphDSL.Implicits._

        val voiceIn = b.add(Flow[ByteString])

        val ipDiscoverySource           = b.add(Source.single(ipDiscoveryPacket) ++ valve)
        val ipDiscoveryAndThenVoiceData = b.add(Concat[ByteString]())

        ipDiscoverySource ~> ipDiscoveryAndThenVoiceData
        voiceIn ~> ipDiscoveryAndThenVoiceData

        BidiShape(
          ipDiscovery.in,
          ipDiscovery.out,
          voiceIn.in,
          ipDiscoveryAndThenVoiceData.out
        )
      })
  }

  
  case class FoundIP(address: String, port: Int)
} 
Example 131
Source File: CmdHelper.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.oldcommands

import ackcord.CacheSnapshot
import ackcord.data.raw.RawMessage
import ackcord.data.{Message, User}
import ackcord.requests.{CreateMessage, Request, Requests}
import ackcord.syntax._
import akka.NotUsed
import akka.stream.FlowShape
import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL}

object CmdHelper {

  
  def isValidCommand[F[_]](needMention: Boolean, msg: Message)(
      implicit c: CacheSnapshot
  ): Option[List[String]] = {
    if (needMention) {
      val botUser = c.botUser
      //We do a quick check first before parsing the message
      val quickCheck = if (msg.mentions.contains(botUser.id)) Some(msg.content.split(" ").toList) else None

      quickCheck.flatMap { args =>
        MessageParser
          .parseEither(args, MessageParser[User])
          .toOption
          .flatMap {
            case (remaining, user) if user.id == botUser.id => Some(remaining)
            case (_, _)                                     => None
          }
      }
    } else Some(msg.content.split(" ").toList)
  }
} 
Example 132
Source File: StreamInstances.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.util

import akka.NotUsed
import akka.stream.scaladsl.{Flow, Merge, Sink, Source}
import cats.{Alternative, Contravariant, Functor, MonadError, StackSafeMonad}

object StreamInstances {

  type SourceRequest[A] = Source[A, NotUsed]

  implicit val sourceInstance: MonadError[SourceRequest, Throwable] with Alternative[SourceRequest] =
    new MonadError[SourceRequest, Throwable] with Alternative[SourceRequest] with StackSafeMonad[SourceRequest] {

      override def empty[A]: SourceRequest[A] = Source.empty[A]

      override def pure[A](x: A): SourceRequest[A] = Source.single(x)

      override def map[A, B](fa: SourceRequest[A])(f: A => B): SourceRequest[B] = fa.map(f)

      override def flatMap[A, B](fa: SourceRequest[A])(f: A => SourceRequest[B]): SourceRequest[B] =
        fa.flatMapConcat[B, NotUsed](f)

      override def product[A, B](fa: SourceRequest[A], fb: SourceRequest[B]): SourceRequest[(A, B)] = fa.zip(fb)

      override def combineK[A](x: SourceRequest[A], y: SourceRequest[A]): SourceRequest[A] =
        Source.combine(x, y)(Merge.apply(_))

      override def raiseError[A](e: Throwable): SourceRequest[A] = Source.failed(e)
      override def handleErrorWith[A](fa: SourceRequest[A])(f: Throwable => SourceRequest[A]): SourceRequest[A] =
        fa.recoverWithRetries[A](
          5,
          {
            case e: Throwable => f(e).mapMaterializedValue(_ => NotUsed)
          }
        )
    }

  implicit def flowInstance[In, Mat]: Functor[Flow[In, *, Mat]] = new Functor[Flow[In, *, Mat]] {
    override def map[A, B](fa: Flow[In, A, Mat])(f: A => B): Flow[In, B, Mat] = fa.map(f)
  }

  implicit def sinkInstance[Mat]: Contravariant[Sink[*, Mat]] = new Contravariant[Sink[*, Mat]] {
    override def contramap[A, B](fa: Sink[A, Mat])(f: B => A): Sink[B, Mat] = fa.contramap(f)
  }

  //flatMap syntax for Source; relying on it can be brittle
  implicit class SourceFlatmap[A, M1](private val source: Source[A, M1]) extends AnyVal {
    def flatMap[B, M2](f: A => Source[B, M2]): Source[B, M1] = source.flatMapConcat(f)
  }
} 
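
A minimal sketch of using these instances (assumes the StreamInstances members above are imported; the flatMap in the for-comprehension comes from SourceFlatmap, not from cats):

import akka.NotUsed
import akka.stream.scaladsl.Source
import ackcord.util.StreamInstances._

// Monadic composition through the MonadError instance:
val combined: SourceRequest[Int] =
  sourceInstance.flatMap(Source(List(1, 2)))(n => Source(List(n, n * 10)))

// SourceFlatmap enables for-comprehensions directly on Source:
val pairs: Source[(Int, Int), NotUsed] =
  for { a <- Source(1 to 2); b <- Source(1 to 2) } yield (a, b)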
Example 133
Source File: RepeatLast.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.util

import akka.NotUsed
import akka.stream.scaladsl.Flow
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import akka.stream.{Attributes, FlowShape, Inlet, Outlet}

class RepeatLast[A] extends GraphStage[FlowShape[A, A]] {
  val in: Inlet[A]                    = Inlet("RepeatLast.in")
  val out: Outlet[A]                  = Outlet("RepeatLast.out")
  override def shape: FlowShape[A, A] = FlowShape(in, out)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) with InHandler with OutHandler {
      var elem: A = _

      override def onPush(): Unit = {
        elem = grab(in)
        push(out, elem)
      }

      override def onPull(): Unit = {
        if (elem != null && isAvailable(out)) push(out, elem)
        if (!hasBeenPulled(in)) pull(in)
      }

      setHandlers(in, out, this)
    }
}
object RepeatLast {

  
  def flow[A]: Flow[A, A, NotUsed] = Flow.fromGraph(new RepeatLast[A])
} 
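
A minimal usage sketch (hypothetical demo; assumes Akka 2.6 so the ActorSystem provides the materializer). A slow upstream paired with a faster downstream keeps receiving the most recent element:

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import scala.concurrent.duration._

object RepeatLastDemo extends App {
  implicit val system: ActorSystem = ActorSystem("repeat-last-demo")
  Source.tick(0.seconds, 1.second, "heartbeat") // one element per second
    .via(RepeatLast.flow)                       // re-emits the latest element on demand
    .throttle(10, 1.second)                     // bound the downstream polling rate
    .runWith(Sink.foreach(println))             // ~10 copies of each tick per second
}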
Example 134
Source File: SupervisionStreams.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.requests

import akka.actor.typed.ActorSystem
import akka.stream.javadsl.RunnableGraph
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{ActorAttributes, Attributes, Supervision}

object SupervisionStreams {

  def addLogAndContinueFunction[G](addAttributes: Attributes => G)(implicit system: ActorSystem[Nothing]): G =
    addAttributes(ActorAttributes.supervisionStrategy {
      case _: RetryFailedRequestException[_] => Supervision.Stop
      case e =>
        system.log.error("Unhandled exception in stream", e)
        Supervision.Resume
    })

  def logAndContinue[M](graph: RunnableGraph[M])(implicit system: ActorSystem[Nothing]): RunnableGraph[M] =
    addLogAndContinueFunction(graph.addAttributes)

  def logAndContinue[Out, Mat](source: Source[Out, Mat])(implicit system: ActorSystem[Nothing]): Source[Out, Mat] =
    addLogAndContinueFunction(source.addAttributes)

  def logAndContinue[In, Out, Mat](
      flow: Flow[In, Out, Mat]
  )(implicit system: ActorSystem[Nothing]): Flow[In, Out, Mat] =
    addLogAndContinueFunction(flow.addAttributes)

  def logAndContinue[In, Mat](sink: Sink[In, Mat])(implicit system: ActorSystem[Nothing]): Sink[In, Mat] =
    addLogAndContinueFunction(sink.addAttributes)
} 
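
A minimal sketch of applying the helper (assumes an implicit akka.actor.typed.ActorSystem[Nothing] and a materializer in scope, as the methods require). Any exception other than RetryFailedRequestException is logged and the offending element dropped instead of failing the stream:

import akka.stream.scaladsl.{Sink, Source}

val resilient = SupervisionStreams.logAndContinue(
  Source(1 to 10).map(n => if (n == 5) throw new RuntimeException("boom") else n)
)
resilient.runWith(Sink.foreach(println)) // prints 1..4 and 6..10; 5 is dropped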
Example 135
Source File: CacheStreams.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord

import scala.collection.mutable

import ackcord.cachehandlers.CacheSnapshotBuilder
import ackcord.gateway.GatewayEvent.ReadyData
import ackcord.gateway.GatewayMessage
import ackcord.requests.SupervisionStreams
import akka.NotUsed
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{BroadcastHub, Flow, Keep, MergeHub, Sink, Source}
import org.slf4j.Logger

object CacheStreams {

  
  def cacheUpdater(
      cacheProcessor: MemoryCacheSnapshot.CacheProcessor
  )(implicit system: ActorSystem[Nothing]): Flow[CacheEvent, (CacheEvent, CacheState), NotUsed] =
    Flow[CacheEvent].statefulMapConcat { () =>
      var state: CacheState    = null
      implicit val log: Logger = system.log

      //We only handle events once we are ready, i.e. after the ready event has been received.
      def isReady: Boolean = state != null

      {
        case readyEvent @ APIMessageCacheUpdate(_: ReadyData, _, _, _, _) =>
          val builder = new CacheSnapshotBuilder(
            0,
            null, //The event will populate this,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            cacheProcessor
          )

          readyEvent.process(builder)

          val snapshot = builder.toImmutable
          state = CacheState(snapshot, snapshot)
          List(readyEvent -> state)
        case handlerEvent: CacheEvent if isReady =>
          val builder = CacheSnapshotBuilder(state.current)
          handlerEvent.process(builder)

          state = state.update(builder.toImmutable)
          List(handlerEvent -> state)
        case _ if !isReady =>
          log.error("Received event before ready")
          Nil
      }
    }
} 
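
cacheUpdater relies on statefulMapConcat for its per-materialization mutable state. A minimal, self-contained sketch of the pattern:

import akka.NotUsed
import akka.stream.scaladsl.Flow

// The outer factory runs once per materialization, so `sum` is private to
// each running instance of the stream.
val runningTotal: Flow[Int, Int, NotUsed] =
  Flow[Int].statefulMapConcat { () =>
    var sum = 0
    n => { sum += n; List(sum) }
  }
// Source(List(1, 2, 3)).via(runningTotal) emits 1, 3, 6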
Example 136
Source File: UpickleCustomizationSupport.scala    From akka-http-json   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.akkahttpupickle

import akka.http.javadsl.common.JsonEntityStreamingSupport
import akka.http.scaladsl.common.EntityStreamingSupport
import akka.http.scaladsl.marshalling.{ Marshaller, Marshalling, ToEntityMarshaller }
import akka.http.scaladsl.model.{ ContentTypeRange, HttpEntity, MediaType, MessageEntity }
import akka.http.scaladsl.model.MediaTypes.`application/json`
import akka.http.scaladsl.unmarshalling.{ FromEntityUnmarshaller, Unmarshal, Unmarshaller }
import akka.http.scaladsl.util.FastFuture
import akka.stream.scaladsl.{ Flow, Source }
import akka.util.ByteString
import UpickleCustomizationSupport._

import scala.collection.immutable.Seq
import scala.concurrent.Future
import scala.util.Try
import scala.util.control.NonFatal

// This companion object only exists for binary compatibility as adding methods with default implementations
// (including val's as they create synthetic methods) is not compatible.
private object UpickleCustomizationSupport {

  private def jsonStringUnmarshaller(support: UpickleCustomizationSupport) =
    Unmarshaller.byteStringUnmarshaller
      .forContentTypes(support.unmarshallerContentTypes: _*)
      .mapWithCharset {
        case (ByteString.empty, _) => throw Unmarshaller.NoContentException
        case (data, charset)       => data.decodeString(charset.nioCharset.name)
      }

  private def jsonSourceStringMarshaller(support: UpickleCustomizationSupport) =
    Marshaller.oneOf(support.mediaTypes: _*)(support.sourceByteStringMarshaller)

  private def jsonStringMarshaller(support: UpickleCustomizationSupport) =
    Marshaller.oneOf(support.mediaTypes: _*)(Marshaller.stringMarshaller)
}

// NOTE: the enclosing trait declaration of UpickleCustomizationSupport (which
// defines the `apiInstance` upickle API member referenced below) was truncated
// in the original listing; only this source marshaller survives.
  implicit def sourceMarshaller[A](implicit
      writes: apiInstance.Writer[A],
      support: JsonEntityStreamingSupport = EntityStreamingSupport.json()
  ): ToEntityMarshaller[SourceOf[A]] =
    jsonSourceStringMarshaller(this).compose(jsonSource[A])
} 
Example 137
Source File: LocalFilePersistService.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.atlas.persistence

import akka.Done
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Keep
import akka.stream.scaladsl.RestartFlow
import akka.stream.scaladsl.Sink
import com.netflix.atlas.akka.StreamOps
import com.netflix.atlas.akka.StreamOps.SourceQueue
import com.netflix.atlas.core.model.Datapoint
import com.netflix.iep.service.AbstractService
import com.netflix.spectator.api.Registry
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging
import javax.inject.Inject
import javax.inject.Singleton

import scala.concurrent.Await
import scala.concurrent.Future
import scala.concurrent.duration.Duration

@Singleton
class LocalFilePersistService @Inject()(
  val config: Config,
  val registry: Registry,
  // S3CopyService is actually NOT used by this service, it is here just to guarantee that the
  // shutdown callback (stopImpl) of this service is invoked before S3CopyService's
  val s3CopyService: S3CopyService,
  implicit val system: ActorSystem
) extends AbstractService
    with StrictLogging {
  implicit val ec = scala.concurrent.ExecutionContext.global
  implicit val mat = ActorMaterializer()

  private val queueSize = config.getInt("atlas.persistence.queue-size")

  private val fileConfig = config.getConfig("atlas.persistence.local-file")
  private val dataDir = fileConfig.getString("data-dir")
  private val maxRecords = fileConfig.getLong("max-records")
  private val maxDurationMs = fileConfig.getDuration("max-duration").toMillis
  private val maxLateDurationMs = fileConfig.getDuration("max-late-duration").toMillis
  private val rollingConf = RollingConfig(maxRecords, maxDurationMs, maxLateDurationMs)

  require(queueSize > 0)
  require(maxRecords > 0)
  require(maxDurationMs > 0)

  private var queue: SourceQueue[Datapoint] = _
  private var flowComplete: Future[Done] = _

  override def startImpl(): Unit = {
    logger.info("Starting service")
    val (q, f) = StreamOps
      .blockingQueue[Datapoint](registry, "LocalFilePersistService", queueSize)
      .via(getRollingFileFlow)
      .toMat(Sink.ignore)(Keep.both)
      .run
    queue = q
    flowComplete = f
  }

  private def getRollingFileFlow(): Flow[Datapoint, NotUsed, NotUsed] = {
    import scala.concurrent.duration._
    RestartFlow.withBackoff(
      minBackoff = 1.second,
      maxBackoff = 3.seconds,
      randomFactor = 0,
      maxRestarts = -1
    ) { () =>
      Flow.fromGraph(
        new RollingFileFlow(dataDir, rollingConf, registry)
      )
    }
  }

  // This service should stop the Akka flow when application is shutdown gracefully, and let
  // S3CopyService do the cleanup. It should trigger:
  //   1. stop taking more data points (monitor droppedQueueClosed)
  //   2. close current file writer so that last file is ready to copy to s3
  override def stopImpl(): Unit = {
    logger.info("Stopping service")
    queue.complete()
    Await.result(flowComplete, Duration.Inf)
    logger.info("Stopped service")
  }

  def persist(dp: Datapoint): Unit = {
    queue.offer(dp)
  }
} 
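
A minimal sketch of the RestartFlow.withBackoff pattern used by getRollingFileFlow: the factory is invoked again after each failure, so the inner stage restarts with fresh state, with exponential backoff between attempts (the inner flow here is a stand-in, not the real RollingFileFlow):

import akka.NotUsed
import akka.stream.scaladsl.{Flow, RestartFlow}
import scala.concurrent.duration._

val resilientFlow: Flow[String, String, NotUsed] =
  RestartFlow.withBackoff(minBackoff = 1.second, maxBackoff = 3.seconds, randomFactor = 0.2, maxRestarts = -1) { () =>
    Flow[String].map(_.trim) // rebuilt from scratch on every restart
  }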
Example 138
Source File: ScalingPoliciesTestImpl.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.iep.lwc.fwd.admin

import akka.NotUsed
import akka.stream.scaladsl.Flow
import com.typesafe.config.Config

class ScalingPoliciesTestImpl(
  config: Config,
  dao: ScalingPoliciesDao,
  policies: Map[EddaEndpoint, List[ScalingPolicy]] = Map.empty[EddaEndpoint, List[ScalingPolicy]]
) extends ScalingPolicies(config, dao) {
  scalingPolicies = policies
  override def startPeriodicTimer(): Unit = {}
}

class ScalingPoliciesDaoTestImpl(
  policies: Map[EddaEndpoint, List[ScalingPolicy]]
) extends ScalingPoliciesDao {
  protected implicit val ec = scala.concurrent.ExecutionContext.global

  override def getScalingPolicies: Flow[EddaEndpoint, List[ScalingPolicy], NotUsed] = {
    Flow[EddaEndpoint]
      .map(policies(_))
  }
} 
Example 139
Source File: RestPi.scala    From apache-spark-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend

import akka.actor.ActorSystem
import akka.event.{ Logging, LoggingAdapter }
import akka.http.scaladsl._
import akka.http.scaladsl.common.{ EntityStreamingSupport, JsonEntityStreamingSupport }
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.server.{ Directives, Route }
import akka.stream.scaladsl.{ Flow, Source }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.util.ByteString
import com.github.dnvriend.spark.CalculatePi
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import spray.json.DefaultJsonProtocol

import scala.concurrent.{ ExecutionContext, Future }

object RestPi extends App with Directives with SprayJsonSupport with DefaultJsonProtocol {
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val log: LoggingAdapter = Logging(system, this.getClass)

  val spark = SparkSession.builder()
    .config("spark.sql.warehouse.dir", "file:/tmp/spark-warehouse")
    .config("spark.scheduler.mode", "FAIR")
    .config("spark.sql.crossJoin.enabled", "true")
    .master("local") // single local thread; use "local[*]" for one thread per core
    .appName("RestPi") // The appName parameter is a name for your application to show on the cluster UI.
    .getOrCreate()

  final case class Pi(pi: Double)

  implicit val piJsonFormat = jsonFormat1(Pi)
  val start = ByteString.empty
  val sep = ByteString("\n")
  val end = ByteString.empty
  implicit val jsonStreamingSupport: JsonEntityStreamingSupport = EntityStreamingSupport.json()
    .withFramingRenderer(Flow[ByteString].intersperse(start, sep, end))
    .withParallelMarshalling(parallelism = 8, unordered = true)

  def sparkContext: SparkContext = spark.newSession().sparkContext

  def calculatePi(num: Long = 1000000, slices: Int = 2): Future[Double] =
    Future(CalculatePi(sparkContext, num, slices)).map(count => slices.toDouble * count / (num - 1))

  val route: Route =
    pathEndOrSingleSlash {
      complete(calculatePi().map(Pi))
    } ~ path("pi" / LongNumber / IntNumber) { (num, slices) =>
      complete(calculatePi(num, slices).map(Pi))
    } ~ path("stream" / "pi" / LongNumber) { num =>
      complete(Source.fromFuture(calculatePi()).map(Pi)
        .flatMapConcat(Source.repeat).take(num))
    }

  Http().bindAndHandle(route, "0.0.0.0", 8008)

  sys.addShutdownHook {
    spark.stop()
    system.terminate()
  }
} 
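
A minimal, self-contained sketch of the framing renderer configured above: intersperse injects the separator between rendered elements (with empty start and end markers), producing newline-delimited JSON:

import akka.NotUsed
import akka.stream.scaladsl.Flow
import akka.util.ByteString

val newlineFraming: Flow[ByteString, ByteString, NotUsed] =
  Flow[ByteString].intersperse(ByteString.empty, ByteString("\n"), ByteString.empty)
// Two rendered objects pass through as: obj1, "\n", obj2
// (no leading or trailing separator)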
Example 140
Source File: LogProgress.scala    From apache-spark-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend

import akka.NotUsed
import akka.event.LoggingAdapter
import akka.stream.FlowShape
import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Sink }

import scala.compat.Platform
import scala.collection.immutable._

object LogProgress {
  def flow[A](each: Long = 1000)(implicit log: LoggingAdapter = null): Flow[A, A, NotUsed] = Flow.fromGraph[A, A, NotUsed](GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._
    val logFlow = Flow[A].statefulMapConcat { () =>
      var last = Platform.currentTime
      var num = 0L
      (x: A) =>
        num += 1
        if (num % each == 0) {
          val duration = Platform.currentTime - last
          val logOpt = Option(log)
          logOpt.foreach(_.info("[{} ms / {}]: {}", duration, each, num))
          if (logOpt.isEmpty) println(s"[$duration ms / $each]: $num")
          last = Platform.currentTime
        }
        Iterable(x)
    }
    val bcast = b.add(Broadcast[A](2, eagerCancel = false))
    bcast ~> logFlow ~> Sink.ignore
    FlowShape.of(bcast.in, bcast.out(1))
  })
} 
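
A minimal usage sketch (hypothetical wiring; assumes a materializer in scope). Elements pass through unchanged while the broadcast branch counts and logs them, falling back to println when no LoggingAdapter is supplied:

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}

object LogProgressDemo extends App {
  implicit val system: ActorSystem = ActorSystem("log-progress-demo")
  Source(1 to 5000)
    .via(LogProgress.flow[Int](each = 1000)) // logs timing every 1000 elements
    .runWith(Sink.ignore)
}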
Example 141
Source File: SocketWordCountTest.scala    From apache-spark-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.spark.sstreaming

import akka.Done
import akka.stream.scaladsl.Tcp._
import akka.stream.scaladsl.{ Flow, Sink, Source, Tcp }
import akka.util.ByteString
import com.github.dnvriend.TestSpec
import org.scalatest.Ignore

import scala.collection.immutable._
import scala.concurrent.Future
import scala.concurrent.duration._

@Ignore
class SocketWordCountTest extends TestSpec {
  def withSocketServer(xs: Seq[String])(f: Future[Done] => Unit): Unit = {
    val connections: Source[IncomingConnection, Future[ServerBinding]] = Tcp().bind("127.0.0.1", 9999)
    val socketServer = connections.runForeach { connection =>
      println(s"New connection from: ${connection.remoteAddress}")
      val src = Source.cycle(() => xs.iterator).map(txt => ByteString(txt) ++ ByteString("\n"))
        .flatMapConcat(msg => Source.tick(0.seconds, 200.millis, msg))
      val echo = Flow.fromSinkAndSource(Sink.ignore, src)
      connection.handleWith(echo)
    }
    f(socketServer)
  }

  it should "a running word count of text data received via a TCP server" in withSparkSession { spark =>
    withSocketServer(List("apache spark")) { socketServer =>
      import spark.implicits._

      val lines = spark.readStream
        .format("socket")
        .option("host", "localhost")
        .option("port", 9999)
        .load()

      // Split the lines into words
      val words = lines.as[String].flatMap(_.split(" "))

      // Generate running word count
      val wordCounts = words.groupBy("value").count()

      // Start running the query that prints the running counts to the console
      val query = wordCounts.writeStream
        .outputMode("complete")
        .format("console")
        .start()

      query.awaitTermination(10.seconds)
    }
  }
} 
Example 142
Source File: AkkaStreamOps.scala    From phobos   with Apache License 2.0 5 votes vote down vote up
package ru.tinkoff.phobos.ops

import akka.NotUsed
import akka.stream.scaladsl.{Flow, Keep, Sink}
import javax.xml.stream.XMLStreamConstants
import ru.tinkoff.phobos.decoding._
import scala.concurrent.Future

private[phobos] trait AkkaStreamOps {

  
  def decodingFlowUnsafe[A: XmlDecoder](charset: String = "UTF-8"): Flow[Array[Byte], A, NotUsed] =
    decodingFlow(charset).map(_.fold(throw _, identity))

  def decodingSink[A: XmlDecoder](charset: String = "UTF-8"): Sink[Array[Byte], Future[Either[DecodingError, A]]] =
    decodingFlow(charset).toMat(Sink.head)(Keep.right)

  def decodingSinkUnsafe[A: XmlDecoder](charset: String = "UTF-8"): Sink[Array[Byte], Future[A]] =
    decodingFlowUnsafe(charset).toMat(Sink.head)(Keep.right)
}

private[phobos] case class SinkDecoderState[A](
    xmlStreamReader: XmlStreamReader,
    cursor: Cursor,
    elementDecoder: ElementDecoder[A]
) {
  def withEncoder(that: ElementDecoder[A]): SinkDecoderState[A] = copy(elementDecoder = that)
}

private[phobos] object SinkDecoderState {

  def initial[A](xmlDecoder: XmlDecoder[A], charset: String): SinkDecoderState[A] = {
    val sr: XmlStreamReader = XmlDecoder.createStreamReader(charset)
    val cursor              = new Cursor(sr)
    SinkDecoderState(
      xmlStreamReader = sr,
      cursor = cursor,
      elementDecoder = xmlDecoder.elementdecoder
    )
  }
} 
Example 143
Source File: EventsController.scala    From izanami   with Apache License 2.0 5 votes vote down vote up
package controllers

import akka.actor.ActorSystem
import controllers.actions.SecuredAuthContext
import domains.Domain.Domain
import domains.events.{EventStore, EventStoreContext}
import play.api.libs.EventSource
import play.api.libs.EventSource.{EventDataExtractor, EventIdExtractor, EventNameExtractor}
import play.api.libs.json.{JsString, Json}
import play.api.mvc.{AbstractController, ActionBuilder, AnyContent, ControllerComponents}
import libs.http.HttpContext
import akka.stream.scaladsl.Flow
import scala.util.Success
import scala.util.Failure
import libs.logs.IzanamiLogger
import java.time.LocalDateTime
import play.api.libs.json.JsValue
import scala.concurrent.duration.DurationDouble
import domains.auth.AuthInfo
import domains.Key

class EventsController(system: ActorSystem,
                       AuthAction: ActionBuilder[SecuredAuthContext, AnyContent],
                       cc: ControllerComponents)(implicit r: HttpContext[EventStoreContext])
    extends AbstractController(cc) {

  import libs.http._
  import domains.events.Events._
  import system.dispatcher

  private implicit val nameExtractor =
    EventNameExtractor[IzanamiEvent](_ => None) //Some(event.`type`))
  private implicit val idExtractor = EventIdExtractor[IzanamiEvent](event => Some(s"${event._id}")) //Some(event.key.key))
  private implicit val dataExtractor =
    EventDataExtractor[IzanamiEvent](event => Json.stringify(event.toJson))

  def allEvents(patterns: String, domains: String) =
    events(domains.split(",").toIndexedSeq, patterns)

  def eventsForADomain(domain: String, patterns: String) =
    events(domain.split(",").toIndexedSeq, patterns)

  val logEvent = Flow[IzanamiEvent].map { event =>
    event
  }

  case class KeepAliveEvent() extends IzanamiEvent {
    val _id: Long                          = 0
    val domain: Domain                     = domains.Domain.Unknown
    val authInfo: Option[AuthInfo.Service] = None
    val key: Key                           = Key("na")
    def timestamp: LocalDateTime           = LocalDateTime.now()
    val `type`: String                     = "KEEP_ALIVE"
    val payload: JsValue                   = Json.obj()
  }

  val keepAlive = Flow[IzanamiEvent].keepAlive(30.seconds, () => KeepAliveEvent())

  // TODO abilitations
  private def events[T <: IzanamiEvent](domains: Seq[String], patterns: String) =
    AuthAction.asyncTask[EventStoreContext] { ctx =>
      val allPatterns: Seq[String] = ctx.authorizedPatterns ++ patterns
        .split(",")
        .toList

      val lastEventId = ctx.request.headers.get("Last-Event-ID").map(_.toLong)
      val allDomains  = domains.map(JsString).flatMap(_.validate[Domain].asOpt)

      EventStore
        .events(allDomains, allPatterns, lastEventId)
        .map { source =>
          val eventSource = (source via keepAlive via logEvent via EventSource.flow).watchTermination() { (_, fDone) =>
            fDone.onComplete {
              case Success(_) =>
                IzanamiLogger.debug("SSE disconnected")
              case Failure(e) =>
                IzanamiLogger.error("Error during SSE ", e)
            }
            fDone
          }
          Ok.chunked(eventSource).as("text/event-stream")
        }
    }

} 
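
A minimal, self-contained sketch of the keepAlive stage used above: if no element flows for the configured interval, the injected element is emitted instead, keeping the SSE connection from idling out:

import akka.NotUsed
import akka.stream.scaladsl.Flow
import scala.concurrent.duration._

val withHeartbeat: Flow[String, String, NotUsed] =
  Flow[String].keepAlive(30.seconds, () => "KEEP_ALIVE")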
Example 144
Source File: ExperimentVariantEventTest.scala    From izanami   with Apache License 2.0 5 votes vote down vote up
package domains.abtesting
import java.time.LocalDateTime
import java.time.temporal.ChronoUnit

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink, Source}
import domains.Key
import domains.abtesting.events._
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import test.IzanamiSpec

class ExperimentVariantEventTest extends IzanamiSpec with ScalaFutures with IntegrationPatience {

  "ExperimentVariantEvent" must {
    "aggregate event" in {

      implicit val system: ActorSystem = ActorSystem()

      val variantId = "vId"
      val variant   = Variant(variantId, "None", None, Traffic(0), None)
      val flow: Flow[ExperimentVariantEvent, VariantResult, NotUsed] =
        ExperimentVariantEvent.eventAggregation("experiment.id", 1, ChronoUnit.HOURS)

      val firstDate = LocalDateTime.now().minus(5, ChronoUnit.HOURS)

      val experimentKey = Key(s"experiment:id")
      def experimentVariantEventKey(counter: Int): ExperimentVariantEventKey =
        ExperimentVariantEventKey(experimentKey, variantId, s"client:id:$counter", "namespace", s"$counter")
      def clientId(i: Int): String    = s"client:id:$i"
      def date(i: Int): LocalDateTime = firstDate.plus(15 * i, ChronoUnit.MINUTES)

      val source = (1 to 20)
        .flatMap { counter =>
          val d   = date(counter)
          val key = experimentVariantEventKey(counter)

          counter match {
            case i if i % 2 > 0 =>
              List(ExperimentVariantDisplayed(key, experimentKey, clientId(i), variant, d, 0, variantId))
            case i =>
              List(
                ExperimentVariantDisplayed(key, experimentKey, clientId(i), variant, d, 0, variantId),
                ExperimentVariantWon(key, experimentKey, clientId(i), variant, d, 0, variantId)
              )
          }
        }

      val expectedEvents = Seq(
        ExperimentResultEvent(experimentKey, variant, date(1), 0.0, "vId"),
        ExperimentResultEvent(experimentKey, variant, date(5), 40.0, "vId"),
        ExperimentResultEvent(experimentKey, variant, date(9), 44.44444444444444, "vId"),
        ExperimentResultEvent(experimentKey, variant, date(13), 46.15384615384615, "vId"),
        ExperimentResultEvent(experimentKey, variant, date(17), 47.05882352941177, "vId")
      )

      val evts      = Source(source).via(flow).runWith(Sink.seq).futureValue
      val allEvents = evts.flatMap(_.events)

      allEvents must be(expectedEvents)
    }
  }

} 
Example 145
Source File: GrpcProtocolWeb.scala    From akka-grpc   with Apache License 2.0 5 votes vote down vote up
package akka.grpc.internal

import akka.grpc.GrpcProtocol._
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.HttpEntity.{ Chunk, ChunkStreamPart }
import akka.stream.scaladsl.Flow
import akka.util.ByteString
import io.grpc.{ Status, StatusException }

abstract class GrpcProtocolWebBase(subType: String) extends AbstractGrpcProtocol(subType) {
  protected def postEncode(frame: ByteString): ByteString
  protected def preDecode(frame: ByteString): ByteString

  override protected def writer(codec: Codec): GrpcProtocolWriter =
    AbstractGrpcProtocol.writer(this, codec, frame => encodeFrame(codec, frame))

  override protected def reader(codec: Codec): GrpcProtocolReader =
    AbstractGrpcProtocol.reader(codec, decodeFrame, flow => Flow[ByteString].map(preDecode).via(flow))

  @inline
  private def encodeFrame(codec: Codec, frame: Frame): ChunkStreamPart = {
    val dataFrameType = AbstractGrpcProtocol.fieldType(codec)
    val (frameType, data) = frame match {
      case DataFrame(data)       => (dataFrameType, data)
      case TrailerFrame(trailer) => (ByteString(dataFrameType(0) | 0x80), encodeTrailer(trailer))
    }
    val framed = AbstractGrpcProtocol.encodeFrameData(frameType, codec.compress(data))
    Chunk(postEncode(framed))
  }

  @inline
  private final def decodeFrame(frameHeader: Int, data: ByteString): Frame = {
    (frameHeader & 0x80) match { // the trailer flag is the most significant bit
      case 0    => DataFrame(data)
      case 0x80 => TrailerFrame(decodeTrailer(data))
      case f => throw new StatusException(Status.INTERNAL.withDescription(s"Unknown frame type [$f]"))
    }
  }

  @inline
  private final def encodeTrailer(trailer: Seq[HttpHeader]): ByteString =
    ByteString(trailer.mkString("", "\r\n", "\r\n"))

  @inline
  private final def decodeTrailer(data: ByteString): List[HttpHeader] = ???

}


object GrpcProtocolWebText extends GrpcProtocolWebBase("grpc-web-text") {

  override final def postEncode(framed: ByteString): ByteString = framed.encodeBase64

  override final def preDecode(frame: ByteString): ByteString = frame.decodeBase64
} 
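
For grpc-web-text the framed bytes are base64-coded on the wire; a minimal sketch of the round trip the postEncode/preDecode pair relies on:

import akka.util.ByteString

val framed  = ByteString(0, 0, 0, 0, 3, 1, 2, 3) // 5-byte header + 3-byte payload
val onWire  = framed.encodeBase64                // what GrpcProtocolWebText emits
val decoded = onWire.decodeBase64                // what preDecode recovers
assert(decoded == framed)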
Example 146
Source File: ProperShutdownStream.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.stream
import java.util.concurrent.atomic.AtomicLong

import akka.Done
import akka.actor.ActorRef
import akka.stream.ClosedShape
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object ProperShutdownStream {
  val genCount = new AtomicLong(0L)
}

class ProperShutdownStream extends PerpetualStream[(ActorRef, Future[Long])] {
  import ProperShutdownStream._
  import org.squbs.unicomplex.Timeouts._

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0){ p => if (p == Int.MaxValue) 0 else p + 1 } map { v =>
    genCount.incrementAndGet()
    v
  }

  val managedSource = LifecycleManaged().source(Source fromIterator generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(managedSource, counter)((a, b) => (a._2, b)) {
    implicit builder =>
    (source, sink) =>
      import GraphDSL.Implicits._
      source ~> throttle ~> sink
      ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>
      val (_, fCount) = matValue

      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! fCount
  }

  override def shutdown() = {
    super.shutdown()
    import context.dispatcher
    val (actorRef, fCount) = matValue
    val fStopped = gracefulStop(actorRef, awaitMax)
    for { _ <- fCount; _ <- fStopped } yield Done
  }
} 
Example 147
Source File: WebSocketMessageHandler.scala    From asura   with MIT License 5 votes vote down vote up
package asura.core.actor.flow

import akka.NotUsed
import akka.actor.{ActorRef, PoisonPill}
import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Sink, Source}
import asura.common.actor.{ActorEvent, SenderMessage}
import asura.common.exceptions.InvalidStatusException
import asura.core.CoreConfig
import asura.core.util.JacksonSupport

import scala.concurrent.duration._

object WebSocketMessageHandler {

  val DEFAULT_BUFFER_SIZE = CoreConfig.DEFAULT_WS_ACTOR_BUFFER_SIZE
  val KEEP_ALIVE_INTERVAL = 2

  def newHandleFlow[T <: AnyRef](workActor: ActorRef, msgClass: Class[T]): Flow[Message, Message, NotUsed] = {
    val incomingMessages: Sink[Message, NotUsed] =
      Flow[Message].map {
        case TextMessage.Strict(text) => JacksonSupport.parse(text, msgClass)
        case _ => throw InvalidStatusException("Unsupported message type")
      }.to(Sink.actorRef[T](workActor, PoisonPill))
    val outgoingMessages: Source[Message, NotUsed] =
      Source.actorRef[ActorEvent](DEFAULT_BUFFER_SIZE, OverflowStrategy.dropHead)
        .mapMaterializedValue { outActor =>
          workActor ! SenderMessage(outActor)
          NotUsed
        }
        .map(result => TextMessage(JacksonSupport.stringify(result)))
        .keepAlive(KEEP_ALIVE_INTERVAL.seconds, () => TextMessage.Strict(""))
    Flow.fromSinkAndSource(incomingMessages, outgoingMessages)
  }

  def newHandleStringFlow[T <: AnyRef](workActor: ActorRef, msgClass: Class[T]): Flow[Message, Message, NotUsed] = {
    val incomingMessages: Sink[Message, NotUsed] =
      Flow[Message].map {
        case TextMessage.Strict(text) => JacksonSupport.parse(text, msgClass)
        case _ => throw InvalidStatusException("Unsupported message type")
      }.to(Sink.actorRef[T](workActor, PoisonPill))
    val outgoingMessages: Source[Message, NotUsed] =
      Source.actorRef[String](DEFAULT_BUFFER_SIZE, OverflowStrategy.dropHead)
        .mapMaterializedValue { outActor =>
          workActor ! SenderMessage(outActor)
          NotUsed
        }
        .map(result => TextMessage(result))
        .keepAlive(KEEP_ALIVE_INTERVAL.seconds, () => TextMessage.Strict(""))
    Flow.fromSinkAndSource(incomingMessages, outgoingMessages)
  }

  def stringToActorEventFlow[T <: AnyRef](workActor: ActorRef, msgClass: Class[T]): Flow[String, String, NotUsed] = {
    val incomingMessages: Sink[String, NotUsed] =
      Flow[String].map {
        case text: String => JacksonSupport.parse(text, msgClass)
      }.to(Sink.actorRef[T](workActor, PoisonPill))
    val outgoingMessages: Source[String, NotUsed] =
      Source.actorRef[ActorEvent](DEFAULT_BUFFER_SIZE, OverflowStrategy.dropHead)
        .mapMaterializedValue { outActor =>
          workActor ! SenderMessage(outActor)
          NotUsed
        }
        .map(result => JacksonSupport.stringify(result))
        .keepAlive(KEEP_ALIVE_INTERVAL.seconds, () => "")
    Flow.fromSinkAndSource(incomingMessages, outgoingMessages)
  }

  def stringToActorEventFlow[T <: AnyRef](workActor: ActorRef): Flow[String, String, NotUsed] = {
    val incomingMessages: Sink[String, NotUsed] =
      Flow[String].to(Sink.actorRef[String](workActor, PoisonPill))
    val outgoingMessages: Source[String, NotUsed] =
      Source.actorRef[ActorEvent](DEFAULT_BUFFER_SIZE, OverflowStrategy.dropHead)
        .mapMaterializedValue { outActor =>
          workActor ! SenderMessage(outActor)
          NotUsed
        }
        .map(result => JacksonSupport.stringify(result))
        .keepAlive(KEEP_ALIVE_INTERVAL.seconds, () => "")
    Flow.fromSinkAndSource(incomingMessages, outgoingMessages)
  }
} 
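
A design note on the pattern above: Flow.fromSinkAndSource joins an independent sink and source with no backpressure coupling between the two sides, which is why the actor-backed source needs an explicit OverflowStrategy. A minimal sketch:

import akka.NotUsed
import akka.stream.scaladsl.{Flow, Sink, Source}

// Inbound elements go to the sink, outbound ones come from the source;
// the two sides never exchange demand with each other.
val detached: Flow[String, String, NotUsed] =
  Flow.fromSinkAndSource(Sink.foreach[String](println), Source.single("hello"))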
Example 148
Source File: TelnetEchoApp.scala    From asura   with MIT License 5 votes vote down vote up
package asura.dubbo.telnet

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Framing, Tcp}
import akka.util.ByteString
import com.typesafe.scalalogging.Logger

object TelnetEchoApp {

  val logger = Logger("TelnetEchoApp")
  implicit val system = ActorSystem("telnet")
  implicit val ec = system.dispatcher
  implicit val materializer = ActorMaterializer()

  val echo = Flow[ByteString]
    .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 256, allowTruncation = false))
    .map(_.utf8String)
    .map(txt => {
      logger.info(s"got(${txt.length}):${txt}")
      txt + "\n"
    })
    .map(ByteString(_))

  def main(args: Array[String]): Unit = {
    val connections = Tcp().bind("127.0.0.1", 8888)
    connections runForeach { connection =>
      logger.info(s"New connection from: ${connection.remoteAddress}")
      connection.handleWith(echo)
    }
  }
} 
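
A minimal, self-contained sketch of the line framing above: TCP delivers arbitrarily sized chunks, and Framing.delimiter re-chunks them into complete lines (with allowTruncation = true a trailing unterminated line is emitted instead of failing the stream, a more lenient variant than the example's setting):

import akka.NotUsed
import akka.stream.scaladsl.{Flow, Framing}
import akka.util.ByteString

val lines: Flow[ByteString, String, NotUsed] =
  Flow[ByteString]
    .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 256, allowTruncation = true))
    .map(_.utf8String)
// ByteString("ab") followed by ByteString("c\nde\n") is re-framed as "abc", "de"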
Example 149
Source File: Webservice.scala    From akka-http-scala-js-websocket-chat   with MIT License 5 votes vote down vote up
package example.akkawschat

import java.util.Date

import akka.actor.ActorSystem
import akka.http.scaladsl.model.ws.{ Message, TextMessage }

import scala.concurrent.duration._
import akka.http.scaladsl.server.Directives
import akka.stream.scaladsl.Flow
import upickle.default._
import shared.Protocol
import shared.Protocol._

import scala.util.Failure

class Webservice(implicit system: ActorSystem) extends Directives {
  val theChat = Chat.create()
  import system.dispatcher
  system.scheduler.scheduleAtFixedRate(15.second, 15.second) { () =>
    theChat.injectMessage(ChatMessage(sender = "clock", s"Bling! The time is ${new Date().toString}."))
  }

  def route =
    get {
      pathSingleSlash {
        getFromResource("web/index.html")
      } ~
        // Scala-JS puts them in the root of the resource directory per default,
        // so that's where we pick them up
        path("frontend-launcher.js")(getFromResource("frontend-launcher.js")) ~
        path("frontend-fastopt.js")(getFromResource("frontend-fastopt.js")) ~
        path("chat") {
          parameter("name") { name =>
            handleWebSocketMessages(websocketChatFlow(sender = name))
          }
        }
    } ~
      getFromResourceDirectory("web")

  def websocketChatFlow(sender: String): Flow[Message, Message, Any] =
    Flow[Message]
      .collect {
        case TextMessage.Strict(msg) => msg // unpack incoming WS text messages...
        // This will lose (ignore) messages not received in one chunk (unlikely
        // for small chat messages, but entirely possible)
        // FIXME: We need to handle TextMessage.Streamed as well.
      }
      .via(theChat.chatFlow(sender)) // ... and route them through the chatFlow ...
      .map {
        case msg: Protocol.Message =>
          TextMessage.Strict(write(msg)) // ... pack outgoing messages into WS JSON messages ...
      }
      .via(reportErrorsFlow) // ... then log any processing errors to stdout

  def reportErrorsFlow[T]: Flow[T, T, Any] =
    Flow[T]
      .watchTermination()((_, f) => f.onComplete {
        case Failure(cause) =>
          println(s"WS stream failed with $cause")
        case _ => // ignore regular completion
      })
} 
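
One hedged way to address the FIXME above (a sketch, not the project's actual fix; assumes a Materializer is in scope): fold streamed text frames into strict strings before routing them into the chat flow, and drain binary frames so the connection stays healthy:

import akka.http.scaladsl.model.ws.{ BinaryMessage, Message, TextMessage }
import akka.stream.Materializer
import akka.stream.scaladsl.{ Flow, Sink }

import scala.concurrent.Future

def collectText(implicit mat: Materializer): Flow[Message, String, Any] =
  Flow[Message].mapAsync(1) {
    case TextMessage.Strict(msg)   => Future.successful(msg)
    case TextMessage.Streamed(src) => src.runFold("")(_ + _) // reassemble chunks
    case bm: BinaryMessage =>
      bm.dataStream.runWith(Sink.ignore) // drain so the stream is not stalled
      Future.successful("")              // filter out these empties before use
  }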
Example 150
Source File: ChatCLI.scala    From akka-http-scala-js-websocket-chat   with MIT License 5 votes vote down vote up
package example.akkawschat.cli

import akka.actor.ActorSystem

import akka.stream.scaladsl.{ Flow, Source }
import akka.http.scaladsl.model.Uri
import shared.Protocol

import scala.util.{ Failure, Success }

object ChatCLI extends App {
  def promptForName(): String = {
    Console.out.print("What's your name? ")
    Console.out.flush()
    Console.in.readLine()
  }

  val endpointBase = "ws://localhost:8080/chat"
  val name = promptForName()

  val endpoint = Uri(endpointBase).withQuery(Uri.Query("name" -> name))

  implicit val system = ActorSystem()
  import system.dispatcher

  import Console._
  def formatCurrentMembers(members: Seq[String]): String =
    s"(${members.size} people chatting: ${members.map(m ⇒ s"$YELLOW$m$RESET").mkString(", ")})"

  object ChatApp extends ConsoleDSL[String] {
    type State = Seq[String] // current chat members
    def initialState: Seq[String] = Nil

    def run(): Unit = {
      lazy val initialCommands =
        Command.PrintLine("Welcome to the Chat!") ~ readLineAndEmitLoop

      val inputFlow =
        Flow[Protocol.Message]
          .map {
            case Protocol.ChatMessage(sender, message) ⇒ Command.PrintLine(s"$YELLOW$sender$RESET: $message")
            case Protocol.Joined(member, all)          ⇒ Command.PrintLine(s"$YELLOW$member$RESET ${GREEN}joined!$RESET ${formatCurrentMembers(all)}") ~ Command.SetState(all)
            case Protocol.Left(member, all)            ⇒ Command.PrintLine(s"$YELLOW$member$RESET ${RED}left!$RESET ${formatCurrentMembers(all)}") ~ Command.SetState(all)
          }
          // inject initial commands before the commands generated by the server
          .prepend(Source.single(initialCommands))

      val appFlow =
        inputFlow
          .via(consoleHandler)
          .filterNot(_.trim.isEmpty)
          .watchTermination()((_, f) => f onComplete {
            case Success(_) =>
              println("\nFinishing...")
              system.terminate()
            case Failure(e) ⇒
              println(s"Connection to $endpoint failed because of '${e.getMessage}'")
              system.terminate()
          })

      println("Connecting... (Use Ctrl-D to exit.)")
      ChatClient.connect(endpoint, appFlow)
    }

    val basePrompt = s"($name) >"

    lazy val readLineAndEmitLoop: Command =
      readWithParticipantNameCompletion { line ⇒
        Command.Emit(line) ~ readLineAndEmitLoop
      }

    def readWithParticipantNameCompletion(andThen: String ⇒ Command): Command = {
      import Command._

      // NOTE: the local simpleMode/mentionMode definitions (a small state
      // machine offering tab-completion of participant names) were truncated
      // in the original listing; only the final call below survives.

      simpleMode("")
    }

  }
  ChatApp.run()
} 
Example 151
Source File: PromptFlow.scala    From akka-http-scala-js-websocket-chat   with MIT License 5 votes vote down vote up
package example.akkawschat.cli

import akka.stream._
import akka.stream.scaladsl.{ Flow, Source, GraphDSL }
import akka.stream.stage.{ InHandler, GraphStageLogic, GraphStage }

import scala.concurrent.ExecutionContext

object Prompt {
  // NOTE: the opening of this example (the GraphStage definition with its
  // characterInput, outputLinesIn and readLinesOut ports, plus the start of
  // the character-input handler's match) was truncated in the original
  // listing; the fragment resumes below inside the backspace branch.
              collectedString = collectedString.dropRight(1)
              prompt()
            case x ⇒
              //println(s"Got ${x.toInt}")
              collectedString += x
              print(x)
              pull(characterInput)
          }
        }
      })
      setHandler(outputLinesIn, new InHandler {
        def onPush(): Unit = {
          print(s"$RESTORE$ERASE_LINE${grab(outputLinesIn)}\n$SAVE$promptLine")
          pull(outputLinesIn)
        }
      })
      setHandler(readLinesOut, eagerTerminateOutput)

      override def preStart(): Unit = {
        pull(outputLinesIn)
        print(SAVE) // to make sure we don't jump back to former SAVE position in the terminal
        prompt()
      }

      def promptLine = s"$RESTORE$ERASE_LINE$SAVE> $collectedString"

      def prompt(): Unit = {
        print(promptLine)
        pull(characterInput)
      }
    }
} 
Example 152
Source File: ChatClient.scala    From akka-http-scala-js-websocket-chat   with MIT License 5 votes vote down vote up
package example.akkawschat.cli

import scala.concurrent.Future

import akka.actor.ActorSystem

import akka.stream.scaladsl.{ Keep, Source, Sink, Flow }

import akka.http.scaladsl.Http
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.model.ws._

import upickle.default._

import shared.Protocol

object ChatClient {
  def connect[T](endpoint: Uri, handler: Flow[Protocol.Message, String, T])(implicit system: ActorSystem): Future[T] = {
    val wsFlow: Flow[Message, Message, T] =
      Flow[Message]
        .collect {
          case TextMessage.Strict(msg) ⇒ read[Protocol.Message](msg)
        }
        .viaMat(handler)(Keep.right)
        .map(TextMessage(_))

    val (fut, t) = Http().singleWebSocketRequest(WebSocketRequest(endpoint), wsFlow)
    fut.map {
      case v: ValidUpgrade                         ⇒ t
      case InvalidUpgradeResponse(response, cause) ⇒ throw new RuntimeException(s"Connection to chat at $endpoint failed with $cause")
    }(system.dispatcher)
  }

  def connect[T](endpoint: Uri, in: Sink[Protocol.Message, Any], out: Source[String, Any])(implicit system: ActorSystem): Future[Unit] =
    connect(endpoint, Flow.fromSinkAndSource(in, out)).map(_ ⇒ ())(system.dispatcher)

  def connect[T](endpoint: Uri, onMessage: Protocol.Message ⇒ Unit, out: Source[String, Any])(implicit system: ActorSystem): Future[Unit] =
    connect(endpoint, Sink.foreach(onMessage), out)
} 
Example 153
Source File: TestServiceImpl.scala    From akka-grpc   with Apache License 2.0 5 votes vote down vote up
package akka.grpc.interop

import akka.NotUsed
import akka.actor.ActorSystem
import akka.grpc.GrpcServiceException
import akka.stream.{ Materializer, SystemMaterializer }
import akka.stream.scaladsl.{ Flow, Source }

import com.google.protobuf.ByteString

import io.grpc.Status
import io.grpc.testing.integration.empty.Empty

import scala.concurrent.{ ExecutionContext, Future }

// Generated by our plugin
import io.grpc.testing.integration.messages._
import io.grpc.testing.integration.test.TestService

object TestServiceImpl {
  val parametersToResponseFlow: Flow[ResponseParameters, StreamingOutputCallResponse, NotUsed] =
    Flow[ResponseParameters].map { parameters =>
      StreamingOutputCallResponse(Some(Payload(body = ByteString.copyFrom(new Array[Byte](parameters.size)))))
    }
}


class TestServiceImpl(implicit sys: ActorSystem) extends TestService {
  import TestServiceImpl._

  implicit val mat: Materializer = SystemMaterializer(sys).materializer
  implicit val ec: ExecutionContext = sys.dispatcher

  override def emptyCall(req: Empty) =
    Future.successful(Empty())

  override def unaryCall(req: SimpleRequest): Future[SimpleResponse] =
    req.responseStatus match {
      case None =>
        Future.successful(SimpleResponse(Some(Payload(ByteString.copyFrom(new Array[Byte](req.responseSize))))))
      case Some(requestStatus) =>
        val responseStatus = Status.fromCodeValue(requestStatus.code).withDescription(requestStatus.message)
        //  - Either one of the following works
        // Future.failed(new GrpcServiceException(responseStatus))
        throw new GrpcServiceException(responseStatus)
    }

  override def cacheableUnaryCall(in: SimpleRequest): Future[SimpleResponse] = ???

  override def fullDuplexCall(
      in: Source[StreamingOutputCallRequest, NotUsed]): Source[StreamingOutputCallResponse, NotUsed] =
    in.map(req => {
      req.responseStatus.foreach(reqStatus =>
        throw new GrpcServiceException(Status.fromCodeValue(reqStatus.code).withDescription(reqStatus.message)))
      req
    }).mapConcat(_.responseParameters.toList)
      .via(parametersToResponseFlow)

  override def halfDuplexCall(
      in: Source[StreamingOutputCallRequest, NotUsed]): Source[StreamingOutputCallResponse, NotUsed] = ???

  override def streamingInputCall(in: Source[StreamingInputCallRequest, NotUsed]): Future[StreamingInputCallResponse] =
    in.map(_.payload.map(_.body.size).getOrElse(0)).runFold(0)(_ + _).map { sum => StreamingInputCallResponse(sum) }

  override def streamingOutputCall(in: StreamingOutputCallRequest): Source[StreamingOutputCallResponse, NotUsed] =
    Source(in.responseParameters.toList).via(parametersToResponseFlow)

  override def unimplementedCall(in: Empty): Future[Empty] = ???
} 
Example 154
Source File: KillSwitchStream.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.stream
import java.util.concurrent.atomic.AtomicLong

import akka.stream.ClosedShape
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object KillSwitchStream {
  val genCount = new AtomicLong(0L)
}

class KillSwitchStream extends PerpetualStream[Future[Long]] {
  import KillSwitchStream._
  import org.squbs.unicomplex.Timeouts._

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0){ p => if (p == Int.MaxValue) 0 else p + 1 } map { v =>
    genCount.incrementAndGet()
    v
  }

  val source = Source.fromIterator(generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(counter) {
    implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        source ~> killSwitch.flow[Int] ~> throttle ~> sink
        ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>

      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! matValue
  }
} 
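
The killSwitch used above is provided by PerpetualStream; a minimal, self-contained sketch of the underlying akka-streams KillSwitches mechanism (assumes Akka 2.6 so the ActorSystem provides the materializer):

import akka.actor.ActorSystem
import akka.stream.KillSwitches
import akka.stream.scaladsl.{Keep, Sink, Source}

object KillSwitchDemo extends App {
  implicit val system: ActorSystem = ActorSystem("kill-switch-demo")
  val (switch, done) = Source.repeat(1)
    .viaMat(KillSwitches.single[Int])(Keep.right)
    .toMat(Sink.ignore)(Keep.both)
    .run()
  switch.shutdown() // completes the otherwise infinite stream from outside
}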
Example 155
Source File: TestServiceImpl.scala    From akka-grpc   with Apache License 2.0 5 votes vote down vote up
package akka.grpc.interop

import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.reflect.ClassTag
import scala.collection.immutable

import akka.grpc.scaladsl.{GrpcMarshalling}

import akka.NotUsed
import akka.actor.ActorSystem
import akka.grpc._
import akka.stream.scaladsl.{Flow, Source}
import akka.stream.{ Materializer, SystemMaterializer }

import com.google.protobuf.ByteString
import io.grpc.{ Status, StatusRuntimeException }

// Generated by our plugin
import io.grpc.testing.integration.test.TestService
import io.grpc.testing.integration.messages._
import io.grpc.testing.integration.empty.Empty

object TestServiceImpl {
  val parametersToResponseFlow: Flow[ResponseParameters, StreamingOutputCallResponse, NotUsed] =
    Flow[ResponseParameters]
      .map { parameters =>
        StreamingOutputCallResponse(
          Some(Payload(body = ByteString.copyFrom(new Array[Byte](parameters.size)))))
      }
}


class TestServiceImpl(implicit sys: ActorSystem) extends TestService {
  import TestServiceImpl._

  implicit val mat: Materializer = SystemMaterializer(sys).materializer
  implicit val ec: ExecutionContext = sys.dispatcher
  
  override def emptyCall(req: Empty) =
    Future.successful(Empty())

  override def unaryCall(req: SimpleRequest): Future[SimpleResponse] = {
    req.responseStatus match {
      case None =>
        Future.successful(SimpleResponse(Some(Payload(ByteString.copyFrom(new Array[Byte](req.responseSize))))))
      case Some(requestStatus) =>
        val responseStatus = Status.fromCodeValue(requestStatus.code).withDescription(requestStatus.message)
        //  - Either one of the following works
        Future.failed(new GrpcServiceException(responseStatus))
        // throw new GrpcServiceException(responseStatus)
    }
  }

  override def cacheableUnaryCall(in: SimpleRequest): Future[SimpleResponse] = ???

  override def fullDuplexCall(in: Source[StreamingOutputCallRequest, NotUsed]): Source[StreamingOutputCallResponse, NotUsed] =
    in.map(req => {
      req.responseStatus.foreach(reqStatus =>
        throw new GrpcServiceException(
          Status.fromCodeValue(reqStatus.code).withDescription(reqStatus.message)))
      req
    }).mapConcat(
      _.responseParameters.to[immutable.Seq]).via(parametersToResponseFlow)

  override def halfDuplexCall(in: Source[StreamingOutputCallRequest, NotUsed]): Source[StreamingOutputCallResponse, NotUsed] = ???

  override def streamingInputCall(in: Source[StreamingInputCallRequest, NotUsed]): Future[StreamingInputCallResponse] = {
    in
      .map(_.payload.map(_.body.size).getOrElse(0))
      .runFold(0)(_ + _)
      .map { sum =>
        StreamingInputCallResponse(sum)
      }
  }

  override def streamingOutputCall(in: StreamingOutputCallRequest): Source[StreamingOutputCallResponse, NotUsed] =
    Source(in.responseParameters.to[immutable.Seq]).via(parametersToResponseFlow)

  override def unimplementedCall(in: Empty): Future[Empty] = ???
} 
Example 156
Source File: TestUtils.scala    From mist   with Apache License 2.0 5 votes vote down vote up
package io.hydrosphere.mist.master

import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.stream.scaladsl.Flow
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.concurrent.{Await, Future, Promise}

trait TestUtils {

  implicit class AwaitSyntax[A](f: => Future[A]) {
    def await: A = Await.result(f, Duration.Inf)
    def await(d: FiniteDuration): A = Await.result(f, d)
  }

}
object TestUtils extends TestUtils {

  val cfgStr =
    """
      |context-defaults {
      | downtime = Inf
      | streaming-duration = 1 seconds
      | max-parallel-jobs = 20
      | precreated = false
      | spark-conf = { }
      | worker-mode = "shared"
      | run-options = "--opt"
      | max-conn-failures = 5
      |}
      |
      |context {
      |
      |  foo {
      |    spark-conf {
      |       spark.master = "local[2]"
      |    }
      |  }
      |}
    """.stripMargin

  val contextSettings = {
    val cfg = ConfigFactory.parseString(cfgStr)
    ContextsSettings(cfg)
  }

  val FooContext = contextSettings.contexts.get("foo").get




  object MockHttpServer {

    import akka.actor.ActorSystem
    import akka.http.scaladsl.Http
    import akka.stream.ActorMaterializer
    import akka.util.Timeout

    import scala.concurrent.duration._

    def onServer[A](
      routes: Flow[HttpRequest, HttpResponse, _],
      f: (Http.ServerBinding) => A): Future[A] = {

      implicit val system = ActorSystem("mock-http-cli")
      implicit val materializer = ActorMaterializer()

      implicit val executionContext = system.dispatcher
      implicit val timeout = Timeout(1.seconds)

      val binding = Http().bindAndHandle(routes, "localhost", 0)

      val close = Promise[Http.ServerBinding]
      close.future
        .flatMap(binding => binding.unbind())
        .onComplete(_ => {
          materializer.shutdown()
          Await.result(system.terminate(), Duration.Inf)
        })

      val result = binding.flatMap(binding => {
        try {
          Future.successful(f(binding))
        } catch {
          case e: Throwable =>
            Future.failed(e)
        } finally {
          close.success(binding)
        }
      })
      result
    }
  }

} 
Example 157
Source File: WSApi.scala    From mist   with Apache License 2.0 5 votes vote down vote up
package io.hydrosphere.mist.master.interfaces.http

import akka.http.scaladsl.model.ws._
import akka.http.scaladsl.server.{Directives, Route}
import akka.stream.ActorAttributes.supervisionStrategy
import akka.stream.Supervision.resumingDecider
import akka.stream.{ActorAttributes, Supervision}
import akka.stream.scaladsl.{Flow, Sink}
import io.hydrosphere.mist.master.EventsStreamer
import io.hydrosphere.mist.master.Messages.StatusMessages._
import io.hydrosphere.mist.master.interfaces.JsonCodecs

import scala.concurrent.duration._
import spray.json._

import scala.language.postfixOps


class WSApi(streamer: EventsStreamer)(implicit val keepAliveTimeout: FiniteDuration) {

  import Directives._
  import JsonCodecs._

  val route: Route = {
    pathPrefix("v2" / "api"/ "ws" ) { parameter('withLogs ? false)  { withLogs =>
      path("all") {
        get {
          handleWebSocketMessagesWithKeepAlive(allEventsWsFlow(withLogs))
        }
      } ~
      path("jobs" / Segment) { jobId =>
        get {
          handleWebSocketMessagesWithKeepAlive(jobWsFlow(jobId, withLogs))
        }
      }
    }}
  }

  private def handleWebSocketMessagesWithKeepAlive(handler: Flow[Message, Message, akka.NotUsed]): Route =
    handleWebSocketMessages(handler
      .withAttributes(supervisionStrategy(resumingDecider))
      .keepAlive(
        keepAliveTimeout,
        () => TextMessage.Strict(KeepAliveEvent.asInstanceOf[SystemEvent].toJson.toString())
      ))


  private def jobWsFlow(id: String, withLogs: Boolean): Flow[Message, Message, akka.NotUsed] = {
    val source = streamer.eventsSource()
      .filter({
        case e: UpdateStatusEvent => e.id == id
        case e: ReceivedLogs if withLogs => e.id == id
        case _ => false
      })
      .map(toWsMessage)

    val sink = Sink.ignore
    Flow.fromSinkAndSource(sink, source)
  }

  private def allEventsWsFlow(withLogs: Boolean): Flow[Message, Message, akka.NotUsed] = {
    val source = streamer.eventsSource()
      .filter({
        case _: ReceivedLogs => withLogs
        case _ => true
      })
      .map(toWsMessage)

    val sink = Sink.ignore
    Flow.fromSinkAndSource(sink, source)
  }

  private def toWsMessage(e: SystemEvent): Message = TextMessage.Strict(e.toJson.toString())
} 
Example 158
Source File: ThroughputMeasurementFlow.scala    From akka-viz   with MIT License 5 votes vote down vote up
package akkaviz.events

import akka.NotUsed
import akka.actor.ActorRef
import akka.stream.scaladsl.Flow
import akkaviz.events.types.{ThroughputMeasurement, ReceivedWithId, BackendEvent}

import scala.concurrent.duration._

object ThroughputMeasurementFlow {
  def apply(period: FiniteDuration): Flow[BackendEvent, ThroughputMeasurement, NotUsed] = {
    Flow[BackendEvent]
      .collect { case r: ReceivedWithId => r.actorRef }
      .groupedWithin(Int.MaxValue, period)
      .map { refs =>
        refs.groupBy(identity).mapValues(_.length)
      }
      .scan(Map[ActorRef, Int]()) {
        case (previous, current) =>
          // produce zero for actors that have been measured previously but didn't receive any messages during `period`
          current ++ (for { k <- previous.keySet.diff(current.keySet) } yield k -> 0)
      }
      .mapConcat { m =>
        for {
          (ref, count) <- m
        } yield ThroughputMeasurement(ref, count / (period.toMillis.toDouble / 1.second.toMillis.toDouble))
      }
  }
} 
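
A minimal, plain-Scala sketch of the zero-filling step inside the scan above: actors counted in a previous window but absent from the current one are reported with an explicit 0, so downstream consumers see their throughput drop rather than the actor silently disappearing:

val previous = Map("actorA" -> 3, "actorB" -> 2)
val current  = Map("actorA" -> 1)
val merged   = current ++ previous.keySet.diff(current.keySet).map(_ -> 0)
// merged == Map("actorA" -> 1, "actorB" -> 0)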
Example 159
Source File: BackendEventsMarshalling.scala    From akka-viz   with MIT License 5 votes vote down vote up
package akkaviz.server

import akka.actor.ActorRef
import akka.stream.scaladsl.Flow
import akkaviz.events.Helpers
import akkaviz.events.types._
import akkaviz.protocol
import akkaviz.serialization.MessageSerialization

import scala.collection._

trait BackendEventsMarshalling {

  @inline
  private[this] implicit val actorRefToString: Function1[ActorRef, String] = Helpers.actorRefToString

  def backendEventToProtocolFlow: Flow[BackendEvent, protocol.ApiServerMessage, _] = Flow[BackendEvent].map {
    case ReceivedWithId(eventId, sender, receiver, message, handled) =>
      protocol.Received(eventId, sender, receiver, message.getClass.getName, Some(MessageSerialization.render(message)), handled)

    case AvailableMessageTypes(types) =>
      protocol.AvailableClasses(types.map(_.getName))

    case Spawned(ref) =>
      protocol.Spawned(ref)

    case ActorSystemCreated(system) =>
      protocol.ActorSystemCreated(system.name)

    case Instantiated(ref, clazz) =>
      protocol.Instantiated(ref, clazz.getClass.getName)

    case FSMTransition(ref, currentState, currentData, nextState, nextData) =>
      protocol.FSMTransition(
        ref,
        currentState = MessageSerialization.render(currentState),
        currentStateClass = currentState.getClass.getName,
        currentData = MessageSerialization.render(currentData),
        currentDataClass = currentData.getClass.getName,
        nextState = MessageSerialization.render(nextState),
        nextStateClass = nextState.getClass.getName,
        nextData = MessageSerialization.render(nextData),
        nextDataClass = nextData.getClass.getName
      )

    case CurrentActorState(ref, actor) =>
      protocol.CurrentActorState(ref, MessageSerialization.render(actor))

    case MailboxStatus(owner, size) =>
      protocol.MailboxStatus(owner, size)

    case ReceiveDelaySet(current) =>
      protocol.ReceiveDelaySet(current)

    case Killed(ref) =>
      protocol.Killed(ref)

    case ActorFailure(ref, ex, decision, ts) =>
      protocol.ActorFailure(ref, MessageSerialization.render(ex), decision.toString, ts)

    case Question(id, senderOpt, ref, msg) =>
      protocol.Question(
        id,
        senderOpt.map(x => actorRefToString(x)),
        ref,
        MessageSerialization.render(msg)
      )

    case Answer(questionId, msg) =>
      protocol.Answer(questionId, MessageSerialization.render(msg))

    case AnswerFailed(questionId, ex) =>
      protocol.AnswerFailed(questionId, MessageSerialization.render(ex))

    case ReportingDisabled =>
      protocol.ReportingDisabled
    case ReportingEnabled =>
      protocol.ReportingEnabled
    case SnapshotAvailable(s) =>
      protocol.SnapshotAvailable(
        s.liveActors.map(ref => ref -> s.classNameFor(ref))(breakOut),
        s.dead.map(ref => ref -> s.classNameFor(ref))(breakOut),
        s.receivedFrom
      )
    case ThroughputMeasurement(ref, msgs, ts) =>
      protocol.ThroughputMeasurement(ref, msgs, ts)

    case Restarted(ref) =>
      protocol.Restarted(ref)
  }

}

object BackendEventsMarshalling extends BackendEventsMarshalling 
Example 160
Source File: ProtocolSerializationSupport.scala    From akka-viz   with MIT License 5 votes vote down vote up
package akkaviz.server

import akka.http.scaladsl.model.ws.{BinaryMessage, Message}
import akka.stream.scaladsl.Flow
import akka.util.ByteString
import akkaviz.protocol

trait ProtocolSerializationSupport {

  def protocolServerMessageToByteString: Flow[protocol.ApiServerMessage, ByteString, Any] = Flow[protocol.ApiServerMessage].map {
    msg => ByteString(protocol.IO.write(msg))
  }

  def websocketMessageToClientMessage: Flow[Message, protocol.ApiClientMessage, _] = Flow[Message].collect {
    case BinaryMessage.Strict(msg) =>
      protocol.IO.readClient(msg.asByteBuffer)
  }

}

object ProtocolSerializationSupport extends ProtocolSerializationSupport 
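These two marshalling traits compose naturally into the server half of a websocket flow: backend events become protocol messages, which become bytes. A short sketch; wrapping the bytes in BinaryMessage.Strict is an assumption here, not part of the original code:

import akka.http.scaladsl.model.ws.{BinaryMessage, Message}
import akka.stream.scaladsl.Flow
import akkaviz.events.types.BackendEvent

val eventsToWsMessages: Flow[BackendEvent, Message, _] =
  BackendEventsMarshalling.backendEventToProtocolFlow
    .via(ProtocolSerializationSupport.protocolServerMessageToByteString)
    .map(bytes => BinaryMessage.Strict(bytes))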
Example 161
Source File: ArchiveSupport.scala    From akka-viz   with MIT License 5 votes vote down vote up
package akkaviz.server

import akka.http.scaladsl.marshalling.Marshaller
import akka.http.scaladsl.marshalling.Marshalling.WithFixedContentType
import akka.http.scaladsl.model.MediaTypes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.scaladsl.{Flow, Source}
import akkaviz.config.Config
import akkaviz.persistence.{PersistenceSources, ReceivedRecord}
import akkaviz.rest
import com.datastax.driver.core.utils.UUIDs
import org.reactivestreams.Publisher

import scala.concurrent.ExecutionContext.Implicits.global

trait ArchiveSupport {

  def isArchiveEnabled: Boolean

  def receivedOf(ref: String): Source[ReceivedRecord, _]

  def receivedBetween(ref: String, ref2: String): Source[ReceivedRecord, _]

  def archiveRouting: Route = get {
    pathPrefix("messages") {
      if (isArchiveEnabled) {
        path("of" / Segment) {
          ref =>
            AkkaHttpHelpers.completeAsJson(receivedOf(ref).via(receivedRecordToRestReceived))
        } ~
          path("between" / Segment / Segment) {
            (ref, ref2) =>
              AkkaHttpHelpers.completeAsJson(receivedBetween(ref, ref2).via(receivedRecordToRestReceived))
          }
      } else {
        reject
      }
    }
  }

  private[this] implicit val receivedRecordMarshaller: Marshaller[rest.Received, String] = Marshaller.strict {
    received =>
      WithFixedContentType(MediaTypes.`application/json`, () => upickle.default.write(received))
  }

  private[this] def receivedRecordToRestReceived = Flow[ReceivedRecord].map {
    rr =>
      rest.Received(rr.millis, rr.direction, rr.first, rr.second, rr.data)
  }

} 
Example 162
Source File: AkkaHttpHelpers.scala    From akka-viz   with MIT License 5 votes vote down vote up
package akkaviz.server

import akka.http.scaladsl.marshalling.{Marshal, Marshaller}
import akka.http.scaladsl.model.HttpEntity.ChunkStreamPart
import akka.http.scaladsl.model.{HttpEntity, HttpResponse, MediaTypes}
import akka.http.scaladsl.server.{Directives, StandardRoute}
import akka.stream.scaladsl.{Flow, Source}

import scala.concurrent.ExecutionContext

trait AkkaHttpHelpers {

  def asJsonArray[T](implicit m: Marshaller[T, String], ec: ExecutionContext): Flow[T, HttpEntity.ChunkStreamPart, _] = {
    Flow.apply[T]
      .mapAsync[String](4)(t => Marshal(t).to[String])
      .scan[Option[ChunkStreamPart]](None) {
        case (None, s: String) => Some(ChunkStreamPart(s))
        case (_, s: String)    => Some(ChunkStreamPart(s",${s}"))
      }.mapConcat(_.toList)
      .prepend(Source.single(ChunkStreamPart("[")))
      .concat(Source.single(ChunkStreamPart("]")))
  }

  def completeAsJson[T](source: Source[T, _])(implicit m: Marshaller[T, String], ec: ExecutionContext): StandardRoute = {
    Directives.complete(HttpResponse(
      entity = HttpEntity.Chunked(MediaTypes.`application/json`, source.via(asJsonArray))
    ))
  }
}

object AkkaHttpHelpers extends AkkaHttpHelpers 
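completeAsJson streams its source as a chunked JSON array, so elements are marshalled and sent incrementally rather than buffered. A minimal usage sketch; the route and the strict Int marshaller are hypothetical:

import akka.http.scaladsl.marshalling.Marshaller
import akka.http.scaladsl.marshalling.Marshalling.WithFixedContentType
import akka.http.scaladsl.model.MediaTypes
import akka.http.scaladsl.server.Directives._
import akka.stream.scaladsl.Source

import scala.concurrent.ExecutionContext.Implicits.global

implicit val intMarshaller: Marshaller[Int, String] =
  Marshaller.strict(i => WithFixedContentType(MediaTypes.`application/json`, () => i.toString))

// responds with the chunked body: [1,2,3,4,5]
val route = path("numbers") {
  AkkaHttpHelpers.completeAsJson(Source(1 to 5))
}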
Example 163
Source File: KillSwitchWithChildActorStream.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.stream
import java.util.concurrent.atomic.AtomicLong

import akka.actor.{Actor, Props}
import akka.stream.ClosedShape
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object KillSwitchWithChildActorStream {
  val genCount = new AtomicLong(0L)
}

class DummyChildActor extends Actor {
  def receive = PartialFunction.empty
}

class KillSwitchWithChildActorStream extends PerpetualStream[Future[Long]] {
  import KillSwitchWithChildActorStream._
  import org.squbs.unicomplex.Timeouts._

  val dummyChildActor = context.actorOf(Props[DummyChildActor])

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0){ p => if (p == Int.MaxValue) 0 else p + 1 } map { v =>
    genCount.incrementAndGet()
    v
  }

  val source = Source.fromIterator(generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(counter) {
    implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        source ~> killSwitch.flow[Int] ~> throttle ~> sink
        ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>

      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! matValue
  }

  override def shutdown() = {
    val f = super.shutdown()
    defaultMidActorStop(Seq(dummyChildActor))
    f
  }
} 
Example 164
Source File: KillSwitchMatStream.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.stream

import java.util.concurrent.atomic.AtomicLong

import akka.stream.{ClosedShape, KillSwitch, KillSwitches}
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object KillSwitchMatStream {
  val genCount = new AtomicLong(0L)
}

class KillSwitchMatStream extends PerpetualStream[(KillSwitch, Future[Long])] {
  import KillSwitchMatStream._
  import org.squbs.unicomplex.Timeouts._

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0){ p => if (p == Int.MaxValue) 0 else p + 1 } map { v =>
    genCount.incrementAndGet()
    v
  }

  val source = Source.fromIterator(generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(KillSwitches.single[Int], counter)((_, _)) {
    implicit builder =>
      (kill, sink) =>
        import GraphDSL.Implicits._
        source ~> kill ~> throttle ~> sink
        ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>

      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! matValue._2
  }
} 
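The materialized value here is a (KillSwitch, Future[Long]) pair, so the stream can be stopped explicitly and the counter read afterwards. A hedged sketch of how matValue might be used from inside the stream class, assuming PerpetualStream exposes it once the stream is running:

  def stopAndReport(): Unit = {
    val (killSwitch, countF) = matValue
    killSwitch.shutdown() // completes the stream; Sink.head then yields the final count
    countF.foreach(n => println(s"processed $n elements"))(context.dispatcher)
  }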
Example 165
Source File: DummyFlowSvc.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex.dummyflowsvc

import akka.http.scaladsl.model.HttpEntity.{ChunkStreamPart, Chunked}
import akka.http.scaladsl.model.Uri.Path
import akka.http.scaladsl.model._
import akka.stream.scaladsl.{Flow, Source}
import org.squbs.unicomplex.{FlowDefinition, WebContext}

class DummyFlowSvc extends FlowDefinition with WebContext {

  val prefix = if (webContext.isEmpty) "" else '/' + webContext
  val pingPath = s"$prefix/ping"
  val chunkPath = s"$prefix/chunks"
  val exceptionPath = s"$prefix/throwit"

  def flow = Flow[HttpRequest].map {
    case HttpRequest(_, Uri(_, _, Path(`pingPath`), _, _), _, _, _) =>
      HttpResponse(StatusCodes.OK, entity = "pong")

    case req @ HttpRequest(_, Uri(_, _, Path(`chunkPath`), _, _), _, _, _) =>

      val responseChunks = req.entity.dataBytes.filter(_.nonEmpty)
        .map { b => (1, b.length) }
        .reduce { (a, b) => (a._1 + b._1, a._2 + b._2) }
        .map { case (chunkCount, byteCount) =>
          ChunkStreamPart(s"Received $chunkCount chunks and $byteCount bytes.\r\n")
        }
        .concat(Source.single(ChunkStreamPart("This is the last chunk!")))

      HttpResponse(StatusCodes.OK, entity = Chunked(ContentTypes.`text/plain(UTF-8)`, responseChunks))

    case HttpRequest(_, Uri(_, _, Path(`exceptionPath`), _, _), _, _, _) =>
      throw new IllegalArgumentException("This path is supposed to throw this exception!")

    case _ => HttpResponse(StatusCodes.NotFound, entity = "Path not found!")
  }
} 
Example 166
Source File: ConfigurableInitTimeActor.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex.timeout

import akka.NotUsed
import akka.http.scaladsl.model.{HttpRequest, HttpResponse, StatusCodes}
import akka.stream.scaladsl.Flow
import org.slf4j.LoggerFactory
import org.squbs.unicomplex.FlowDefinition

object ConfigurableInitTimeActor {
  def log = LoggerFactory.getLogger(classOf[ConfigurableInitTimeActor])
}

class ConfigurableInitTimeActor extends FlowDefinition {
  import ConfigurableInitTimeActor.log

  override def flow: Flow[HttpRequest, HttpResponse, NotUsed] = {
    val system = this.context.system

    val initTime = Option(system.settings.config.getDuration("squbs.test.actor-init-time"))
      .get

    log.info(s"I'll be ready to go in $initTime")
    Thread.sleep(initTime.toMillis)
    log.info("Ready to work!")

    Flow[HttpRequest].map { r => HttpResponse(StatusCodes.OK, entity = "Hello") }
  }
} 
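The init delay comes straight from configuration: the flow blocks for whatever duration is configured at squbs.test.actor-init-time. A sketch of supplying that setting programmatically; the two-second value is hypothetical:

import com.typesafe.config.ConfigFactory

// the same entry could live in application.conf instead
val config = ConfigFactory.parseString(
  """
    |squbs.test.actor-init-time = 2 seconds
  """.stripMargin)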
Example 167
Source File: FlowDefinitionSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.http.scaladsl.model.Uri.Path
import akka.http.scaladsl.model._
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Flow
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.Timeouts._

import scala.concurrent.Await

object FlowDefinitionSpec {

  val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath
  val classPath = dummyJarsDir + "/FlowDefinitionSpec/META-INF/squbs-meta.conf"

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = FlowDefinitionSpec
       |  ${JMX.prefixConfig} = true
       |}
       |default-listener.bind-port = 0
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanResources(withClassPath = false, classPath)
    .initExtensions.start()

}

class TestFlowDefinition extends FlowDefinition with WebContext {
  val firstPath = s"/$webContext/first"
  val secondPath = s"/$webContext/second"
  val thirdPath = s"/$webContext/third"

  @volatile var count = 0
  def flow = Flow[HttpRequest].map {
    case HttpRequest(HttpMethods.GET, Uri(_, _, Path(`firstPath`), _, _), _, _, _) =>
      count += 1
      HttpResponse(StatusCodes.OK, entity = count.toString)

    case HttpRequest(HttpMethods.GET, Uri(_, _, Path(`secondPath`), _, _), _, _, _) =>
      count += 1
      HttpResponse(StatusCodes.OK, entity = count.toString)

    case HttpRequest(HttpMethods.GET, Uri(_, _, Path(`thirdPath`), _, _), _, _, _) =>
      HttpResponse(StatusCodes.OK, entity = {count += 1; count.toString})
  }
}

class FlowDefinitionSpec extends TestKit(
  FlowDefinitionSpec.boot.actorSystem) with FlatSpecLike with Matchers with ImplicitSender with BeforeAndAfterAll {

  implicit val am = ActorMaterializer()

  val portBindings = Await.result((Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]], awaitMax)
  val port = portBindings("default-listener")

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "The test actor" should "return correct count value" in {
    // The behaviour is different from Spray: responses are not cached anymore.
    Await.result(entityAsString(s"http://127.0.0.1:$port/flowdef/first"), awaitMax) should be ("1")
    Await.result(entityAsString(s"http://127.0.0.1:$port/flowdef/first"), awaitMax) should be ("2")
    Await.result(entityAsString(s"http://127.0.0.1:$port/flowdef/second"), awaitMax) should be ("3")
    Await.result(entityAsString(s"http://127.0.0.1:$port/flowdef/third"), awaitMax) should be ("4")
    Await.result(entityAsString(s"http://127.0.0.1:$port/flowdef/third"), awaitMax) should be ("5")
  }
} 
Example 168
Source File: DemandSupplyMetrics.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.streams

import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream._
import akka.stream.scaladsl.Flow
import akka.stream.stage._
import com.codahale.metrics.MetricRegistry
import org.squbs.metrics.MetricsExtension

object DemandSupplyMetrics {

  // NOTE: this factory is a reconstruction; the listing dropped the original body.
  // It follows the usual pattern of wrapping the custom stage below in a Flow.
  def apply[T](name: String)(implicit system: ActorSystem): Flow[T, T, akka.NotUsed] =
    Flow.fromGraph(new DemandSupplyMetricsStage[T](name))
}

class DemandSupplyMetricsStage[T](name: String)(implicit system: ActorSystem) extends GraphStage[FlowShape[T, T]] {

  val domain = MetricsExtension(system).Domain
  val metrics = MetricsExtension(system).metrics

  val in = Inlet[T](Logging.simpleName(this) + ".in")
  val out = Outlet[T](Logging.simpleName(this) + ".out")

  override val shape = FlowShape.of(in, out)

  // naming convention "domain:key-property-list"
  val upstreamCounter = MetricRegistry.name(domain, s"$name-upstream-counter")
  val downstreamCounter = MetricRegistry.name(domain, s"$name-downstream-counter")

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) {

      setHandler(in, new InHandler {
        override def onPush(): Unit = {
          val elem = grab(in)
          metrics.meter(upstreamCounter).mark
          push(out, elem)
        }
      })

      setHandler(out, new OutHandler {
        override def onPull(): Unit = {
          metrics.meter(downstreamCounter).mark
          pull(in)
        }
      })
    }

} 
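With the factory restored above, attaching the metrics stage to any stream is a one-liner; a sketch, where the meter name "metrics-demo" is arbitrary:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

implicit val system: ActorSystem = ActorSystem("metrics-demo")
implicit val mat: ActorMaterializer = ActorMaterializer()

// meters upstream pushes and downstream pulls under "<name>-upstream/downstream-counter"
Source(1 to 100)
  .via(DemandSupplyMetrics[Int]("metrics-demo"))
  .runWith(Sink.ignore)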
Example 169
Source File: JavaConverters.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package akka.http.org.squbs.util

import java.util.Optional

import akka.NotUsed
import akka.http.impl.util.JavaMapping
import akka.http.javadsl.{model => jm}
import akka.http.scaladsl.Http.HostConnectionPool
import akka.http.scaladsl.HttpsConnectionContext
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.http.{javadsl => jd}
import akka.japi.Pair
import akka.stream.scaladsl.{BidiFlow, Flow}
import akka.stream.{javadsl => js}

import scala.util.Try


object JavaConverters {
  def fromJava(connectionContext: Optional[jd.HttpsConnectionContext],
               settings: Optional[jd.settings.ConnectionPoolSettings]):
  (Option[HttpsConnectionContext], Option[ConnectionPoolSettings]) = {
    import scala.compat.java8.OptionConverters._
    val cCtx = connectionContext.asScala.asInstanceOf[Option[HttpsConnectionContext]]
    val sSettings = settings.asScala.asInstanceOf[Option[ConnectionPoolSettings]]
    (cCtx, sSettings)
  }

  def toJava[In1, Out1, In2, Out2, Context](bidiFlow: BidiFlow[(In1, Context), (Out1, Context), (In2, Context), (Out2, Context), NotUsed]):
  js.BidiFlow[Pair[In1, Context], Pair[Out1, Context], Pair[In2, Context], Pair[Out2, Context], NotUsed] = {
    implicit val sIn1Mapping = JavaMapping.identity[In1]
    implicit val sOut1Mapping = JavaMapping.identity[Out1]
    implicit val sIn2Mapping = JavaMapping.identity[In2]
    implicit val sOut2Mapping = JavaMapping.identity[Out2]
    implicit val contextMapping = JavaMapping.identity[Context]
    val javaToScalaAdapter = JavaMapping.adapterBidiFlow[Pair[In1, Context], (In1, Context), (Out2, Context), Pair[Out2, Context]]
    val scalaToJavaAdapter = JavaMapping.adapterBidiFlow[Pair[In2, Context], (In2, Context), (Out1, Context), Pair[Out1, Context]].reversed
    javaToScalaAdapter.atop(bidiFlow).atop(scalaToJavaAdapter).asJava
  }

  private def adaptTupleFlow[T](scalaFlow: Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool]):
  js.Flow[Pair[jm.HttpRequest, T], Pair[Try[jm.HttpResponse], T], jd.HostConnectionPool] = {
    implicit val _ = JavaMapping.identity[T]
    implicit object HostConnectionPoolMapping extends JavaMapping[jd.HostConnectionPool, HostConnectionPool] {
      def toScala(javaObject: jd.HostConnectionPool): HostConnectionPool =
        throw new UnsupportedOperationException("jd.HostConnectionPool cannot be converted to Scala")
      def toJava(scalaObject: HostConnectionPool): jd.HostConnectionPool = scalaObject.toJava
    }
    JavaMapping.toJava(scalaFlow)(JavaMapping.flowMapping[Pair[jm.HttpRequest, T], (HttpRequest, T),
      Pair[Try[jm.HttpResponse], T], (Try[HttpResponse], T), jd.HostConnectionPool, HostConnectionPool])
  }

  def toJava[T](flow: Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool]):
  js.Flow[Pair[jm.HttpRequest, T], Pair[Try[jm.HttpResponse], T], jd.HostConnectionPool] = {
    adaptTupleFlow[T](flow)
  }

  def toScala(uri: akka.http.javadsl.model.Uri) = JavaMapping.toScala(uri)
} 
Example 170
Source File: CirceStreamSupport.scala    From akka-stream-json   with Apache License 2.0 5 votes vote down vote up
package de.knutwalker.akka.stream
package support

import akka.NotUsed
import akka.stream.scaladsl.Flow
import akka.util.ByteString

import io.circe.CursorOp.DownField
import io.circe.jawn.CirceSupportParser._
import io.circe.{ CursorOp, Decoder, DecodingFailure, Encoder, HCursor, Json, Printer }
import jawn.AsyncParser

object CirceStreamSupport extends CirceStreamSupport

trait CirceStreamSupport {

  def decode[A: Decoder]: Flow[ByteString, A, NotUsed] =
    JsonStreamParser.flow[Json].map(decodeJson[A])

  def decode[A: Decoder](mode: AsyncParser.Mode): Flow[ByteString, A, NotUsed] =
    JsonStreamParser.flow[Json](mode).map(decodeJson[A])

  def encode[A](implicit A: Encoder[A], P: Printer = Printer.noSpaces): Flow[A, String, NotUsed] =
    Flow[A].map(a => P.pretty(A(a)))

  case class JsonParsingException(df: DecodingFailure, cursor: HCursor)
    extends Exception(errorMessage(df.history, cursor, df.message), df)

  private[knutwalker] def decodeJson[A](json: Json)(implicit decoder: Decoder[A]): A = {
    val cursor = json.hcursor
    decoder(cursor) match {
      case Right(e) => e
      case Left(f)  => throw JsonParsingException(f, cursor)
    }
  }


  private[this] def errorMessage(hist: List[CursorOp], cursor: HCursor, typeHint: String) = {
    val ac = cursor.replay(hist)
    if (ac.failed && lastWasDownField(hist)) {
      s"The field [${CursorOp.opsToPath(hist)}] is missing."
    } else {
      s"Could not decode [${ac.focus.getOrElse(Json.Null)}] at [${CursorOp.opsToPath(hist)}] as [$typeHint]."
    }
  }

  private[this] def lastWasDownField(hist: List[CursorOp]) = hist.headOption match {
    case Some(DownField(_)) => true
    case _                  => false
  }
} 
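decode parses the incoming ByteStrings incrementally and emits each decoded value as soon as it is complete. A sketch, assuming a Person case class with an automatically derived circe Decoder (via circe-generic):

import akka.stream.scaladsl.Source
import akka.util.ByteString
import io.circe.generic.auto._
import jawn.AsyncParser

case class Person(name: String, age: Int)

// UnwrapArray emits each array element as its own value
val people =
  Source.single(ByteString("""[{"name":"ada","age":36},{"name":"bob","age":42}]"""))
    .via(CirceStreamSupport.decode[Person](AsyncParser.UnwrapArray))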
Example 171
Source File: TestProcessor.scala    From cloudflow   with Apache License 2.0 5 votes vote down vote up
package com.example

import cloudflow.akkastream._
import cloudflow.streamlets._
import cloudflow.streamlets.avro._
import cloudflow.akkastream.scaladsl.RunnableGraphStreamletLogic

import akka.stream.scaladsl.Flow

//tag::processor[]
class TestProcessor extends AkkaStreamlet {
  val in                   = AvroInlet[Data]("in")
  val out                  = AvroOutlet[Data]("out", _.id.toString)
  final override val shape = StreamletShape.withInlets(in).withOutlets(out)

  val flow = Flow[Data].filter(_.id % 2 == 0)
  override final def createLogic = new RunnableGraphStreamletLogic() {
    def runnableGraph = plainSource(in).via(flow).to(plainSink(out))
  }
}
//end::processor[] 
Example 172
Source File: AkkaHttpClient.scala    From sttp   with Apache License 2.0 5 votes vote down vote up
package sttp.client.akkahttp

import akka.actor.ActorSystem
import akka.event.LoggingAdapter
import akka.http.scaladsl.model.ws.{Message, WebSocketRequest, WebSocketUpgradeResponse}
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.http.scaladsl.server.{ExceptionHandler, RejectionHandler, Route, RoutingLog}
import akka.http.scaladsl.settings.{ClientConnectionSettings, ConnectionPoolSettings, ParserSettings, RoutingSettings}
import akka.http.scaladsl.{Http, HttpsConnectionContext}
import akka.stream.Materializer
import akka.stream.scaladsl.Flow

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future}

trait AkkaHttpClient {
  def singleRequest(
      request: HttpRequest,
      settings: ConnectionPoolSettings
  ): Future[HttpResponse]

  def singleWebsocketRequest[WS_RESULT](
      request: WebSocketRequest,
      clientFlow: Flow[Message, Message, WS_RESULT],
      settings: ClientConnectionSettings
  )(implicit ec: ExecutionContext, mat: Materializer): Future[(WebSocketUpgradeResponse, WS_RESULT)]
}

object AkkaHttpClient {
  def default(
      system: ActorSystem,
      connectionContext: Option[HttpsConnectionContext],
      customLog: Option[LoggingAdapter]
  ): AkkaHttpClient =
    new AkkaHttpClient {
      private val http = Http()(system)

      override def singleRequest(
          request: HttpRequest,
          settings: ConnectionPoolSettings
      ): Future[HttpResponse] = {
        http.singleRequest(
          request,
          connectionContext.getOrElse(http.defaultClientHttpsContext),
          settings,
          customLog.getOrElse(system.log)
        )
      }

      override def singleWebsocketRequest[WS_RESULT](
          request: WebSocketRequest,
          clientFlow: Flow[Message, Message, WS_RESULT],
          settings: ClientConnectionSettings
      )(implicit ec: ExecutionContext, mat: Materializer): Future[(WebSocketUpgradeResponse, WS_RESULT)] = {
        val (wsResponse, wsResult) = http.singleWebSocketRequest(
          request,
          clientFlow,
          connectionContext.getOrElse(http.defaultClientHttpsContext),
          None,
          settings,
          customLog.getOrElse(system.log)
        )
        wsResponse.map((_, wsResult))
      }
    }

  def stubFromAsyncHandler(run: HttpRequest => Future[HttpResponse]): AkkaHttpClient =
    new AkkaHttpClient {
      def singleRequest(request: HttpRequest, settings: ConnectionPoolSettings): Future[HttpResponse] =
        run(request)

      override def singleWebsocketRequest[WS_RESULT](
          request: WebSocketRequest,
          clientFlow: Flow[Message, Message, WS_RESULT],
          settings: ClientConnectionSettings
      )(implicit ec: ExecutionContext, mat: Materializer): Future[(WebSocketUpgradeResponse, WS_RESULT)] =
        Future.failed(new RuntimeException("Websockets are not supported"))
    }

  def stubFromRoute(route: Route)(implicit
      routingSettings: RoutingSettings,
      parserSettings: ParserSettings,
      materializer: Materializer,
      routingLog: RoutingLog,
      executionContext: ExecutionContextExecutor = null,
      rejectionHandler: RejectionHandler = RejectionHandler.default,
      exceptionHandler: ExceptionHandler = null
  ): AkkaHttpClient = stubFromAsyncHandler(Route.asyncHandler(route))
} 
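stubFromAsyncHandler makes it easy to fake the transport in tests without binding a server. A minimal sketch that answers every request with a canned response; the body is hypothetical:

import akka.http.scaladsl.model.{HttpRequest, HttpResponse}

import scala.concurrent.Future

val stub: AkkaHttpClient = AkkaHttpClient.stubFromAsyncHandler { req =>
  Future.successful(HttpResponse(entity = s"stubbed: ${req.uri.path}"))
}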
Example 173
Source File: SttpBackendStubAkkaTests.scala    From sttp   with Apache License 2.0 5 votes vote down vote up
package sttp.client.akkahttp

import akka.actor.ActorSystem
import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import sttp.client._
import sttp.model.Headers

import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

class SttpBackendStubAkkaTests extends AnyFlatSpec with Matchers with ScalaFutures with BeforeAndAfterAll {

  implicit val system: ActorSystem = ActorSystem()

  override protected def afterAll(): Unit = {
    Await.result(system.terminate().map(_ => ()), 5.seconds)
  }

  "backend stub" should "cycle through responses using a single sent request" in {
    // given
    implicit val backend = AkkaHttpBackend.stub
      .whenRequestMatches(_ => true)
      .thenRespondCyclic("a", "b", "c")

    // when
    def r = basicRequest.get(uri"http://example.org/a/b/c").send().futureValue

    // then
    r.body shouldBe Right("a")
    r.body shouldBe Right("b")
    r.body shouldBe Right("c")
    r.body shouldBe Right("a")
  }

  it should "use given flow as web socket handler" in {
    // This test is an example of how a client flow can be tested.
    // We check the behaviour of a client connected to an echo server.
    // The client's responsibility is to send two messages to the server and collect the received messages.
    val useHandler: Flow[Message, Message, Future[Seq[Message]]] => Future[Seq[Message]] = clientFlow => {
      val ((outQueue, clientReceivedMessages), inQueue) = Source
        .queue(1, OverflowStrategy.fail)
        .viaMat(clientFlow)(Keep.both)
        .toMat(Sink.queue())(Keep.both)
        .run()

      def echoMsg(): Future[Unit] =
        inQueue.pull().flatMap {
          case None =>
            echoMsg()
          case Some(msg) =>
            outQueue.offer(TextMessage(s"echo: " + msg.asTextMessage.getStrictText)).map(_ => ())
        }

      (for {
        _ <- outQueue.offer(TextMessage("Hi!"))
        _ <- echoMsg()
        _ <- echoMsg()
        _ = outQueue.complete()
        _ <- outQueue.watchCompletion()
      } yield ()).flatMap(_ => clientReceivedMessages)
    }

    val clientFlow: Flow[Message, Message, Future[Seq[Message]]] = {
      Flow.fromSinkAndSourceMat(
        Sink.seq[Message],
        Source((1 to 2).map(i => TextMessage(s"test$i")))
      )(Keep.left)
    }

    implicit val b = AkkaHttpBackend.stub
      .whenRequestMatches(_ => true)
      .thenHandleOpenWebSocket(Headers(List.empty), useHandler)

    val receivedMessages = basicRequest
      .get(uri"wss://echo.websocket.org")
      .openWebsocket(clientFlow)
      .flatMap(_.result)
      .futureValue
      .toList

    receivedMessages shouldBe List("Hi!", "echo: test1", "echo: test2").map(TextMessage(_))
  }
} 
Example 174
Source File: RouteGuideAkkaStreamService.scala    From grpcexample   with MIT License 5 votes vote down vote up
package io.grpc.routeguide

import java.util.concurrent.TimeUnit.NANOSECONDS
import java.util.logging.Logger

import akka.NotUsed
import akka.stream.scaladsl.Flow
import concurrency.AtomicRef

class RouteGuideAkkaStreamService(features: Seq[Feature]) extends RouteGuideGrpcAkkaStream.RouteGuide {

  val logger: Logger = Logger.getLogger(classOf[RouteGuideAkkaStreamService].getName)

  private val routeNotes: AtomicRef[Map[Point, Seq[RouteNote]]] = new AtomicRef(Map.empty)

  
  override def routeChat: Flow[RouteNote, RouteNote, NotUsed] =
    Flow[RouteNote].mapConcat { note =>
      addNote(note)
      getNotes(note.getLocation).to[collection.immutable.Iterable]
    }

  private def findFeature(point: Point): Feature = {
    features.find { feature =>
      feature.getLocation.latitude == point.latitude && feature.getLocation.longitude == point.longitude
    } getOrElse new Feature(location = Some(point))
  }

  private def getNotes(point: Point): Seq[RouteNote] = {
    routeNotes.get.getOrElse(point, Seq.empty)
  }

  private def addNote(note: RouteNote): Unit = {
    routeNotes.updateAndGet { notes =>
      val existingNotes = notes.getOrElse(note.getLocation, Seq.empty)
      val updatedNotes = existingNotes :+ note
      notes + (note.getLocation -> updatedNotes)
    }
  }
} 
Example 175
Source File: BankAccountReadModelUseCase.scala    From akka-ddd-cqrs-es-example   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.bank.useCase

import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Flow, Keep, Sink, Source, SourceQueueWithComplete }
import akka.stream.{ ActorMaterializer, OverflowStrategy }
import akka.{ Done, NotUsed }
import com.github.j5ik2o.bank.domain.model._
import com.github.j5ik2o.bank.useCase.BankAccountAggregateUseCase.Protocol.{
  ResolveBankAccountEventsRequest,
  ResolveBankAccountEventsResponse
}
import com.github.j5ik2o.bank.useCase.port.{ BankAccountReadModelFlows, JournalReader }
import pureconfig._

import scala.concurrent.{ ExecutionContext, ExecutionContextExecutor, Future, Promise }

class BankAccountReadModelUseCase(bankAccountReadModelFlows: BankAccountReadModelFlows, journalReader: JournalReader)(
    implicit val system: ActorSystem
) extends UseCaseSupport {

  import UseCaseSupport._

  private val config = loadConfigOrThrow[BankAccountAggregateUseCaseConfig]("bank.use-case.bank-account-use-case")

  private val bufferSize: Int = config.bufferSize

  private implicit val mat: ActorMaterializer       = ActorMaterializer()
  private implicit val ec: ExecutionContextExecutor = system.dispatcher

  def resolveBankAccountEventsById(
      request: ResolveBankAccountEventsRequest
  )(implicit ec: ExecutionContext): Future[ResolveBankAccountEventsResponse] =
    offerToQueue(resolveBankAccountEventQueue)(request, Promise())

  private lazy val resolveBankAccountEventQueue
    : SourceQueueWithComplete[(ResolveBankAccountEventsRequest, Promise[ResolveBankAccountEventsResponse])] =
    Source
      .queue[(ResolveBankAccountEventsRequest, Promise[ResolveBankAccountEventsResponse])](bufferSize,
                                                                                           OverflowStrategy.dropNew)
      .via(bankAccountReadModelFlows.resolveBankAccountEventByIdFlow.zipPromise)
      .toMat(completePromiseSink)(Keep.left)
      .run()

  private val projectionFlow: Flow[(BankAccountEvent, Long), Int, NotUsed] =
    Flow[(BankAccountEvent, Long)].flatMapConcat {
      case (event: BankAccountOpened, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.name.value, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.openBankAccountFlow)
      case (event: BankAccountEventUpdated, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.name.value, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.updateAccountFlow)
      case (event: BankAccountDeposited, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.deposit, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.depositBankAccountFlow)
      case (event: BankAccountWithdrawn, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.withdraw, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.withdrawBankAccountFlow)
      case (event: BankAccountClosed, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.closeBankAccountFlow)
    }

  def execute(): Future[Done] = {
    bankAccountReadModelFlows.resolveLastSeqNrSource
      .flatMapConcat { lastSeqNr =>
        journalReader.eventsByTagSource(classOf[BankAccountEvent].getName, lastSeqNr + 1)
      }
      .map { eventBody =>
        (eventBody.event.asInstanceOf[BankAccountEvent], eventBody.sequenceNr)
      }
      .via(projectionFlow)
      .toMat(Sink.ignore)(Keep.right)
      .run()

  }
} 
Example 176
Source File: UseCaseSupport.scala    From akka-ddd-cqrs-es-example   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.bank.useCase

import akka.{ Done, NotUsed }
import akka.stream.{ FlowShape, QueueOfferResult }
import akka.stream.scaladsl.{ Flow, GraphDSL, Sink, SourceQueueWithComplete, Unzip, Zip }

import scala.concurrent.{ ExecutionContext, Future, Promise }

object UseCaseSupport {
  implicit class FlowOps[A, B](val self: Flow[A, B, NotUsed]) extends AnyVal {
    def zipPromise: Flow[(A, Promise[B]), (B, Promise[B]), NotUsed] =
      Flow
        .fromGraph(GraphDSL.create() { implicit b =>
          import GraphDSL.Implicits._
          val unzip = b.add(Unzip[A, Promise[B]])
          val zip   = b.add(Zip[B, Promise[B]])
          unzip.out0 ~> self ~> zip.in0
          unzip.out1 ~> zip.in1
          FlowShape(unzip.in, zip.out)
        })
  }
}

trait UseCaseSupport {

  protected def offerToQueue[A, B](
      sourceQueue: SourceQueueWithComplete[(A, Promise[B])]
  )(request: A, promise: Promise[B])(implicit ec: ExecutionContext): Future[B] = {
    sourceQueue.offer((request, promise)).flatMap {
      case QueueOfferResult.Enqueued =>
        promise.future
      case QueueOfferResult.Failure(t) =>
        Future.failed(new Exception("Failed to offer request", t))
      case QueueOfferResult.Dropped =>
        Future.failed(
          new Exception(
            s"Failed to enqueue resolve request, the queue buffer was full, please check the bank.interface.buffer-size setting"
          )
        )
      case QueueOfferResult.QueueClosed =>
        Future.failed(new Exception("Failed to enqueue request batch write, the queue was closed"))
    }
  }

  protected def completePromiseSink[T]: Sink[(T, Promise[T]), Future[Done]] = Sink.foreach {
    case (response, promise) =>
      promise.success(response)
  }

} 
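zipPromise threads a Promise alongside each element through an existing flow, so every request can be answered individually; together with offerToQueue and completePromiseSink this yields the request/response queue used by BankAccountReadModelUseCase above. A self-contained sketch under those assumptions, with a trivial Flow[String, Int] and illustrative names:

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Keep, Source}
import akka.stream.{ActorMaterializer, OverflowStrategy}

import scala.concurrent.Promise

import UseCaseSupport._

object LengthService extends UseCaseSupport {
  implicit val system: ActorSystem = ActorSystem("length-service")
  implicit val mat: ActorMaterializer = ActorMaterializer()
  import system.dispatcher

  private val length: Flow[String, Int, NotUsed] = Flow[String].map(_.length)

  private val queue = Source
    .queue[(String, Promise[Int])](16, OverflowStrategy.dropNew)
    .via(length.zipPromise)
    .toMat(completePromiseSink)(Keep.left)
    .run()

  // each call is answered by the promise paired with its request
  def lengthOf(s: String) = offerToQueue(queue)(s, Promise[Int]())
}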
Example 177
Source File: BankAccountAggregateFlowsImpl.scala    From akka-ddd-cqrs-es-example   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.bank.adaptor.aggregate

import akka.NotUsed
import akka.actor.{ ActorRef, ActorSystem }
import akka.pattern.ask
import akka.stream.scaladsl.Flow
import akka.util.Timeout
import com.github.j5ik2o.bank.useCase.BankAccountAggregateUseCase.Protocol
import com.github.j5ik2o.bank.useCase.port.BankAccountAggregateFlows
import pureconfig._

class BankAccountAggregateFlowsImpl(aggregateRef: ActorRef)(
    implicit val system: ActorSystem
) extends BankAccountAggregateFlows {

  import Protocol._

  private val config = loadConfigOrThrow[BankAccountAggregateFlowsConfig](
    system.settings.config.getConfig("bank.interface.bank-account-aggregate-flows")
  )

  private implicit val to: Timeout = Timeout(config.callTimeout)

  override def openBankAccountFlow: Flow[OpenBankAccountRequest, OpenBankAccountResponse, NotUsed] =
    Flow[OpenBankAccountRequest]
      .map { request =>
        BankAccountAggregate.Protocol.OpenBankAccountRequest(request.bankAccountId, request.name)
      }
      .mapAsync(1)(aggregateRef ? _)
      .map {
        case response: BankAccountAggregate.Protocol.OpenBankAccountSucceeded =>
          OpenBankAccountSucceeded(response.bankAccountId)
        case response: BankAccountAggregate.Protocol.OpenBankAccountFailed =>
          OpenBankAccountFailed(response.bankAccountId, response.error)
      }

  override def updateBankAccountFlow: Flow[UpdateBankAccountRequest, UpdateBankAccountResponse, NotUsed] =
    Flow[UpdateBankAccountRequest]
      .map { request =>
        BankAccountAggregate.Protocol.UpdateBankAccountRequest(request.bankAccountId, request.name)
      }
      .mapAsync(1)(aggregateRef ? _)
      .map {
        case response: BankAccountAggregate.Protocol.UpdateBankAccountSucceeded =>
          UpdateBankAccountSucceeded(response.bankAccountId)
        case response: BankAccountAggregate.Protocol.UpdateBankAccountFailed =>
          UpdateBankAccountFailed(response.bankAccountId, response.error)
      }

  override def addBankAccountEventFlow: Flow[AddBankAccountEventRequest, AddBankAccountEventResponse, NotUsed] =
    Flow[AddBankAccountEventRequest]
      .map {
        case request: DepositRequest =>
          BankAccountAggregate.Protocol.DepositRequest(request.bankAccountId, request.deposit)
        case request: WithdrawRequest =>
          BankAccountAggregate.Protocol.WithdrawRequest(request.bankAccountId, request.withdraw)
      }
      .mapAsync(1)(aggregateRef ? _)
      .map {
        case response: BankAccountAggregate.Protocol.DepositSucceeded =>
          DepositSucceeded(response.bankAccountId)
        case response: BankAccountAggregate.Protocol.DepositFailed =>
          DepositFailed(response.bankAccountId, response.error)
        case response: BankAccountAggregate.Protocol.WithdrawSucceeded =>
          WithdrawSucceeded(response.bankAccountId)
        case response: BankAccountAggregate.Protocol.WithdrawFailed =>
          WithdrawFailed(response.bankAccountId, response.error)
      }

  override def closeBankAccountFlow: Flow[CloseBankAccountRequest, CloseBankAccountResponse, NotUsed] =
    Flow[CloseBankAccountRequest]
      .map { request =>
        BankAccountAggregate.Protocol.CloseBankAccountRequest(request.bankAccountId)
      }
      .mapAsync(1)(aggregateRef ? _)
      .map {
        case response: BankAccountAggregate.Protocol.CloseBankAccountSucceeded =>
          CloseBankAccountSucceeded(response.bankAccountId)
        case response: BankAccountAggregate.Protocol.CloseBankAccountFailed =>
          CloseBankAccountFailed(response.bankAccountId, response.error)
      }

}
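Each of these flows is a single ask round-trip, so a one-shot call is just a single-element stream. A hedged sketch of opening an account this way, assuming the Protocol request/response types carry the fields referenced above:

import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import com.github.j5ik2o.bank.useCase.BankAccountAggregateUseCase.Protocol._

import scala.concurrent.Future

// `flows` is an assumed BankAccountAggregateFlowsImpl instance
def openAccount(flows: BankAccountAggregateFlowsImpl, request: OpenBankAccountRequest)(
    implicit mat: ActorMaterializer
): Future[OpenBankAccountResponse] =
  Source.single(request).via(flows.openBankAccountFlow).runWith(Sink.head)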