akka.stream.KillSwitches Scala Examples

The following examples show how to use akka.stream.KillSwitches. Each example comes from an open-source project; the source file, project, and license are noted in the heading above it.
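
For orientation, the core pattern behind most of these examples is sketched below: materialize a UniqueKillSwitch next to the stream it controls, then call shutdown() (or abort(ex)) from outside the stream. This is a minimal sketch, assuming Akka 2.6, where an implicit ActorSystem provides the materializer; the object name and timings are illustrative.

import akka.actor.ActorSystem
import akka.stream.{KillSwitches, UniqueKillSwitch}
import akka.stream.scaladsl.{Keep, Sink, Source}

import scala.concurrent.duration._

object KillSwitchSketch extends App {
  implicit val system: ActorSystem = ActorSystem("sketch")
  import system.dispatcher

  // Materialize a UniqueKillSwitch alongside the stream it controls.
  val switch: UniqueKillSwitch = Source
    .tick(0.seconds, 100.millis, "tick")
    .viaMat(KillSwitches.single)(Keep.right)
    .to(Sink.foreach(println))
    .run()

  // shutdown() completes the stream gracefully; abort(ex) would fail it instead.
  system.scheduler.scheduleOnce(1.second) {
    switch.shutdown()
    system.terminate()
  }
}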
Example 1
Source File: StaticTime.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.testing.time

import java.time.Instant
import java.util.concurrent.atomic.AtomicReference

import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, RunnableGraph, Sink}
import akka.stream.{ClosedShape, KillSwitches, Materializer, UniqueKillSwitch}
import com.daml.api.util.{TimeProvider, TimestampConversion}
import com.daml.api.util.TimestampConversion._
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.v1.testing.time_service.{GetTimeRequest, SetTimeRequest}
import com.daml.ledger.api.v1.testing.time_service.TimeServiceGrpc.{TimeService, TimeServiceStub}
import com.daml.ledger.client.LedgerClient

import scala.concurrent.{ExecutionContext, Future}

class StaticTime(
    timeService: TimeService,
    clock: AtomicReference[Instant],
    killSwitch: UniqueKillSwitch,
    ledgerId: String)
    extends TimeProvider
    with AutoCloseable {

  def getCurrentTime: Instant = clock.get

  def timeRequest(instant: Instant): SetTimeRequest =
    SetTimeRequest(
      ledgerId,
      Some(TimestampConversion.fromInstant(getCurrentTime)),
      Some(TimestampConversion.fromInstant(instant)))

  def setTime(instant: Instant)(implicit ec: ExecutionContext): Future[Unit] = {
    timeService.setTime(timeRequest(instant)).map { _ =>
      val _ = StaticTime.advanceClock(clock, instant)
    }
  }

  override def close(): Unit = killSwitch.shutdown()
}

object StaticTime {
  def advanceClock(clock: AtomicReference[Instant], instant: Instant): Instant = {
    clock.updateAndGet {
      case current if instant isAfter current => instant
      case current => current
    }
  }

  def updatedVia(timeService: TimeServiceStub, ledgerId: String, token: Option[String] = None)(
      implicit m: Materializer,
      esf: ExecutionSequencerFactory): Future[StaticTime] = {
    val clockRef = new AtomicReference[Instant](Instant.EPOCH)
    val killSwitchExternal = KillSwitches.single[Instant]
    val sinkExternal = Sink.head[Instant]

    RunnableGraph
      .fromGraph {
        GraphDSL.create(killSwitchExternal, sinkExternal) {
          case (killSwitch, futureOfFirstElem) =>
            // The returned future completes only when the first element has passed through,
            // so the StaticTime we hand out has already received time data from the ledger.
            futureOfFirstElem.map(_ => new StaticTime(timeService, clockRef, killSwitch, ledgerId))(
              DirectExecutionContext)
        } { implicit b => (killSwitch, sinkHead) =>
          import GraphDSL.Implicits._
          val instantSource = b.add(
            ClientAdapter
              .serverStreaming(
                GetTimeRequest(ledgerId),
                LedgerClient.stub(timeService, token).getTime)
              .map(r => toInstant(r.getCurrentTime)))

          val updateClock = b.add(Flow[Instant].map { i =>
            advanceClock(clockRef, i)
            i
          })

          val broadcastTimes = b.add(Broadcast[Instant](2))

          val ignore = b.add(Sink.ignore)

          // format: OFF
          instantSource ~> killSwitch ~> updateClock ~> broadcastTimes.in
                                                        broadcastTimes.out(0) ~> sinkHead
                                                        broadcastTimes.out(1) ~> ignore
          // format: ON

          ClosedShape
        }
      }
      .run()
  }

} 
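A hypothetical caller of Example 1 might look like the sketch below; the TimeServiceStub named stub, the implicit Materializer, ExecutionSequencerFactory, and ExecutionContext are all assumed to exist already, and the ledger id is illustrative.

// Sketch only: `stub` and the required implicits are assumed to be in scope.
val staticTimeF: Future[StaticTime] = StaticTime.updatedVia(stub, ledgerId = "my-ledger")
staticTimeF.foreach { staticTime =>
  println(s"Ledger time is ${staticTime.getCurrentTime}")
  staticTime.close() // tears down the underlying stream via the kill switch
}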
Example 2
Source File: PrintMoreNumbers.scala    From akka_streams_tutorial   with MIT License
package sample.stream_actor_simple

import akka.actor.Actor
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{KillSwitches, UniqueKillSwitch}

import scala.concurrent.duration._

class PrintMoreNumbers extends Actor {
  implicit val system = context.system
  implicit val executionContext = context.system.dispatcher

  private val (killSwitch: UniqueKillSwitch, done) =
    Source.tick(0.seconds, 1.second, 1)
      .scan(0)(_ + _)
      .map(_.toString)
      .viaMat(KillSwitches.single)(Keep.right)
      .toMat(Sink.foreach(println))(Keep.both)
      .run()

  done.foreach(_ => self ! "done")

  override def receive: Receive = {
    // Shutting down the kill switch stops the stream; the actor stops itself once the stream completes
    case "stop" =>
      println("Stopping...")
      killSwitch.shutdown()
    case "done" =>
      println("Done")
      context.stop(self)
      context.system.terminate()
  }
} 
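A hypothetical driver for this actor; the system name, actor name, and delay are illustrative.

import akka.actor.{ActorSystem, Props}

import scala.concurrent.duration._

object PrintMoreNumbersMain extends App {
  val system = ActorSystem("printer")
  import system.dispatcher

  // Let the stream print for a few seconds, then trigger the kill switch.
  val printer = system.actorOf(Props(new PrintMoreNumbers), "printMoreNumbers")
  system.scheduler.scheduleOnce(5.seconds)(printer ! "stop")
}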
Example 3
Source File: Watcher.scala    From cloudstate   with Apache License 2.0
package io.cloudstate.operator

import akka.{Done, NotUsed}
import akka.stream.{KillSwitch, KillSwitches, Materializer}
import akka.stream.scaladsl.{Flow, Keep, RestartSource, Sink, Source}
import play.api.libs.json.Format
import skuber.{ListResource, ObjectResource, ResourceDefinition}
import skuber.api.client.{EventType, KubernetesClient, WatchEvent}

import scala.concurrent.duration._
import skuber.json.format._

import scala.concurrent.ExecutionContext

object Watcher {

  private implicit def listResourceFormat[Resource <: ObjectResource: Format]: Format[ListResource[Resource]] =
    ListResourceFormat(implicitly[Format[Resource]])

  def watch[Resource <: ObjectResource: Format: ResourceDefinition](
      client: KubernetesClient,
      handler: Flow[WatchEvent[Resource], _, _]
  )(implicit ec: ExecutionContext, mat: Materializer): KillSwitch =
    // Summary of what we want our event loop to look like:
    // * We start by listing all the resources, and process them.
    // * Then we start watching from the resourceVersion that we got in our list, so we get all updates.
    // * But we also want to periodically recheck all resources, since sometimes there are race conditions
    //   between operators handling dependent resources (eg, if you deploy a journal and a service that uses
    //   it at the same time), so we only run the watch for a maximum of that time (eg, 5 minutes), before
    //   restarting.
    // * Also, if errors are encountered, we don't want to continually restart in a hot loop, so we use the
    //   RestartSource to restart with backoff.
    RestartSource
      .onFailuresWithBackoff(2.seconds, 20.seconds, 0.2) { () =>
        val source = Source
          .repeat(NotUsed)
          .flatMapConcat { _ =>
            Source
              .fromFutureSource(
                client
                  .list[ListResource[Resource]]()
                  .map { resources =>
                    val watch = client
                      .watchAllContinuously[Resource](sinceResourceVersion = Some(resources.resourceVersion))

                    Source(resources)
                      .map(WatchEvent(EventType.MODIFIED, _))
                      .concat(watch)
                  }
              )
              .takeWithin(5.minutes)
          }

        source.via(handler)
      }
      .viaMat(KillSwitches.single)(Keep.right)
      .to(Sink.ignore)
      .run()

  def watchSingle[Resource <: ObjectResource: Format: ResourceDefinition](
      client: KubernetesClient,
      resourceName: String,
      handler: Flow[WatchEvent[Resource], _, _]
  )(implicit ec: ExecutionContext, mat: Materializer): KillSwitch =
    RestartSource
      .onFailuresWithBackoff(2.seconds, 20.seconds, 0.2) { () =>
        val source = Source
          .repeat(NotUsed)
          .flatMapConcat { _ =>
            Source
              .fromFutureSource(
                client.getOption[Resource](resourceName).map {
                  case Some(resource) =>
                    val watch =
                      client.watchContinuously[Resource](resourceName,
                                                         sinceResourceVersion = Some(resource.resourceVersion))
                    Source
                      .single(resource)
                      .map(WatchEvent(EventType.MODIFIED, _))
                      .concat(watch)
                  case None =>
                    throw new RuntimeException(
                      s"Resource $resourceName not found in namespace ${client.namespaceName}!"
                    )
                }
              )
              .takeWithin(5.minutes)
          }

        source.via(handler)
      }
      .viaMat(KillSwitches.single)(Keep.right)
      .to(Sink.ignore)
      .run()
} 
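Hypothetical usage of the watcher above, assuming a skuber KubernetesClient named client and a custom resource type MyResource with the implicit Format and ResourceDefinition that skuber requires:

// Sketch: log every event seen for MyResource, then stop watching later.
val killSwitch: KillSwitch = Watcher.watch[MyResource](
  client,
  Flow[WatchEvent[MyResource]].map(event => println(s"${event._type}: ${event._object.name}"))
)
// ...
killSwitch.shutdown()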
Example 4
Source File: CrdtsClient.scala    From cloudstate   with Apache License 2.0
package io.cloudstate.samples

import akka.actor.ActorSystem
import akka.grpc.GrpcClientSettings
import akka.stream.scaladsl.{Keep, Sink}
import akka.stream.{ActorMaterializer, KillSwitches}
import com.example.crdts.crdt_example._

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}


class CrdtsClient(hostname: String, port: Int, hostnameOverride: Option[String], sys: ActorSystem) {
  def this(hostname: String, port: Int, hostnameOverride: Option[String] = None) =
    this(hostname, port, hostnameOverride, ActorSystem())
  private implicit val system = sys
  private implicit val materializer = ActorMaterializer()
  import sys.dispatcher

  val settings = {
    val s = GrpcClientSettings.connectToServiceAt(hostname, port).withTls(false)
    hostnameOverride.fold(s)(host => s.withChannelBuilderOverrides(_.overrideAuthority(host)))
  }
  println(s"Connecting to $hostname:$port")
  val service = CrdtExampleClient(settings)

  def shutdown(): Unit = {
    await(service.close())
    await(system.terminate())
  }

  def await[T](future: Future[T]): T = Await.result(future, 10.seconds)

  def getGCounter(id: String) = await(service.getGCounter(Get(id))).value

  def incrementGCounter(id: String, value: Long) = await(service.incrementGCounter(UpdateCounter(id, value))).value

  def getPNCounter(id: String) = await(service.getPNCounter(Get(id))).value

  def updatePNCounter(id: String, value: Long) = await(service.updatePNCounter(UpdateCounter(id, value))).value

  def getGSet(id: String) = await(service.getGSet(Get(id))).items

  def mutateGSet(id: String, values: Seq[SomeValue]) = await(service.mutateGSet(MutateSet(add = values))).size

  def getORSet(id: String) = await(service.getORSet(Get(id))).items

  def mutateORSet(id: String, add: Seq[SomeValue] = Nil, remove: Seq[SomeValue] = Nil, clear: Boolean = false) =
    await(service.mutateORSet(MutateSet(key = id, add = add, remove = remove, clear = clear))).size

  def connect(id: String) =
    service.connect(User(id)).viaMat(KillSwitches.single)(Keep.right).to(Sink.ignore).run()

  def monitor(monitorId: String, id: String) =
    service
      .monitor(User(id))
      .viaMat(KillSwitches.single)(Keep.right)
      .to(
        Sink.foreach(
          status =>
            println(
              s"Monitor $monitorId saw user $id go " + (if (status.online) "online"
                                                        else "offline")
            )
        )
      )
      .run()
} 
Example 5
Source File: ReadSide.scala    From akka-persistence-cassandra   with Apache License 2.0
package akka.persistence.cassandra.example

import akka.actor.typed.pubsub.Topic
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, PostStop }
import akka.cluster.sharding.typed.{ ClusterShardingSettings, ShardedDaemonProcessSettings }
import akka.cluster.sharding.typed.scaladsl.ShardedDaemonProcess
import akka.stream.{ KillSwitches, SharedKillSwitch }
import com.typesafe.config.Config
import org.HdrHistogram.Histogram
import akka.actor.typed.scaladsl.LoggerOps
import scala.concurrent.duration._

object ReadSide {

  sealed trait Command
  private case object ReportMetrics extends Command

  object Settings {
    def apply(config: Config): Settings =
      Settings(config.getInt("processors"), config.getInt("tags-per-processor"))
  }

  case class Settings(nrProcessors: Int, tagsPerProcessor: Int) {
    val nrTags: Int = nrProcessors * tagsPerProcessor
  }

  def apply(
      system: ActorSystem[_],
      topic: ActorRef[Topic.Command[ReadSideTopic.ReadSideMetrics]],
      settings: Settings): Unit = {
    system.log.info("Running {} processors", settings.nrProcessors)
    val killSwitch: SharedKillSwitch = KillSwitches.shared("eventProcessorSwitch")
    ShardedDaemonProcess(system).init(
      "tag-processor",
      settings.nrProcessors - 1, // bug that creates +1 processor FIXME remove in 2.6.5
      i => behavior(topic, i, settings, killSwitch),
      ShardedDaemonProcessSettings(system).withShardingSettings(ClusterShardingSettings(system).withRole("read")),
      None)
  }

  private def behavior(
      topic: ActorRef[Topic.Command[ReadSideTopic.ReadSideMetrics]],
      nr: Int,
      settings: Settings,
      killSwitch: SharedKillSwitch): Behavior[Command] =
    Behaviors.withTimers { timers =>
      timers.startTimerAtFixedRate(ReportMetrics, 10.seconds)
      Behaviors.setup { ctx =>
        val start = settings.tagsPerProcessor * nr
        val end = start + settings.tagsPerProcessor - 1
        val tags = (start to end).map(i => s"tag-$i")
        ctx.log.info("Processor {} processing tags {}", nr, tags)
        // milliseconds, highest value = 1 minute
        val histogram = new Histogram(10 * 1000 * 60, 2)
        // maybe easier to just have these as different actors
        // my thinking is we can start with a large number of tags and scale out
        // read side processors later
        // having more tags will also increase write throughput/latency as it'll write to
        // many partitions
        // downside is running many streams/queries against c*
        tags.foreach(
          tag =>
            new EventProcessorStream[ConfigurablePersistentActor.Event](
              ctx.system,
              ctx.executionContext,
              s"processor-$nr",
              tag).runQueryStream(killSwitch, histogram))

        Behaviors
          .receiveMessage[Command] {
            case ReportMetrics =>
              if (histogram.getTotalCount > 0) {
                topic ! Topic.Publish(
                  ReadSideTopic.ReadSideMetrics(
                    histogram.getTotalCount,
                    histogram.getMaxValue,
                    histogram.getValueAtPercentile(99),
                    histogram.getValueAtPercentile(50)))
                histogram.reset()
              }
              Behaviors.same
          }
          .receiveSignal {
            case (_, PostStop) =>
              killSwitch.shutdown()
              Behaviors.same
          }
      }
    }

} 
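Unlike KillSwitches.single, the SharedKillSwitch created above can terminate any number of independently materialized streams. A minimal sketch of that behaviour, assuming an implicit ActorSystem (or Materializer) in scope; the switch name and stream contents are illustrative:

import akka.stream.KillSwitches
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.duration._

val shared = KillSwitches.shared("sketch-switch")

// Any stream routed through shared.flow answers to the same switch.
Source.tick(0.seconds, 1.second, "a").via(shared.flow).runWith(Sink.foreach(println))
Source.tick(0.seconds, 1.second, "b").via(shared.flow).runWith(Sink.foreach(println))

// One call terminates both streams.
shared.shutdown()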
Example 6
Source File: TsvRetrieverFromFile.scala    From CM-Well   with Apache License 2.0
package cmwell.dc.stream

import java.io.{BufferedWriter, File, FileWriter}

import akka.actor.ActorSystem
import akka.stream.{KillSwitch, KillSwitches, Materializer}
import akka.stream.Supervision.Decider
import akka.stream.contrib.SourceGen
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import akka.util.ByteString
import cmwell.dc.LazyLogging
import cmwell.dc.stream.MessagesTypesAndExceptions.{DcInfo, InfotonData}
import cmwell.dc.stream.TsvRetriever.{logger, TsvFlowOutput}
import cmwell.util.resource._

import scala.concurrent.Future
import scala.util.{Failure, Success}
import scala.concurrent.ExecutionContext.Implicits.global


object TsvRetrieverFromFile extends LazyLogging {

  def apply(dcInfo: DcInfo)(implicit mat: Materializer,
                            system: ActorSystem): Source[InfotonData, (KillSwitch, Future[Seq[Option[String]]])] = {
    val persistFile = dcInfo.tsvFile.get + ".persist"

    def appendToPersistFile(str: String): Unit = {
      val bw = new BufferedWriter(new FileWriter(persistFile, true))
      bw.write(str)
      bw.close()
    }

    val linesToDrop = dcInfo.positionKey.fold {
      if (!new File(persistFile).exists) 0L
      else using(scala.io.Source.fromFile(persistFile))(_.getLines.toList.last.toLong)
    }(pos => pos.toLong)
    val positionKeySink = Flow[InfotonData]
      .recover {
        case _: Throwable => InfotonData(null, null, -1)
      }
      .scan(linesToDrop) {
        case (count, InfotonData(null, null, -1)) => {
          appendToPersistFile("crash at: " + count + "\n" + count.toString + "\n")
          count
        }
        case (count, _) => {
          val newCount = count + 1
          if (newCount % 10000 == 0) appendToPersistFile(newCount.toString + "\n")
          newCount
        }
      }
      .toMat(Sink.last)(
        (_, right) =>
          right.map { count =>
            appendToPersistFile(count.toString + "\n")
            Seq.fill(2)(Option(count.toString))
        }
      )

    Source
      .fromIterator(() => scala.io.Source.fromFile(dcInfo.tsvFile.get).getLines())
      .drop {
        logger.info(s"Dropping $linesToDrop initial lines from file ${dcInfo.tsvFile.get} for sync ${dcInfo.key}")
        linesToDrop
      }
      .viaMat(KillSwitches.single)(Keep.right)
      .map(line => TsvRetriever.parseTSVAndCreateInfotonDataFromIt(ByteString(line)))
      .alsoToMat(positionKeySink)(Keep.both)
  }
} 
Example 7
Source File: AkkaStreamProcess.scala    From aecor   with MIT License
package aecor.example.process

import aecor.util.effect._
import akka.stream.scaladsl.{ Keep, Sink, Source }
import akka.stream.{ KillSwitches, Materializer }
import cats.effect.Async
import cats.implicits._

object AkkaStreamProcess {
  final class Builder[F[_]] {
    def apply[M](source: Source[Unit, M],
                 materializer: Materializer)(implicit F: Async[F]): F[Unit] =
      F.bracket(
        F.delay(
          source
            .viaMat(KillSwitches.single)(Keep.right)
            .toMat(Sink.ignore)(Keep.both)
            .run()(materializer)
        )
      )(x => F.fromFuture(x._2).void)(x => F.delay(x._1.shutdown()))

  }
  def apply[F[_]]: Builder[F] = new Builder[F]
} 
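The returned F[Unit] completes when the stream does, and the bracket guarantees the kill switch fires on completion or cancellation of the effect. A hypothetical call, with cats-effect IO standing in for F and an already-constructed Materializer assumed:

import cats.effect.IO

// Sketch: runs the 10-element stream to completion; cancelling the IO
// instead shuts the stream down through the kill switch.
val run: IO[Unit] = AkkaStreamProcess[IO](Source.repeat(()).take(10), materializer)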
Example 8
Source File: AkkaStreamProcess.scala    From aecor   with MIT License
package aecor.distributedprocessing

import aecor.distributedprocessing.DistributedProcessing._
import aecor.util.effect._
import akka.stream.scaladsl.{ Keep, Sink, Source }
import akka.stream.{ KillSwitches, Materializer }
import cats.effect.Async
import cats.implicits._

object AkkaStreamProcess {
  final class Builder[F[_]] {
    def apply[M](source: Source[Unit, M])(implicit F: Async[F],
                                          materializer: Materializer): Process[F] =
      Process(run = F.delay {
        val (killSwitch, terminated) = source
          .viaMat(KillSwitches.single)(Keep.right)
          .toMat(Sink.ignore)(Keep.both)
          .run()
        RunningProcess(F.fromFuture(terminated).void, F.delay(killSwitch.shutdown()))
      })
  }
  def apply[F[_]]: Builder[F] = new Builder[F]
} 
Example 9
Source File: KillSwitchMatStream.scala    From squbs   with Apache License 2.0
package org.squbs.stream

import java.util.concurrent.atomic.AtomicLong

import akka.stream.{ClosedShape, KillSwitch, KillSwitches}
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object KillSwitchMatStream {
  val genCount = new AtomicLong(0L)
}

class KillSwitchMatStream extends PerpetualStream[(KillSwitch, Future[Long])] {
  import KillSwitchMatStream._
  import org.squbs.unicomplex.Timeouts._

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0){ p => if (p == Int.MaxValue) 0 else p + 1 } map { v =>
    genCount.incrementAndGet()
    v
  }

  val source = Source.fromIterator(generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(KillSwitches.single[Int], counter)((_, _)) {
    implicit builder =>
      (kill, sink) =>
        import GraphDSL.Implicits._
        source ~> kill ~> throttle ~> sink
        ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>

      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! matValue._2
  }
} 
Example 10
Source File: EventProcessor.scala    From akka-kubernetes-tests   with Apache License 2.0
package akka.kubernetes.couchbase

import akka.actor.{Actor, ActorLogging, Props}
import akka.persistence.couchbase.UUIDs
import akka.persistence.couchbase.scaladsl.CouchbaseReadJournal
import akka.persistence.query._
import akka.stream.{ActorMaterializer, KillSwitches, Materializer}
import akka.stream.scaladsl.{RestartSource, Sink}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object EventProcessor {
  def props: Props =
    Props(new EventProcessor)
}

class EventProcessor extends Actor with ActorLogging {

  private val settings = Settings(context.system)
  private val eventProcessorId = settings.eventProcessorSettings.id
  private val tag = self.path.name
  private implicit val ec: ExecutionContext = context.dispatcher
  private implicit val materializer: Materializer = ActorMaterializer()(context.system)
  private val query =
    PersistenceQuery(context.system).readJournalFor[CouchbaseReadJournal](CouchbaseReadJournal.Identifier)
  private val killSwitch = KillSwitches.shared("eventProcessorSwitch")
  override val log = super.log // eager initialization because used from inside stream

  override def preStart(): Unit = {
    super.preStart()
    log.info("Starting event processor for tag: {}", tag)
    runQueryStream()
  }

  override def postStop(): Unit = {
    super.postStop()
    killSwitch.shutdown()
  }

  def receive = {
    case KeepAlive.Ping =>
      sender() ! KeepAlive.Pong
      log.debug(
        s"Event processor(${self.path.name}) @ ${context.system.settings.config
          .getString("akka.remote.artery.canonical.hostname")}:${context.system.settings.config.getInt("akka.remote.artery.canonical.port")}"
      )

    case message =>
      log.error("Got unexpected message: {}", message)
  }

  private def runQueryStream(): Unit =
    RestartSource
      .withBackoff(minBackoff = 500.millis, maxBackoff = 20.seconds, randomFactor = 0.1) { () =>
        // TODO offsets, this just starts from the beginning each time
        query
          .eventsByTag(tag, NoOffset)
          .map { eventEnvelope: EventEnvelope =>
            val now = System.currentTimeMillis()
            val publishTime = eventEnvelope.offset match {
              case t: TimeBasedUUID => UUIDs.timestampFrom(t)
            }
            val delay = now - publishTime
            log.info(s"#Eventprocessor($tag) got $eventEnvelope. Event is {} ms delayed", delay) // You would write to Kafka here
            eventEnvelope.offset
          }
      }
      .via(killSwitch.flow)
      .runWith(Sink.ignore)

} 
Example 11
Source File: SimplePublishSubscribe.scala    From fusion-data   with Apache License 2.0
package example.akkastream.dynamichub

import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, KillSwitches, UniqueKillSwitch }
import akka.stream.scaladsl.{ BroadcastHub, Flow, Keep, MergeHub, Sink, Source }
import com.typesafe.scalalogging.StrictLogging

import scala.io.StdIn
import scala.concurrent.duration._

object SimplePublishSubscribe extends App with StrictLogging {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  val (sink, source) =
    MergeHub.source[String](perProducerBufferSize = 16).toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both).run()

  source.runWith(Sink.ignore)

  val busFlow: Flow[String, String, UniqueKillSwitch] = Flow
    .fromSinkAndSource(sink, source)
    .joinMat(KillSwitches.singleBidi[String, String])(Keep.right)
    .backpressureTimeout(3.seconds)

  val switch: UniqueKillSwitch =
    Source.repeat("Hello world!").viaMat(busFlow)(Keep.right).to(Sink.foreach(v => logger.info(s"switch: $v"))).run()

  Thread.sleep(200)
  switch.shutdown()

  StdIn.readLine()
  system.terminate()
} 
Example 12
Source File: CommandRegistration.scala    From AckCord   with MIT License
package ackcord.commands

import scala.concurrent.Future

import akka.Done
import akka.stream.scaladsl.{Keep, RunnableGraph, Sink, Source}
import akka.stream.{KillSwitches, UniqueKillSwitch}

case class CommandRegistration[Mat](materialized: Mat, onDone: Future[Done], killSwitch: UniqueKillSwitch) {

  def stop(): Unit = killSwitch.shutdown()
}
object CommandRegistration {
  def toSink[A, M](source: Source[A, M]): RunnableGraph[CommandRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).toMat(Sink.ignore) {
      case ((m, killSwitch), done) => CommandRegistration(m, done, killSwitch)
    }

  def withRegistration[A, M](source: Source[A, M]): Source[A, CommandRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).watchTermination() {
      case ((m, killSwitch), done) => CommandRegistration(m, done, killSwitch)
    }
} 
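A hypothetical use of toSink, assuming an implicit ActorSystem for materialization and an ExecutionContext for the callback; the source contents are illustrative:

// Sketch: run a command source under a registration, then stop it later.
val registration = CommandRegistration.toSink(Source(1 to 100).map(n => s"command-$n")).run()

registration.onDone.foreach(_ => println("command stream completed"))
registration.stop()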
Example 13
Source File: MovedMonitor.scala    From AckCord   with MIT License
package ackcord.lavaplayer

import ackcord.{APIMessage, Cache}
import akka.actor.typed._
import akka.actor.typed.scaladsl._
import akka.stream.scaladsl.Keep
import akka.stream.typed.scaladsl.ActorSink
import akka.stream.{KillSwitches, UniqueKillSwitch}

private[lavaplayer] object MovedMonitor {

  private[lavaplayer] def apply(
      cache: Cache,
      handler: ActorRef[LavaplayerHandler.Command]
  ): Behavior[Command] =
    Behaviors.setup { ctx =>
      implicit val system: ActorSystem[Nothing] = ctx.system

      val killSwitch = cache.subscribeAPI
        .collectType[APIMessage.VoiceStateUpdate]
        .viaMat(KillSwitches.single)(Keep.right)
        .to(
          ActorSink
            .actorRefWithBackpressure(ctx.self, ReceivedEvent, InitSink, AckSink, CompletedSink, _ => CompletedSink)
        )
        .run()

      running(ctx, killSwitch, handler)
    }

  private def running(
      ctx: ActorContext[Command],
      killSwitch: UniqueKillSwitch,
      handler: ActorRef[LavaplayerHandler.Command]
  ): Behavior[Command] =
    Behaviors.receiveMessage {
      case Stop =>
        killSwitch.shutdown()
        Behaviors.same

      case InitSink(ackTo) =>
        ackTo ! AckSink
        Behaviors.same

      case ReceivedEvent(ackTo, APIMessage.VoiceStateUpdate(state, c)) if state.userId == c.current.botUser.id =>
        handler ! LavaplayerHandler.VoiceChannelMoved(state.channelId)
        ackTo ! AckSink
        Behaviors.same

      case ReceivedEvent(ackTo, _) =>
        ackTo ! AckSink
        Behaviors.same

      case CompletedSink =>
        Behaviors.stopped
    }

  private case object AckSink

  sealed trait Command
  case object Stop extends Command

  private case class InitSink(ackTo: ActorRef[AckSink.type])                                            extends Command
  private case class ReceivedEvent(ackTo: ActorRef[AckSink.type], message: APIMessage.VoiceStateUpdate) extends Command
  private case object CompletedSink                                                                     extends Command
} 
Example 14
Source File: EventRegistration.scala    From AckCord   with MIT License
package ackcord

import scala.concurrent.Future

import akka.Done
import akka.stream.{KillSwitches, UniqueKillSwitch}
import akka.stream.scaladsl.{Keep, RunnableGraph, Sink, Source}

case class EventRegistration[Mat](materialized: Mat, onDone: Future[Done], killSwitch: UniqueKillSwitch) {

  def stop(): Unit = killSwitch.shutdown()
}
object EventRegistration {
  def toSink[A, M](source: Source[A, M]): RunnableGraph[EventRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).toMat(Sink.ignore) {
      case ((m, killSwitch), done) => EventRegistration(m, done, killSwitch)
    }

  def withRegistration[A, M](source: Source[A, M]): Source[A, EventRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).watchTermination() {
      case ((m, killSwitch), done) => EventRegistration(m, done, killSwitch)
    }
} 
Example 15
Source File: S3CopyService.scala    From iep-apps   with Apache License 2.0
package com.netflix.atlas.persistence

import java.io.File
import java.nio.file.Files
import java.nio.file.Paths

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.KillSwitch
import akka.stream.KillSwitches
import akka.stream.scaladsl.Keep
import akka.stream.scaladsl.Source
import com.netflix.atlas.core.util.Streams
import com.netflix.iep.service.AbstractService
import com.netflix.spectator.api.Registry
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging
import javax.inject.Inject
import javax.inject.Singleton

import scala.concurrent.duration._

@Singleton
class S3CopyService @Inject()(
  val config: Config,
  val registry: Registry,
  implicit val system: ActorSystem
) extends AbstractService
    with StrictLogging {

  private val dataDir = config.getString("atlas.persistence.local-file.data-dir")

  private implicit val mat = ActorMaterializer()

  private var killSwitch: KillSwitch = _
  private val s3Config = config.getConfig("atlas.persistence.s3")

  private val cleanupTimeoutMs = s3Config.getDuration("cleanup-timeout").toMillis
  private val maxInactiveMs = s3Config.getDuration("max-inactive-duration").toMillis
  private val maxFileDurationMs =
    config.getDuration("atlas.persistence.local-file.max-duration").toMillis

  require(
    maxInactiveMs > maxFileDurationMs,
    "`max-inactive-duration` MUST be longer than `max-duration`, otherwise a file may be renamed before a normal write completes"
  )

  override def startImpl(): Unit = {
    logger.info("Starting service")
    killSwitch = Source
      .tick(1.second, 5.seconds, NotUsed)
      .viaMat(KillSwitches.single)(Keep.right)
      .flatMapMerge(Int.MaxValue, _ => Source(FileUtil.listFiles(new File(dataDir))))
      .toMat(new S3CopySink(s3Config, registry, system))(Keep.left)
      .run()
  }

  override def stopImpl(): Unit = {
    logger.info("Stopping service")
    waitForCleanup()
    if (killSwitch != null) killSwitch.shutdown()
  }

  private def waitForCleanup(): Unit = {
    logger.info("Waiting for cleanup")
    val start = System.currentTimeMillis
    while (hasMoreFiles) {
      if (System.currentTimeMillis() > start + cleanupTimeoutMs) {
        logger.error("Cleanup timeout")
        return
      }
      Thread.sleep(1000)
    }
    logger.info("Cleanup done")
  }

  private def hasMoreFiles: Boolean = {
    try {
      Streams.scope(Files.list(Paths.get(dataDir))) { dir =>
        dir.anyMatch(f => Files.isRegularFile(f))
      }
    } catch {
      case e: Exception => {
        logger.error(s"Error checking hasMoreFiles in $dataDir", e)
        true // Assume there are more files on error so that the check is retried
      }
    }
  }
} 
Example 16
Source File: ClientSpec.scala    From twitter4s   with Apache License 2.0
package com.danielasfregola.twitter4s.helpers

import java.util.UUID

import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.pattern.ask
import akka.stream.{KillSwitches, Materializer, SharedKillSwitch}
import akka.util.Timeout
import akka.util.Timeout.durationToTimeout
import com.danielasfregola.twitter4s.entities.streaming.StreamingMessage
import com.danielasfregola.twitter4s.http.clients.authentication.AuthenticationClient
import com.danielasfregola.twitter4s.http.clients.rest.RestClient
import com.danielasfregola.twitter4s.http.clients.streaming.StreamingClient

import scala.concurrent.Future
import scala.concurrent.duration.DurationInt

trait ClientSpec extends Spec {

  abstract class AuthenticationClientSpecContext extends RequestDSL with SpecContext {

    protected val authenticationClient = new AuthenticationClient(consumerToken) {

      override def sendAndReceive[T](request: HttpRequest, f: HttpResponse => Future[T])(
          implicit system: ActorSystem,
          materializer: Materializer): Future[T] = {
        implicit val ec = materializer.executionContext
        implicit val timeout: Timeout = 20.seconds
        val requestStartTime = System.currentTimeMillis
        val responseR: Future[HttpResponse] = (transport.ref ? request).map(_.asInstanceOf[HttpResponse])
        for {
          response <- responseR
          t <- unmarshal[T](requestStartTime, f)(request, response, materializer)
        } yield t
      }
    }
  }

  abstract class RestClientSpecContext extends RequestDSL with SpecContext {

    protected val restClient = new RestClient(consumerToken, accessToken) {

      override def sendAndReceive[T](request: HttpRequest, f: HttpResponse => Future[T])(
          implicit system: ActorSystem,
          materializer: Materializer): Future[T] = {
        implicit val ec = materializer.executionContext
        implicit val timeout: Timeout = 20.seconds
        val requestStartTime = System.currentTimeMillis
        val responseR: Future[HttpResponse] = (transport.ref ? request).map(_.asInstanceOf[HttpResponse])
        for {
          response <- responseR
          t <- unmarshal[T](requestStartTime, f)(request, response, materializer)
        } yield t
      }
    }
  }

  abstract class StreamingClientSpecContext extends RequestDSL with SpecContext {

    def dummyProcessing: PartialFunction[StreamingMessage, Unit] = { case _ => }

    val killSwitch = KillSwitches.shared(s"test-twitter4s-${UUID.randomUUID}")

    protected val streamingClient = new StreamingClient(consumerToken, accessToken) {

      override def processStreamRequest[T <: StreamingMessage: Manifest](
          request: HttpRequest
      )(
          f: PartialFunction[T, Unit],
          errorHandler: PartialFunction[Throwable, Unit]
      )(
          implicit
          system: ActorSystem,
          materializer: Materializer
      ): Future[SharedKillSwitch] = {
        implicit val ec = materializer.executionContext
        implicit val timeout: Timeout = 20.seconds

        val responseR: Future[HttpResponse] = (transport.ref ? request).map(_.asInstanceOf[HttpResponse])
        for {
          response <- responseR
          _ <- Future.successful(processBody(response, killSwitch)(f)(manifest[T], request, materializer))
        } yield killSwitch
      }

    }
  }
}