akka.actor.PoisonPill Scala Examples

The following examples show how to use akka.actor.PoisonPill. Each example is drawn from an open-source project; the source file, project, and license are listed above the code.
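Before the project examples, here is a minimal, self-contained sketch (actor and message names are illustrative) of the basic contract of PoisonPill: it is enqueued like any ordinary message and stops the target actor once everything queued ahead of it has been processed.

import akka.actor.{Actor, ActorSystem, PoisonPill, Props}

class Worker extends Actor {
  def receive: Receive = {
    case msg => println(s"processing $msg")
  }

  override def postStop(): Unit = println("worker stopped")
}

object PoisonPillDemo extends App {
  val system = ActorSystem("demo")
  val worker = system.actorOf(Props(new Worker), "worker")

  worker ! "job-1"       // processed normally
  worker ! PoisonPill    // queued like a message; the actor stops after "job-1"
  worker ! "job-2"       // arrives after the PoisonPill, so it ends up in dead letters

  Thread.sleep(500)
  system.terminate()
}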
Example 1
Source File: FileMonitorActor.scala    From graphql-gateway   with Apache License 2.0
package sangria.gateway.file

import java.nio.file.{NoSuchFileException, StandardWatchEventKinds}

import akka.actor.{Actor, ActorRef, Cancellable, PoisonPill, Props}
import akka.event.Logging
import better.files._
import sangria.gateway.file.FileWatcher._

import scala.collection.mutable
import scala.concurrent.duration.FiniteDuration

class FileMonitorActor(paths: Seq[File], threshold: FiniteDuration, globs: Seq[String], cb: Vector[File] ⇒ Unit) extends Actor {
  import FileMonitorActor._

  import context.dispatcher

  val log = Logging(context.system, this)
  var watchers: Seq[ActorRef] = _
  val pendingFiles: mutable.HashSet[File] = mutable.HashSet[File]()
  var scheduled: Option[Cancellable] = None

  override def preStart(): Unit = {
    watchers = paths.map(_.newWatcher(recursive = true))

    watchers.foreach { watcher ⇒
      watcher ! when(events = StandardWatchEventKinds.ENTRY_CREATE, StandardWatchEventKinds.ENTRY_MODIFY, StandardWatchEventKinds.ENTRY_DELETE) {
        case (_, file) ⇒ self ! FileChange(file)
      }
    }
  }

  def receive = {
    case FileChange(file) ⇒
      try {
        if (file.exists && !file.isDirectory && globs.exists(file.glob(_, includePath = false).nonEmpty)) {
          pendingFiles += file

          if (scheduled.isEmpty)
            scheduled = Some(context.system.scheduler.scheduleOnce(threshold, self, Threshold))
        }
      } catch {
        case _: NoSuchFileException ⇒ // ignore, it's ok
      }

    case Threshold ⇒
      val files = pendingFiles.toVector.sortBy(_.name)

      if (files.nonEmpty)
        cb(files)

      pendingFiles.clear()
      scheduled = None
  }
}

object FileMonitorActor {
  case class FileChange(file: File)
  case object Threshold

  def props(paths: Seq[File], threshold: FiniteDuration, globs: Seq[String], cb: Vector[File] ⇒ Unit) =
    Props(new FileMonitorActor(paths, threshold, globs, cb))
} 
Example 2
Source File: LeagueLoader.scala    From eventsourcing-intro   with Apache License 2.0
package eu.reactivesystems.league.impl

import akka.actor.PoisonPill
import akka.cluster.singleton.{
  ClusterSingletonManager,
  ClusterSingletonManagerSettings
}
import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.lightbend.lagom.scaladsl.api.ServiceLocator.NoServiceLocator
import com.lightbend.lagom.scaladsl.devmode.LagomDevModeComponents
import com.lightbend.lagom.scaladsl.persistence.cassandra.WriteSideCassandraPersistenceComponents
import com.lightbend.lagom.scaladsl.persistence.jdbc.ReadSideJdbcPersistenceComponents
import com.lightbend.lagom.scaladsl.playjson.{
  JsonSerializer,
  JsonSerializerRegistry
}
import com.lightbend.lagom.scaladsl.server._
import com.softwaremill.macwire._
import com.softwaremill.macwire.akkasupport._
import eu.reactivesystems.league.api.LeagueService
import play.api.db.HikariCPComponents
import play.api.libs.ws.ahc.AhcWSComponents

import scala.collection.immutable.Seq

class LeagueLoader extends LagomApplicationLoader {

  override def load(context: LagomApplicationContext): LagomApplication =
    new LeagueApplication(context) {
      override def serviceLocator: ServiceLocator = NoServiceLocator
    }

  override def loadDevMode(
      context: LagomApplicationContext): LagomApplication =
    new LeagueApplication(context) with LagomDevModeComponents

  override def describeServices = List(
    readDescriptor[LeagueService]
  )
}

abstract class LeagueApplication(context: LagomApplicationContext)
    extends LagomApplication(context)
    with WriteSideCassandraPersistenceComponents
    with ReadSideJdbcPersistenceComponents
    with HikariCPComponents
    with AhcWSComponents {

  // Bind the service that this server provides
  override lazy val lagomServer =
    serverFor[LeagueService](wire[LeagueServiceImpl])

  // Register the JSON serializer registry
  override lazy val jsonSerializerRegistry = LeagueSerializerRegistry

  // Register the league persistent entity
  persistentEntityRegistry.register(wire[LeagueEntity])

  // Register read side processor

  val leagueProjectionProps = wireProps[LeagueProjection]

  actorSystem.actorOf(
    ClusterSingletonManager.props(
      singletonProps = leagueProjectionProps,
      terminationMessage = PoisonPill,
      settings = ClusterSingletonManagerSettings(actorSystem)),
    name = "leagueProjection"
  )

}


object LeagueSerializerRegistry extends JsonSerializerRegistry {
  override def serializers: Seq[JsonSerializer[_]] = Seq(
    JsonSerializer[AddClub],
    JsonSerializer[AddGame],
    JsonSerializer[ChangeGame],
    JsonSerializer[ClubRegistered],
    JsonSerializer[GamePlayed],
    JsonSerializer[ResultRevoked],
    JsonSerializer[LeagueState]
  )
} 
Example 3
Source File: SubscriptionSessionManager.scala    From graphcool-framework   with Apache License 2.0
package cool.graph.subscriptions.protocol

import akka.actor.{Actor, ActorRef, PoisonPill, Props, Terminated}
import cool.graph.akkautil.{LogUnhandled, LogUnhandledExceptions}
import cool.graph.bugsnag.BugSnagger
import cool.graph.messagebus.PubSubPublisher
import cool.graph.subscriptions.protocol.SubscriptionProtocolV05.Requests.{InitConnection, SubscriptionSessionRequestV05}
import cool.graph.subscriptions.protocol.SubscriptionProtocolV05.Responses.SubscriptionSessionResponseV05
import cool.graph.subscriptions.protocol.SubscriptionProtocolV07.Requests.{GqlConnectionInit, SubscriptionSessionRequest}
import cool.graph.subscriptions.protocol.SubscriptionProtocolV07.Responses.SubscriptionSessionResponse
import cool.graph.subscriptions.protocol.SubscriptionSessionManager.Requests.{EnrichedSubscriptionRequest, EnrichedSubscriptionRequestV05, StopSession}

import scala.collection.mutable

object SubscriptionSessionManager {
  object Requests {
    trait SubscriptionSessionManagerRequest

    case class EnrichedSubscriptionRequestV05(
        sessionId: String,
        projectId: String,
        request: SubscriptionSessionRequestV05
    ) extends SubscriptionSessionManagerRequest

    case class EnrichedSubscriptionRequest(
        sessionId: String,
        projectId: String,
        request: SubscriptionSessionRequest
    ) extends SubscriptionSessionManagerRequest

    case class StopSession(sessionId: String) extends SubscriptionSessionManagerRequest
  }
}

case class SubscriptionSessionManager(subscriptionsManager: ActorRef, bugsnag: BugSnagger)(
    implicit responsePublisher05: PubSubPublisher[SubscriptionSessionResponseV05],
    responsePublisher07: PubSubPublisher[SubscriptionSessionResponse]
) extends Actor
    with LogUnhandledExceptions
    with LogUnhandled {

  val sessions: mutable.Map[String, ActorRef] = mutable.Map.empty

  override def receive: Receive = logUnhandled {
    case EnrichedSubscriptionRequest(sessionId, projectId, request: GqlConnectionInit) =>
      val session = startSessionActorForCurrentProtocolVersion(sessionId, projectId)
      session ! request

    case EnrichedSubscriptionRequest(sessionId, _, request: SubscriptionSessionRequest) =>
      // we might receive session requests that are not meant for this box. So we might not find an actor for this session.
      sessions.get(sessionId).foreach { session =>
        session ! request
      }

    case EnrichedSubscriptionRequestV05(sessionId, projectId, request: InitConnection) =>
      val session = startSessionActorForProtocolVersionV05(sessionId, projectId)
      session ! request

    case EnrichedSubscriptionRequestV05(sessionId, _, request) =>
      // we might receive session requests that are not meant for this box. So we might not find an actor for this session.
      sessions.get(sessionId).foreach { session =>
        session ! request
      }

    case StopSession(sessionId) =>
      sessions.get(sessionId).foreach { session =>
        session ! PoisonPill
        sessions.remove(sessionId)
      }

    case Terminated(terminatedActor) =>
      sessions.find { _._2 == terminatedActor } match {
        case Some((sessionId, _)) => sessions.remove(sessionId)
        case None                 => // nothing to do; should not happen though
      }
  }

  private def startSessionActorForProtocolVersionV05(sessionId: String, projectId: String): ActorRef = {
    val props = Props(SubscriptionSessionActorV05(sessionId, projectId, subscriptionsManager, bugsnag, responsePublisher05))
    startSessionActor(sessionId, props)
  }

  private def startSessionActorForCurrentProtocolVersion(sessionId: String, projectId: String): ActorRef = {
    val props = Props(SubscriptionSessionActor(sessionId, projectId, subscriptionsManager, bugsnag, responsePublisher07))
    startSessionActor(sessionId, props)
  }

  private def startSessionActor(sessionId: String, props: Props): ActorRef = {
    sessions.get(sessionId) match {
      case None =>
        val ref = context.actorOf(props, sessionId)
        sessions += sessionId -> ref
        context.watch(ref)

      case Some(ref) =>
        ref
    }
  }
} 
Example 4
Source File: WebsocketSession.scala    From graphcool-framework   with Apache License 2.0
package cool.graph.websockets

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorRef, PoisonPill, Props, ReceiveTimeout, Stash, Terminated}
import cool.graph.akkautil.{LogUnhandled, LogUnhandledExceptions}
import cool.graph.bugsnag.BugSnagger
import cool.graph.messagebus.QueuePublisher
import cool.graph.websockets.protocol.Request
import scala.collection.mutable
import scala.concurrent.duration._

object WebsocketSessionManager {
  object Requests {
    case class OpenWebsocketSession(projectId: String, sessionId: String, outgoing: ActorRef)
    case class CloseWebsocketSession(sessionId: String)

    case class IncomingWebsocketMessage(projectId: String, sessionId: String, body: String)
    case class IncomingQueueMessage(sessionId: String, body: String)
  }

  object Responses {
    case class OutgoingMessage(text: String)
  }
}

case class WebsocketSessionManager(
    requestsPublisher: QueuePublisher[Request],
    bugsnag: BugSnagger
) extends Actor
    with LogUnhandled
    with LogUnhandledExceptions {
  import WebsocketSessionManager.Requests._

  val websocketSessions = mutable.Map.empty[String, ActorRef]

  override def receive: Receive = logUnhandled {
    case OpenWebsocketSession(projectId, sessionId, outgoing) =>
      val ref = context.actorOf(Props(WebsocketSession(projectId, sessionId, outgoing, requestsPublisher, bugsnag)))
      context.watch(ref)
      websocketSessions += sessionId -> ref

    case CloseWebsocketSession(sessionId) =>
      websocketSessions.get(sessionId).foreach(context.stop)

    case req: IncomingWebsocketMessage =>
      websocketSessions.get(req.sessionId) match {
        case Some(session) => session ! req
        case None =>
          println(s"No session actor found for ${req.sessionId} | ${req.projectId} when processing websocket message. This should only happen very rarely.")
      }

    case req: IncomingQueueMessage =>
      websocketSessions.get(req.sessionId) match {
        case Some(session) => session ! req
        case None          => // Session already closed
      }

    case Terminated(terminatedActor) =>
      websocketSessions.retain {
        case (_, sessionActor) => sessionActor != terminatedActor
      }
  }
}

case class WebsocketSession(
    projectId: String,
    sessionId: String,
    outgoing: ActorRef,
    requestsPublisher: QueuePublisher[Request],
    bugsnag: BugSnagger
) extends Actor
    with LogUnhandled
    with LogUnhandledExceptions
    with Stash {
  import WebsocketSessionManager.Requests._
  import WebsocketSessionManager.Responses._
  import metrics.SubscriptionWebsocketMetrics._

  activeWsConnections.inc
  context.setReceiveTimeout(FiniteDuration(60, TimeUnit.MINUTES))

  def receive: Receive = logUnhandled {
    case IncomingWebsocketMessage(_, _, body) => requestsPublisher.publish(Request(sessionId, projectId, body))
    case IncomingQueueMessage(_, body)        => outgoing ! OutgoingMessage(body)
    case ReceiveTimeout                       => context.stop(self)
  }

  override def postStop = {
    activeWsConnections.dec
    outgoing ! PoisonPill
    requestsPublisher.publish(Request(sessionId, projectId, "STOP"))
  }
} 
Example 5
Source File: UserHandler.scala    From reactive-microservices   with MIT License
import akka.actor.{ActorLogging, ActorRef, PoisonPill, Props}
import akka.persistence.PersistentActor
import akka.routing.{RemoveRoutee, ActorRefRoutee, AddRoutee}
import btc.common.UserHandlerMessages._
import btc.common.WebSocketHandlerMessages.{OperationSuccessful, Alarm, AllSubscriptions}
import scala.collection.mutable
import scala.concurrent.duration._
import UserHandler._

object UserHandler {
  case object KeepAlive

  case class Ticker(max: BigDecimal, min: BigDecimal, last: BigDecimal, bid: BigDecimal, ask: BigDecimal, vwap: BigDecimal, average: BigDecimal, volume: BigDecimal)

  def props(userId: Long, wsActor: ActorRef, broadcaster: ActorRef, keepAliveTimeout: FiniteDuration) = {
    Props(new UserHandler(userId, wsActor, broadcaster, keepAliveTimeout))
  }
}

class UserHandler(userId: Long, wsActor: ActorRef, broadcaster: ActorRef, keepAliveTimeout: FiniteDuration) extends PersistentActor with ActorLogging {
  override val persistenceId: String = userId.toString

  override def preStart(): Unit = {
    super.preStart()
    broadcaster ! AddRoutee(ActorRefRoutee(self))
  }

  override def postStop(): Unit = {
    super.postStop()
    broadcaster ! RemoveRoutee(ActorRefRoutee(self))
  }

  override def receiveRecover: Receive = {
    case subscribe: Subscribe => updateState(subscribe)
    case unsubscribe: Unsubscribe => updateState(unsubscribe)
  }

  override def receiveCommand: Receive = {
    case KeepAlive if System.currentTimeMillis() - lastHeartBeatTime > keepAliveTimeout.toMillis =>
      log.info(s"Timeout while waiting for heartbeat for user $userId, stopping")
      self ! PoisonPill
    case Heartbeat =>
      log.debug(s"Got heartbeat for user $userId")
      lastHeartBeatTime = System.currentTimeMillis()
      sender() ! Heartbeat
    case QuerySubscriptions =>
      log.info(s"Got request for subscriptions for user $userId")
      wsActor ! AllSubscriptions(subscriptions.values.toList)
    case ticker: Ticker =>
      val alarms = getAlarmsForTicker(ticker)
      log.debug(s"Got ticker and sending alarms $alarms for user $userId")
      alarms.foreach(wsActor ! _)
    case subscribe: Subscribe =>
      log.debug(s"Got subscribe request $subscribe for user $userId")
      persist(subscribe) { e =>
        updateState(e)
        wsActor ! OperationSuccessful(e.id)
      }
    case unsubscribe: Unsubscribe =>
      log.debug(s"Got unsubscribe request $unsubscribe for user $userId")
      persist(unsubscribe) { e =>
        updateState(e)
        wsActor ! OperationSuccessful(e.id)
      }
  }

  private def updateState(subscribe: Subscribe) = subscriptions.put(subscribe.id, subscribe)

  private def updateState(unsubscribe: Unsubscribe) = subscriptions.remove(unsubscribe.id)

  private def getAlarmsForTicker(ticker: Ticker): List[Alarm] = {
    subscriptions.values.map {
      case SubscribeRateChange(id) => Option(Alarm(id, ticker.average))
      case SubscribeBidOver(id, threshold) => if (ticker.bid > threshold) Option(Alarm(id, ticker.bid)) else None
      case SubscribeAskBelow(id, threshold) => if (ticker.ask < threshold) Option(Alarm(id, ticker.ask)) else None
      case SubscribeVolumeOver(id, threshold) => if (ticker.volume > threshold) Option(Alarm(id, ticker.volume)) else None
      case SubscribeVolumeBelow(id, threshold) => if (ticker.volume < threshold) Option(Alarm(id, ticker.volume)) else None
    }.toList.flatten
  }

  private val subscriptions = mutable.Map.empty[Long, Subscribe]
  private var lastHeartBeatTime = System.currentTimeMillis()
} 
Example 6
Source File: Passivation.scala    From akka-dddd-template   with Apache License 2.0
package com.boldradius.cqrs

import akka.actor.{PoisonPill, Actor, ReceiveTimeout}
import com.boldradius.util.ALogging
import akka.contrib.pattern.ShardRegion.Passivate

trait Passivation extends ALogging {
  this: Actor =>

  protected def passivate(receive: Receive): Receive = receive.orElse{
    // tell the parent actor to send us a PoisonPill
    case ReceiveTimeout =>
      self.logInfo( s => s" $s ReceiveTimeout: passivating. ")
      context.parent ! Passivate(stopMessage = PoisonPill)

    // stop
    case PoisonPill => context.stop(self.logInfo( s => s" $s PoisonPill"))
  }
} 
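A sketch of how an entity actor might mix this trait in (the entity class and timeout are illustrative; it assumes the class sits in the same project so that Passivation and ALogging resolve without extra wiring):

import akka.actor.Actor

import scala.concurrent.duration._

class OrderEntity extends Actor with Passivation {
  // After two minutes of inactivity, ReceiveTimeout fires and the trait above
  // asks the parent shard region to passivate this entity with a PoisonPill.
  context.setReceiveTimeout(2.minutes)

  def receive: Receive = passivate {
    case cmd => sender() ! s"handled $cmd"
  }
}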
Example 7
Source File: ActorRefPathSelection.scala    From Scala-Reactive-Programming   with MIT License
import akka.actor.{Actor, ActorPath, ActorRef, ActorSystem, PoisonPill, Props}

case object Shutdown

class SimpleActor extends Actor {
  def receive = {
    case Shutdown => context.stop(self)
  }
}

object ActorRefActorPathApp extends App {
  val actorSystem = ActorSystem("SimpleSystem")
  val actorRef1:ActorRef = actorSystem.actorOf(Props[SimpleActor],"SimpleActor")
  println(s"Actor Reference1 = ${actorRef1}")
  println(s"Actor Path1 = ${actorRef1.path}")
  val actorPath:ActorPath = actorSystem / "SimpleActor"
  println(s"Actor Path = ${actorPath}")

  actorRef1 ! Shutdown

  Thread.sleep(1000)

  val actorRef2:ActorRef = actorSystem.actorOf(Props[SimpleActor],"SimpleActor")
  println(s"Actor Reference2 = ${actorRef2}")
  println(s"Actor Path2 = ${actorRef2.path}")

  actorSystem.terminate
} 
Example 8
Source File: IteratorIdDispatcher.scala    From CM-Well   with Apache License 2.0
package controllers

import akka.actor.{Actor, ActorRef, PoisonPill}
import cmwell.fts._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.FiniteDuration


case object GetID
sealed trait IterationStateInput
case class ScrollInput(actualEsScrollId: String) extends IterationStateInput
case class StartScrollInput(pathFilter: Option[PathFilter],
                            fieldFilters: Option[FieldFilter],
                            datesFilter: Option[DatesFilter],
                            paginationParams: PaginationParams,
                            scrollTTL: Long,
                            withHistory: Boolean,
                            withDeleted: Boolean,
                            withData: Boolean) extends IterationStateInput

case class IterationState(iterationStateInput: IterationStateInput, withHistory: Boolean, iteratorIdDispatcher: ActorRef)

class IteratorIdDispatcher(iterationStateInput: IterationStateInput, withHistory: Boolean, ttl: FiniteDuration) extends Actor {

  var cancelable = context.system.scheduler.scheduleOnce(ttl, self, PoisonPill)

  override def receive: Receive = {
    case GetID => {
      sender() ! IterationState(iterationStateInput, withHistory, self)
      cancelable.cancel()
      context.stop(self)
    }
  }
} 
Example 9
Source File: EntitySupport.scala    From akka-cqrs   with Apache License 2.0
package com.productfoundry.akka.cqrs

import akka.actor.{ActorRef, ActorSystem, PoisonPill, Terminated}
import akka.testkit.{ImplicitSender, TestKit}
import akka.util.Timeout
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Second, Span}
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.duration._

abstract class EntitySupport(_system: ActorSystem)
  extends TestKit(_system)
  with ImplicitSender
  with WordSpecLike
  with Matchers
  with BeforeAndAfterAll
  with BeforeAndAfter
  with Eventually {

  
  override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
  }
} 
Example 10
Source File: ClusterApp.scala    From reactive-lib   with Apache License 2.0
package foo

import akka.actor.{ Actor, ActorLogging, ActorSystem, PoisonPill, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.singleton.{ ClusterSingletonManager, ClusterSingletonManagerSettings }
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.stream.ActorMaterializer

object ClusterApp {

  def main(args: Array[String]): Unit = {

    implicit val system = ActorSystem()
    implicit val materializer = ActorMaterializer()
    implicit val executionContext = system.dispatcher

    val cluster = Cluster(system)
    system.log.info("Starting Akka Management")
    system.log.info("something2")
    // AkkaManagement(system).start()
    // ClusterBootstrap(system).start()

    system.actorOf(
      ClusterSingletonManager.props(
        Props[NoisySingleton],
        PoisonPill,
        ClusterSingletonManagerSettings(system)))
    Cluster(system).subscribe(
      system.actorOf(Props[ClusterWatcher]),
      ClusterEvent.InitialStateAsEvents,
      classOf[ClusterDomainEvent])

    // add real app routes here
    val routes =
      path("hello") {
        get {
          complete(
            HttpEntity(ContentTypes.`text/html(UTF-8)`, "<h1>Hello</h1>"))
        }
      }

    Http().bindAndHandle(routes, "0.0.0.0", 8080)

    system.log.info(
      s"Server online at http://localhost:8080/\nPress RETURN to stop...")

    cluster.registerOnMemberUp {
      system.log.info("Cluster member is up!")
    }
  }

  class ClusterWatcher extends Actor with ActorLogging {
    val cluster = Cluster(context.system)

    override def receive = {
      case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
    }
  }
} 
Example 11
Source File: RegionSpec.scala    From affinity   with Apache License 2.0
package io.amient.affinity.core.actor

import akka.actor.{ActorPath, ActorSystem, PoisonPill, Props}
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import io.amient.affinity.AffinityActorSystem
import io.amient.affinity.core.cluster.Coordinator
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.{Matchers, WordSpecLike}

import scala.concurrent.duration._
import scala.language.postfixOps


class RegionSpec extends WordSpecLike with Matchers with Eventually with IntegrationPatience {

  val system: ActorSystem = AffinityActorSystem.create(ConfigFactory.load("regionspec"))

  val testPartition = Props(new Partition {
    override def preStart(): Unit = {
      Thread.sleep(100)
      super.preStart()
    }

    override def handle: Receive = {
      case _: IllegalStateException => context.stop(self)
      case _ =>
    }
  })


  "A Region Actor" must {
    "must keep Coordinator Updated during partition failure & restart scenario" in {
      //      val zk = new EmbeddedZookeperServer {}
      try {
        val coordinator = Coordinator.create(system, "region")
        try {
          val d = 1 second
          implicit val timeout = Timeout(d)

          val region = system.actorOf(Props(new Container("region") {
            val partitions = List(0, 1, 2, 3)
            for (partition <- partitions) {
              context.actorOf(testPartition, name = partition.toString)
            }
          }), name = "region")
          eventually {
            coordinator.members.size should be(4)
          }

          //first stop Partition explicitly - it shouldn't be restarted
          import system.dispatcher
          system.actorSelection(ActorPath.fromString(coordinator.members.head._2)).resolveOne.foreach {
            case actorRef => system.stop(actorRef)
          }
          eventually {
            coordinator.members.size should be(3)
          }

          //now simulate error in one of the partitions
          val partitionToFail = coordinator.members.head._2
          system.actorSelection(ActorPath.fromString(partitionToFail)).resolveOne.foreach {
            case actorRef => actorRef ! new IllegalStateException("Exception expected by the Test")
          }
          eventually {
            coordinator.members.size should be(2)
          }
          eventually {
            coordinator.members should not contain (partitionToFail)
          }

          region ! PoisonPill

        } finally {
          coordinator.close
        }
      } finally {
        //        zk.close()
      }
    }
  }

}

class RegionSpecPartition extends Partition {
  override def preStart(): Unit = {
    Thread.sleep(100)
    super.preStart()
  }

  override def handle: Receive = {
    case _: IllegalStateException => context.stop(self)
    case _ =>
  }
} 
Example 12
Source File: S3DomainWatcher.scala    From shield   with MIT License
package shield.actors.config.domain

import java.util.concurrent.TimeUnit

import akka.actor.{ActorLogging, ActorRef, PoisonPill, Props}
import com.typesafe.config.ConfigFactory
import shield.actors.config._
import shield.actors.{RestartLogging, ShieldActorMsgs}
import shield.config.DomainSettings

import scala.collection.JavaConversions._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

class S3DomainWatcher extends DomainWatcher with ActorLogging with RestartLogging {

  import context.system

  var domains = Map[String, DomainSettings]()
  var configWatchers = Map[String, ActorRef]()

  val config = settings.config.getConfig("shield.s3-domain-watcher")
  val s3WatcherService = context.actorOf(Props(
    classOf[S3ObjectWatcher],
    config.getString("bucket-name"),
    config.getString("config-filename")))

  val refreshInterval = Duration(config.getDuration("refresh-interval", TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS)
  var cancellable = system.scheduler.schedule(
    0.seconds,
    refreshInterval,
    s3WatcherService,
    Refresh)

  override def postStop() = {
    cancellable.cancel()
  }

  def teardownConfigWatcher(configWatcher: ActorRef) = {
    context.system.scheduler.scheduleOnce(60.seconds) {
      configWatcher ! PoisonPill
    }
  }

  def receive: Receive = {
    case ChangedContents(contents) =>
      Try { ConfigFactory.parseString(contents) } match {
        case Success(domainsConfig) =>
          log.debug("new parsed domains config")
          val foundDomains = domainsConfig.getConfigList("domains").map(c => c.getString("domain-name") -> new DomainSettings(c, context.system)).toMap
          val newDomains = foundDomains.keySet.diff(domains.keySet)
          for (d <- newDomains) {
            configWatchers += d -> context.actorOf(ConfigWatcher.props(foundDomains(d), context.parent), "config-watcher-" + d)
          }
          val removedDomains = domains.keySet.diff(foundDomains.keySet)
          for (d <- removedDomains) {
            if (configWatchers.contains(d)){
              teardownConfigWatcher(configWatchers(d))
              configWatchers -= d
            }
          }
          domains = foundDomains

          context.parent ! ShieldActorMsgs.DomainsUpdated(foundDomains)
        case Failure(e) => log.warning(s"Error encountered while parsing domain conf: $e")
      }
  }
} 
Example 13
Source File: ProxyBalancer.scala    From shield   with MIT License
package shield.proxying

import akka.actor.{ActorContext, ActorRef, ActorRefFactory, PoisonPill}
import akka.pattern.ask
import akka.routing.RoundRobinGroup
import akka.util.Timeout
import shield.actors.Middleware
import shield.actors.config.{ProxyState, WeightedProxyState}
import shield.config.ServiceLocation
import shield.routing.{EndpointTemplate, Param}
import spray.http.{HttpRequest, HttpResponse}

import scala.annotation.tailrec
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Random

case class ProxyRequest(template: EndpointTemplate, request: HttpRequest)
case class ProxiedResponse(upstreamService: ServiceLocation, serviceName: String, template: EndpointTemplate, cacheParams: Set[Param], response: HttpResponse)


trait ProxyBalancer {
  def proxy(template: EndpointTemplate, request: HttpRequest) : Future[ProxiedResponse]
}

object FailBalancer extends ProxyBalancer {
  def proxy(template: EndpointTemplate, request: HttpRequest): Future[ProxiedResponse] =
    Future.failed(new NotImplementedError())
}

class AkkaBalancer(val balancer: ActorRef) extends ProxyBalancer {
  // todo: use global timeout config
  implicit val timeout = Timeout(60.seconds)
  def proxy(template: EndpointTemplate, request: HttpRequest) = (balancer ? ProxyRequest(template, request)).mapTo[ProxiedResponse]
}

trait ProxyBalancerBuilder[T <: ProxyBalancer] {
  val allMiddleware : List[Middleware]
  def build(actors: Set[ActorRef]) : ProxyBalancer
  def teardown() : Unit
}

object EmptyBalancerBuilder extends ProxyBalancerBuilder[ProxyBalancer] {
  val allMiddleware : List[Middleware] = Nil
  def build(actors: Set[ActorRef]) : ProxyBalancer = {
    FailBalancer
  }
  def teardown() : Unit = {}
}

// todo: weighted service instances (and dynamic weighting)
// todo: retry safe gets (config option to enable) via something like http://doc.akka.io/docs/akka/snapshot/scala/routing.html#TailChoppingPool_and_TailChoppingGroup
class RoundRobinBalancerBuilder(val allMiddleware: List[Middleware], factory: ActorRefFactory, hostProxies: Map[ActorRef, WeightedProxyState])(implicit execContext: ExecutionContext) extends ProxyBalancerBuilder[AkkaBalancer] {
  var balancers : List[ActorRef] = Nil

  // https://en.wikipedia.org/wiki/Euclidean_algorithm#Implementations
  // nb: gcd is associative, so we're safe to `reduce` the results
  @tailrec
  private def gcd(a: Int, b: Int) : Int =
    if (b == 0) {
      a
    } else {
      gcd(b, a % b)
    }


  def build(actors: Set[ActorRef]) : AkkaBalancer = {
    // todo: refactor this out to somewhere common when we have other balancer types
    val actorWeight = actors.map(hostProxies(_).weight)
    val totalWeight = actorWeight.sum
    val group = if (totalWeight == 0) {
      actors.toList
    } else {
      val actorGCD = actorWeight.filter(_ != 0).reduceLeftOption(gcd).getOrElse(1)
      Random.shuffle(actors.toList.flatMap(a => List.fill(hostProxies(a).weight / actorGCD)(a)))
    }
    val balancer = factory.actorOf(RoundRobinGroup(group.map(_.path.toString)).props())
    balancers = balancer :: balancers

    new AkkaBalancer(balancer)
  }

  def teardown() = {
    for (balancer <- balancers) {
      // just stop the router, not the host proxy behind them
      balancer ! PoisonPill
    }
    balancers = Nil
  }
} 
Example 14
Source File: Webservice.scala    From akka-viz   with MIT License
package akkaviz.server

import akka.actor.{ActorRef, ActorSystem, Kill, PoisonPill}
import akka.http.scaladsl.coding.Gzip
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.ws.{BinaryMessage, Message}
import akka.http.scaladsl.server.Directives
import akka.stream.scaladsl._
import akka.stream.{Materializer, OverflowStrategy}
import akkaviz.config.Config
import akkaviz.events._
import akkaviz.events.types._
import akkaviz.persistence.{PersistenceSources, ReceivedRecord}
import akkaviz.protocol

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

class Webservice(implicit materializer: Materializer, system: ActorSystem)
    extends Directives with SubscriptionSession with ReplSupport with AkkaHttpHelpers with ArchiveSupport
    with FrontendResourcesSupport with ProtocolSerializationSupport with BackendEventsMarshalling {

  def route: Flow[HttpRequest, HttpResponse, Any] = encodeResponseWith(Gzip) {
    get {
      path("stream") {
        handleWebSocketMessages(tracingEventsFlow.mapMaterializedValue(EventSystem.subscribe))
      }
    } ~
      archiveRouting ~
      replRouting ~
      frontendResourcesRouting
  }

  def tracingEventsFlow: Flow[Message, Message, ActorRef] = {
    val eventSrc = Source.actorRef[BackendEvent](Config.bufferSize, OverflowStrategy.dropNew)

    val wsIn = Flow[Message]
      .via(websocketMessageToClientMessage)
      .via(handleUserCommand)
      .scan(defaultSettings)(updateSettings)
      .expand(r => Iterator.continually(r))

    val out = wsIn.zipMat(eventSrc)((_, m) => m)
      .collect {
        case (settings, r: BackendEvent) if settings.eventAllowed(r) => r
      }.via(backendEventToProtocolFlow)
      .keepAlive(10.seconds, () => protocol.Ping)
      .via(protocolServerMessageToByteString)
      .map(BinaryMessage.Strict(_))

    out
  }

  private[this] val handleUserCommand: Flow[protocol.ApiClientMessage, ChangeSubscriptionSettings, _] = Flow[protocol.ApiClientMessage].mapConcat {
    case protocol.SetAllowedMessages(classNames) =>
      system.log.debug(s"Set allowed messages to $classNames")
      List(SetAllowedClasses(classNames))
    case protocol.ObserveActors(actors) =>
      system.log.debug(s"Set observed actors to $actors")
      List(SetActorEventFilter(actors))
    case protocol.SetReceiveDelay(duration) =>
      system.log.debug(s"Setting receive delay to $duration")
      EventSystem.setReceiveDelay(duration)
      Nil
    case protocol.SetEnabled(isEnabled) =>
      system.log.info(s"Setting EventSystem.setEnabled($isEnabled)")
      EventSystem.setEnabled(isEnabled)
      Nil
    case protocol.RefreshInternalState(actor) =>
      ActorSystems.refreshActorState(actor)
      Nil
    case protocol.PoisonPillActor(actor) =>
      ActorSystems.tell(actor, PoisonPill)
      Nil
    case protocol.KillActor(actor) =>
      ActorSystems.tell(actor, Kill)
      Nil
  }

  override def receivedOf(ref: String): Source[ReceivedRecord, _] = PersistenceSources.of(ref)

  override def receivedBetween(ref: String, ref2: String): Source[ReceivedRecord, _] = PersistenceSources.between(ref, ref2)

  override def isArchiveEnabled: Boolean = Config.enableArchive

} 
Example 15
Source File: CouchbaseSnapshotSpec.scala    From akka-persistence-couchbase   with Apache License 2.0
package akka.persistence.couchbase
import akka.actor.{ActorSystem, PoisonPill}
import akka.persistence.couchbase.TestActor.{GetLastRecoveredEvent, SaveSnapshot}
import akka.stream.ActorMaterializer
import akka.testkit.{TestKit, TestProbe, WithLogCapturing}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Matchers, WordSpecLike}

import scala.concurrent.duration._

class CouchbaseSnapshotSpec
    extends TestKit(
      ActorSystem(
        "CouchbaseSnapshotSpec",
        ConfigFactory.parseString("""
            |akka.loggers = ["akka.testkit.SilenceAllTestEventListener"]
          """.stripMargin).withFallback(ConfigFactory.load())
      )
    )
    with WordSpecLike
    with BeforeAndAfterAll
    with Matchers
    with CouchbaseBucketSetup
    with BeforeAndAfterEach
    with WithLogCapturing {
  protected override def afterAll(): Unit = {
    super.afterAll()
    shutdown(system)
  }

  val waitTime = 100.millis
  implicit val materializer = ActorMaterializer()

  "entity" should {
    "recover" in {
      val senderProbe = TestProbe()
      implicit val sender = senderProbe.ref

      {
        val pa1 = system.actorOf(TestActor.props("p1"))
        pa1 ! "p1-evt-1"
        senderProbe.expectMsg("p1-evt-1-done")

        senderProbe.watch(pa1)
        pa1 ! PoisonPill
        senderProbe.expectTerminated(pa1)
      }
      {
        val pa1 = system.actorOf(TestActor.props("p1"))

        pa1 ! GetLastRecoveredEvent
        senderProbe.expectMsg("p1-evt-1")
      }
    }
    "recover after snapshot" in {
      val senderProbe = TestProbe()
      implicit val sender = senderProbe.ref

      {
        val pa1 = system.actorOf(TestActor.props("p2"))
        pa1 ! "p2-evt-1"
        senderProbe.expectMsg("p2-evt-1-done")

        pa1 ! SaveSnapshot
        senderProbe.expectMsgType[Long]

        senderProbe.watch(pa1)
        pa1 ! PoisonPill
        senderProbe.expectTerminated(pa1)
      }
      {
        val pa1 = system.actorOf(TestActor.props("p2"))

        pa1 ! GetLastRecoveredEvent
        senderProbe.expectMsg("p2-evt-1")
      }
    }
  }
} 
Example 16
Source File: WebSocketMessageHandler.scala    From asura   with MIT License
package asura.core.actor.flow

import akka.NotUsed
import akka.actor.{ActorRef, PoisonPill}
import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Sink, Source}
import asura.common.actor.{ActorEvent, SenderMessage}
import asura.common.exceptions.InvalidStatusException
import asura.core.CoreConfig
import asura.core.util.JacksonSupport

import scala.concurrent.duration._

object WebSocketMessageHandler {

  val DEFAULT_BUFFER_SIZE = CoreConfig.DEFAULT_WS_ACTOR_BUFFER_SIZE
  val KEEP_ALIVE_INTERVAL = 2

  def newHandleFlow[T <: AnyRef](workActor: ActorRef, msgClass: Class[T]): Flow[Message, Message, NotUsed] = {
    val incomingMessages: Sink[Message, NotUsed] =
      Flow[Message].map {
        case TextMessage.Strict(text) => JacksonSupport.parse(text, msgClass)
        case _ => throw InvalidStatusException("Unsupported message type")
      }.to(Sink.actorRef[T](workActor, PoisonPill))
    val outgoingMessages: Source[Message, NotUsed] =
      Source.actorRef[ActorEvent](DEFAULT_BUFFER_SIZE, OverflowStrategy.dropHead)
        .mapMaterializedValue { outActor =>
          workActor ! SenderMessage(outActor)
          NotUsed
        }
        .map(result => TextMessage(JacksonSupport.stringify(result)))
        .keepAlive(KEEP_ALIVE_INTERVAL.seconds, () => TextMessage.Strict(""))
    Flow.fromSinkAndSource(incomingMessages, outgoingMessages)
  }

  def newHandleStringFlow[T <: AnyRef](workActor: ActorRef, msgClass: Class[T]): Flow[Message, Message, NotUsed] = {
    val incomingMessages: Sink[Message, NotUsed] =
      Flow[Message].map {
        case TextMessage.Strict(text) => JacksonSupport.parse(text, msgClass)
        case _ => throw InvalidStatusException("Unsupported message type")
      }.to(Sink.actorRef[T](workActor, PoisonPill))
    val outgoingMessages: Source[Message, NotUsed] =
      Source.actorRef[String](DEFAULT_BUFFER_SIZE, OverflowStrategy.dropHead)
        .mapMaterializedValue { outActor =>
          workActor ! SenderMessage(outActor)
          NotUsed
        }
        .map(result => TextMessage(result))
        .keepAlive(KEEP_ALIVE_INTERVAL.seconds, () => TextMessage.Strict(""))
    Flow.fromSinkAndSource(incomingMessages, outgoingMessages)
  }

  def stringToActorEventFlow[T <: AnyRef](workActor: ActorRef, msgClass: Class[T]): Flow[String, String, NotUsed] = {
    val incomingMessages: Sink[String, NotUsed] =
      Flow[String].map {
        case text: String => JacksonSupport.parse(text, msgClass)
      }.to(Sink.actorRef[T](workActor, PoisonPill))
    val outgoingMessages: Source[String, NotUsed] =
      Source.actorRef[ActorEvent](DEFAULT_BUFFER_SIZE, OverflowStrategy.dropHead)
        .mapMaterializedValue { outActor =>
          workActor ! SenderMessage(outActor)
          NotUsed
        }
        .map(result => JacksonSupport.stringify(result))
        .keepAlive(KEEP_ALIVE_INTERVAL.seconds, () => "")
    Flow.fromSinkAndSource(incomingMessages, outgoingMessages)
  }

  def stringToActorEventFlow[T <: AnyRef](workActor: ActorRef): Flow[String, String, NotUsed] = {
    val incomingMessages: Sink[String, NotUsed] =
      Flow[String].to(Sink.actorRef[String](workActor, PoisonPill))
    val outgoingMessages: Source[String, NotUsed] =
      Source.actorRef[ActorEvent](DEFAULT_BUFFER_SIZE, OverflowStrategy.dropHead)
        .mapMaterializedValue { outActor =>
          workActor ! SenderMessage(outActor)
          NotUsed
        }
        .map(result => JacksonSupport.stringify(result))
        .keepAlive(KEEP_ALIVE_INTERVAL.seconds, () => "")
    Flow.fromSinkAndSource(incomingMessages, outgoingMessages)
  }
} 
Example 17
Source File: JobManualActor.scala    From asura   with MIT License
package asura.core.job.actor

import akka.actor.{ActorRef, PoisonPill, Props, Status}
import akka.pattern.pipe
import asura.common.actor.{BaseActor, SenderMessage}
import asura.common.util.{LogUtils, StringUtils}
import asura.core.CoreConfig
import asura.core.es.model.{Job, JobReport}
import asura.core.es.service.{JobReportService, JobService}
import asura.core.job.{JobCenter, JobExecDesc}

class JobManualActor(jobId: String, user: String, out: ActorRef) extends BaseActor {

  implicit val executionContext = context.dispatcher
  if (null != out) self ! SenderMessage(out)

  override def receive: Receive = {
    case SenderMessage(sender) =>
      context.become(handleRequest(sender))
      self ! jobId
  }

  def handleRequest(wsActor: ActorRef): Receive = {
    case job: Job =>
      val jobImplOpt = JobCenter.classAliasJobMap.get(job.classAlias)
      if (jobImplOpt.isEmpty) {
        wsActor ! s"Can't find job implementation of ${job.classAlias}"
        wsActor ! JobExecDesc.STATUS_FAIL
        wsActor ! Status.Success
      } else {
        val jobImpl = jobImplOpt.get
        val (isOk, errMsg) = jobImpl.checkJobData(job.jobData)
        if (isOk) {
          JobExecDesc.from(jobId, job, JobReport.TYPE_MANUAL, null, user).map(jobExecDesc => {
            jobImpl.doTestAsync(jobExecDesc, logMsg => {
              wsActor ! logMsg
            }).pipeTo(self)
          }).recover {
            case t: Throwable =>
              self ! Status.Failure(t)
          }
        } else {
          wsActor ! errMsg
          wsActor ! Status.Success
        }
      }
    case jobId: String =>
      if (StringUtils.isNotEmpty(jobId)) {
        JobService.getJobById(jobId).pipeTo(self)
      } else {
        wsActor ! s"jobId is empty."
        wsActor ! Status.Success
      }
    case execDesc: JobExecDesc =>
      execDesc.prepareEnd()
      val report = execDesc.report
      JobReportService.indexReport(execDesc.reportId, report).map { _ =>
        val reportUrl = s"view report: ${CoreConfig.reportBaseUrl}/${execDesc.reportId}"
        wsActor ! reportUrl
        wsActor ! execDesc.report.result
        wsActor ! Status.Success
      }.recover {
        case t: Throwable =>
          self ! Status.Failure(t)
      }
    case Status.Failure(t) =>
      val stackTrace = LogUtils.stackTraceToString(t)
      log.warning(stackTrace)
      wsActor ! t.getMessage
      wsActor ! JobExecDesc.STATUS_FAIL
      wsActor ! Status.Success
  }

  override def postStop(): Unit = {
    log.debug(s"${self.path} is stopped")
  }
}

object JobManualActor {
  def props(jobId: String, user: String, out: ActorRef = null) = Props(new JobManualActor(jobId, user, out))
} 
Example 18
Source File: GracefulStopHelperSpec.scala    From squbs   with Apache License 2.0
package org.squbs.lifecycle

import akka.testkit.{TestActorRef, ImplicitSender, TestKit}
import akka.actor.{PoisonPill, ActorRef, Actor, ActorSystem}
import org.scalatest.{BeforeAndAfterAll, Matchers, FlatSpecLike}
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.Future

class GracefulStopHelperSpec extends TestKit(ActorSystem("testSystem"))
  with FlatSpecLike with Matchers with ImplicitSender with BeforeAndAfterAll {

  import system.dispatcher

  "GracefulStopHelper" should "work when stop failed" in {

    val actorRef = TestActorRef(new Actor with GracefulStopHelper {
      def receive: Actor.Receive = {
        case "Stop" =>
          defaultMidActorStop(Seq(self))
          sender() ! "Done"
      }

      override def gracefulStop(target: ActorRef, timeout: FiniteDuration, stopMessage: Any = PoisonPill):
          Future[Boolean] = {
        Future {
          throw new RuntimeException("BadMan")
        }

      }
    })

    actorRef ! "Stop"
    expectMsg("Done")

  }

  "GracefulStopHelper" should "work" in {

    val actorRef = TestActorRef(new Actor with GracefulStopHelper {

      def receive: Actor.Receive = {
        case "Stop" =>
          defaultMidActorStop(Seq(self))
          sender() ! "Done"
      }

      override def gracefulStop(target: ActorRef, timeout: FiniteDuration, stopMessage: Any = PoisonPill):
          Future[Boolean] = {
        Future {
          true
        }
      }
    })

    actorRef ! "Stop"
    expectMsg("Done")
  }

} 
Example 19
Source File: AkkaKubernetes.scala    From akka-kubernetes-tests   with Apache License 2.0
package akka.kubernetes.sample

import akka.actor.{Actor, ActorLogging, ActorSystem, PoisonPill, Props}
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.sharding.{ClusterSharding, ClusterShardingSettings}
import akka.cluster.singleton.{
  ClusterSingletonManager,
  ClusterSingletonManagerSettings,
  ClusterSingletonProxy,
  ClusterSingletonProxySettings
}
import akka.cluster.{Cluster, ClusterEvent}
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.stream.ActorMaterializer

object DemoApp extends App {

  implicit val system = ActorSystem("KubernetesTest")

  import system.{dispatcher, log}

  implicit val mat = ActorMaterializer()
  implicit val cluster = Cluster(system)

  log.info("Running with [{}]", new Resources())
  log.info(s"Started [$system], cluster.selfAddress = ${cluster.selfAddress}")

  AkkaManagement(system).start()
  ClusterBootstrap(system).start()

  system.actorOf(
    ClusterSingletonManager.props(singletonProps = Props(new AkkaBoss("patriknw")),
                                  terminationMessage = PoisonPill,
                                  settings = ClusterSingletonManagerSettings(system)),
    "boss"
  )

  val bossProxy = system.actorOf(
    ClusterSingletonProxy.props(singletonManagerPath = "/user/boss", settings = ClusterSingletonProxySettings(system)),
    name = "bossProxy"
  )

  val teamMembers = ClusterSharding(system).start(
    "team-member",
    Props(new AkkaMember()),
    ClusterShardingSettings(system),
    AkkaMember.extractEntityId,
    AkkaMember.extractShardId
  )

  cluster.subscribe(system.actorOf(Props[ClusterWatcher]),
                    ClusterEvent.InitialStateAsEvents,
                    classOf[ClusterDomainEvent])

  val talkToTheBoss = new TalkToTheBossRouteRoute(bossProxy)
  val talkToATeamMember = new TalkToATeamMemberRoute(teamMembers)

  Http().bindAndHandle(
    concat(talkToTheBoss.route(),
           talkToATeamMember.route(),
           ClusterStateRoute.routeGetMembers(cluster),
           VersionRoute.versionRoute),
    "0.0.0.0",
    8080
  )

  Cluster(system).registerOnMemberUp({
    log.info("Cluster member is up!")
  })

}

class ClusterWatcher extends Actor with ActorLogging {
  implicit val cluster = Cluster(context.system)

  override def receive = {
    case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
  }
} 
Example 20
Source File: WebSocketRoute.scala    From vamp   with Apache License 2.0
package io.vamp.http_api.ws

import java.util.UUID

import akka.actor.PoisonPill
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.ws.{ Message, TextMessage }
import akka.http.scaladsl.server.Route
import akka.stream._
import akka.stream.scaladsl.{ Flow, Sink, Source }
import akka.util.Timeout
import io.vamp.common.akka.IoC._
import io.vamp.common.http.{ HttpApiDirectives, HttpApiHandlers, TerminateFlowStage }
import io.vamp.common.{ Config, Namespace }
import io.vamp.http_api.ws.WebSocketActor.{ SessionClosed, SessionEvent, SessionOpened, SessionRequest }
import io.vamp.http_api.{ AbstractRoute, LogDirective }

import scala.concurrent.Future

trait WebSocketRoute extends AbstractRoute with WebSocketMarshaller with HttpApiHandlers {
  this: HttpApiDirectives with LogDirective ⇒

  implicit def materializer: Materializer

  private lazy val limit = Config.int("vamp.http-api.websocket.stream-limit")

  protected def websocketApiHandler(implicit namespace: Namespace, timeout: Timeout): Route

  def websocketRoutes(implicit namespace: Namespace, timeout: Timeout) = {
    pathEndOrSingleSlash {
      get {
        extractRequest { request ⇒
          handleWebSocketMessages {
            websocket(request)
          }
        }
      }
    }
  }

  protected def filterWebSocketOutput(message: AnyRef)(implicit namespace: Namespace, timeout: Timeout): Future[Boolean] = Future.successful(true)

  private def apiHandler(implicit namespace: Namespace, timeout: Timeout) = Route.asyncHandler(log {
    websocketApiHandler
  })

  private def websocket(origin: HttpRequest)(implicit namespace: Namespace, timeout: Timeout): Flow[AnyRef, Message, Any] = {
    val id = UUID.randomUUID()

    val in = Flow[AnyRef].collect {
      case TextMessage.Strict(message)  ⇒ Future.successful(message)
      case TextMessage.Streamed(stream) ⇒ stream.limit(limit()).completionTimeout(timeout.duration).runFold("")(_ + _)
    }.mapAsync(parallelism = 3)(identity)
      .mapConcat(unmarshall)
      .map(SessionRequest(apiHandler, id, origin, _))
      .to(Sink.actorRef[SessionEvent](actorFor[WebSocketActor], SessionClosed(id)))

    val out = Source.actorRef[AnyRef](16, OverflowStrategy.dropHead)
      .mapMaterializedValue(actorFor[WebSocketActor] ! SessionOpened(id, _))
      .via(new TerminateFlowStage[AnyRef](_ == PoisonPill))
      .mapAsync(parallelism = 3)(message ⇒ filterWebSocketOutput(message).map(f ⇒ f → message))
      .collect { case (true, m) ⇒ m }
      .map(message ⇒ TextMessage.Strict(marshall(message)))

    Flow.fromSinkAndSource(in, out)
  }
} 
Example 21
Source File: ActorBootstrap.scala    From vamp   with Apache License 2.0
package io.vamp.common.akka

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.util.Timeout
import com.typesafe.scalalogging.Logger
import io.vamp.common.{ ClassProvider, Namespace }
import org.slf4j.{ LoggerFactory, MDC }

import scala.concurrent.Future
import scala.reflect.{ ClassTag, classTag }

trait Bootstrap extends BootstrapLogger {

  def start(): Future[Unit] = Future.successful(())

  def stop(): Future[Unit] = Future.successful(())
}

trait ActorBootstrap extends BootstrapLogger {

  private var actors: Future[List[ActorRef]] = Future.successful(Nil)

  def createActors(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[List[ActorRef]]

  def start(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[Unit] = {
    info(s"Starting ${getClass.getSimpleName}")
    actors = createActors(actorSystem, namespace, timeout)
    actors.map(_ ⇒ ())(actorSystem.dispatcher)
  }

  def restart(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[Unit] = {
    stop.flatMap(_ ⇒ start)(actorSystem.dispatcher)
  }

  def stop(implicit actorSystem: ActorSystem, namespace: Namespace): Future[Unit] = {
    info(s"Stopping ${getClass.getSimpleName}")
    actors.map(_.reverse.foreach(_ ! PoisonPill))(actorSystem.dispatcher)
  }

  def alias[T: ClassTag](name: String, default: String ⇒ Future[ActorRef])(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[ActorRef] = {
    ClassProvider.find[T](name).map { clazz ⇒
      IoC.alias(classTag[T].runtimeClass, clazz)
      IoC.createActor(clazz)
    } getOrElse default(name)
  }
}

trait BootstrapLogger {

  protected val logger = Logger(LoggerFactory.getLogger(getClass))

  protected def info(message: String)(implicit namespace: Namespace): Unit = {
    MDC.put("namespace", namespace.name)
    try logger.info(message) finally MDC.remove("namespace")
  }
} 
Example 22
Source File: StatsSampleOneMaster.scala    From fusion-data   with Apache License 2.0
package sample.cluster.stats

import akka.actor.{ ActorSystem, PoisonPill, Props }
import akka.cluster.singleton.{
  ClusterSingletonManager,
  ClusterSingletonManagerSettings,
  ClusterSingletonProxy,
  ClusterSingletonProxySettings
}
import com.typesafe.config.ConfigFactory

object StatsSampleOneMaster {
  def main(args: Array[String]): Unit =
    if (args.isEmpty) {
      startup(Seq("2551", "2552", "0"))
      StatsSampleOneMasterClient.main(Array.empty)
    } else {
      startup(args)
    }

  def startup(ports: Seq[String]): Unit =
    ports foreach { port =>
      // Override the configuration of the port when specified as program argument
      val config =
        ConfigFactory
          .parseString(s"""
          akka.remote.netty.tcp.port=$port
          akka.remote.artery.canonical.port=$port
          """)
          .withFallback(ConfigFactory.parseString("akka.cluster.roles = [compute]"))
          .withFallback(ConfigFactory.load("stats2"))

      val system = ActorSystem("ClusterSystem", config)

      system.actorOf(
        ClusterSingletonManager.props(
          singletonProps = Props[StatsService],
          terminationMessage = PoisonPill,
          settings = ClusterSingletonManagerSettings(system).withRole("compute")),
        name = "statsService")

      system.actorOf(
        ClusterSingletonProxy.props(
          singletonManagerPath = "/user/statsService",
          settings = ClusterSingletonProxySettings(system).withRole("compute")),
        name = "statsServiceProxy")
    }
}

object StatsSampleOneMasterClient {
  def main(args: Array[String]): Unit = {
    // note that client is not a compute node, role not defined
    val system = ActorSystem("ClusterSystem")
    system.actorOf(Props(classOf[StatsSampleClient], "/user/statsServiceProxy"), "client")
  }
} 
Example 23
Source File: DeathPactTest.scala    From reactive-programming   with Apache License 2.0
package com.test.week6

import akka.actor.Actor.emptyBehavior
import akka.actor.{ Actor, ActorRef, PoisonPill, Props }
import com.test.TestSpec

class DeathPactTest extends TestSpec {

  // let's create some lovers

  class Boy(girl: ActorRef) extends Actor {
    context.watch(girl) // sign deathpact
    override def receive = emptyBehavior
  }

  class Girl extends Actor {
    import scala.concurrent.duration._
    context.system.scheduler.scheduleOnce(100.millis, self, PoisonPill)
    override def receive: Receive = emptyBehavior
  }

  // yes I know, boy/girl, I am old fashioned..

  "Lovers" should "die together" in {
    val tp = probe
    val girl = system.actorOf(Props(new Girl))
    val boy = system.actorOf(Props(new Boy(girl)))
    tp watch boy
    tp watch girl
    tp.expectTerminated(girl)
    tp.expectTerminated(boy)
  }
} 
Example 24
Source File: TestSpec.scala    From reactive-programming   with Apache License 2.0
package com.test

import java.io.IOException
import java.util.UUID

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.exceptions.TestFailedException
import org.scalatest._
import rx.lang.scala._

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContextExecutor, Future }
import scala.util.{ Random ⇒ Rnd, Try }

object Random {
  def apply(): Rnd = new Rnd()
}

trait TestSpec extends FlatSpec with Matchers with ScalaFutures with TryValues with OptionValues with Eventually with BeforeAndAfterAll {
  implicit val system: ActorSystem = ActorSystem("test")
  implicit val ec: ExecutionContextExecutor = system.dispatcher
  val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 50.seconds)
  implicit val timeout = Timeout(50.seconds)

  override protected def afterAll(): Unit = {
    system.terminate()
  }

  
  def cleanup(actors: ActorRef*): Unit = {
    actors.foreach { (actor: ActorRef) ⇒
      actor ! PoisonPill
      probe watch actor
    }
  }

  implicit class PimpedByteArray(self: Array[Byte]) {
    def getString: String = new String(self)
  }

  implicit class PimpedFuture[T](self: Future[T]) {
    def toTry: Try[T] = Try(self.futureValue)
  }

  implicit class PimpedObservable[T](self: Observable[T]) {
    def waitFor: Unit = {
      self.toBlocking.toIterable.last
    }
  }

  implicit class MustBeWord[T](self: T) {
    def mustBe(pf: PartialFunction[T, Unit]): Unit =
      if (!pf.isDefinedAt(self)) throw new TestFailedException("Unexpected: " + self, 0)
  }

  object Socket { def apply() = new Socket }
  class Socket {
    def readFromMemory: Future[Array[Byte]] = Future {
      Thread.sleep(100) // sleep 100 millis
      "fromMemory".getBytes
    }

    def send(payload: Array[Byte], from: String, failed: Boolean): Future[Array[Byte]] =
      if (failed) Future.failed(new IOException(s"Network error: $from"))
      else {
        Future {
          Thread.sleep(250) // sleep 250 millis, not real life time, but hey
          s"${payload.getString}->$from".getBytes
        }
      }

    def sendToEurope(payload: Array[Byte], failed: Boolean = false): Future[Array[Byte]] =
      send(payload, "fromEurope", failed)

    def sendToUsa(payload: Array[Byte], failed: Boolean = false): Future[Array[Byte]] =
      send(payload, "fromUsa", failed)
  }
} 
Example 25
Source File: CoordinatorSpec.scala    From cave   with MIT License 5 votes vote down vote up
package actors

import akka.actor.{ActorSystem, PoisonPill, Props}
import akka.testkit.{ImplicitSender, TestActorRef, TestKit}
import com.cave.metrics.data._
import init.AwsWrapper
import init.AwsWrapper.WorkItem
import org.mockito.Mockito._
import org.mockito.Matchers._
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{BeforeAndAfterAll, WordSpecLike}
import org.specs2.matcher.ShouldMatchers

import scala.concurrent.{Future, ExecutionContext}
import scala.util.Success

class CoordinatorSpec extends TestKit(ActorSystem()) with WordSpecLike with ShouldMatchers with ImplicitSender with BeforeAndAfterAll with AlertJsonData with MockitoSugar {

  val mockAwsWrapper = mock[AwsWrapper]
  val mockDataManager = mock[CacheDataManager]

  override def afterAll() = {
    system.shutdown()
  }

  "A coordinator" must {

    "create schedulers for all enabled alerts" in {

      val SomeId = "1234"
      val AnotherId = "4321"
      val OtherId = "12345"

      val alerts = List(
        Schedule(OrgName, Some(TeamName), None, NotificationUrl, Alert(Some(SomeId), AlertDescription, AlertEnabled, AlertPeriod, AlertCondition, Some(AlertHandbookUrl), Some(AlertRouting))),
        Schedule(OrgName, Some(TeamName), None, NotificationUrl, Alert(Some(AnotherId), AlertDescription, AlertEnabled, AlertPeriod, AlertCondition, Some(AlertHandbookUrl), Some(AlertRouting))))

      val moreAlerts = List(
        Schedule(TeamName, None, None, NotificationUrl, Alert(Some(OtherId), AlertDescription, AlertEnabled, AlertPeriod, AlertCondition, Some(AlertHandbookUrl), Some(AlertRouting)))
      )

      when(mockDataManager.getEnabledAlerts()).thenReturn(Success(Map(OrgName -> alerts, TeamName -> moreAlerts)))
      when(mockAwsWrapper.receiveMessages()(any[ExecutionContext])).thenReturn(Future.successful(List.empty[WorkItem]))
      val coordinator = TestActorRef(Props(new Coordinator(mockAwsWrapper, mockDataManager) {
        override def createScheduler(schedule: Schedule) = {}
      }))

      coordinator ! Coordinator.StatusRequest

      expectMsgPF() {
        case Coordinator.StatusResponse(cache, schedules) =>
          cache.schedulesByOrganization should haveSize(2)
          val forOrgName = cache.schedulesByOrganization(OrgName)
          forOrgName should haveSize(2)
          val forTeamName = cache.schedulesByOrganization(TeamName)
          forTeamName should haveSize(1)

          schedules should haveSize(3)

        case _ => fail("Unexpected message received.")
      }


      coordinator ! PoisonPill
      watch(coordinator)
      expectTerminated(coordinator)
    }
  }
} 
Example 26
Source File: ClusterApp.scala    From reactive-cli   with Apache License 2.0 5 votes vote down vote up
package foo

import akka.actor.{ Actor, ActorLogging, ActorSystem, PoisonPill, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.singleton.{ ClusterSingletonManager, ClusterSingletonManagerSettings }
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer

object ClusterApp {

  def main(args: Array[String]): Unit = {

    implicit val system = ActorSystem()
    implicit val materializer = ActorMaterializer()
    implicit val executionContext = system.dispatcher

    val cluster = Cluster(system)
    system.log.info("Starting Akka Management")
    system.log.info("something2")
    // AkkaManagement(system).start()
    // ClusterBootstrap(system).start()

    system.actorOf(
      ClusterSingletonManager.props(
        Props[NoisySingleton],
        PoisonPill,
        ClusterSingletonManagerSettings(system)))
    Cluster(system).subscribe(
      system.actorOf(Props[ClusterWatcher]),
      ClusterEvent.InitialStateAsEvents,
      classOf[ClusterDomainEvent])

    // add real app routes here
    val routes =
      path("hello") {
        get {
          complete(
            HttpEntity(ContentTypes.`text/html(UTF-8)`, "<h1>Hello</h1>"))
        }
      }

    Http().bindAndHandle(routes, "0.0.0.0", 8080)

    system.log.info(
      s"Server online at http://localhost:8080/\nPress RETURN to stop...")

    cluster.registerOnMemberUp(() => {
      system.log.info("Cluster member is up!")
    })
  }

  class ClusterWatcher extends Actor with ActorLogging {
    val cluster = Cluster(context.system)

    override def receive = {
      case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
    }
  }
} 
Example 28
Source File: ScanResultsActor.scala    From project-matt   with MIT License 5 votes vote down vote up
package org.datafy.aws.app.matt.app

import akka.actor.{Actor, ActorLogging, PoisonPill, Props}
import akka.event.Logging

class ScanResultsActor extends Actor {

  val log = Logging(context.system, this)

  def receive = {
    case text =>
      // save results here and send summary report
      log.info(s"New message from executing actor - ScanRequestActor: ${text}")
      sender() ! PoisonPill
      self ! PoisonPill
  }
}

object ScanResultsActor {
  val props = Props[ScanResultsActor]
  case object Initialize
  case class ScanResultsMessage(text: String)
} 
Example 29
Source File: TestSpec.scala    From intro-to-akka-streams   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.streams

import akka.NotUsed
import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.stream.testkit.TestSubscriber
import akka.stream.testkit.scaladsl.TestSink
import akka.testkit.TestProbe
import akka.util.Timeout
import com.github.dnvriend.streams.util.ClasspathResources
import org.scalatest._
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatestplus.play.guice.GuiceOneServerPerSuite
import play.api.inject.BindingKey
import play.api.libs.json.{ Format, Json }
import play.api.test.WsTestClient

import scala.collection.immutable._
import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
import scala.util.Try

object Person {
  implicit val format: Format[Person] = Json.format[Person]
}

final case class Person(firstName: String, age: Int)

class TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with OptionValues
    with TryValues
    with ScalaFutures
    with WsTestClient
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with ClasspathResources
    with GuiceOneServerPerSuite {

  def getComponent[A: ClassTag] = app.injector.instanceOf[A]
  def getNamedComponent[A](name: String)(implicit ct: ClassTag[A]): A =
    app.injector.instanceOf[A](BindingKey(ct.runtimeClass.asInstanceOf[Class[A]]).qualifiedWith(name))

  // set the port number of the HTTP server
  override lazy val port: Int = 8081
  implicit val timeout: Timeout = 1.second
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 30.seconds, interval = 300.millis)
  implicit val system: ActorSystem = getComponent[ActorSystem]
  implicit val ec: ExecutionContext = getComponent[ExecutionContext]
  implicit val mat: Materializer = getComponent[Materializer]
  val log: LoggingAdapter = Logging(system, this.getClass)

  // ================================== Supporting Operations ====================================
  def id: String = java.util.UUID.randomUUID().toString

  implicit class FutureToTry[T](f: Future[T]) {
    def toTry: Try[T] = Try(f.futureValue)
  }

  implicit class SourceOps[A](src: Source[A, NotUsed]) {
    def testProbe(f: TestSubscriber.Probe[A] ⇒ Unit): Unit =
      f(src.runWith(TestSink.probe(system)))
  }

  def withIterator[T](start: Int = 0)(f: Source[Int, NotUsed] ⇒ T): T =
    f(Source.fromIterator(() ⇒ Iterator from start))

  def fromCollection[A](xs: Iterable[A])(f: TestSubscriber.Probe[A] ⇒ Unit): Unit =
    f(Source(xs).runWith(TestSink.probe(system)))

  def killActors(refs: ActorRef*): Unit = {
    val tp = TestProbe()
    refs.foreach { ref ⇒
      tp watch ref
      tp.send(ref, PoisonPill)
      tp.expectTerminated(ref)
    }
  }
} 
Example 30
Source File: ReliableFSMSpec.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.akkapersistence

import akka.actor.{ActorSystem, FSM, PoisonPill}
import akka.persistence._
import akka.testkit._
import org.scalatest._
import rhttpc.akkapersistence.impl._
import rhttpc.client.ReliableClientBaseSpec

import scala.concurrent.Await
import scala.concurrent.duration._

class ReliableFSMSpec
  extends TestKit(ActorSystem("ReliableFSMSpec"))
  with ReliableClientBaseSpec with ImplicitSender with Matchers {

  it should "save snapshot after message publication and reply with StateSaved" in { fixture =>
    createAndSendFoo(fixture, "12")
  }

  it should "recover in correct state not saving snapshot one more time" in { fixture =>
    val id = "123"
    val actor = createAndSendFoo(fixture, id)
    val snapshot = actor.underlyingActor.savedSnapshots.head

    watch(actor)
    actor ! PoisonPill
    expectTerminated(actor)

    val recovered = TestActorRef[FooBarActor](FooBarActor.props(id, fixture.client))
    recovered ! NotifyAboutRecoveryCompleted
    recovered.underlyingActor.recover(snapshot)
    expectMsg(RecoveryCompleted)

    Thread.sleep(500)
    recovered.underlyingActor.savedSnapshots shouldBe empty

    fixture.transport.replySubscriptionPromise.success("foo")
    awaitCond(recovered.underlyingActor.savedSnapshots.size == 1)
    Await.result(fixture.transport.ackOnReplySubscriptionFuture, 3 seconds)

    recovered ! CurrentState
    expectMsg(FooState)
  }

  def createAndSendFoo(fixture: FixtureParam, id: String): TestActorRef[FooBarActor] = {
    val actor = TestActorRef[FooBarActor](FooBarActor.props(id, fixture.client))
    actor ! SendMsg("foo")
    Thread.sleep(500)
    actor.underlyingActor.savedSnapshots shouldBe empty
    fixture.transport.publicationPromise.success(Unit)
    awaitCond(actor.underlyingActor.savedSnapshots.size == 1)
    expectMsg(StateSaved)
    actor
  }
}

trait MockReliableFSM[S, D] extends AbstractReliableFSM[S, D] with MockSnapshotter[S, D]

trait MockSnapshotter[S, D] extends AbstractSnapshotter { this: FSM[S, D] =>
  @volatile var savedSnapshots: List[SnapshotWithSeqNr] = List.empty

  override def saveSnapshotWithSeqNr(snapshot: Any, seqNr: Long): Unit = {
    savedSnapshots = SnapshotWithSeqNr(snapshot, seqNr) :: savedSnapshots
    self ! SaveSnapshotSuccess(SnapshotMetadata(persistenceId, seqNr))
  }

  def recover(snap: SnapshotWithSeqNr) = {
    self ! SnapshotOffer(SnapshotMetadata(persistenceId, snap.seqNr), snap.snapshot)
  }

  protected def handleRecover: Receive = {
    val handleOfferAndThanSendCompleted: Receive = {
      case offer: SnapshotOffer =>
        receiveRecover(offer)
        self ! RecoveryCompleted
    }
    handleOfferAndThanSendCompleted orElse receiveRecover
  }

  whenUnhandled {
    case Event(event, _) if handleRecover.isDefinedAt(event) =>
      handleRecover(event)
      stay()
  }

  override def deleteSnapshots(criteria: SnapshotSelectionCriteria): Unit = {}
}

case class SnapshotWithSeqNr(snapshot: Any, seqNr: Long) 
Example 31
Source File: Replica.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package kvstore

import akka.actor.{ OneForOneStrategy, Props, ActorRef, Actor }
import kvstore.Arbiter._
import scala.collection.immutable.Queue
import akka.actor.SupervisorStrategy.Restart
import scala.annotation.tailrec
import akka.pattern.{ ask, pipe }
import akka.actor.Terminated
import scala.concurrent.duration._
import akka.actor.PoisonPill
import akka.actor.OneForOneStrategy
import akka.actor.SupervisorStrategy
import akka.util.Timeout

object Replica {
  sealed trait Operation {
    def key: String
    def id: Long
  }
  case class Insert(key: String, value: String, id: Long) extends Operation
  case class Remove(key: String, id: Long) extends Operation
  case class Get(key: String, id: Long) extends Operation

  sealed trait OperationReply
  case class OperationAck(id: Long) extends OperationReply
  case class OperationFailed(id: Long) extends OperationReply
  case class GetResult(key: String, valueOption: Option[String], id: Long) extends OperationReply

  def props(arbiter: ActorRef, persistenceProps: Props): Props = Props(new Replica(arbiter, persistenceProps))
}

class Replica(val arbiter: ActorRef, persistenceProps: Props) extends Actor {
  import Replica._
  import Replicator._
  import Persistence._
  import context.dispatcher

  
  val replica: Receive = {
    case _ =>
  }

  // Assumption added so this skeleton compiles: start in the replica role.
  // The full assignment switches between leader and replica after joining the arbiter.
  def receive: Receive = replica

} 
Example 32
Source File: ActorWaitSpec.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorSystem, PoisonPill, Props}
import akka.pattern.ask
import akka.testkit.TestKit
import akka.util.Timeout
import com.webtrends.harness.utils.ActorWaitHelper
import org.specs2.mutable.SpecificationLike

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class WaitedOnActor extends Actor with ActorWaitHelper {
  def receive: Receive = {
    case "message" => sender ! "waitedResponse"
  }
}

class WaitActor extends Actor with ActorWaitHelper {
  implicit val timeout = Timeout(5000, TimeUnit.MILLISECONDS)
  val waited = awaitActor(Props[WaitedOnActor])

  def receive: Receive = {
    case "message" => sender ! "response"
    case "waited" => sender ! Await.result((waited ? "message").mapTo[String], Duration(5, "seconds"))
  }
}

class ActorWaitSpec extends TestKit(ActorSystem("wait-spec")) with SpecificationLike {
  implicit val timeout = Timeout(5000, TimeUnit.MILLISECONDS)
  val waitActor = ActorWaitHelper.awaitActor(Props[WaitActor], system)

  sequential

  "ActorWaitSpec" should {
    "await the WaitActor successfully " in {
      Await.result((waitActor ? "message").mapTo[String], Duration(5, "seconds")) must beEqualTo("response")
    }

    "the WaitActor's awaited actor must have come up " in {
      Await.result((waitActor ? "waited").mapTo[String], Duration(5, "seconds")) must beEqualTo("waitedResponse")
    }
  }

  step {
    waitActor ! PoisonPill
  }
} 
Example 33
Source File: LeveldbDeletionActor.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.log.leveldb

import java.io.Closeable

import akka.actor.Actor
import akka.actor.PoisonPill
import akka.actor.Props
import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog._

import org.iq80.leveldb.DB
import org.iq80.leveldb.ReadOptions
import org.iq80.leveldb.WriteOptions

import scala.annotation.tailrec
import scala.concurrent.Promise

private object LeveldbDeletionActor {
  case object DeleteBatch

  def props(leveldb: DB, leveldbReadOptions: ReadOptions, leveldbWriteOptions: WriteOptions, batchSize: Int, toSequenceNr: Long, promise: Promise[Unit]): Props =
    Props(new LeveldbDeletionActor(leveldb, leveldbReadOptions, leveldbWriteOptions, batchSize, toSequenceNr, promise))
}

private class LeveldbDeletionActor(
  val leveldb: DB,
  val leveldbReadOptions: ReadOptions,
  val leveldbWriteOptions: WriteOptions,
  batchSize: Int,
  toSequenceNr: Long,
  promise: Promise[Unit])
  extends Actor with WithBatch {

  import LeveldbDeletionActor._

  val eventKeyIterator: CloseableIterator[EventKey] = newEventKeyIterator

  override def preStart() = self ! DeleteBatch

  override def postStop() = eventKeyIterator.close()

  override def receive = {
    case DeleteBatch =>
      withBatch { batch =>
        eventKeyIterator.take(batchSize).foreach { eventKey =>
          batch.delete(eventKeyBytes(eventKey.classifier, eventKey.sequenceNr))
        }
      }
      if (eventKeyIterator.hasNext) {
        self ! DeleteBatch
      } else {
        promise.success(())
        self ! PoisonPill
      }
  }

  private def newEventKeyIterator: CloseableIterator[EventKey] = {
    new Iterator[EventKey] with Closeable {
      val iterator = leveldb.iterator(leveldbReadOptions.snapshot(leveldb.getSnapshot))
      iterator.seek(eventKeyBytes(EventKey.DefaultClassifier, 1L))

      @tailrec
      override def hasNext: Boolean = {
        val key = eventKey(iterator.peekNext().getKey)
        key != eventKeyEnd &&
          (key.sequenceNr <= toSequenceNr || {
            iterator.seek(eventKeyBytes(key.classifier + 1, 1L))
            hasNext
          })
      }

      override def next() = eventKey(iterator.next().getKey)
      override def close() = {
        iterator.close()
        leveldbReadOptions.snapshot().close()
      }
    }
  }
} 
Example 34
Source File: HttpRequestRecorderSpec.scala    From rokku   with Apache License 2.0 5 votes vote down vote up
package com.ing.wbaa.rokku.proxy.persistence

import java.net.InetAddress

import akka.actor.{ ActorSystem, PoisonPill, Props }
import akka.http.scaladsl.model.HttpHeader.ParsingResult
import akka.http.scaladsl.model._
import akka.testkit.{ ImplicitSender, TestKit }
import com.ing.wbaa.rokku.proxy.data._
import com.ing.wbaa.rokku.proxy.persistence.HttpRequestRecorder.{ ExecutedRequestCmd, LatestRequests, LatestRequestsResult }
import org.scalatest.BeforeAndAfterAll
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AnyWordSpecLike

import scala.collection.immutable

class HttpRequestRecorderSpec extends TestKit(ActorSystem("RequestRecorderTest")) with ImplicitSender
  with AnyWordSpecLike with Diagrams with BeforeAndAfterAll {

  override def afterAll: Unit = {
    TestKit.shutdownActorSystem(system)
  }

  private def convertStringsToAkkaHeaders(headers: List[String]): immutable.Seq[HttpHeader] = headers.map { p =>
    val kv = p.split("=")
    HttpHeader.parse(kv(0), kv(1)) match {
      case ParsingResult.Ok(header, _) => header
      case ParsingResult.Error(error)  => throw new Exception(s"Unable to convert to HttpHeader: ${error.summary}")
    }
  }

  val requestRecorder = system.actorOf(Props(classOf[HttpRequestRecorder]), "localhost-1")

  val headers = List("Remote-Address=0:0:0:0:0:0:0:1:58170", "Host=localhost:8987",
    "X-Amz-Content-SHA256=02502914aca52472205417e4c418ee499ba39ca1b283d99da26e295df2eccf32",
    "User-Agent=aws-cli/1.16.30 Python/2.7.5 Linux/3.10.0-862.14.4.el7.x86_64 botocore/1.12.20",
    "Content-MD5=Wf7l+rCPsVw8eqc34kVJ1g==",
    "Authorization=AWS4-HMAC-SHA256 Credential=6r24619bHVWvrxR5AMHNkGZ6vNRXoGCP/20190704/us-east-1/s3/aws4_request",
    "SignedHeaders=content-md5;host;x-amz-content-sha256;x-amz-date;x-amz-security-token",
    "Signature=271dda503da6fcf04cc058cb514b28a6d522a9b712ab553bfb88fb7814ab082f")

  val httpRequest = HttpRequest(
    HttpMethods.PUT,
    Uri("http://127.0.0.1:8010/home/testuser/file34"),
    convertStringsToAkkaHeaders(headers),
    HttpEntity.Empty.withContentType(ContentTypes.`application/octet-stream`).toString(),
    HttpProtocols.`HTTP/1.1`
  )
  val userSTS = User(UserName("okUser"), Set(UserGroup("okGroup")), AwsAccessKey("accesskey"), AwsSecretKey("secretkey"), UserAssumeRole(""))
  val clientIPAddress = RemoteAddress(InetAddress.getByName("localhost"), Some(1234))

  "RequestRecorder" should {
    "persist Http request event" in {
      requestRecorder ! ExecutedRequestCmd(httpRequest, userSTS, clientIPAddress)
      requestRecorder ! LatestRequests(1)
      expectMsg(LatestRequestsResult(List(ExecutedRequestEvt(httpRequest, userSTS, clientIPAddress))))
      requestRecorder ! PoisonPill

      val requestRecorder1 = system.actorOf(Props(classOf[HttpRequestRecorder]), "localhost-2")
      requestRecorder1 ! LatestRequests(1)
      expectMsg(LatestRequestsResult(List(ExecutedRequestEvt(httpRequest, userSTS, clientIPAddress))))
    }
  }

} 
Example 35
Source File: SharedFlowManager.scala    From trucking-iot   with Apache License 2.0 5 votes vote down vote up
package com.orendainx.trucking.simulator.flows

import akka.actor.{ActorRef, PoisonPill, Props, Terminated}
import com.orendainx.trucking.simulator.flows.FlowManager.ShutdownFlow
import com.orendainx.trucking.simulator.transmitters.DataTransmitter.Transmit


object SharedFlowManager {
  def props(transmitter: ActorRef) =
    Props(new SharedFlowManager(transmitter))
}
class SharedFlowManager(transmitter: ActorRef) extends FlowManager {

  def receive = {
    case msg: Transmit => transmitter ! msg

    case ShutdownFlow =>
      transmitter ! PoisonPill
      context watch transmitter

    case Terminated(`transmitter`) =>
      context stop self
  }
} 
Example 36
Source File: TruckAndTrafficFlowManager.scala    From trucking-iot   with Apache License 2.0 5 votes vote down vote up
package com.orendainx.trucking.simulator.flows

import akka.actor.{ActorRef, PoisonPill, Props, Terminated}
import com.orendainx.trucking.commons.models.{TrafficData, TruckData}
import com.orendainx.trucking.simulator.flows.FlowManager.ShutdownFlow
import com.orendainx.trucking.simulator.transmitters.DataTransmitter.Transmit


object TruckAndTrafficFlowManager {

  def props(truckTransmitter: ActorRef, trafficTransmitter: ActorRef) =
    Props(new TruckAndTrafficFlowManager(truckTransmitter, trafficTransmitter))
}

class TruckAndTrafficFlowManager(truckTransmitter: ActorRef, trafficTransmitter: ActorRef) extends FlowManager {

  var transmittersTerminated = 0

  def receive = {
    case Transmit(data: TruckData) => truckTransmitter ! Transmit(data)
    case Transmit(data: TrafficData) => trafficTransmitter ! Transmit(data)

    case ShutdownFlow =>
      truckTransmitter ! PoisonPill
      trafficTransmitter ! PoisonPill
      context watch truckTransmitter
      context watch trafficTransmitter

    case Terminated(_) =>
      transmittersTerminated += 1
      if (transmittersTerminated == 2) context stop self
  }
} 
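The two flow managers above hand-roll the shutdown handshake: send PoisonPill, watch, and count Terminated messages. The same result can be had with akka.pattern.gracefulStop, which sends PoisonPill by default and completes a Future once the target terminates. A minimal sketch under that assumption (the object and method names are made up; the parameter names mirror the example above):

import akka.actor.{ ActorRef, PoisonPill }
import akka.pattern.gracefulStop

import scala.concurrent.{ ExecutionContext, Future }
import scala.concurrent.duration._

object TransmitterShutdown {
  // Stops both transmitters and completes when both have terminated,
  // without hand-rolling Terminated counters as the flow managers above do.
  def stopTransmitters(truckTransmitter: ActorRef, trafficTransmitter: ActorRef)
                      (implicit ec: ExecutionContext): Future[Boolean] =
    for {
      truckStopped   <- gracefulStop(truckTransmitter, 5.seconds, PoisonPill)
      trafficStopped <- gracefulStop(trafficTransmitter, 5.seconds, PoisonPill)
    } yield truckStopped && trafficStopped
}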
Example 37
Source File: AutomaticCoordinator.scala    From trucking-iot   with Apache License 2.0 5 votes vote down vote up
package com.orendainx.trucking.simulator.coordinators

import akka.actor.{ActorLogging, ActorRef, PoisonPill, Props, Terminated}
import com.orendainx.trucking.simulator.coordinators.AutomaticCoordinator.TickGenerator
import com.orendainx.trucking.simulator.coordinators.GeneratorCoordinator.AcknowledgeTick
import com.orendainx.trucking.simulator.flows.FlowManager
import com.orendainx.trucking.simulator.generators.DataGenerator
import com.typesafe.config.Config

import scala.collection.mutable
import scala.concurrent.duration._
import scala.util.Random


object AutomaticCoordinator {
  // Reconstructed companion members: the object wrapper and the TickGenerator
  // message were stripped from this listing, but both are required by the code below.
  case class TickGenerator(generator: ActorRef)

  def props(eventCount: Int, generators: Seq[ActorRef], flowManager: ActorRef)(implicit config: Config) =
    Props(new AutomaticCoordinator(eventCount, generators, flowManager))
}

class AutomaticCoordinator(eventCount: Int, generators: Seq[ActorRef], flowManager: ActorRef)(implicit config: Config) extends GeneratorCoordinator with ActorLogging {

  // For receive messages and an execution context
  import context.dispatcher

  // Event delay settings, and initialize a counter for each data generator
  val eventDelay = config.getInt("generator.event-delay")
  val eventDelayJitter = config.getInt("generator.event-delay-jitter")
  val generateCounters = mutable.Map(generators.map((_, 0)): _*)

  // Insert each new generator into the simulation (at a random scheduled point) and begin "ticking"
  generators.foreach { generator =>
    context.system.scheduler.scheduleOnce(Random.nextInt(eventDelay + eventDelayJitter).milliseconds, self, TickGenerator(generator))
  }

  def receive = {
    case AcknowledgeTick(generator) =>
      self ! TickGenerator(generator) // Each ack triggers another tick

    case TickGenerator(generator) =>
      generateCounters.update(generator, generateCounters(generator)+1)

      if (generateCounters(generator) <= eventCount) {
        context.system.scheduler.scheduleOnce((eventDelay + Random.nextInt(eventDelayJitter)).milliseconds, generator, DataGenerator.GenerateData)
      } else {
        // Kill the individual generator, since we are done with it.
        generator ! PoisonPill

        // If all other generators have met their count, tell flow manager to shutdown
        if (!generateCounters.values.exists(_ <= eventCount)) {
          flowManager ! FlowManager.ShutdownFlow
          context watch flowManager
        }
      }

    // Once the flow manager and its transmitters terminate, shut it all down
    case Terminated(`flowManager`) =>
      context.system.terminate()
  }
} 
Example 38
Source File: SubscriberImpl.scala    From zio-akka-cluster   with Apache License 2.0 5 votes vote down vote up
package zio.akka.cluster.pubsub.impl

import akka.actor.{ Actor, ActorRef, ActorSystem, PoisonPill, Props }
import akka.cluster.pubsub.DistributedPubSubMediator.{ Subscribe, SubscribeAck }
import zio.Exit.{ Failure, Success }
import zio.akka.cluster.pubsub.impl.SubscriberImpl.SubscriberActor
import zio.akka.cluster.pubsub.{ MessageEnvelope, Subscriber }
import zio.{ Promise, Queue, Runtime, Task }

private[pubsub] trait SubscriberImpl[A] extends Subscriber[A] {
  val getActorSystem: ActorSystem
  val getMediator: ActorRef

  override def listenWith(topic: String, queue: Queue[A], group: Option[String] = None): Task[Unit] =
    for {
      rts        <- Task.runtime
      subscribed <- Promise.make[Nothing, Unit]
      _          <- Task(
                      getActorSystem.actorOf(Props(new SubscriberActor[A](getMediator, topic, group, rts, queue, subscribed)))
                    )
      _          <- subscribed.await
    } yield ()
}

object SubscriberImpl {
  private[impl] class SubscriberActor[A](
    mediator: ActorRef,
    topic: String,
    group: Option[String],
    rts: Runtime[Any],
    queue: Queue[A],
    subscribed: Promise[Nothing, Unit]
  ) extends Actor {

    mediator ! Subscribe(topic, group, self)

    def receive: PartialFunction[Any, Unit] = {
      case SubscribeAck(_)      =>
        rts.unsafeRunSync(subscribed.succeed(()))
        ()
      case MessageEnvelope(msg) =>
        rts.unsafeRunAsync(queue.offer(msg.asInstanceOf[A])) {
          case Success(_)     => ()
          case Failure(cause) => if (cause.interrupted) self ! PoisonPill // stop listening if the queue was shut down
        }
    }
  }
} 
Example 39
Source File: Cluster.scala    From zio-akka-cluster   with Apache License 2.0 5 votes vote down vote up
package zio.akka.cluster

import akka.actor.{ Actor, ActorSystem, Address, PoisonPill, Props }
import akka.cluster.ClusterEvent._
import zio.Exit.{ Failure, Success }
import zio.{ Has, Queue, Runtime, Task, ZIO }

object Cluster {

  private val cluster: ZIO[Has[ActorSystem], Throwable, akka.cluster.Cluster] =
    for {
      actorSystem <- ZIO.access[Has[ActorSystem]](_.get)
      cluster     <- Task(akka.cluster.Cluster(actorSystem))
    } yield cluster

  
  def clusterEventsWith(
    queue: Queue[ClusterDomainEvent],
    initialStateAsEvents: Boolean = false
  ): ZIO[Has[ActorSystem], Throwable, Unit] =
    for {
      rts         <- Task.runtime
      actorSystem <- ZIO.access[Has[ActorSystem]](_.get)
      _           <- Task(actorSystem.actorOf(Props(new SubscriberActor(rts, queue, initialStateAsEvents))))
    } yield ()

  private[cluster] class SubscriberActor(
    rts: Runtime[Any],
    queue: Queue[ClusterDomainEvent],
    initialStateAsEvents: Boolean
  ) extends Actor {

    val initialState: SubscriptionInitialStateMode =
      if (initialStateAsEvents) InitialStateAsEvents else InitialStateAsSnapshot
    akka.cluster.Cluster(context.system).subscribe(self, initialState, classOf[ClusterDomainEvent])

    def receive: PartialFunction[Any, Unit] = {
      case ev: ClusterDomainEvent =>
        rts.unsafeRunAsync(queue.offer(ev)) {
          case Success(_)     => ()
          case Failure(cause) => if (cause.interrupted) self ! PoisonPill // stop listening if the queue was shut down
        }
      case _                      =>
    }
  }

} 
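The subscriber actor above poisons itself only when the ZIO queue it feeds has been shut down (the interrupted Failure branch). A minimal usage sketch under that assumption, in ZIO 1.x style to match the imports above: wire clusterEventsWith to a queue, consume an event, then shut the queue down so the next offered event is interrupted and the actor stops itself.

import akka.actor.ActorSystem
import akka.cluster.ClusterEvent.ClusterDomainEvent
import zio.akka.cluster.Cluster
import zio.{ Has, Queue, ZIO }

object ClusterEventsUsage {
  // Take a single cluster event, then release the subscriber by shutting the queue down.
  val listenOnce: ZIO[Has[ActorSystem], Throwable, ClusterDomainEvent] =
    for {
      queue <- Queue.unbounded[ClusterDomainEvent]
      _     <- Cluster.clusterEventsWith(queue, initialStateAsEvents = true)
      event <- queue.take
      _     <- queue.shutdown // the next offer fails as interrupted, so the actor sends itself PoisonPill
    } yield event
}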
Example 40
Source File: TestSpec.scala    From akka-http-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.stream.testkit.TestSubscriber
import akka.stream.testkit.scaladsl.TestSink
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest._
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatestplus.play.guice.GuiceOneServerPerSuite
import play.api.inject.BindingKey
import play.api.test.WsTestClient

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
import scala.util.Try

class TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with OptionValues
    with TryValues
    with ScalaFutures
    with WsTestClient
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with GuiceOneServerPerSuite {

  def getComponent[A: ClassTag] = app.injector.instanceOf[A]

  def getAnnotatedComponent[A](name: String)(implicit ct: ClassTag[A]): A =
    app.injector.instanceOf[A](BindingKey(ct.runtimeClass.asInstanceOf[Class[A]]).qualifiedWith(name))

  // set the port number of the HTTP server
  override lazy val port: Int = 8080
  implicit val timeout: Timeout = 10.seconds
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 30.seconds, interval = 300.millis)
  implicit val system: ActorSystem = getComponent[ActorSystem]
  implicit val ec: ExecutionContext = getComponent[ExecutionContext]
  implicit val mat: Materializer = getComponent[Materializer]

  // ================================== Supporting Operations ====================================
  implicit class PimpedByteArray(self: Array[Byte]) {
    def getString: String = new String(self)
  }

  implicit class PimpedFuture[T](self: Future[T]) {
    def toTry: Try[T] = Try(self.futureValue)
  }

  implicit class SourceOps[A](src: Source[A, _]) {
    def testProbe(f: TestSubscriber.Probe[A] => Unit): Unit =
      f(src.runWith(TestSink.probe(system)))
  }

  def killActors(actors: ActorRef*): Unit = {
    val tp = TestProbe()
    actors.foreach { (actor: ActorRef) =>
      tp watch actor
      actor ! PoisonPill
      tp.expectTerminated(actor)
    }
  }

  override protected def beforeEach(): Unit = {
  }
} 
Example 41
Source File: ScanRequestActor.scala    From project-matt   with MIT License 5 votes vote down vote up
package org.datafy.aws.app.matt.app

import akka.actor.{Actor, Props, PoisonPill}
import org.datafy.aws.app.matt.classifiers.BaseClassifier
import org.datafy.aws.app.matt.models.FullScanStats
import akka.event.Logging

class ScanRequestActor(val bucketName: String = null, val s3Prefix: Option[String] = None)
  extends Actor  {

  val log = Logging(context.system, this)

  var execCount = 1

  import ScanRequestActor._

  if (bucketName == null) {
    throw new NullPointerException(s"Constructor AWS S3 BucketName cannot be null or empty")
    context.system.terminate()
  }

  val scanResultsActor = context.actorOf(ScanResultsActor.props, "scanResultsActor")

  def receive = {
    case Initialize =>
      if(execCount > 0) {
        execCount -= 1
        log.info(s"Initialied S3 Scan Request on " +
          s"Bucket: ${bucketName} and Prefix: ${s3Prefix.orNull} ")
        // do bucket scanning here and send message to ScanResultsActor
        val scanRequestMessage = BaseClassifier.setS3ScanInputPath(bucketName, s3Prefix.orNull)
        self ! ScanResultsActor.ScanResultsMessage("Done")
      } else {
        self ! PoisonPill
      }
  }
}

object ScanRequestActor {
  val props: Props = Props[ScanRequestActor]
  case object Initialize
  case class ScanRequestMessage(fullScanStats: FullScanStats)
} 
Example 42
Source File: TestSpec.scala    From akka-serialization-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.serialization.SerializationExtension
import akka.stream.{ ActorMaterializer, Materializer }
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.prop.PropertyChecks
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, GivenWhenThen, Matchers }

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try

trait TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with ScalaFutures
    with BeforeAndAfterAll
    with Eventually
    with PropertyChecks
    with AkkaPersistenceQueries
    with AkkaStreamUtils
    with InMemoryCleanup {

  implicit val timeout: Timeout = Timeout(10.seconds)
  implicit val system: ActorSystem = ActorSystem()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val mat: Materializer = ActorMaterializer()
  implicit val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 50.seconds)
  val serialization = SerializationExtension(system)

  implicit class FutureToTry[T](f: Future[T]) {
    def toTry: Try[T] = Try(f.futureValue)
  }

  def killActors(actors: ActorRef*): Unit = {
    val probe = TestProbe()
    actors.foreach { actor ⇒
      probe watch actor
      actor ! PoisonPill
      probe expectTerminated actor
    }
  }

  override protected def afterAll(): Unit = {
    system.terminate()
    system.whenTerminated.toTry should be a 'success
  }
} 
Example 43
Source File: MarathonLauncherActor.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.actor

import akka.actor.{Actor, Cancellable, PoisonPill}
import com.stratio.sparta.serving.core.marathon.MarathonService
import com.stratio.sparta.serving.core.actor.LauncherActor.Start
import com.stratio.sparta.serving.core.actor.StatusActor.ResponseStatus
import com.stratio.sparta.serving.core.config.SpartaConfig
import com.stratio.sparta.serving.core.constants.AppConstant._
import com.stratio.sparta.serving.core.models.enumerators.PolicyStatusEnum._
import com.stratio.sparta.serving.core.models.policy.{PhaseEnum, PolicyErrorModel, PolicyModel, PolicyStatusModel}
import com.stratio.sparta.serving.core.models.submit.SubmitRequest
import com.stratio.sparta.serving.core.services.ClusterCheckerService
import com.stratio.sparta.serving.core.utils._
import org.apache.curator.framework.CuratorFramework

import scala.util.{Failure, Success, Try}

class MarathonLauncherActor(val curatorFramework: CuratorFramework) extends Actor
  with LauncherUtils with SchedulerUtils with SparkSubmitUtils with ClusterListenerUtils with ArgumentsUtils
  with PolicyStatusUtils with RequestUtils {

  private val clusterCheckerService = new ClusterCheckerService(curatorFramework)
  private val checkersPolicyStatus = scala.collection.mutable.ArrayBuffer.empty[Cancellable]

  override def receive: PartialFunction[Any, Unit] = {
    case Start(policy: PolicyModel) => initializeSubmitRequest(policy)
    case ResponseStatus(status) => loggingResponsePolicyStatus(status)
    case _ => log.info("Unrecognized message in Marathon Launcher Actor")
  }

  override def postStop(): Unit = checkersPolicyStatus.foreach(_.cancel())

  def initializeSubmitRequest(policy: PolicyModel): Unit = {
    Try {
      log.info(s"Initializing options for submit Marathon application associated to policy: ${policy.name}")
      val zookeeperConfig = getZookeeperConfig
      val clusterConfig = SpartaConfig.getClusterConfig(Option(ConfigMesos)).get
      val master = clusterConfig.getString(Master).trim
      val driverFile = extractMarathonDriverSubmit(policy, DetailConfig, SpartaConfig.getHdfsConfig)
      val pluginsFiles = pluginsJars(policy)
      val driverArguments =
        extractDriverArguments(policy, driverFile, clusterConfig, zookeeperConfig, ConfigMesos, pluginsFiles)
      val (sparkSubmitArguments, sparkConfigurations) =
        extractSubmitArgumentsAndSparkConf(policy, clusterConfig, pluginsFiles)
      val submitRequest = SubmitRequest(policy.id.get, SpartaDriverClass, driverFile, master, sparkSubmitArguments,
        sparkConfigurations, driverArguments, ConfigMesos, killUrl(clusterConfig))
      val detailExecMode = getDetailExecutionMode(policy, clusterConfig)

      createRequest(submitRequest).getOrElse(throw new Exception("Impossible to create submit request in persistence"))

      (new MarathonService(context, curatorFramework, policy, submitRequest), detailExecMode)
    } match {
      case Failure(exception) =>
        val information = s"Error when initializing Sparta Marathon App options"
        log.error(information, exception)
        updateStatus(PolicyStatusModel(id = policy.id.get, status = Failed, statusInfo = Option(information),
          lastError = Option(PolicyErrorModel(information, PhaseEnum.Execution, exception.toString))
        ))
        self ! PoisonPill
      case Success((marathonApp, detailExecMode)) =>
        val information = "Sparta Marathon App configurations initialized correctly"
        log.info(information)
        updateStatus(PolicyStatusModel(id = policy.id.get, status = NotStarted,
          statusInfo = Option(information), lastExecutionMode = Option(detailExecMode)))
        marathonApp.launch(detailExecMode)
        addMarathonContextListener(policy.id.get, policy.name, context, Option(self))
        checkersPolicyStatus += scheduleOneTask(AwaitPolicyChangeStatus, DefaultAwaitPolicyChangeStatus)(
          clusterCheckerService.checkPolicyStatus(policy, self, context))
    }
  }
} 
Example 44
Source File: LocalLauncherActor.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.actor

import akka.actor.{Actor, PoisonPill}
import com.stratio.sparta.driver.factory.SparkContextFactory
import com.stratio.sparta.driver.service.StreamingContextService
import com.stratio.sparta.serving.core.actor.LauncherActor.Start
import com.stratio.sparta.serving.core.actor.StatusActor.ResponseStatus
import com.stratio.sparta.serving.core.constants.AppConstant
import com.stratio.sparta.serving.core.helpers.{JarsHelper, PolicyHelper, ResourceManagerLinkHelper}
import com.stratio.sparta.serving.core.models.enumerators.PolicyStatusEnum
import com.stratio.sparta.serving.core.models.policy.{PhaseEnum, PolicyErrorModel, PolicyModel, PolicyStatusModel}
import com.stratio.sparta.serving.core.utils.{LauncherUtils, PolicyConfigUtils, PolicyStatusUtils}
import org.apache.curator.framework.CuratorFramework
import org.apache.spark.streaming.StreamingContext

import scala.util.{Failure, Success, Try}

class LocalLauncherActor(streamingContextService: StreamingContextService, val curatorFramework: CuratorFramework)
  extends Actor with PolicyConfigUtils with LauncherUtils with PolicyStatusUtils{

  override def receive: PartialFunction[Any, Unit] = {
    case Start(policy: PolicyModel) => doInitSpartaContext(policy)
    case ResponseStatus(status) => loggingResponsePolicyStatus(status)
    case _ => log.info("Unrecognized message in Local Launcher Actor")
  }

  private def doInitSpartaContext(policy: PolicyModel): Unit = {
    val jars = PolicyHelper.jarsFromPolicy(policy)

    jars.foreach(file => JarsHelper.addToClasspath(file))
    Try {
      val startingInfo = s"Starting Sparta local job for policy"
      log.info(startingInfo)
      updateStatus(PolicyStatusModel(
        id = policy.id.get,
        status = PolicyStatusEnum.NotStarted,
        statusInfo = Some(startingInfo),
        lastExecutionMode = Option(AppConstant.LocalValue)
      ))
      val (spartaWorkflow, ssc) = streamingContextService.localStreamingContext(policy, jars)
      spartaWorkflow.setup()
      ssc.start()
      val startedInformation = s"The Sparta local job was started correctly"
      log.info(startedInformation)
      updateStatus(PolicyStatusModel(
        id = policy.id.get,
        status = PolicyStatusEnum.Started,
        statusInfo = Some(startedInformation),
        resourceManagerUrl = ResourceManagerLinkHelper.getLink(executionMode(policy), policy.monitoringLink)
      ))
      ssc.awaitTermination()
      spartaWorkflow.cleanUp()
    } match {
      case Success(_) =>
        val information = s"Stopped correctly Sparta local job"
        log.info(information)
        updateStatus(PolicyStatusModel(
          id = policy.id.get, status = PolicyStatusEnum.Stopped, statusInfo = Some(information)))
        self ! PoisonPill
      case Failure(exception) =>
        val information = s"Error initiating Sparta local job"
        log.error(information, exception)
        updateStatus(PolicyStatusModel(
          id = policy.id.get,
          status = PolicyStatusEnum.Failed,
          statusInfo = Option(information),
          lastError = Option(PolicyErrorModel(information, PhaseEnum.Execution, exception.toString))
        ))
        SparkContextFactory.destroySparkContext()
        self ! PoisonPill
    }
  }
} 
Example 45
Source File: ExecutorSystem.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.cluster.appmaster

import akka.actor.{ActorRef, Address, PoisonPill}

import org.apache.gearpump.cluster.scheduler.Resource
import org.apache.gearpump.cluster.worker.WorkerId
import org.apache.gearpump.util.ActorSystemBooter.BindLifeCycle

case class WorkerInfo(workerId: WorkerId, ref: ActorRef)


case class ExecutorSystem(executorSystemId: Int, address: Address, daemon:
    ActorRef, resource: Resource, worker: WorkerInfo) {
  def bindLifeCycleWith(actor: ActorRef): Unit = {
    daemon ! BindLifeCycle(actor)
  }

  def shutdown(): Unit = {
    daemon ! PoisonPill
  }
} 
Example 46
Source File: DeadLetterTest.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.channels

import akka.testkit.{ ImplicitSender, TestProbe, TestKit }
import akka.actor.{ PoisonPill, Props, DeadLetter, ActorSystem }
import org.scalatest.{WordSpecLike, BeforeAndAfterAll, MustMatchers}
import java.util.Date

class DeadLetterTest extends TestKit(ActorSystem("DeadLetterTest"))
  with WordSpecLike with BeforeAndAfterAll with MustMatchers
  with ImplicitSender {

  override def afterAll()  {
    system.terminate()
  }

  "DeadLetter" must {
    "catch messages send to deadLetters" in {
      val deadLetterMonitor = TestProbe()

      system.eventStream.subscribe(
        deadLetterMonitor.ref,
        classOf[DeadLetter])

      val msg = new StateEvent(new Date(), "Connected")
      system.deadLetters ! msg

      val dead = deadLetterMonitor.expectMsgType[DeadLetter]
      dead.message must be(msg)
      dead.sender must be(testActor)
      dead.recipient must be(system.deadLetters)
    }
    "catch deadLetter messages send to deadLetters" in {

      val deadLetterMonitor = TestProbe()
      val actor = system.actorOf(Props[EchoActor], "echo")

      system.eventStream.subscribe(
        deadLetterMonitor.ref,
        classOf[DeadLetter])

      val msg = new Order("me", "Akka in Action", 1)
      val dead = DeadLetter(msg, testActor, actor)
      system.deadLetters ! dead

      deadLetterMonitor.expectMsg(dead)

      system.stop(actor)

    }

    "catch messages send to terminated Actor" in {

      val deadLetterMonitor = TestProbe()

      system.eventStream.subscribe(
        deadLetterMonitor.ref,
        classOf[DeadLetter])

      val actor = system.actorOf(Props[EchoActor], "echo")
      actor ! PoisonPill
      val msg = new Order("me", "Akka in Action", 1)
      actor ! msg

      val dead = deadLetterMonitor.expectMsgType[DeadLetter]
      dead.message must be(msg)
      dead.sender must be(testActor)
      dead.recipient must be(actor)

    }

  }
} 
Example 47
Source File: TicketSeller.scala    From 006877   with MIT License 5 votes vote down vote up
package com.goticks

import akka.actor.{ Actor, Props, PoisonPill }
object TicketSeller {
  def props(event: String) = Props(new TicketSeller(event))

  case class Add(tickets: Vector[Ticket])
  case class Buy(tickets: Int)
  case class Ticket(id: Int)
  case class Tickets(event: String,
                     entries: Vector[Ticket] = Vector.empty[Ticket])
  case object GetEvent
  case object Cancel

}


class TicketSeller(event: String) extends Actor {
  import TicketSeller._

  var tickets = Vector.empty[Ticket]

  def receive = {
    case Add(newTickets) => tickets = tickets ++ newTickets
    case Buy(nrOfTickets) =>
      val entries = tickets.take(nrOfTickets)
      if(entries.size >= nrOfTickets) {
        sender() ! Tickets(event, entries)
        tickets = tickets.drop(nrOfTickets)
      } else sender() ! Tickets(event)
    case GetEvent => sender() ! Some(BoxOffice.Event(event, tickets.size))
    case Cancel =>
      sender() ! Some(BoxOffice.Event(event, tickets.size))
      self ! PoisonPill
  }
} 
Example 48
Source File: TicketSeller.scala    From 006877   with MIT License 5 votes vote down vote up
package com.goticks

import akka.actor.{ Actor, Props, PoisonPill }
object TicketSeller {
  def props(event: String) = Props(new TicketSeller(event))

  case class Add(tickets: Vector[Ticket])
  case class Buy(tickets: Int)
  case class Ticket(id: Int)
  case class Tickets(event: String,
                     entries: Vector[Ticket] = Vector.empty[Ticket])
  case object GetEvent
  case object Cancel

}


class TicketSeller(event: String) extends Actor {
  import TicketSeller._

  var tickets = Vector.empty[Ticket]

  def receive = {
    case Add(newTickets) => tickets = tickets ++ newTickets
    case Buy(nrOfTickets) =>
      val entries = tickets.take(nrOfTickets).toVector
      if(entries.size >= nrOfTickets) {
        sender() ! Tickets(event, entries)
        tickets = tickets.drop(nrOfTickets)
      } else sender() ! Tickets(event)
    case GetEvent => sender() ! Some(BoxOffice.Event(event, tickets.size))
    case Cancel =>
      sender() ! Some(BoxOffice.Event(event, tickets.size))
      self ! PoisonPill
  }
} 
Example 49
Source File: SafePersistenceActorShutdownApp.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter6

import akka.actor.{ActorSystem, PoisonPill, Props}

object SafePersistenceActorShutdownApp extends App {
  val system = ActorSystem("safe-shutdown")
  val persistentActor1 = system.actorOf(Props[SamplePersistenceActor])
  val persistentActor2 = system.actorOf(Props[SamplePersistenceActor])

  persistentActor1 ! UserUpdate("foo", Add)
  persistentActor1 ! UserUpdate("foo", Add)
  persistentActor1 ! PoisonPill
  persistentActor2 ! UserUpdate("foo", Add)
  persistentActor2 ! UserUpdate("foo", Add)
  persistentActor2 ! ShutdownPersistentActor
} 
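This recipe exists because PoisonPill is risky for persistent actors: it is handled by the actor infrastructure rather than stashed, so the actor can stop before commands that were stashed while a persist() was in flight ever get processed. A user-defined message such as ShutdownPersistentActor goes through the normal stash and is handled in order. SamplePersistenceActor and ShutdownPersistentActor are defined elsewhere in the chapter; the sketch below only illustrates the general pattern with made-up names.

import akka.persistence.PersistentActor

// Illustrative names only; this is not the book's SamplePersistenceActor.
case object SafeShutdown

class JournalingActor extends PersistentActor {
  override def persistenceId: String = "journaling-actor"

  override def receiveRecover: Receive = {
    case _ => // rebuild state from replayed events here
  }

  override def receiveCommand: Receive = {
    case SafeShutdown =>
      context.stop(self) // handled after earlier commands and their persist callbacks complete
    case cmd: String =>
      persist(cmd)(_ => () /* update state from the persisted event here */)
  }
}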
Example 50
Source File: SocialMediaAggregator.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter10

import akka.actor.{Actor, ActorLogging, ActorRef, PoisonPill}
import akka.contrib.pattern.Aggregator

import scala.collection.mutable
import scala.concurrent.duration._

object SocialMediaAggregator {
  case object GetPosts
  case object ReturnPosts
  case class StartFetching(userId: String, period: FiniteDuration)
  case class StopFetching(userId: String)
  case class GetLatestPosts(userId: String)

  case class Post(title: String, content: String)
  case class LatestPostResult(socialNetwork: String, posts: Seq[Post])
  case class Report(list: List[LatestPostResult])
}

class SocialMediaAggregator(handlers: List[ActorRef]) extends Actor with Aggregator with ActorLogging {
  import SocialMediaAggregator._
  import context._

  val initBehavior : Receive = {
    case StartFetching(id, period) =>
      log.info(s"Fetching latest posts for $id")
      new LatestPostsAggregator(sender, id, period)
  }

  expectOnce(initBehavior)

  class LatestPostsAggregator(originalSender: ActorRef, userId: String, period: FiniteDuration) {
    val latestPosts = mutable.ArrayBuffer.empty[LatestPostResult]

    val returnPostCancel = context.system.scheduler.schedule(1.second, period, self, ReturnPosts)
    val getPostsCancel = context.system.scheduler.schedule(0.seconds, 400 millis, self, GetPosts)

    val behavior : Receive = {
      case GetPosts => handlers.foreach(_ ! GetLatestPosts(userId))
      case lpr : LatestPostResult => latestPosts += lpr
      case ReturnPosts =>
        originalSender ! Report(latestPosts.toList)
        latestPosts.clear()
      case StopFetching(id) =>
        log.info(s"Stopping latest posts fetching for $id")
        returnPostCancel.cancel()
        getPostsCancel.cancel()
        context.system.scheduler.scheduleOnce(5 seconds, self, PoisonPill)
    }

    expect(behavior)
  }

  override def postStop() = log.info(s"Stopped.")
} 
Example 51
Source File: Shutdown.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter1

import akka.actor.{PoisonPill, Props, ActorSystem, Actor}


object ShutdownApp extends App{
  val actorSystem = ActorSystem("HelloAkka")
  val shutdownActor1 = actorSystem.actorOf(Props[ShutdownActor], "shutdownActor1")
  shutdownActor1 ! "hello"
  shutdownActor1 ! PoisonPill
  shutdownActor1 ! "Are you there?"

  val shutdownActor2 = actorSystem.actorOf(Props[ShutdownActor], "shutdownActor2")
  shutdownActor2 ! "hello"
  shutdownActor2 ! Stop
  shutdownActor2 ! "Are you there?"

}

class ShutdownActor extends Actor {
  override def receive: Receive = {
    case msg:String => println(s"$msg")
    case Stop => context.stop(self)
  }
}

case object Stop
Example 52
Source File: ClusterSingletonApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter7

import akka.actor.{ActorSystem, PoisonPill, Props}
import akka.cluster.Cluster
import akka.cluster.singleton._
import scala.concurrent.duration._

object ClusterSingletonApplication extends App {
  val actorSystem = ActorSystem("ClusterSystem")
  val cluster = Cluster(actorSystem)

  val clusterSingletonSettings = ClusterSingletonManagerSettings(actorSystem)
  val clusterSingletonManager = ClusterSingletonManager.props(Props[ClusterAwareSimpleActor], PoisonPill, clusterSingletonSettings)
  actorSystem.actorOf(clusterSingletonManager, "singletonClusteAwareSimpleActor")

  val singletonSimpleActor = actorSystem.actorOf(ClusterSingletonProxy.props(
    singletonManagerPath = "/user/singletonClusteAwareSimpleActor",
    settings = ClusterSingletonProxySettings(actorSystem)),
    name = "singletonSimpleActorProxy")

  import actorSystem.dispatcher
  actorSystem.scheduler.schedule(10 seconds, 5 seconds, singletonSimpleActor, "TEST")
} 
Example 53
Source File: ClusterApp.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.cluster.bootstrap

import akka.actor.{ Actor, ActorLogging, ActorSystem, PoisonPill, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.singleton.{ ClusterSingletonManager, ClusterSingletonManagerSettings }
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.stream.ActorMaterializer

object ClusterApp {

  def main(args: Array[String]): Unit = {

    implicit val system = ActorSystem()
    implicit val materializer = ActorMaterializer()
    implicit val executionContext = system.dispatcher
    val cluster = Cluster(system)

    system.log.info("Starting Akka Management")
    AkkaManagement(system).start()
    ClusterBootstrap(system).start()

    system.actorOf(
      ClusterSingletonManager.props(
        Props[NoisySingleton],
        PoisonPill,
        ClusterSingletonManagerSettings(system)
      )
    )
    Cluster(system).subscribe(
      system.actorOf(Props[ClusterWatcher]),
      ClusterEvent.InitialStateAsEvents,
      classOf[ClusterDomainEvent]
    )

    // add real app routes here
    val routes =
      path("hello") {
        get {
          complete(
            HttpEntity(ContentTypes.`text/html(UTF-8)`, "<h1>Hello</h1>")
          )
        }
      }

    Http().bindAndHandle(routes, "0.0.0.0", 8080)

    system.log.info(
      s"Server online at http://localhost:8080/\nPress RETURN to stop..."
    )

    cluster.registerOnMemberUp(() => {
      system.log.info("Cluster member is up!")
    })
  }

  class ClusterWatcher extends Actor with ActorLogging {
    val cluster = Cluster(context.system)

    override def receive = {
      case msg => log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
    }
  }
} 
Example 54
Source File: CouchbaseReplaySpec.scala    From akka-persistence-couchbase   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.couchbase

import akka.actor.PoisonPill
import akka.persistence.couchbase.scaladsl.AbstractCouchbaseSpec
import akka.testkit.EventFilter
import com.couchbase.client.java.document.JsonDocument
import com.typesafe.config.ConfigFactory

class CouchbaseReplaySpec
    extends AbstractCouchbaseSpec(
      "CouchbaseReplaySpec",
      ConfigFactory.parseString("""
 akka.loggers = [akka.testkit.TestEventListener]
  """.stripMargin)
    )
    with CouchbaseBucketSetup {
  "Replay" must {
    "fail if next document found" in new Setup {
      override def initialPersistedEvents: Int = 2
      // pid-1 and pid-2 are used as the first two document ids
      probe.watch(persistentActor)
      persistentActor ! PoisonPill
      probe.expectTerminated(persistentActor)

      couchbaseSession.insert(JsonDocument.create(s"$pid-3")).futureValue

      EventFilter[RuntimeException](
        message = "Read highest sequence nr 2 but found document with id 1-3",
        occurrences = 1
      ).intercept {
        system.actorOf(TestActor.props(pid))
      }
    }
  }
}