akka.actor.ActorRef Scala Examples

The following examples show how to use akka.actor.ActorRef. Each is taken from an open-source project; the source file, project, and license are noted above every listing.
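
For readers new to Akka, here is a minimal, self-contained sketch of how an ActorRef is obtained and used before diving into the project examples. The Greeter and GreeterApp names are illustrative only, not taken from any project below: an ActorRef is the immutable, location-transparent handle returned by actorOf, messages are sent to it asynchronously with ! (tell), and several examples below also use ? (ask) for request/response.

import akka.actor.{ Actor, ActorRef, ActorSystem, Props }

class Greeter extends Actor {
  override def receive: Receive = {
    case name: String => sender() ! s"Hello, $name"
  }
}

object GreeterApp extends App {
  val system = ActorSystem("example")
  // actorOf returns an ActorRef: a handle to the actor, never the actor instance itself
  val greeter: ActorRef = system.actorOf(Props(new Greeter), "greeter")
  greeter ! "world" // fire-and-forget; the reply goes to the sender ref (deadLetters here)
  system.terminate()
}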
Example 1
Source File: ConsumerSelfManaged.scala    From scala-kafka-client   with MIT License
package cakesolutions.kafka.examples

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import cakesolutions.kafka.KafkaConsumer
import cakesolutions.kafka.akka.KafkaConsumerActor.{Confirm, Subscribe}
import cakesolutions.kafka.akka.{ConsumerRecords, Extractor, KafkaConsumerActor, Offsets}
import com.typesafe.config.Config
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer

import scala.concurrent.duration._


object ConsumerSelfManaged {

  def apply(config: Config): ActorRef = {
    val consumerConf = KafkaConsumer.Conf(
      new StringDeserializer,
      new StringDeserializer,
      groupId = "groupId",
      enableAutoCommit = false,
      autoOffsetReset = OffsetResetStrategy.EARLIEST)
      .withConf(config)

    val actorConf = KafkaConsumerActor.Conf(1.seconds, 3.seconds)

    val system = ActorSystem()
    system.actorOf(Props(new ConsumerSelfManaged(consumerConf, actorConf)))
  }
}

class ConsumerSelfManaged(
  kafkaConfig: KafkaConsumer.Conf[String, String],
  actorConfig: KafkaConsumerActor.Conf) extends Actor with ActorLogging {

  val recordsExt: Extractor[Any, ConsumerRecords[String, String]] = ConsumerRecords.extractor[String, String]

  val consumer: ActorRef = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )

  consumer ! Subscribe.ManualOffset(Offsets(Map((new TopicPartition("topic1", 0), 1))))

  override def receive: Receive = {

    // Records from Kafka
    case recordsExt(records) =>
      processRecords(records)
      sender() ! Confirm(records.offsets)
  }

  private def processRecords(records: ConsumerRecords[String, String]) = {
    records.pairs.foreach { case (key, value) =>
      log.info(s"Received [$key,$value]")
    }
    log.info(s"Batch complete, offsets: ${records.offsets}")
  }
} 
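Note: each delivered batch must be acknowledged with Confirm(records.offsets); if the KafkaConsumerActor receives no confirmation within its unconfirmedTimeout (the 3.seconds in the actor Conf above), it redelivers the batch. With a Subscribe.ManualOffset subscription the offsets are tracked by the application itself rather than committed to Kafka.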
Example 2
Source File: JsonGoogleParser.scala    From Scalaprof   with GNU General Public License v2.0
package com.phasmid.hedge_fund.actors

import akka.actor.{ ActorRef, Props }
import spray.http._
import scala.util._
import com.phasmid.hedge_fund.model._


// Object header assumed: the enclosing declaration, along with the spray-json
// format for Results, was dropped by the extractor (cf. JsonYQLParser in Example 4).
object JsonGoogleParser {
  import spray.httpx.unmarshalling._

  def decode(entity: HttpEntity): Deserialized[Results] = {
    val mediaTypeTextHtml = MediaTypes.`text/html`
    val mediaTypeJson = MediaTypes.`application/json`
    val contentTypeJson = ContentType(mediaTypeJson, HttpCharsets.`UTF-8`)
    //    val contentTypeText = ContentType(mediaTypeTextHtml, HttpCharsets.`ISO-8859-1`)
    entity match {
      case HttpEntity.NonEmpty(`contentTypeJson`, _) =>
        entity.as[Results]
      case HttpEntity.NonEmpty(ContentType(`mediaTypeTextHtml`, x), y) =>
        HttpEntity(ContentType(mediaTypeJson, x), fix(y)).as[Results]
      case HttpEntity.NonEmpty(x, _) => Left(MalformedContent(s"logic error: contentType=$x"))
      case _ => Left(MalformedContent("logic error"))
    }
  }

  def fix(data: HttpData): Array[Byte] = fix(data.asString).getBytes

  def fix(s: String): String = s.substring(3)

} 
Example 3
Source File: JsonGoogleOptionParser.scala    From Scalaprof   with GNU General Public License v2.0
package com.phasmid.hedge_fund.actors

import akka.actor.{ ActorRef, Props }
import spray.http._
import scala.util._
import com.phasmid.hedge_fund.model._


// Object header assumed: the enclosing declaration and the OptionChain JSON format
// were dropped by the extractor (cf. JsonYQLParser in Example 4).
object JsonGoogleOptionParser {
  import spray.httpx.unmarshalling._

  def decode(entity: HttpEntity): Deserialized[OptionChain] = {
    val contentType = ContentType(MediaTypes.`application/json`, HttpCharsets.`UTF-8`)
    entity match {
      case HttpEntity.NonEmpty(`contentType`, y) =>
        HttpEntity(contentType, fix(y)).as[OptionChain]
      case HttpEntity.NonEmpty(s, y) =>
        Left(MalformedContent(s"entity content type: $s"))
      case _ => Left(MalformedContent("logic error"))
    }
  }

  def fix(data: HttpData): Array[Byte] = fix(data.asString).getBytes

  def fix(s: String): String = """([^,{:\s]+):""".r.replaceAllIn(s, """"$1":""")

} 
Example 4
Source File: JsonYQLParser.scala    From Scalaprof   with GNU General Public License v2.0
package com.phasmid.hedge_fund.actors

import akka.actor.{ ActorRef, Props }
import spray.http._
import scala.util._
import com.phasmid.hedge_fund.model._


class JsonYQLParser(blackboard: ActorRef) extends BlackboardActor(blackboard) {

  val model: Model = new YQLModel

  override def receive = {
    case ContentMessage(entity) => {
      log.debug("JsonYQLParser received ContentMessage")
      JsonYQLParser.decode(entity) match {
        case Right(response) => processQuote(response.query.results.quote)
        case Left(message) => log.warning(message.toString())
      }
    }
    case m => super.receive(m)
  }

  def processQuote(quotes: Seq[Map[String, Option[String]]]) = quotes foreach { q => processInstrument(q) }

  def processInstrument(quote: Map[String, Option[String]]) = model.getKey("symbol") match {
    case Some(s) =>
      quote.get(s) match {
        case Some(Some(symbol)) => updateMarket(symbol, quote)
        case _ => log.warning(s"symbol $s is undefined")
      }
    case _ => log.warning("'symbol' is undefined in model")
  }

  def updateMarket(symbol: String, quote: Map[String, Option[String]]) = blackboard ! KnowledgeUpdate(model, symbol, quote flatMap { case (k, Some(v)) => Option(k -> v); case _ => None })
}

object JsonYQLParser {
  import spray.json.DefaultJsonProtocol
  import spray.httpx.unmarshalling._
  import spray.httpx.marshalling._
  import spray.httpx.SprayJsonSupport._
  import spray.json._

  case class Response(query: Query)
  case class Query(count: Int, created: String, lang: String, diagnostics: Option[Diagnostics], results: Results)
  case class Diagnostics(url: Seq[Map[String, String]], publiclyCallable: String, `user-time`: String, `service-time`: String, `build-version`: String, query: DiagnosticsQuery,
    cache: DiagnosticsCache, javascript: DiagnosticsJavascript)
  case class DiagnosticsQuery(`execution-start-time`: String, `execution-stop-time`: String, `execution-time`: String, params: String, content: String)
  case class DiagnosticsCache(`execution-start-time`: String, `execution-stop-time`: String, `execution-time`: String, method: String, `type`: String, content: String)
  case class DiagnosticsJavascript(`execution-start-time`: String, `execution-stop-time`: String, `execution-time`: String, `instructions-used`: String, `table-name`: String)
  case class Results(quote: Seq[Map[String, Option[String]]]) {
    def get(index: Int, key: String): Option[String] = {
      Try { quote(index) } match {
        case Success(y) => y.get(key) match { case Some(x) => x; case None => None }
        case Failure(y) => None
      }
    }
  }

  object MyJsonProtocol extends DefaultJsonProtocol with NullOptions {
    implicit val diagnosticsQueryFormat = jsonFormat5(DiagnosticsQuery)
    implicit val diagnosticsCacheFormat = jsonFormat6(DiagnosticsCache)
    implicit val diagnosticsJavascriptFormat = jsonFormat5(DiagnosticsJavascript)
    implicit val diagnosticsFormat = jsonFormat8(Diagnostics)
    implicit val resultsFormat = jsonFormat1(Results)
    implicit val queryFormat = jsonFormat5(Query)
    implicit val entityFormat = jsonFormat1(Response)
  }

  import MyJsonProtocol._

  def decode(entity: HttpEntity) = entity.as[Response]

} 
Example 5
Source File: Blackboard.scala    From Scalaprof   with GNU General Public License v2.0
package com.phasmid.hedge_fund.actors

import akka.actor.{ Actor, Props, ActorLogging, ActorRef }


class Blackboard(forwardMap: Map[Class[_ <: Any], String], actors: Map[String, Class[_ <: BlackboardActor]]) extends Actor with ActorLogging {

  val actorMap: Map[String, ActorRef] = actors map {
    case (k, v) => k -> context.actorOf(Props.create(v, self), k)
  }

  // To encode specific, non-forwarding behavior, override this method
  override def receive = {
    case message =>
      forwardMap.get(message.getClass) match {
        case Some(s) => actorMap.get(s) match {
          case Some(k) => k forward message
          case _ => log.warning(s"no actor established for key $s")
        }
        case _ => log.warning(s"no forward mapping established for message class ${message.getClass}")
      }
  }
} 
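A hedged construction sketch, reusing ContentMessage and EntityParser from Example 13 below, and assuming an in-scope ActorSystem named system plus the akka.actor imports already shown:

// Illustrative wiring: ContentMessage instances are forwarded to the "entityParser" child
val blackboard: ActorRef = system.actorOf(Props(new Blackboard(
  forwardMap = Map(classOf[ContentMessage] -> "entityParser"),
  actors = Map("entityParser" -> classOf[EntityParser]))), "blackboard")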
Example 6
Source File: HttpReader.scala    From Scalaprof   with GNU General Public License v2.0
package com.phasmid.hedge_fund.actors

import akka.actor.{ ActorRef, Props }
import spray.http._


// Class header assumed: the enclosing declaration was dropped by the extractor.
// entityParser is presumed to be a child EntityParser actor, mirroring Example 13.
class HttpReader(blackboard: ActorRef) extends BlackboardActor(blackboard) {

  val entityParser: ActorRef = context.actorOf(Props.create(classOf[EntityParser], blackboard), "EntityParser")

  override def receive = {
    case HttpResult(queryProtocol, request, HttpResponse(status, entity, headers, protocol)) =>
      log.info("request sent: {}; protocol: {}; response status: {}", request, protocol, status)
      if (status.isSuccess)
        processResponse(entity, headers, queryProtocol)
      else
        log.error("HTTP transaction error: {}", status.reason)

    case m => super.receive(m)
  }

  def processResponse(entity: HttpEntity, headers: List[HttpHeader], protocol: String) = {
    log.debug("response headers: {}; entity: {}",headers,entity)
    entityParser ! EntityMessage(protocol, entity)
  }
}

// TODO add headers
// CONSIDER move into Blackboard
case class EntityMessage(protocol: String, entity: HttpEntity) 
Example 7
Source File: UpdateLogger.scala    From Scalaprof   with GNU General Public License v2.0
package com.phasmid.hedge_fund.actors

import akka.actor.{ ActorRef, Props }
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import scala.concurrent.Await
import scala.language.postfixOps
import com.phasmid.hedge_fund.model.Model
import com.phasmid.hedge_fund.portfolio._


class UpdateLogger(blackboard: ActorRef) extends BlackboardActor(blackboard) {

  var portfolio = new Portfolio("", Nil)

  override def receive =
    {
      case Confirmation(id, model, attrs) =>
        log.debug(s"update for identifier: $id")
        if (model.isOption)
          processOption(id, model, attrs)
        else
          processStock(id, model)

      case PortfolioUpdate(p) =>
        log.debug(s"portfolio update for: ${p.name}")
        portfolio = p
        showPortfolio

      case m => super.receive(m)
    }

  implicit val timeout = Timeout(5 seconds)

  def processStock(identifier: String, model: Model) = {
    model.getKey("price") match {
      case Some(p) => {
        // sender is the MarketData actor
        val future = (sender ? SymbolQuery(identifier, List(p))).mapTo[QueryResponse]
        // TODO why are we waiting for this here?
        val result = Await.result(future, timeout.duration)
        result match {
          case QueryResponseValid(k,a) =>
            a map {
              case (k, v) => log.info(s"$identifier attribute $k has been updated to: $v")
            }
          case _ =>
        }
      }
      case None => log.warning(s"'price' not defined in model")
    }
  }

  def processOption(identifier: String, model: Model, attributes: Map[String, Any]) = {
    val key = "underlying"
    attributes.get(key) match {
      case Some(value) =>
        val future = (blackboard ? OptionQuery("id", value)).mapTo[QueryResponse]
        // TODO why are we waiting for this here?
        val result = Await.result(future, timeout.duration)
        result match {
          case QueryResponseValid(k,a) =>
              println(s"Action Required: re: qualifying option $identifier with underlying symbol: $k and attributes: $a")
          case _ =>
        }
      case None => log.warning(s"processOption: value not present for $key")
    }
  }

  def showPortfolio {
    println(s"Portfolio for ${portfolio.name}")
    portfolio.positions foreach { showPosition(_) }
  }

  def showPosition(position: Position) {
    println(s"position for ${position.symbol}: quantity=${position.quantity}; options=")
    position.contracts foreach { showContract(_) }
  }

  def showContract(contract: Contract) {
    println(s"contract: $contract")
  }
} 
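The two TODO comments above question the blocking Await.result calls. A hypothetical non-blocking variant (not part of the project) would pipe the future back to the actor instead, reusing the implicit timeout already in scope and assuming receive gains a case for the resulting QueryResponse:

// Sketch only: the QueryResponse arrives later as an ordinary message to this
// actor, so the dispatcher thread is never blocked on Await.result.
import akka.pattern.pipe
import context.dispatcher

def processStockAsync(identifier: String, model: Model): Unit =
  model.getKey("price") foreach { p =>
    (sender() ? SymbolQuery(identifier, List(p))).mapTo[QueryResponse].pipeTo(self)
  }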
Example 8
Source File: HedgeFund.scala    From Scalaprof   with GNU General Public License v2.0
package com.phasmid.hedge_fund

import com.phasmid.hedge_fund.model._
import com.phasmid.hedge_fund.actors._
import com.phasmid.hedge_fund.portfolio.{Portfolio,PortfolioParser}
import akka.actor.{ Actor, ActorSystem, Props, ActorRef }
import com.typesafe.config.{ ConfigFactory, Config }
import scala.io.Source
import scala.concurrent.ExecutionContext.Implicits.global


object HedgeFund extends App {

    val config = ConfigFactory.load()
    implicit val system = ActorSystem("HedgeFund")
    println(s"""${config.getString("name")}, ${config.getString("appVersion")}""")
    val engines: Seq[Query] = config.getString("engine") match {
      case "YQL" => Seq(YQLQuery(config.getString("format"), false))
      case "Google" => Seq(GoogleQuery("NASDAQ"))
      case "YQL,Google" => Seq(YQLQuery(config.getString("format"), false),GoogleQuery("NASDAQ"))
      case _ => Seq()
    }
    println(s"engines: $engines")
    val portfolio = getPortfolio(config)
    val blackboard = system.actorOf(Props.create(classOf[HedgeFundBlackboard]), "blackboard")
    val symbols = getSymbols(config,portfolio)
    for (engine <- engines) blackboard ! ExternalLookup(engine.getProtocol, engine.createQuery(symbols))
    val optionEngine = new GoogleOptionQuery
    symbols foreach {
      s => blackboard ! ExternalLookup(optionEngine.getProtocol, optionEngine.createQuery(List(s)))
    }
    blackboard ! PortfolioUpdate(portfolio)

  import scala.language.postfixOps

  def getSymbols(config: Config, portfolio: Portfolio): List[String] = {
    // TODO add in the symbols from the portfolio
    config.getString("symbols").split(",").toList
  }

  def getPortfolio(config: Config): Portfolio = {
    val json = Source.fromFile(config.getString("portfolio")).mkString
    val portfolio = PortfolioParser.decode(json)
    println(s"portfolio: $portfolio")
    portfolio
  }
} 
Example 9
Source File: AutoPartitionConsumerWithManualOffset.scala    From scala-kafka-client   with MIT License
package cakesolutions.kafka.examples

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import cakesolutions.kafka.KafkaConsumer
import cakesolutions.kafka.akka.KafkaConsumerActor._
import cakesolutions.kafka.akka.{ConsumerRecords, KafkaConsumerActor, Offsets}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer

import scala.concurrent.duration._


object AutoPartitionConsumerWithManualOffset {

  def apply(config: Config): ActorRef = {
    val consumerConf = KafkaConsumer.Conf(
      new StringDeserializer,
      new StringDeserializer,
      groupId = "test_group",
      enableAutoCommit = false,
      autoOffsetReset = OffsetResetStrategy.EARLIEST)
      .withConf(config)

    val actorConf = KafkaConsumerActor.Conf(1.seconds, 3.seconds)

    val system = ActorSystem()
    system.actorOf(Props(new AutoPartitionConsumerWithManualOffset(consumerConf, actorConf)))
  }
}

class AutoPartitionConsumerWithManualOffset(
  kafkaConfig: KafkaConsumer.Conf[String, String],
  actorConfig: KafkaConsumerActor.Conf) extends Actor with ActorLogging {

  private val recordsExt = ConsumerRecords.extractor[String, String]

  private val consumer = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )

  consumer ! Subscribe.AutoPartitionWithManualOffset(List("topic1"), assignedListener, revokedListener)

  override def receive: Receive = {

    // Records from Kafka
    case recordsExt(records) =>
      processRecords(records.pairs)
      sender() ! Confirm(records.offsets)
  }

  private def processRecords(records: Seq[(Option[String], String)]) =
    records.foreach { case (key, value) =>
      log.info(s"Received [$key,$value]")
    }

  private def assignedListener(tps: List[TopicPartition]): Offsets = {
    log.info("Partitions have been assigned" + tps.toString())

    // Should load the offsets from a persistent store and any related state
    val offsetMap = tps.map{ tp =>
      tp -> 0l
    }.toMap

    // Return the required offsets for the assigned partitions
    Offsets(offsetMap)
  }

  private def revokedListener(tps: List[TopicPartition]): Unit = {
    log.info("Partitions have been revoked" + tps.toString())
    // Opportunity to clear any state for the revoked partitions
    ()
  }
} 
Example 10
Source File: ConsumerRecovery.scala    From scala-kafka-client   with MIT License
package cakesolutions.kafka.examples

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, OneForOneStrategy, Props, SupervisorStrategy}
import cakesolutions.kafka.KafkaConsumer
import cakesolutions.kafka.akka.KafkaConsumerActor.{Confirm, Subscribe}
import cakesolutions.kafka.akka.{ConsumerRecords, Extractor, KafkaConsumerActor}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.serialization.StringDeserializer

import scala.concurrent.duration._


// Class header inferred from the sibling consumer examples in this project.
class ConsumerRecovery(
  kafkaConfig: KafkaConsumer.Conf[String, String],
  actorConfig: KafkaConsumerActor.Conf) extends Actor with ActorLogging {

  override def supervisorStrategy: SupervisorStrategy = OneForOneStrategy(maxNrOfRetries = 10) {
    case _: KafkaConsumerActor.ConsumerException =>
      log.info("Consumer exception caught. Restarting consumer.")
      SupervisorStrategy.Restart
    case _ =>
      SupervisorStrategy.Escalate
  }

  val recordsExt: Extractor[Any, ConsumerRecords[String, String]] = ConsumerRecords.extractor[String, String]

  val consumer: ActorRef = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )

  consumer ! Subscribe.AutoPartition(List("topic1"))

  override def receive: Receive = {

    // Records from Kafka
    case recordsExt(records) =>
      processRecords(records.pairs)
      sender() ! Confirm(records.offsets, commit = true)
  }

  private def processRecords(records: Seq[(Option[String], String)]) =
    records.foreach { case (key, value) =>
      log.info(s"Received [$key,$value]")
    }
} 
Example 11
Source File: ConsumerToProducer.scala    From scala-kafka-client   with MIT License
package cakesolutions.kafka.examples

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import cakesolutions.kafka.akka.KafkaConsumerActor.{Confirm, Subscribe}
import cakesolutions.kafka.akka._
import cakesolutions.kafka.{KafkaConsumer, KafkaProducer}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}

import scala.concurrent.duration._


object ConsumerToProducer {

  def apply(consumerConfig: Config, producerConfig: Config): ActorRef = {

    // Create KafkaConsumerActor config with bootstrap.servers specified in Typesafe config
    val consumerConf = KafkaConsumer.Conf(
      new StringDeserializer,
      new StringDeserializer,
      groupId = "test_group",
      enableAutoCommit = false,
      autoOffsetReset = OffsetResetStrategy.EARLIEST)
      .withConf(consumerConfig)

    val actorConf = KafkaConsumerActor.Conf(1.seconds, 3.seconds, 5)

    // Create KafkaProducerActor config with defaults and bootstrap.servers specified in Typesafe config
    val producerConf = KafkaProducer.Conf(new StringSerializer, new StringSerializer).withConf(producerConfig)

    val system = ActorSystem()
    system.actorOf(Props(new ConsumerToProducer(consumerConf, actorConf, producerConf)))
  }
}

class ConsumerToProducer(
  kafkaConfig: KafkaConsumer.Conf[String, String],
  actorConfig: KafkaConsumerActor.Conf,
  producerConf: KafkaProducer.Conf[String, String]) extends Actor with ActorLogging {

  private val recordsExt = ConsumerRecords.extractor[String, String]

  // The KafkaConsumerActor
  private val consumer = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )
  context.watch(consumer)

  // The KafkaProducerActor
  private val producer = context.actorOf(KafkaProducerActor.props(producerConf))

  consumer ! Subscribe.AutoPartition(List("topic1"))

  override def receive: Receive = {

    // Records from Kafka
    case recordsExt(records) =>
      processRecords(records)

    // Confirmed Offsets from KafkaProducer
    case o: Offsets =>
      consumer ! Confirm(o, commit = true)
  }

  // Demonstrates some transformation of the messages before forwarding to KafkaProducer
  private def processRecords(records: ConsumerRecords[String, String]) = {
    val transformedRecords = records.pairs.map { case (key, value) =>
      (key, value + ".")
    }

    // Send records to Topic2.  Offsets will be sent back to this actor once confirmed.
    producer ! ProducerRecords.fromKeyValues[String, String]("topic2", transformedRecords, Some(records.offsets), None)

    // Could have sent them like this if we didn't first transform:
    // producer ! ProducerRecords.fromConsumerRecords("topic2", records, None)
  }
} 
Example 12
Source File: AutoPartitionConsumer.scala    From scala-kafka-client   with MIT License
package cakesolutions.kafka.examples

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import cakesolutions.kafka.KafkaConsumer
import cakesolutions.kafka.akka.KafkaConsumerActor.{Confirm, Subscribe}
import cakesolutions.kafka.akka.{ConsumerRecords, KafkaConsumerActor}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.serialization.StringDeserializer

import scala.concurrent.duration._


object AutoPartitionConsumer {

  def apply(config: Config): ActorRef = {
    val consumerConf = KafkaConsumer.Conf(
      new StringDeserializer,
      new StringDeserializer,
      groupId = "test_group",
      enableAutoCommit = false,
      autoOffsetReset = OffsetResetStrategy.EARLIEST)
      .withConf(config)

    val actorConf = KafkaConsumerActor.Conf(1.seconds, 3.seconds)

    val system = ActorSystem()
    system.actorOf(Props(new AutoPartitionConsumer(consumerConf, actorConf)))
  }
}

class AutoPartitionConsumer(
  kafkaConfig: KafkaConsumer.Conf[String, String],
  actorConfig: KafkaConsumerActor.Conf) extends Actor with ActorLogging {

  private val recordsExt = ConsumerRecords.extractor[String, String]

  private val consumer = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )
  context.watch(consumer)

  consumer ! Subscribe.AutoPartition(List("topic1"))

  override def receive: Receive = {

    // Records from Kafka
    case recordsExt(records) =>
      processRecords(records.pairs)
      sender() ! Confirm(records.offsets, commit = true)
  }

  private def processRecords(records: Seq[(Option[String], String)]) =
    records.foreach { case (key, value) =>
      log.info(s"Received [$key,$value]")
    }
} 
Example 13
Source File: EntityParser.scala    From Scalaprof   with GNU General Public License v2.0
package com.phasmid.hedge_fund.actors

import akka.actor.{ ActorRef, Props }


class EntityParser(blackboard: ActorRef) extends BlackboardActor(blackboard) {

  val parsers = Map("json:YQL" -> context.actorOf(Props.create(classOf[JsonYQLParser], blackboard), "JsonParserYQL"),
    "json:GF" -> context.actorOf(Props.create(classOf[JsonGoogleParser], blackboard), "JsonGoogleParser"),
    "json:GO" -> context.actorOf(Props.create(classOf[JsonGoogleOptionParser], blackboard), "JsonGoogleOptionParser"))

  override def receive = {
    case EntityMessage(protocol, entity) => {
      log.debug("EntityMessage received: protocol: {}", protocol)
      parsers.get(protocol) match {
        case Some(actorRef) => actorRef ! ContentMessage(entity)
        case None => log.warning("no parser for: {}", protocol)
      }
    }
    case m => super.receive(m)
  }
}

case class ContentMessage(content: spray.http.HttpEntity) 
Example 14
Source File: LinearRegressionActorSpec.scala    From coral   with Apache License 2.0
package io.coral.actors.transform

import akka.actor.{ActorRef, ActorSystem}
import akka.testkit.{TestProbe, TestActorRef, ImplicitSender, TestKit}
import io.coral.actors.CoralActorFactory
import io.coral.api.DefaultModule
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import akka.util.Timeout
import org.json4s.native.Serialization.write
import scala.concurrent.duration._

class LinearRegressionActorSpec(_system: ActorSystem)
	extends TestKit(_system)
	with ImplicitSender
	with WordSpecLike
	with Matchers
	with BeforeAndAfterAll {
	def this() = this(ActorSystem("LinearRegressionActorSpec"))

	implicit val timeout = Timeout(100.millis)
	implicit val injector = new DefaultModule(system.settings.config)

	override def afterAll() {
		TestKit.shutdownActorSystem(system)
	}

	def createLinearRegressionActor(intercept: Double, weights: Map[String, Double]) = {
		implicit val formats = DefaultFormats
		val str =
			s"""{ "type":"linearregression",
			   |"params": { "intercept": $intercept,
			   |"weights": ${write(weights)}
			   |}}""".stripMargin

		val createJson = parse(str).asInstanceOf[JObject]
		val props = CoralActorFactory.getProps(createJson).get
		val actorTestRef = TestActorRef[LinearRegressionActor](props)

		val probe = TestProbe()
		actorTestRef.underlyingActor.emitTargets += probe.ref
		(actorTestRef, probe)
	}

	"LinearRegressionActor" should {
		"Instantiate from companion object" in {
			val (actor, _) = createLinearRegressionActor(0, Map("salary" -> 2000))
			actor.underlyingActor.intercept should be(0)
			actor.underlyingActor.weights should be(Map("salary" -> 2000))
		}

		"process trigger data when all the features are available even with different order" in {
			val (actor, probe) = createLinearRegressionActor(0, Map("age" -> 0.2, "salary" -> 0.1))
			val message = parse( s"""{"salary": 4000, "age": 40}""").asInstanceOf[JObject]
			actor ! message

			// expected score = intercept + Σ(weight × feature) = 0 + 0.2×40 + 0.1×4000 = 408.0
			probe.expectMsg(parse( s"""{"score": 408.0, "salary": 4000, "age": 40}"""))
		}

		"emit when score is calculated" in {
			val (actor, probe) = createLinearRegressionActor(0, Map("salary" -> 10))
			val message = parse( s"""{"salary": 2000}""").asInstanceOf[JObject]
			actor ! message

			probe.expectMsg(parse( s"""{"score": 20000.0, "salary": 2000}"""))
		}

		"not emit when keys are missing" in {
			val (actor, probe) = createLinearRegressionActor(0, Map("age" -> 0.2, "salary" -> 10))
			val message = parse( s"""{"salary": 2000}""").asInstanceOf[JObject]
			actor ! message

			probe.expectNoMsg
		}
	}
} 
Example 15
Source File: CurrentPersistenceIdsQuerySourceTest.scala    From apache-spark-test   with Apache License 2.0
package com.github.dnvriend.spark.sstreaming

import java.util.UUID
import java.util.concurrent.atomic.AtomicLong

import akka.actor.{ ActorRef, Props }
import akka.persistence.PersistentActor
import akka.testkit.TestProbe
import com.github.dnvriend.TestSpec
import com.github.dnvriend.spark.datasources.SparkImplicits._
import com.github.dnvriend.spark.datasources.person.Person
import org.apache.spark.sql.streaming.{ OutputMode, ProcessingTime }
import org.scalatest.Ignore

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.language.implicitConversions

object PersonActor {
  final case class BlogPost(id: Long, text: String)
}
class PersonActor(val persistenceId: String, schedule: Boolean)(implicit ec: ExecutionContext) extends PersistentActor {
  val counter = new AtomicLong()
  def ping() = context.system.scheduler.scheduleOnce(200.millis, self, "persist")
  def randomId: String = UUID.randomUUID.toString
  override val receiveRecover: Receive = PartialFunction.empty
  override val receiveCommand: Receive = {
    case "persist" =>
      persist(Person(counter.incrementAndGet(), s"foo-$randomId", 20)) { _ =>
        sender() ! "ack"
      }
      if (schedule) ping()
  }
  if (schedule) ping()
}

@Ignore
class CurrentPersistenceIdsQuerySourceTest extends TestSpec {
  def withPersistentActor(pid: String = randomId, schedule: Boolean = false)(f: ActorRef => TestProbe => Unit): Unit = {
    val tp = TestProbe()
    val ref = system.actorOf(Props(new PersonActor(pid, schedule)))
    try f(ref)(tp) finally killActors(ref)
  }

  it should "query read journal" in withSparkSession { spark =>
    withPersistentActor() { ref => tp =>
      tp.send(ref, "persist")
      tp.expectMsg("ack")

      val jdbcReadJournal = spark.readStream
        .currentPersistenceIds("jdbc-read-journal")

      jdbcReadJournal.printSchema()

      println("Is the query streaming: " + jdbcReadJournal.isStreaming)
      println("Are there any streaming queries? " + spark.streams.active.isEmpty)

      val query = jdbcReadJournal
        .writeStream
        .format("console")
        .trigger(ProcessingTime(1.seconds))
        .queryName("consoleStream")
        .outputMode(OutputMode.Append())
        .start()

      query.awaitTermination(10.seconds)
    }
  }
} 
Example 16
Source File: CurrentEventsByPersistenceIdQueryTest.scala    From apache-spark-test   with Apache License 2.0
package com.github.dnvriend.spark.sstreaming

import akka.actor.{ ActorRef, Props }
import akka.testkit.TestProbe
import com.github.dnvriend.TestSpec
import com.github.dnvriend.spark.datasources.SparkImplicits._
import com.github.dnvriend.spark.mapper.PersonEventMapper
import org.apache.spark.sql.streaming.{ OutputMode, ProcessingTime }
import org.apache.spark.sql.functions._
import org.scalatest.Ignore

import scala.concurrent.duration._

@Ignore
class CurrentEventsByPersistenceIdQueryTest extends TestSpec {
  def withPersistentActor(pid: String = randomId, schedule: Boolean = false)(f: ActorRef => TestProbe => Unit): Unit = {
    val tp = TestProbe()
    val ref = system.actorOf(Props(new PersonActor(pid, schedule)))
    try f(ref)(tp) finally killActors(ref)
  }

  it should "read events for pid" in withSparkSession { spark =>
    import spark.implicits._
    withPersistentActor("person", schedule = true) { ref => tp =>

      tp.send(ref, "persist")
      tp.expectMsg("ack")

      val jdbcReadJournal = spark.readStream
        .schema(PersonEventMapper.schema)
        .option("pid", "person")
        .option("event-mapper", "com.github.dnvriend.spark.mapper.PersonEventMapper")
        .eventsByPersistenceId("jdbc-read-journal")

      jdbcReadJournal.printSchema()

      //      val numOfEvents = jdbcReadJournal
      //        .groupBy('persistence_id)
      //        .agg(count('sequence_number).as("number_of_events"))

      val query = jdbcReadJournal
        .writeStream
        .format("console")
        .trigger(ProcessingTime(1.seconds))
        .queryName("consoleStream")
        //        .outputMode(OutputMode.Complete())
        .outputMode(OutputMode.Append())
        .start()

      query.awaitTermination(20.seconds)
    }
  }
} 
Example 17
Source File: MessageTypes.scala    From sparkplug   with MIT License
package springnz.sparkplug.executor

import akka.actor.ActorRef
import springnz.sparkplug.executor.MessageTypes.JobRequest
import springnz.sparkplug.core.SparkOperation

object MessageTypes {
  case object ClientReady

  case object ServerReady
  case object ShutDown
  case object CancelAllJobs

  case class ServerError(reason: Throwable)
  case class JobRequest(factoryClassName: String, data: Option[Any] = None)

  case class JobSuccess(jobRequest: JobRequest, response: Any)
  case class JobFailure(jobRequest: JobRequest, reason: Throwable)

  class SparkplugException(message: String) extends Exception(message)
}

object InternalMessageTypes {
  case class RoutedRequest(jobRequest: JobRequest, originator: ActorRef)
} 
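These case classes define the protocol between a Sparkplug client and the executor. A hedged sketch of a client actor driving it (the JobClient name and wiring are illustrative, not from the project; the plugin class name appears in the tests in Example 18):

import akka.actor.{ Actor, ActorLogging, ActorRef }
import springnz.sparkplug.executor.MessageTypes._

// Illustrative client: submits one job on startup and logs the outcome
class JobClient(server: ActorRef) extends Actor with ActorLogging {
  server ! JobRequest("springnz.sparkplug.executor.LetterCountPlugin", None)

  override def receive: Receive = {
    case JobSuccess(req, response) => log.info(s"${req.factoryClassName} succeeded: $response")
    case JobFailure(req, reason)   => log.error(reason, s"${req.factoryClassName} failed")
  }
}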
Example 18
Source File: CoordinatorTests.scala    From sparkplug   with MIT License
package springnz.sparkplug.client

import akka.actor.{ ExtendedActorSystem, ActorRef, ActorSystem }
import akka.pattern.ask
import akka.testkit.{ ImplicitSender, TestKit }
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import org.scalatest._
import springnz.sparkplug.executor.MessageTypes.{ JobFailure, JobRequest, JobSuccess, ShutDown }

import scala.concurrent.Await
import scala.concurrent.duration._

import scala.collection.JavaConverters._

class CoordinatorTests(_system: ActorSystem)
    extends TestKit(_system) with ImplicitSender with WordSpecLike with BeforeAndAfterAll with Matchers {

  def this() = this(ActorSystem(Constants.actorSystemName, ConfigFactory.parseMap(Map(
    "akka.remote.netty.tcp.port" -> new Integer(0)).asJava).withFallback(ClientExecutor.defaultClientAkkaConfig)))

  var coordinator: ActorRef = null

  "client coordinator" should {

    "successfuly execute a job request" in {
      val request = JobRequest("springnz.sparkplug.executor.LetterCountPlugin", None)
      coordinator ! request
      expectMsg[JobSuccess](30.seconds, JobSuccess(request, (2, 2)))
    }

    "successfuly execute a job request after a failure" in {
      val invalidRequest = JobRequest("springnz.sparkplug.executor.InvalidClass", None)
      coordinator ! invalidRequest
      expectMsgType[JobFailure](30.seconds)
      val goodRequest = JobRequest("springnz.sparkplug.executor.LetterCountPlugin", None)
      coordinator ! goodRequest
      expectMsg[JobSuccess](30.seconds, JobSuccess(goodRequest, (2, 2)))
    }

    "work with the ask pattern as well" in {
      implicit val timeout = Timeout(30.seconds)
      val request = JobRequest("springnz.sparkplug.executor.LetterCountPlugin", None)
      val replyFuture = coordinator ? request
      val result = Await.result(replyFuture, 30.seconds)
      result shouldBe JobSuccess(request, (2, 2))
    }

  }

  override def beforeAll {
    val configSection = s"sparkplug.${springnz.sparkplug.executor.Constants.defaultAkkaRemoteConfigSection}"
    val port = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress.port.get
    val akkaClientConfig = ConfigFactory.parseMap(Map(
      "akka.remote.netty.tcp.port" -> new Integer(port)).asJava).withFallback(ClientExecutor.defaultClientAkkaConfig)
    coordinator = system.actorOf(Coordinator.props(None,
      akkaRemoteConfig = Some(ConfigFactory.load.getConfig(configSection)),
      akkaClientConfig = akkaClientConfig), "TestCoordinator")
  }

  override def afterAll {
    system.actorSelection(s"/user/TestCoordinator") ! ShutDown
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)
  }

} 
Example 19
Source File: CoordinatorCleanupTests.scala    From sparkplug   with MIT License
package springnz.sparkplug.client

import akka.actor.{ ActorRef, ActorSystem }
import akka.testkit.{ ImplicitSender, TestKit }
import org.scalatest._
import springnz.sparkplug.executor.MessageTypes.{ CancelAllJobs, JobRequest, JobSuccess, ShutDown }

import scala.concurrent.duration._

class CoordinatorCleanupTests(_system: ActorSystem)
    extends TestKit(_system) with ImplicitSender with WordSpecLike with BeforeAndAfterAll with Matchers {

  def this() = this(ActorSystem(Constants.actorSystemName, ClientExecutor.defaultClientAkkaConfig))

  var coordinator: ActorRef = null

  "client coordinator" should {

    "successfuly execute a job request" in {
      val request = JobRequest("springnz.sparkplug.executor.WaitPlugin", None)
      coordinator ! request
      expectMsgType[JobSuccess](30.seconds)
    }

    "cancel all job requests" in {
      val request = JobRequest("springnz.sparkplug.executor.WaitPlugin", None)
      coordinator ! request
      Thread.sleep(500)
      coordinator ! CancelAllJobs
      Thread.sleep(500)
      expectMsgType[JobSuccess](30.seconds)
    }

  }

  override def beforeAll {
    coordinator = system.actorOf(Coordinator.props(None), "TestCoordinator")
  }

  override def afterAll {
    system.actorSelection(s"/user/TestCoordinator") ! ShutDown
    TestKit.shutdownActorSystem(system)
    Thread.sleep(1000)
  }

} 
Example 20
Source File: TestSpec.scala    From reactive-programming   with Apache License 2.0
package com.test

import java.io.IOException
import java.util.UUID

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.exceptions.TestFailedException
import org.scalatest._
import rx.lang.scala._

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContextExecutor, Future }
import scala.util.{ Random ⇒ Rnd, Try }

object Random {
  def apply(): Rnd = new Rnd()
}

trait TestSpec extends FlatSpec with Matchers with ScalaFutures with TryValues with OptionValues with Eventually with BeforeAndAfterAll {
  implicit val system: ActorSystem = ActorSystem("test")
  implicit val ec: ExecutionContextExecutor = system.dispatcher
  val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 50.seconds)
  implicit val timeout = Timeout(50.seconds)

  override protected def afterAll(): Unit = {
    system.terminate()
  }

  
  // TestProbe factory; assumed helper whose declaration was dropped by the extractor
  // (cleanup below and DeathPactTest in Example 22 both rely on it)
  def probe: TestProbe = TestProbe()

  def cleanup(actors: ActorRef*): Unit = {
    actors.foreach { (actor: ActorRef) ⇒
      actor ! PoisonPill
      probe watch actor
    }
  }

  implicit class PimpedByteArray(self: Array[Byte]) {
    def getString: String = new String(self)
  }

  implicit class PimpedFuture[T](self: Future[T]) {
    def toTry: Try[T] = Try(self.futureValue)
  }

  implicit class PimpedObservable[T](self: Observable[T]) {
    def waitFor: Unit = {
      self.toBlocking.toIterable.last
    }
  }

  implicit class MustBeWord[T](self: T) {
    def mustBe(pf: PartialFunction[T, Unit]): Unit =
      if (!pf.isDefinedAt(self)) throw new TestFailedException("Unexpected: " + self, 0)
  }

  object Socket { def apply() = new Socket }
  class Socket {
    def readFromMemory: Future[Array[Byte]] = Future {
      Thread.sleep(100) // sleep 100 millis
      "fromMemory".getBytes
    }

    def send(payload: Array[Byte], from: String, failed: Boolean): Future[Array[Byte]] =
      if (failed) Future.failed(new IOException(s"Network error: $from"))
      else {
        Future {
          Thread.sleep(250) // sleep 250 millis, not real life time, but hey
          s"${payload.getString}->$from".getBytes
        }
      }

    def sendToEurope(payload: Array[Byte], failed: Boolean = false): Future[Array[Byte]] =
      send(payload, "fromEurope", failed)

    def sendToUsa(payload: Array[Byte], failed: Boolean = false): Future[Array[Byte]] =
      send(payload, "fromUsa", failed)
  }
} 
Example 21
Source File: BankAccountTest.scala    From reactive-programming   with Apache License 2.0
package com.test.week5

import akka.actor.Status.Failure
import akka.actor.{ Actor, ActorRef, Props }
import akka.event.LoggingReceive
import akka.pattern.ask
import com.test.TestSpec

class BankAccountTest extends TestSpec {

  "Actors" should "know itself" in {
    val ref: ActorRef = system.actorOf(Props(new Actor {
      override def receive: Receive = {
        case _ ⇒ sender() ! self
      }
    }))

    (ref ? "").futureValue shouldBe ref
    cleanup(ref)
  }

  it should "count" in {
    val ref: ActorRef = system.actorOf(Props(new Actor {
      def count(num: Int): Receive = LoggingReceive {
        case _ ⇒
          context.become(count(num + 1))
          sender() ! num
      }
      override def receive: Receive = LoggingReceive(count(0))
    }))
    (ref ? "").futureValue shouldBe 0
    (ref ? "").futureValue shouldBe 1
    (ref ? "").futureValue shouldBe 2
    (ref ? "").futureValue shouldBe 3
    cleanup(ref)
  }

  it should "be a BankAccount" in {
    object BankAccount {
      case class Transfer(from: ActorRef, to: ActorRef, amount: BigInt)
      case class Deposit(amount: BigInt)
      case class Withdraw(amount: BigInt)
      case object Info
      case class Done(amount: BigInt)
      case object Failed
    }
    class BankAccount extends Actor {
      import BankAccount._
      var balance: BigInt = BigInt(0)
      override def receive: Receive = LoggingReceive {
        case Deposit(amount) ⇒
          balance += amount
          sender() ! Done(balance)
        case Withdraw(amount) ⇒
          balance -= amount
          sender() ! Done(balance)
        case Info ⇒
          sender() ! Done(balance)
        case _ ⇒ sender() ! Failure
      }
    }

    import BankAccount._
    val account1 = system.actorOf(Props(new BankAccount))
    (account1 ? Info).futureValue shouldBe Done(0)
    (account1 ? Deposit(100)).futureValue shouldBe Done(100)
    (account1 ? Deposit(100)).futureValue shouldBe Done(200)

    val account2 = system.actorOf(Props(new BankAccount))

    val tom = system.actorOf(Props(new Actor {
      def awaitDeposit(client: ActorRef): Receive = LoggingReceive {
        case Done(amount) ⇒
          client ! Done(amount)
          context.stop(self)
      }
      def awaitWithdraw(to: ActorRef, amount: BigInt, client: ActorRef): Receive = LoggingReceive {
        case Done(_) ⇒
          to ! Deposit(amount)
          context.become(awaitDeposit(client))
        case Failed ⇒
          client ! Failed
          context.stop(self)
      }
      override def receive = {
        case Transfer(from, to, amount) ⇒
          from ! Withdraw(amount)
          context.become(awaitWithdraw(to, amount, sender()))
      }
    }))

    (tom ? Transfer(account1, account2, 50)).futureValue shouldBe Done(50)
    (account1 ? Info).futureValue shouldBe Done(150)
    (account2 ? Info).futureValue shouldBe Done(50)
    cleanup(account1, account2, tom)
  }
} 
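The tom actor above implements a two-phase transfer with context.become: on Transfer it sends Withdraw to the source account and switches to awaitWithdraw; once the withdrawal is confirmed it deposits into the target and switches to awaitDeposit, finally forwarding Done to the original client and stopping itself.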
Example 22
Source File: DeathPactTest.scala    From reactive-programming   with Apache License 2.0
package com.test.week6

import akka.actor.Actor.emptyBehavior
import akka.actor.{ Actor, ActorRef, PoisonPill, Props }
import com.test.TestSpec

class DeathPactTest extends TestSpec {

  // let's create some lovers

  class Boy(girl: ActorRef) extends Actor {
    context.watch(girl) // sign deathpact
    override def receive = emptyBehavior
  }

  class Girl extends Actor {
    import scala.concurrent.duration._
    context.system.scheduler.scheduleOnce(100.millis, self, PoisonPill)
    override def receive: Receive = emptyBehavior
  }

  // yes I know, boy/girl, I am old fashioned..

  "Lovers" should "die together" in {
    val tp = probe
    val girl = system.actorOf(Props(new Girl))
    val boy = system.actorOf(Props(new Boy(girl)))
    tp watch boy
    tp watch girl
    tp.expectTerminated(girl)
    tp.expectTerminated(boy)
  }
} 
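The test passes because of Akka's death-pact semantics: Boy watches the girl but never handles the resulting Terminated message, so when the girl is poisoned the unhandled Terminated raises a DeathPactException in Boy, and the default supervision strategy stops him as well; the probe then observes both terminations.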
Example 23
Source File: Constructr.scala    From constructr   with Apache License 2.0
package de.heikoseeberger.constructr

import akka.actor.{ Actor, ActorLogging, ActorRef, Props, SupervisorStrategy, Terminated }
import akka.cluster.{ Cluster, Member }
import akka.cluster.ClusterEvent.{ InitialStateAsEvents, MemberExited, MemberLeft, MemberRemoved }
import akka.cluster.MemberStatus.Up
import de.heikoseeberger.constructr.coordination.Coordination
import scala.concurrent.duration.{ FiniteDuration, NANOSECONDS }

object Constructr {

  final val Name = "constructr"

  def props: Props =
    Props(new Constructr)
}

final class Constructr private extends Actor with ActorLogging {

  override val supervisorStrategy = SupervisorStrategy.stoppingStrategy

  private val cluster = Cluster(context.system)

  if (cluster.settings.SeedNodes.isEmpty) {
    log.info("Creating constructr-machine, because no seed-nodes defined")
    cluster.subscribe(self,
                      InitialStateAsEvents,
                      classOf[MemberLeft],
                      classOf[MemberExited],
                      classOf[MemberRemoved])
    context.become(active(context.watch(createConstructrMachine())))
  } else {
    log.info("Stopping self, because seed-nodes defined")
    context.stop(self)
  }

  override def receive = Actor.emptyBehavior

  private def active(machine: ActorRef): Receive = {
    case Terminated(`machine`) =>
      val selfAddress = cluster.selfAddress
      def isSelfAndUp(member: Member) =
        member.address == selfAddress && member.status == Up
      if (cluster.state.members.exists(isSelfAndUp)) {
        log.error("Leaving, because constructr-machine terminated!")
        cluster.leave(selfAddress)
      } else {
        log.error("Terminating system, because constructr-machine terminated!")
        context.system.terminate()
      }

    case MemberRemoved(member, _) if member.address == cluster.selfAddress =>
      log.error("Terminating system, because member has been removed!")
      context.system.terminate()
  }

  private def createConstructrMachine() = {
    val config = context.system.settings.config
    def getDuration(key: String) =
      FiniteDuration(config.getDuration(key).toNanos, NANOSECONDS)

    val coordinationTimeout   = getDuration("constructr.coordination-timeout")
    val nrOfRetries           = config.getInt("constructr.nr-of-retries")
    val retryDelay            = getDuration("constructr.retry-delay")
    val refreshInterval       = getDuration("constructr.refresh-interval")
    val ttlFactor             = config.getDouble("constructr.ttl-factor")
    val maxNrOfSeedNodes      = config.getInt("constructr.max-nr-of-seed-nodes")
    val joinTimeout           = getDuration("constructr.join-timeout")
    val abortOnJoinTimeout    = config.getBoolean("constructr.abort-on-join-timeout")
    val ignoreRefreshFailures = config.getBoolean("constructr.ignore-refresh-failures")

    context.actorOf(
      ConstructrMachine.props(
        cluster.selfAddress,
        Coordination(context.system.name, context.system),
        coordinationTimeout,
        nrOfRetries,
        retryDelay,
        refreshInterval,
        ttlFactor,
        if (maxNrOfSeedNodes <= 0) Int.MaxValue else maxNrOfSeedNodes,
        joinTimeout,
        abortOnJoinTimeout,
        ignoreRefreshFailures
      ),
      ConstructrMachine.Name
    )
  }
} 
Example 24
Source File: MQPublisher.scala    From seahorse-workflow-executor   with Apache License 2.0
package io.deepsense.workflowexecutor.rabbitmq

import akka.actor.ActorRef
import com.thenewmotion.akka.rabbitmq.{Channel, ChannelMessage}

import io.deepsense.commons.utils.Logging
import io.deepsense.workflowexecutor.communication.mq.MQSerializer


case class MQPublisher(
    exchange: String,
    messageSerializer: MQSerializer,
    publisherActor: ActorRef)
  extends Logging {

  def publish(topic: String, message: Any): Unit = {
    val data: Array[Byte] = messageSerializer.serializeMessage(message)
    publisherActor ! ChannelMessage(publish(topic, data), dropIfNoChannel = false)
  }

  private def publish(topic: String, data: Array[Byte])(channel: Channel): Unit = {
    channel.basicPublish(exchange, topic, null, data)
  }
} 
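Note the curried private publish: applying only its first parameter list, publish(topic, data), eta-expands to a Channel => Unit function, which is what ChannelMessage carries; the akka-rabbitmq channel actor invokes it once a channel is available, and dropIfNoChannel = false queues the message rather than discarding it while the channel is down.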
Example 25
Source File: RestInterface.scala    From akka-sharding-example   with MIT License
package com.michalplachta.shoesorter.api

import akka.actor.{Actor, ActorLogging, ActorRef}
import akka.io.IO
import akka.pattern.ask
import com.michalplachta.shoesorter.Domain.{Container, Junction}
import com.michalplachta.shoesorter.Messages._
import spray.can.Http
import spray.httpx.SprayJsonSupport._
import spray.routing._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

class RestInterface(decider: ActorRef, exposedPort: Int) extends Actor with HttpServiceBase with ActorLogging {
  val route: Route = {
    path("junctions" / IntNumber / "decisionForContainer" / IntNumber) { (junctionId, containerId) =>
      get {
        complete {
          log.info(s"Request for junction $junctionId and container $containerId")
          val junction = Junction(junctionId)
          val container = Container(containerId)
          decider.ask(WhereShouldIGo(junction, container))(5 seconds).mapTo[Go]
        }
      }
    }
  }

  def receive = runRoute(route)

  implicit val system = context.system
  IO(Http) ! Http.Bind(self, interface = "0.0.0.0", port = exposedPort)
} 
Example 26
Source File: HttpManagementServer.scala    From akka-cluster-manager   with MIT License
package io.orkestra.cluster.management

import java.util.concurrent.atomic.AtomicReference

import akka.Done
import akka.actor.{ActorSystem, ActorRef}
import akka.cluster.Cluster
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.stream.{ActorMaterializer, Materializer}
import io.orkestra.cluster.protocol.Response.{Failure, Success}
import io.orkestra.cluster.routing.ClusterListener._
import akka.pattern.ask
import play.api.libs.json.Json
import scala.concurrent.{Promise, Future, ExecutionContext}
import scala.concurrent.duration._

class HttpManagementServer(clusterListener: ActorRef, hostName: String = "127.0.0.1", port: Int = 33333)(
    implicit
    val system:                ActorSystem,
    implicit val materializer: Materializer,
    implicit val executer:     ExecutionContext
) {

  import PlayJsonSupport._

  def handleOrkestraRequest(req: ManagementReguest) =
    (clusterListener ? req)(3.seconds).map {
      case res: Success =>
        res.httpStatusCode -> res.asJson
      case res: Failure =>
        res.httpStatusCode -> res.asJson
    }

  def orkestraRoutes =
    pathPrefix("orkestra" / "routers") {
      pathEndOrSingleSlash {
        get {
          complete(handleOrkestraRequest(GetRouters))
        }
      } ~
        path(Segment ~ Slash.?) { role =>
          get {
            complete(handleOrkestraRequest(GetRouter(role)))
          }
        } ~
        path(Segment / Remaining ~ Slash.?) { (role, routeePath) =>
          delete {
            complete(handleOrkestraRequest(DeleteRoutee(role, routeePath)))
          }
        }
    }

  private val bindingFuture = new AtomicReference[Future[Http.ServerBinding]]()

  def start() = {
    val serverBindingPromise = Promise[Http.ServerBinding]()
    if (bindingFuture.compareAndSet(null, serverBindingPromise.future)) {
      Http().bindAndHandle(orkestraRoutes, hostName, port)
      println(Console.CYAN + s"cluster http management server online at http://${hostName}:${port}/" + Console.WHITE)
    }
  }

  def shutdown =
    if (bindingFuture.get() == null) {
      Future(Done)
    } else {
      val stopFuture = bindingFuture.get().flatMap(_.unbind()).map(_ => Done)
      bindingFuture.set(null)
      stopFuture
    }

} 
Example 27
Source File: package.scala    From akka-cluster-manager   with MIT License
package io.orkestra.cluster

import akka.actor.ActorRef
import akka.http.scaladsl.model.{StatusCodes, StatusCode}
import play.api.libs.json.{JsString, Format, Json, JsValue}

package object protocol {

  case class Register(member: ActorRef, role: String)
  case class RegisterInternal(member: ActorRef, role: String)

  sealed trait Response {
    def asJson: JsValue
    def httpStatusCode: StatusCode
  }

  object Response {

    trait Success extends Response {
      override val httpStatusCode: StatusCode = StatusCodes.OK
    }

    object Success {

      case class Router(name: String, routees: List[String]) extends Success {
        override val asJson = Json.toJson(this)
      }
      object Router {
        implicit val fmt: Format[Router] = Json.format[Router]
      }

      case class Routers(routers: Iterable[JsValue]) extends Success {
        override val asJson = Json.toJson(routers)
      }

      case class RouteeDeleted(role: String, path: String) extends Success {
        override val asJson = JsString(s"routee: $path with role: $role successfully deleted")
      }

    }

    trait Failure extends Response

    object Failure {
      case class RouterNotFound(role: String) extends Failure {
        override val httpStatusCode: StatusCode = StatusCodes.NotFound
        override val asJson: JsValue = Json.obj("error" -> s"router with role: $role not found")
      }
    }
  }

} 
Example 28
Source File: PluginRegistry.scala    From sumobot   with Apache License 2.0
package com.sumologic.sumobot.core

import akka.actor.{Actor, ActorLogging, ActorRef}
import com.sumologic.sumobot.core.PluginRegistry.{Plugin, PluginList, RequestPluginList}
import com.sumologic.sumobot.plugins.BotPlugin.{PluginAdded, PluginRemoved}

object PluginRegistry {

  case class Plugin(plugin: ActorRef, help: String)

  case object RequestPluginList
  case class PluginList(plugins: Seq[Plugin])
}

class PluginRegistry extends Actor with ActorLogging {

  private var list = List.empty[Plugin]

  override def receive: Receive = {
    case PluginAdded(plugin, help) =>
      val name = plugin.path.name
      log.info(s"Plugin added: $name")
      if (list.exists(_.plugin.path.name == name)) {
        log.error(s"Attempt to register duplicate plugin: $name")
      } else {
        list +:= Plugin(plugin, help)
      }

    case PluginRemoved(plugin) =>
      val name = plugin.path.name
      list = list.filterNot(_.plugin.path.name == name)
      log.info(s"Plugin removed: $name")

    case RequestPluginList =>
      sender() ! PluginList(list)
  }
} 
Example 29
Source File: HttpReceptionist.scala    From sumobot   with Apache License 2.0
package com.sumologic.sumobot.core

import java.time.Instant

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import com.sumologic.sumobot.core.model.PublicChannel
import com.sumologic.sumobot.plugins.BotPlugin.{InitializePlugin, PluginAdded, PluginRemoved}
import play.api.libs.json.{JsObject, JsValue}
import slack.api.RtmStartState
import slack.models.{Channel, Group, Im, Team, User}
import slack.rtm.RtmState

object HttpReceptionist {
  private[core] val DefaultChannel = Channel("C0001SUMO", "sumobot", Instant.now().getEpochSecond(),
    Some("U0001SUMO"), Some(false), Some(true), Some(false), Some(false), Some(true), None, Some(false), Some(false), None, None, None, None, None, None, None, None)
  val DefaultSumoBotChannel = PublicChannel(DefaultChannel.id, DefaultChannel.name)

  val DefaultBotUser = User("U0001SUMO", "sumobot-bot", None, None, None, None, None, None, None, None, None, None, None, None, None, None)
  val DefaultClientUser = User("U0002SUMO", "sumobot-client", None, None, None, None, None, None, None, None, None, None, None, None, None, None)

  private[core] val StateUrl = ""
  private[core] val StateTeam = Team("T0001SUMO", "Sumo Bot", "sumobot", "sumologic.com", 30, false, new JsObject(Map.empty), "std")
  private[core] val StateUsers: Seq[User] = Array(DefaultBotUser, DefaultClientUser)
  private[core] val StateChannels: Seq[Channel] = Array(DefaultChannel)
  private[core] val StateGroups: Seq[Group] = Seq.empty
  private[core] val StateIms: Seq[Im] = Seq.empty
  private[core] val StateBots: Seq[JsValue] = Seq.empty

  private[core] val StartState = RtmStartState(StateUrl, DefaultBotUser, StateTeam, StateUsers, StateChannels, StateGroups, StateIms, StateBots)
  private[core] val State = new RtmState(StartState)
}

class HttpReceptionist(brain: ActorRef) extends Actor with ActorLogging {
  private val pluginRegistry = context.system.actorOf(Props(classOf[PluginRegistry]), "plugin-registry")

  override def receive: Receive = {
    case message@PluginAdded(plugin, _) =>
      plugin ! InitializePlugin(HttpReceptionist.State, brain, pluginRegistry)
      pluginRegistry ! message

    case message@PluginRemoved(_) =>
      pluginRegistry ! message
  }
} 
Example 30
Source File: HttpOutcomingSender.scala    From sumobot   with Apache License 2.0
package com.sumologic.sumobot.http_frontend

import akka.actor.{Actor, ActorLogging, ActorRef}
import akka.http.scaladsl.model.ws.TextMessage
import com.sumologic.sumobot.core.model.OutgoingMessage

class HttpOutcomingSender(publisherRef: ActorRef) extends Actor with ActorLogging {
  override def preStart(): Unit = {
    Seq(classOf[OutgoingMessage]).foreach(context.system.eventStream.subscribe(self, _))
  }

  override def receive: Receive = {
    case OutgoingMessage(_, text, _) =>
      publisherRef ! TextMessage(text)
  }

  override def postStop(): Unit = {
    context.stop(publisherRef)
    context.system.eventStream.unsubscribe(self)
  }
} 
Example 31
Source File: HttpIncomingReceiver.scala    From sumobot   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.sumobot.http_frontend

import java.time.Instant
import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorLogging, ActorRef}
import akka.pattern.pipe
import scala.concurrent.ExecutionContext.Implicits.global
import akka.http.scaladsl.model.ws.TextMessage
import akka.stream.ActorMaterializer
import com.sumologic.sumobot.core.HttpReceptionist
import com.sumologic.sumobot.core.model.{IncomingMessage, UserSender}

import scala.concurrent.duration.Duration

object HttpIncomingReceiver {
  case class StreamEnded()
  private val StrictTimeout = Duration.create(5, TimeUnit.SECONDS)
}

class HttpIncomingReceiver(outcomingRef: ActorRef) extends Actor with ActorLogging {
  private implicit val materializer = ActorMaterializer()

  override def receive: Receive = {
    case streamedMsg: TextMessage.Streamed =>
      streamedMsg.toStrict(HttpIncomingReceiver.StrictTimeout).pipeTo(self)(sender())

    case strictMsg: TextMessage.Strict =>
      val contents = strictMsg.getStrictText
      val incomingMessage = IncomingMessage(contents, true, HttpReceptionist.DefaultSumoBotChannel,
        formatDateNow(), None, Seq.empty, UserSender(HttpReceptionist.DefaultClientUser))
      context.system.eventStream.publish(incomingMessage)

    case HttpIncomingReceiver.StreamEnded =>
      context.stop(outcomingRef)
      context.stop(self)
  }

  private def formatDateNow(): String = {
    s"${Instant.now().getEpochSecond}.000000"
  }
} 
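The receiver above folds streamed WebSocket frames into strict messages with toStrict and pipes the resulting Future back into its own mailbox. The same pipe-to-self pattern in isolation, as a standalone sketch (Line and SelfPiper are illustrative, not sumobot code):

import akka.actor.Actor
import akka.pattern.pipe
import scala.concurrent.Future

case class Line(text: String)

class SelfPiper extends Actor {
  import context.dispatcher // ExecutionContext for the Future and for pipeTo

  override def receive: Receive = {
    case Line(text) =>
      Future(text.toUpperCase).pipeTo(self) // async result re-enters this mailbox
    case shouted: String =>
      println(s"piped result: $shouted")
  }
}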
Example 32
Source File: BlockingBrain.scala    From sumobot   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.sumobot.brain

import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import com.sumologic.sumobot.brain.Brain._

import scala.concurrent.Await
import scala.concurrent.duration._

class BlockingBrain(brain: ActorRef) {

  def retrieve(key: String): Option[String] = {
    implicit val timeout = Timeout(2.seconds)
    Await.result(brain ? Retrieve(key), 2.seconds) match {
      case ValueRetrieved(_, value) => Some(value)
      case ValueMissing(_) => None
    }
  }

  def listValues(prefix: String = ""): Map[String, String] = {
    implicit val timeout = Timeout(2.seconds)
    Await.result(brain ? ListValues(prefix), 2.seconds) match {
      case ValueMap(map) => map
    }
  }

  def store(key: String, value: String): Unit = {
    brain ! Store(key, value)
  }

  def remove(key: String): Unit = {
    brain ! Remove(key)
  }
} 
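BlockingBrain wraps the ask pattern so callers get plain Option/Map results instead of futures. A hypothetical usage sketch, assuming the project's InMemoryBrain actor (any Brain implementation would do):

import akka.actor.{ActorSystem, Props}

object BrainDemo extends App {
  val system = ActorSystem("brain-demo")
  val brainRef = system.actorOf(Props(classOf[InMemoryBrain]), "brain") // assumed Brain actor
  val brain = new BlockingBrain(brainRef)

  brain.store("greeting", "hello")    // fire-and-forget tell
  println(brain.retrieve("greeting")) // blocking ask: Some("hello")

  system.terminate()
}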
Example 33
Source File: SessionWorkflowExecutorActorProvider.scala    From seahorse-workflow-executor   with Apache License 2.0 5 votes vote down vote up
package io.deepsense.workflowexecutor

import scala.concurrent.duration.FiniteDuration

import akka.actor.{ActorContext, ActorRef}

import io.deepsense.commons.utils.Logging
import io.deepsense.deeplang.CommonExecutionContext
import io.deepsense.models.workflows.Workflow

class SessionWorkflowExecutorActorProvider(
  executionContext: CommonExecutionContext,
  workflowManagerClientActor: ActorRef,
  heartbeatPublisher: ActorRef,
  notebookTopicPublisher: ActorRef,
  workflowManagerTimeout: Int,
  publisher: ActorRef,
  sessionId: String,
  heartbeatInterval: FiniteDuration
) extends Logging {
  def provide(context: ActorContext, workflowId: Workflow.Id): ActorRef = {
    context.actorOf(
      SessionWorkflowExecutorActor.props(
        executionContext,
        workflowManagerClientActor,
        publisher,
        heartbeatPublisher,
        notebookTopicPublisher,
        workflowManagerTimeout,
        sessionId,
        heartbeatInterval),
      workflowId.toString)
  }
} 
Example 34
Source File: RetryActor.scala    From seahorse-workflow-executor   with Apache License 2.0 5 votes vote down vote up
package io.deepsense.commons.utils

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.FiniteDuration
import scala.util.{Failure, Success, Try}

import akka.actor.{Actor, ActorRef, Status}

class RetryActor[T](
    retryInterval: FiniteDuration,
    retryCountLimit: Int,
    workCode: => Future[T],
    workDescription: Option[String]) extends Actor
    with Logging {

  import RetryActor._

  private implicit val ec: ExecutionContext = context.system.dispatcher

  override def receive: Receive = {
    case Trigger => doWork(sender, 0)
    case Retry(initialSender, retryCount) => doWork(initialSender, retryCount)
  }

  val workDescriptionForLogs: String = workDescription.map(" " + _).getOrElse(" some work")

  private def doWork(initialSender: ActorRef, retryCount: Int): Unit = {
    workCode.onComplete {
      case Success(t) => initialSender ! t
      case Failure(RetriableException(msg, cause)) if retryCount < retryCountLimit =>
        logFailure(msg, cause)
        logger.info(s"Will retry$workDescriptionForLogs in $retryInterval.")
        context.system.scheduler.scheduleOnce(retryInterval, self, Retry(initialSender, retryCount + 1))
      case Failure(RetriableException(msg, cause)) if retryCount >= retryCountLimit =>
        logFailure(msg, cause)
        val retryLimitReachedException =
          RetryLimitReachedException(s"Retry limit of $retryCountLimit reached, last error was $cause", cause)
        logger.error(s"Retry limit reached for$workDescriptionForLogs.", retryLimitReachedException)
        initialSender ! Status.Failure(retryLimitReachedException)
      case Failure(f) =>
        logFailure(f.getMessage, Some(f))
        logger.error(s"Unexpected exception when performing$workDescriptionForLogs.", f)
        initialSender ! Status.Failure(f)
    }
  }

  private def logFailure(msg: String, tOpt: Option[Throwable]): Unit = {
    val msgText = s"Exception when performing$workDescriptionForLogs. The message was: $msg"
    tOpt match {
      case Some(t) => logger.info(msgText, t)
      case None => logger.info(msgText)
    }
  }
}

object RetryActor {
  sealed trait Message
  case object Trigger extends Message
  case class Retry(initialSender: ActorRef, retryCount: Int) extends Message

  case class RetryLimitReachedException(msg: String, lastError: Option[Throwable]) extends Exception(msg)
  case class RetriableException(msg: String, cause: Option[Throwable]) extends Exception(msg, cause.orNull)

} 
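Driving the actor amounts to sending Trigger and handling either the unwrapped result or a Status.Failure. A minimal wiring sketch, assuming RetryActor is on the classpath; RetryDemo and flakyCall are illustrative names:

import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.Random

object RetryDemo extends App {
  implicit val system: ActorSystem = ActorSystem("retry-demo")
  import system.dispatcher
  implicit val timeout: Timeout = Timeout(30.seconds)

  // transiently failing work: roughly half the calls throw a RetriableException
  def flakyCall(): Future[String] = Future {
    if (Random.nextDouble() < 0.5) throw RetryActor.RetriableException("transient failure", None)
    "ok"
  }

  // workCode is by-name, so flakyCall() is re-evaluated on every retry
  val retrier = system.actorOf(Props(new RetryActor[String](
    1.second, 3, flakyCall(), Some("calling a flaky service"))))

  (retrier ? RetryActor.Trigger).mapTo[String].foreach(r => println(s"got: $r"))
}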
Example 35
Source File: RestServiceActors.scala    From kafka-with-akka-streams-kafka-streams-tutorial   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.scala.akkastream.queryablestate.actors

import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives.{complete, get, onSuccess, path}
import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.lightbend.scala.akkastream.modelserver.actors.{GetModels, GetModelsResult, GetState}
import com.lightbend.scala.modelServer.model.ModelToServeStats
import de.heikoseeberger.akkahttpjackson.JacksonSupport

import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration._


object RestServiceActors {

  // See http://localhost:5500/models
  // Then select a model shown and try http://localhost:5500/state/<model>, e.g., http://localhost:5500/state/wine
  def startRest(modelserver: ActorRef)(implicit system: ActorSystem, materializer: ActorMaterializer): Unit = {

    implicit val executionContext: ExecutionContextExecutor = system.dispatcher
    // Use with HTTP methods that accept an implicit timeout argument
    // implicit val timeout = Timeout(10.seconds)
    val host = "127.0.0.1"
    val port = 5500
    val routes: Route = QueriesAkkaHttpResource.storeRoutes(modelserver)

    Http().bindAndHandle(routes, host, port) map
      { binding => println(s"Starting models observer at ${binding.localAddress}") } recover {
      case ex =>
        println(s"Models observer could not bind to $host:$port - ${ex.getMessage}")
    }
  }
}

object QueriesAkkaHttpResource extends JacksonSupport {

  implicit val askTimeout: Timeout = Timeout(30.seconds)

  def storeRoutes(modelserver: ActorRef): Route =
    get {
      path("state"/Segment) { datatype =>
        onSuccess(modelserver ? GetState(datatype)) {
          case info: ModelToServeStats =>
            complete(info)
        }
      } ~
        path("models") {
          onSuccess(modelserver ? GetModels()) {
            case models: GetModelsResult =>
              complete(models)
          }
        }
    }
} 
Example 36
Source File: MQSubscriber.scala    From seahorse-workflow-executor   with Apache License 2.0 5 votes vote down vote up
package io.deepsense.workflowexecutor.rabbitmq

import akka.actor.ActorRef
import com.thenewmotion.akka.rabbitmq.{BasicProperties, Channel, DefaultConsumer, Envelope}

import io.deepsense.commons.serialization.Serialization
import io.deepsense.commons.utils.Logging
import io.deepsense.workflowexecutor.communication.mq.MQDeserializer

case class MQSubscriber(
  subscriberActor: ActorRef,
  mqMessageDeserializer: MQDeserializer,
  channel: Channel
) extends DefaultConsumer(channel)
    with Logging
    with Serialization {

  override def handleDelivery(
      consumerTag: String,
      envelope: Envelope,
      properties: BasicProperties,
      body: Array[Byte]): Unit = {
    try {
      subscriberActor ! mqMessageDeserializer.deserializeMessage(body)
    } catch {
      case e: Exception => logger.error("Message deserialization failed", e)
    }
  }
} 
Example 37
Source File: ParallelWork.scala    From hyperspark   with Apache License 2.0 5 votes vote down vote up
package pfsp.parallel;

import it.polimi.hyperh.solution.Solution
import it.polimi.hyperh.solution.EvaluatedSolution
import pfsp.problem.PfsProblem
import scala.util.Random
import akka.actor.Actor
import akka.actor.Props
import akka.event.Logging
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.routing.RoundRobinPool
import pfsp.solution.PfsSolution
import pfsp.solution.PfsEvaluatedSolution

object ParallelWork extends App {
  override def main(args: Array[String]) {
    
    def calculate(p:PfsProblem, evOldSolution:PfsEvaluatedSolution, nrOfWorkers: Int, sizeOfNeighbourhood: Int) {
      // Create an Akka system
      val system = ActorSystem("ParallelSystem")

      // create the result listener, which will print the result and
      // shut down the system
      val listener = system.actorOf(Props[Listener], name = "listener")

      // create the master
      val master = system.actorOf(Props(new Master(p, evOldSolution, nrOfWorkers, sizeOfNeighbourhood, listener)),
        name = "master")

      // start the calculation
      master ! Calculate
    }
    val p = PfsProblem.fromResources("inst_ta001.txt")
    val permutationList = Random.shuffle(p.jobs.toList)
    val oldSolution = PfsSolution(permutationList)
    var evOldSolution = p.evaluate(oldSolution).asInstanceOf[PfsEvaluatedSolution]
    calculate(p, evOldSolution, 7, 300)
  }
  case object Calculate
  case class Work(p: PfsProblem, solution: PfsSolution, initEndTimesMatrix: Array[Array[Int]])
  case class SingleResult(evSolution: EvaluatedSolution)
  case class FinalResult(evSolution: EvaluatedSolution, startMillis: Long)
}
class Worker extends Actor {
  import ParallelWork._
  def receive = {
    case Work(p, solution, initEndTimesMatrix) =>
      val evSolution = p.evaluatePartialSolution(solution.permutation)
      sender ! SingleResult(evSolution)
  }
}
class Listener extends Actor {
  import ParallelWork._
  override def receive = {
    case FinalResult(evSolution, duration) =>
      println("bestSolution: " + evSolution + " millis: " + duration)
      context.system.shutdown()
  }
}
class Master(p: PfsProblem,
             evOldSolution: PfsEvaluatedSolution,
             nrOfWorkers: Int,
             sizeOfNeighbourhood: Int,
             listener: ActorRef) extends Actor {
  import ParallelWork._
  var nrOfResults: Int = 0
  val startMillis: Long = System.currentTimeMillis
  val initEndTimesMatrix = p.jobsInitialTimes()
  var bestSolution: EvaluatedSolution = evOldSolution
  val workerRouter = context.actorOf(
    Props[Worker].withRouter(RoundRobinPool(nrOfWorkers)), name = "workerRouter")

  override def receive = {
    case Calculate =>
      for (i <- 0 until sizeOfNeighbourhood)
        workerRouter ! Work(p, PfsSolution(Random.shuffle(p.jobs.toList)), initEndTimesMatrix)
    case SingleResult(evNewSolution) =>
      nrOfResults += 1
      bestSolution = List(evNewSolution, bestSolution).min
      if (nrOfResults == sizeOfNeighbourhood) {
        // Send the result to the listener
        listener ! FinalResult(bestSolution, System.currentTimeMillis - startMillis)
        // Stops this actor and all its supervised children
        context.stop(self)
      }
  }
} 
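The master fans work out through a round-robin router, so sizeOfNeighbourhood messages are shared among nrOfWorkers routees. The routing in isolation, as a sketch (EchoWorker is illustrative):

import akka.actor.{Actor, ActorSystem, Props}
import akka.routing.RoundRobinPool

class EchoWorker extends Actor {
  def receive = { case msg => println(s"${self.path.name} handled $msg") }
}

object RouterDemo extends App {
  val system = ActorSystem("RouterDemo")
  // one router ref in front of five routees; messages are dealt out in turn
  val router = system.actorOf(RoundRobinPool(5).props(Props[EchoWorker]), "workerRouter")
  (1 to 10).foreach(router ! _)
}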
Example 38
Source File: Reducer.scala    From Scalaprof   with GNU General Public License v2.0 5 votes vote down vote up
package edu.neu.coe.scala.mapreduce

import akka.actor.{ Actor, ActorLogging, ActorRef }
//import scala.collection.mutable.HashMap
import scala.util._


class Reducer_Fold[K2,V2,V3](g: (V3,V2)=>V3, z: =>V3) extends ReducerBase[K2,V2,V3] {  
  def getValue(vs: Seq[V2]): V3 = vs.foldLeft(z)(g)
}

abstract class ReducerBase[K2,V2,V3] extends Actor with ActorLogging {
  
  override def receive = {
    case i: Intermediate[K2,V2] =>
      log.info(s"received $i")
      log.debug(s"with elements ${i.vs}")
      sender ! (i.k, Master.sequence(Try(getValue(i.vs))))
    case q =>
      log.warning(s"received unknown message type: $q")
  }
  
  override def postStop(): Unit = {
    log.debug("has shut down")
  }
  
  def getValue(vs: Seq[V2]): V3
}


case class Intermediate[K2, V2](k: K2, vs: Seq[V2]) {
  override def toString = s"Intermediate: with k=$k and ${vs.size} elements"
} 
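Reducer_Fold's getValue is simply a left fold of the intermediate values with combiner g and zero z; for instance, with V2 = V3 = Int:

val g: (Int, Int) => Int = _ + _
val z = 0
val folded = Seq(1, 2, 3, 4).foldLeft(z)(g) // 10, what a Reducer_Fold[String, Int, Int](g, z) would compute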
Example 39
Source File: Mapper.scala    From Scalaprof   with GNU General Public License v2.0 5 votes vote down vote up
package edu.neu.coe.scala.mapreduce

import akka.actor.{ Actor, ActorLogging, ActorRef }
import scala.collection.mutable.HashMap
import scala.util._


class Mapper_Forgiving[K1,V1,K2,V2](f: (K1,V1)=>(K2,V2)) extends Mapper[K1,V1,K2,V2](f) {
  
  override def prepareReply(v2k2ts: Seq[Try[(K2,V2)]]) = {
    val v2sK2m = HashMap[K2,Seq[V2]]() // mutable map: key -> accumulated values
    var xs = Seq[Throwable]() // accumulates failures; a var so that appends are retained
    for (v2k2t <- v2k2ts; v2k2e = Master.sequence(v2k2t))
      v2k2e match {
        case Right((k2,v2)) => v2sK2m put(k2, v2+:(v2sK2m get(k2) getOrElse(Nil)))
        case Left(x) => xs = xs :+ x
      }
    (v2sK2m.toMap, xs)
  }
}

case class Incoming[K, V](m: Seq[(K,V)]) {
  override def toString = s"Incoming: with ${m.size} elements"
}

object Incoming {
  def sequence[K,V](vs: Seq[V]): Incoming[K,V] = Incoming((vs zip Stream.continually(null.asInstanceOf[K])).map{_.swap})
  def map[K, V](vKm: Map[K,V]): Incoming[K,V] = Incoming(vKm.toSeq)
}

object Mapper {
} 
Example 40
Source File: CalculatorSpec.scala    From Scalaprof   with GNU General Public License v2.0 5 votes vote down vote up
package actors

import org.scalatest._
import akka.testkit.TestActorRef
import scala.concurrent.duration._
import scala.concurrent.Await
import akka.pattern.ask
import scala.util._
import scala.io.Source
import scala.concurrent._
import scala.concurrent.duration._
import com.typesafe.config.{ ConfigFactory, Config }
import akka.actor.{ Actor, ActorSystem, Props, ActorRef }
import akka.util.Timeout
import java.net.URL
import org.scalatest.concurrent._
import org.scalatest._
import org.scalatest.time._
import edu.neu.coe.scala.numerics.Rational
import models._


class CalculatorSpec extends FlatSpec with Matchers with Futures with ScalaFutures with Inside {
  implicit val system = ActorSystem("CountWords")  
  import play.api.libs.concurrent.Execution.Implicits.defaultContext
  implicit val timeout: Timeout = Timeout(10 seconds)

  "Rational Calculator" should "yield empty list for /" in {
      val lookup: String=>Option[Rational] = RationalMill.constants.get _
      val conv: String=>Try[Rational] = RationalMill.valueOf _
      val parser = new ExpressionParser[Rational](conv,lookup)
      val mill: Mill[Rational] = RationalMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xsf = (taf ? View).mapTo[Seq[Rational]]
      val nf = xsf map { case xs => xs.size }
      whenReady(nf, timeout(Span(6, Seconds))) { case 0 => }
  }
  it should "yield 1 for 1" in {
      val lookup: String=>Option[Rational] = RationalMill.constants.get _
      val conv: String=>Try[Rational] = RationalMill.valueOf _
      val parser = new ExpressionParser[Rational](conv,lookup)
      val mill: Mill[Rational] = RationalMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xtf = (taf ? "1").mapTo[Try[Rational]]
      whenReady(xtf, timeout(Span(6, Seconds))) { case Success(Rational(1,1)) => }
  }
  it should "yield 1 when given floating point problem" in {
      val lookup: String=>Option[Rational] = RationalMill.constants.get _
      val conv: String=>Try[Rational] = RationalMill.valueOf _
      val parser = new ExpressionParser[Rational](conv,lookup)
      val mill: Mill[Rational] = RationalMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xtf = (taf ? "0.2 0.1 + 10 * 3 /").mapTo[Try[Rational]]
      whenReady(xtf, timeout(Span(6, Seconds))) { case Success(Rational(1,1)) => }
  }
  "Double Calculator" should "yield empty list for /" in {
      val lookup: String=>Option[Double] = DoubleMill.constants.get _
      val conv: String=>Try[Double] = DoubleMill.valueOf _
      val parser = new ExpressionParser[Double](conv,lookup)
      val mill: Mill[Double] = DoubleMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xsf = (taf ? View).mapTo[Seq[Double]]
      val nf = xsf map { case xs => xs.size }
      whenReady(nf, timeout(Span(6, Seconds))) { case 0 => }
  }
  
  // This test suffers from a very peculiar bug which might even be a bug
  // in the Scala compiler. Kudos to you if you can fix it!!
  ignore should "yield 1 for 1" in {
      val lookup: String=>Option[Double] = DoubleMill.constants.get _
      val conv: String=>Try[Double] = DoubleMill.valueOf _
      val parser = new ExpressionParser[Double](conv,lookup)
      val mill: Mill[Double] = DoubleMill()
      val props = Props(new Calculator(mill,parser))
      val taf = TestActorRef(props)
      val xtf = (taf ? "1").mapTo[Try[Double]]
      whenReady(xtf, timeout(Span(6, Seconds))) { case Success(1.0) => }
  }
} 
Example 41
Source File: Application.scala    From Scalaprof   with GNU General Public License v2.0 5 votes vote down vote up
package controllers

import play.api._
import play.api.mvc._
import akka.actor.{ActorSystem, Props}
import akka.util.Timeout
import akka.pattern.ask
import scala.concurrent._
import scala.concurrent.duration._
import scala.util._
import edu.neu.coe.scala.numerics.Rational
import akka.actor.ActorRef
import com.typesafe.config.{ ConfigFactory, Config }
import actors._
import models._
import spire.math.Real

class Application extends Controller {
  
  val config = ConfigFactory.load()
  val which = config.getString("calculator")
  
  import play.api.libs.concurrent.Execution.Implicits.defaultContext
  implicit val timeout: Timeout = Timeout(10 seconds)
  implicit val system = ActorSystem("RPN-Calculator")
  val setup = which match {
    case "rational" => Application.getSetupForRational
    case "double" => Application.getSetupForDouble
    case "spire" => Application.getSetupForSpire
    case _ => Console.err.println(s"Unsupported calculator type: $which"); Application.getSetupForRational
  }
  val calculator = system.actorOf(setup._1, setup._2)
  val name: String = setup._3
  println(s"$name is ready")

  def index() = Action.async {
    val xsf = (calculator ? View).mapTo[Seq[_]]
    xsf map {
      case xs => Ok(s"$name: calculator has the following elements (starting with top): $xs")
    }
  }

  def command(s: String) = Action.async {
    val xtf = (calculator ? s).mapTo[Try[_]] 
    xtf map {
      case Success(x) => Ok(s"""$name: you have entered "$s" and got back $x""")
      case Failure(e) => if (s=="clr") Ok(s"$name: cleared") else Ok(s"""$name: you entered "$s" which caused error: $e""")
//      case Failure(e) => if (s=="clr") redirect("/") else  Ok(s"""$name: you entered "$s" which caused error: $e""")
    }
  }
}

object Application {
  // TODO move these to model classes
  def getSetupForDouble(implicit system: ActorSystem) = {
    implicit val lookup: String=>Option[Double] = DoubleMill.constants.get _
    implicit val conv: String=>Try[Double] = DoubleMill.valueOf _
    implicit val parser = new ExpressionParser[Double](conv,lookup)
    val mill = DoubleMill()
    // Note: the following pattern should NOT be used within an actor
    val props = Props(new Calculator(mill,parser))
    // TODO for these methods, return mill and parser instead of props
    (props,"doubleCalculator","Double Calculator")
  }
  // CONSIDER This assumes that we have Rational in our classpath already.
  // I'd like to try the possibility of dynamically loading the Rational stuff.
  // But, that's going to be very tricky, so we'll leave it for now.
  def getSetupForRational(implicit system: ActorSystem) = {
    implicit val lookup: String=>Option[Rational] = RationalMill.constants.get _
    implicit val conv: String=>Try[Rational] = RationalMill.valueOf _
    implicit val parser = new ExpressionParser[Rational](conv,lookup)
    val mill = RationalMill()
    // Note: the following pattern should NOT be used within an actor
    val props = Props(new Calculator(mill,parser))
    (props,"rationalCalculator","Rational Calculator")
  }
  // CONSIDER This assumes that we have Spire in our classpath already.
  def getSetupForSpire(implicit system: ActorSystem) = {
    import spire.implicits._
    import spire.math._
    implicit val lookup: String=>Option[Real] = SpireMill.constants.get _
    implicit val conv: String=>Try[Real] = SpireMill.valueOf _
    implicit val parser = new ExpressionParser[Real](conv,lookup)
    val mill = SpireMill()
    // Note: the following pattern should NOT be used within an actor
    val props = Props(new Calculator(mill,parser))
    (props,"spireCalculator","Spire Calculator")
  }
} 
Example 42
Source File: Calculator.scala    From Scalaprof   with GNU General Public License v2.0 5 votes vote down vote up
package actors

import akka.actor.{ Actor, ActorLogging, ActorRef }
import scala.util._
import models._


class Calculator[A](mill: Mill[A], parser: ExpressionParser[A]) extends Actor with ActorLogging {
  
  override def receive = {
    case View => sender ! mill.toSeq
    case x: String =>
      log.info(s"we got $x")
      try {
        val response = mill.parse(x)(parser)
        log.info(s"response: $response")
        sender ! response
      }
      catch {
        case t: Throwable => println("should never hit this line"); log.error(t, "logic error: should never log this issue")
      }
    case z =>
      log.warning(s"received unknown message type: $z")
  }
}

object View 
Example 43
Source File: PortfolioSpec.scala    From Scalaprof   with GNU General Public License v2.0 5 votes vote down vote up
package com.phasmid.hedge_fund.portfolio

import org.scalatest.{ WordSpecLike, Matchers, BeforeAndAfterAll, Inside }
import akka.actor.{ ActorSystem, Actor, Props, ActorRef }
import akka.testkit._
import scala.concurrent.duration._
import org.scalatest.Inside
import akka.actor.actorRef2Scala
import com.phasmid.hedge_fund.HedgeFund
import com.phasmid.hedge_fund.actors._
import com.typesafe.config.ConfigFactory
import com.phasmid.hedge_fund.model.GoogleOptionModel


class PortfolioSpec(_system: ActorSystem) extends TestKit(_system) with ImplicitSender
    with WordSpecLike with Matchers with Inside with BeforeAndAfterAll {

  def this() = this(ActorSystem("MockPortfolioBlackboard"))

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }
  
  "read portfolio" in {
    val config = ConfigFactory.load
    val portfolio = HedgeFund.getPortfolio(config)
    portfolio.name shouldEqual "Test Portfolio"
    println(s"portfolio: $portfolio")
  }

  "send back" in {
    val model = new GoogleOptionModel()
    val blackboard = system.actorOf(Props.create(classOf[MockPortfolioBlackboard], testActor), "blackboard")
    blackboard ! CandidateOption(model, "XX375", true, Map("strike" -> "45.2"), Map("underlying_id" -> "1234", "Sharpe" -> 0.45, "EV" -> 37132000000.0, "EBITDA" -> 3046000000.0))
    val confirmationMsg = expectMsgClass(3.seconds, classOf[Confirmation])
    println("confirmation msg received: " + confirmationMsg)
    inside(confirmationMsg) {
      case Confirmation(id, model, details) =>
        println(s"confirmation1 details: $details")
        id shouldEqual "XX375"
        blackboard ! KnowledgeUpdate(model, "XX", Map("id" -> "1234"))
        val confirmationMsg2 = expectMsgClass(3.seconds, classOf[Confirmation])
        println("confirmation msg2 received: " + confirmationMsg2)
        // Note that the key "id" is in the model for symbols, not options
        blackboard ! OptionQuery("id", "1234")
        val responseMsg = expectMsgClass(3.seconds, classOf[QueryResponse])
        println("msg received: " + responseMsg)
        inside(responseMsg) {
          case QueryResponseValid(symbol, attributes) =>
            symbol shouldEqual "XX"
            println(s"attributes: $attributes")
        }
    }
  }
}

class MockPortfolioBlackboard(testActor: ActorRef) extends Blackboard(
  Map(
    classOf[KnowledgeUpdate] -> "marketData",
    classOf[SymbolQuery] -> "marketData",
    classOf[OptionQuery] -> "marketData",
    classOf[CandidateOption] -> "optionAnalyzer",
    classOf[PortfolioUpdate] -> "updateLogger",
    classOf[Confirmation] -> "updateLogger"),
  Map(
    "marketData" -> classOf[MarketData],
    "optionAnalyzer" -> classOf[OptionAnalyzer],
    "updateLogger" -> classOf[UpdateLogger])) {

  override def receive =
    {
      case msg: Confirmation => testActor forward msg
      case msg: QueryResponse => testActor forward msg
      case msg => super.receive(msg)
    }
} 
Example 44
Source File: OptionAnalyzerSpec.scala    From Scalaprof   with GNU General Public License v2.0 5 votes vote down vote up
package com.phasmid.hedge_fund.actors

import org.scalatest.{ WordSpecLike, Matchers, BeforeAndAfterAll, Inside }
import akka.actor.{ ActorSystem, Actor, Props, ActorRef }
import akka.testkit._
import scala.concurrent.duration._
import org.scalatest.Inside
import akka.actor.actorRef2Scala
import com.phasmid.hedge_fund.model._


class OptionAnalyzerSpec(_system: ActorSystem) extends TestKit(_system) with ImplicitSender
    with WordSpecLike with Matchers with Inside with BeforeAndAfterAll {

  def this() = this(ActorSystem("OptionAnalyzerSpec"))

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }

  "send back" in {
    val model = new GoogleOptionModel()
    val blackboard = system.actorOf(Props.create(classOf[MockAnalyzerBlackboard], testActor), "blackboard")
    blackboard ! CandidateOption(model, "XX375", true, Map("strike" -> "54.2"), Map("underlying_id" -> "1234", "Sharpe" -> 0.45, "EV" -> "37.132B", "EBITDA" -> "3.046B"))
    val confirmationMsg = expectMsgClass(3.seconds, classOf[Confirmation])
    println("confirmation msg received: " + confirmationMsg)
    inside(confirmationMsg) {
      case Confirmation(id, model, details) =>
        println(s"confirmation1 details: $details")
        id shouldEqual "XX375"
        blackboard ! KnowledgeUpdate(model, "XX", Map("id" -> "1234"))
        val confirmationMsg2 = expectMsgClass(3.seconds, classOf[Confirmation])
        println("confirmation msg2 received: " + confirmationMsg2)
        // Note that the key "id" is in the model for symbols, not options
        blackboard ! OptionQuery("id", "1234")
        val responseMsg = expectMsgClass(3.seconds, classOf[QueryResponseValid])
        println("msg received: " + responseMsg)
        inside(responseMsg) {
          case QueryResponseValid(symbol, attributes) =>
            symbol shouldEqual "XX"
            println(s"attributes: $attributes")
        }
    }
  }
}

class MockAnalyzerBlackboard(testActor: ActorRef) extends Blackboard(
  Map(
    classOf[KnowledgeUpdate] -> "marketData",
    classOf[SymbolQuery] -> "marketData",
    classOf[OptionQuery] -> "marketData",
    classOf[CandidateOption] -> "optionAnalyzer",
    classOf[Confirmation] -> "updateLogger"),
  Map(
    "marketData" -> classOf[MarketData],
    "optionAnalyzer" -> classOf[OptionAnalyzer],
    "updateLogger" -> classOf[UpdateLogger])) {

  override def receive =
    {
      case msg: Confirmation => testActor forward msg
      case msg: QueryResponseValid => testActor forward msg
      case msg => super.receive(msg)
    }
} 
Example 45
Source File: JsonYQLParserSpec.scala    From Scalaprof   with GNU General Public License v2.0 5 votes vote down vote up
package com.phasmid.hedge_fund.actors

import akka.actor.{ ActorSystem, Actor, Props, ActorRef }
import akka.testkit._
import org.scalatest.{ WordSpecLike, Matchers, BeforeAndAfterAll }
import scala.io.Source
import scala.concurrent.duration._
import spray.http._
import spray.http.MediaTypes._
import org.scalatest.Inside
import scala.language.postfixOps
import spray.http.ContentType.apply


class JsonYQLParserSpec(_system: ActorSystem) extends TestKit(_system) with ImplicitSender
    with WordSpecLike with Matchers with Inside with BeforeAndAfterAll {

  def this() = this(ActorSystem("JsonYQLParserSpec"))

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }

  import scala.language.postfixOps
  val json = Source.fromFile("src/test/resources/yqlExample.json") mkString

  "json conversion" in {
    val body = HttpEntity(MediaTypes.`application/json`, json.getBytes())
    val ok = JsonYQLParser.decode(body) match {
      case Right(x) =>
        val count = x.query.count
        count should equal(4)
        x.query.results.quote.length should equal(count)
        x.query.results.get(count - 1, "symbol") should matchPattern { case Some("MSFT") => }

      case Left(x) =>
        fail("decoding error: " + x)
    }
  }

  "send back" in {
    val blackboard = system.actorOf(Props.create(classOf[MockYQLBlackboard], testActor), "blackboard")
    val entityParser = _system.actorOf(Props.create(classOf[EntityParser], blackboard), "entityParser")
    val entity = HttpEntity(MediaTypes.`application/json`, json.getBytes())
    entityParser ! EntityMessage("json:YQL", entity)
    val msg = expectMsgClass(3.seconds, classOf[QueryResponseValid])
    println("msg received: " + msg)
    msg should matchPattern {
      case QueryResponseValid("MSFT", _) =>
    }
    inside(msg) {
      case QueryResponseValid(symbol, attributes) => attributes.get("Ask") should matchPattern { case Some("46.17") => }
    }
  }

}

import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import scala.concurrent.Await
import com.phasmid.hedge_fund.model.Model

class MockYQLUpdateLogger(blackboard: ActorRef) extends UpdateLogger(blackboard) {
  override def processStock(identifier: String, model: Model) = {
    model.getKey("price") match {
      case Some(p) => {
        // sender is the MarketData actor
        val future = sender ? SymbolQuery(identifier, List(p))
        val result = Await.result(future, timeout.duration).asInstanceOf[QueryResponseValid]
        result.attributes map {
          case (k, v) =>
            log.info(s"$identifier attribute $k has been updated to: $v")
            blackboard ! result
        }
      }
      case None => log.warning(s"'price' not defined in model")
    }
  }
}

class MockYQLBlackboard(testActor: ActorRef) extends Blackboard(
  Map(
    classOf[KnowledgeUpdate] -> "marketData",
    classOf[SymbolQuery] -> "marketData",
    classOf[OptionQuery] -> "marketData",
    classOf[CandidateOption] -> "optionAnalyzer",
    classOf[Confirmation] -> "updateLogger"),
  Map(
    "marketData" -> classOf[MarketData],
    "optionAnalyzer" -> classOf[OptionAnalyzer],
    "updateLogger" -> classOf[MockYQLUpdateLogger])) {

  override def receive =
    {
      case msg: Confirmation => msg match {
        // Cut down on the volume of messages
        case Confirmation("MSFT", _, _) => super.receive(msg)
        case _ =>
      }
      case msg: QueryResponseValid => testActor forward msg

      case msg => super.receive(msg)
    }
} 
Example 46
Source File: NotificationProvider.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.common.notification

import akka.actor.{ AbstractLoggingActor, Actor, ActorRef }
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

trait NotificationProvider {
  def message(notification: Notification): String

  def info(notification: Notification)

  def reportException(notification: Notification): Exception

  def throwException(notification: Notification) = throw reportException(notification)
}

trait LoggingNotificationProvider extends NotificationProvider {
  this: MessageResolverProvider ⇒

  private val logger = Logger(LoggerFactory.getLogger(classOf[Notification]))

  def message(notification: Notification) = messageResolver.resolve(notification)

  def info(notification: Notification) = logger.info(message(notification))

  def reportException(notification: Notification): Exception = {
    val msg = message(notification)
    logger.error(msg)

    notification match {
      case error: ErrorNotification ⇒ error.reason match {
        case reason: Throwable ⇒ logger.error(reason.getMessage, reason)
        case reason            ⇒ logger.error(reason.toString)
      }
      case _ ⇒
    }

    NotificationErrorException(notification, msg)
  }
}

trait ActorNotificationProvider extends NotificationProvider {
  this: Actor with MessageResolverProvider ⇒

  protected val notificationActor: ActorRef

  def message(notification: Notification) = messageResolver.resolve(notification)

  def info(notification: Notification) = {
    notificationActor ! Info(notification, message(notification))
  }

  def reportException(notification: Notification): Exception = {
    val msg = message(notification)
    notificationActor ! Error(notification, msg)
    NotificationErrorException(notification, msg)
  }
}

trait ActorLoggingNotificationProvider extends NotificationProvider {
  this: AbstractLoggingActor with MessageResolverProvider ⇒

  protected val notificationActor: ActorRef

  def message(notification: Notification) = messageResolver.resolve(notification)

  def info(notification: Notification) = {
    val msg = message(notification)
    log.info(msg)
    notificationActor ! Info(notification, msg)
  }

  def reportException(notification: Notification): Exception = {
    val msg = message(notification)
    log.error(msg)
    notificationActor ! Error(notification, msg)
    NotificationErrorException(notification, msg)
  }
} 
Example 47
Source File: HeartbeatClientSpec.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.client.socket

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.{TestProbe, ImplicitSender, TestKit}
import org.apache.toree.communication.ZMQMessage
import org.apache.toree.kernel.protocol.v5.client.ActorLoader
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, FunSpecLike}
import org.mockito.Matchers._
import org.mockito.Mockito._

class HeartbeatClientSpec extends TestKit(ActorSystem("HeartbeatActorSpec"))
  with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {

  describe("HeartbeatClientActor") {
    val socketFactory = mock[SocketFactory]
    val mockActorLoader = mock[ActorLoader]
    val probe : TestProbe = TestProbe()
    when(socketFactory.HeartbeatClient(any(classOf[ActorSystem]), any(classOf[ActorRef]))).thenReturn(probe.ref)

    val heartbeatClient = system.actorOf(Props(
      classOf[HeartbeatClient], socketFactory, mockActorLoader, true
    ))

    describe("send heartbeat") {
      it("should send ping ZMQMessage") {
        heartbeatClient ! HeartbeatMessage
        probe.expectMsgClass(classOf[ZMQMessage])
      }
    }
  }
} 
Example 48
Source File: ShellClientSpec.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.client.socket

import java.util.UUID

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.{TestProbe, ImplicitSender, TestKit}
import org.apache.toree.communication.ZMQMessage
import org.apache.toree.communication.security.SecurityActorType
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.kernel.protocol.v5.client.ActorLoader
import org.apache.toree.kernel.protocol.v5.content.ExecuteRequest
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, FunSpecLike}
import org.mockito.Mockito._
import org.mockito.Matchers._
import play.api.libs.json.Json

class ShellClientSpec extends TestKit(ActorSystem("ShellActorSpec"))
  with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {
  private val SignatureEnabled = true

  describe("ShellClientActor") {
    val socketFactory = mock[SocketFactory]
    val mockActorLoader = mock[ActorLoader]
    val probe : TestProbe = TestProbe()
    when(socketFactory.ShellClient(
      any(classOf[ActorSystem]), any(classOf[ActorRef])
    )).thenReturn(probe.ref)

    val signatureManagerProbe = TestProbe()
    doReturn(system.actorSelection(signatureManagerProbe.ref.path.toString))
      .when(mockActorLoader).load(SecurityActorType.SignatureManager)

    val shellClient = system.actorOf(Props(
      classOf[ShellClient], socketFactory, mockActorLoader, SignatureEnabled
    ))

    describe("send execute request") {
      it("should send execute request") {
        val request = ExecuteRequest(
          "foo", false, true, UserExpressions(), true
        )
        val header = Header(
          UUID.randomUUID().toString, "spark",
          UUID.randomUUID().toString, MessageType.Incoming.ExecuteRequest.toString,
          "5.0"
        )
        val kernelMessage = KernelMessage(
          Seq[Array[Byte]](), "",
          header, HeaderBuilder.empty,
          Metadata(), Json.toJson(request).toString
        )
        shellClient ! kernelMessage

        // Echo back the kernel message sent to have a signature injected
        signatureManagerProbe.expectMsgClass(classOf[KernelMessage])
        signatureManagerProbe.reply(kernelMessage)

        probe.expectMsgClass(classOf[ZMQMessage])
      }
    }
  }
} 
Example 49
Source File: HeartbeatClient.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.client.socket

import akka.actor.{ActorRef, Actor}
import akka.util.{ByteString, Timeout}
import org.apache.toree.communication.ZMQMessage
import akka.pattern.ask
import org.apache.toree.kernel.protocol.v5.client.ActorLoader
import org.apache.toree.utils.LogLike
import org.apache.toree.kernel.protocol.v5.UUID
import scala.collection.concurrent.{Map, TrieMap}
import scala.concurrent.duration._

object HeartbeatMessage {}

class HeartbeatClient(
  socketFactory : SocketFactory,
  actorLoader: ActorLoader,
  signatureEnabled: Boolean
) extends Actor with LogLike {
  logger.debug("Created new Heartbeat Client actor")
  implicit val timeout = Timeout(1.minute)

  val futureMap: Map[UUID, ActorRef] = TrieMap[UUID, ActorRef]()
  val socket = socketFactory.HeartbeatClient(context.system, self)

  override def receive: Receive = {
    // from Heartbeat
    case message: ZMQMessage =>
      val id = message.frames.map((byteString: ByteString) =>
        new String(byteString.toArray)).mkString("\n")
      logger.info(s"Heartbeat client receive:$id")
      futureMap(id) ! true
      futureMap.remove(id)

    // from SparkKernelClient
    case HeartbeatMessage =>
      import scala.concurrent.ExecutionContext.Implicits.global
      val id = java.util.UUID.randomUUID().toString
      futureMap += (id -> sender)
      logger.info(s"Heartbeat client send: $id")
      val future = socket ? ZMQMessage(ByteString(id.getBytes))
      future.onComplete {
        // the future always times out because the server "tells" the response back
        case _ => futureMap.remove(id)
      }
  }
} 
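The futureMap above is the usual correlation-id pattern: remember each requester under a fresh id, then complete it when the matching response arrives. The pattern in isolation, as a sketch (Correlator and the tuple messages are illustrative):

import akka.actor.{Actor, ActorRef}
import scala.collection.concurrent.TrieMap

class Correlator extends Actor {
  private val pending = TrieMap.empty[String, ActorRef]

  override def receive: Receive = {
    case ("request", id: String) =>
      pending += (id -> sender())          // remember the requester under its correlation id
    case ("response", id: String) =>
      pending.remove(id).foreach(_ ! true) // reply to the waiting requester, if any
  }
}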
Example 50
Source File: SignatureCheckerActorSpecForIntegration.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package integration.security

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.communication.security.{Hmac, SignatureCheckerActor}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfter, FunSpecLike, Matchers}
import play.api.libs.json.Json

object SignatureCheckerActorSpecForIntegration {
  val config = """
    akka {
      loglevel = "WARNING"
    }"""
}

class SignatureCheckerActorSpecForIntegration extends TestKit(
  ActorSystem(
    "SignatureCheckerActorSpec",
    ConfigFactory.parseString(SignatureCheckerActorSpecForIntegration.config)
  )
) with ImplicitSender with FunSpecLike with Matchers with BeforeAndAfter
{

  private val sigKey = "12345"
  private val signature =
    "1c4859a7606fd93eb5f73c3d9642f9bc860453ba42063961a00d02ed820147b5"
  private val goodMessage =
    KernelMessage(
      null, signature,
      Header("a", "b", "c", "d", "e"),
      ParentHeader("f", "g", "h", "i", "j"),
      Metadata(),
      "<STRING>"
    )
  private val badMessage =
    KernelMessage(
      null, "wrong signature",
      Header("a", "b", "c", "d", "e"),
      ParentHeader("f", "g", "h", "i", "j"),
      Metadata(),
      "<STRING>"
    )

  private var signatureChecker: ActorRef = _

  before {
    val hmac = Hmac(sigKey)
    signatureChecker =
      system.actorOf(Props(classOf[SignatureCheckerActor], hmac))
  }

  after {
    signatureChecker = null
  }

  describe("SignatureCheckerActor") {
    describe("#receive") {
      it("should return true if the kernel message is valid") {
        val blob =
          Json.stringify(Json.toJson(goodMessage.header)) ::
          Json.stringify(Json.toJson(goodMessage.parentHeader)) ::
          Json.stringify(Json.toJson(goodMessage.metadata)) ::
          goodMessage.contentString ::
          Nil
        signatureChecker ! ((goodMessage.signature, blob))
        expectMsg(true)
      }

      it("should return false if the kernel message is invalid") {
        val blob =
          Json.stringify(Json.toJson(badMessage.header)) ::
          Json.stringify(Json.toJson(badMessage.parentHeader)) ::
          Json.stringify(Json.toJson(badMessage.metadata)) ::
          badMessage.contentString ::
          Nil
        signatureChecker ! ((badMessage.signature, blob))
        expectMsg(false)
      }
    }
  }
} 
Example 51
Source File: SignatureProducerActorSpecForIntegration.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package integration.security

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.communication.security.{Hmac, SignatureProducerActor}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfter, FunSpecLike, Matchers}

object SignatureProducerActorSpecForIntegration {
  val config = """
    akka {
      loglevel = "WARNING"
    }"""
}

class SignatureProducerActorSpecForIntegration extends TestKit(
  ActorSystem(
    "SignatureProducerActorSpec",
    ConfigFactory.parseString(SignatureProducerActorSpecForIntegration.config)
  )
) with ImplicitSender with FunSpecLike with Matchers with BeforeAndAfter
{

  private val sigKey = "12345"

  private var signatureProducer: ActorRef = _

  before {
    val hmac = Hmac(sigKey)
    signatureProducer =
      system.actorOf(Props(classOf[SignatureProducerActor], hmac))

  }

  after {
    signatureProducer = null
  }

  describe("SignatureProducerActor") {
    describe("#receive") {
      it("should return the correct signature for a kernel message") {
        val expectedSignature =
          "1c4859a7606fd93eb5f73c3d9642f9bc860453ba42063961a00d02ed820147b5"
        val message =
          KernelMessage(
            null, "",
            Header("a", "b", "c", "d", "e"),
            ParentHeader("f", "g", "h", "i", "j"),
            Metadata(),
            "<STRING>"
          )

        signatureProducer ! message
        expectMsg(expectedSignature)
      }
    }
  }
} 
Example 52
Source File: RouterSocketActor.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.communication.actors

import akka.actor.{Actor, ActorRef}
import akka.util.ByteString
import org.apache.toree.communication.{SocketManager, ZMQMessage}
import org.apache.toree.utils.LogLike
import org.zeromq.ZMQ


class RouterSocketActor(connection: String, listener: ActorRef)
  extends Actor with LogLike
{
  logger.debug(s"Initializing router socket actor for $connection")
  private val manager: SocketManager = new SocketManager
  private val socket = manager.newRouterSocket(connection, (message: Seq[Array[Byte]]) => {
    listener ! ZMQMessage(message.map(ByteString.apply): _*)
  })

  override def postStop(): Unit = {
    manager.closeSocket(socket)
  }

  override def receive: Actor.Receive = {
    case zmqMessage: ZMQMessage =>
      val frames = zmqMessage.frames.map(byteString => byteString.toArray)
      socket.send(frames: _*)
  }
} 
Example 53
Source File: RepSocketActor.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.communication.actors

import akka.actor.{Actor, ActorRef}
import akka.util.ByteString
import org.apache.toree.communication.{SocketManager, ZMQMessage}
import org.apache.toree.utils.LogLike
import org.zeromq.ZMQ


class RepSocketActor(connection: String, listener: ActorRef)
  extends Actor with LogLike
{
  logger.debug(s"Initializing reply socket actor for $connection")
  private val manager: SocketManager = new SocketManager
  private val socket = manager.newRepSocket(connection, (message: Seq[Array[Byte]]) => {
    listener ! ZMQMessage(message.map(ByteString.apply): _*)
  })

  override def postStop(): Unit = {
    manager.closeSocket(socket)
  }

  override def receive: Actor.Receive = {
    case zmqMessage: ZMQMessage =>
      val frames = zmqMessage.frames.map(byteString => byteString.toArray)
      socket.send(frames: _*)
  }
} 
Example 54
Source File: SubSocketActor.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.communication.actors

import akka.actor.{Actor, ActorRef}
import akka.util.ByteString
import org.apache.toree.communication.{ZMQMessage, SocketManager}
import org.apache.toree.utils.LogLike


class SubSocketActor(connection: String, listener: ActorRef)
  extends Actor with LogLike
{
  logger.debug(s"Initializing subscribe socket actor for $connection")
  private val manager: SocketManager = new SocketManager
  private val socket = manager.newSubSocket(connection, (message: Seq[Array[Byte]]) => {
    listener ! ZMQMessage(message.map(ByteString.apply): _*)
  })

  override def postStop(): Unit = {
    manager.closeSocket(socket)
  }

  override def receive: Actor.Receive = {
    case _ =>
  }
} 
Example 55
Source File: DealerSocketActor.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.communication.actors

import akka.actor.{Actor, ActorRef}
import akka.util.ByteString
import org.apache.toree.communication.{ZMQMessage, SocketManager}
import org.apache.toree.utils.LogLike
import org.zeromq.ZMQ


class DealerSocketActor(connection: String, listener: ActorRef)
  extends Actor with LogLike
{
  logger.debug(s"Initializing dealer socket actor for $connection")
  private val manager: SocketManager = new SocketManager
  private val socket = manager.newDealerSocket(connection, (message: Seq[Array[Byte]]) => {
    listener ! ZMQMessage(message.map(ByteString.apply): _*)
  })

  override def postStop(): Unit = {
    manager.closeSocket(socket)
  }

  override def receive: Actor.Receive = {
    case zmqMessage: ZMQMessage =>
      val frames = zmqMessage.frames.map(byteString => byteString.toArray)
      socket.send(frames: _*)
  }
} 
Example 56
Source File: ReqSocketActor.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.communication.actors

import akka.actor.{Actor, ActorRef}
import akka.util.ByteString
import org.apache.toree.communication.{ZMQMessage, SocketManager}
import org.apache.toree.utils.LogLike
import org.zeromq.ZMQ


class ReqSocketActor(connection: String, listener: ActorRef)
  extends Actor with LogLike
{
  logger.debug(s"Initializing request socket actor for $connection")
  private val manager: SocketManager = new SocketManager
  private val socket = manager.newReqSocket(connection, (message: Seq[Array[Byte]]) => {
    listener ! ZMQMessage(message.map(ByteString.apply): _*)
  })

  override def postStop(): Unit = {
    manager.closeSocket(socket)
  }

  override def receive: Actor.Receive = {
    case zmqMessage: ZMQMessage =>
      val frames = zmqMessage.frames.map(byteString => byteString.toArray)
      socket.send(frames: _*)
  }
} 
Example 57
Source File: SignatureManagerActor.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.communication.security

import akka.actor.{Props, ActorRef, Actor}
import akka.util.Timeout
import org.apache.toree.communication.utils.OrderedSupport
import org.apache.toree.kernel.protocol.v5.KernelMessage
import org.apache.toree.utils.LogLike

import scala.concurrent.duration._
import akka.pattern.ask
import akka.pattern.pipe

class SignatureManagerActor(
  key: String, scheme: String
) extends Actor with LogLike with OrderedSupport {
  private val hmac = Hmac(key, HmacAlgorithm(scheme))

  def this(key: String) = this(key, HmacAlgorithm.SHA256.toString)

  // NOTE: Required to provide the execution context for futures with akka
  import context._

  // NOTE: Required for ask (?) to function... maybe can define elsewhere?
  implicit val timeout = Timeout(5.seconds)

  //
  // List of child actors that the signature manager contains
  //
  private var signatureChecker: ActorRef = _
  private var signatureProducer: ActorRef = _

  
  override def orderedTypes(): Seq[Class[_]] = Seq(
    classOf[(String, Seq[_])],
    classOf[KernelMessage]
  )
} 
Example 58
Source File: InputRequestReplyHandler.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.handler

import akka.actor.ActorRef
import org.apache.toree.comm.{CommRegistrar, CommStorage}
import org.apache.toree.communication.utils.OrderedSupport
import org.apache.toree.kernel.protocol.v5.{SystemActorType, KernelMessage}
import org.apache.toree.kernel.protocol.v5.content.{InputReply, CommOpen}
import org.apache.toree.kernel.protocol.v5.kernel.{Utilities, ActorLoader}
import org.apache.toree.kernel.protocol.v5
import org.apache.toree.utils.MessageLogSupport
import play.api.libs.json.Json

import scala.concurrent.{Promise, Future}


class InputRequestReplyHandler(
  actorLoader: ActorLoader,
  responseMap: collection.mutable.Map[String, ActorRef]
) extends OrderedSupport with MessageLogSupport
{
  // TODO: Is there a better way than storing actor refs?
  def receive = {
    case kernelMessage: KernelMessage =>
      startProcessing()

      val kernelMessageType = kernelMessage.header.msg_type
      val inputRequestType = v5.MessageType.Outgoing.InputRequest.toString
      val inputReplyType = v5.MessageType.Incoming.InputReply.toString

      // Is this an outgoing message to request data?
      if (kernelMessageType == inputRequestType) {
        val session = kernelMessage.parentHeader.session
        responseMap(session) = sender

        logger.debug("Associating input request with session " + session)

        actorLoader.load(SystemActorType.KernelMessageRelay) ! kernelMessage

      // Is this an incoming response to a previous request for data?
      } else if (kernelMessageType == inputReplyType) {
        val session = kernelMessage.header.session
        val inputReply = Json.parse(kernelMessage.contentString).as[InputReply]

        logger.debug(s"Received input reply for session $session with value " +
          s"'${inputReply.value}'")

        responseMap(session) ! inputReply.value
        responseMap.remove(session)
      }

      finishedProcessing()
  }

  override def orderedTypes() : Seq[Class[_]] = {Seq(classOf[KernelMessage])}
} 
Example 59
Source File: LogPublisherHub.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.common.akka

import akka.actor.{ ActorRef, ActorSystem }
import ch.qos.logback.classic.filter.ThresholdFilter
import ch.qos.logback.classic.spi.ILoggingEvent
import ch.qos.logback.classic.{ Level, LoggerContext, Logger ⇒ LogbackLogger }
import ch.qos.logback.core.AppenderBase
import io.vamp.common.Namespace
import org.slf4j.{ Logger, LoggerFactory }

import scala.collection.mutable

object LogPublisherHub {

  private val logger = LoggerFactory.getLogger(LogPublisherHub.getClass)

  private val context = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]
  private val rootLogger = context.getLogger(Logger.ROOT_LOGGER_NAME)

  private val sessions: mutable.Map[String, LogPublisher] = new mutable.HashMap()

  def subscribe(to: ActorRef, level: String, loggerName: Option[String], encoder: (ILoggingEvent) ⇒ AnyRef)(implicit actorSystem: ActorSystem, namespace: Namespace): Unit = {
    val appenderLevel = Level.toLevel(level, Level.INFO)
    val appenderLogger = loggerName.map(context.getLogger).getOrElse(rootLogger)

    val exists = sessions.get(to.toString).exists { publisher ⇒
      publisher.level == appenderLevel && publisher.logger.getName == appenderLogger.getName
    }

    if (!exists) {
      unsubscribe(to)
      if (appenderLevel != Level.OFF) {
        logger.info(s"Starting log publisher [${appenderLevel.levelStr}] '${appenderLogger.getName}': $to")
        val publisher = LogPublisher(to, appenderLogger, appenderLevel, encoder)
        publisher.start()
        sessions.put(to.toString, publisher)
      }
    }
  }

  def unsubscribe(to: ActorRef): Unit = {
    sessions.remove(to.toString).foreach { publisher ⇒
      logger.info(s"Stopping log publisher: $to")
      publisher.stop()
    }
  }
}

private case class LogPublisher(to: ActorRef, logger: LogbackLogger, level: Level, encoder: (ILoggingEvent) ⇒ AnyRef)(implicit actorSystem: ActorSystem, namespace: Namespace) {

  private val filter = new ThresholdFilter()
  filter.setLevel(level.levelStr)

  private val appender = new AppenderBase[ILoggingEvent] {
    override def append(loggingEvent: ILoggingEvent) = to ! encoder(loggingEvent)
  }

  appender.addFilter(filter)
  appender.setName(to.toString)

  def start() = {
    val context = logger.getLoggerContext
    filter.setContext(context)
    appender.setContext(context)
    filter.start()
    appender.start()
    logger.addAppender(appender)
  }

  def stop() = {
    appender.stop()
    filter.stop()
    logger.detachAppender(appender)
  }
} 
Example 60
Source File: ActorBootstrap.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.common.akka

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.util.Timeout
import com.typesafe.scalalogging.Logger
import io.vamp.common.{ ClassProvider, Namespace }
import org.slf4j.{ LoggerFactory, MDC }

import scala.concurrent.Future
import scala.reflect.{ ClassTag, classTag }

trait Bootstrap extends BootstrapLogger {

  def start(): Future[Unit] = Future.successful(())

  def stop(): Future[Unit] = Future.successful(())
}

trait ActorBootstrap extends BootstrapLogger {

  private var actors: Future[List[ActorRef]] = Future.successful(Nil)

  def createActors(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[List[ActorRef]]

  def start(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[Unit] = {
    info(s"Starting ${getClass.getSimpleName}")
    actors = createActors(actorSystem, namespace, timeout)
    actors.map(_ ⇒ ())(actorSystem.dispatcher)
  }

  def restart(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[Unit] = {
    stop.flatMap(_ ⇒ start)(actorSystem.dispatcher)
  }

  def stop(implicit actorSystem: ActorSystem, namespace: Namespace): Future[Unit] = {
    info(s"Stopping ${getClass.getSimpleName}")
    actors.map(_.reverse.foreach(_ ! PoisonPill))(actorSystem.dispatcher)
  }

  def alias[T: ClassTag](name: String, default: String ⇒ Future[ActorRef])(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[ActorRef] = {
    ClassProvider.find[T](name).map { clazz ⇒
      IoC.alias(classTag[T].runtimeClass, clazz)
      IoC.createActor(clazz)
    } getOrElse default(name)
  }
}

trait BootstrapLogger {

  protected val logger = Logger(LoggerFactory.getLogger(getClass))

  protected def info(message: String)(implicit namespace: Namespace): Unit = {
    MDC.put("namespace", namespace.name)
    try logger.info(message) finally MDC.remove("namespace")
  }
} 
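A minimal concrete bootstrap, as a sketch; MyActor stands in for a real actor class from the project:

class MyBootstrap extends ActorBootstrap {
  def createActors(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[List[ActorRef]] =
    IoC.createActor[MyActor].map(_ :: Nil)(actorSystem.dispatcher)
}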
Example 61
Source File: PulseInitializationActor.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.pulse

import akka.actor.{ Actor, ActorRef }
import akka.util.Timeout
import io.vamp.common.Config
import io.vamp.common.akka.CommonSupportForActors
import io.vamp.pulse.PulseInitializationActor.Initialize
import io.vamp.pulse.notification.PulseNotificationProvider

object PulseInitializationActor {

  object Initialize

}

class PulseInitializationActor extends ElasticsearchPulseInitializationActor with CommonSupportForActors with PulseNotificationProvider {

  implicit lazy val timeout: Timeout = PulseActor.timeout()

  def receive: Actor.Receive = {
    case Initialize ⇒ initialize()
    case _          ⇒ done(sender())
  }

  private def initialize(): Unit = {
    val receiver = sender()
    val pulse = Config.string("vamp.pulse.type")().toLowerCase
    log.info(s"Initializing pulse of type: $pulse")

    pulse match {
      case "elasticsearch" | "nats" ⇒ initializeElasticsearch().foreach(_ ⇒ done(receiver))
      case _                        ⇒ done(receiver)
    }
  }

  private def done(receiver: ActorRef): Unit = {
    log.info(s"Pulse has been initialized.")
    receiver ! true
  }
} 
Example 62
Source File: ElasticsearchBootstrap.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.pulse

import akka.actor.{ ActorRef, ActorSystem }
import akka.util.Timeout
import io.vamp.common.Namespace
import io.vamp.common.akka.IoC.logger
import io.vamp.common.akka.{ ActorBootstrap, IoC }
import io.vamp.pulse.notification.PulseNotificationProvider

import scala.concurrent.{ ExecutionContext, Future }

class ElasticsearchBootstrap
    extends ActorBootstrap
    with PulseNotificationProvider {

  def createActors(implicit actorSystem: ActorSystem,
                   namespace: Namespace,
                   timeout: Timeout): Future[List[ActorRef]] = {
    implicit val executionContext: ExecutionContext = actorSystem.dispatcher
    Future.sequence(IoC.createActor[PulseInitializationActor] :: Nil)
  }

  override def start(implicit actorSystem: ActorSystem,
                     namespace: Namespace,
                     timeout: Timeout): Future[Unit] = {
    implicit val executionContext: ExecutionContext = actorSystem.dispatcher
    super.start.flatMap { _ ⇒
      IoC.actorFor[PulseInitializationActor] ! PulseInitializationActor.Initialize
      Future.unit
    }
  }
} 
Example 63
Source File: Percolator.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.pulse

import akka.actor.{ Actor, ActorRef }
import io.vamp.common.akka.CommonActorLogging
import io.vamp.model.event.Event

import scala.collection.mutable

object Percolator {

  sealed trait PercolatorMessage

  case class GetPercolator(name: String) extends PercolatorMessage

  case class RegisterPercolator(name: String, tags: Set[String], `type`: Option[String], message: Any) extends PercolatorMessage

  case class UnregisterPercolator(name: String) extends PercolatorMessage

}

trait Percolator {
  this: Actor with CommonActorLogging ⇒

  case class PercolatorEntry(tags: Set[String], `type`: Option[String], actor: ActorRef, message: Any)

  protected val percolators = mutable.Map[String, PercolatorEntry]()

  def getPercolator(name: String) = percolators.get(name)

  def registerPercolator(name: String, tags: Set[String], `type`: Option[String], message: Any) = {
    percolators.put(name, PercolatorEntry(tags, `type`, sender(), message)) match {
      case Some(entry) if entry.tags == tags && entry.`type` == `type` ⇒
      case _ ⇒ log.info(s"Percolator '$name' has been registered for tags '${tags.mkString(", ")}'.")
    }
  }

  def unregisterPercolator(name: String) = {
    if (percolators.remove(name).nonEmpty)
      log.info(s"Percolator successfully removed for '$name'.")
  }

  def percolate(publishEventValue: Boolean): (Event ⇒ Event) = { (event: Event) ⇒
    percolators.foreach {
      case (name, percolator) ⇒
        if (percolator.tags.forall(event.tags.contains) && (percolator.`type`.isEmpty || percolator.`type`.get == event.`type`)) {
          log.debug(s"Percolate match for '$name'.")
          val send = if (publishEventValue) event else event.copy(value = None)
          percolator.actor ! (percolator.message → send)
        }
    }
    event
  }
} 
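A minimal sketch of an actor mixing in the trait; PulseLikeActor is a placeholder name, not a class from the project:

import io.vamp.pulse.Percolator._

class PulseLikeActor extends Actor with CommonActorLogging with Percolator {
  def receive: Receive = {
    case RegisterPercolator(name, tags, kind, message) ⇒ registerPercolator(name, tags, kind, message)
    case UnregisterPercolator(name)                    ⇒ unregisterPercolator(name)
    case event: Event                                  ⇒ percolate(publishEventValue = true)(event)
  }
}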
Example 64
Source File: PersistenceBootstrap.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.persistence

import akka.actor.{ ActorRef, ActorSystem }
import akka.util.Timeout
import io.vamp.common.akka.ActorBootstrap
import io.vamp.common.{ Config, Namespace }

import scala.concurrent.{ ExecutionContext, Future }

object PersistenceBootstrap {

  def databaseType()(implicit namespace: Namespace): String = Config.string("vamp.persistence.database.type")().toLowerCase

  def keyValueStoreType()(implicit namespace: Namespace): String = Config.string("vamp.persistence.key-value-store.type")().toLowerCase
}

class PersistenceBootstrap extends ActorBootstrap {

  def createActors(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[List[ActorRef]] = {
    implicit val executionContext: ExecutionContext = actorSystem.dispatcher
    for {
      storage ← new PersistenceStorageBootstrap().createActors
      keyValue ← new KeyValueBootstrap().createActors
    } yield storage ++ keyValue
  }
}

class PersistenceStorageBootstrap extends ActorBootstrap {

  import PersistenceBootstrap._

  def createActors(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[List[ActorRef]] = {
    val db = databaseType()
    info(s"Database: $db")

    val dbActor = alias[PersistenceActor](db, (`type`: String) ⇒ {
      throw new RuntimeException(s"Unsupported database type: ${`type`}")
    })

    implicit val executionContext: ExecutionContext = actorSystem.dispatcher
    dbActor.map(_ :: Nil)
  }
}

class KeyValueBootstrap extends ActorBootstrap {

  import PersistenceBootstrap._

  def createActors(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[List[ActorRef]] = {
    val kv = keyValueStoreType()
    info(s"Key-Value store: $kv")

    val kvActor = alias[KeyValueStoreActor](kv, (`type`: String) ⇒ {
      throw new RuntimeException(s"Unsupported key-value store type: ${`type`}")
    })

    implicit val executionContext: ExecutionContext = actorSystem.dispatcher
    kvActor.map(_ :: Nil)
  }
} 
Example 65
Source File: GatewayDriverBootstrap.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.gateway_driver

import akka.actor.{ ActorRef, ActorSystem }
import akka.util.Timeout
import io.vamp.common.{ ClassProvider, Config, Namespace }
import io.vamp.common.akka.{ ActorBootstrap, IoC }

import scala.concurrent.Future
import scala.io.Source
import scala.language.postfixOps

class GatewayDriverBootstrap extends ActorBootstrap {

  def createActors(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[List[ActorRef]] = {

    val marshallers: Map[String, GatewayMarshallerDefinition] = Config.list("vamp.gateway-driver.marshallers")().collect {
      case config: Map[_, _] ⇒
        val name = config.asInstanceOf[Map[String, String]]("name").trim
        val clazz = config.asInstanceOf[Map[String, String]].get("type").flatMap(ClassProvider.find[GatewayMarshaller]).get

        info(s"Gateway marshaller: ${config.asInstanceOf[Map[String, String]].getOrElse("type", "")}")

        val template = config.asInstanceOf[Map[String, Map[String, AnyRef]]].getOrElse("template", Map())
        val file = template.get("file").map(_.asInstanceOf[String].trim).getOrElse("")
        val resource = template.get("resource").map(_.asInstanceOf[String].trim).getOrElse("")

        name → GatewayMarshallerDefinition(
          clazz.newInstance,
          if (file.nonEmpty) Source.fromFile(file).mkString else if (resource.nonEmpty) Source.fromURL(getClass.getResource(resource)).mkString else ""
        )
    } toMap

    IoC.createActor[GatewayDriverActor](marshallers).map(_ :: Nil)(actorSystem.dispatcher)
  }
} 
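The marshaller definitions above are read from configuration; a sketch of the expected shape, embedded via ConfigFactory for illustration (all values are placeholders):

import com.typesafe.config.ConfigFactory

val marshallerConfig = ConfigFactory.parseString(
  """
  vamp.gateway-driver.marshallers = [
    {
      name = "haproxy"                          # marshaller name
      type = "fully.qualified.MarshallerClass"  # must resolve via ClassProvider to a GatewayMarshaller
      template {
        file = ""                               # either a template file path ...
        resource = "/haproxy/template.cfg"      # ... or a classpath resource
      }
    }
  ]
  """)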
Example 66
Source File: KubernetesWorkflowActor.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.workflow_driver

import akka.actor.ActorRef
import akka.pattern.ask
import io.vamp.common.ClassMapper
import io.vamp.common.akka.IoC
import io.vamp.container_driver.kubernetes.KubernetesDriverActor.{ CreateJob, DeleteJob }
import io.vamp.container_driver.kubernetes.{ Job, K8sClientConfig, KubernetesDriverActor }
import io.vamp.container_driver.{ ContainerDriverMapping, ContainerDriverValidation, DeployableType, DockerDeployableType }
import io.vamp.model.artifact._
import io.vamp.model.event.Event
import io.vamp.model.resolver.WorkflowValueResolver
import io.vamp.persistence.PersistenceActor
import io.vamp.pulse.Percolator.GetPercolator
import io.vamp.pulse.PulseActor

import scala.concurrent.Future

class KubernetesWorkflowActorMapper extends ClassMapper {
  val name = "kubernetes"
  val clazz: Class[_] = classOf[KubernetesWorkflowActor]
}

class KubernetesWorkflowActor extends DaemonWorkflowDriver with WorkflowValueResolver with ContainerDriverMapping with ContainerDriverValidation {

  override protected lazy val supportedDeployableTypes: List[DeployableType] = DockerDeployableType :: Nil

  override protected lazy val info: Future[Map[_, _]] = Future.successful(Map("kubernetes" → Map("url" → K8sClientConfig(customNamespace).url)))

  override protected lazy val driverActor: ActorRef = IoC.actorFor[KubernetesDriverActor]

  protected override def request: PartialFunction[Workflow, Unit] = ({
    case workflow if workflow.schedule.isInstanceOf[EventSchedule] ⇒
      logger.debug("KubernetesWorkflowActor - Workflow schedule is an instance of EventSchedule")

      IoC.actorFor[PulseActor] ? GetPercolator(WorkflowDriverActor.percolator(workflow)) map {
        case Some(_) if runnable(workflow) ⇒
          if (workflow.instances.isEmpty) {
            logger.debug(s"KubernetesWorkflowActor - workflow.instances.isEmpty : ${workflow.instances.isEmpty}")
            IoC.actorFor[PersistenceActor] ! PersistenceActor.UpdateWorkflowInstances(workflow, Instance(workflow.name, "", Map(), deployed = true) :: Nil)
          }
        case _ ⇒
          if (workflow.instances.nonEmpty) {
            logger.debug(s"KubernetesWorkflowActor - workflow.instances.nonEmpty : ${workflow.instances.nonEmpty}")
            IoC.actorFor[PersistenceActor] ! PersistenceActor.UpdateWorkflowInstances(workflow, Nil)
          }
      }

  }: PartialFunction[Workflow, Unit]) orElse {
    // Note: this block runs when `request` composes the partial functions,
    // not each time the fallback case actually matches a workflow.
    logger.debug("KubernetesWorkflowActor - workflow schedule is not an instance of EventSchedule")
    super.request
  }

  protected override def schedule(data: Any): PartialFunction[Workflow, Future[Any]] = super.schedule(data) orElse {
    case w if data.isInstanceOf[Event] && w.schedule.isInstanceOf[EventSchedule] ⇒ enrich(w, data).flatMap { workflow ⇒

      validateDeployable(workflow.breed.asInstanceOf[DefaultBreed].deployable)

      val name = s"workflow-${workflow.lookupName}-${data.asInstanceOf[Event].timestamp.toInstant.toEpochMilli}"
      val scale = workflow.scale.get.asInstanceOf[DefaultScale]

      driverActor ? CreateJob(Job(
        name = name,
        group = group(workflow),
        docker = docker(workflow),
        cpu = scale.cpu.value,
        mem = Math.round(scale.memory.value).toInt,
        environmentVariables = environment(workflow)
      ))
    }
  }

  protected override def unschedule(): PartialFunction[Workflow, Future[Any]] = super.unschedule() orElse {
    case w if w.schedule.isInstanceOf[EventSchedule] ⇒ driverActor ? DeleteJob(group(w))
  }

  override def resolverClasses: List[String] = super[WorkflowValueResolver].resolverClasses

  private def group(workflow: Workflow) = s"workflow-${workflow.lookupName}"
} 
Example 67
Source File: LogApiController.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.operation.controller

import java.time.{ OffsetDateTime, ZoneId }
import java.util.Date

import akka.actor.{ ActorRef, Props }
import akka.stream.actor.ActorPublisher
import akka.stream.actor.ActorPublisherMessage.{ Cancel, Request }
import akka.stream.scaladsl.Source
import ch.qos.logback.classic.spi.ILoggingEvent
import akka.http.scaladsl.model.sse.ServerSentEvent
import io.vamp.common.Namespace
import io.vamp.common.akka._
import io.vamp.common.json.{ OffsetDateTimeSerializer, SerializationFormat }
import org.json4s.native.Serialization._

import scala.concurrent.duration.FiniteDuration

case class LogEvent(logger: String, level: String, message: String, timestamp: OffsetDateTime)

trait LogApiController extends AbstractController {

  private val eventType = "log"

  def sourceLog(level: String, logger: Option[String], keepAlivePeriod: FiniteDuration)(implicit namespace: Namespace): Source[ServerSentEvent, ActorRef] = {
    Source.actorPublisher[ServerSentEvent](Props(new ActorPublisher[ServerSentEvent] {
      def receive: Receive = {
        case Request(_) ⇒ openLogStream(self, level, logger, { event ⇒
          ServerSentEvent(write(encode(event))(SerializationFormat(OffsetDateTimeSerializer)), eventType)
        })
        case Cancel                                  ⇒ closeLogStream(self)
        case sse: ServerSentEvent if totalDemand > 0 ⇒ onNext(sse)
        case _                                       ⇒
      }

    })).keepAlive(keepAlivePeriod, () ⇒ ServerSentEvent.heartbeat)
  }

  def openLogStream(to: ActorRef, level: String, logger: Option[String], encoder: (ILoggingEvent) ⇒ AnyRef)(implicit namespace: Namespace): Unit = {
    LogPublisherHub.subscribe(to, level, logger, encoder)
  }

  def closeLogStream(to: ActorRef): Unit = LogPublisherHub.unsubscribe(to)

  def encode(loggingEvent: ILoggingEvent) = LogEvent(
    loggingEvent.getLoggerName,
    loggingEvent.getLevel.toString,
    loggingEvent.getFormattedMessage,
    OffsetDateTime.ofInstant(new Date(loggingEvent.getTimeStamp).toInstant, ZoneId.of("UTC"))
  )
} 
Example 68
Source File: OperationBootstrap.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.operation

import akka.actor.{ ActorRef, ActorSystem, Props }
import akka.util.Timeout
import io.vamp.common.akka.SchedulerActor.Period
import io.vamp.common.akka.{ ActorBootstrap, IoC, SchedulerActor }
import io.vamp.common.{ Config, Namespace }
import io.vamp.operation.config.ConfigurationLoaderActor
import io.vamp.operation.deployment.{ DeploymentActor, DeploymentSynchronizationActor, DeploymentSynchronizationSchedulerActor }
import io.vamp.operation.gateway.{ GatewayActor, GatewaySynchronizationActor, GatewaySynchronizationSchedulerActor }
import io.vamp.operation.metrics.KamonMetricsActor
import io.vamp.operation.sla.{ EscalationActor, EscalationSchedulerActor, SlaActor, SlaSchedulerActor }
import io.vamp.operation.workflow.{ WorkflowActor, WorkflowSynchronizationActor, WorkflowSynchronizationSchedulerActor }

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.language.postfixOps

class OperationBootstrap extends ActorBootstrap {

  val config = "vamp.operation"

  val synchronizationMailbox = "vamp.operation.synchronization.mailbox"

  def createActors(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[List[ActorRef]] = {
    implicit val ec: ExecutionContext = actorSystem.dispatcher
    implicit val delay = Config.duration(s"$config.synchronization.initial-delay")()

    val slaPeriod = Config.duration(s"$config.sla.period")()
    val escalationPeriod = Config.duration(s"$config.escalation.period")()
    val synchronizationPeriod = Config.duration(s"$config.synchronization.period")()

    val actors = List(
      IoC.createActor[ConfigurationLoaderActor],
      IoC.createActor[KamonMetricsActor],

      IoC.createActor[DeploymentActor],

      IoC.createActor(Props(classOf[DeploymentSynchronizationActor]).withMailbox(synchronizationMailbox)),
      IoC.createActor[DeploymentSynchronizationSchedulerActor],

      IoC.createActor[GatewayActor],

      IoC.createActor(Props(classOf[GatewaySynchronizationActor]).withMailbox(synchronizationMailbox)),
      IoC.createActor[GatewaySynchronizationSchedulerActor],

      IoC.createActor[SlaActor],
      IoC.createActor[SlaSchedulerActor],

      IoC.createActor[EscalationActor],
      IoC.createActor[EscalationSchedulerActor],

      IoC.createActor[WorkflowActor],
      IoC.createActor[WorkflowSynchronizationActor],
      IoC.createActor[WorkflowSynchronizationSchedulerActor]
    )

    val result = Future.sequence(actors)

    result.onComplete { _ ⇒
      kick(classOf[DeploymentSynchronizationSchedulerActor], Period(synchronizationPeriod))
      kick(classOf[GatewaySynchronizationSchedulerActor], Period(synchronizationPeriod, synchronizationPeriod / 3))
      kick(classOf[WorkflowSynchronizationSchedulerActor], Period(synchronizationPeriod, 2 * synchronizationPeriod / 3))

      kick(classOf[SlaSchedulerActor], Period(slaPeriod))
      kick(classOf[EscalationSchedulerActor], Period(escalationPeriod))
    }

    result
  }

  override def stop(implicit actorSystem: ActorSystem, namespace: Namespace) = {

    IoC.actorFor[DeploymentSynchronizationSchedulerActor] ! SchedulerActor.Period(0 seconds)
    IoC.actorFor[GatewaySynchronizationSchedulerActor] ! SchedulerActor.Period(0 seconds)
    IoC.actorFor[WorkflowSynchronizationSchedulerActor] ! SchedulerActor.Period(0 seconds)
    IoC.actorFor[SlaSchedulerActor] ! SchedulerActor.Period(0 seconds)
    IoC.actorFor[EscalationSchedulerActor] ! SchedulerActor.Period(0 seconds)

    super.stop(actorSystem, namespace)
  }

  protected def kick(clazz: Class[_], period: Period)(implicit actorSystem: ActorSystem, namespace: Namespace, delay: FiniteDuration): Unit = {
    actorSystem.scheduler.scheduleOnce(delay)({
      IoC.actorFor(clazz) ! period
    })(actorSystem.dispatcher)
  }
} 
Example 69
Source File: Sender.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package sample.remote.benchmark

import akka.actor.{ Actor, ActorIdentity, ActorRef, ActorSystem, Identify, Props, ReceiveTimeout, Terminated }
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration._

class Sender(path: String, totalMessages: Int, burstSize: Int, payloadSize: Int) extends Actor {
  import Sender._
  val payload: Array[Byte] = Vector.fill(payloadSize)("a").mkString.getBytes
  println(s"payload bytes: ${payload.length}")
  var startTime = 0L
  var maxRoundTripMillis = 0L

  context.setReceiveTimeout(3.seconds) // set the receive timeout for this actor
  sendIdentifyRequest() // ask the remote actor to identify itself, to verify the path is valid

  override def receive: Receive = identifying

  def identifying: Receive = {
    case ActorIdentity(`path`, Some(actor)) =>
      context.watch(actor)
      context.become(active(actor))
      context.setReceiveTimeout(Duration.Undefined) // reset the receive timeout
      self ! Warmup

    case ActorIdentity(`path`, None) =>
      println(s"远程actor无效:$path")

    case ReceiveTimeout =>
      sendIdentifyRequest() // timed out; check again whether the remote actor is reachable
  }

  def active(actor: ActorRef): Receive = {
    case Warmup => // warm-up round, excluded from the statistics
      sendBatch(actor, burstSize)
      actor ! Start

    case Start =>
      println(s"启动基准测试一共 $totalMessages 消息,分帧大小 $burstSize,有效负载 $payloadSize")
      startTime = System.nanoTime()
      val remaining = sendBatch(actor, totalMessages)
      if (remaining == 0)
        actor ! Done
      else
        actor ! Continue(remaining, startTime, startTime, burstSize)

    case c @ Continue(remaining, t0, t1, n) =>
      val now = System.nanoTime()
      val duration = (now - t0).nanos.toMillis // time from sending Continue until its reply arrived
      val roundTripMillis = (now - t1).nanos.toMillis
      maxRoundTripMillis = math.max(maxRoundTripMillis, roundTripMillis)
      if (duration >= 500) { // report statistics in 500 ms intervals
        val throughput = (n * 1000.0 / duration).toInt
        println(s"It took ${duration}ms to send $n messages, throughput ${throughput}msg/s")
      }
      }

      val nextRemaining = sendBatch(actor, remaining)
      if (nextRemaining == 0)
        actor ! Done
      else if (duration >= 500) // the current burst has been fully sent
        actor ! Continue(nextRemaining, now, now, burstSize)
      else // less than 500 ms since the interval started; update the remaining count, burst start time and messages sent
        actor ! c.copy(remaining = nextRemaining, burstStartTime = now, n = n + burstSize)

    case Done =>
      val took = (System.nanoTime - startTime).nanos.toMillis
      val throughput = (totalMessages * 1000.0 / took).toInt
      println(
        s"It took ${took}ms to send $totalMessages messages, throughput ${throughput}msg/s, " +
        s"max round-trip ${maxRoundTripMillis}ms, burst size $burstSize, " +
        s"payload $payloadSize")
      actor ! Shutdown
      actor ! Shutdown

    case Terminated(`actor`) =>
      println("Receiver terminated")
      context.system.terminate()
  }

  // The original listing truncated the helper methods and the opening of the
  // companion object here; the following is a plausible reconstruction.
  private def sendIdentifyRequest(): Unit =
    context.actorSelection(path) ! Identify(path)

  private def sendBatch(actor: ActorRef, remaining: Int): Int = {
    val batch = math.min(remaining, burstSize)
    (1 to batch).foreach(_ => actor ! payload)
    remaining - batch
  }
}

object Sender {
  case object Warmup
  case object Shutdown
  sealed trait Echo
  case object Start extends Echo
  case object Done extends Echo
  case class Continue(remaining: Int, startTime: Long, burstStartTime: Long, n: Int) extends Echo

  def main(args: Array[String]): Unit = {
    val system = ActorSystem("Sys", ConfigFactory.load("calculator"))
    val remoteHostPort = if (args.nonEmpty) args(0) else "127.0.0.1:2553"
    val remotePath = s"akka.tcp://Sys@$remoteHostPort/user/rcv"
    val totalMessages = if (args.length >= 2) args(1).toInt else 500000
    val burstSize = if (args.length >= 3) args(2).toInt else 5000
    val payloadSize = if (args.length >= 4) args(3).toInt else 100

    system.actorOf(Sender.props(remotePath, totalMessages, burstSize, payloadSize), "snd")
  }

  def props(path: String, totalMessages: Int, burstSize: Int, payloadSize: Int) =
    Props(new Sender(path, totalMessages, burstSize, payloadSize))
} 
Example 70
Source File: AkkaActorsKafkaConsumer.scala    From kafka-scala-api   with Apache License 2.0 5 votes vote down vote up
package com.example

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, OneForOneStrategy, Props, SupervisorStrategy}
import cakesolutions.kafka.KafkaConsumer
import cakesolutions.kafka.akka.KafkaConsumerActor.{Confirm, Subscribe}
import cakesolutions.kafka.akka.{ConsumerRecords, Extractor, KafkaConsumerActor}

import scala.concurrent.duration._

object AkkaActorsKafkaConsumer extends App {
  ConsumerRecovery()
}

object ConsumerRecovery {
  def apply(): ActorRef = {
    val actorConf = KafkaConsumerActor.Conf(1.seconds, 3.seconds)
    val system = ActorSystem()
    system.actorOf(Props(new ConsumerRecovery(kafkaConsumerConf, actorConf)))
  }
}

class ConsumerRecovery(kafkaConfig: KafkaConsumer.Conf[String, String],
                       actorConfig: KafkaConsumerActor.Conf) extends Actor with ActorLogging {

  override def supervisorStrategy: SupervisorStrategy = OneForOneStrategy(maxNrOfRetries = 10) {
    case _: KafkaConsumerActor.ConsumerException =>
      log.info("Consumer exception caught. Restarting consumer.")
      SupervisorStrategy.Restart
    case _ =>
      SupervisorStrategy.Escalate
  }

  val recordsExt: Extractor[Any, ConsumerRecords[String, String]] = ConsumerRecords.extractor[String, String]

  val consumer: ActorRef = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )

  consumer ! Subscribe.AutoPartition(List(topic))

  override def receive: Receive = {
    // Consume from Kafka
    case recordsExt(records) =>
      processRecords(records.pairs)
      sender() ! Confirm(records.offsets, commit = true)
  }

  private def processRecords(records: Seq[(Option[String], String)]) =
    records.foreach { case (key, value) =>
      log.info(s"Received [$key,$value]")
    }
} 
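The listing references kafkaConsumerConf and topic, which are defined elsewhere in the example project; a plausible sketch of those definitions:

import cakesolutions.kafka.KafkaConsumer
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.serialization.StringDeserializer

val topic = "topic1" // placeholder topic name

val kafkaConsumerConf: KafkaConsumer.Conf[String, String] = KafkaConsumer.Conf(
  new StringDeserializer,
  new StringDeserializer,
  groupId = "group",
  enableAutoCommit = false,
  autoOffsetReset = OffsetResetStrategy.EARLIEST)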
Example 71
Source File: IssuesActor.scala    From BacklogMigration-Redmine   with MIT License 5 votes vote down vote up
package com.nulabinc.backlog.r2b.exporter.actor

import java.util.concurrent.CountDownLatch

import akka.actor.SupervisorStrategy.Restart
import akka.actor.{Actor, ActorRef, OneForOneStrategy, Props}
import akka.routing.SmallestMailboxPool
import com.nulabinc.backlog.migration.common.conf.BacklogConfiguration
import com.nulabinc.backlog.migration.common.domain.BacklogTextFormattingRule
import com.nulabinc.backlog.migration.common.utils.{ConsoleOut, Logging, ProgressBar}
import com.nulabinc.backlog.r2b.exporter.core.ExportContext
import com.nulabinc.backlog4j.BacklogAPIException
import com.osinka.i18n.Messages

import scala.concurrent.duration._


private[exporter] class IssuesActor(exportContext: ExportContext, backlogTextFormattingRule: BacklogTextFormattingRule) extends Actor with BacklogConfiguration with Logging {

  private[this] val strategy =
    OneForOneStrategy(maxNrOfRetries = 5, withinTimeRange = 10 seconds) {
      case e: BacklogAPIException if e.getMessage.contains("429") =>
        Restart
      case e: BacklogAPIException if e.getMessage.contains("Stream closed") =>
        Restart
      case e =>
        ConsoleOut.error("Fatal error: " + e.getMessage)
        logger.error(e.getStackTrace.mkString("\n"))
        sys.exit(2)
    }

  private[this] val limit      = exportLimitAtOnce
  private[this] val allCount   = exportContext.issueService.countIssues()
  private[this] val completion = new CountDownLatch(allCount)

  private[this] val console =
    (ProgressBar.progress _)(Messages("common.issues"), Messages("message.exporting"), Messages("message.exported"))
  private[this] val issuesInfoProgress =
    (ProgressBar.progress _)(Messages("common.issues_info"), Messages("message.collecting"), Messages("message.collected"))

  def receive: Receive = {
    case IssuesActor.Do =>
      val router     = SmallestMailboxPool(akkaMailBoxPool, supervisorStrategy = strategy)
      val issueActor = context.actorOf(router.props(Props(new IssueActor(exportContext, backlogTextFormattingRule))))

      (0 until (allCount, limit))
        .foldLeft(Seq.empty[Int]) { (acc, offset) =>
          acc concat issueIds(offset)
        }
        .map(issues)
        .foreach(_(issueActor))

      completion.await
      sender() ! IssuesActor.Done
  }

  private[this] def issueIds(offset: Int): Seq[Int] = {
    val params = Map(
      "offset"        -> offset.toString,
      "limit"         -> limit.toString,
      "project_id"    -> exportContext.projectId.value.toString,
      "status_id"     -> "*",
      "subproject_id" -> "!*"
    )
    val ids = exportContext.issueService.allIssues(params).map(_.getId.intValue())
    issuesInfoProgress(((offset / limit) + 1), ((allCount / limit) + 1))
    ids
  }

  private[this] def issues(issueId: Int)(issueActor: ActorRef): Unit = {
    issueActor ! IssueActor.Do(issueId, completion, allCount, console)
  }

}

private[exporter] object IssuesActor {

  val name = "IssuesActor"

  case object Do

  case object Done

} 
Example 72
Source File: IssuesActor.scala    From BacklogMigration-Redmine   with MIT License 5 votes vote down vote up
package com.nulabinc.backlog.r2b.mapping.collector.actor

import java.util.concurrent.CountDownLatch

import akka.actor.SupervisorStrategy.Restart
import akka.actor.{Actor, ActorRef, OneForOneStrategy, Props}
import akka.routing.SmallestMailboxPool
import com.nulabinc.backlog.migration.common.conf.BacklogConfiguration
import com.nulabinc.backlog.migration.common.utils.{ConsoleOut, Logging, ProgressBar}
import com.nulabinc.backlog.r2b.mapping.collector.core.{MappingContext, MappingData}
import com.nulabinc.backlog4j.BacklogAPIException
import com.osinka.i18n.Messages
import com.taskadapter.redmineapi.bean.User

import scala.concurrent.duration._


private[collector] class IssuesActor(mappingContext: MappingContext) extends Actor with BacklogConfiguration with Logging {

  private[this] val strategy =
    OneForOneStrategy(maxNrOfRetries = 5, withinTimeRange = 10 seconds) {
      case e: BacklogAPIException if e.getMessage.contains("429") =>
        Restart
      case e: BacklogAPIException if e.getMessage.contains("Stream closed") =>
        Restart
      case e =>
        ConsoleOut.error("Fatal error: " + e.getMessage)
        logger.error(e.getStackTrace.mkString("\n"))
        sys.exit(2)
    }

  private[this] val limit: Int = exportLimitAtOnce
  private[this] val allCount   = mappingContext.issueService.countIssues()
  private[this] val completion = new CountDownLatch(allCount)
  private[this] val console =
    (ProgressBar.progress _)(Messages("common.issues"), Messages("message.analyzing"), Messages("message.analyzed"))
  private[this] val issuesInfoProgress =
    (ProgressBar.progress _)(Messages("common.issues_info"), Messages("message.collecting"), Messages("message.collected"))

  def receive: Receive = {
    case IssuesActor.Do(mappingData: MappingData, allUsers: Seq[User]) =>
      val router     = SmallestMailboxPool(akkaMailBoxPool, supervisorStrategy = strategy)
      val issueActor = context.actorOf(router.props(Props(new IssueActor(mappingContext.issueService, mappingData, allUsers))))

      (0 until (allCount, limit))
        .foldLeft(Seq.empty[Int]) { (acc, offset) =>
          acc concat issueIds(offset)
        }
        .map(issues)
        .foreach(_(issueActor))

      completion.await
      sender() ! IssuesActor.Done
  }

  private[this] def issueIds(offset: Int): Seq[Int] = {
    val params =
      Map("offset"        -> offset.toString,
          "limit"         -> limit.toString,
          "project_id"    -> mappingContext.projectId.value.toString,
          "status_id"     -> "*",
          "subproject_id" -> "!*")
    val ids = mappingContext.issueService.allIssues(params).map(_.getId.intValue())
    issuesInfoProgress(((offset / limit) + 1), ((allCount / limit) + 1))
    ids
  }

  private[this] def issues(issueId: Int)(issueActor: ActorRef) = {
    issueActor ! IssueActor.Do(issueId, completion, allCount, console)
  }

}

private[collector] object IssuesActor {

  case class Do(mappingData: MappingData, allUsers: Seq[User])

  case object Done

} 
Example 73
Source File: WeedOutMasterActor.scala    From speedo   with Apache License 2.0 5 votes vote down vote up
package com.htc.speedo.akka

import scala.collection.mutable.Buffer

import akka.actor.ActorRef

import com.twitter.scalding.Args

import MasterActor._


  var updateIndex = 0

  override def strategyName = "weed-out"

  override def parseTrainResult(loss: Double) = {
    val needMerge =
      if (lastUpdates.size < maxInterval) {
        // For the first few iterations, always do merge
        lastUpdates += sender
        true
      } else {
        // If the sender exists in [[lastUpdates]], then we consider it not delay
        // and merge its delta into snapshot weight
        val merge = lastUpdates.contains(sender)
        // update the last updated workers in the queue
        lastUpdates(updateIndex) = sender
        merge
      }
    // update the next index in queue
    updateIndex += 1
    if (updateIndex == maxInterval) updateIndex = 0
    // always start training for the worker
    ParsedTrainResult(if (needMerge) MergeResultSender else MergeResultNone)
  }

  override def workerCreated(worker: ActorRef) = {
    lastUpdates.insert(updateIndex, worker) // insert worker as oldest updater
    updateIndex += 1 // update next index
    maxInterval += 1 // the interval is increaed by 1
    super.workerCreated(worker) // start training
  }

  override def workerTerminated(worker: ActorRef) = {
    // remove oldest element
    if (updateIndex < lastUpdates.size) lastUpdates.remove(updateIndex, 1)
    maxInterval -= 1 // the interval is removed by 1
    if (updateIndex == maxInterval) updateIndex = 0 // update next index
  }
} 
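This listing starts mid-class: the declaration and the lastUpdates/maxInterval members were truncated. A plausible shape, reconstructed from the references above (base class, constructor and initial values are assumptions):

class WeedOutMasterActor(args: Args) extends MasterActor(args) {
  // workers that produced the most recent updates; updateIndex points at the oldest slot
  val lastUpdates: Buffer[ActorRef] = Buffer[ActorRef]()
  // size of the last-updates window; grows and shrinks with the worker pool
  var maxInterval: Int = 0 // initialized from Args in the real code (assumption)
  // ... followed by the members and overrides shown above ...
}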
Example 74
Source File: PSMasterActor.scala    From speedo   with Apache License 2.0 5 votes vote down vote up
package com.htc.speedo.akka

import scala.collection.mutable.{ Map => MutableMap, Set => MutableSet }

import akka.actor.ActorRef

import com.twitter.scalding.Args

import MasterActor._


  var catchup = false

  override def strategyName = "psc"

  override def parseTrainResult(loss: Double) = {
    if (catchup) {
      catchupWorkers -= sender
      // if in catch-up status and the catchupWorkers is empty
      if (catchupWorkers.isEmpty) {
        catchup = false
        log.info("Back to normal running status...")
        workers.foreach(workerIters.update(_, 1))
        ParsedTrainResult(train = StartTrainAll)
      } else ParsedTrainResult(train = StartTrainNone)
    } else { // if in normal running status
      val lastIter = workerIters.get(sender).get
      workerIters.update(sender, lastIter + 1)
      val values = workerIters.values
      catchup = values.max - values.min >= maxAdvance
      if (catchup) {
        catchupWorkers.clear
        catchupWorkers ++= workers
        catchupWorkers -= sender
        log.info("Change to catchup status, advanced actor: {}", sender.path.name)
      }
      ParsedTrainResult(train = if (catchup) StartTrainNone else StartTrainSender)
    }
  }

  override def workerCreated(worker: ActorRef) = {
    // if we are catching up, we do nothing and wait until catch up is over
    if (!catchup) {
      // set current iteration to quickest worker
      workerIters.update(worker, workerIters.values.max)
      super.workerCreated(worker) // start training
    }
  }

  override def workerTerminated(worker: ActorRef) = {
    // clean-up for the worker
    workerIters -= worker
    if (catchup) {
      // if we are catching up, we need to check if we are waiting for worker
      catchupWorkers -= worker
      if (catchupWorkers.isEmpty) { // all other workers are finished
        catchup = false
        log.info("Back to normal running status...")
        workers.foreach { w =>
          workerIters.update(w, 1)
          dbActor ! Forward(w, trainMessage)
        }
      }
    }
  }
} 
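As above, this listing starts mid-class; the declarations it references, reconstructed as a sketch (initial values are assumptions):

val workerIters: MutableMap[ActorRef, Int] = MutableMap() // per-worker iteration counts
val catchupWorkers: MutableSet[ActorRef] = MutableSet()   // workers still running while catching up
val maxAdvance: Int = 0                                   // allowed iteration gap; read from Args in the real code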
Example 75
Source File: SynchronousMasterActor.scala    From speedo   with Apache License 2.0 5 votes vote down vote up
package com.htc.speedo.akka

import scala.collection.mutable.{ Buffer, Set => MutableSet }

import akka.actor.ActorRef

import com.twitter.scalding.Args

import MasterActor._


  val lossList = Buffer[Double]()

  override def strategyName = "synchronous"

  override def parseTrainResult(loss: Double) = {
    // remove the sender from waiting list
    waitingSet -= sender
    if (loss >= 0) { // if the train is not faked by [[workerTerminated]]
      mergeSet += sender
      lossList += loss
    }
    if (waitingSet.isEmpty) {
      waitingSet ++= workers
      val average = lossList.sum / lossList.size
      if (mergeSet != lastUpdateWorkers) {
        lastUpdateWorkers = mergeSet.toSet // to immutable set
        dbActor ! UpdateWorkers(lastUpdateWorkers.map(_.path.name))
      }
      mergeSet.clear
      lossList.clear
      ParsedTrainResult(MergeResultAll(average), StartTrainAll)
    } else ParsedTrainResult(MergeResultWait, StartTrainNone)
  }

  // don't need to do anything, just wait for next iteration
  override def workerCreated(worker: ActorRef) = {}

  // just remove the worker from waiting list
  override def workerTerminated(worker: ActorRef) = {
    waitingSet -= worker
    // fakes train is finished to trigger normal progress of finished iteration
    if (waitingSet.isEmpty) self.tell(Trained(-1), worker)
  }
} 
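This listing also starts mid-class; the declarations it references, as a sketch:

val waitingSet: MutableSet[ActorRef] = MutableSet() // workers whose results are still awaited
val mergeSet: MutableSet[ActorRef] = MutableSet()   // workers whose losses count in this iteration
var lastUpdateWorkers: Set[ActorRef] = Set.empty    // last worker set reported to dbActor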
Example 76
Source File: User.scala    From chat-with-akka-http-websockets   with Apache License 2.0 5 votes vote down vote up
package chat

import akka.actor.{Actor, ActorRef}

object User {
  case class Connected(outgoing: ActorRef)
  case class IncomingMessage(text: String)
  case class OutgoingMessage(text: String)
}

class User(chatRoom: ActorRef) extends Actor {
  import User._

  def receive = {
    case Connected(outgoing) =>
      context.become(connected(outgoing))
  }

  def connected(outgoing: ActorRef): Receive = {
    chatRoom ! ChatRoom.Join

    {
      case IncomingMessage(text) =>
        chatRoom ! ChatRoom.ChatMessage(text)

      case ChatRoom.ChatMessage(text) =>
        outgoing ! OutgoingMessage(text)
    }
  }

} 
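The User actor above talks to a ChatRoom actor that is not shown; a minimal sketch consistent with the messages used:

import akka.actor.{ Actor, ActorRef, Terminated }

object ChatRoom {
  case object Join
  case class ChatMessage(message: String)
}

class ChatRoom extends Actor {
  import ChatRoom._
  var users: Set[ActorRef] = Set.empty

  def receive: Receive = {
    case Join =>
      users += sender()
      context.watch(sender()) // drop the user when its connection actor stops
    case Terminated(user) =>
      users -= user
    case msg: ChatMessage =>
      users.foreach(_ ! msg) // fan the message out to everyone in the room
  }
}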
Example 77
Source File: ProjectAttributesCoordinator.scala    From nexus-kg   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.async

import akka.actor.{ActorRef, ActorSystem}
import cats.effect.Async
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.types.Project
import ch.epfl.bluebrain.nexus.kg.async.ProjectAttributesCoordinatorActor.Msg._
import ch.epfl.bluebrain.nexus.kg.cache.ProjectCache
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.resources.ProjectIdentifier.ProjectRef
import ch.epfl.bluebrain.nexus.kg.resources.syntax._
import ch.epfl.bluebrain.nexus.kg.resources.{Files, OrganizationRef}
import ch.epfl.bluebrain.nexus.kg.storage.Storage.StorageOperations.FetchAttributes
import ch.epfl.bluebrain.nexus.sourcing.projections.Projections
import monix.eval.Task


  def stop(projectRef: ProjectRef): F[Unit] = {
    ref ! Stop(projectRef.id)
    F.unit
  }
}

object ProjectAttributesCoordinator {
  def apply(files: Files[Task], projectCache: ProjectCache[Task])(
      implicit config: AppConfig,
      fetchAttributes: FetchAttributes[Task],
      as: ActorSystem,
      P: Projections[Task, String]
  ): ProjectAttributesCoordinator[Task] = {
    val coordinatorRef = ProjectAttributesCoordinatorActor.start(files, None, config.cluster.shards)
    new ProjectAttributesCoordinator[Task](projectCache, coordinatorRef)
  }
} 
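The class declaration above the stop method was truncated from this listing; a plausible shell, inferred from the companion's constructor call (the start/initialization methods are elided):

class ProjectAttributesCoordinator[F[_]](projectCache: ProjectCache[F], ref: ActorRef)(implicit F: Async[F]) {

  // ... project start / change handlers dropped by the listing ...

  def stop(projectRef: ProjectRef): F[Unit] = {
    ref ! Stop(projectRef.id)
    F.unit
  }
}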
Example 78
Source File: SparqlIndexer.scala    From nexus-kg   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.indexing

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.stream.scaladsl.Source
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.types.Project
import ch.epfl.bluebrain.nexus.commons.sparql.client.{BlazegraphClient, SparqlWriteQuery}
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.AppConfig._
import ch.epfl.bluebrain.nexus.kg.indexing.View.SparqlView
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.kg.routes.Clients
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.ProgressFlowElem
import ch.epfl.bluebrain.nexus.sourcing.projections.ProjectionProgress.NoProgress
import ch.epfl.bluebrain.nexus.sourcing.projections._

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
@SuppressWarnings(Array("MaxParameters"))
object SparqlIndexer {

  
  final def start[F[_]: Timer](
      view: SparqlView,
      resources: Resources[F],
      project: Project,
      restartOffset: Boolean
  )(
      implicit as: ActorSystem,
      actorInitializer: (Props, String) => ActorRef,
      projections: Projections[F, String],
      F: Effect[F],
      clients: Clients[F],
      config: AppConfig
  ): StreamSupervisor[F, ProjectionProgress] = {

    implicit val ec: ExecutionContext          = as.dispatcher
    implicit val p: Project                    = project
    implicit val indexing: IndexingConfig      = config.sparql.indexing
    implicit val metadataOpts: MetadataOptions = MetadataOptions(linksAsIri = true, expandedLinks = true)
    implicit val tm: Timeout                   = Timeout(config.sparql.askTimeout)

    val client: BlazegraphClient[F] =
      clients.sparql.copy(namespace = view.index).withRetryPolicy(config.sparql.indexing.retry)

    def buildInsertOrDeleteQuery(res: ResourceV): SparqlWriteQuery =
      if (res.deprecated && !view.filter.includeDeprecated) view.buildDeleteQuery(res)
      else view.buildInsertQuery(res)

    val initFetchProgressF: F[ProjectionProgress] =
      if (restartOffset)
        projections.recordProgress(view.progressId, NoProgress) >> view.createIndex >> F.pure(NoProgress)
      else view.createIndex >> projections.progress(view.progressId)

    val sourceF: F[Source[ProjectionProgress, _]] = initFetchProgressF.map { initial =>
      val flow = ProgressFlowElem[F, Any]
        .collectCast[Event]
        .groupedWithin(indexing.batch, indexing.batchTimeout)
        .distinct()
        .mapAsync(view.toResource(resources, _))
        .collectSome[ResourceV]
        .collect {
          case res if view.allowedSchemas(res) && view.allowedTypes(res) => buildInsertOrDeleteQuery(res)
          case res if view.allowedSchemas(res)                           => view.buildDeleteQuery(res)
        }
        .runAsyncBatch(client.bulk(_))()
        .mergeEmit()
        .toPersistedProgress(view.progressId, initial)
      cassandraSource(s"project=${view.ref.id}", view.progressId, initial.minProgress.offset)
        .via(flow)
        .via(kamonViewMetricsFlow(view, project))
    }
    StreamSupervisor.start(sourceF, view.progressId, actorInitializer)
  }
}
// $COVERAGE-ON$ 
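start expects an implicit actorInitializer; one way to satisfy it, as a sketch (actorSystem is assumed to be in scope):

implicit val actorInitializer: (Props, String) => ActorRef =
  (props, name) => actorSystem.actorOf(props, name) // create the supervisor actor directly on the system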
Example 79
Source File: ElasticSearchIndexer.scala    From nexus-kg   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.indexing

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.stream.scaladsl.Source
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.types.Project
import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchClient
import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchClient.BulkOp
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.AppConfig._
import ch.epfl.bluebrain.nexus.kg.indexing.View.ElasticSearchView
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.kg.routes.Clients
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.ProgressFlowElem
import ch.epfl.bluebrain.nexus.sourcing.projections.ProjectionProgress.NoProgress
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
@SuppressWarnings(Array("MaxParameters"))
object ElasticSearchIndexer {

  private implicit val log: Logger = Logger[ElasticSearchIndexer.type]

  
  final def start[F[_]: Timer](
      view: ElasticSearchView,
      resources: Resources[F],
      project: Project,
      restartOffset: Boolean
  )(
      implicit as: ActorSystem,
      actorInitializer: (Props, String) => ActorRef,
      projections: Projections[F, String],
      F: Effect[F],
      clients: Clients[F],
      config: AppConfig
  ): StreamSupervisor[F, ProjectionProgress] = {

    implicit val ec: ExecutionContext          = as.dispatcher
    implicit val p: Project                    = project
    implicit val indexing: IndexingConfig      = config.elasticSearch.indexing
    implicit val metadataOpts: MetadataOptions = MetadataOptions(linksAsIri = true, expandedLinks = true)
    implicit val tm: Timeout                   = Timeout(config.elasticSearch.askTimeout)

    val client: ElasticSearchClient[F] = clients.elasticSearch.withRetryPolicy(config.elasticSearch.indexing.retry)

    def deleteOrIndex(res: ResourceV): Option[BulkOp] =
      if (res.deprecated && !view.filter.includeDeprecated) Some(delete(res))
      else view.toDocument(res).map(doc => BulkOp.Index(view.index, res.id.value.asString, doc))

    def delete(res: ResourceV): BulkOp =
      BulkOp.Delete(view.index, res.id.value.asString)

    val initFetchProgressF: F[ProjectionProgress] =
      if (restartOffset)
        projections.recordProgress(view.progressId, NoProgress) >> view.createIndex >> F.pure(NoProgress)
      else view.createIndex >> projections.progress(view.progressId)

    val sourceF: F[Source[ProjectionProgress, _]] = initFetchProgressF.map { initial =>
      val flow = ProgressFlowElem[F, Any]
        .collectCast[Event]
        .groupedWithin(indexing.batch, indexing.batchTimeout)
        .distinct()
        .mapAsync(view.toResource(resources, _))
        .collectSome[ResourceV]
        .collect {
          case res if view.allowedSchemas(res) && view.allowedTypes(res) => deleteOrIndex(res)
          case res if view.allowedSchemas(res)                           => Some(delete(res))
        }
        .collectSome[BulkOp]
        .runAsyncBatch(client.bulk(_))()
        .mergeEmit()
        .toPersistedProgress(view.progressId, initial)

      cassandraSource(s"project=${view.ref.id}", view.progressId, initial.minProgress.offset)
        .via(flow)
        .via(kamonViewMetricsFlow(view, project))
    }
    StreamSupervisor.start(sourceF, view.progressId, actorInitializer)
  }
}
// $COVERAGE-ON$ 
Example 80
Source File: DaemonWorkflowDriver.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.workflow_driver

import akka.actor.{ Actor, ActorRef }
import akka.pattern.ask
import com.typesafe.scalalogging.LazyLogging
import io.vamp.common.akka.IoC.actorFor
import io.vamp.container_driver.ContainerDriverActor.{ DeployWorkflow, GetWorkflow, UndeployWorkflow }
import io.vamp.container_driver.ContainerWorkflow
import io.vamp.model.artifact.{ DaemonSchedule, DefaultBreed, Instance, Workflow }
import io.vamp.persistence.PersistenceActor

import scala.concurrent.Future

trait DaemonWorkflowDriver extends WorkflowDriver with LazyLogging {

  protected def driverActor: ActorRef

  override def receive: Actor.Receive = super.receive orElse {
    case ContainerWorkflow(workflow, containers, health) ⇒

      logger.info("DaemonWorkflowDriver - received ContainerWorkflow {}", workflow.name)

      if (workflow.health != health) actorFor[PersistenceActor] ! PersistenceActor.UpdateWorkflowHealth(workflow, health)

      val instances = containers.map(_.instances.map { instance ⇒
        val ports: Map[String, Int] = {
          workflow.breed match {
            case breed: DefaultBreed ⇒ breed.ports.map(_.name) zip instance.ports
            case _                   ⇒ Map[String, Int]()
          }
        }.toMap

        logger.info("DaemonWorkflowDriver - Ports for ContainerInstance {} are {}", instance.toString, ports.toString)

        Instance(instance.name, instance.host, ports, instance.deployed)
      }).getOrElse(Nil)

      if (workflow.instances != instances) actorFor[PersistenceActor] ! PersistenceActor.UpdateWorkflowInstances(workflow, instances)

    case _ ⇒ logger.info("DaemonWorkflowDriver - received an unrecognised message")
  }

  protected override def request(workflows: List[Workflow]): Unit = workflows.foreach(request)

  protected def request: PartialFunction[Workflow, Unit] = {
    case workflow if workflow.schedule == DaemonSchedule ⇒ driverActor ! GetWorkflow(workflow, self)
    case workflow                                        ⇒ logger.info("DaemonWorkflowDriver - Workflow schedule is {} instead of DaemonSchedule", workflow.schedule)
  }

  protected override def schedule(data: Any): PartialFunction[Workflow, Future[Any]] = {
    case workflow if workflow.schedule == DaemonSchedule ⇒ {
      logger.info("DaemonWorkflowDriver - Workflow number of instances is {}", workflow.instances.size)
      enrich(workflow, data).flatMap { enriched ⇒ driverActor ? DeployWorkflow(enriched, update = workflow.instances.nonEmpty) }
    }
  }

  protected override def unschedule(): PartialFunction[Workflow, Future[Any]] = {
    case workflow if workflow.schedule == DaemonSchedule && workflow.instances.nonEmpty ⇒ driverActor ? UndeployWorkflow(workflow)
  }
} 
Example 81
Source File: TransformationFrontend.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package sample.cluster.transformation

import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger

import akka.actor.{ Actor, ActorRef, ActorSystem, Props, Terminated }
import akka.pattern.ask
import akka.util.Timeout
import com.typesafe.config.ConfigFactory

import scala.concurrent.ExecutionContext.Implicits
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

//#frontend
class TransformationFrontend extends Actor {
  var backends = IndexedSeq.empty[ActorRef]
  var jobCounter = 0

  def receive = {
    case job: TransformationJob if backends.isEmpty =>
      sender() ! JobFailed("Service unavailable, try again later", job)

    case job: TransformationJob =>
      jobCounter += 1
      backends(jobCounter % backends.size) forward job

    case BackendRegistration if !backends.contains(sender()) =>
      context watch sender()
      backends = backends :+ sender()

    case Terminated(a) =>
      backends = backends.filterNot(_ == a)
  }
}
//#frontend

object TransformationFrontend {
  def main(args: Array[String]): Unit = {
    // Override the configuration of the port when specified as program argument
    val port = if (args.isEmpty) "0" else args(0)
    val config = ConfigFactory
      .parseString(s"""
        akka.remote.netty.tcp.port=$port
        akka.remote.artery.canonical.port=$port
        """)
      .withFallback(ConfigFactory.parseString("akka.cluster.roles = [frontend]"))
      .withFallback(ConfigFactory.load("simple-cluster"))

    val system = ActorSystem("ClusterSystem", config)
    val frontend =
      system.actorOf(Props[TransformationFrontend], name = "frontend")

    val counter = new AtomicInteger
    import system.dispatcher
    system.scheduler.schedule(2.seconds, 2.seconds) {
      implicit val timeout = Timeout(5 seconds)
      (frontend ? TransformationJob("hello-" + counter.incrementAndGet())) foreach {
        case result => println(result)
      }
    }
    Future {
      TimeUnit.SECONDS.sleep(80)
      system.terminate()
    }(Implicits.global)
  }
} 
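The frontend exchanges messages that are defined elsewhere in the sample; their shapes, reconstructed from the usage above:

final case class TransformationJob(text: String)
final case class JobFailed(reason: String, job: TransformationJob)
case object BackendRegistration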
Example 82
Source File: StatsService.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package sample.cluster.stats

import akka.actor.{ Actor, ActorRef, Props, ReceiveTimeout }
import akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope
import akka.routing.FromConfig

import scala.concurrent.duration._

//#service
class StatsService extends Actor {
  // This router is used both with lookup and deploy of routees. If you
  // have a router with only lookup of routees you can use Props.empty
  // instead of Props[StatsWorker].
  val workerRouter =
    context.actorOf(FromConfig.props(Props[StatsWorker]), name = "workerRouter")

  def receive = {
    case StatsJob(text) if text != "" =>
      val words = text.split(" ")
      val replyTo = sender() // important to not close over sender()
      // create actor that collects replies from workers
      val aggregator =
        context.actorOf(Props(classOf[StatsAggregator], words.size, replyTo))
      words foreach { word =>
        workerRouter.tell(ConsistentHashableEnvelope(word, word), aggregator)
      }
  }
}

class StatsAggregator(expectedResults: Int, replyTo: ActorRef) extends Actor {
  var results = IndexedSeq.empty[Int]
  context.setReceiveTimeout(3.seconds)

  def receive = {
    case wordCount: Int =>
      results = results :+ wordCount
      if (results.size == expectedResults) {
        val meanWordLength = results.sum.toDouble / results.size
        replyTo ! StatsResult(meanWordLength)
        context.stop(self)
      }
    case ReceiveTimeout =>
      replyTo ! JobFailed("Service unavailable, try again later")
      context.stop(self)
  }
}
//#service 
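StatsJob, StatsResult and JobFailed are defined elsewhere in the sample; reconstructed from the usage above:

final case class StatsJob(text: String)
final case class StatsResult(meanWordLength: Double)
final case class JobFailed(reason: String)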
Example 83
Source File: BalancingPoolDemo.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example

import akka.actor.{ Actor, ActorLogging, ActorRef, ActorSystem, Props }
import example.Worker.FibonacciNumber

import scala.annotation.tailrec
import scala.concurrent.duration._
import scala.io.StdIn

object Worker {
  case class FibonacciNumber(nbr: Int, delay: FiniteDuration)

  case class GetResult(nr: Int, source: ActorRef)

  def props: Props = Props(new Worker)
}

class Worker extends Actor with ActorLogging {
  import Worker._
  import context.dispatcher

  override def preStart(): Unit =
    log.info(s"$self started")

  override def postStop(): Unit =
    log.info(s"$self stopped")

  override def receive: Receive = {
    case FibonacciNumber(nr, delay) =>
      context.system.scheduler.scheduleOnce(delay, self, GetResult(nr, sender()))

    case GetResult(nr, source) =>
      val result = fibonacci(nr)
      log.info(s"$nr! = $result")
  }

  private def fibonacci(n: Int): Int = {
    @tailrec
    def fib(n: Int, b: Int, a: Int): Int = n match {
      case 0 => a
      case _ =>
        fib(n - 1, a + b, b)
    }
    fib(n, 1, 0)
  }
}

object BalancingPoolDemo extends App {
  implicit val system = ActorSystem()

  val worker = system.actorOf(Worker.props, "worker")
  worker ! FibonacciNumber(50, 50.millis)
  worker ! FibonacciNumber(33, 50.millis)
  worker ! FibonacciNumber(68, 50.millis)
  worker ! FibonacciNumber(53, 50.millis)
  worker ! FibonacciNumber(45, 50.millis)

  StdIn.readLine()
  system.terminate()
} 
Example 84
Source File: StatusDispatchSpec.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.dispatch

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.{TestKit, TestProbe}
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.kernel.protocol.v5.content.KernelStatus
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{BeforeAndAfter, FunSpecLike, Matchers}
import play.api.libs.json.Json
import test.utils.MaxAkkaTestTimeout

class StatusDispatchSpec extends TestKit(
  ActorSystem(
    "StatusDispatchSystem",
    None,
    Some(org.apache.toree.Main.getClass.getClassLoader)
  )
)
with FunSpecLike with Matchers with MockitoSugar with BeforeAndAfter{
  var statusDispatchRef: ActorRef = _
  var relayProbe: TestProbe = _
  before {
    //  Mock the relay with a probe
    relayProbe = TestProbe()
    //  Mock the ActorLoader
    val mockActorLoader: ActorLoader = mock[ActorLoader]
    when(mockActorLoader.load(SystemActorType.KernelMessageRelay))
      .thenReturn(system.actorSelection(relayProbe.ref.path.toString))

    statusDispatchRef = system.actorOf(Props(classOf[StatusDispatch],mockActorLoader))
  }


  describe("StatusDispatch") {
    describe("#receive( KernelStatusType )") {
      it("should send a status message to the relay") {
        statusDispatchRef ! KernelStatusType.Busy
        //  Check the kernel message is the correct type
        val statusMessage: KernelMessage = relayProbe.receiveOne(MaxAkkaTestTimeout).asInstanceOf[KernelMessage]
        statusMessage.header.msg_type should be (MessageType.Outgoing.Status.toString)
        //  Check the status is what we sent
        val status: KernelStatus = Json.parse(statusMessage.contentString).as[KernelStatus]
        status.execution_state should be (KernelStatusType.Busy.toString)
      }
    }

    describe("#receive( KernelStatusType, Header )") {
      it("should send a status message to the relay") {
        val tuple = Tuple2(KernelStatusType.Busy, mock[Header])
        statusDispatchRef ! tuple
        //  Check the kernel message is the correct type
        val statusMessage: KernelMessage = relayProbe.receiveOne(MaxAkkaTestTimeout).asInstanceOf[KernelMessage]
        statusMessage.header.msg_type should be (MessageType.Outgoing.Status.toString)
        //  Check the status is what we sent
        val status: KernelStatus = Json.parse(statusMessage.contentString).as[KernelStatus]
        status.execution_state should be (KernelStatusType.Busy.toString)
      }
    }
  }
} 
Example 85
Source File: StdinSpec.scala    From incubator-toree   with Apache License 2.0
package org.apache.toree.kernel.protocol.v5.kernel.socket

import java.nio.charset.Charset

import akka.actor.{Props, ActorSelection, ActorRef, ActorSystem}
import akka.testkit.{TestProbe, ImplicitSender, TestKit}
import akka.util.ByteString
import org.apache.toree.communication.ZMQMessage
import org.apache.toree.kernel.protocol.v5.kernel.Utilities._
import org.apache.toree.kernel.protocol.v5Test._
import org.apache.toree.kernel.protocol.v5.{KernelMessage, SystemActorType}
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import com.typesafe.config.ConfigFactory
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, FunSpecLike}
import org.mockito.Mockito._
import org.mockito.Matchers._
import test.utils.MaxAkkaTestTimeout

object StdinSpec {
  val config ="""
    akka {
      loglevel = "WARNING"
    }"""
}

class StdinSpec extends TestKit(ActorSystem(
  "StdinActorSpec",
  ConfigFactory.parseString(StdinSpec.config),
  org.apache.toree.Main.getClass.getClassLoader
)) with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {
  describe("Stdin") {
    val socketFactory = mock[SocketFactory]
    val actorLoader = mock[ActorLoader]
    val socketProbe : TestProbe = TestProbe()
    when(socketFactory.Stdin(any(classOf[ActorSystem]), any(classOf[ActorRef]))).thenReturn(socketProbe.ref)

    val relayProbe : TestProbe = TestProbe()
    val relaySelection : ActorSelection = system.actorSelection(relayProbe.ref.path)
    when(actorLoader.load(SystemActorType.KernelMessageRelay)).thenReturn(relaySelection)

    val stdin = system.actorOf(Props(classOf[Stdin], socketFactory, actorLoader))

    describe("#receive") {
      it("( KernelMessage ) should reply with a ZMQMessage via the socket") {
        //  Use the implicit to convert the KernelMessage to ZMQMessage
        val MockZMQMessage : ZMQMessage = MockKernelMessage

        stdin ! MockKernelMessage
        socketProbe.expectMsg(MockZMQMessage)
      }

      it("( ZMQMessage ) should forward ZMQ Strings and KernelMessage to Relay") {
        //  Use the implicit to convert the KernelMessage to ZMQMessage
        val MockZMQMessage : ZMQMessage = MockKernelMessage

        stdin ! MockZMQMessage

        // Should get the last four (assuming no buffer) strings in UTF-8
        val zmqStrings = MockZMQMessage.frames.map((byteString: ByteString) =>
          new String(byteString.toArray, Charset.forName("UTF-8"))
        ).takeRight(4)

        val kernelMessage: KernelMessage = MockZMQMessage

        relayProbe.expectMsg(MaxAkkaTestTimeout, (zmqStrings, kernelMessage))
      }
    }
  }
} 
Example 86
Source File: HeartbeatSpec.scala    From incubator-toree   with Apache License 2.0
package org.apache.toree.kernel.protocol.v5.kernel.socket

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import akka.util.ByteString
import org.apache.toree.communication.ZMQMessage
import com.typesafe.config.ConfigFactory
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FunSpecLike, Matchers}
import test.utils.MaxAkkaTestTimeout

object HeartbeatSpec {
  val config = """
    akka {
      loglevel = "WARNING"
    }"""
}

class HeartbeatSpec extends TestKit(
  ActorSystem(
    "HeartbeatActorSpec",
    ConfigFactory.parseString(HeartbeatSpec.config),
    org.apache.toree.Main.getClass.getClassLoader
  ))
with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {
  val SomeMessage: String = "some message"
  val SomeZMQMessage: ZMQMessage = ZMQMessage(ByteString(SomeMessage.getBytes))

  describe("HeartbeatActor") {
    val socketFactory = mock[SocketFactory]
    val probe : TestProbe = TestProbe()
    when(socketFactory.Heartbeat(any(classOf[ActorSystem]), any(classOf[ActorRef]))).thenReturn(probe.ref)

    val heartbeat = system.actorOf(Props(classOf[Heartbeat], socketFactory))

    describe("send heartbeat") {
      it("should receive and send same ZMQMessage") {
        heartbeat ! SomeZMQMessage
        probe.expectMsg(MaxAkkaTestTimeout, SomeZMQMessage)
      }
    }
  }
} 
Example 87
Source File: ShellSpec.scala    From incubator-toree   with Apache License 2.0
package org.apache.toree.kernel.protocol.v5.kernel.socket

import java.nio.charset.Charset

import akka.actor.{ActorSelection, ActorRef, ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import akka.util.ByteString
import org.apache.toree.communication.ZMQMessage
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.kernel.protocol.v5.kernel.{ActorLoader, Utilities}
import org.apache.toree.kernel.protocol.v5Test._
import Utilities._
import com.typesafe.config.ConfigFactory
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FunSpecLike, Matchers}
import test.utils.MaxAkkaTestTimeout

object ShellSpec {
  val config ="""
    akka {
      loglevel = "WARNING"
    }"""
}

class ShellSpec extends TestKit(
  ActorSystem(
    "ShellActorSpec",
    ConfigFactory.parseString(ShellSpec.config),
    org.apache.toree.Main.getClass.getClassLoader
  ))
  with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {

  describe("Shell") {
    val socketFactory = mock[SocketFactory]
    val actorLoader = mock[ActorLoader]
    val socketProbe : TestProbe = TestProbe()
    when(socketFactory.Shell(any(classOf[ActorSystem]), any(classOf[ActorRef]))).thenReturn(socketProbe.ref)

    val relayProbe : TestProbe = TestProbe()
    val relaySelection : ActorSelection = system.actorSelection(relayProbe.ref.path)
    when(actorLoader.load(SystemActorType.KernelMessageRelay)).thenReturn(relaySelection)

    val shell = system.actorOf(Props(classOf[Shell], socketFactory, actorLoader))

    describe("#receive") {
      it("( KernelMessage ) should reply with a ZMQMessage via the socket") {
        //  Use the implicit to convert the KernelMessage to ZMQMessage
        val MockZMQMessage : ZMQMessage = MockKernelMessage

        shell ! MockKernelMessage
        socketProbe.expectMsg(MockZMQMessage)
      }

      it("( ZMQMessage ) should forward ZMQ Strings and KernelMessage to Relay") {
        //  Use the implicit to convert the KernelMessage to ZMQMessage
        val MockZMQMessage : ZMQMessage = MockKernelMessage

        shell ! MockZMQMessage

        // Should get the last four (assuming no buffer) strings in UTF-8
        val zmqStrings = MockZMQMessage.frames.map((byteString: ByteString) =>
          new String(byteString.toArray, Charset.forName("UTF-8"))
        ).takeRight(4)

        val kernelMessage: KernelMessage = MockZMQMessage

        relayProbe.expectMsg(MaxAkkaTestTimeout, (zmqStrings, kernelMessage))
      }
    }
  }
} 
Example 88
Source File: GenericSocketMessageHandlerSpec.scala    From incubator-toree   with Apache License 2.0
package org.apache.toree.kernel.protocol.v5.handler

import akka.actor.{ActorSystem, Props, ActorRef, ActorSelection}
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import org.apache.toree.kernel.protocol.v5Test._
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, FunSpecLike}
import test.utils.MaxAkkaTestTimeout

class GenericSocketMessageHandlerSpec extends TestKit(
  ActorSystem(
    "GenericSocketMessageHandlerSystem",
    None,
    Some(org.apache.toree.Main.getClass.getClassLoader)
  ))
with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {
  describe("GenericSocketMessageHandler( ActorLoader, SocketType )") {
    //  Create a mock ActorLoader for the Relay we are going to test
    val actorLoader: ActorLoader = mock[ActorLoader]

    //  Create a probe for the ActorSelection that the ActorLoader will return
    val selectionProbe: TestProbe = TestProbe()
    val selection: ActorSelection = system.actorSelection(selectionProbe.ref.path.toString)
    when(actorLoader.load(SocketType.Control)).thenReturn(selection)

    //  The Relay we are going to be testing against
    val genericHandler: ActorRef = system.actorOf(
      Props(classOf[GenericSocketMessageHandler], actorLoader, SocketType.Control)
    )

    describe("#receive( KernelMessage )") {
      genericHandler ! MockKernelMessage

      it("should send the message to the selected actor"){
        selectionProbe.expectMsg(MaxAkkaTestTimeout, MockKernelMessage)
      }
    }
  }
} 
Example 89
Source File: SocketFactory.scala    From incubator-toree   with Apache License 2.0
package org.apache.toree.kernel.protocol.v5.kernel.socket

import akka.actor.{Props, ActorRef, ActorSystem}
import org.apache.toree.communication.actors.{RouterSocketActor, RepSocketActor, PubSocketActor}

object SocketFactory {
  def apply(socketConfig: SocketConfig) = {
    new SocketFactory(socketConfig)
  }
}

// The class declaration was lost in extraction; a minimal reconstruction
// (assumption -- the original file also defines connection values and factory
// methods for the Shell, Stdin, Control and Heartbeat sockets):
class SocketFactory(socketConfig: SocketConfig) {
  // IOPub connection string, e.g. "tcp://127.0.0.1:<iopub_port>" (reconstructed;
  // field names follow the standard Jupyter connection-file schema)
  val IOPubConnection = s"${socketConfig.transport}://${socketConfig.ip}:${socketConfig.iopub_port}"
  def IOPub(system: ActorSystem) : ActorRef =
    system.actorOf(Props(classOf[PubSocketActor], IOPubConnection.toString))
//    ZeroMQExtension(system).newPubSocket(
//      Bind(IOPubConnection.toString)
//    )
} 
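
A usage sketch for the reconstructed factory (socketConfig is an assumption):

val factory = SocketFactory(socketConfig)
val ioPub: ActorRef = factory.IOPub(system) // Pub socket actor for the IOPub channel
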
Example 90
Source File: ZeromqKernelMessageSocket.scala    From incubator-toree   with Apache License 2.0
package org.apache.toree.kernel.protocol.v5.kernel.socket

import java.nio.charset.Charset

import akka.actor.{ActorSelection, ActorSystem, ActorRef, Actor}
import akka.util.ByteString
import org.apache.toree.communication.ZMQMessage

//import org.apache.toree.kernel.protocol.v5.kernel.ZMQMessage
import org.apache.toree.kernel.protocol.v5.KernelMessage
import org.apache.toree.kernel.protocol.v5.kernel.Utilities._
import org.apache.toree.utils.MessageLogSupport


abstract class ZeromqKernelMessageSocket(
  actorSocketFunc: (ActorSystem, ActorRef) => ActorRef,
  actorForwardFunc: () => ActorSelection
) extends Actor with MessageLogSupport {
  val actorSocketRef = actorSocketFunc(context.system, self)
  val actorForwardRef = actorForwardFunc()

  override def receive: Receive = {
    case message: ZMQMessage =>
      val kernelMessage: KernelMessage = message
      logMessage(kernelMessage)

      // Grab the strings to use for signature verification
      val zmqStrings = message.frames.map((byteString: ByteString) =>
        new String(byteString.toArray, Charset.forName("UTF-8"))
      ).takeRight(4) // TODO: This assumes NO extra buffers, refactor?

      // Forward along our message (along with the strings used for
      // signatures)
      actorForwardRef ! ((zmqStrings, kernelMessage))

    case message: KernelMessage =>
      val zmqMessage: ZMQMessage = message
      logMessage(message)
      actorSocketRef ! zmqMessage
  }
} 
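
The Shell and Stdin actors exercised in Examples 85 and 87 are thin subclasses of this base; a sketch of such a subclass (constructor wiring is an assumption modeled on those specs):

class ShellLikeSocket(socketFactory: SocketFactory, actorLoader: ActorLoader)
  extends ZeromqKernelMessageSocket(
    socketFactory.Shell, // (ActorSystem, ActorRef) => ActorRef creating the socket actor
    () => actorLoader.load(SystemActorType.KernelMessageRelay) // forward target
  )
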
Example 91
Source File: BootstrapEndpointTestActors.scala    From hydra   with Apache License 2.0
package hydra.kafka.endpoints

import java.util.UUID

import akka.actor.{ActorRef, Props}
import com.typesafe.config.Config
import hydra.avro.registry.ConfluentSchemaRegistry
import hydra.core.akka.SchemaRegistryActor
import hydra.kafka.model.TopicMetadata
import hydra.kafka.services.{StreamsManagerActor, TopicBootstrapActor}
import hydra.kafka.util.KafkaUtils
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient
import org.joda.time.DateTime

trait BootstrapEndpointTestActors extends BootstrapEndpointActors {

  class StreamsActorTest(
      bootstrapKafkaConfig: Config,
      bootstrapServers: String,
      schemaRegistryClient: SchemaRegistryClient
  ) extends StreamsManagerActor(
        bootstrapKafkaConfig,
        bootstrapServers,
        schemaRegistryClient
      ) {

    override val metadataMap: Map[String, TopicMetadata] =
      Map[String, TopicMetadata] {
        "exp.test-existing.v1.SubjectPreexisted" -> TopicMetadata(
          "exp.test-existing.v1.SubjectPreexisted",
          0,
          "",
          derived = false,
          None,
          "",
          "",
          None,
          None,
          UUID.randomUUID(),
          DateTime.now().minusSeconds(10)
        )
      }
  }

  object StreamsActorTest {

    def props(
        bootstrapKafkaConfig: Config,
        bootstrapServers: String,
        schemaRegistryClient: SchemaRegistryClient
    ) = {
      Props(
        new StreamsActorTest(
          bootstrapKafkaConfig,
          bootstrapServers,
          schemaRegistryClient
        )
      )
    }
  }

  private[kafka] val streamsManagerPropsTest = StreamsActorTest.props(
    bootstrapKafkaConfig,
    KafkaUtils.BootstrapServers,
    ConfluentSchemaRegistry.forConfig(applicationConfig).registryClient
  )

  override val bootstrapActor: ActorRef = system.actorOf(
    TopicBootstrapActor.props(
      schemaRegistryActor,
      kafkaIngestor,
      streamsManagerPropsTest,
      Some(bootstrapKafkaConfig)
    )
  )

} 
Example 92
Source File: TransportCallback.scala    From hydra   with Apache License 2.0
package hydra.core.transport

import akka.actor.ActorRef
import hydra.core.protocol.{RecordNotProduced, RecordProduced}
import hydra.core.transport.Transport.{Confirm, TransportError}

// The TransportCallback trait was lost in extraction; reconstructed from the
// override below (assumption):
trait TransportCallback {
  def onCompletion(deliveryId: Long, md: Option[RecordMetadata], exception: Option[Throwable]): Unit
}

class TransportSupervisorCallback(transport: ActorRef)
    extends TransportCallback {

  override def onCompletion(
      deliveryId: Long,
      md: Option[RecordMetadata],
      exception: Option[Throwable]
  ): Unit = {
    md match {
      case Some(_) => transport ! Confirm(deliveryId)
      case None    => transport ! TransportError(deliveryId)
    }
  }
} 
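
A usage sketch (the delivery ids and refs are hypothetical): completion with metadata confirms the delivery, completion without reports a transport error.

val callback = new TransportSupervisorCallback(transportRef)
callback.onCompletion(1L, Some(metadata), None)           // transportRef ! Confirm(1)
callback.onCompletion(2L, None, Some(new Exception("e"))) // transportRef ! TransportError(2)
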
Example 93
Source File: InitializingActor.scala    From hydra   with Apache License 2.0
package hydra.core.akka

import akka.actor.{Actor, ActorRef, ReceiveTimeout, Stash}
import akka.pattern.pipe
import hydra.common.config.ActorConfigSupport
import hydra.common.logging.LoggingAdapter
import hydra.core.HydraException
import hydra.core.akka.InitializingActor.{InitializationError, Initialized}
import hydra.core.protocol.HydraMessage
import retry.Success

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.control.NonFatal

trait InitializingActor
    extends Actor
    with ActorConfigSupport
    with Stash
    with LoggingAdapter {

  
  def initializationError(ex: Throwable): Receive
}

object InitializingActor {

  case object Initialized extends HydraMessage

  case class InitializationError(cause: Throwable) extends HydraMessage

}

@SerialVersionUID(1L)
class ActorInitializationException(
    ingestor: ActorRef,
    message: String,
    cause: Throwable
) extends HydraException(
      ActorInitializationException.enrichedMessage(ingestor, message),
      cause
    ) {
  def getActor: ActorRef = ingestor
}

object ActorInitializationException {

  private def enrichedMessage(actor: ActorRef, message: String) =
    Option(actor).map(a => s"${a.path}: $message").getOrElse(message)

  private[hydra] def apply(
      actor: ActorRef,
      message: String,
      cause: Throwable = null
  ) =
    new ActorInitializationException(actor, message, cause)

  def unapply(
      ex: ActorInitializationException
  ): Option[(ActorRef, String, Throwable)] =
    Some((ex.getActor, ex.getMessage, ex.getCause))
} 
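
The companion's unapply lets supervising code pattern-match on the failing ingestor; a minimal sketch:

def report(ex: Throwable): Unit = ex match {
  case ActorInitializationException(actor, message, cause) =>
    println(s"ingestor ${actor.path} failed to initialize: $message (cause: $cause)")
  case other => throw other
}
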
Example 94
Source File: TransportOpsSpec.scala    From hydra   with Apache License 2.0
package hydra.core.ingest

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.TestActors.ForwardActor
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import com.pluralsight.hydra.reflect.DoNotScan
import hydra.core.akka.ActorInitializationException
import hydra.core.protocol.{IngestorError, Produce}
import hydra.core.test.TestRecordFactory
import hydra.core.transport.AckStrategy.NoAck
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._


class TransportOpsSpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ImplicitSender
    with ScalaFutures {

  override def afterAll() = TestKit.shutdownActorSystem(system)

  val supervisor = TestProbe()

  val tm = TestProbe()

  val transport =
    system.actorOf(Props(new ForwardActor(tm.ref)), "test-transport")

  describe("TransportOps") {
    it("looks up a transport") {
      val t =
        system.actorOf(Props(classOf[TestTransportIngestor], supervisor.ref))
      t ! "hello"
      expectMsg("hi!")
    }

    it("won't initialize if transport can't be found") {
      val t = system.actorOf(Props[TestTransportIngestorError])
      t ! "hello"
      expectNoMessage()
    }

    it("transports a record") {
      val req = HydraRequest("123", "test-produce")
      val t =
        system.actorOf(Props(classOf[TestTransportIngestor], supervisor.ref))
      t ! req
      whenReady(TestRecordFactory.build(req))(r =>
        tm.expectMsg(Produce(r, self, NoAck))
      )
    }
  }
}

@DoNotScan
class TestTransportIngestor(supervisor: ActorRef)
    extends Ingestor
    with TransportOps {

  override val recordFactory = TestRecordFactory

  override def initTimeout = 500 millis

  ingest {
    case "hello" => sender ! "hi!"
    case req: HydraRequest =>
      val record = Await.result(TestRecordFactory.build(req), 3.seconds)
      transport(record, NoAck)
  }

  override def transportName = "test-transport"
}

class TestTransportIngestorError extends Ingestor with TransportOps {
  override val recordFactory = TestRecordFactory

  override def transportName = "test-transport-unknown"
} 
Example 95
Source File: Heartbeat.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey.performer

import akka.actor.ActorRef
import org.apache.iota.fey.FeyGenericActor

import scala.collection.immutable.Map
import scala.concurrent.duration._

class Heartbeat(override val params: Map[String, String] = Map.empty,
                override val backoff: FiniteDuration = 1.minutes,
                override val connectTo: Map[String, ActorRef] = Map.empty,
                override val schedulerTimeInterval: FiniteDuration = 30.seconds,
                override val orchestrationName: String = "",
                override val orchestrationID: String = "",
                override val autoScale: Boolean = false) extends FeyGenericActor {

  override def onStart : Unit = {
  }

  override def onStop : Unit=  {
  }

  override def onRestart(reason: Throwable) : Unit = {
    // Called after actor is up and running - after self restart
  }

  override def customReceive: Receive = {
    case x => log.debug(s"Untreated $x")
  }

  override def processMessage[T](message: T, sender: ActorRef): Unit = {
  }

  override def execute() : Unit = {
    log.debug("alive")
    propagateMessage("alive")
  }

} 
Example 96
Source File: RandomDouble.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey.performer

import akka.actor.ActorRef
import org.apache.iota.fey.FeyGenericActor

import scala.collection.immutable.Map
import scala.concurrent.duration._

class RandomDouble(override val params: Map[String, String] = Map.empty,
                   override val backoff: FiniteDuration = 1.minutes,
                   override val connectTo: Map[String, ActorRef] = Map.empty,
                   override val schedulerTimeInterval: FiniteDuration = 30.seconds,
                   override val orchestrationName: String = "",
                   override val orchestrationID: String = "",
                   override val autoScale: Boolean = false) extends FeyGenericActor {

  override def onStart : Unit = {
  }

  override def onStop : Unit = {
  }

  override def onRestart(reason: Throwable) : Unit = {
    // Called after actor is up and running - after self restart
  }

  override def customReceive: Receive = {
    case x => log.debug(s"Untreated $x")
  }

  override def processMessage[T](message: T, sender: ActorRef): Unit = {
  }

  override def execute() : Unit = {
    val rd = scala.util.Random.nextGaussian().toString
    log.debug(rd)
    propagateMessage(rd)
  }

} 
Example 97
Source File: RandomUUID.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey.performer

import akka.actor.ActorRef
import org.apache.iota.fey.FeyGenericActor

import scala.collection.immutable.Map
import scala.concurrent.duration._

class RandomUUID(override val params: Map[String, String] = Map.empty,
                 override val backoff: FiniteDuration = 1.minutes,
                 override val connectTo: Map[String, ActorRef] = Map.empty,
                 override val schedulerTimeInterval: FiniteDuration = 30.seconds,
                 override val orchestrationName: String = "",
                 override val orchestrationID: String = "",
                 override val autoScale: Boolean = false) extends FeyGenericActor {

  override def onStart : Unit = {
  }

  override def onStop : Unit = {
  }

  override def onRestart(reason: Throwable) : Unit = {
    // Called after actor is up and running - after self restart
  }

  override def customReceive: Receive = {
    case x => log.debug(s"Untreated $x")
  }

  override def processMessage[T](message: T, sender: ActorRef): Unit = {
  }

  override def execute() : Unit = {
    val uuid = java.util.UUID.randomUUID.toString
    log.debug(uuid)
    propagateMessage(uuid)
  }

} 
Example 98
Source File: RandomInteger.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey.performer

import akka.actor.ActorRef
import org.apache.iota.fey.FeyGenericActor

import scala.collection.immutable.Map
import scala.concurrent.duration._

class RandomInteger(override val params: Map[String, String] = Map.empty,
                    override val backoff: FiniteDuration = 1.minutes,
                    override val connectTo: Map[String, ActorRef] = Map.empty,
                    override val schedulerTimeInterval: FiniteDuration = 30.seconds,
                    override val orchestrationName: String = "",
                    override val orchestrationID: String = "",
                    override val autoScale: Boolean = false) extends FeyGenericActor {

  override def onStart : Unit = {
  }

  override def onStop : Unit = {
  }

  override def onRestart(reason: Throwable) : Unit = {
    // Called after actor is up and running - after self restart
  }

  override def customReceive: Receive = {
    case x => log.debug(s"Untreated $x")
  }

  override def processMessage[T](message: T, sender: ActorRef): Unit = {
  }

  override def execute() : Unit = {
    val ri = scala.util.Random.nextInt().toString
    log.debug(ri)
    propagateMessage(ri)
  }

} 
Example 99
Source File: Timestamp.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey.performer

import akka.actor.ActorRef
import org.apache.iota.fey.FeyGenericActor

import scala.collection.immutable.Map
import scala.concurrent.duration._

class Timestamp(override val params: Map[String, String] = Map.empty,
                override val backoff: FiniteDuration = 1.minutes,
                override val connectTo: Map[String, ActorRef] = Map.empty,
                override val schedulerTimeInterval: FiniteDuration = 30.seconds,
                override val orchestrationName: String = "",
                override val orchestrationID: String = "",
                override val autoScale: Boolean = false) extends FeyGenericActor {

  override def onStart : Unit = {
  }

  override def onStop : Unit = {
  }

  override def onRestart(reason: Throwable) : Unit = {
    // Called after actor is up and running - after self restart
  }

  override def customReceive: Receive = {
    case x => log.debug(s"Untreated $x")
  }

  override def processMessage[T](message: T, sender: ActorRef): Unit = {
  }

  override def execute() : Unit = {
    val ts = java.lang.System.currentTimeMillis().toString
    log.debug(ts)
    propagateMessage(ts)
  }

} 
Example 100
Source File: GlobalWatchService.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey

import java.nio.file.{Files, Path, Paths, WatchEvent}

import akka.actor.{Actor, ActorLogging, ActorRef}
import org.apache.iota.fey.GlobalWatchService.REGISTER_WATCHER_PERFORMER
import org.apache.iota.fey.WatchingDirectories.STOPPED

class GlobalWatchService extends Actor with ActorLogging{

  //WatchService
  var watchThread:Thread = null
  val watchFileTask:GlobalWatchServiceTask = new GlobalWatchServiceTask(self)

  override def preStart(): Unit = {
    startWatcher("PRE-START")
  }

  override def postStop(): Unit = {
    stopWatcher("POST-STOP")
  }

  private def startWatcher(from: String) = {
    log.info(s"Starting Global Watcher from $from")
    watchThread = new Thread(watchFileTask, "FEY_GLOBAL_WATCH_SERVICE_PERFORMERS")
    watchThread.setDaemon(true)
    watchThread.start()
  }

  private def stopWatcher(from: String) = {
    log.info(s"Stopping Global Watcher from $from")
    if(watchThread != null && watchThread.isAlive){
      watchThread.interrupt()
      watchThread = null
    }
  }

  override def receive: Receive = {
    case REGISTER_WATCHER_PERFORMER(path, file_name, actor, events, loadExists) =>
      registerPath(path,file_name,actor,events,loadExists)
    case STOPPED =>
      stopWatcher("STOPPED-THREAD")
      startWatcher("STOPPED-THREAD")
    case x => log.error(s"Unknown message $x")
  }

  private def broadcastMessageIfFileExists(actor: ActorRef, pathWithFile: String) = {
    val filePath = Paths.get(pathWithFile)
    if(Files.exists(filePath)){
      log.info(s"File $pathWithFile exists. Broadcasting message to actor ${actor.path.toString}")
      actor ! GlobalWatchService.ENTRY_CREATED(filePath)
    }
  }

  private def registerPath(dir_path: String, file_name:Option[String], actor: ActorRef, events: Array[WatchEvent.Kind[_]], loadExists: Boolean) = {
    WatchingDirectories.actorsInfo.get((dir_path,file_name)) match {
      case Some(info) =>
        val newInfo:Map[WatchEvent.Kind[_], Array[ActorRef]] = events.map(event => {
          info.get(event) match {
            case Some(actors) => (event, (Array(actor) ++ actors))
            case None => (event, Array(actor))
          }
        }).toMap
        WatchingDirectories.actorsInfo.put((dir_path,file_name), info ++ newInfo)
        watchFileTask.watch(Paths.get(dir_path),actor.path.toString,events)
      case None =>
        val tmpEvents:Map[WatchEvent.Kind[_], Array[ActorRef]] = events.map(event => {(event, Array(actor))}).toMap
        WatchingDirectories.actorsInfo.put((dir_path,file_name), tmpEvents)
        watchFileTask.watch(Paths.get(dir_path),actor.path.toString,events)
    }

    if(file_name.isDefined && loadExists){
      log.info(s"Checking if file $dir_path/${file_name.get} already exist")
      broadcastMessageIfFileExists(actor, s"$dir_path/${file_name.get}")
    }

  }

}

object GlobalWatchService{
  sealed case class ENTRY_CREATED(path:Path)
  sealed case class ENTRY_MODIFIED(path:Path)
  sealed case class ENTRY_DELETED(path:Path)
  sealed case class REGISTER_WATCHER_PERFORMER(dir_path: String, file_name:Option[String],
                                               actor: ActorRef, events: Array[WatchEvent.Kind[_]],
                                               loadIfExists: Boolean)
} 
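
A hypothetical registration (path, file name and watcher actor are assumptions): watch a directory for file creation and, because loadIfExists is true, receive ENTRY_CREATED immediately if the file is already present.

import java.nio.file.StandardWatchEventKinds.ENTRY_CREATE

val events: Array[WatchEvent.Kind[_]] = Array(ENTRY_CREATE)
watchService ! GlobalWatchService.REGISTER_WATCHER_PERFORMER(
  "/tmp/fey", Some("orchestration.json"), self, events, loadIfExists = true)
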
Example 101
Source File: JsonReceiverActor.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey

import java.nio.file.Paths
import java.io.File

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import play.api.libs.json.{JsValue, Json}

class JsonReceiverActor extends Actor with ActorLogging {

  import JsonReceiverActor._

  val monitoring_actor = FEY_MONITOR.actorRef
  var watchFileTask: WatchServiceReceiver = _
  var watchThread: Thread = _

  override def preStart() {
    prepareDynamicJarRepo()
    processCheckpointFiles()

    watchFileTask = new WatchServiceReceiver(self)
    watchThread = new Thread(watchFileTask, GLOBAL_DEFINITIONS.WATCH_SERVICE_THREAD)

    monitoring_actor  ! Monitor.START(Utils.getTimestamp)
    watchThread.setDaemon(true)
    watchThread.start()

    watchFileTask.watch(Paths.get(CONFIG.JSON_REPOSITORY))
  }

  private def prepareDynamicJarRepo() = {
    val jarDir = new File(CONFIG.DYNAMIC_JAR_REPO)
    if (!jarDir.exists()){
      jarDir.mkdir()
    }else if(CONFIG.DYNAMIC_JAR_FORCE_PULL){
      jarDir.listFiles().foreach(_.delete())
    }
  }


  private def processCheckpointFiles() = {
    if (CONFIG.CHEKPOINT_ENABLED) {
      val checkpoint = new CheckpointProcessor(self)
      checkpoint.run()
    }
  }

  override def postStop() {
    monitoring_actor  ! Monitor.STOP(Utils.getTimestamp)
    watchThread.interrupt()
    watchThread.join()
  }

  override def postRestart(reason: Throwable): Unit = {
    monitoring_actor  ! Monitor.RESTART(reason, Utils.getTimestamp)
    preStart()
  }

  override def receive: Receive = {
    case JSON_RECEIVED(json, file) =>
      log.info(s"JSON RECEIVED => ${Json.stringify(json)}")
      context.parent ! FeyCore.ORCHESTRATION_RECEIVED(json, Some(file))

    case _ =>
  }

}

object JsonReceiverActor {

  case class JSON_RECEIVED(json: JsValue, file: File)

} 
Example 102
Source File: WatchServiceReceiver.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey

import java.nio.file.StandardWatchEventKinds._
import java.nio.file.{FileSystems, Path}
import java.io.File
import akka.actor.ActorRef
import org.apache.iota.fey.JsonReceiverActor.JSON_RECEIVED
import play.api.libs.json._

import scala.io.Source

class WatchServiceReceiver(receiverActor: ActorRef) extends JsonReceiver{

  processInitialFiles()

  private val watchService = FileSystems.getDefault.newWatchService()

  def watch(path: Path) : Unit = path.register(watchService, ENTRY_CREATE, ENTRY_MODIFY)

  def getJsonObject(params: String): Option[JsValue] = {
    try{
      val stringJson = Source.fromFile(params).getLines.mkString
      Option(Json.parse(stringJson))
    }catch{
      case e: Exception =>
        log.error("Could not parse JSON", e)
        None
    }
  }

  override def execute(): Unit = {

    val key = watchService.take()
    val eventsIterator = key.pollEvents().iterator()

    while(eventsIterator.hasNext) {
      val event = eventsIterator.next()
      val relativePath = event.context().asInstanceOf[Path]
      val path = key.watchable().asInstanceOf[Path].resolve(relativePath)

      log.debug(s"${event.kind()} --- $path")
      event.kind() match {
        case (ENTRY_CREATE | ENTRY_MODIFY) if path.toString.endsWith(CONFIG.JSON_EXTENSION) =>
          processJson(path.toString, path.toFile)
        case _ =>
      }
    }

    key.reset()
  }

  private[fey] def processJson(path: String, file: File) = {
    try{
      getJsonObject(path) match {
        case Some(orchestrationJSON) =>
          val valid = validJson(orchestrationJSON)
          if(valid && (orchestrationJSON \ JSON_PATH.COMMAND).as[String].toUpperCase != "DELETE"){
            checkForLocation(orchestrationJSON)
          }
          if(valid) {
            receiverActor ! JSON_RECEIVED(orchestrationJSON, file)
          }else{
            log.warn(s"File $path not processed. Incorrect JSON schema")
          }
        case None =>
      }
    } catch {
      case e: Exception =>
        log.error(s"File $path will not be processed", e)
    }
  }

  private def processInitialFiles() = {
    Utils.getFilesInDirectory(CONFIG.JSON_REPOSITORY)
      .filter(file => file.getName.endsWith(CONFIG.JSON_EXTENSION))
      .foreach(file => {
        processJson(file.getAbsolutePath, file)
      })
  }

  override def exceptionOnRun(e: Exception): Unit = {
    e match {
      case e: InterruptedException =>
      case e: Exception => log.error("Watch Service stopped", e)
    }
    watchService.close()
  }

} 
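
Example 101 above shows the intended wiring from the actor side; condensed here as a sketch (thread name and directory are assumptions):

val watchTask = new WatchServiceReceiver(jsonReceiverActorRef)
val watchThread = new Thread(watchTask, "JSON-WATCH-SERVICE")
watchThread.setDaemon(true)
watchThread.start()
watchTask.watch(Paths.get("/tmp/fey/json-repo"))
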
Example 103
Source File: FeyGenericActorReceiver.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey

import java.io.{File, FileOutputStream}
import java.net.URL
import java.nio.file.{Files, Paths}
import com.eclipsesource.schema._
import akka.actor.ActorRef
import com.eclipsesource.schema.SchemaValidator
import org.apache.commons.io.IOUtils
import play.api.libs.json._
import scala.concurrent.duration._
import scala.util.Properties._

abstract class FeyGenericActorReceiver(override val params: Map[String,String] = Map.empty,
                                       override val backoff: FiniteDuration = 1.minutes,
                                       override val connectTo: Map[String,ActorRef] = Map.empty,
                                       override val schedulerTimeInterval: FiniteDuration = 2.seconds,
                                       override val orchestrationName: String = "",
                                       override val orchestrationID: String = "",
                                       override val autoScale: Boolean = false) extends FeyGenericActor{

  private[fey] val feyCore = FEY_CORE_ACTOR.actorRef

  override final def processMessage[T](message: T, sender: ActorRef): Unit = {
    try {
      val jsonString = getJSONString(message)
      if(jsonString != "{}") {
        processJson(jsonString)
      }
      startBackoff()
    }catch{
      case e: Exception => log.error(e, s"Could not process message $message")
    }
  }

  private[fey] def processJson(jsonString: String) = {
    var orchID:String = "None"
    try{
      val orchestrationJSON = Json.parse(jsonString)
      orchID = (orchestrationJSON \ JSON_PATH.GUID).as[String]
      val valid = validJson(orchestrationJSON)
      if(valid && (orchestrationJSON \ JSON_PATH.COMMAND).as[String].toUpperCase != "DELETE"){
        checkForLocation(orchestrationJSON)
      }
      if(valid) {
        feyCore ! FeyCore.ORCHESTRATION_RECEIVED(orchestrationJSON, None)
      }else{
        log.warning(s"Could not forward Orchestration $orchID. Invalid JSON schema")
      }
    } catch {
      case e: Exception =>
        log.error(e, s"Orchestration $orchID could not be forwarded")
    }
  }

  
  def resolveCredentials(credentials: Option[JsObject]):Option[(String, String)] = {
    credentials match {
      case None => None
      case Some(cred) =>
        val user = (cred \ JSON_PATH.JAR_CRED_USER).as[String]
        val password = (cred \ JSON_PATH.JAR_CRED_PASSWORD).as[String]
        Option(envOrElse(user,user), envOrElse(password,password))
    }
  }

} 
Example 104
Source File: CheckpointProcessor.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey

import java.io.File

import akka.actor.ActorRef
import org.apache.iota.fey.JsonReceiverActor.JSON_RECEIVED
import play.api.libs.json.{JsValue, Json}

import scala.io.Source


class CheckpointProcessor(receiverActor: ActorRef) extends JsonReceiver{

  override def run(): Unit = {
    processCheckpointFiles()
  }

  def getJsonObject(params: String): Option[JsValue] = {
    try{
      val stringJson = Source.fromFile(params).getLines.mkString
      Option(Json.parse(stringJson))
    }catch{
      case e: Exception =>
        log.error("Could not parse JSON", e)
        None
    }
  }

  private def processJson(path: String, file: File) = {
    try{
      getJsonObject(path) match {
        case Some(orchestrationJSON) =>
          val valid = validJson(orchestrationJSON)
          if(valid && (orchestrationJSON \ JSON_PATH.COMMAND).as[String].toUpperCase != "DELETE"){
            checkForLocation(orchestrationJSON)
          }
          if(valid) {
            receiverActor ! JSON_RECEIVED(orchestrationJSON, file)
          }else{
            log.warn(s"File $path not processed. Incorrect JSON schema")
          }
          file.delete()
        case None =>
      }
    } catch {
      case e: Exception =>
        log.error(s"File $path will not be processed", e)
    }
  }

  private def processCheckpointFiles() = {
    Utils.getFilesInDirectory(CONFIG.CHECKPOINT_DIR)
      .filter(file => file.getName.endsWith(CONFIG.JSON_EXTENSION))
      .foreach(file => {
        processJson(file.getAbsolutePath, file)
      })
  }

  override def execute(): Unit = {}
  override def exceptionOnRun(e: Exception): Unit = {}
} 
Example 105
Source File: GlobalPerformer.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey

import akka.actor.SupervisorStrategy.Restart
import akka.actor.{Actor, ActorLogging, ActorRef, OneForOneStrategy, Props, Terminated}
import akka.routing._
import play.api.libs.json.JsObject

import scala.collection.mutable.HashMap
import scala.concurrent.duration._

protected class GlobalPerformer(val orchestrationID: String,
                                val orchestrationName: String,
                                val globalPerformers: List[JsObject],
                                val ensemblesSpec :  List[JsObject]) extends Actor with ActorLogging{

  val monitoring_actor = FEY_MONITOR.actorRef
  var global_metadata: Map[String, Performer] = Map.empty[String, Performer]

  override def receive: Receive = {

    case GlobalPerformer.PRINT_GLOBAL =>
      context.actorSelection(s"*") ! FeyGenericActor.PRINT_PATH

    case Terminated(actor) =>
      monitoring_actor  ! Monitor.TERMINATE(actor.path.toString, Utils.getTimestamp)
      log.error(s"DEAD Global Performers ${actor.path.name}")
      context.children.foreach{ child =>
        context.unwatch(child)
        context.stop(child)
      }
      throw new RestartGlobalPerformers(s"DEAD Global Performer ${actor.path.name}")

    case GetRoutees => //Discard

    case x => log.warning(s"Message $x not treated by Global Performers")
  }

  
  private def loadClazzFromJar(classPath: String, jarLocation: String, jarName: String):Class[FeyGenericActor] = {
    try {
      Utils.loadActorClassFromJar(jarLocation,classPath,jarName)
    }catch {
      case e: Exception =>
        log.error(e,s"Could not load class $classPath from jar $jarLocation. Please, check the Jar repository path as well the jar name")
        throw e
    }
  }

}

object GlobalPerformer{

  val activeGlobalPerformers:HashMap[String, Map[String, ActorRef]] = HashMap.empty[String, Map[String, ActorRef]]

  case object PRINT_GLOBAL
} 
Example 106
Source File: FeyGenericActorTest.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey

import akka.actor.ActorRef
import scala.concurrent.duration.{DurationInt, FiniteDuration}

class FeyGenericActorTest(override val params: Map[String,String] = Map.empty,
               override val backoff: FiniteDuration = 1.minutes,
               override val connectTo: Map[String,ActorRef] = Map.empty,
               override val schedulerTimeInterval: FiniteDuration = 2.seconds,
               override val orchestrationName: String = "",
               override val orchestrationID: String = "",
               override val autoScale: Boolean = false) extends FeyGenericActor {

  var count = 0
  var started = false
  var processed = false
  var executing = false
  var stopped = false
  var restarted = false

  override def onStart(): Unit = {
    started = true
  }

  override def processMessage[T](message: T, sender: ActorRef): Unit = {
    processed = true
    log.info(s"Processing message ${message.toString}")
    propagateMessage(s"PROPAGATING FROM ${self.path.name} - Message: ${message.toString}")
    startBackoff()
  }

  override def execute(): Unit = {
    log.info(s"Executing action in ${self.path.name}")
    executing = true
  }

  override def customReceive: Receive = {
    case "TEST_CUSTOM" => count+=1
  }

  override def onStop(): Unit = {
    log.info(s"Actor ${self.path.name} stopped.")
    stopped = true
  }

  override def onRestart(reason: Throwable): Unit = {
    restarted = true
  }
} 
Example 107
Source File: JsonReceiverSpec.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey

import java.nio.file.{Files, Paths}

import akka.actor.ActorRef
import akka.testkit.{EventFilter, TestProbe}
import ch.qos.logback.classic.Level
import scala.concurrent.duration.{DurationInt, FiniteDuration}

class JsonReceiverSpec extends BaseAkkaSpec with LoggingTest{


  class ReceiverTest(verifyActor: ActorRef) extends JsonReceiver{

    override def execute(): Unit = {
      verifyActor ! "EXECUTED"
      Thread.sleep(500)
    }

    override def exceptionOnRun(e: Exception): Unit = {
      verifyActor ! "INTERRUPTED"
    }

  }

  val verifyTB = TestProbe("RECEIVER-TEST")
  val receiver = new ReceiverTest(verifyTB.ref)

  "Executing validJson in JsonReceiver" should {
    "return false when json schema is not right" in {
      receiver.validJson(getJSValueFromString(Utils_JSONTest.test_json_schema_invalid)) should be(false)
    }
    "log message to Error" in {
      ("Incorrect JSON schema \n/ensembles/0 \n\tErrors: Property command missing") should beLoggedAt(Level.ERROR)
    }
    "return true when Json schema is valid" in {
      receiver.validJson(getJSValueFromString(Utils_JSONTest.create_json_test)) should be(true)
    }
  }

  "Executing checkForLocation in JsonReceiver" should {
    "log message at Debug level" in {
      receiver.checkForLocation(getJSValueFromString(Utils_JSONTest.test_json_schema_invalid))
      "Location not defined in JSON" should beLoggedAt(Level.DEBUG)
    }
    "download jar dynamically from URL" in {
      receiver.checkForLocation(getJSValueFromString(Utils_JSONTest.location_test))
      Files.exists(Paths.get(s"${CONFIG.DYNAMIC_JAR_REPO}/fey-stream.jar")) should be(true)
    }
  }

  var watchThread: Thread = _
  "Start a Thread with the JSON receiver" should {
    "Start Thread" in {
      watchThread = new Thread(receiver, "TESTING-RECEIVERS-IN-THREAD")
      watchThread.setDaemon(true)
      watchThread.start()
      TestProbe().isThreadRunning("TESTING-RECEIVERS-IN-THREAD") should be(true)
    }
    "execute execute() method inside run" in {
      verifyTB.expectMsgAllOf(600.milliseconds,"EXECUTED","EXECUTED")
    }
  }

  "Interrupting the receiver Thread" should {
    "Throw Interrupted exception" in {
      EventFilter[InterruptedException]() intercept {
        watchThread.interrupt()
        watchThread.join()
      }
    }
    "execute exceptionOnRun method" in {
      verifyTB.receiveWhile(1200.milliseconds) {
        case "EXECUTED" =>
      }
      verifyTB.expectMsg("INTERRUPTED")
    }
  }


} 
Example 108
Source File: BaseAkkaSpec.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey

import java.nio.file.Paths

import akka.actor.{ActorIdentity, ActorRef, ActorSystem, Identify, Props}
import akka.testkit.{EventFilter, TestEvent, TestProbe}
import com.typesafe.config.ConfigFactory
import org.scalatest.BeforeAndAfterAll
import play.api.libs.json._

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.Await

class BaseAkkaSpec extends BaseSpec with BeforeAndAfterAll with LoggingTest{

  //Load default configuration for Fey when running tests
  resetCapturedLogs()
  CONFIG.loadUserConfiguration(Paths.get(TestSetup.configTest.toURI()).toFile().getAbsolutePath)
  TestSetup.setup()

  val systemName = "FEY-TEST"
  implicit val system = ActorSystem(systemName, ConfigFactory.parseString("""akka.loggers = ["akka.testkit.TestEventListener"]"""))
  system.eventStream.publish(TestEvent.Mute(EventFilter.debug()))
  system.eventStream.publish(TestEvent.Mute(EventFilter.info()))
  system.eventStream.publish(TestEvent.Mute(EventFilter.warning()))
  system.eventStream.publish(TestEvent.Mute(EventFilter.error()))

  val globalIdentifierName = "GLOBAL-IDENTIFIER"
  val globalIdentifierRef = system.actorOf(Props[IdentifyFeyActors],globalIdentifierName)

  override protected def afterAll(): Unit = {
    //Force reload of GenericActor's jar
    Utils.loadedJars.remove("fey-test-actor.jar")
    Monitor.events.removeAllNodes()
    Await.ready(system.terminate(), 20.seconds)
  }

  implicit class TestProbeOps(probe: TestProbe) {

    def expectActor(path: String, max: FiniteDuration = 3.seconds): ActorRef = {
      probe.within(max) {
        var actor = null: ActorRef
        probe.awaitAssert {
          (probe.system actorSelection path).tell(Identify(path), probe.ref)
          probe.expectMsgPF(100 milliseconds) {
            case ActorIdentity(`path`, Some(ref)) => actor = ref
          }
        }
        actor
      }
    }

    def expectActorInSystem(path: String, lookInSystem: ActorSystem, max: FiniteDuration = 3.seconds): ActorRef = {
      probe.within(max) {
        var actor = null: ActorRef
        probe.awaitAssert {
          (lookInSystem actorSelection path).tell(Identify(path), probe.ref)
          probe.expectMsgPF(100 milliseconds) {
            case ActorIdentity(`path`, Some(ref)) => actor = ref
          }
        }
        actor
      }
    }

    def verifyActorTermination(actor: ActorRef)(implicit system: ActorSystem): Unit = {
      val watcher = TestProbe()
      watcher.watch(actor)
      watcher.expectTerminated(actor)
    }

    def notExpectActor(path: String, max: FiniteDuration = 3.seconds): Unit = {
      probe.within(max) {
        probe.awaitAssert {
          (probe.system actorSelection path).tell(Identify(path), probe.ref)
          probe.expectMsgPF(100 milliseconds) {
            case ActorIdentity(`path`, None) =>
          }
        }
      }
    }

    def isThreadRunning(threadName: String): Boolean = {
      Thread.getAllStackTraces.keySet().toArray
        .map(_.asInstanceOf[Thread])
        .find(_.getName == threadName) match {
        case Some(thread) =>
          if(thread.isAlive) true else false
        case None => false
      }
    }
  }

  //Utils Functions
  def getJSValueFromString(json: String): JsValue = {
    Json.parse(json)
  }

} 
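
The TestProbeOps helpers above are used throughout the Fey specs; a usage sketch:

val probe = TestProbe()
// waits (up to the default 3 seconds) until an actor answers Identify at the path
val monitor = probe.expectActor(s"akka://$systemName/user/MONITOR")
system.stop(monitor)
probe.verifyActorTermination(monitor)
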
Example 109
Source File: IdentifyFeyActorsSpec.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey

import akka.actor.{ActorRef, Props}

class IdentifyFeyActorsSpec extends BaseAkkaSpec {

  val aux_events = new Trie(systemName)

  "Sending IdentifyFeyActors.IDENTIFY_TREE to IdentifyFeyActors" should {
    s"result in one path added to IdentifyFeyActors.actorsPath" in {
      globalIdentifierRef ! IdentifyFeyActors.IDENTIFY_TREE(s"akka://$systemName/user")
      Thread.sleep(1000)
      IdentifyFeyActors.actorsPath.size should equal(1)
    }
    s"result in path 'akka://FEY-TEST/user/$globalIdentifierName' " in {
      IdentifyFeyActors.actorsPath should contain(s"akka://$systemName/user/$globalIdentifierName")
    }
  }

  var actor2: ActorRef = _

  "Creating a new actor in the system and sending IdentifyFeyActors.IDENTIFY_TREE to IdentifyFeyActors" should {
    s"result in two paths added to IdentifyFeyActors.actorsPath" in {
      actor2 = system.actorOf(Props(new Monitor(aux_events)),"MONITOR")
      globalIdentifierRef ! IdentifyFeyActors.IDENTIFY_TREE(s"akka://$systemName/user")
      Thread.sleep(1000)
      IdentifyFeyActors.actorsPath.size should equal(2)
    }
    s"result in matching paths" in {
      IdentifyFeyActors.actorsPath should contain(s"akka://$systemName/user/$globalIdentifierName")
      IdentifyFeyActors.actorsPath should contain(s"akka://$systemName/user/MONITOR")
    }
  }

  "Stopping previous added actor and sending IdentifyFeyActors.IDENTIFY_TREE to IdentifyFeyActors" should {
    "result in going back to have just one path added to IdentifyFeyActors.actorsPath" in {
      globalIdentifierRef ! IdentifyFeyActors.IDENTIFY_TREE(s"akka://$systemName/user")
      Thread.sleep(1000)
      IdentifyFeyActors.actorsPath.size should equal(2)
    }
    s"result in path 'akka://FEY-TEST/user/$globalIdentifierName' " in {
      IdentifyFeyActors.actorsPath should contain(s"akka://FEY-TEST/user/$globalIdentifierName")
    }
  }
} 
Example 110
Source File: FeyGenericActorReceiverTest.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey

import akka.actor.ActorRef
import scala.concurrent.duration.{DurationInt, FiniteDuration}

class FeyGenericActorReceiverTest(override val params: Map[String,String] = Map.empty,
                          override val backoff: FiniteDuration = 1.minutes,
                          override val connectTo: Map[String,ActorRef] = Map.empty,
                          override val schedulerTimeInterval: FiniteDuration = 2.seconds,
                          override val orchestrationName: String = "",
                          override val orchestrationID: String = "",
                          override val autoScale: Boolean = false) extends FeyGenericActorReceiver {

  override def customReceive:Receive = {
    case "PROPAGATE" => propagateMessage("PROPAGATE-CALLED")
    case x => log.debug(s"Message not treated: $x")
  }

  override def getJSONString[T](input: T): String = {
    input match{
      case "VALID_JSON" => Utils_JSONTest.create_json_test
      case "INVALID_JSON" => Utils_JSONTest.test_json_schema_invalid
      case "JSON_LOCATION" => Utils_JSONTest.location_test_2
    }
  }

  var count = 0
  var started = false
  var executing = false
  var stopped = false
  var restarted = false

  override def onStart(): Unit = {
    started = true
  }

  override def execute(): Unit = {
    log.info(s"Executing action in ${self.path.name}")
    executing = true
  }

  override def onStop(): Unit = {
    log.info(s"Actor ${self.path.name} stopped.")
    stopped = true
  }

  override def onRestart(reason: Throwable): Unit = {
    restarted = true
  }

} 
Example 111
Source File: DurableEventLogs.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.example.stream

//# durable-event-logs
import akka.actor.{ ActorRef, ActorSystem }
import akka.stream.{ ActorMaterializer, Materializer }
import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog

//#
trait DurableEventLogs {
  //# durable-event-logs
  implicit val system: ActorSystem = ActorSystem("example")
  implicit val materializer: Materializer = ActorMaterializer()

  val logAId = "A"
  val logBId = "B"
  val logCId = "C"

  val logA: ActorRef = createLog(logAId)
  val logB: ActorRef = createLog(logBId)
  val logC: ActorRef = createLog(logCId)

  def createLog(id: String): ActorRef =
    system.actorOf(LeveldbEventLog.props(id))
  //#
} 
Example 112
Source File: OrderView.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.example.ordermgnt

import akka.actor.ActorRef

import com.rbmhtechnology.eventuate.EventsourcedView

object OrderView {
  case class GetUpdateCount(orderId: String)
  case class GetUpdateCountSuccess(orderId: String, count: Int)
}


class OrderView(replicaId: String, val eventLog: ActorRef) extends EventsourcedView {
  import OrderActor._
  import OrderView._

  var updateCounts: Map[String, Int] = Map.empty

  override val id = s"s-ov-$replicaId"

  override def onCommand = {
    case GetUpdateCount(orderId) => sender() ! GetUpdateCountSuccess(orderId, updateCounts.getOrElse(orderId, 0))
  }

  override def onEvent = {
    case oe: OrderEvent => updateCounts.get(oe.orderId) match {
      case Some(count) => updateCounts += (oe.orderId -> (count + 1))
      case None        => updateCounts += (oe.orderId -> 1)
    }
  }
} 
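
A usage sketch for querying the view (actor ref and order id are assumptions):

import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._

implicit val timeout: Timeout = Timeout(3.seconds)
val count = (orderView ? OrderView.GetUpdateCount("order-17"))
  .mapTo[OrderView.GetUpdateCountSuccess]
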
Example 113
Source File: Writer.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.example.querydb

//#writer
import java.lang.{ Long => JLong }

import akka.actor.ActorRef

import com.datastax.driver.core._
import com.rbmhtechnology.eventuate.EventsourcedWriter

import scala.concurrent.Future

// The class declaration and the read/write logic against the query database
// were lost in extraction; a minimal header reconstructed from the override
// below (assumption -- declared abstract because read/write are omitted):
abstract class Writer(val id: String, val eventLog: ActorRef) extends EventsourcedWriter[Long, Unit] {
  override def readSuccess(result: Long): Option[Long] =
    Some(result + 1L)
}

object Writer {
  import java.util.concurrent.Executor

  import com.google.common.util.concurrent.ListenableFuture

  import scala.concurrent.{ ExecutionContext, Promise }
  import scala.language.implicitConversions
  import scala.util.Try

  implicit class ListenableFutureConverter[A](lf: ListenableFuture[A])(implicit executionContext: ExecutionContext) {

    def toFuture: Future[A] = {
      val promise = Promise[A]
      lf.addListener(new Runnable {
        def run() = promise.complete(Try(lf.get()))
      }, executionContext.asInstanceOf[Executor])
      promise.future
    }
  }
}
//# 
Example 114
Source File: NotificationChannel.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.eventuate.log

import java.util.concurrent.TimeUnit

import akka.actor.Actor
import akka.actor.ActorRef
import com.rbmhtechnology.eventuate._
import com.rbmhtechnology.eventuate.ReplicationProtocol._
import com.typesafe.config.Config

import scala.collection.immutable.Seq
import scala.concurrent.duration.DurationLong
import scala.concurrent.duration.FiniteDuration

class NotificationChannelSettings(config: Config) {
  val registrationExpirationDuration: FiniteDuration =
    config.getDuration("eventuate.log.replication.retry-delay", TimeUnit.MILLISECONDS).millis
}

object NotificationChannel {
  case class Updated(events: Seq[DurableEvent])

  private case class Registration(replicator: ActorRef, currentTargetVersionVector: VectorTime, filter: ReplicationFilter, registrationTime: Long)

  private object Registration {
    def apply(read: ReplicationRead): Registration =
      new Registration(read.replicator, read.currentTargetVersionVector, read.filter, System.nanoTime())
  }
}


class NotificationChannel(logId: String) extends Actor {
  import NotificationChannel._

  private val settings = new NotificationChannelSettings(context.system.settings.config)

  // targetLogId -> subscription
  private var registry: Map[String, Registration] = Map.empty

  // targetLogIds for which a read operation is in progress
  private var reading: Set[String] = Set.empty

  def receive = {
    case Updated(events) =>
      val currentTime = System.nanoTime()
      registry.foreach {
        case (targetLogId, reg) =>
          if (!reading.contains(targetLogId)
            && events.exists(_.replicable(reg.currentTargetVersionVector, reg.filter))
            && currentTime - reg.registrationTime <= settings.registrationExpirationDuration.toNanos)
            reg.replicator ! ReplicationDue
      }
    case r: ReplicationRead =>
      registry += (r.targetLogId -> Registration(r))
      reading += r.targetLogId
    case r: ReplicationReadSuccess =>
      reading -= r.targetLogId
    case r: ReplicationReadFailure =>
      reading -= r.targetLogId
    case w: ReplicationWrite =>
      for {
        id <- w.sourceLogIds
        rr <- registry.get(id)
      } registry += (id -> rr.copy(currentTargetVersionVector = w.metadata(id).currentVersionVector))
  }
} 
Example 115
Source File: SubscriberRegistry.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.eventuate.log

import akka.actor.ActorRef
import com.rbmhtechnology.eventuate.DurableEvent
import com.rbmhtechnology.eventuate.EventsourcingProtocol._

import scala.collection.immutable.Seq

private case class SubscriberRegistry(
  aggregateRegistry: AggregateRegistry = AggregateRegistry(),
  defaultRegistry: Set[ActorRef] = Set.empty) {

  def registerDefaultSubscriber(subscriber: ActorRef): SubscriberRegistry =
    copy(defaultRegistry = defaultRegistry + subscriber)

  def registerAggregateSubscriber(subscriber: ActorRef, aggregateId: String): SubscriberRegistry =
    copy(aggregateRegistry = aggregateRegistry.add(subscriber, aggregateId))

  def unregisterSubscriber(subscriber: ActorRef): SubscriberRegistry =
    aggregateRegistry.aggregateId(subscriber) match {
      case Some(aggregateId) => copy(aggregateRegistry = aggregateRegistry.remove(subscriber, aggregateId))
      case None              => copy(defaultRegistry = defaultRegistry - subscriber)
    }

  def notifySubscribers(events: Seq[DurableEvent], condition: ActorRef => Boolean = _ => true): Unit =
    events.foreach { event =>
      val written = Written(event)
      // in any case, notify all default subscribers
      // for which condition evaluates to true
      defaultRegistry.foreach(r => if (condition(r)) r ! written)
      // notify subscribers with matching aggregate id
      for {
        aggregateId <- event.destinationAggregateIds
        aggregate <- aggregateRegistry(aggregateId) if condition(aggregate)
      } aggregate ! written
    }
}

private case class AggregateRegistry(
  aggregateRegistry: Map[String, Set[ActorRef]] = Map.empty,
  aggregateRegistryIndex: Map[ActorRef, String] = Map.empty) {

  def apply(aggregateId: String): Set[ActorRef] =
    aggregateRegistry.getOrElse(aggregateId, Set.empty)

  def aggregateId(aggregate: ActorRef): Option[String] =
    aggregateRegistryIndex.get(aggregate)

  def add(aggregate: ActorRef, aggregateId: String): AggregateRegistry = {
    val aggregates = aggregateRegistry.get(aggregateId) match {
      case Some(as) => as + aggregate
      case None     => Set(aggregate)
    }
    copy(
      aggregateRegistry + (aggregateId -> aggregates),
      aggregateRegistryIndex + (aggregate -> aggregateId))
  }

  def remove(aggregate: ActorRef, aggregateId: String): AggregateRegistry = {
    val aggregates = aggregateRegistry.get(aggregateId) match {
      case Some(as) => as - aggregate
      case None     => Set(aggregate)
    }
    copy(
      aggregateRegistry + (aggregateId -> aggregates),
      aggregateRegistryIndex - aggregate)
  }
} 
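Both registries above are immutable, so every operation returns a new value to thread through. A minimal sketch (hypothetical ActorRefs, and assuming the code lives in the same package, since both case classes are private):

import akka.actor.{Actor, ActorSystem, Props}

object RegistryDemo extends App {
  val system = ActorSystem("registry-demo")
  val subscriberA = system.actorOf(Props(new Actor { def receive = { case m => println(s"A: $m") } }))
  val subscriberB = system.actorOf(Props(new Actor { def receive = { case m => println(s"B: $m") } }))

  val registry = SubscriberRegistry()
    .registerDefaultSubscriber(subscriberA)                            // notified for every event
    .registerAggregateSubscriber(subscriberB, aggregateId = "order-1") // notified for "order-1" only

  // unregistering consults the aggregate index first, then falls back to the default set
  val withoutB = registry.unregisterSubscriber(subscriberB)

  system.terminate()
}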
Example 116
Source File: EventProducerConfig.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.adapter.vertx.api

import akka.actor.ActorRef

import scala.concurrent.duration.FiniteDuration

sealed trait EventProducerConfig {
  def id: String
  def log: ActorRef
}

sealed trait VertxProducerConfig extends EventProducerConfig {
  def endpointRouter: EndpointRouter
}

case class VertxPublisherConfig(id: String, log: ActorRef, endpointRouter: EndpointRouter) extends VertxProducerConfig
case class VertxSenderConfig(id: String, log: ActorRef, endpointRouter: EndpointRouter, deliveryMode: DeliveryMode) extends VertxProducerConfig
case class LogProducerConfig(id: String, log: ActorRef, endpoints: Set[String], filter: PartialFunction[Any, Boolean]) extends EventProducerConfig

sealed trait ConfirmationType
case object Single extends ConfirmationType
case class Batch(size: Int) extends ConfirmationType

sealed trait DeliveryMode
case object AtMostOnce extends DeliveryMode
case class AtLeastOnce(confirmationType: ConfirmationType, confirmationTimeout: FiniteDuration) extends DeliveryMode

object EndpointRouter {

  def route(f: PartialFunction[Any, String]): EndpointRouter =
    new EndpointRouter(f)

  def routeAllTo(s: String): EndpointRouter =
    new EndpointRouter({ case _ => s })
}

class EndpointRouter(f: PartialFunction[Any, String]) {
  val endpoint: Any => Option[String] = f.lift
}


  // NOTE: excerpt — the enclosing factory that supplies `endpoints` was dropped by the snippet extractor
  def writeTo(log: ActorRef, filter: PartialFunction[Any, Boolean] = { case _ => true }) = new CompletableEventProducerConfigFactory {
    override def as(id: String): LogProducerConfig =
      LogProducerConfig(id, log, endpoints, filter)
  }
} 
Example 117
Source File: LogProducer.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ ActorRef, Props }
import com.rbmhtechnology.eventuate.EventsourcedActor
import io.vertx.core.eventbus.Message

import scala.util.{ Failure, Success }

private[vertx] object LogProducer {

  case class PersistMessage(message: Message[Any])

  def props(id: String, eventLog: ActorRef): Props =
    Props(new LogProducer(id, eventLog))
}

private[vertx] class LogProducer(val id: String, val eventLog: ActorRef) extends EventsourcedActor {
  import LogProducer._

  override def stateSync: Boolean = false

  // prevent event-replay
  override def replayFromSequenceNr: Option[Long] = Some(Long.MaxValue)

  override def onCommand: Receive = {
    case PersistMessage(msg) =>
      persist(msg.body()) {
        case Success(res) => msg.reply(ProcessingResult.PERSISTED)
        case Failure(err) => msg.fail(0, err.getMessage)
      }
  }

  override def onEvent: Receive = {
    case _ =>
  }
} 
Example 118
Source File: LogEventDispatcher.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ Actor, ActorRef, Props }
import com.rbmhtechnology.eventuate.adapter.vertx.LogEventDispatcher.EndpointRoute
import com.rbmhtechnology.eventuate.adapter.vertx.LogProducer.PersistMessage
import io.vertx.core.Vertx
import io.vertx.core.eventbus.{ Message, MessageConsumer }

object LogEventDispatcher {

  case class EventProducerRef(id: String, log: ActorRef)
  case class EndpointRoute(sourceEndpoint: String, producer: EventProducerRef, filter: PartialFunction[Any, Boolean] = { case _ => true })

  def props(routes: Seq[EndpointRoute], vertx: Vertx): Props =
    Props(new LogEventDispatcher(routes, vertx))
}

class LogEventDispatcher(routes: Seq[EndpointRoute], vertx: Vertx) extends Actor {

  import VertxHandlerConverters._

  val producers = routes
    .groupBy(_.producer)
    .map { case (producer, _) => producer.id -> context.actorOf(LogProducer.props(producer.id, producer.log)) }

  val consumers = routes
    .map { r => installMessageConsumer(r.sourceEndpoint, producers(r.producer.id), r.filter) }

  private def installMessageConsumer(endpoint: String, producer: ActorRef, filter: PartialFunction[Any, Boolean]): MessageConsumer[Any] = {
    val handler = (msg: Message[Any]) => {
      if (filter.applyOrElse(msg.body(), (_: Any) => false)) {
        producer ! PersistMessage(msg)
      } else {
        msg.reply(ProcessingResult.FILTERED)
      }
    }
    vertx.eventBus().consumer[Any](endpoint, handler.asVertxHandler)
  }

  override def receive: Receive = Actor.emptyBehavior

  override def postStop(): Unit = {
    consumers.foreach(_.unregister())
  }
} 
Example 119
Source File: VertxNoConfirmationSender.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ ActorRef, Props }
import com.rbmhtechnology.eventuate.adapter.vertx.api.{ EndpointRouter, StorageProvider }
import io.vertx.core.Vertx

import scala.collection.immutable.Seq
import scala.concurrent.{ ExecutionContext, Future }

private[eventuate] object VertxNoConfirmationSender {
  def props(id: String, eventLog: ActorRef, endpointRouter: EndpointRouter, vertx: Vertx, storageProvider: StorageProvider): Props =
    Props(new VertxNoConfirmationSender(id, eventLog, endpointRouter, vertx, storageProvider))
      .withDispatcher("eventuate.log.dispatchers.write-dispatcher")
}

private[eventuate] class VertxNoConfirmationSender(val id: String, val eventLog: ActorRef, val endpointRouter: EndpointRouter, val vertx: Vertx, val storageProvider: StorageProvider)
  extends VertxEventDispatcher[Long, Long] with VertxSender with SequenceNumberProgressStore {

  override def dispatch(events: Seq[EventEnvelope])(implicit ec: ExecutionContext): Future[Unit] =
    Future(events.foreach(e => send(e.address, e.evt)))
} 
Example 120
Source File: VertxNoConfirmationPublisher.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ ActorRef, Props }
import com.rbmhtechnology.eventuate.adapter.vertx.api.{ EndpointRouter, StorageProvider }
import io.vertx.core.Vertx

import scala.collection.immutable.Seq
import scala.concurrent.{ ExecutionContext, Future }

private[eventuate] object VertxNoConfirmationPublisher {
  def props(id: String, eventLog: ActorRef, endpointRouter: EndpointRouter, vertx: Vertx, storageProvider: StorageProvider): Props =
    Props(new VertxNoConfirmationPublisher(id, eventLog, endpointRouter, vertx, storageProvider))
      .withDispatcher("eventuate.log.dispatchers.write-dispatcher")
}

private[eventuate] class VertxNoConfirmationPublisher(val id: String, val eventLog: ActorRef, val endpointRouter: EndpointRouter, val vertx: Vertx, val storageProvider: StorageProvider)
  extends VertxEventDispatcher[Long, Long] with VertxPublisher with SequenceNumberProgressStore {

  override def dispatch(events: Seq[EventEnvelope])(implicit ec: ExecutionContext): Future[Unit] =
    Future(events.foreach(e => publish(e.address, e.evt)))
} 
Example 121
Source File: VertxSingleConfirmationSender.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ ActorLogging, ActorRef, Props }
import akka.pattern.pipe
import com.rbmhtechnology.eventuate.adapter.vertx.api.EndpointRouter
import com.rbmhtechnology.eventuate.{ ConfirmedDelivery, EventsourcedActor }
import io.vertx.core.Vertx

import scala.concurrent.duration.FiniteDuration
import scala.util.{ Failure, Success }

private[vertx] object VertxSingleConfirmationSender {

  case class DeliverEvent(evt: EventEnvelope, deliveryId: String)
  case class Confirm(deliveryId: String)
  case class DeliverFailed(evt: EventEnvelope, deliveryId: String, err: Throwable)
  case object Redeliver

  case class DeliveryConfirmed()

  def props(id: String, eventLog: ActorRef, endpointRouter: EndpointRouter, vertx: Vertx, confirmationTimeout: FiniteDuration): Props =
    Props(new VertxSingleConfirmationSender(id, eventLog, endpointRouter, vertx, confirmationTimeout))
}

private[vertx] class VertxSingleConfirmationSender(val id: String, val eventLog: ActorRef, val endpointRouter: EndpointRouter, val vertx: Vertx, confirmationTimeout: FiniteDuration)
  extends EventsourcedActor with ConfirmedDelivery with VertxSender with ActorLogging {

  import VertxSingleConfirmationSender._
  import context.dispatcher

  context.system.scheduler.schedule(confirmationTimeout, confirmationTimeout, self, Redeliver)

  override def onCommand: Receive = {
    case DeliverEvent(envelope, deliveryId) =>
      send[Any](envelope.address, envelope.evt, confirmationTimeout)
        .map(_ => Confirm(deliveryId))
        .recover {
          case err => DeliverFailed(envelope, deliveryId, err)
        }
        .pipeTo(self)

    case Confirm(deliveryId) if unconfirmed.contains(deliveryId) =>
      persistConfirmation(DeliveryConfirmed(), deliveryId) {
        case Success(evt) =>
        case Failure(err) => log.error(s"Confirmation for delivery with id '$deliveryId' could not be persisted.", err)
      }

    case Redeliver =>
      redeliverUnconfirmed()

    case DeliverFailed(evt, deliveryId, err) =>
      log.warning(s"Delivery with id '$deliveryId' for event [$evt] failed with $err. The delivery will be retried.")
  }

  override def onEvent: Receive = {
    case DeliveryConfirmed() =>
    // confirmations should not be published
    case ev =>
      endpointRouter.endpoint(ev) match {
        case Some(endpoint) =>
          val deliveryId = lastSequenceNr.toString
          deliver(deliveryId, DeliverEvent(EventEnvelope(endpoint, lastHandledEvent), deliveryId), self.path)
        case None =>
      }
  }
} 
Example 122
Source File: PersistOnEventWithRecoverySpecLeveldb.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate

import java.util.UUID

import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.Props
import akka.testkit.TestProbe
import com.rbmhtechnology.eventuate.ReplicationIntegrationSpec.replicationConnection
import com.rbmhtechnology.eventuate.utilities._
import org.apache.commons.io.FileUtils
import org.scalatest.Matchers
import org.scalatest.WordSpec

import scala.concurrent.duration.DurationInt

object PersistOnEventWithRecoverySpecLeveldb {
  class OnBEmitRandomActor(val eventLog: ActorRef, probe: TestProbe) extends EventsourcedActor with PersistOnEvent {

    override def id = getClass.getName

    override def onCommand = Actor.emptyBehavior

    override def onEvent = {
      case "A"          =>
      case "B"          => persistOnEvent(UUID.randomUUID().toString)
      case uuid: String => probe.ref ! uuid
    }
  }

  def persistOnEventProbe(locationA1: Location, log: ActorRef) = {
    val probe = locationA1.probe
    locationA1.system.actorOf(Props(new OnBEmitRandomActor(log, probe)))
    probe
  }

  val noMsgTimeout = 100.millis
}

class PersistOnEventWithRecoverySpecLeveldb extends WordSpec with Matchers with MultiLocationSpecLeveldb {
  import RecoverySpecLeveldb._
  import PersistOnEventWithRecoverySpecLeveldb._

  override val logFactory: String => Props =
    id => SingleLocationSpecLeveldb.TestEventLog.props(id, batching = true)

  "An EventsourcedActor with PersistOnEvent" must {
    "not re-attempt persistence on successful write after reordering of events through disaster recovery" in {
      val locationB = location("B", customConfig = RecoverySpecLeveldb.config)
      def newLocationA = location("A", customConfig = RecoverySpecLeveldb.config)
      val locationA1 = newLocationA

      val endpointB = locationB.endpoint(Set("L1"), Set(replicationConnection(locationA1.port)))
      def newEndpointA(l: Location, activate: Boolean) = l.endpoint(Set("L1"), Set(replicationConnection(locationB.port)), activate = activate)
      val endpointA1 = newEndpointA(locationA1, activate = true)

      val targetA = endpointA1.target("L1")
      val logDirA = logDirectory(targetA)
      val targetB = endpointB.target("L1")
      val a1Probe = persistOnEventProbe(locationA1, targetA.log)

      write(targetA, List("A"))
      write(targetB, List("B"))
      val event = a1Probe.expectMsgClass(classOf[String])
      assertConvergence(Set("A", "B", event), endpointA1, endpointB)

      locationA1.terminate().await
      FileUtils.deleteDirectory(logDirA)

      val locationA2 = newLocationA
      val endpointA2 = newEndpointA(locationA2, activate = false)
      endpointA2.recover().await

      val a2Probe = persistOnEventProbe(locationA2, endpointA2.logs("L1"))
      a2Probe.expectMsg(event)
      a2Probe.expectNoMsg(noMsgTimeout)
      assertConvergence(Set("A", "B", event), endpointA2, endpointB)
    }
  }
} 
Example 123
Source File: ClusterAwareHostBalancer.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse.balancing

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.model.Uri
import akka.pattern.ask
import akka.stream.scaladsl.Sink
import akka.stream.{ActorAttributes, Materializer, Supervision}
import akka.util.Timeout
import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor.{GetConnection, LogDeadConnections}
import com.crobox.clickhouse.balancing.discovery.cluster.ClusterConnectionFlow

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}


case class ClusterAwareHostBalancer(host: Uri,
                                    cluster: String = "cluster",
                                    manager: ActorRef,
                                    scanningInterval: FiniteDuration)(
    implicit system: ActorSystem,
    connectionRetrievalTimeout: Timeout,
    ec: ExecutionContext,
    materializer: Materializer
) extends HostBalancer {

  ClusterConnectionFlow
    .clusterConnectionsFlow(Future.successful(host), scanningInterval, cluster)
    .withAttributes(
      ActorAttributes.supervisionStrategy({
        case ex: IllegalArgumentException =>
          logger.error("Failed resolving hosts for cluster, stopping the flow.", ex)
          Supervision.stop
        case ex =>
          logger.error("Failed resolving hosts for cluster, resuming.", ex)
          Supervision.Resume
      })
    )
    .runWith(Sink.actorRef(manager, LogDeadConnections))

  override def nextHost: Future[Uri] =
    (manager ? GetConnection()).mapTo[Uri]
} 
Example 124
Source File: ClickhouseClientAsyncSpec.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.model.Uri
import akka.pattern.ask
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKit
import akka.util.Timeout
import akka.util.Timeout.durationToTimeout
import com.crobox.clickhouse.balancing.HostBalancer
import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor.GetConnection
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest._

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import org.scalatest.flatspec.AsyncFlatSpecLike
import org.scalatest.matchers.should.Matchers

abstract class ClickhouseClientAsyncSpec(val config: Config = ConfigFactory.load())
    extends TestKit(ActorSystem("clickhouseClientAsyncTestSystem", config.getConfig("crobox.clickhouse.client")))
    with AsyncFlatSpecLike
    with Matchers
    with BeforeAndAfterAll
    with BeforeAndAfterEach {

  implicit val timeout: Timeout = 5.second
  implicit val materializer: Materializer = ActorMaterializer()

  override protected def afterAll(): Unit = {
    try super.afterAll()
    finally Await.result(system.terminate(), 10.seconds)
  }

  def requestParallelHosts(balancer: HostBalancer, connections: Int = 10): Future[Seq[Uri]] =
    Future.sequence(
      (1 to connections)
        .map(_ => {
          balancer.nextHost
        })
    )

  def getConnections(manager: ActorRef, connections: Int = 10): Future[Seq[Uri]] =
    Future.sequence(
      (1 to connections)
        .map(_ => {
          (manager ? GetConnection()).mapTo[Uri]
        })
    )

  //  TODO change this methods to custom matchers
  def returnsConnectionsInRoundRobinFashion(manager: ActorRef, expectedConnections: Set[Uri]): Future[Assertion] = {
    val RequestConnectionsPerHost = 100
    getConnections(manager, RequestConnectionsPerHost * expectedConnections.size)
      .map(connections => {
        expectedConnections.foreach(
          uri =>
            connections
              .count(_ == uri) shouldBe (RequestConnectionsPerHost +- RequestConnectionsPerHost / 10) //10% delta for warm-up phase
        )
        succeed
      })
  }

} 
Example 125
Source File: DemoApp.scala    From constructr-consul   with Apache License 2.0 5 votes vote down vote up
package com.tecsisa.constructr.coordination
package demo

import akka.actor.{ ActorRef, ActorSystem, Address }
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration.{ Duration, MILLISECONDS }

object DemoApp {

  val conf     = ConfigFactory.load()
  val hostname = conf.getString("demo.hostname")
  val httpPort = conf.getInt("demo.port")

  def main(args: Array[String]): Unit = {
    // Create an Akka system
    implicit val system = ActorSystem("ConstructR-Consul")
    import system.dispatcher
    implicit val mat = ActorMaterializer()

    // Create an actor that handles cluster domain events
    val cluster =
      system.actorOf(SimpleClusterListener.props, SimpleClusterListener.Name)
    Http().bindAndHandle(route(cluster), hostname, httpPort)
  }

  private def route(cluster: ActorRef) = {
    import Directives._
    implicit val timeout = Timeout(
      Duration(
        conf.getDuration("demo.cluster-view-timeout").toMillis,
        MILLISECONDS
      )
    )
    path("member-nodes") { // List cluster nodes
      get {
        onSuccess(
          (cluster ? SimpleClusterListener.GetMemberNodes).mapTo[Set[Address]]
        )(addresses => complete(addresses.mkString("\n")))
      }
    }
  }

} 
Example 126
Source File: ImapProtocol.scala    From gatling-imap   with GNU Affero General Public License v3.0 5 votes vote down vote up
package com.linagora.gatling.imap.protocol

import java.util.Properties
import java.util.UUID.randomUUID

import akka.actor.ActorRef
import com.linagora.gatling.imap.protocol.Command.Disconnect
import io.gatling.core.CoreComponents
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.protocol.{Protocol, ProtocolComponents, ProtocolKey}
import io.gatling.core.session.Session

object ImapProtocol {
  val ImapProtocolKey = new ProtocolKey[ImapProtocol, ImapComponents] {

    override def protocolClass: Class[io.gatling.core.protocol.Protocol] = classOf[ImapProtocol].asInstanceOf[Class[io.gatling.core.protocol.Protocol]]

    override def defaultProtocolValue(configuration: GatlingConfiguration): ImapProtocol = throw new IllegalStateException("Can't provide a default value for ImapProtocol")

    override def newComponents(coreComponents: CoreComponents): ImapProtocol => ImapComponents = { protocol =>
      val sessions: ActorRef = coreComponents.actorSystem.actorOf(ImapSessions.props(protocol), "imapsessions_" + randomUUID().toString)
      ImapComponents(protocol, sessions)
    }
  }
}

case class ImapComponents(protocol: ImapProtocol, sessions: ActorRef) extends ProtocolComponents {
  override def onStart: Session => Session = s => s

  override def onExit: Session => Unit = session => sessions ! Disconnect(UserId(session.userId))
}

case class ImapProtocol(host: String,
                        port: Int = 143,
                        config: Properties = new Properties()
                       ) extends Protocol 
Example 127
Source File: UIDFetchHandler.scala    From gatling-imap   with GNU Affero General Public License v3.0 5 votes vote down vote up
package com.linagora.gatling.imap.protocol.command

import akka.actor.{ActorRef, Props}
import com.linagora.gatling.imap.protocol._
import com.yahoo.imapnio.async.client.ImapAsyncSession
import com.yahoo.imapnio.async.request.UidFetchCommand
import io.gatling.core.akka.BaseActor

object UIDFetchHandler {
  def props(session: ImapAsyncSession) = Props(new UIDFetchHandler(session))
}

class UIDFetchHandler(session: ImapAsyncSession) extends BaseActor {

  override def receive: Receive = {
    case Command.UIDFetch(userId, sequence, attributes) =>
      context.become(waitCallback(sender()))
      ImapSessionExecutor.listen(self, userId, Response.Fetched)(logger)(session.execute(new UidFetchCommand(sequence.asImap, attributes.asString)))

  }

  def waitCallback(sender: ActorRef): Receive = {
    case msg@Response.Fetched(_) =>
      sender ! msg
      context.stop(self)
  }

} 
Example 128
Source File: ExpungeHandler.scala    From gatling-imap   with GNU Affero General Public License v3.0 5 votes vote down vote up
package com.linagora.gatling.imap.protocol.command

import akka.actor.{ActorRef, Props}
import com.linagora.gatling.imap.protocol.{Response, _}
import com.yahoo.imapnio.async.client.ImapAsyncSession
import com.yahoo.imapnio.async.request.ExpungeCommand
import io.gatling.core.akka.BaseActor

object ExpungeHandler {
  def props(session: ImapAsyncSession) = Props(new ExpungeHandler(session))
}

class ExpungeHandler(session: ImapAsyncSession) extends BaseActor {

  override def receive: Receive = {
    case Command.Expunge(userId) =>
      context.become(waitCallback(sender()))
      ImapSessionExecutor.listen(self, userId, Response.Expunged)(logger)(session.execute(new ExpungeCommand()))
  }

  def waitCallback(sender: ActorRef): Receive = {
    case msg@Response.Expunged(_) =>
      sender ! msg
      context.stop(self)
  }

} 
Example 129
Source File: FetchHandler.scala    From gatling-imap   with GNU Affero General Public License v3.0 5 votes vote down vote up
package com.linagora.gatling.imap.protocol.command

import akka.actor.{ActorRef, Props}
import com.linagora.gatling.imap.protocol._
import com.yahoo.imapnio.async.client.ImapAsyncSession
import com.yahoo.imapnio.async.request.FetchCommand
import io.gatling.core.akka.BaseActor


abstract class FetchAttributes {
  def asString: String
}

object FetchAttributes {

  case class ALL() extends FetchAttributes {
    override def asString = "ALL"
  }

  case class FULL() extends FetchAttributes {
    override def asString = "FULL"
  }

  case class FAST() extends FetchAttributes {
    override def asString = "FAST"
  }

  case class AttributeList(fetchAttributes: String*) extends FetchAttributes {
    override def asString = fetchAttributes.mkString(" ")
  }

}

object FetchHandler {
  def props(session: ImapAsyncSession) = Props(new FetchHandler(session))
}

class FetchHandler(session: ImapAsyncSession) extends BaseActor {

  override def receive: Receive = {
    case Command.Fetch(userId, sequence, attributes) =>
      context.become(waitCallback(sender()))
      ImapSessionExecutor.listen(self, userId, Response.Fetched)(logger)(session.execute(new FetchCommand(sequence.asImap, attributes.asString)))
  }

  def waitCallback(sender: ActorRef): Receive = {
    case msg@Response.Fetched(_) =>
      sender ! msg
      context.stop(self)
  }

} 
Example 130
Source File: ImapSessionExecutor.scala    From gatling-imap   with GNU Affero General Public License v3.0 5 votes vote down vote up
package com.linagora.gatling.imap.protocol.command

import akka.actor.ActorRef
import com.linagora.gatling.imap.protocol.{ImapResponses, Response, UserId}
import com.typesafe.scalalogging.Logger
import com.yahoo.imapnio.async.client.ImapFuture
import com.yahoo.imapnio.async.response.ImapAsyncResponse

import scala.collection.immutable.Seq
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

private[command] object ImapSessionExecutor {
  def listen(self: ActorRef, userId: UserId, getResponse: ImapResponses => Response)(logger: Logger)(response: ImapFuture[ImapAsyncResponse]): Unit = {
    listenWithHandler(self, userId, getResponse, _ => ())(logger)(response)
  }

  def listenWithHandler[T](self: ActorRef, userId: UserId, getResponse: ImapResponses => Response, callback: Future[ImapAsyncResponse] => T)(logger: Logger)(response: ImapFuture[ImapAsyncResponse]): T = {
    import collection.JavaConverters._

    callback(Future {
      val responses = response.get()
      val responsesList = ImapResponses(responses.getResponseLines.asScala.to[Seq])
      logger.trace(s"On response for $userId :\n ${responsesList.mkString("\n")}")
      self ! getResponse(responsesList)
      responses
    })
  }
} 
Example 131
Source File: ListHandler.scala    From gatling-imap   with GNU Affero General Public License v3.0 5 votes vote down vote up
package com.linagora.gatling.imap.protocol.command

import akka.actor.{ActorRef, Props}
import com.linagora.gatling.imap.protocol._
import com.yahoo.imapnio.async.client.ImapAsyncSession
import com.yahoo.imapnio.async.request.ListCommand
import com.yahoo.imapnio.async.response.ImapAsyncResponse
import io.gatling.core.akka.BaseActor

import scala.concurrent.Future
import scala.util.{Failure, Success, Try}

object ListHandler {
  def props(session: ImapAsyncSession) = Props(new ListHandler(session))
}

class ListHandler(session: ImapAsyncSession) extends BaseActor {

  override def receive: Receive = {
    case Command.List(userId, reference, name) =>
      context.become(waitCallback(sender()))
      ImapSessionExecutor.listenWithHandler(self, userId, Response.Listed, callback)(logger)(session.execute(new ListCommand(reference, name)))
  }

  private def callback(response: Future[ImapAsyncResponse]) = {
    Try(response) match {
      case Success(_) =>
      case Failure(e) =>
        logger.error("ERROR when executing LIST COMMAND", e)
        throw e
    }
  }

  def waitCallback(sender: ActorRef): Receive = {
    case msg@Response.Listed(_) =>
      sender ! msg
      context.stop(self)
  }

} 
Example 132
Source File: AppendHandler.scala    From gatling-imap   with GNU Affero General Public License v3.0 5 votes vote down vote up
package com.linagora.gatling.imap.protocol.command

import java.nio.charset.StandardCharsets

import javax.mail.Flags

import akka.actor.{ActorRef, Props}
import com.linagora.gatling.imap.protocol._
import com.yahoo.imapnio.async.client.ImapAsyncSession
import com.yahoo.imapnio.async.request.AppendCommand
import com.yahoo.imapnio.async.response.ImapAsyncResponse
import io.gatling.core.akka.BaseActor

import scala.concurrent.Future
import scala.util.{Failure, Success, Try}

object AppendHandler {
  def props(session: ImapAsyncSession) = Props(new AppendHandler(session))
}

class AppendHandler(session: ImapAsyncSession) extends BaseActor {

  override def receive: Receive = {
    case Command.Append(userId, mailbox, flags, date, content) =>
      if (date.isDefined) throw new NotImplementedError("Date parameter for APPEND is still not implemented")

      logger.debug(s"APPEND receive from sender ${sender.path} on ${self.path}")
      context.become(waitCallback(sender()))
      val nullDate = null
      val crLfContent = content.replaceAll("(?<!\r)\n", "\r\n").getBytes(StandardCharsets.UTF_8)
      ImapSessionExecutor
        .listenWithHandler(self, userId, Response.Appended, callback)(logger)(session.execute(new AppendCommand(mailbox, flags.map(toImapFlags).orNull, nullDate, crLfContent)))
  }

  private def callback(response: Future[ImapAsyncResponse]) = {
    Try(response) match {
      case Success(futureResult) =>
        futureResult.onComplete(future => {
          logger.debug(s"AppendHandler command completed, success : ${future.isSuccess}")
          if (!future.isSuccess) {
            logger.error("AppendHandler command failed", future.toEither.left)
          }

        })
      case Failure(e) =>
        logger.error("ERROR when executing APPEND COMMAND", e)
        throw e
    }
  }

  private def toImapFlags(flags: Seq[String]): Flags = {
    val imapFlags = new Flags()
    flags.foreach(imapFlags.add)
    imapFlags
  }

  def waitCallback(sender: ActorRef): Receive = {
    case msg@Response.Appended(_) =>
      logger.debug(s"APPEND reply to sender ${sender.path}")
      sender ! msg
      context.stop(self)
  }

} 
Example 133
Source File: LoginHandler.scala    From gatling-imap   with GNU Affero General Public License v3.0 5 votes vote down vote up
package com.linagora.gatling.imap.protocol.command

import akka.actor.{ActorRef, Props}
import com.linagora.gatling.imap.protocol._
import com.yahoo.imapnio.async.client.ImapAsyncSession
import com.yahoo.imapnio.async.request.LoginCommand
import com.yahoo.imapnio.async.response.ImapAsyncResponse
import io.gatling.core.akka.BaseActor

import scala.concurrent.Future
import scala.util.{Failure, Success, Try}

object LoginHandler {
  def props(session: ImapAsyncSession) = Props(new LoginHandler(session))
}

class LoginHandler(session: ImapAsyncSession) extends BaseActor {

  override def receive: Receive = {
    case Command.Login(userId, user, password) =>
      logger.trace(s"LoginHandler for user : ${userId.value}, on actor ${self.path} responding to ${sender.path}")
      context.become(waitForLoggedIn(sender()))
      ImapSessionExecutor.listenWithHandler(self, userId, Response.LoggedIn, callback)(logger)(session.execute(new LoginCommand(user, password)))
  }

  private def callback(response: Future[ImapAsyncResponse]) = {
    Try(response) match {
      case Success(futureResult) =>
        futureResult.onComplete(future => {
            logger.debug(s"LoginHandler command completed, success : ${future.isSuccess}")
            if (!future.isSuccess) {
              logger.error("LoginHandler command failed", future.toEither.left)
            }
          })
      case Failure(e) =>
        logger.error("ERROR when executing LOGIN COMMAND", e)
        throw e
    }
  }

  def waitForLoggedIn(sender: ActorRef): Receive = {
    case msg@Response.LoggedIn(_) =>
      logger.trace(s"LoginHandler respond to ${sender.path} with $msg")
      sender ! msg
      context.stop(self)
  }

} 
Example 134
Source File: SelectHandler.scala    From gatling-imap   with GNU Affero General Public License v3.0 5 votes vote down vote up
package com.linagora.gatling.imap.protocol.command

import akka.actor.{ActorRef, Props}
import com.linagora.gatling.imap.protocol._
import com.yahoo.imapnio.async.client.ImapAsyncSession
import com.yahoo.imapnio.async.request.SelectFolderCommand
import io.gatling.core.akka.BaseActor

object SelectHandler {
  def props(session: ImapAsyncSession) = Props(new SelectHandler(session))
}

class SelectHandler(session: ImapAsyncSession) extends BaseActor {

  override def receive: Receive = {
    case Command.Select(userId, mailbox) =>
      context.become(waitCallback(sender()))
      ImapSessionExecutor.listen(self, userId, Response.Selected)(logger)(session.execute(new SelectFolderCommand(mailbox)))
  }

  def waitCallback(sender: ActorRef): Receive = {
    case msg@Response.Selected(_) =>
      sender ! msg
      context.stop(self)
  }

} 
Example 135
Source File: WebSocket.scala    From trucking-iot   with Apache License 2.0 5 votes vote down vote up
package controllers

import javax.inject.{Inject, Singleton}

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.scaladsl.Sink
import akka.stream.{Materializer, ThrottleMode}
import com.typesafe.config.ConfigFactory
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer}
import play.api.libs.streams.ActorFlow
import play.api.mvc.{Controller, WebSocket}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._


//@Singleton
class KafkaWebSocket @Inject() (implicit system: ActorSystem, materializer: Materializer) extends Controller {

  def kafkaWS = WebSocket.accept[String, String] { request =>
    ActorFlow.actorRef(out => KafkaWSActor.props(out))
  }

  object KafkaWSActor {
    def props(outRef: ActorRef) = Props(new KafkaWSActor(outRef))
  }

  class KafkaWSActor(outRef: ActorRef) extends Actor {

    val config = ConfigFactory.load()
    val combinedConfig = ConfigFactory.defaultOverrides()
      .withFallback(config)
      .withFallback(ConfigFactory.defaultApplication())
      .getConfig("trucking-web-application.backend")

    val consumerSettings = ConsumerSettings(system, new ByteArrayDeserializer, new StringDeserializer)
      //.withBootstrapServers("sandbox-hdf.hortonworks.com:6667")
      .withBootstrapServers(combinedConfig.getString("kafka.bootstrap-servers"))
      .withGroupId("group1")
      .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

    Consumer.committableSource(consumerSettings, Subscriptions.topics("trucking_data_joined"))
      .mapAsync(1) { msg => Future(outRef ! msg.record.value).map(_ => msg) }
      //.mapAsync(1) { msg => msg.committableOffset.commitScaladsl() } // TODO: Disabling commits for debug
      .throttle(1, 250.milliseconds, 1, ThrottleMode.Shaping)
      .runWith(Sink.ignore)

    def receive = {
      case msg: String => outRef ! s"Ack: $msg"
    }
  }

} 
Example 136
Source File: SharedFlowManager.scala    From trucking-iot   with Apache License 2.0 5 votes vote down vote up
package com.orendainx.trucking.simulator.flows

import akka.actor.{ActorRef, PoisonPill, Props, Terminated}
import com.orendainx.trucking.simulator.flows.FlowManager.ShutdownFlow
import com.orendainx.trucking.simulator.transmitters.DataTransmitter.Transmit


object SharedFlowManager {
  def props(transmitter: ActorRef) =
    Props(new SharedFlowManager(transmitter))
}
class SharedFlowManager(transmitter: ActorRef) extends FlowManager {

  def receive = {
    case msg: Transmit => transmitter ! msg

    case ShutdownFlow =>
      transmitter ! PoisonPill
      context watch transmitter

    case Terminated(`transmitter`) =>
      context stop self
  }
} 
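The shutdown sequence above (send PoisonPill, then deathwatch, then stop on Terminated) is a general idiom worth isolating; a minimal self-contained sketch with hypothetical names:

import akka.actor.{Actor, ActorRef, PoisonPill, Props, Terminated}

class Shutdowner(worker: ActorRef) extends Actor {
  def receive = {
    case "shutdown" =>
      worker ! PoisonPill   // worker stops after draining messages already queued
      context watch worker  // request a Terminated notification for it
    case Terminated(`worker`) =>
      context stop self     // only stop ourselves once the worker is really gone
  }
}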
Example 137
Source File: TruckAndTrafficFlowManager.scala    From trucking-iot   with Apache License 2.0 5 votes vote down vote up
package com.orendainx.trucking.simulator.flows

import akka.actor.{ActorRef, PoisonPill, Props, Terminated}
import com.orendainx.trucking.commons.models.{TrafficData, TruckData}
import com.orendainx.trucking.simulator.flows.FlowManager.ShutdownFlow
import com.orendainx.trucking.simulator.transmitters.DataTransmitter.Transmit


object TruckAndTrafficFlowManager {

  def props(truckTransmitter: ActorRef, trafficTransmitter: ActorRef) =
    Props(new TruckAndTrafficFlowManager(truckTransmitter, trafficTransmitter))
}

class TruckAndTrafficFlowManager(truckTransmitter: ActorRef, trafficTransmitter: ActorRef) extends FlowManager {

  var transmittersTerminated = 0

  def receive = {
    case Transmit(data: TruckData) => truckTransmitter ! Transmit(data)
    case Transmit(data: TrafficData) => trafficTransmitter ! Transmit(data)

    case ShutdownFlow =>
      truckTransmitter ! PoisonPill
      trafficTransmitter ! PoisonPill
      context watch truckTransmitter
      context watch trafficTransmitter

    case Terminated(_) =>
      transmittersTerminated += 1
      if (transmittersTerminated == 2) context stop self
  }
} 
Example 138
Source File: ManualCoordinator.scala    From trucking-iot   with Apache License 2.0 5 votes vote down vote up
package com.orendainx.trucking.simulator.coordinators

import akka.actor.{ActorLogging, ActorRef, Props}
import com.orendainx.trucking.simulator.coordinators.GeneratorCoordinator.AcknowledgeTick
import com.orendainx.trucking.simulator.coordinators.ManualCoordinator.Tick
import com.orendainx.trucking.simulator.generators.DataGenerator
import com.typesafe.config.Config

import scala.collection.mutable


object ManualCoordinator {
  def props(generators: Seq[ActorRef])(implicit config: Config) =
    Props(new ManualCoordinator(generators))
}

class ManualCoordinator(generators: Seq[ActorRef])(implicit config: Config) extends GeneratorCoordinator with ActorLogging {

  // Set all generators as ready
  val generatorsReady = mutable.Set(generators: _*)

  def receive = {
    case AcknowledgeTick(generator) =>
      generatorsReady += generator
      log.debug(s"Generator acknowledged tick - total ready: ${generatorsReady.size}")

    case Tick =>
      generatorsReady.foreach(_ ! DataGenerator.GenerateData)
      generatorsReady.clear()
  }

} 
Example 139
Source File: AutomaticCoordinator.scala    From trucking-iot   with Apache License 2.0 5 votes vote down vote up
package com.orendainx.trucking.simulator.coordinators

import akka.actor.{ActorLogging, ActorRef, PoisonPill, Props, Terminated}
import com.orendainx.trucking.simulator.coordinators.AutomaticCoordinator.TickGenerator
import com.orendainx.trucking.simulator.coordinators.GeneratorCoordinator.AcknowledgeTick
import com.orendainx.trucking.simulator.flows.FlowManager
import com.orendainx.trucking.simulator.generators.DataGenerator
import com.typesafe.config.Config

import scala.collection.mutable
import scala.concurrent.duration._
import scala.util.Random


object AutomaticCoordinator {
  def props(eventCount: Int, generators: Seq[ActorRef], flowManager: ActorRef)(implicit config: Config) =
    Props(new AutomaticCoordinator(eventCount, generators, flowManager))
}

class AutomaticCoordinator(eventCount: Int, generators: Seq[ActorRef], flowManager: ActorRef)(implicit config: Config) extends GeneratorCoordinator with ActorLogging {

  // For receive messages and an execution context
  import context.dispatcher

  // Event delay settings, and initialize a counter for each data generator
  val eventDelay = config.getInt("generator.event-delay")
  val eventDelayJitter = config.getInt("generator.event-delay-jitter")
  val generateCounters = mutable.Map(generators.map((_, 0)): _*)

  // Insert each new generator into the simulation (at a random scheduled point) and begin "ticking"
  generators.foreach { generator =>
    context.system.scheduler.scheduleOnce(Random.nextInt(eventDelay + eventDelayJitter).milliseconds, self, TickGenerator(generator))
  }

  def receive = {
    case AcknowledgeTick(generator) =>
      self ! TickGenerator(generator) // Each ack triggers another tick

    case TickGenerator(generator) =>
      generateCounters.update(generator, generateCounters(generator)+1)

      if (generateCounters(generator) <= eventCount) {
        context.system.scheduler.scheduleOnce((eventDelay + Random.nextInt(eventDelayJitter)).milliseconds, generator, DataGenerator.GenerateData)
      } else {
        // Kill the individual generator, since we are done with it.
        generator ! PoisonPill

        // If all other generators have met their count, tell flow manager to shutdown
        if (!generateCounters.values.exists(_ <= eventCount)) {
          flowManager ! FlowManager.ShutdownFlow
          context watch flowManager
        }
      }

    // Once the flow manager and its transmitters terminate, shut it all down
    case Terminated(`flowManager`) =>
      context.system.terminate()
  }
} 
Example 140
Source File: TrafficGenerator.scala    From trucking-iot   with Apache License 2.0 5 votes vote down vote up
package com.orendainx.trucking.simulator.generators

import java.time.Instant

import akka.actor.{ActorLogging, ActorRef, Props, Stash}
import com.orendainx.trucking.commons.models._
import com.orendainx.trucking.simulator.coordinators.GeneratorCoordinator
import com.orendainx.trucking.simulator.depots.ResourceDepot.{RequestRoute, ReturnRoute}
import com.orendainx.trucking.simulator.generators.DataGenerator.{GenerateData, NewResource}
import com.orendainx.trucking.simulator.models._
import com.orendainx.trucking.simulator.transmitters.DataTransmitter.Transmit
import com.orendainx.trucking.simulator.models.{EmptyRoute, Route}
import com.typesafe.config.Config

import scala.collection.mutable
import scala.util.Random


object TrafficGenerator {
  def props(depot: ActorRef, flowManager: ActorRef)(implicit config: Config) =
    Props(new TrafficGenerator(depot, flowManager))
}

class TrafficGenerator(depot: ActorRef, flowManager: ActorRef)(implicit config: Config) extends DataGenerator with Stash with ActorLogging {

  // Some settings
  val NumberOfRoutes = config.getInt("generator.routes-to-simulate")
  val CongestionDelta = config.getInt("generator.congestion.delta")

  var congestionLevel = config.getInt("generator.congestion.start")
  var routes = mutable.Buffer.empty[Route]

  // Request NumberOfRoutes routes
  (1 to NumberOfRoutes).foreach(_ => depot ! RequestRoute(EmptyRoute))

  context become waitingOnDepot

  def waitingOnDepot: Receive = {
    case NewResource(newRoute: Route) =>
      routes += newRoute
      unstashAll()
      context become driverActive
      log.info(s"Received new route: ${newRoute.name}")

    case GenerateData =>
      stash()
      log.debug("Received Tick command while waiting on route. Command stashed for later processing.")
  }

  def driverActive: Receive = {
    case GenerateData =>
      routes.foreach { route =>
        // Create traffic data and emit it
        congestionLevel += -CongestionDelta + Random.nextInt(CongestionDelta*2 + 1)
        val traffic = TrafficData(Instant.now().toEpochMilli, route.id, congestionLevel)
        flowManager ! Transmit(traffic)
      }

      // Tell the coordinator we've acknowledged the drive command
      sender() ! GeneratorCoordinator.AcknowledgeTick(self)
  }

  def receive = {
    case _ => log.error("This message should never be seen.")
  }

  // When this actor is stopped, release resources it may still be holding onto
  override def postStop(): Unit =
    routes.foreach(ReturnRoute)
} 
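The waitingOnDepot/driverActive handover above combines context.become with Stash, so ticks that arrive before a route is available are buffered rather than dropped. A minimal sketch of the same pattern (hypothetical names):

import akka.actor.{Actor, Stash}

class Gate extends Actor with Stash {
  def receive: Receive = waiting

  def waiting: Receive = {
    case "open" =>
      unstashAll()          // replay everything buffered while waiting
      context become open
    case _ => stash()       // buffer until the gate opens
  }

  def open: Receive = {
    case msg => println(s"processing $msg")
  }
}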
Example 141
Source File: StorageNodeActor.scala    From JustinDB   with Apache License 2.0 5 votes vote down vote up
package justin.db.actors

import akka.actor.{Actor, ActorRef, Props, RootActorPath, Terminated}
import akka.cluster.ClusterEvent.{CurrentClusterState, MemberUp}
import akka.cluster.{Cluster, Member, MemberStatus}
import com.typesafe.scalalogging.StrictLogging
import justin.db.actors.protocol.{RegisterNode, _}
import justin.db.cluster.ClusterMembers
import justin.db.cluster.datacenter.Datacenter
import justin.db.consistenthashing.{NodeId, Ring}
import justin.db.replica._
import justin.db.replica.read.{ReplicaLocalReader, ReplicaReadCoordinator, ReplicaRemoteReader}
import justin.db.replica.write.{ReplicaLocalWriter, ReplicaRemoteWriter, ReplicaWriteCoordinator}
import justin.db.storage.PluggableStorageProtocol

import scala.concurrent.ExecutionContext

class StorageNodeActor(nodeId: NodeId, datacenter: Datacenter, storage: PluggableStorageProtocol, ring: Ring, n: N) extends Actor with StrictLogging {

  private[this] implicit val ec: ExecutionContext = context.dispatcher
  private[this] val cluster = Cluster(context.system)

  private[this] var clusterMembers   = ClusterMembers.empty
  private[this] val readCoordinator  = new ReplicaReadCoordinator(nodeId, ring, n, new ReplicaLocalReader(storage), new ReplicaRemoteReader)
  private[this] val writeCoordinator = new ReplicaWriteCoordinator(nodeId, ring, n, new ReplicaLocalWriter(storage), new ReplicaRemoteWriter)

  private[this] val coordinatorRouter = context.actorOf(
    props = RoundRobinCoordinatorRouter.props(readCoordinator, writeCoordinator),
    name  = RoundRobinCoordinatorRouter.routerName
  )

  private[this] val name = self.path.name

  override def preStart(): Unit = cluster.subscribe(this.self, classOf[MemberUp])
  override def postStop(): Unit = cluster.unsubscribe(this.self)

  def receive: Receive = {
    receiveDataPF orElse receiveClusterDataPF orElse receiveRegisterNodePR orElse notHandledPF
  }

  private[this] def receiveDataPF: Receive = {
    case readReq: StorageNodeReadRequest              =>
      coordinatorRouter ! ReadData(sender(), clusterMembers, readReq)
    case writeLocalDataReq: StorageNodeWriteDataLocal =>
      coordinatorRouter ! WriteData(sender(), clusterMembers, writeLocalDataReq)
    case writeClientReplicaReq: Internal.WriteReplica =>
      coordinatorRouter ! WriteData(sender(), clusterMembers, writeClientReplicaReq)
  }

  private[this] def receiveClusterDataPF: Receive = {
    case "members"                  => sender() ! clusterMembers
    case MemberUp(member)           => register(nodeId, ring, member)
    case state: CurrentClusterState => state.members.filter(_.status == MemberStatus.Up).foreach(member => register(nodeId, ring, member))
    case Terminated(actorRef)       => clusterMembers = clusterMembers.removeByRef(StorageNodeActorRef(actorRef))
  }

  private[this] def receiveRegisterNodePR: Receive = {
    case RegisterNode(senderNodeId) if clusterMembers.notContains(senderNodeId) =>
      val senderRef = sender()
      context.watch(senderRef)
      clusterMembers = clusterMembers.add(senderNodeId, StorageNodeActorRef(senderRef))
      senderRef ! RegisterNode(nodeId)
      logger.info(s"Actor[$name]: Successfully registered node [id-${senderNodeId.id}]")
    case RegisterNode(senderNodeId) =>
      logger.info(s"Actor[$name]: Node [id-${senderNodeId.id}] is already registered")
  }

  private[this] def register(nodeId: NodeId, ring: Ring, member: Member) = {
    (member.hasRole(StorageNodeActor.role), datacenter.name == member.dataCenter) match {
      case (true, true) => register()
      case (_,   false) => logger.info(s"Actor[$name]: $member doesn't belong to datacenter [${datacenter.name}]")
      case (false,   _) => logger.info(s"Actor[$name]: $member doesn't have [${StorageNodeActor.role}] role (it has roles ${member.roles})")
    }

    def register() = for {
      ringNodeId    <- ring.nodesId
      nodeName       = StorageNodeActor.name(ringNodeId, Datacenter(member.dataCenter))
      nodeRef        = context.actorSelection(RootActorPath(member.address) / "user" / nodeName)
    } yield nodeRef ! RegisterNode(nodeId)
  }

  private[this] def notHandledPF: Receive = {
    case t => logger.warn(s"Actor[$name]: Not handled message [$t]")
  }
}

object StorageNodeActor {
  def role: String = "storagenode"
  def name(nodeId: NodeId, datacenter: Datacenter): String = s"${datacenter.name}-id-${nodeId.id}"
  def props(nodeId: NodeId, datacenter: Datacenter, storage: PluggableStorageProtocol, ring: Ring, n: N): Props = {
    Props(new StorageNodeActor(nodeId, datacenter, storage, ring, n))
  }
}

case class StorageNodeActorRef(ref: ActorRef) extends AnyVal 
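StorageNodeActor builds its receive by chaining partial functions with orElse, which keeps each concern (data, cluster events, registration, fallback) in its own handler. A minimal sketch of that composition with hypothetical handlers:

import akka.actor.Actor

class Composed extends Actor {
  def receive: Receive = data orElse cluster orElse fallback

  private def data: Receive = { case "read" => sender() ! "value" }
  private def cluster: Receive = { case "members" => sender() ! Set.empty[String] }
  private def fallback: Receive = { case other => println(s"not handled: $other") }
}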
Example 142
Source File: ConnectionProvider.scala    From reactive-consul   with MIT License 5 votes vote down vote up
package stormlantern.consul.client.discovery

import akka.actor.ActorRef

import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

trait ConnectionProvider {
  def getConnection: Future[Any]
  def returnConnection(connectionHolder: ConnectionHolder): Unit = ()
  def destroy(): Unit = ()
  def getConnectionHolder(i: String, lb: ActorRef): Future[ConnectionHolder] = getConnection.map { connection ⇒
    new ConnectionHolder {
      override def connection: Future[Any] = getConnection
      override val loadBalancer: ActorRef = lb
      override val id: String = i
    }
  }
} 
Example 143
Source File: ConnectionStrategy.scala    From reactive-consul   with MIT License 5 votes vote down vote up
package stormlantern.consul.client.discovery

import akka.actor.{ ActorRef, ActorRefFactory }
import stormlantern.consul.client.loadbalancers.{ LoadBalancer, LoadBalancerActor, RoundRobinLoadBalancer }

case class ServiceDefinition(key: String, serviceName: String, serviceTags: Set[String] = Set.empty, dataCenter: Option[String] = None)
object ServiceDefinition {

  def apply(serviceName: String): ServiceDefinition = {
    ServiceDefinition(serviceName, serviceName)
  }

  def apply(serviceName: String, serviceTags: Set[String]): ServiceDefinition = {
    ServiceDefinition(serviceName, serviceName, serviceTags)
  }

}

case class ConnectionStrategy(
  serviceDefinition: ServiceDefinition,
  connectionProviderFactory: ConnectionProviderFactory,
  loadBalancerFactory: ActorRefFactory ⇒ ActorRef
)

object ConnectionStrategy {

  def apply(serviceDefinition: ServiceDefinition, connectionProviderFactory: ConnectionProviderFactory, loadBalancer: LoadBalancer): ConnectionStrategy =
    ConnectionStrategy(serviceDefinition, connectionProviderFactory, ctx ⇒ ctx.actorOf(LoadBalancerActor.props(loadBalancer, serviceDefinition.key)))

  def apply(serviceDefinition: ServiceDefinition, connectionProviderFactory: (String, Int) ⇒ ConnectionProvider, loadBalancer: LoadBalancer): ConnectionStrategy = {
    val cpf = new ConnectionProviderFactory {
      override def create(host: String, port: Int): ConnectionProvider = connectionProviderFactory(host, port)
    }
    ConnectionStrategy(serviceDefinition, cpf, ctx ⇒ ctx.actorOf(LoadBalancerActor.props(loadBalancer, serviceDefinition.key)))
  }

  def apply(serviceName: String, connectionProviderFactory: (String, Int) ⇒ ConnectionProvider, loadBalancer: LoadBalancer): ConnectionStrategy = {
    ConnectionStrategy(ServiceDefinition(serviceName), connectionProviderFactory, loadBalancer)
  }

  def apply(serviceName: String, connectionProviderFactory: (String, Int) ⇒ ConnectionProvider): ConnectionStrategy = {
    ConnectionStrategy(serviceName, connectionProviderFactory, new RoundRobinLoadBalancer)
  }

} 
Example 144
Source File: SessionActor.scala    From reactive-consul   with MIT License 5 votes vote down vote up
package stormlantern.consul.client.session

import java.util.UUID

import akka.actor.{ ActorRef, Props, Actor }
import stormlantern.consul.client.dao.ConsulHttpClient
import stormlantern.consul.client.session.SessionActor.{ MonitorSession, SessionAcquired, StartSession }

import scala.concurrent.Future

class SessionActor(httpClient: ConsulHttpClient, listener: ActorRef) extends Actor {

  import scala.concurrent.ExecutionContext.Implicits.global

  // Actor state
  var sessionId: Option[UUID] = None

  def receive = {
    case StartSession ⇒ startSession().map { id ⇒
      self ! SessionAcquired(id)
    }
    case SessionAcquired(id) ⇒
      sessionId = Some(id)
      listener ! SessionAcquired(id)
      self ! MonitorSession(0)
    case MonitorSession(lastIndex) ⇒

  }

  // Internal methods
  def startSession(): Future[UUID] = {
    httpClient.putSession().map { id ⇒
      sessionId = Some(id)
      id
    }
  }
}

object SessionActor {
  // Constructors
  def props(httpClient: ConsulHttpClient, listener: ActorRef) = Props(new SessionActor(httpClient, listener))
  // Public messages
  case object StartSession
  case class SessionAcquired(sessionId: UUID)
  // Private messages
  private case class MonitorSession(lastIndex: Long)
} 
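Note that SessionActor mutates sessionId from inside Future callbacks, which run on a dispatcher thread and can race with the actor's own message processing. The conventional alternative is to pipe the result back to self as a message; a minimal sketch with hypothetical names:

import java.util.UUID

import akka.actor.Actor
import akka.pattern.pipe

import scala.concurrent.Future

object SafeSession { case class Acquired(id: UUID) }

class SafeSession extends Actor {
  import SafeSession._
  import context.dispatcher

  private var sessionId: Option[UUID] = None

  def receive = {
    case "start" =>
      // do the work off-thread, but apply the state change on the actor thread
      Future(UUID.randomUUID()).map(Acquired(_)).pipeTo(self)
    case Acquired(id) =>
      sessionId = Some(id)
  }
}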
Example 145
Source File: ServiceBrokerSpec.scala    From reactive-consul   with MIT License 5 votes vote down vote up
package stormlantern.consul.client

import akka.actor.{ ActorRef, ActorSystem }
import akka.actor.Status.Failure
import akka.testkit.{ ImplicitSender, TestKit }
import org.scalamock.scalatest.MockFactory
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{ BeforeAndAfterAll, FlatSpecLike, Matchers }
import stormlantern.consul.client.dao.ConsulHttpClient
import stormlantern.consul.client.discovery.ConnectionHolder
import stormlantern.consul.client.helpers.CallingThreadExecutionContext
import stormlantern.consul.client.loadbalancers.LoadBalancerActor
import stormlantern.consul.client.util.Logging

import scala.concurrent.Future

class ServiceBrokerSpec(_system: ActorSystem) extends TestKit(_system) with ImplicitSender with FlatSpecLike
    with Matchers with ScalaFutures with BeforeAndAfterAll with MockFactory with Logging {

  implicit val ec = CallingThreadExecutionContext()
  def this() = this(ActorSystem("ServiceBrokerSpec"))

  override def afterAll() {
    TestKit.shutdownActorSystem(system)
  }

  trait TestScope {
    val connectionHolder: ConnectionHolder = mock[ConnectionHolder]
    val httpClient: ConsulHttpClient = mock[ConsulHttpClient]
    val loadBalancer: ActorRef = self
  }

  "The ServiceBroker" should "return a service connection when requested" in new TestScope {
    (connectionHolder.connection _).expects().returns(Future.successful(true))
    (connectionHolder.loadBalancer _).expects().returns(loadBalancer)
    val sut = new ServiceBroker(self, httpClient)
    val result: Future[Boolean] = sut.withService("service1") { service: Boolean ⇒
      Future.successful(service)
    }
    expectMsgPF() {
      case ServiceBrokerActor.GetServiceConnection("service1") ⇒
        lastSender ! connectionHolder
        result.map(_ shouldEqual true).futureValue
    }
    expectMsg(LoadBalancerActor.ReturnConnection(connectionHolder))
  }

  it should "return the connection when an error occurs" in new TestScope {
    (connectionHolder.connection _).expects().returns(Future.successful(true))
    (connectionHolder.loadBalancer _).expects().returns(loadBalancer)
    val sut = new ServiceBroker(self, httpClient)
    val result: Future[Boolean] = sut.withService[Boolean, Boolean]("service1") { service: Boolean ⇒
      throw new RuntimeException()
    }
    expectMsgPF() {
      case ServiceBrokerActor.GetServiceConnection("service1") ⇒
        lastSender ! connectionHolder
        an[RuntimeException] should be thrownBy result.futureValue
    }
    expectMsg(LoadBalancerActor.ReturnConnection(connectionHolder))
  }

  it should "throw an error when an excpetion is returned" in new TestScope {
    val sut = new ServiceBroker(self, httpClient)
    val result: Future[Boolean] = sut.withService("service1") { service: Boolean ⇒
      Future.successful(service)
    }
    expectMsgPF() {
      case ServiceBrokerActor.GetServiceConnection("service1") ⇒
        lastSender ! Failure(new RuntimeException())
        an[RuntimeException] should be thrownBy result.futureValue
    }
  }
} 
Example 146
Source File: AmqpMessageTracker.scala    From gatling-amqp-plugin   with Apache License 2.0 5 votes vote down vote up
package ru.tinkoff.gatling.amqp.client

import akka.actor.ActorRef
import io.gatling.core.action.Action
import io.gatling.core.session.Session
import ru.tinkoff.gatling.amqp.AmqpCheck
import ru.tinkoff.gatling.amqp.client.AmqpMessageTrackerActor.MessagePublished

class AmqpMessageTracker(actor: ActorRef) {

  def track(
      matchId: String,
      sent: Long,
      replyTimeout: Long,
      checks: List[AmqpCheck],
      session: Session,
      next: Action,
      requestName: String
  ): Unit =
    actor ! MessagePublished(
      matchId,
      sent,
      replyTimeout,
      checks,
      session,
      next,
      requestName
    )
} 
Example 147
Source File: ConsumerCommands.scala    From reactive-kafka-microservice-template   with Apache License 2.0
package com.omearac.http.routes

import akka.actor.ActorRef
import akka.event.LoggingAdapter
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.pattern.ask
import akka.util.Timeout
import com.omearac.consumers.DataConsumer.{ConsumerActorReply, ManuallyInitializeStream, ManuallyTerminateStream}

import scala.concurrent.duration._



trait ConsumerCommands {
  def dataConsumer: ActorRef

  def eventConsumer: ActorRef

  def log: LoggingAdapter

  val dataConsumerHttpCommands: Route = pathPrefix("data_consumer") {
    implicit val timeout = Timeout(10 seconds)
    path("stop") {
      get {
        onSuccess(dataConsumer ? ManuallyTerminateStream) {
          case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message);
          case _ => complete(StatusCodes.InternalServerError)
        }
      }
    } ~
      path("start") {
        get {
          onSuccess(dataConsumer ? ManuallyInitializeStream) {
            case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message)
            case _ => complete(StatusCodes.InternalServerError)
          }
        }
      }
  }

  val eventConsumerHttpCommands: Route = pathPrefix("event_consumer") {
    implicit val timeout = Timeout(10 seconds)
    path("stop") {
      get {
        onSuccess(eventConsumer ? ManuallyTerminateStream) {
          case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message);
          case _ => complete(StatusCodes.InternalServerError)
        }
      }
    } ~
      path("start") {
        get {
          onSuccess(eventConsumer ? ManuallyInitializeStream) {
            case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message)
            case _ => complete(StatusCodes.InternalServerError)
          }
        }
      }
  }

} 
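A minimal sketch of wiring these routes into a server (object name, host and port are assumptions; the stub actors stand in for the application's real DataConsumer and EventConsumer):

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import com.omearac.http.routes.ConsumerCommands

object ConsumerHttpServer extends App with ConsumerCommands {
  implicit val system = ActorSystem("consumer-http")
  implicit val materializer = ActorMaterializer()

  // Stand-ins only: replace with the real consumer actors.
  val dataConsumer: ActorRef = system.actorOf(Props.empty, "dataConsumer")
  val eventConsumer: ActorRef = system.actorOf(Props.empty, "eventConsumer")
  val log: LoggingAdapter = Logging(system, getClass.getName)

  // Exposes GET /data_consumer/{start,stop} and GET /event_consumer/{start,stop}
  Http().bindAndHandle(dataConsumerHttpCommands ~ eventConsumerHttpCommands, "localhost", 8080)
}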
Example 148
Source File: ProducerCommands.scala    From reactive-kafka-microservice-template   with Apache License 2.0
package com.omearac.http.routes

import akka.actor.ActorRef
import akka.event.LoggingAdapter
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.util.Timeout
import com.omearac.producers.DataProducer.PublishMessages
import com.omearac.shared.EventMessages.MessagesPublished

import scala.concurrent.duration._




trait ProducerCommands {
    def log: LoggingAdapter
    def dataProducer: ActorRef

    val producerHttpCommands: Route = pathPrefix("data_producer"){
        implicit val timeout = Timeout(10 seconds)
        path("produce" / IntNumber) {
            {numOfMessagesToProduce =>
                get {
                    onSuccess(dataProducer ? PublishMessages(numOfMessagesToProduce)) {
                        case MessagesPublished(numberOfMessages) => complete(StatusCodes.OK,  numberOfMessages + " messages Produced as Ordered, Boss!")
                        case _ => complete(StatusCodes.InternalServerError)
                    }
                }
            }
        }
    }
} 
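Mixed into a running server, this route triggers a batch publication with a plain HTTP request such as GET /data_producer/produce/100: the producer actor receives PublishMessages(100) and the route completes with the reported MessagesPublished count.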
Example 149
Source File: ConsumerStream.scala    From reactive-kafka-microservice-template   with Apache License 2.0
package com.omearac.consumers

import akka.actor.{ActorRef, ActorSystem}
import akka.kafka.ConsumerMessage.CommittableOffsetBatch
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerMessage, ConsumerSettings, Subscriptions}
import akka.stream.scaladsl.{Flow, Sink}
import com.omearac.shared.EventMessages.FailedMessageConversion
import com.omearac.shared.JsonMessageConversion.Conversion
import com.omearac.shared.{AkkaStreams, EventSourcing}
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer}

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.Future



trait ConsumerStream extends AkkaStreams with EventSourcing {
    implicit val system: ActorSystem
    def self: ActorRef


    def createStreamSink(consumerActorSink : ActorRef) = {
        Sink.actorRefWithAck(consumerActorSink, "STREAM_INIT", "OK", "STREAM_DONE")
    }

    def createStreamSource(consumerProperties: Map[String,String])  = {
        val kafkaMBAddress = consumerProperties("bootstrap-servers")
        val groupID = consumerProperties("groupId")
        val topicSubscription = consumerProperties("subscription-topic")
        val consumerSettings = ConsumerSettings(system, new ByteArrayDeserializer, new StringDeserializer)
            .withBootstrapServers(kafkaMBAddress)
            .withGroupId(groupID)
            .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

        Consumer.committableSource(consumerSettings, Subscriptions.topics(topicSubscription))
    }

    def createStreamFlow[msgType: Conversion] = {
        Flow[ConsumerMessage.CommittableMessage[Array[Byte], String]]
            .map(msg => (msg.committableOffset, Conversion[msgType].convertFromJson(msg.record.value)))
            //Publish the conversion error event messages returned from the JSONConversion
            .map (tuple => publishConversionErrors[msgType](tuple))
            .filter(result => result.isRight)
            .map(test => test.right.get)
            //Group the commit offsets and correctly converted messages for more efficient Kafka commits
            .batch(max = 20, tuple => (CommittableOffsetBatch.empty.updated(tuple._1), ArrayBuffer[msgType](tuple._2))) {
              (tupleOfCommitOffsetAndMsgs, tuple) =>
                (tupleOfCommitOffsetAndMsgs._1.updated(tuple._1), tupleOfCommitOffsetAndMsgs._2 :+ tuple._2)
            }
            //Take the first element of the tuple (set of commit numbers) to add to kafka commit log and then return the collection of grouped case class messages
            .mapAsync(4)(tupleOfCommitOffsetAndMsgs => commitOffsetsToKafka[msgType](tupleOfCommitOffsetAndMsgs))
            .map(msgGroup => msgGroup._2)
    }

    def commitOffsetsToKafka[msgType](tupleOfCommitOffsetAndMsgs: (ConsumerMessage.CommittableOffsetBatch, ArrayBuffer[msgType])) = Future {
        (tupleOfCommitOffsetAndMsgs._1.commitScaladsl(), tupleOfCommitOffsetAndMsgs._2)
    }

    def publishConversionErrors[msgType](tupleOfCommitOffsetAndConversionResults: (ConsumerMessage.CommittableOffset, Either[FailedMessageConversion,msgType]))
    : Either[Unit,(ConsumerMessage.CommittableOffset,msgType)] = {

        if (tupleOfCommitOffsetAndConversionResults._2.isLeft) {

            //Publish a local event that there was a failure in conversion
            publishLocalEvent(tupleOfCommitOffsetAndConversionResults._2.left.get)

            //Commit the Kafka Offset to acknowledge that the message was consumed
            Left(tupleOfCommitOffsetAndConversionResults._1.commitScaladsl())
        }
        else
            Right(tupleOfCommitOffsetAndConversionResults._1,tupleOfCommitOffsetAndConversionResults._2.right.get)
    }
} 
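A sketch of how the pieces compose, written as it would appear inside a class that mixes in ConsumerStream (the KafkaMessage type, its Conversion instance, and the consuming actor are assumptions):

// Hypothetical message type with a JSON Conversion instance in scope.
val consumerProperties = Map(
  "bootstrap-servers"  -> "localhost:9092",
  "groupId"            -> "group1",
  "subscription-topic" -> "testTopic"
)

createStreamSource(consumerProperties)    // committable Kafka source
  .via(createStreamFlow[KafkaMessage])    // JSON conversion plus batched offset commits
  .to(createStreamSink(consumerActor))    // actor sink with backpressure acks
  .run()                                  // implicit Materializer assumed in scope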
Example 150
Source File: ProducerStream.scala    From reactive-kafka-microservice-template   with Apache License 2.0
package com.omearac.producers

import akka.actor.{ActorRef, ActorSystem}
import akka.kafka.ProducerSettings
import akka.kafka.scaladsl.Producer
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Source}
import com.omearac.shared.JsonMessageConversion.Conversion
import com.omearac.shared.{AkkaStreams, EventSourcing}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer}



trait ProducerStream extends AkkaStreams with EventSourcing {
    implicit val system: ActorSystem
    def self: ActorRef

    def createStreamSource[msgType] = {
        Source.queue[msgType](Int.MaxValue,OverflowStrategy.backpressure)
    }

    def createStreamSink(producerProperties: Map[String, String]) = {
        val kafkaMBAddress = producerProperties("bootstrap-servers")
        val producerSettings = ProducerSettings(system, new ByteArraySerializer, new StringSerializer).withBootstrapServers(kafkaMBAddress)

        Producer.plainSink(producerSettings)
    }

    def createStreamFlow[msgType: Conversion](producerProperties: Map[String, String]) = {
        val numberOfPartitions = producerProperties("num.partitions").toInt - 1
        val topicToPublish = producerProperties("publish-topic")
        val rand = new scala.util.Random
        val range = 0 to numberOfPartitions

        Flow[msgType].map { msg =>
            val partition = range(rand.nextInt(range.length))
            val stringJSONMessage = Conversion[msgType].convertToJson(msg)
            new ProducerRecord[Array[Byte], String](topicToPublish, partition, null, stringJSONMessage)
        }
    }
} 
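The producer side composes the same way; a sketch inside a class mixing in ProducerStream (KafkaMessage and its Conversion instance are again assumptions):

val producerProperties = Map(
  "bootstrap-servers" -> "localhost:9092",
  "num.partitions"    -> "3",
  "publish-topic"     -> "testTopic"
)

val queue = createStreamSource[KafkaMessage]                  // materializes a SourceQueue
  .via(createStreamFlow[KafkaMessage](producerProperties))    // JSON + random partition
  .to(createStreamSink(producerProperties))                   // Kafka producer sink
  .run()                                                      // implicit Materializer assumed

queue.offer(KafkaMessage("hello"))                            // enqueue for publication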
Example 151
Source File: CodebaseAnalyzeAggregatorActor.scala    From CodeAnalyzerTutorial   with Apache License 2.0
package tutor

import java.util.Date

import akka.actor.{Actor, ActorLogging, ActorRef, Cancellable, Props, Terminated}
import akka.routing.{ActorRefRoutee, RoundRobinRoutingLogic, Router}
import tutor.CodebaseAnalyzeAggregatorActor.{AnalyzeDirectory, Complete, Report, Timeout}
import tutor.SourceCodeAnalyzerActor.NewFile
import tutor.utils.BenchmarkUtil

import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

object CodebaseAnalyzeAggregatorActor {
  def props(): Props = Props(new CodebaseAnalyzeAggregatorActor)

  final case class AnalyzeDirectory(path: String)

  final case class Complete(result: Try[SourceCodeInfo])

  final case object Timeout

  final case class Report(codebaseInfo: CodebaseInfo)

}

class CodebaseAnalyzeAggregatorActor extends Actor with ActorLogging with DirectoryScanner with ReportFormatter {
  var controller: ActorRef = _
  var currentPath: String = _
  var beginTime: Date = _
  var fileCount = 0
  var completeCount = 0
  var failCount = 0
  var result: CodebaseInfo = CodebaseInfo.empty
  var timeoutTimer: Cancellable = _

  var router: Router = {
    val routees = Vector.fill(8) {
      val r = context.actorOf(SourceCodeAnalyzerActor.props())
      context watch r
      ActorRefRoutee(r)
    }
    Router(RoundRobinRoutingLogic(), routees)
  }

  override def receive: Receive = {
    case AnalyzeDirectory(path) => {
      controller = sender()
      currentPath = path
      beginTime = BenchmarkUtil.recordStart(s"analyze folder $currentPath")
      foreachFile(path, PresetFilters.knownFileTypes, PresetFilters.ignoreFolders) { file =>
        fileCount += 1
        router.route(NewFile(file.getAbsolutePath), context.self)
      }
      import context.dispatcher
      timeoutTimer = context.system.scheduler.scheduleOnce((fileCount / 1000).seconds, context.self, Timeout)
    }
    case Complete(Success(sourceCodeInfo: SourceCodeInfo)) => {
      completeCount += 1
      result = result + sourceCodeInfo
      finishIfAllComplete()
    }
    case Complete(Failure(exception)) => {
      completeCount += 1
      failCount += 1
      log.warning("processing file failed {}", exception)
      finishIfAllComplete()
    }
    case Timeout => {
      println(s"${result.totalFileNums} of $fileCount files processed before timeout")
      controller ! Report(result)
      BenchmarkUtil.recordElapse(s"analyze folder $currentPath", beginTime)
    }
    case Terminated(a) =>
      router = router.removeRoutee(a)
      val r = context.actorOf(Props[SourceCodeAnalyzerActor])
      context watch r
      router = router.addRoutee(r)
    case x@_ => log.error(s"receive unknown message $x")
  }

  def finishIfAllComplete(): Unit = {
    if (completeCount == fileCount) {
      timeoutTimer.cancel()
      controller ! Report(result)
      BenchmarkUtil.recordElapse(s"analyze folder $currentPath", beginTime)
      context.stop(self)
    }
  }
} 
Example 152
Source File: CodebaseAnalyzerAkkaApp.scala    From CodeAnalyzerTutorial   with Apache License 2.0
package tutor

import akka.actor.{ActorRef, ActorSystem}
import tutor.CodebaseAnalyzeAggregatorActor.AnalyzeDirectory

import scala.io.StdIn

object CodebaseAnalyzerAkkaApp extends App {

  val system = ActorSystem("CodebaseAnalyzer")
  val codebaseAnalyzerControllerActor: ActorRef = system.actorOf(CodebaseAnalyzerControllerActor.props())

  var shouldContinue = true
  try {
    while (shouldContinue) {
      println("please input source file folder or :q to quit")
      val input = StdIn.readLine()
      if (input == ":q") {
        shouldContinue = false
      } else {
        codebaseAnalyzerControllerActor ! AnalyzeDirectory(input)
      }
    }
  } finally {
    println("good bye!")
    system.terminate()
  }
} 
Example 153
Source File: CamelActorPublisher.scala    From reactive-activemq   with Apache License 2.0
package akka.stream.integration
package camel

import akka.actor.{ ActorLogging, ActorRef, Props }
import akka.camel.{ CamelMessage, Consumer }
import akka.event.LoggingReceive
import akka.stream.actor.ActorPublisher
import akka.stream.actor.ActorPublisherMessage.Cancel
import akka.stream.scaladsl.Source

class CamelActorPublisher(val endpointUri: String) extends Consumer with ActorPublisher[(ActorRef, CamelMessage)] with ActorLogging {
  override val autoAck: Boolean = false

  override def receive: Receive = LoggingReceive {
    case _: CamelMessage if totalDemand == 0 =>
      sender() ! akka.actor.Status.Failure(new IllegalStateException("No demand for new messages"))

    case msg: CamelMessage => onNext((sender(), msg))

    case Cancel            => context stop self
  }
}

class CamelActorPublisherWithExtractor[A: CamelMessageExtractor](val endpointUri: String) extends Consumer with ActorPublisher[(ActorRef, A)] with ActorLogging {
  override val autoAck: Boolean = false

  override def receive: Receive = LoggingReceive {
    case _: CamelMessage if totalDemand == 0 =>
      sender() ! akka.actor.Status.Failure(new IllegalStateException("No demand for new messages"))

    case msg: CamelMessage =>
      try {
        onNext((sender(), implicitly[CamelMessageExtractor[A]].extract(msg)))
      } catch {
        case t: Throwable =>
          log.error(t, "Removing message from the broker because of error while extracting the message")
          sender() ! akka.camel.Ack
      }

    case Cancel => context stop self
  }
}

object CamelActorPublisher {
  def fromEndpointUri(endpointUri: String): Source[AckRefTup[CamelMessage], ActorRef] =
    Source.actorPublisher[AckRefTup[CamelMessage]](Props(new CamelActorPublisher(endpointUri)))

  def fromEndpointUriWithExtractor[A: CamelMessageExtractor](endpointUri: String): Source[AckRefTup[A], ActorRef] =
    Source.actorPublisher[AckRefTup[A]](Props(new CamelActorPublisherWithExtractor(endpointUri)))
} 
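A usage sketch (endpoint URI assumed): each stream element pairs the Camel sender with the message, so the consumer must acknowledge explicitly because autoAck is false:

import akka.camel.CamelMessage
import akka.stream.scaladsl.Sink

CamelActorPublisher.fromEndpointUri("activemq:queue:test")
  .to(Sink.foreach { case (ref, msg: CamelMessage) =>
    println(msg.body)       // process the payload
    ref ! akka.camel.Ack    // remove the message from the broker
  })
  .run()                    // implicit Materializer assumed in scope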
Example 154
Source File: AckedFlow.scala    From reactive-activemq   with Apache License 2.0
package akka.stream.integration
package activemq

import akka.actor.ActorRef
import akka.camel.CamelMessage
import akka.stream._
import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler }

import scala.concurrent.{ ExecutionContext, Future, Promise }

private[activemq] class AckedFlow[A, B](implicit ec: ExecutionContext) extends GraphStage[FlowShape[(ActorRef, B), AckTup[A, B]]] {
  val in = Inlet[(ActorRef, B)]("AckedFlow.in")
  val out = Outlet[AckTup[A, B]]("AckedFlow.out")

  override val shape: FlowShape[(ActorRef, B), AckTup[A, B]] = FlowShape.of(in, out)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
    var promises = Vector.empty[(Promise[A], Future[A])]
    setHandler(in, new InHandler {
      override def onPush(): Unit = {
        val (ref, b) = grab(in)
        val p = Promise[A]()
        val eventualResponse = p.future
        eventualResponse.onSuccess(successResponse(ref))
        eventualResponse.onFailure {
          case cause: Throwable =>
            ref ! akka.actor.Status.Failure(cause)
        }
        promises = promises.filterNot(_._1.isCompleted) :+ (p -> eventualResponse)
        push(out, p -> b)
      }
    })

    setHandler(out, new OutHandler {
      override def onPull(): Unit = {
        pull(in)
      }
    })
  }

  
  def successResponse(source: ActorRef): PartialFunction[A, Unit] = {
    case _ => source ! akka.camel.Ack
  }
}

class AckedResponseFlow[A, B](implicit ec: ExecutionContext, builder: MessageBuilder[A, CamelMessage]) extends AckedFlow[A, B] {
  override def successResponse(source: ActorRef): PartialFunction[A, Unit] = {
    case msg => source ! builder.build(msg)
  }
} 
Example 155
Source File: ActiveMqReqRespFlowTest.scala    From reactive-activemq   with Apache License 2.0
package akka.stream.integration
package activemq

import akka.actor.ActorRef
import akka.stream.integration.PersonDomain.Person

import scala.concurrent.Promise


class ActiveMqReqRespFlowTest extends ActiveMqTestSpec {

  behavior of "ActiveMqReqRespFlow"

  it should "support request-response for a single message" in {
    withBackendFlow { implicit backendFlow => flowProbe =>
      withReqRespBidiFlow("AckBidiFlowReqRespTestInput") { testFlow =>
        var ref: ActorRef = null
        withTestTopicPublisher("AckBidiFlowReqRespTestInput") { pub =>
          withTestTopicSubscriber("AckBidiFlowReqRespTestOutput") { sub =>

            // echo all received messages
            flowProbe.setAutoPilot(identity[Person] _)
            ref = testFlow.join(backendFlow).run()

            sub.request(2)
            pub.sendNext(testPerson1)

            sub.expectNextPF {
              case (p: Promise[Unit], `testPerson1`) => p.success(())
            }

            sub.cancel()
            pub.sendComplete()
          }
        }
        ref
      }
    }
  }

  it should "support request-response for a multiple messages" in {
    withBackendFlow { implicit backendFlow => flowProbe =>
      withReqRespBidiFlow("AckBidiFlowReqRespTestInput") { testFlow =>
        var ref: ActorRef = null
        withTestTopicPublisher("AckBidiFlowReqRespTestInput") { pub =>
          withTestTopicSubscriber("AckBidiFlowReqRespTestOutput") { sub =>

            // echo all received messages
            flowProbe.setAutoPilot(identity[Person] _)
            ref = testFlow.join(backendFlow).run()

            sub.request(2)

            pub.sendNext(testPerson1)
            sub.expectNextPF {
              case (p: Promise[Unit], `testPerson1`) => p.success(())
            }

            pub.sendNext(testPerson2)
            sub.expectNextPF {
              case (p: Promise[Unit], `testPerson2`) => p.success(())
            }

            sub.cancel()
            pub.sendComplete()
          }
        }
        ref
      }
    }
  }
} 
Example 156
Source File: ActiveMqTestSpec.scala    From reactive-activemq   with Apache License 2.0
package akka.stream.integration
package activemq

import akka.NotUsed
import akka.actor.ActorRef
import akka.stream.integration.PersonDomain.Person
import akka.stream.scaladsl.{ Flow, Keep }
import akka.stream.testkit.scaladsl.{ TestSink, TestSource }
import akka.stream.testkit.{ TestPublisher, TestSubscriber }
import akka.testkit.TestActor.AutoPilot
import akka.testkit.TestProbe
import JsonCamelMessageExtractor._
import JsonCamelMessageBuilder._

import scala.util.{ Failure, Success, Try }

  implicit def function1ToAutoPilot[S, T](f: S => T): AutoPilot = new AutoPilot {
    override def run(sender: ActorRef, msg: Any): AutoPilot = msg match {
      case s: S =>
        val tryT: Try[T] = Try(f(s))
        tryT match {
          case Success(t) =>
            sender ! t
            function1ToAutoPilot(f)
          case Failure(f) =>
            fail(s"Failed to apply supplied function to received message: $s", f)
        }
      case _ =>
        fail(s"Received message is not of the required type: $msg")
    }
  }
} 
Example 157
Source File: LeanBalancer.scala    From openwhisk   with Apache License 2.0
package org.apache.openwhisk.core.loadBalancer

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.stream.ActorMaterializer
import org.apache.openwhisk.common._
import org.apache.openwhisk.core.WhiskConfig._
import org.apache.openwhisk.core.connector._
import org.apache.openwhisk.core.containerpool.ContainerPoolConfig
import org.apache.openwhisk.core.entity.ControllerInstanceId
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.core.invoker.InvokerProvider
import org.apache.openwhisk.core.{ConfigKeys, WhiskConfig}
import org.apache.openwhisk.spi.SpiLoader
import org.apache.openwhisk.utils.ExecutionContextFactory
import pureconfig._
import pureconfig.generic.auto._
import org.apache.openwhisk.core.entity.size._

import scala.concurrent.Future


  private def makeALocalThreadedInvoker(): Unit = {
    implicit val ec = ExecutionContextFactory.makeCachedThreadPoolExecutionContext()
    val limitConfig: ConcurrencyLimitConfig = loadConfigOrThrow[ConcurrencyLimitConfig](ConfigKeys.concurrencyLimit)
    SpiLoader.get[InvokerProvider].instance(config, invokerName, messageProducer, poolConfig, limitConfig)
  }

  makeALocalThreadedInvoker()

  override protected val invokerPool: ActorRef = actorSystem.actorOf(Props.empty)

  override protected def releaseInvoker(invoker: InvokerInstanceId, entry: ActivationEntry) = {
    // Currently do nothing
  }

  override protected def emitMetrics() = {
    super.emitMetrics()
  }
}

object LeanBalancer extends LoadBalancerProvider {

  override def instance(whiskConfig: WhiskConfig, instance: ControllerInstanceId)(
    implicit actorSystem: ActorSystem,
    logging: Logging,
    materializer: ActorMaterializer): LoadBalancer = {

    new LeanBalancer(whiskConfig, createFeedFactory(whiskConfig, instance), instance)
  }

  def requiredProperties =
    ExecManifest.requiredProperties ++
      wskApiHost
} 
Example 158
Source File: LocalTransformServiceActor.scala    From mleap   with Apache License 2.0
package ml.combust.mleap.executor.service

import akka.actor.{Actor, ActorRef, Props, Status, Terminated}
import akka.stream.{ActorMaterializer, Materializer}
import ml.combust.mleap.executor.repository.RepositoryBundleLoader
import ml.combust.mleap.executor._
import ml.combust.mleap.executor.error.NotFoundException

import scala.util.{Failure, Success, Try}

object LocalTransformServiceActor {
  def props(loader: RepositoryBundleLoader,
            config: ExecutorConfig): Props = {
    Props(new LocalTransformServiceActor(loader, config))
  }

  object Messages {
    case object Close
  }
}

class LocalTransformServiceActor(loader: RepositoryBundleLoader,
                                 config: ExecutorConfig) extends Actor {
  import LocalTransformServiceActor.Messages

  private implicit val materializer: Materializer = ActorMaterializer()(context.system)

  private var lookup: Map[String, ActorRef] = Map()
  private var modelNameLookup: Map[ActorRef, String] = Map()

  override def postStop(): Unit = {
    for (child <- context.children) {
      context.unwatch(child)
      context.stop(child)
    }
  }

  override def receive: Receive = {
    case request: TransformFrameRequest => handleModelRequest(request)
    case request: GetBundleMetaRequest => handleModelRequest(request)
    case request: GetModelRequest => handleModelRequest(request)
    case request: CreateFrameStreamRequest => handleModelRequest(request)
    case request: CreateRowStreamRequest => handleModelRequest(request)
    case request: GetRowStreamRequest => handleModelRequest(request)
    case request: CreateFrameFlowRequest => handleModelRequest(request)
    case request: GetFrameStreamRequest => handleModelRequest(request)
    case request: CreateRowFlowRequest => handleModelRequest(request)
    case request: UnloadModelRequest => handleModelRequest(request)
    case request: LoadModelRequest => loadModel(request)
    case Messages.Close => context.stop(self)

    case Terminated(actor) => terminated(actor)
  }

  def handleModelRequest(request: ModelRequest): Unit = {
    lookup.get(request.modelName) match {
      case Some(actor) => actor.tell(request, sender)
      case None => sender ! Status.Failure(new NotFoundException(s"no model with name ${request.modelName}"))
    }
  }

  def loadModel(request: LoadModelRequest): Unit = {
    Try(context.actorOf(BundleActor.props(request, loader, config), request.modelName)) match {
      case Success(actor) =>
        lookup += (request.modelName -> actor)
        modelNameLookup += (actor -> request.modelName)
        context.watch(actor)
        actor.tell(request, sender)
      case Failure(err) => sender ! Status.Failure(err)
    }
  }

  private def terminated(ref: ActorRef): Unit = {
    val uri = modelNameLookup(ref)
    modelNameLookup -= ref
    lookup -= uri
  }
} 
Example 159
Source File: Flows.scala    From BusFloatingData   with Apache License 2.0
package de.nierbeck.floating.data.server

import akka.actor.{ActorRef, Props}
import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.stream.FlowShape
import akka.stream.scaladsl.{Flow, GraphDSL, Merge, Source}
import de.nierbeck.floating.data.domain.Vehicle
import GraphDSL.Implicits._
import de.nierbeck.floating.data.server._
import de.nierbeck.floating.data.server.actors.websocket._


object Flows {

  def graphFlowWithStats(router: ActorRef): Flow[Message, Message, _] = {
    Flow.fromGraph(GraphDSL.create() { implicit builder =>


      // create an actor source
      val source = Source.actorPublisher[String](VehiclePublisher.props(router))

      // Graph elements we'll use
      val merge = builder.add(Merge[String](2))
      val filter = builder.add(Flow[String].filter(_ => false))

      // get BBox from request and send it to route, return nothing ...
      val mapMsgToString = builder.add(Flow[Message].map[String] {
        case TextMessage.Strict(msg) => {
          println(s"received message: $msg")
          if (msg.contains("close")) {
            router ! msg
          } else if (msg.contains("spark")) {
            router ! SPARK
          } else if (msg.contains("flink")) {
            router ! FLINK
          } else {
            val bbox = toBoundingBox(msg)
            println(s"transformedt to bbox: $bbox")
            router ! bbox
          }
          ""
        }
      })
      //outgoing message ...
      val mapStringToMsg = builder.add(Flow[String].map[Message](x => TextMessage.Strict(x)))

      //add source to flow
      val vehiclesSource = builder.add(source)

      // connect the graph
      mapMsgToString ~> filter ~> merge // this part of the merge will never provide msgs
      vehiclesSource ~> merge ~> mapStringToMsg

      // expose ports
      FlowShape(mapMsgToString.in, mapStringToMsg.out)
    })
  }

} 
Example 160
Source File: ServiceApp.scala    From BusFloatingData   with Apache License 2.0
package de.nierbeck.floating.data.server

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpMethods._
import akka.http.scaladsl.model.ws.UpgradeToWebSocket
import akka.http.scaladsl.model.{HttpRequest, HttpResponse, Uri}
import akka.stream.ActorMaterializer
import de.nierbeck.floating.data.server.actors.websocket.{FLINK, RouterActor, SPARK, TiledVehiclesFromKafkaActor}

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success}

object ServiceApp extends RestService {

  import ServiceConfig._
  import system.dispatcher

  implicit val system = ActorSystem("service-api-http")
  implicit val mat = ActorMaterializer()

  override val logger = Logging(system, getClass.getName)
  override val session = CassandraConnector.connect()

  def main(args: Array[String]): Unit = {

    val router: ActorRef = system.actorOf(Props[RouterActor], "router")
    val sparkKafkaConsumer: ActorRef = system.actorOf(TiledVehiclesFromKafkaActor.props(router, "tiledVehicles", SPARK), "Kafka-Consumer-Spark")
    val flinkKafkaConsumer: ActorRef = system.actorOf(TiledVehiclesFromKafkaActor.props(router, "flinkTiledVehicles", FLINK), "Kafka-Consumer-Flink")


    val requestHandler: HttpRequest => HttpResponse = {
      case req@HttpRequest(GET, Uri.Path("/ws/vehicles"), _, _, _) =>
        req.header[UpgradeToWebSocket] match {
          case Some(upgrade) => upgrade.handleMessages(Flows.graphFlowWithStats(router))
          case None => HttpResponse(400, entity = "Not a valid websocket request!")
        }
      case _: HttpRequest => HttpResponse(404, entity = "Unknown resource!")
    }

    Http()
      .bindAndHandle(route(), serviceInterface, servicePort)
      .onComplete {
        case Success(_) => logger.info(s"Successfully bound to $serviceInterface:$servicePort")
        case Failure(e) => logger.error(s"Failed !!!! ${e.getMessage}")
      }

    Http()
      .bindAndHandleSync(requestHandler, serviceInterface, 8001)
      .onComplete {
        case Success(_) => logger.info(s"Successfully started Server to $serviceInterface:8001")
        case Failure(e) => logger.error(s"Failed !!!! ${e.getMessage}")
      }

    Await.ready(system.whenTerminated, Duration.Inf)
    CassandraConnector.close(session)
  }

} 
Example 161
Source File: HomeController.scala    From Aton   with GNU General Public License v3.0
package controllers

import akka.actor.{ActorRef, ActorSystem}
import com.google.inject.name.Named
import com.google.inject.{Inject, Singleton}
import dao.{DatabaseInitializer, LaboratoryDAO, UserDAO}
import model.{Role, User}
import play.api.{Environment, Logger}
import play.api.i18n.MessagesApi
import services.{LaboratoryService, UserService}
import views.html._

import scala.concurrent.{Await, ExecutionContext}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._


@Singleton
class HomeController @Inject()(
    databaseInitializer: DatabaseInitializer,
    laboratoryService: LaboratoryService,
    @Named("computerChecker") computerChecker: ActorRef,
    actorSystem: ActorSystem
)(implicit userService: UserService, executionContext: ExecutionContext, environment: Environment)
  extends ControllerWithNoAuthRequired {
  val logger = Logger("HomeController")

  play.Logger.debug("Configuring Computer Checker...")
  actorSystem.scheduler.schedule(0.microseconds, 5.minutes, computerChecker, "Execute")
  play.Logger.debug("Computer Checker configured.")

  logger.debug("Initializing database")
  Await.result(databaseInitializer.initialize(), 2.seconds)
  logger.debug("Database initialized")

  def home = AsyncStack { implicit request =>
    play.Logger.debug("Logged user: " + loggedIn)
    implicit val (username: Option[String], isAdmin: Boolean) = loggedIn match {
      case Some(User(usernameString, password, Some(name), role)) => (Some(name), role == Role.Administrator)
      case Some(User(usernameString, password, None, role)) => (Some(usernameString), role == Role.Administrator)
      case _ => (None, false)
    }
    logger.debug("Petición de listar todos los laboratorios con el siguiente request recibida " + request)
    logger.debug("User: " + username + ", is admin: " + isAdmin)
    laboratoryService.listAll.map { _ =>
      Ok(index("Aton"))
    }
  }

  def about = StackAction { implicit request =>
    implicit val (username: Option[String], isAdmin: Boolean) = loggedIn match {
      case Some(User(usernameString, password, Some(name), role)) => (Some(name), role == Role.Administrator)
      case Some(User(usernameString, password, None, role)) => (Some(usernameString), role == Role.Administrator)
      case _ => (None, false)
    }

    Ok//(index(messagesApi("about"),views.html.about()))
  }
} 
Example 162
Source File: VisualMailboxMetricClient.scala    From akka-visualmailbox   with Apache License 2.0
package de.aktey.akka.visualmailbox

import java.net.InetSocketAddress

import akka.actor.{Actor, ActorRef, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider, Props}
import akka.io.{IO, Udp}
import akka.util.ByteString
import de.aktey.akka.visualmailbox.packing.Packing


object VisualMailboxMetricClient extends ExtensionId[VisualMailboxMetricClient] with ExtensionIdProvider {
  override def createExtension(system: ExtendedActorSystem): VisualMailboxMetricClient = {
    new VisualMailboxMetricClient(
      system,
      VisualMailboxMetricClientConfig.fromConfig(system.settings.config)
    )
  }

  override def lookup(): ExtensionId[_ <: Extension] = VisualMailboxMetricClient
}

class VisualMailboxMetricClient(system: ExtendedActorSystem, config: VisualMailboxMetricClientConfig) extends Extension {
  private val udpSender = system.systemActorOf(
    Props(new UdpSender(config.serverAddress)).withDispatcher("de.aktey.akka.visualmailbox.client.dispatcher"),
    "de-aktey-akka-visualmailbox-sender"
  )
  system.systemActorOf(
    Props(new VisualMailboxMetricListener(udpSender)).withDispatcher("de.aktey.akka.visualmailbox.client.dispatcher"),
    "de-aktey-akka-visualmailbox-receiver"
  )
}

class VisualMailboxMetricListener(udpSender: ActorRef) extends Actor {

  import context._

  import concurrent.duration._

  var buffer: List[VisualMailboxMetric] = Nil

  system.eventStream.subscribe(self, classOf[VisualMailboxMetric])
  system.scheduler.schedule(1.second, 1.second, self, "flush")

  @scala.throws[Exception](classOf[Exception])
  override def postStop(): Unit = {
    system.eventStream.unsubscribe(self)
  }

  def receive: Receive = {
    case v: VisualMailboxMetric =>
      buffer ::= v
      if (buffer.size > 40) self ! "flush"

    case "flush" if buffer.nonEmpty =>
      udpSender ! Packing.pack(MetricEnvelope(1, Packing.pack(buffer)))
      buffer = Nil
  }
}

class UdpSender(remote: InetSocketAddress) extends Actor {

  import context._

  IO(Udp) ! Udp.SimpleSender

  def receive = {
    case Udp.SimpleSenderReady =>
      context.become(ready(sender()))
  }

  def ready(send: ActorRef): Receive = {
    case msg: Array[Byte] =>
      send ! Udp.Send(ByteString(msg), remote)
  }
} 
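Activating the extension is a single call at system startup (a sketch; the dispatcher and server address come from the client configuration):

import akka.actor.ActorSystem
import de.aktey.akka.visualmailbox.VisualMailboxMetricClient

val system = ActorSystem("monitored-system")
VisualMailboxMetricClient(system) // boots the UDP sender and the metric listener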
Example 163
Source File: Routing.scala    From akka-visualmailbox   with Apache License 2.0
package de.aktey.akka.visualmailbox.web

import akka.actor.ActorRef
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.scaladsl.Source
import de.heikoseeberger.akkasse.ServerSentEvent

import scala.concurrent.duration._


object Routing {
  def api(metricFlow: Source[String, ActorRef]): Route = pathPrefix("api") {
    import de.heikoseeberger.akkasse.EventStreamMarshalling._

    path("events") {
      complete {
        metricFlow
          .map(s => ServerSentEvent(s, "vmm"))
          .keepAlive(20.seconds, () => ServerSentEvent.heartbeat)
      }
    }
  }

  val static: Route = pathEndOrSingleSlash {
    getFromResource("web/index.html")
  } ~ getFromResourceDirectory("web")

  def root(metricFlow: Source[String, ActorRef]): Route = api(metricFlow) ~ static
} 
Example 164
Source File: MetricFlow.scala    From akka-visualmailbox   with Apache License 2.0
package de.aktey.akka.visualmailbox

import akka.actor.{ActorRef, Props}
import akka.stream.actor.ActorPublisher
import akka.stream.scaladsl.Source

import scala.annotation.tailrec

object MetricFlow {

  // subscriber as flow source
  // that registers itself with a router
  class MetricsSubscriber(router: ActorRef) extends ActorPublisher[VisualMailboxMetric] {

    import akka.stream.actor.ActorPublisherMessage._

    val MaxBufferSize = 100
    var buf = Vector.empty[VisualMailboxMetric]

    router ! self

    def receive = {
      case metric: VisualMailboxMetric if buf.size == MaxBufferSize =>
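        // buffer full: silently drop the incoming metric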
      case metric: VisualMailboxMetric =>
        if (buf.isEmpty && totalDemand > 0)
          onNext(metric)
        else {
          buf :+= metric
          deliverBuf()
        }
      case Request(_) =>
        deliverBuf()
      case Cancel =>
        context.stop(self)
    }

    @tailrec
    private def deliverBuf(): Unit =
      if (totalDemand > 0) {
        if (totalDemand <= Int.MaxValue) {
          val (use, keep) = buf.splitAt(totalDemand.toInt)
          buf = keep
          use foreach onNext
        } else {
          val (use, keep) = buf.splitAt(Int.MaxValue)
          buf = keep
          use foreach onNext
          deliverBuf()
        }
      }
  }

  object MetricsSubscriber {
    def props(router: ActorRef) = Props(new MetricsSubscriber(router))
  }

  def metricSource(router: ActorRef): Source[String, ActorRef] =
    Source.actorPublisher[VisualMailboxMetric](MetricsSubscriber.props(router)).map {
      case VisualMailboxMetric(sender, receiver, receiverMailBoxSize, meassureTimeMillies) =>
        s"""{
            |  "sender": "$sender",
            |  "receiver": "$receiver",
            |  "receiverMailBoxSize": $receiverMailBoxSize,
            |  "meassureTimeMillies": $meassureTimeMillies
            |}""".stripMargin
    }
} 
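A sketch tying the pieces together: the metric source feeds the SSE route from the Routing example above, using the MetricsRouter shown two examples below (host and port are assumptions):

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import de.aktey.akka.visualmailbox.web.Routing

implicit val system = ActorSystem("visualmailbox-web")
implicit val materializer = ActorMaterializer()

val router = system.actorOf(MetricsRouter.props(), "router")
Http().bindAndHandle(Routing.root(MetricFlow.metricSource(router)), "0.0.0.0", 8080)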
Example 165
Source File: DataSourceEndpoint.scala    From akka-visualmailbox   with Apache License 2.0
package de.aktey.akka.visualmailbox.data

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.io.Udp.Received
import de.aktey.akka.visualmailbox.packing.Packing
import de.aktey.akka.visualmailbox.{MetricEnvelope, VisualMailboxMetric}

import scala.util.{Failure, Success}


class DataSourceEndpoint(router: ActorRef) extends Actor with ActorLogging {
  def receive = {
    case Received(datagram, _) => Packing.unpack[MetricEnvelope](datagram.to[Array]) match {
      case Success(MetricEnvelope(1, payload)) =>
        Packing.unpack[List[VisualMailboxMetric]](payload) match {
          case Success(list) => list.foreach(router ! _)
          case Failure(e) => log.error(e, "unmarshal error")
        }
      case Success(MetricEnvelope(version, _)) => log.warning("unknown protocol version: " + version)
      case Failure(e) => log.error(e, "unmarshal error")
    }
  }
}

object DataSourceEndpoint {
  def props(router: ActorRef) = Props(new DataSourceEndpoint(router))
} 
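A sketch of binding the endpoint to a UDP socket with akka-io (listen address and port are assumptions):

import java.net.InetSocketAddress
import akka.actor.{Actor, ActorRef}
import akka.io.{IO, Udp}

class DataSourceBinder(router: ActorRef) extends Actor {
  import context.system

  // Datagrams arriving on the socket reach DataSourceEndpoint as Udp.Received.
  IO(Udp) ! Udp.Bind(context.actorOf(DataSourceEndpoint.props(router)), new InetSocketAddress("0.0.0.0", 40700))

  def receive = {
    case Udp.Bound(local) => println(s"listening on $local")
  }
}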
Example 166
Source File: MetricsRouter.scala    From akka-visualmailbox   with Apache License 2.0
package de.aktey.akka.visualmailbox

import akka.actor.{Actor, ActorLogging, ActorRef, Props, Terminated}

class MetricsRouter extends Actor with ActorLogging {

  import context._

  var routees: Set[ActorRef] = Set.empty

  override def postStop() {
    routees foreach unwatch
  }

  def receive = {
    case registrar: ActorRef =>
      watch(registrar)
      routees += registrar
      if (log.isDebugEnabled) log.debug(s"""{"type":"registerd","registered":"$registrar","routees":${routees.size}}""")
    case Terminated(ref) =>
      unwatch(ref)
      routees -= ref
      if (log.isDebugEnabled) log.debug(s"""{"type":"unregistered","terminated":"$ref","routees":${routees.size}}""")
    case msg =>
      routees foreach (_ forward msg)
  }
}

object MetricsRouter {
  def props() = Props(new MetricsRouter)
} 
Example 167
Source File: AuditSrv.scala    From Cortex   with GNU Affero General Public License v3.0
package org.thp.cortex.services

import javax.inject.{Inject, Singleton}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration

import play.api.Logger

import akka.actor.{Actor, ActorRef}
import org.thp.cortex.models.JobStatus

import org.elastic4play.models.BaseEntity
import org.elastic4play.services._

object AuditActor {
  case class Register(jobId: String, timeout: FiniteDuration)
  case class Unregister(jobId: String, actorRef: ActorRef)
  case class JobEnded(jobId: String, status: JobStatus.Type)
}

@Singleton
class AuditActor @Inject()(eventSrv: EventSrv, implicit val ec: ExecutionContext) extends Actor {

  import AuditActor._

  object EntityExtractor {
    def unapply(e: BaseEntity) = Some((e.model, e.id, e.routing))
  }
  var registration                    = Map.empty[String, Seq[ActorRef]]
  private[AuditActor] lazy val logger = Logger(getClass)

  override def preStart(): Unit = {
    eventSrv.subscribe(self, classOf[EventMessage])
    super.preStart()
  }

  override def postStop(): Unit = {
    eventSrv.unsubscribe(self)
    super.postStop()
  }

  override def receive: Receive = {
    case Register(jobId, timeout) ⇒
      logger.info(s"Register new listener for job $jobId ($sender)")
      val newActorList = registration.getOrElse(jobId, Nil) :+ sender
      registration += (jobId → newActorList)
      context.system.scheduler.scheduleOnce(timeout, self, Unregister(jobId, sender))

    case Unregister(jobId, actorRef) ⇒
      logger.info(s"Unregister listener for job $jobId ($actorRef)")
      val newActorList = registration.getOrElse(jobId, Nil).filterNot(_ == actorRef)
      registration += (jobId → newActorList)

    case AuditOperation(EntityExtractor(model, id, routing), action, details, authContext, date) ⇒
      if (model.modelName == "job" && action == AuditableAction.Update) {
        logger.info(s"Job $id has be updated (${details \ "status"})")
        val status = (details \ "status").asOpt[JobStatus.Type].getOrElse(JobStatus.InProgress)
        if (status != JobStatus.InProgress) registration.getOrElse(id, Nil).foreach { aref ⇒
          aref ! JobEnded(id, status)
        }
      }
  }
} 
Example 168
Source File: TransactionActor.scala    From changestream   with MIT License
package changestream.actors

import java.util.UUID

import akka.actor.{ Actor, ActorRef, ActorRefFactory }
import changestream.events.MutationWithInfo

import changestream.events._
import kamon.Kamon
import org.slf4j.LoggerFactory

class TransactionActor(getNextHop: ActorRefFactory => ActorRef) extends Actor {
  protected val log = LoggerFactory.getLogger(getClass)
  protected val batchSizeMetric = Kamon.histogram("changestream.binlog_event.row_count")
  protected val transactionSizeMetric = Kamon.histogram("changestream.transaction.row_count")

  protected val nextHop = getNextHop(context)

  
  protected var mutationCount: Long = 1
  protected var currentGtid: Option[String] = None
  protected var previousMutation: Option[MutationWithInfo] = None

  def receive = {
    case BeginTransaction =>
      log.debug("Received BeginTransacton")
      mutationCount = 1
      currentGtid = Some(UUID.randomUUID.toString)
      previousMutation = None

    case Gtid(guid) =>
      log.debug("Received GTID for transaction: {}", guid)
      currentGtid = Some(guid)

    case event: MutationWithInfo =>
      log.debug("Received Mutation for tableId: {}", event.mutation.tableId)
      batchSizeMetric.record(event.mutation.rows.length)

      currentGtid match {
        case None =>
          nextHop ! event
        case Some(gtid) =>
          previousMutation.foreach { mutation =>
            log.debug("Adding transaction info and forwarding to the {} actor.", nextHop.path.name)
            nextHop ! mutation
          }
          previousMutation = Some(event.copy(
            transaction = Some(TransactionInfo(
              gtid = gtid,
              currentRow = mutationCount
            ))
          ))
          mutationCount += event.mutation.rows.length
      }

    case CommitTransaction(position) =>
      log.debug("Received Commit with position {}", position)
      previousMutation.foreach { mutation =>
        log.debug("Adding transaction info and forwarding to the {} actor.", nextHop.path.name)
        nextHop ! mutation.copy(
          transaction = mutation.transaction.map { txInfo =>
            txInfo.copy(lastMutationInTransaction = true)
          },
          // TODO: this is unfortunate... because we are now essentially saving the "last safe position" we are guaranteed to replay events when we shut down un-gracefully
          nextPosition = mutation.nextPosition.split(":")(0) + ":" + position.toString
        )
      }
      transactionSizeMetric.record(mutationCount)
      mutationCount = 1
      currentGtid = None
      previousMutation = None

    case RollbackTransaction =>
      log.debug("Received Rollback")

      // TODO: this probably doesn't work for mysql configurations that send a rollback event (vs only sending committed events).. consider removing the rollback handling
      previousMutation.foreach { mutation =>
        log.debug("Adding transaction info and forwarding to the {} actor.", nextHop.path.name)
        nextHop ! mutation.copy(
          transaction = mutation.transaction.map { txInfo =>
            txInfo.copy(lastMutationInTransaction = true)
          }
        )
      }
      transactionSizeMetric.record(mutationCount)
      mutationCount = 1
      currentGtid = None
      previousMutation = None
  }
} 
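A sketch of wiring the actor into a pipeline; the downstream JsonFormatterActor is hypothetical and stands for whichever actor should receive the enriched mutations:

import akka.actor.{ActorSystem, Props}
import changestream.actors.TransactionActor

val system = ActorSystem("changestream-example")
val formatter = system.actorOf(Props[JsonFormatterActor], "formatter") // hypothetical next hop
val transactionActor = system.actorOf(
  Props(new TransactionActor(_ => formatter)), "transactionActor"
)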
Example 169
Source File: StdoutActor.scala    From changestream   with MIT License
package changestream.actors

import akka.actor.{Actor, ActorRef, ActorRefFactory}
import changestream.actors.PositionSaver.EmitterResult
import changestream.events.MutationWithInfo
import com.typesafe.config.{Config, ConfigFactory}
import kamon.Kamon

class StdoutActor(getNextHop: ActorRefFactory => ActorRef,
                  config: Config = ConfigFactory.load().getConfig("changestream")) extends Actor {
  protected val nextHop = getNextHop(context)
  protected val counterMetric = Kamon.counter("changestream.emitter.total").refine("emitter" -> "stdout", "result" -> "success")

  def receive = {
    case MutationWithInfo(_, pos, _, _, Some(message: String)) =>
      println(message)
      counterMetric.increment()
      nextHop ! EmitterResult(pos)
  }
} 
Example 170
Source File: SchemaManagerRouter.scala    From schedoscope   with Apache License 2.0
package org.schedoscope.scheduler.actors

import akka.actor.SupervisorStrategy._
import akka.actor.{Actor, ActorInitializationException, ActorRef, OneForOneStrategy, Props}
import akka.event.Logging
import akka.routing.RoundRobinPool
import org.schedoscope.conf.SchedoscopeSettings
import org.schedoscope.scheduler.messages._
import org.schedoscope.scheduler.utils.BackOffSupervision
import org.schedoscope.schema.RetryableSchemaManagerException

import scala.concurrent.duration._


  override val supervisorStrategy =
    OneForOneStrategy(maxNrOfRetries = -1) {
      case _: RetryableSchemaManagerException => Restart
      case _: ActorInitializationException => Restart
      case _ => Escalate
    }

  override def preStart {
    metadataLoggerActor = actorOf(
      MetadataLoggerActor.props(settings.jdbcUrl, settings.metastoreUri, settings.kerberosPrincipal),
      "metadata-logger")
    partitionCreatorActor = actorOf(
      PartitionCreatorActor.props(settings.jdbcUrl, settings.metastoreUri, settings.kerberosPrincipal, self)
        .withRouter(new RoundRobinPool(settings.metastoreConcurrency)),
      "partition-creator")
  }

  def scheduleTick(managedActor: ActorRef, backOffTime: FiniteDuration) {
    system.scheduler.scheduleOnce(backOffTime, managedActor, "tick")
  }

  def manageActorLifecycle(metaActor: ActorRef) {
    val slot = settings.backOffSlotTime millis
    val delay = settings.backOffMinimumDelay millis

    val backOffTime = metastoreActorsBackOffSupervision.manageActorLifecycle(
      managedActor = metaActor,
      backOffSlotTime = slot,
      backOffMinimumDelay = delay)

    scheduleTick(metaActor, backOffTime)
  }


  def receive = {

    case "tick" => manageActorLifecycle(sender)

    case m: CheckOrCreateTables => partitionCreatorActor forward m

    case a: AddPartitions => partitionCreatorActor forward a

    case s: SetViewVersion => metadataLoggerActor forward s

    case l: LogTransformationTimestamp => metadataLoggerActor forward l

    case g: GetMetaDataForMaterialize => partitionCreatorActor forward g

  }
}

object SchemaManagerRouter {
  def props(settings: SchedoscopeSettings) = (Props(classOf[SchemaManagerRouter], settings)).withDispatcher("akka.actor.schema-manager-dispatcher")
} 
Example 171
Source File: BackOffSupervision.scala    From schedoscope   with Apache License 2.0
package org.schedoscope.scheduler.utils

import akka.actor.{ActorRef, ActorSystem}
import org.slf4j.LoggerFactory

import scala.collection.mutable.HashMap
import scala.concurrent.duration.{FiniteDuration, _}


  def manageActorLifecycle(managedActor: ActorRef, backOffSlotTime: FiniteDuration = null, backOffMinimumDelay: FiniteDuration = null): FiniteDuration = {
    val managedActorName = managedActor.path.toStringWithoutAddress

    if (actorBackOffWaitTime.contains(managedActorName)) {
      val newBackOff = actorBackOffWaitTime(managedActorName).nextBackOff
      actorBackOffWaitTime.put(managedActorName, newBackOff)
      log.warn(s"$managerName: Set new back-off waiting " +
        s"time to value ${newBackOff.backOffWaitTime} for rebooted actor ${managedActorName}; " +
        s"(retries=${newBackOff.retries}, resets=${newBackOff.resets}, total-retries=${newBackOff.totalRetries})")

      //schedule tick response based on backoff
      newBackOff.backOffWaitTime
    } else {
      val backOff = ExponentialBackOff(backOffSlotTime = backOffSlotTime, constantDelay = backOffMinimumDelay)
      log.debug(s"$managerName: Set initial back-off waiting " +
        s"time to value ${backOff.backOffWaitTime} for booted actor ${managedActorName}; " +
        s"(retries=${backOff.retries}, resets=${backOff.resets}, total-retries=${backOff.totalRetries})")
      actorBackOffWaitTime.put(managedActorName, backOff)

      //schedule immediate tick response
      0 millis
    }
  }

} 
Example 172
Source File: MetadataLoggerActorTest.scala    From schedoscope   with Apache License 2.0
package org.schedoscope.scheduler.actors

import akka.actor.{Actor, ActorRef, ActorSystem}
import akka.testkit.{EventFilter, ImplicitSender, TestActorRef, TestKit, TestProbe}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import org.schedoscope.Schedoscope


class MetadataLoggerActorTest extends TestKit(ActorSystem("schedoscope",
  ConfigFactory.parseString("""akka.loggers = ["akka.testkit.TestEventListener"]""")))
  with ImplicitSender
  with FlatSpecLike
  with Matchers
  with BeforeAndAfterAll {

  override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
  }

  val msgHub = TestProbe()
  val settings = Schedoscope.settings


  case class ToPCA(msg: String)

  class TestRouter(to: ActorRef) extends Actor {
    val pca = TestActorRef(new MetadataLoggerActor("", "", "") {
      override def getSchemaManager(jdbcUrl: String, metaStoreUri: String, serverKerberosPrincipal: String) = {
        null
      }

      override def schemaRouter = msgHub.ref
    })

    def receive = {
      case ToPCA(m) => pca forward m

      case "tick" => to forward "tick"
    }
  }

  it should "send tick msg upon start" in {
    TestActorRef(new TestRouter(msgHub.ref))
    msgHub.expectMsg("tick")
  }

  it should "change to active state upon receive of tick msg" in {
    val router = TestActorRef(new TestRouter(msgHub.ref))

    EventFilter.info(message = "METADATA LOGGER ACTOR: changed to active state.", occurrences = 1) intercept {
      msgHub.send(router, toPCA("tick"))
    }
  }

} 
Example 173
Source File: SchemaManagerRouterTest.scala    From schedoscope   with Apache License 2.0
package org.schedoscope.scheduler.actors

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.testkit.{TestActorRef, TestKit, TestProbe}
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import org.schedoscope.conf.SchedoscopeSettings
import org.schedoscope.{Settings, TestUtils}

import scala.concurrent.duration._

class SchemaManagerRouterTest extends TestKit(ActorSystem("schedoscope"))
  with FlatSpecLike
  with Matchers
  with BeforeAndAfterAll {

  override def afterAll() {
    TestKit.shutdownActorSystem(system)
  }

  // common vars
  val settings: SchedoscopeSettings = Settings()


  class ForwardChildActor(to: ActorRef) extends Actor {
    def receive = {
      case x => to.forward(x)
    }
  }

  trait SchemaManagerRouterTest {

    val partitionCreatorRouterActor = TestProbe()
    val metadataLoggerActorTest = TestProbe()

    def getSchemaManager(s: SchedoscopeSettings): ActorRef = {

      TestActorRef(new SchemaManagerRouter(s) {
        override def preStart {
          context.actorOf(Props(new ForwardChildActor(partitionCreatorRouterActor.ref)),
            "partition-creator")
          context.actorOf(Props(new ForwardChildActor(metadataLoggerActorTest.ref)),
            "metadata-logger")
        }
      })
    }
  }

  it should "set an exponential backoff time for restarting drivers" in
    new SchemaManagerRouterTest {

      val newSettings = TestUtils.createSettings(
        "schedoscope.metastore.actor-backoff-slot-time=10",
        "schedoscope.metastore.actor-backoff-minimum-delay=0")


      var schemaManagerRouter: ActorRef = getSchemaManager(newSettings)

      partitionCreatorRouterActor.send(schemaManagerRouter, "tick")
      partitionCreatorRouterActor.expectMsg("tick")
      partitionCreatorRouterActor.send(schemaManagerRouter, "tick")
      partitionCreatorRouterActor.expectMsg("tick")

      metadataLoggerActorTest.send(schemaManagerRouter, "tick")
      metadataLoggerActorTest.expectMsg("tick")
      metadataLoggerActorTest.send(schemaManagerRouter, "tick")
      metadataLoggerActorTest.expectMsg("tick")

    }

  it should "set an exponential backoff time too big for the test to get it" in
    new SchemaManagerRouterTest {

      val newSettings = TestUtils.createSettings(
        "schedoscope.metastore.actor-backoff-slot-time=10000",
        "schedoscope.metastore.actor-backoff-minimum-delay=10000")


      var schemaManagerRouter: ActorRef = getSchemaManager(newSettings)

      partitionCreatorRouterActor.send(schemaManagerRouter, "tick")
      partitionCreatorRouterActor.expectMsg("tick")
      partitionCreatorRouterActor.send(schemaManagerRouter, "tick")
      partitionCreatorRouterActor.expectNoMsg(3 seconds)

      metadataLoggerActorTest.send(schemaManagerRouter, "tick")
      metadataLoggerActorTest.expectMsg("tick")
      metadataLoggerActorTest.send(schemaManagerRouter, "tick")
      metadataLoggerActorTest.expectNoMsg(3 seconds)

    }

} 
Example 174
Source File: PartitionCreatorActorTest.scala    From schedoscope   with Apache License 2.0
package org.schedoscope.scheduler.actors

import akka.actor.{Actor, ActorRef, ActorSystem}
import akka.testkit.{EventFilter, ImplicitSender, TestActorRef, TestKit, TestProbe}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import org.schedoscope.Schedoscope

class PartitionCreatorActorTest extends TestKit(ActorSystem("schedoscope",
  ConfigFactory.parseString("""akka.loggers = ["akka.testkit.TestEventListener"]""")))
  with ImplicitSender
  with FlatSpecLike
  with Matchers
  with BeforeAndAfterAll {

  override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
  }

  val msgHub = TestProbe()
  val settings = Schedoscope.settings


  case class ToPCA(msg: String)

  class TestRouter(to: ActorRef) extends Actor {
    val pca = TestActorRef(new PartitionCreatorActor("", "", "", msgHub.ref) {
      override def getSchemaManager(jdbcUrl: String, metaStoreUri: String, serverKerberosPrincipal: String) = {
        null
      }

      override def schemaRouter = msgHub.ref
    })

    def receive = {
      case ToPCA(m) => pca forward (m)

      case "tick" => to forward "tick"
    }
  }

  it should "send tick msg upon start" in {
    TestActorRef(new TestRouter(msgHub.ref))
    msgHub.expectMsg("tick")
  }

  it should "change to active state upon receive of tick msg" in {
    val router = TestActorRef(new TestRouter(msgHub.ref))

    EventFilter.info(message = "PARTITION CREATOR ACTOR: changed to active state.", occurrences = 1) intercept {
      msgHub.send(router, ToPCA("tick"))
    }
  }

} 
Example 175
Source File: Routes.scala    From Learn-Scala-Programming   with MIT License 5 votes vote down vote up
package ch14

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.directives.MethodDirectives.{delete, get, post}
import akka.http.scaladsl.server.directives.PathDirectives.path
import akka.http.scaladsl.server.directives.RouteDirectives.complete
import akka.pattern.ask
import akka.util.Timeout
import ch14.Commands._
import ch14.Events.{
  ArticleCreated,
  ArticleDeleted,
  ArticlesPurchased,
  ArticlesRestocked
}

import scala.concurrent.{ExecutionContext, Future}

trait Routes extends JsonSupport {
  implicit def system: ActorSystem
  def inventory: ActorRef
  def config: Config

  implicit lazy val timeout: Timeout = config.timeout
  implicit lazy val ec: ExecutionContext = system.dispatcher

  lazy val articlesRoutes: Route =
    pathPrefix("articles") {
      concat(
        path(Segment) { name =>
          concat(
            post {
              val changedInventory: Future[Option[ArticleCreated]] =
                (inventory ? CreateArticle(name, 0))
                  .mapTo[Option[ArticleCreated]]
              onSuccess(changedInventory) {
                case None        => complete(StatusCodes.Conflict)
                case Some(event) => complete(StatusCodes.Created, event)
              }
            },
            delete {
              val changedInventory: Future[Option[ArticleDeleted]] =
                (inventory ? DeleteArticle(name)).mapTo[Option[ArticleDeleted]]
              rejectEmptyResponse {
                complete(changedInventory)
              }
            },
            get {
              complete((inventory ? GetArticle(name)).mapTo[Inventory])
            }
          )
        }
      )
    }

  lazy val inventoryRoutes: Route =
    path("inventory") {
      get {
        complete((inventory ? GetInventory).mapTo[Inventory])
      }
    } ~
      path("purchase") {
        post {
          entity(as[PurchaseArticles]) { order =>
            val response: Future[Option[ArticlesPurchased]] =
              (inventory ? order).mapTo[Option[ArticlesPurchased]]
            onSuccess(response) {
              case None        => complete(StatusCodes.Conflict)
              case Some(event) => complete(event)
            }
          }
        }
      } ~
      path("restock") {
        post {
          entity(as[RestockArticles]) { stock =>
            val response: Future[Option[ArticlesRestocked]] =
              (inventory ? stock).mapTo[Option[ArticlesRestocked]]
            complete(response)
          }
        }
      }


  lazy val routes: Route = articlesRoutes ~ inventoryRoutes

} 
Example 176
Source File: RoutesSpec.scala    From Learn-Scala-Programming   with MIT License 5 votes vote down vote up
package ch14

import akka.actor.ActorRef
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.testkit.ScalatestRouteTest
import ch14.Commands.{PurchaseArticles, RestockArticles}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.duration._

class RoutesSpec
    extends WordSpec
    with Matchers
    with ScalaFutures
    with ScalatestRouteTest
    with Routes {

  override lazy val config: Config = Config.load()

  DB.initialize(config.database)

  override lazy val inventory: ActorRef =
    system.actorOf(InventoryActor.props, "inventory")

  "Routes" should {
    "return no articles in the beginning" in {
      val request = HttpRequest(uri = "/inventory")
      implicit val timeout: Duration = 3.seconds
      request ~> routes ~> check {
        status shouldBe StatusCodes.OK
        contentType shouldBe ContentTypes.`application/json`
        entityAs[String] shouldBe """{"state":{}}"""
      }
    }
    "be able to add article (POST /articles/eggs)" in {
      val request = Post("/articles/eggs")
      request ~> routes ~> check {
        status shouldBe StatusCodes.Created
        contentType shouldBe ContentTypes.`application/json`
        entityAs[String] shouldBe """{"name":"eggs","count":0}"""
      }
    }
    "not be able to delete article (delete /articles/no)" in {
      val request = Delete("/articles/no-such-article")
      request ~> Route.seal(routes) ~> check {
        status shouldBe StatusCodes.NotFound
      }
    }
    "not be able to add article twice (POST /articles/eggs)" in {
      val request = Post("/articles/eggs")
      request ~> routes ~> check {
        status shouldBe StatusCodes.Conflict
      }
    }
    "be able to restock articles (POST /restock)" in {
      val restock = RestockArticles(Map("eggs" -> 10, "chocolate" -> 20))
      val entity  = Marshal(restock).to[MessageEntity].futureValue // futureValue is from ScalaFutures
      val request = Post("/restock").withEntity(entity)
      request ~> routes ~> check {
        status shouldBe StatusCodes.OK
        contentType shouldBe ContentTypes.`application/json`
        entityAs[String] shouldBe """{"stock":{"eggs":10,"chocolate":20}}"""
      }
    }
    "be able to purchase articles (POST /purchase)" in {
      val restock = PurchaseArticles(Map("eggs" -> 5, "chocolate" -> 10))
      val entity  = Marshal(restock).to[MessageEntity].futureValue // futureValue is from ScalaFutures
      val request = Post("/purchase").withEntity(entity)
      request ~> routes ~> check {
        status shouldBe StatusCodes.OK
        contentType shouldBe ContentTypes.`application/json`
        entityAs[String] shouldBe """{"order":{"eggs":5,"chocolate":10}}"""
      }
    }
    "not be able to purchase articles (POST /purchase)" in {
      val restock = PurchaseArticles(Map("eggs" -> 50, "chocolate" -> 10))
      val entity  = Marshal(restock).to[MessageEntity].futureValue // futureValue is from ScalaFutures
      val request = Post("/purchase").withEntity(entity)
      request ~> routes ~> check {
        status shouldBe StatusCodes.Conflict
      }
    }
  }
} 
Example 177
Source File: SubscriberImpl.scala    From zio-akka-cluster   with Apache License 2.0 5 votes vote down vote up
package zio.akka.cluster.pubsub.impl

import akka.actor.{ Actor, ActorRef, ActorSystem, PoisonPill, Props }
import akka.cluster.pubsub.DistributedPubSubMediator.{ Subscribe, SubscribeAck }
import zio.Exit.{ Failure, Success }
import zio.akka.cluster.pubsub.impl.SubscriberImpl.SubscriberActor
import zio.akka.cluster.pubsub.{ MessageEnvelope, Subscriber }
import zio.{ Promise, Queue, Runtime, Task }

private[pubsub] trait SubscriberImpl[A] extends Subscriber[A] {
  val getActorSystem: ActorSystem
  val getMediator: ActorRef

  override def listenWith(topic: String, queue: Queue[A], group: Option[String] = None): Task[Unit] =
    for {
      rts        <- Task.runtime
      subscribed <- Promise.make[Nothing, Unit]
      _          <- Task(
                      getActorSystem.actorOf(Props(new SubscriberActor[A](getMediator, topic, group, rts, queue, subscribed)))
                    )
      _          <- subscribed.await
    } yield ()
}

object SubscriberImpl {
  private[impl] class SubscriberActor[A](
    mediator: ActorRef,
    topic: String,
    group: Option[String],
    rts: Runtime[Any],
    queue: Queue[A],
    subscribed: Promise[Nothing, Unit]
  ) extends Actor {

    mediator ! Subscribe(topic, group, self)

    def receive: PartialFunction[Any, Unit] = {
      case SubscribeAck(_)      =>
        rts.unsafeRunSync(subscribed.succeed(()))
        ()
      case MessageEnvelope(msg) =>
        rts.unsafeRunAsync(queue.offer(msg.asInstanceOf[A])) {
          case Success(_)     => ()
          case Failure(cause) => if (cause.interrupted) self ! PoisonPill // stop listening if the queue was shut down
        }
    }
  }
} 
Example 178
Source File: RestRoute.scala    From akka-http-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.component.highlevelserver.route

import akka.actor.ActorRef
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.marshalling.ToResponseMarshaller
import akka.http.scaladsl.model.{ StatusCodes, Uri }
import akka.http.scaladsl.server.{ Directives, Route }
import akka.http.scaladsl.unmarshalling.FromRequestUnmarshaller
import akka.pattern.ask
import akka.util.Timeout
import com.github.dnvriend.component.highlevelserver.dto.PersonWithId
import com.github.dnvriend.component.highlevelserver.marshaller.Marshaller
import com.github.dnvriend.component.simpleserver.dto.http.Person

import scala.concurrent.Future

// see: akka.http.scaladsl.marshalling.ToResponseMarshallable
// see: akka.http.scaladsl.marshalling.PredefinedToResponseMarshallers
object RestRoute extends Directives with SprayJsonSupport with Marshaller {
  def routes(personDb: ActorRef)(implicit timeout: Timeout, trmSingle: ToResponseMarshaller[PersonWithId], trmList: ToResponseMarshaller[List[PersonWithId]], fru: FromRequestUnmarshaller[Person]): Route = {
    pathEndOrSingleSlash {
      redirect(Uri("/api/person"), StatusCodes.PermanentRedirect)
    } ~
      pathPrefix("api" / "person") {
        get {
          path(IntNumber) { id =>
            println(s"PathEndsInNumber=$id")
            complete((personDb ? "findAll").mapTo[List[PersonWithId]])
          } ~
            pathEndOrSingleSlash {
              parameter("foo") { foo =>
                println(s"foo=$foo")
                complete((personDb ? "findAll").mapTo[List[PersonWithId]])
              } ~
                parameter('bar) { bar =>
                  println(s"bar=$bar")
                  complete((personDb ? "findAll").mapTo[List[PersonWithId]])
                } ~
                complete((personDb ? "findAll").mapTo[List[PersonWithId]])
            }
        } ~
          (post & pathEndOrSingleSlash & entity(as[Person])) { person =>
            complete((personDb ? person).mapTo[PersonWithId])
          }
      } ~
      path("failure") {
        pathEnd {
          complete(Future.failed[String](new RuntimeException("Simulated Failure")))
        }
      } ~
      path("success") {
        pathEnd {
          complete(Future.successful("Success!!"))
        }
      }
  }
} 
Example 179
Source File: TestSpec.scala    From akka-http-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.stream.testkit.TestSubscriber
import akka.stream.testkit.scaladsl.TestSink
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest._
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatestplus.play.guice.GuiceOneServerPerSuite
import play.api.inject.BindingKey
import play.api.test.WsTestClient

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
import scala.util.Try

class TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with OptionValues
    with TryValues
    with ScalaFutures
    with WsTestClient
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with GuiceOneServerPerSuite {

  def getComponent[A: ClassTag] = app.injector.instanceOf[A]

  def getAnnotatedComponent[A](name: String)(implicit ct: ClassTag[A]): A =
    app.injector.instanceOf[A](BindingKey(ct.runtimeClass.asInstanceOf[Class[A]]).qualifiedWith(name))

  // set the port number of the HTTP server
  override lazy val port: Int = 8080
  implicit val timeout: Timeout = 10.seconds
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 30.seconds, interval = 300.millis)
  implicit val system: ActorSystem = getComponent[ActorSystem]
  implicit val ec: ExecutionContext = getComponent[ExecutionContext]
  implicit val mat: Materializer = getComponent[Materializer]

  // ================================== Supporting Operations ====================================
  implicit class PimpedByteArray(self: Array[Byte]) {
    def getString: String = new String(self)
  }

  implicit class PimpedFuture[T](self: Future[T]) {
    def toTry: Try[T] = Try(self.futureValue)
  }

  implicit class SourceOps[A](src: Source[A, _]) {
    def testProbe(f: TestSubscriber.Probe[A] => Unit): Unit =
      f(src.runWith(TestSink.probe(system)))
  }

  def killActors(actors: ActorRef*): Unit = {
    val tp = TestProbe()
    actors.foreach { (actor: ActorRef) =>
      tp watch actor
      actor ! PoisonPill
      tp.expectTerminated(actor)
    }
  }

  override protected def beforeEach(): Unit = {
  }
} 
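The killActors helper above is the standard deathwatch teardown: a TestProbe watches each actor, a PoisonPill is sent, and expectTerminated blocks until the Terminated message arrives, so the test cannot proceed while an actor is still draining its mailbox. A hypothetical test using it, written as it would appear inside TestSpec (assuming akka.actor.{ Actor, Props } is also imported):

"killActors" should "block until the watched actor is terminated" in {
  // a throwaway echo actor; any ActorRef works
  val echo = system.actorOf(Props(new Actor { def receive = { case m => sender() ! m } }))
  killActors(echo) // returns only after Terminated(echo) is observed
}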
Example 180
Source File: SBTBotTestRunner.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package org.perftester.sbtbot

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.io.{IO, Tcp}
import akka.testkit.TestProbe
import ammonite.ops.Path
import org.perftester.sbtbot.SBTBot.{ExecuteTask, SBTBotReady, TaskResult}

object SBTBotTestRunner {

  
  def run(testDir: Path,
          programArgs: List[String],
          jvmArgs: List[String],
          repeats: Int,
          commands: List[String],
          debugging: Boolean): Unit = {
    implicit val actorSystem: ActorSystem = ActorSystem("test")

    val manager = IO(Tcp)

    val proxy = TestProbe()
    val parent = actorSystem.actorOf(Props(new Actor {
      val child: ActorRef =
        context.actorOf(SBTBot.props(testDir, programArgs, jvmArgs), "sbtbot")

      def receive: Receive = {
        case x if sender == child => proxy.ref forward x
        case x                    => child forward x
      }
    }))

    import scala.concurrent.duration._

    try {
      proxy.expectMsg(600.seconds, SBTBotReady)
      println("SBT Bot ready - starting run")
      val timeout = if (debugging) 20 minutes else 40 minutes

      for (i <- 1 to repeats) {
        implicit val sender: ActorRef = proxy.ref
        commands.zipWithIndex foreach {
          case (cmd, idx) =>
            println(
              s"--------------- $cmd - iteration  $i/$repeats -------------------------------")
            parent ! ExecuteTask(s"$idx", cmd)
            proxy.expectMsgClass(timeout, classOf[TaskResult])
//            Thread.sleep(5000)
        }
      }

      println("---------------Finished --------------------------------")
    } finally {
      actorSystem.terminate()
    }
  }
} 
Example 181
Source File: SBTBotMain.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package org.perftester.sbtbot

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.testkit.TestProbe
import SBTBot.{ExecuteTask, SBTBotReady, TaskResult}

// Test class for SBTBot
object SBTBotMain {

  def main(args: Array[String]): Unit = {
    implicit val actorSystem: ActorSystem = ActorSystem("test")
    import ammonite.ops._

    val proxy = TestProbe()
    val parent = actorSystem.actorOf(Props(new Actor {
      val child: ActorRef =
        context.actorOf(SBTBot.props(root / "workspace" / "perf_tester" / "corpus" / "akka",
                                     List.empty,
                                     List.empty),
                        "sbtbot")

      def receive: Receive = {
        case x if sender == child => proxy.ref forward x
        case x                    => child forward x
      }
    }))

    import scala.concurrent.duration._

    try {
      proxy.expectMsg(600.seconds, SBTBotReady)
      println("SBT Bot ready - triggering clean")

      val testName = "ActionCompositionSpec"

      implicit val sender: ActorRef = proxy.ref
      println("---------------clean--------------------------------")
      parent ! ExecuteTask("1", "clean")
      proxy.expectMsgClass(30.seconds, classOf[TaskResult])

      println("---------------compile--------------------------------")
      parent ! ExecuteTask("2", "compile")
      proxy.expectMsgClass(60.seconds, classOf[TaskResult])

      println("---------------Finished --------------------------------")
      println("Finished")
    } finally {
      actorSystem.terminate()
    }
  }
} 
Example 182
Source File: Main.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package akka

import akka.actor.ActorSystem
import akka.actor.ExtendedActorSystem
import akka.actor.Actor
import akka.actor.Terminated
import akka.actor.ActorLogging
import akka.actor.Props
import akka.actor.ActorRef
import scala.util.control.NonFatal


object Main {

  def main(args: Array[String]): Unit = {
    if (args.length != 1) {
      println("you need to provide exactly one argument: the class of the application supervisor actor")
    } else {
      val system = ActorSystem("Main")
      try {
        val appClass = system.asInstanceOf[ExtendedActorSystem].dynamicAccess.getClassFor[Actor](args(0)).get
        val app = system.actorOf(Props(appClass), "app")
        val terminator = system.actorOf(Props(classOf[Terminator], app), "app-terminator")
      } catch {
        case NonFatal(e) ⇒ system.terminate(); throw e
      }
    }
  }

  class Terminator(app: ActorRef) extends Actor with ActorLogging {
    context watch app
    def receive = {
      case Terminated(_) ⇒
        log.info("application supervisor has terminated, shutting down")
        context.system.terminate()
    }
  }

} 
Example 183
Source File: PipeToSupport.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package akka.pattern

import language.implicitConversions
import scala.concurrent.{ Future, ExecutionContext }
import scala.util.{ Failure, Success }
import akka.actor.{ Status, ActorRef, Actor }
import akka.actor.ActorSelection
import java.util.concurrent.CompletionStage
import java.util.function.BiConsumer

trait PipeToSupport {

  final class PipeableFuture[T](val future: Future[T])(implicit executionContext: ExecutionContext) {
    def pipeTo(recipient: ActorRef)(implicit sender: ActorRef = Actor.noSender): Future[T] = {
      future andThen {
        case Success(r) ⇒ recipient ! r
        case Failure(f) ⇒ recipient ! Status.Failure(f)
      }
    }
    def pipeToSelection(recipient: ActorSelection)(implicit sender: ActorRef = Actor.noSender): Future[T] = {
      future andThen {
        case Success(r) ⇒ recipient ! r
        case Failure(f) ⇒ recipient ! Status.Failure(f)
      }
    }
    def to(recipient: ActorRef): PipeableFuture[T] = to(recipient, Actor.noSender)
    def to(recipient: ActorRef, sender: ActorRef): PipeableFuture[T] = {
      pipeTo(recipient)(sender)
      this
    }
    def to(recipient: ActorSelection): PipeableFuture[T] = to(recipient, Actor.noSender)
    def to(recipient: ActorSelection, sender: ActorRef): PipeableFuture[T] = {
      pipeToSelection(recipient)(sender)
      this
    }
  }

  final class PipeableCompletionStage[T](val future: CompletionStage[T])(implicit executionContext: ExecutionContext) {
    def pipeTo(recipient: ActorRef)(implicit sender: ActorRef = Actor.noSender): CompletionStage[T] = {
      future whenComplete new BiConsumer[T, Throwable] {
        override def accept(t: T, ex: Throwable) {
          if (t != null) recipient ! t
          if (ex != null) recipient ! Status.Failure(ex)
        }
      }
    }
    def pipeToSelection(recipient: ActorSelection)(implicit sender: ActorRef = Actor.noSender): CompletionStage[T] = {
      future whenComplete new BiConsumer[T, Throwable] {
        override def accept(t: T, ex: Throwable) {
          if (t != null) recipient ! t
          if (ex != null) recipient ! Status.Failure(ex)
        }
      }
    }
    def to(recipient: ActorRef): PipeableCompletionStage[T] = to(recipient, Actor.noSender)
    def to(recipient: ActorRef, sender: ActorRef): PipeableCompletionStage[T] = {
      pipeTo(recipient)(sender)
      this
    }
    def to(recipient: ActorSelection): PipeableCompletionStage[T] = to(recipient, Actor.noSender)
    def to(recipient: ActorSelection, sender: ActorRef): PipeableCompletionStage[T] = {
      pipeToSelection(recipient)(sender)
      this
    }
  }

  
  implicit def pipe[T](future: Future[T])(implicit executionContext: ExecutionContext): PipeableFuture[T] = new PipeableFuture(future)

  implicit def pipeCompletionStage[T](future: CompletionStage[T])(implicit executionContext: ExecutionContext): PipeableCompletionStage[T] = new PipeableCompletionStage(future)
} 
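These implicits back the familiar future pipeTo actor syntax: the Future's result, or a Status.Failure on error, is delivered to the target as an ordinary message, so an actor never reads a Future result from a foreign thread. A minimal self-contained sketch using the public akka.pattern.pipe import (the Doubler and printer actors are hypothetical):

import akka.actor.{ Actor, ActorSystem, Props, Status }
import akka.pattern.pipe
import scala.concurrent.Future

class Doubler extends Actor {
  import context.dispatcher // ExecutionContext required by Future and pipeTo

  def receive: Receive = {
    case n: Int => Future(n * 2) pipeTo sender() // value or Status.Failure goes back
  }
}

object PipeDemo extends App {
  val system = ActorSystem("pipe-demo")
  val doubler = system.actorOf(Props(new Doubler), "doubler")
  val printer = system.actorOf(Props(new Actor {
    def receive: Receive = {
      case Status.Failure(e) => println(s"failed: $e"); context.system.terminate()
      case x                 => println(s"result: $x"); context.system.terminate()
    }
  }), "printer")
  doubler.tell(21, printer)
}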
Example 184
Source File: LoggerMailbox.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package akka.event

import akka.dispatch.MessageQueue
import akka.dispatch.MailboxType
import akka.dispatch.UnboundedMailbox
import com.typesafe.config.Config
import akka.actor.ActorSystem
import akka.actor.ActorRef
import akka.dispatch.ProducesMessageQueue
import akka.event.Logging.Debug
import akka.event.Logging.LogEvent

trait LoggerMessageQueueSemantics


private[akka] class LoggerMailbox(owner: ActorRef, system: ActorSystem)
  extends UnboundedMailbox.MessageQueue with LoggerMessageQueueSemantics {

  override def cleanUp(owner: ActorRef, deadLetters: MessageQueue): Unit = {
    if (hasMessages) {
      val logLevel = system.eventStream.logLevel
      var envelope = dequeue
      // Drain all remaining messages to the StandardOutLogger.
      // cleanUp is called after switching out the mailbox, which is why
      // this kind of loop works without a limit.
      val loggingEnabled = Logging.AllLogLevels.contains(logLevel)
      while (envelope ne null) {
        // skip logging if level is OFF
        if (loggingEnabled)
          envelope.message match {
            case e: LogEvent if e.level <= logLevel ⇒
              // Logging.StandardOutLogger is a MinimalActorRef, i.e. not a "real" actor
              Logging.StandardOutLogger.tell(envelope.message, envelope.sender)
            case _ ⇒ // skip
          }

        envelope = dequeue
      }
    }
    super.cleanUp(owner, deadLetters)
  }
} 
Example 185
Source File: AddressTerminatedTopic.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package akka.event

import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.AddressTerminated
import akka.actor.ExtendedActorSystem
import akka.actor.Extension
import akka.actor.ExtensionId
import akka.actor.ExtensionIdProvider


private[akka] final class AddressTerminatedTopic extends Extension {

  private val subscribers = new AtomicReference[Set[ActorRef]](Set.empty[ActorRef])

  @tailrec def subscribe(subscriber: ActorRef): Unit = {
    val current = subscribers.get
    if (!subscribers.compareAndSet(current, current + subscriber))
      subscribe(subscriber) // retry
  }

  @tailrec def unsubscribe(subscriber: ActorRef): Unit = {
    val current = subscribers.get
    if (!subscribers.compareAndSet(current, current - subscriber))
      unsubscribe(subscriber) // retry
  }

  def publish(msg: AddressTerminated): Unit = {
    subscribers.get foreach { _.tell(msg, ActorRef.noSender) }
  }

} 
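subscribe and unsubscribe above follow the classic lock-free update loop: read the current immutable Set, compute the new one, and retry via compareAndSet if another thread won the race. The same shape works for any listener registry; a minimal standalone sketch:

import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec
import akka.actor.ActorRef

class ListenerRegistry {
  private val refs = new AtomicReference[Set[ActorRef]](Set.empty[ActorRef])

  @tailrec final def add(ref: ActorRef): Unit = {
    val current = refs.get
    if (!refs.compareAndSet(current, current + ref)) add(ref) // lost the race, retry
  }

  @tailrec final def remove(ref: ActorRef): Unit = {
    val current = refs.get
    if (!refs.compareAndSet(current, current - ref)) remove(ref)
  }

  def all: Set[ActorRef] = refs.get
}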
Example 186
Source File: TcpIncomingConnection.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package akka.io

import java.nio.channels.SocketChannel
import scala.collection.immutable
import akka.actor.ActorRef
import akka.io.Inet.SocketOption


private[io] class TcpIncomingConnection(
  _tcp: TcpExt,
  _channel: SocketChannel,
  registry: ChannelRegistry,
  bindHandler: ActorRef,
  options: immutable.Traversable[SocketOption],
  readThrottling: Boolean)
  extends TcpConnection(_tcp, _channel, readThrottling) {

  signDeathPact(bindHandler)

  registry.register(channel, initialOps = 0)

  def receive = {
    case registration: ChannelRegistration ⇒ completeConnect(registration, bindHandler, options)
  }
} 
Example 187
Source File: TestLogkafkaStateActor.scala    From CMAK   with Apache License 2.0 5 votes vote down vote up
package kafka.manager

import java.util.Properties
import java.util.concurrent.TimeUnit

import akka.actor.{ActorRef, ActorSystem, Kill, Props}
import akka.pattern._
import akka.util.Timeout
import akka.util.Timeout._
import com.typesafe.config.{Config, ConfigFactory}
import kafka.manager.features.ClusterFeatures
import kafka.manager.logkafka.LogkafkaStateActor
import kafka.manager.model.{ActorModel, ClusterConfig, ClusterContext}
import kafka.manager.utils.KafkaServerInTest
import ActorModel._
import kafka.test.SeededBroker

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.reflect.ClassTag
import scala.util.Try


class TestLogkafkaStateActor extends KafkaServerInTest with BaseTest {

  private[this] val akkaConfig: Properties = new Properties()
  akkaConfig.setProperty("pinned-dispatcher.type","PinnedDispatcher")
  akkaConfig.setProperty("pinned-dispatcher.executor","thread-pool-executor")
  private[this] val config : Config = ConfigFactory.parseProperties(akkaConfig)
  private[this] val system = ActorSystem("test-logkafka-state-actor",config)
  private[this] val broker = new SeededBroker("ks-test",4)
  override val kafkaServerZkPath = broker.getZookeeperConnectionString
  private[this] var logkafkaStateActor : Option[ActorRef] = None
  private[this] implicit val timeout: Timeout = 10.seconds
  private[this] val defaultClusterConfig = ClusterConfig("test","0.8.2.0","localhost:2818",100,false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(defaultTuning), securityProtocol="PLAINTEXT", saslMechanism=None, jaasConfig=None)
  private[this] val defaultClusterContext = ClusterContext(ClusterFeatures.from(defaultClusterConfig), defaultClusterConfig)

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    val props = Props(classOf[LogkafkaStateActor],sharedCurator, defaultClusterContext)

    logkafkaStateActor = Some(system.actorOf(props.withDispatcher("pinned-dispatcher"),"lksa"))
  }

  override protected def afterAll(): Unit = {
    logkafkaStateActor.foreach( _ ! Kill )
    Try(Await.ready(system.terminate(), Duration(5, TimeUnit.SECONDS)))
    Try(broker.shutdown())
    super.afterAll()
  }

  private[this] def withLogkafkaStateActor[Input,Output,FOutput](msg: Input)(fn: Output => FOutput)(implicit tag: ClassTag[Output]) : FOutput = {
    require(logkafkaStateActor.isDefined, "logkafkaStateActor undefined!")
    val future = ask(logkafkaStateActor.get, msg).mapTo[Output]
    val result = Await.result(future,10.seconds)
    fn(result)
  }

  test("get logkafka logkafka id list") {
    withLogkafkaStateActor(LKSGetLogkafkaLogkafkaIds) { result: LogkafkaLogkafkaIdList =>
      result.list foreach println
    }
  }

  test("get logkafka config") {
    withLogkafkaStateActor(LKSGetLogkafkaLogkafkaIds) { result: LogkafkaLogkafkaIdList =>
      val configs = result.list map { logkafka_id =>
        withLogkafkaStateActor(LKSGetLogkafkaConfig(logkafka_id)) { logkafkaConfig: LogkafkaConfig => logkafkaConfig }
      }
      configs foreach println
    }
  }

  test("get logkafka client") {
    withLogkafkaStateActor(LKSGetLogkafkaLogkafkaIds) { result: LogkafkaLogkafkaIdList =>
      val clients = result.list map { logkafka_id =>
        withLogkafkaStateActor(LKSGetLogkafkaClient(logkafka_id)) { logkafkaClient: LogkafkaClient => logkafkaClient }
      }
      clients foreach println
    }
  }

  test("get logkafka configs") {
    withLogkafkaStateActor(LKSGetAllLogkafkaConfigs()) { lc: LogkafkaConfigs =>
      lc.configs foreach println
    }
  }

  test("get logkafka clients") {
    withLogkafkaStateActor(LKSGetAllLogkafkaClients()) { lc: LogkafkaClients =>
      lc.clients foreach println
    }
  }

} 
Example 188
Source File: TestBrokerViewCacheActor.scala    From CMAK   with Apache License 2.0 5 votes vote down vote up
package kafka.manager

import java.util.Properties
import java.util.concurrent.TimeUnit

import akka.actor.{ActorRef, ActorSystem, Kill, Props}
import akka.pattern._
import akka.util.Timeout
import com.typesafe.config.{Config, ConfigFactory}
import kafka.manager.actor.cluster.{BrokerViewCacheActor, BrokerViewCacheActorConfig, KafkaManagedOffsetCacheConfig, KafkaStateActor, KafkaStateActorConfig}
import kafka.manager.base.LongRunningPoolConfig
import kafka.manager.features.ClusterFeatures
import kafka.manager.model.{ActorModel, ClusterConfig, ClusterContext}
import kafka.manager.utils.KafkaServerInTest
import ActorModel._
import kafka.test.SeededBroker

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.reflect.ClassTag
import scala.util.Try


class TestBrokerViewCacheActor extends KafkaServerInTest with BaseTest {
  private[this] val akkaConfig: Properties = new Properties()
  akkaConfig.setProperty("pinned-dispatcher.type","PinnedDispatcher")
  akkaConfig.setProperty("pinned-dispatcher.executor","thread-pool-executor")
  private[this] val config : Config = ConfigFactory.parseProperties(akkaConfig)
  private[this] val system = ActorSystem("test-broker-view-cache-actor",config)
  private[this] val broker = new SeededBroker("bvc-test",4)
  override val kafkaServerZkPath = broker.getZookeeperConnectionString
  private[this] var kafkaStateActor : Option[ActorRef] = None
  private[this] implicit val timeout: Timeout = 10.seconds

  private[this] var brokerViewCacheActor : Option[ActorRef] = None
  private[this] val defaultClusterConfig = ClusterConfig("test","0.8.2.0","localhost:2818",100,false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxSsl = false, jmxPass = None, tuning = Option(defaultTuning), securityProtocol="PLAINTEXT", saslMechanism=None, jaasConfig=None)
  private[this] val defaultClusterContext = ClusterContext(ClusterFeatures.from(defaultClusterConfig), defaultClusterConfig)

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    val clusterConfig = ClusterConfig("dev","0.8.2.0",kafkaServerZkPath, jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(defaultTuning), securityProtocol="PLAINTEXT", saslMechanism=None, jaasConfig=None)
    val clusterContext = ClusterContext(ClusterFeatures.from(clusterConfig), clusterConfig)
    val ksConfig = KafkaStateActorConfig(sharedCurator, "pinned-dispatcher", clusterContext, LongRunningPoolConfig(2,100), LongRunningPoolConfig(2,100), 5, 10000, None, KafkaManagedOffsetCacheConfig())
    val props = Props(classOf[KafkaStateActor],ksConfig)

    kafkaStateActor = Some(system.actorOf(props.withDispatcher("pinned-dispatcher"),"ksa"))

    val bvConfig = BrokerViewCacheActorConfig(kafkaStateActor.get.path, clusterContext, LongRunningPoolConfig(2,100), FiniteDuration(10, SECONDS))
    val bvcProps = Props(classOf[BrokerViewCacheActor],bvConfig)

    brokerViewCacheActor = Some(system.actorOf(bvcProps,"broker-view"))

    brokerViewCacheActor.get ! BVForceUpdate
    Thread.sleep(10000)
  }

  override protected def afterAll(): Unit = {
    brokerViewCacheActor.foreach( _ ! Kill )
    kafkaStateActor.foreach( _ ! Kill )
    Try(Await.ready(system.terminate(), Duration(5, TimeUnit.SECONDS)))
    Try(broker.shutdown())
    super.afterAll()
  }

  private[this] def withBrokerViewCacheActor[Input,Output,FOutput]
  (msg: Input)(fn: Output => FOutput)(implicit tag: ClassTag[Output]) : FOutput = {
    require(brokerViewCacheActor.isDefined, "brokerViewCacheActor undefined!")
    val future = ask(brokerViewCacheActor.get, msg).mapTo[Output]
    val result = Await.result(future,10.seconds)
    fn(result)
  }

  test("get broker view") {
    withBrokerViewCacheActor(BVGetView(1)) { optionalBrokerView : Option[BVView] =>
      println(optionalBrokerView)
    }
  }

} 
Example 189
Source File: TestLogkafkaViewCacheActor.scala    From CMAK   with Apache License 2.0 5 votes vote down vote up
package kafka.manager

import java.util.Properties
import java.util.concurrent.TimeUnit

import akka.actor.{ActorRef, ActorSystem, Kill, Props}
import akka.pattern._
import akka.util.Timeout
import com.typesafe.config.{Config, ConfigFactory}
import kafka.manager.actor.cluster.KafkaStateActor
import kafka.manager.base.LongRunningPoolConfig
import kafka.manager.features.ClusterFeatures
import kafka.manager.logkafka.{LogkafkaViewCacheActor, LogkafkaViewCacheActorConfig}
import kafka.manager.model.{ActorModel, ClusterConfig, ClusterContext}
import kafka.manager.utils.KafkaServerInTest
import ActorModel._
import kafka.test.SeededBroker

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.reflect.ClassTag
import scala.util.Try


class TestLogkafkaViewCacheActor extends KafkaServerInTest with BaseTest {
  private[this] val akkaConfig: Properties = new Properties()
  akkaConfig.setProperty("pinned-dispatcher.type","PinnedDispatcher")
  akkaConfig.setProperty("pinned-dispatcher.executor","thread-pool-executor")
  private[this] val config : Config = ConfigFactory.parseProperties(akkaConfig)
  private[this] val system = ActorSystem("test-logkafka-view-cache-actor",config)
  private[this] val broker = new SeededBroker("lkvc-test",4)
  override val kafkaServerZkPath = broker.getZookeeperConnectionString
  private[this] var logkafkaStateActor : Option[ActorRef] = None
  private[this] implicit val timeout: Timeout = 10.seconds

  private[this] var logkafkaViewCacheActor : Option[ActorRef] = None
  private[this] val defaultClusterConfig = ClusterConfig("test","0.8.2.0","localhost:2818",100,false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(defaultTuning), securityProtocol="PLAINTEXT", saslMechanism=None, jaasConfig=None)
  private[this] val defaultClusterContext = ClusterContext(ClusterFeatures.from(defaultClusterConfig), defaultClusterConfig)

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    val clusterConfig = ClusterConfig("dev","0.8.2.0",kafkaServerZkPath, jmxEnabled = false, pollConsumers = true, filterConsumers = true, logkafkaEnabled = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(defaultTuning), securityProtocol="PLAINTEXT", saslMechanism=None, jaasConfig=None)
    val clusterContext = ClusterContext(ClusterFeatures.from(clusterConfig), clusterConfig)
    val props = Props(classOf[KafkaStateActor],sharedCurator, defaultClusterContext)

    logkafkaStateActor = Some(system.actorOf(props.withDispatcher("pinned-dispatcher"),"lksa"))

    val lkvConfig = LogkafkaViewCacheActorConfig(logkafkaStateActor.get.path, clusterContext, LongRunningPoolConfig(2,100), FiniteDuration(10, SECONDS))
    val lkvcProps = Props(classOf[LogkafkaViewCacheActor],lkvConfig)

    logkafkaViewCacheActor = Some(system.actorOf(lkvcProps,"logkafka-view"))

    logkafkaViewCacheActor.get ! BVForceUpdate
    Thread.sleep(10000)
  }

  override protected def afterAll(): Unit = {
    logkafkaViewCacheActor.foreach( _ ! Kill )
    logkafkaStateActor.foreach( _ ! Kill )
    Try(Await.ready(system.terminate(), Duration(5, TimeUnit.SECONDS)))
    Try(broker.shutdown())
    super.afterAll()
  }

  private[this] def withLogkafkaViewCacheActor[Input,Output,FOutput]
  (msg: Input)(fn: Output => FOutput)(implicit tag: ClassTag[Output]) : FOutput = {
    require(logkafkaViewCacheActor.isDefined, "logkafkaViewCacheActor undefined!")
    val future = ask(logkafkaViewCacheActor.get, msg).mapTo[Output]
    val result = Await.result(future,10.seconds)
    fn(result)
  }
} 
Example 190
Source File: Server.scala    From scalachain   with MIT License 5 votes vote down vote up
package com.elleflorio.scalachain

import akka.actor.{ActorRef, ActorSystem}
import akka.cluster.pubsub.DistributedPubSub
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import com.elleflorio.scalachain.actor.Node
import com.elleflorio.scalachain.api.NodeRoutes
import com.elleflorio.scalachain.cluster.ClusterManager
import com.typesafe.config.{Config, ConfigFactory}

import scala.concurrent.Await
import scala.concurrent.duration.Duration

object Server extends App with NodeRoutes {

  implicit val system: ActorSystem = ActorSystem("scalachain")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  val config: Config = ConfigFactory.load()
  val address = config.getString("http.ip")
  val port = config.getInt("http.port")
  val nodeId = config.getString("scalachain.node.id")

  lazy val routes: Route = statusRoutes ~ transactionRoutes ~ mineRoutes

  val clusterManager: ActorRef = system.actorOf(ClusterManager.props(nodeId), "clusterManager")
  val mediator: ActorRef = DistributedPubSub(system).mediator
  val node: ActorRef = system.actorOf(Node.props(nodeId, mediator), "node")

  Http().bindAndHandle(routes, address, port)
  println(s"Server online at http://$address:$port/")

  Await.result(system.whenTerminated, Duration.Inf)

} 
Example 191
Source File: NodeRoutes.scala    From scalachain   with MIT License 5 votes vote down vote up
package com.elleflorio.scalachain.api

import com.elleflorio.scalachain.actor.Node._
import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.util.Timeout
import com.elleflorio.scalachain.blockchain.{Chain, Transaction}
import com.elleflorio.scalachain.cluster.ClusterManager.GetMembers
import com.elleflorio.scalachain.utils.JsonSupport._

import scala.concurrent.Future
import scala.concurrent.duration._

trait NodeRoutes extends SprayJsonSupport {

  implicit def system: ActorSystem

  def node: ActorRef
  def clusterManager: ActorRef

  implicit lazy val timeout = Timeout(5.seconds)

  lazy val statusRoutes: Route = pathPrefix("status") {
    concat(
      pathEnd {
        concat(
          get {
            val statusFuture: Future[Chain] = (node ? GetStatus).mapTo[Chain]
            onSuccess(statusFuture) { status =>
              complete(StatusCodes.OK, status)
            }
          }
        )
      },
      pathPrefix("members") {
        concat(
          pathEnd {
            concat(
              get {
                val membersFuture: Future[List[String]] = (clusterManager ? GetMembers).mapTo[List[String]]
                onSuccess(membersFuture) { members =>
                  complete(StatusCodes.OK, members)
                }
              }
            )
          }
        )
      }
    )
  }

  lazy val transactionRoutes: Route = pathPrefix("transactions") {
    concat(
      pathEnd {
        concat(
          get {
            val transactionsRetrieved: Future[List[Transaction]] =
              (node ? GetTransactions).mapTo[List[Transaction]]
            onSuccess(transactionsRetrieved) { transactions =>
              complete(transactions.toList)
            }
          },
          post {
            entity(as[Transaction]) { transaction =>
              val transactionCreated: Future[Int] =
                (node ? AddTransaction(transaction)).mapTo[Int]
              onSuccess(transactionCreated) { done =>
                complete((StatusCodes.Created, done.toString))
              }
            }
          }
        )
      }
    )
  }

  lazy val mineRoutes: Route = pathPrefix("mine") {
    concat(
      pathEnd {
        concat(
          get {
            node ! Mine
            complete(StatusCodes.OK)
          }
        )
      }
    )
  }

} 
Example 192
Source File: ClusterManager.scala    From scalachain   with MIT License 5 votes vote down vote up
package com.elleflorio.scalachain.cluster

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.{Cluster, MemberStatus}
import com.elleflorio.scalachain.cluster.ClusterManager.GetMembers

object ClusterManager {

  sealed trait ClusterMessage
  case object GetMembers extends ClusterMessage

  def props(nodeId: String) = Props(new ClusterManager(nodeId))
}

class ClusterManager(nodeId: String) extends Actor with ActorLogging {

  val cluster: Cluster = Cluster(context.system)
  val listener: ActorRef = context.actorOf(ClusterListener.props(nodeId, cluster), "clusterListener")

  override def receive: Receive = {
    case GetMembers => {
      sender() ! cluster.state.members.filter(_.status == MemberStatus.up)
        .map(_.address.toString)
        .toList
    }
  }
} 
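Because GetMembers is answered asynchronously with a List[String], callers outside the actor use the ask pattern and restore the reply type with mapTo. A sketch (the wrapper object is hypothetical):

import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import com.elleflorio.scalachain.cluster.ClusterManager.GetMembers

import scala.concurrent.Future

object ClusterManagerClient {
  // ask returns Future[Any]; mapTo fails the Future if the reply has another type
  def currentMembers(manager: ActorRef)(implicit timeout: Timeout): Future[List[String]] =
    (manager ? GetMembers).mapTo[List[String]]
}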
Example 193
Source File: BrokerTest.scala    From scalachain   with MIT License 5 votes vote down vote up
package com.elleflorio.scalachain.actor

import akka.actor.{ActorRef, ActorSystem}
import akka.cluster.pubsub.DistributedPubSub
import akka.testkit.{ImplicitSender, TestKit}
import com.elleflorio.scalachain.actor.Broker.{AddTransaction, Clear, GetTransactions}
import com.elleflorio.scalachain.blockchain.Transaction
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}

import scala.concurrent.duration._

class BrokerTest(sys: ActorSystem) extends TestKit(sys)
  with ImplicitSender
  with Matchers
  with FlatSpecLike
  with BeforeAndAfterAll {

  def this() = this(ActorSystem("broker-test"))
  val mediator: ActorRef = DistributedPubSub(this.system).mediator

  override def afterAll: Unit = {
    shutdown(system)
  }

  "A Broker Actor" should "start with an empty list of transactions" in {
    val broker = system.actorOf(Broker.props)

    broker ! GetTransactions
    expectMsg(500 millis, List())
  }

  "A Broker Actor" should "return the correct list of added transactions" in {
    val broker = system.actorOf(Broker.props)
    val transaction1 = Transaction("A", "B", 100)
    val transaction2 = Transaction("C", "D", 1000)

    broker ! AddTransaction(transaction1)
    broker ! AddTransaction(transaction2)

    broker ! GetTransactions
    expectMsg(500 millis, List(transaction2, transaction1))
  }

  "A Broker Actor" should "clear the transaction lists when requested" in {
    val broker = system.actorOf(Broker.props)
    val transaction1 = Transaction("A", "B", 100)
    val transaction2 = Transaction("C", "D", 1000)

    broker ! AddTransaction(transaction1)
    broker ! AddTransaction(transaction2)

    broker ! Clear

    broker ! GetTransactions
    expectMsg(500 millis, List())
  }

} 
Example 194
Source File: ClusterCheckerService.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.core.services

import akka.actor.{ActorContext, ActorRef}
import com.stratio.sparta.serving.core.models.enumerators.PolicyStatusEnum._
import com.stratio.sparta.serving.core.models.policy.{PolicyModel, PolicyStatusModel}
import com.stratio.sparta.serving.core.utils.PolicyStatusUtils
import org.apache.curator.framework.CuratorFramework

import scala.util.{Failure, Success}

class ClusterCheckerService(val curatorFramework: CuratorFramework) extends PolicyStatusUtils {

  def checkPolicyStatus(policy: PolicyModel, launcherActor: ActorRef, akkaContext: ActorContext): Unit = {
    findStatusById(policy.id.get) match {
      case Success(policyStatus) =>
        if (policyStatus.status == Launched || policyStatus.status == Starting || policyStatus.status == Uploaded ||
          policyStatus.status == Stopping || policyStatus.status == NotStarted) {
          val information = s"The checker detects that the policy not start/stop correctly"
          log.error(information)
          updateStatus(PolicyStatusModel(id = policy.id.get, status = Failed, statusInfo = Some(information)))
          akkaContext.stop(launcherActor)
        } else {
          val information = s"The checker detects that the policy start/stop correctly"
          log.info(information)
          updateStatus(PolicyStatusModel(id = policy.id.get, status = NotDefined, statusInfo = Some(information)))
        }
      case Failure(exception) =>
        log.error(s"Error when extract policy status in scheduler task.", exception)
    }
  }
} 
Example 195
Source File: ConfigHttpServiceTest.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.service.http

import akka.actor.ActorRef
import akka.testkit.TestProbe
import com.stratio.sparta.serving.api.actor.ConfigActor
import com.stratio.sparta.serving.api.actor.ConfigActor._
import com.stratio.sparta.serving.api.constants.HttpConstant
import com.stratio.sparta.serving.core.config.{SpartaConfig, SpartaConfigFactory}
import com.stratio.sparta.serving.core.constants.{AkkaConstant, AppConstant}
import com.stratio.sparta.serving.core.models.dto.LoggedUserConstant
import com.stratio.sparta.serving.core.models.frontend.FrontendConfiguration
import org.junit.runner.RunWith
import org.scalatest.WordSpec
import org.scalatest.junit.JUnitRunner

@RunWith(classOf[JUnitRunner])
class ConfigHttpServiceTest extends WordSpec
  with ConfigHttpService
  with HttpServiceBaseTest {

  val configActorTestProbe = TestProbe()

  val dummyUser = Some(LoggedUserConstant.AnonymousUser)

  override implicit val actors: Map[String, ActorRef] = Map(
    AkkaConstant.ConfigActorName -> configActorTestProbe.ref
  )

  override val supervisor: ActorRef = testProbe.ref

  override def beforeEach(): Unit = {
    SpartaConfig.initMainConfig(Option(localConfig), SpartaConfigFactory(localConfig))
  }

  protected def retrieveStringConfig(): FrontendConfiguration =
    FrontendConfiguration(AppConstant.DefaultFrontEndTimeout, Option(AppConstant.DefaultOauth2CookieName))

  "ConfigHttpService.FindAll" should {
    "retrieve a FrontendConfiguration item" in {
      startAutopilot(ConfigResponse(retrieveStringConfig()))
      Get(s"/${HttpConstant.ConfigPath}") ~> routes(dummyUser) ~> check {
        testProbe.expectMsgType[ConfigActor.FindAll.type]
        responseAs[FrontendConfiguration] should equal(retrieveStringConfig())
      }
    }
  }

} 
Example 196
Source File: AppStatusHttpServiceTest.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.service.http

import akka.actor.ActorRef
import com.stratio.sparta.serving.api.constants.HttpConstant
import org.apache.curator.framework.CuratorFramework
import org.junit.runner.RunWith
import org.scalamock.scalatest.MockFactory
import org.scalatest.WordSpec
import org.scalatest.junit.JUnitRunner
import spray.http.StatusCodes

@RunWith(classOf[JUnitRunner])
class AppStatusHttpServiceTest extends WordSpec
                              with AppStatusHttpService
                              with HttpServiceBaseTest
                              with MockFactory {

  override implicit val actors: Map[String, ActorRef] = Map()
  override val supervisor: ActorRef = testProbe.ref
  override val curatorInstance = mock[CuratorFramework]

  "AppStatusHttpService" should {
    "check the status of the server" in {
      Get(s"/${HttpConstant.AppStatus}") ~> routes() ~> check {
        status should be (StatusCodes.InternalServerError)
      }
    }
  }
} 
Example 197
Source File: PluginsHttpServiceTest.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.serving.api.service.http

import akka.actor.ActorRef
import akka.testkit.TestProbe
import com.stratio.sparta.serving.api.actor.PluginActor.{PluginResponse, UploadPlugins}
import com.stratio.sparta.serving.api.constants.HttpConstant
import com.stratio.sparta.serving.core.config.{SpartaConfig, SpartaConfigFactory}
import com.stratio.sparta.serving.core.models.dto.LoggedUserConstant
import com.stratio.sparta.serving.core.models.files.{SpartaFile, SpartaFilesResponse}
import org.junit.runner.RunWith
import org.scalatest.WordSpec
import org.scalatest.junit.JUnitRunner
import spray.http._

import scala.util.{Failure, Success}

@RunWith(classOf[JUnitRunner])
class PluginsHttpServiceTest extends WordSpec
  with PluginsHttpService
  with HttpServiceBaseTest {

  override val supervisor: ActorRef = testProbe.ref

  val pluginTestProbe = TestProbe()

  val dummyUser = Some(LoggedUserConstant.AnonymousUser)

  override implicit val actors: Map[String, ActorRef] = Map.empty

  override def beforeEach(): Unit = {
    SpartaConfig.initMainConfig(Option(localConfig), SpartaConfigFactory(localConfig))
  }

  "PluginsHttpService.upload" should {
    "Upload a file" in {
      val response = SpartaFilesResponse(Success(Seq(SpartaFile("", "", "", ""))))
      startAutopilot(response)
      Put(s"/${HttpConstant.PluginsPath}") ~> routes(dummyUser) ~> check {
        testProbe.expectMsgType[UploadPlugins]
        status should be(StatusCodes.OK)
      }
    }
    "Fail when service is not available" in {
      val response = SpartaFilesResponse(Failure(new IllegalArgumentException("Error")))
      startAutopilot(response)
      Put(s"/${HttpConstant.PluginsPath}") ~> routes(dummyUser) ~> check {
        testProbe.expectMsgType[UploadPlugins]
        status should be(StatusCodes.InternalServerError)
      }
    }
  }
} 
Example 198
Source File: DbActor.scala    From sns   with Apache License 2.0 5 votes vote down vote up
package me.snov.sns.actor

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import me.snov.sns.model.Configuration
import me.snov.sns.service.DbService

object DbActor {
  def props(dbService: DbService) = Props(classOf[DbActor], dbService)

  case class CmdGetConfiguration()
}

class DbActor(dbService: DbService) extends Actor with ActorLogging {
  import me.snov.sns.actor.DbActor._

  val configuration = dbService.load() 
  
  def replyWithConfiguration(actorRef: ActorRef) = {
    if (configuration.isDefined) {
      actorRef ! configuration.get
    }
  }
  
  override def receive = {
    case CmdGetConfiguration => replyWithConfiguration(sender)
    case c: Configuration => dbService.save(c)
  }
} 
Example 199
Source File: PublishActor.scala    From sns   with Apache License 2.0 5 votes vote down vote up
package me.snov.sns.actor

import akka.actor.Status.{Failure, Success}
import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.pattern.ask
import akka.pattern.pipe
import akka.util.Timeout
import me.snov.sns.actor.SubscribeActor.CmdFanOut
import me.snov.sns.model.{Message, MessageAttribute}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object PublishActor {
  def props(actor: ActorRef) = Props(classOf[PublishActor], actor)

  case class CmdPublish(topicArn: String, bodies: Map[String, String], messageAttributes: Map[String, MessageAttribute])
}

class PublishActor(subscribeActor: ActorRef) extends Actor with ActorLogging {
  import me.snov.sns.actor.PublishActor._

  private implicit val timeout = Timeout(1.second)
  private implicit val ec = context.dispatcher

  private def publish(topicArn: String, bodies: Map[String, String], messageAttributes: Map[String, MessageAttribute])(implicit ec: ExecutionContext) = {
    val message = Message(bodies, messageAttributes = messageAttributes)

    (subscribeActor ? CmdFanOut(topicArn, message)).map {
      case Failure(e) => Failure(e)
      case Success => message
    }
  }

  override def receive = {
    case CmdPublish(topicArn, bodies, attributes) => publish(topicArn, bodies, attributes) pipeTo sender
  }
} 
Example 200
Source File: TopicApi.scala    From sns   with Apache License 2.0 5 votes vote down vote up
package me.snov.sns.api

import akka.actor.ActorRef
import akka.actor.Status.Success
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.util.Timeout
import me.snov.sns.actor.SubscribeActor.{CmdListTopics, CmdDeleteTopic, CmdCreateTopic}
import me.snov.sns.model.Topic
import me.snov.sns.response.TopicResponse

import scala.concurrent.ExecutionContext

object TopicApi {
  private val namePattern = """([\w+_-]{1,256})""".r
  private val arnPattern = """([\w+_:-]{1,512})""".r

  def route(actor: ActorRef)(implicit timeout: Timeout, ec: ExecutionContext): Route = {
    pathSingleSlash {
      formField('Action ! "CreateTopic") {
        formField('Name) {
          case namePattern(name) => complete {
            (actor ? CmdCreateTopic(name)).mapTo[Topic].map {
              TopicResponse.create
            }
          }
          case _ => complete(HttpResponse(400, entity = "InvalidParameter: invalid topic name"))
        } ~
        complete(HttpResponse(400, entity = "Topic name is missing"))
      } ~
      formField('Action ! "DeleteTopic") {
        formField('TopicArn) {
          case arnPattern(arn) => complete {
            (actor ? CmdDeleteTopic(arn)).map {
              case Success => TopicResponse.delete
              case _ => HttpResponse(404, entity = "NotFound")
            }
          }
          case _ => complete(HttpResponse(400, entity = "Invalid topic ARN"))
        } ~
        complete(HttpResponse(404, entity = "NotFound"))
      } ~ 
      formField('Action ! "ListTopics") {
        complete {
          (actor ? CmdListTopics).mapTo[Iterable[Topic]].map {
            TopicResponse.list
          }
        }
      }
    }
  }
}
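Wiring this route into a running server is ordinary akka-http bootstrapping; a sketch, assuming a subscribeActor: ActorRef has already been created elsewhere (the object name, host, and port are hypothetical):

import akka.actor.{ ActorRef, ActorSystem }
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import akka.util.Timeout

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object TopicApiServer {
  def start(subscribeActor: ActorRef)(implicit system: ActorSystem): Unit = {
    implicit val materializer: ActorMaterializer = ActorMaterializer()
    implicit val timeout: Timeout = Timeout(5.seconds)
    implicit val ec: ExecutionContext = system.dispatcher

    // route(actor) needs the implicit Timeout and ExecutionContext in scope
    Http().bindAndHandle(TopicApi.route(subscribeActor), "0.0.0.0", 9911)
  }
}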