akka.cluster.Cluster Scala Examples

The following examples show how to use akka.cluster.Cluster. They are collected from open-source projects; the source file and originating project for each example are noted above it.
Example 1
Source File: HelloAkka.scala (from sbt-reactive-app, Apache License 2.0)
package hello.akka

import akka.cluster.Cluster
import akka.cluster.ClusterEvent._
import akka.actor.{ Actor, ActorSystem, Props }
import akka.discovery._
import com.typesafe.config.ConfigFactory

final case class Greet(name: String)

class GreeterActor extends Actor {
  val cluster = Cluster(context.system)

  override def preStart(): Unit = {
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
      classOf[MemberEvent], classOf[UnreachableMember])
  }

  override def postStop(): Unit = {
    cluster.unsubscribe(self)
  }

  def receive = {
    case Greet(name) =>
      println(s"Hello, $name")
    case MemberUp(member) =>
      println(s"Member up: $member")
    case MemberRemoved(member, previousStatus) =>
      println(s"Member down: $member")
    case _: MemberEvent => // ignore
  }
}

object HelloAkka {
  def main(args: Array[String]): Unit = {
    startup()
  }

  def startup(): Unit = {
    val system = ActorSystem("ClusterSystem")
    // look up the discovery mechanism; the value is not used further in this example
    val discovery = ServiceDiscovery(system).discovery
    val actor = system.actorOf(Props[GreeterActor], name = "GreeterActor")

    actor ! Greet("[unnamed]")
  }
} 
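Note: to actually see MemberUp events, the node must run with the cluster actor provider and join a cluster. A hedged sketch of a startup() variant that forms a single-node cluster for local experimentation (the configuration keys mirror those used in Example 23; the values are illustrative assumptions, not part of the original project):

  def startupLocal(): Unit = {
    val config = ConfigFactory.parseString(
      """
        |akka.actor.provider = cluster
        |akka.remote.artery.canonical.hostname = 127.0.0.1
        |akka.remote.artery.canonical.port = 0
      """.stripMargin)
    val system = ActorSystem("ClusterSystem", config)
    val cluster = Cluster(system)
    // join ourselves so the single node forms a cluster and MemberUp fires
    cluster.join(cluster.selfAddress)
    system.actorOf(Props[GreeterActor], name = "GreeterActor") ! Greet("Akka")
  }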
Example 2
Source File: DemoApp.scala (from sbt-reactive-app, Apache License 2.0)
package foo

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.stream.ActorMaterializer

object DemoApp extends App {

  implicit val system = ActorSystem("Appka")

  import system.log
  implicit val mat = ActorMaterializer()
  val cluster = Cluster(system)

  log.info(s"Started [$system], cluster.selfAddress = ${cluster.selfAddress}")
  log.info("something2")
  //#start-akka-management
  AkkaManagement(system).start()
  //#start-akka-management
  ClusterBootstrap(system).start()

  cluster.subscribe(
    system.actorOf(Props[ClusterWatcher]),
    ClusterEvent.InitialStateAsEvents,
    classOf[ClusterDomainEvent])

  // add real app routes here
  val routes =
    path("hello") {
      get {
        complete(
          HttpEntity(
            ContentTypes.`text/html(UTF-8)`,
            "<h1>Hello</h1>"))
      }
    }
  Http().bindAndHandle(routes, "0.0.0.0", 8080)

  Cluster(system).registerOnMemberUp({
    log.info("Cluster member is up!")
  })

}

class ClusterWatcher extends Actor with ActorLogging {
  val cluster = Cluster(context.system)

  override def receive = {
    case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
  }
} 
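ClusterBootstrap.start() discovers peer nodes via Akka Discovery, so it only works with matching configuration. A hedged sketch of the kind of settings it relies on (key names follow akka-management conventions; the service name and discovery method are illustrative assumptions):

  import com.typesafe.config.ConfigFactory

  val bootstrapConfig = ConfigFactory.parseString(
    """
      |akka.management.cluster.bootstrap.contact-point-discovery {
      |  service-name     = "appka"
      |  discovery-method = kubernetes-api
      |}
    """.stripMargin)
  // pass it when creating the ActorSystem, e.g.
  // ActorSystem("Appka", bootstrapConfig.withFallback(ConfigFactory.load()))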
Example 3
Source File: RoleLeaderAutoDowningRoles.scala (from akka-cluster-custom-downing, Apache License 2.0)
package tanukki.akka.cluster.autodown

import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config

import scala.collection.JavaConverters._
import scala.concurrent.duration.{FiniteDuration, _}

final class RoleLeaderAutoDowningRoles(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  private val config: Config = system.settings.config

  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  override def downingActorProps: Option[Props] = {
    val stableAfter = system.settings.config.getDuration("custom-downing.stable-after").toMillis millis
    val leaderRole = system.settings.config.getString("custom-downing.role-leader-auto-downing-roles.leader-role")
    val roles = system.settings.config.getStringList("custom-downing.role-leader-auto-downing-roles.target-roles").asScala.toSet
    if (roles.isEmpty) None else Some(RoleLeaderAutoDownRoles.props(leaderRole, roles, stableAfter))
  }
}


private[autodown] object RoleLeaderAutoDownRoles {
  def props(leaderRole: String, targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration): Props = Props(classOf[RoleLeaderAutoDownRoles], leaderRole, targetRoles, autoDownUnreachableAfter)
}

private[autodown] class RoleLeaderAutoDownRoles(leaderRole: String, targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration)
  extends RoleLeaderAutoDownRolesBase(leaderRole, targetRoles, autoDownUnreachableAfter) with ClusterCustomDowning {

  override def down(node: Address): Unit = {
    log.info("RoleLeader is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }
} 
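Akka activates a DowningProvider through the akka.cluster.downing-provider-class setting. A hedged configuration sketch for wiring this provider, built from the keys the code above reads (the values are illustrative assumptions); Examples 5 through 8 follow the same pattern with their own custom-downing.* subkeys:

  import com.typesafe.config.ConfigFactory

  val downingConfig = ConfigFactory.parseString(
    """
      |akka.cluster.downing-provider-class =
      |  "tanukki.akka.cluster.autodown.RoleLeaderAutoDowningRoles"
      |custom-downing {
      |  stable-after = 20s
      |  down-removal-margin = off
      |  role-leader-auto-downing-roles {
      |    leader-role  = "master"
      |    target-roles = ["worker"]
      |  }
      |}
    """.stripMargin)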
Example 4
Source File: ClusterCustomDowning.scala (from akka-cluster-custom-downing, Apache License 2.0)
package tanukki.akka.cluster.autodown

import akka.actor.{Address, ActorLogging, Scheduler}
import akka.cluster.Cluster
import akka.cluster.ClusterEvent.ClusterDomainEvent
import scala.concurrent.duration._

trait ClusterCustomDowning extends ActorLogging { base: CustomAutoDownBase =>

  val cluster = Cluster(context.system)

  override def selfAddress: Address = cluster.selfAddress

  override def scheduler: Scheduler = {
    if (context.system.scheduler.maxFrequency < 1.second / cluster.settings.SchedulerTickDuration) {
      log.warning("CustomDowning does not use a cluster dedicated scheduler. Cluster will use a dedicated scheduler if configured " +
        "with 'akka.scheduler.tick-duration' [{} ms] >  'akka.cluster.scheduler.tick-duration' [{} ms].",
        (1000 / context.system.scheduler.maxFrequency).toInt, cluster.settings.SchedulerTickDuration.toMillis)
    }
    context.system.scheduler
  }

  override def preStart(): Unit = {
    cluster.subscribe(self, classOf[ClusterDomainEvent])
  }
  override def postStop(): Unit = {
    cluster.unsubscribe(self)
  }
} 
Example 5
Source File: OldestAutoDowning.scala (from akka-cluster-custom-downing, Apache License 2.0)
package tanukki.akka.cluster.autodown

import akka.ConfigurationException
import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config

import scala.concurrent.Await
import scala.concurrent.duration._

class OldestAutoDowning(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  private val config: Config = system.settings.config

  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  override def downingActorProps: Option[Props] = {
    val stableAfter = system.settings.config.getDuration("custom-downing.stable-after").toMillis millis
    val oldestMemberRole = {
      val r = system.settings.config.getString("custom-downing.oldest-auto-downing.oldest-member-role")
      if (r.isEmpty) None else Some(r)
    }
    val downIfAlone = system.settings.config.getBoolean("custom-downing.oldest-auto-downing.down-if-alone")
    val shutdownActorSystem = system.settings.config.getBoolean("custom-downing.oldest-auto-downing.shutdown-actor-system-on-resolution")
    if (stableAfter == Duration.Zero && downIfAlone) throw new ConfigurationException("If you set down-if-alone=true, stable-after timeout must be greater than zero.")
    else {
      Some(OldestAutoDown.props(oldestMemberRole, downIfAlone, shutdownActorSystem, stableAfter))
    }
  }
}

private[autodown] object OldestAutoDown {
  def props(oldestMemberRole: Option[String], downIfAlone: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration): Props =
    Props(classOf[OldestAutoDown], oldestMemberRole, downIfAlone, shutdownActorSystem, autoDownUnreachableAfter)
}

private[autodown] class OldestAutoDown(oldestMemberRole: Option[String], downIfAlone: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration)
  extends OldestAutoDownBase(oldestMemberRole, downIfAlone, autoDownUnreachableAfter) with ClusterCustomDowning {

  override def down(node: Address): Unit = {
    log.info("Oldest is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }

  override def shutdownSelf(): Unit = {
    if (shutdownActorSystem) {
      Await.result(context.system.terminate(), 10 seconds)
    } else {
      throw new SplitBrainResolvedError("OldestAutoDowning")
    }
  }
} 
Example 6
Source File: MajorityLeaderAutoDowning.scala (from akka-cluster-custom-downing, Apache License 2.0)
package tanukki.akka.cluster.autodown

import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config

import scala.concurrent.Await
import scala.concurrent.duration._

class MajorityLeaderAutoDowning(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  private val config: Config = system.settings.config

  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  override def downingActorProps: Option[Props] = {
    val stableAfter = config.getDuration("custom-downing.stable-after").toMillis millis
    val majorityMemberRole = {
      val r = config.getString("custom-downing.majority-leader-auto-downing.majority-member-role")
      if (r.isEmpty) None else Some(r)
    }
    val downIfInMinority = config.getBoolean("custom-downing.majority-leader-auto-downing.down-if-in-minority")
    val shutdownActorSystem = config.getBoolean("custom-downing.majority-leader-auto-downing.shutdown-actor-system-on-resolution")
    Some(MajorityLeaderAutoDown.props(majorityMemberRole, downIfInMinority, shutdownActorSystem, stableAfter))
  }
}

private[autodown] object MajorityLeaderAutoDown {
  def props(majorityMemberRole: Option[String], downIfInMinority: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration): Props =
    Props(classOf[MajorityLeaderAutoDown], majorityMemberRole, downIfInMinority, shutdownActorSystem, autoDownUnreachableAfter)
}

private[autodown] class MajorityLeaderAutoDown(majorityMemberRole: Option[String], downIfInMinority: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration)
  extends MajorityLeaderAutoDownBase(majorityMemberRole, downIfInMinority, autoDownUnreachableAfter) with ClusterCustomDowning {

  override def down(node: Address): Unit = {
    log.info("Majority is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }

  override def shutdownSelf(): Unit = {
    if (shutdownActorSystem) {
      Await.result(context.system.terminate(), 10 seconds)
    } else {
      throw new SplitBrainResolvedError("MajorityAutoDowning")
    }
  }
} 
Example 7
Source File: QuorumLeaderAutoDowning.scala (from akka-cluster-custom-downing, Apache License 2.0)
package tanukki.akka.cluster.autodown

import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config

import scala.concurrent.Await
import scala.concurrent.duration._

class QuorumLeaderAutoDowning(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  private val config: Config = system.settings.config

  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  override def downingActorProps: Option[Props] = {
    val stableAfter = system.settings.config.getDuration("custom-downing.stable-after").toMillis millis
    val role = {
      val r = system.settings.config.getString("custom-downing.quorum-leader-auto-downing.role")
      if (r.isEmpty) None else Some(r)
    }
    val quorumSize = system.settings.config.getInt("custom-downing.quorum-leader-auto-downing.quorum-size")
    val downIfOutOfQuorum = system.settings.config.getBoolean("custom-downing.quorum-leader-auto-downing.down-if-out-of-quorum")
    val shutdownActorSystem = system.settings.config.getBoolean("custom-downing.quorum-leader-auto-downing.shutdown-actor-system-on-resolution")
    Some(QuorumLeaderAutoDown.props(role, quorumSize, downIfOutOfQuorum, shutdownActorSystem, stableAfter))
  }
}


private[autodown] object QuorumLeaderAutoDown {
  def props(quorumRole: Option[String], quorumSize: Int, downIfOutOfQuorum: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration): Props =
    Props(classOf[QuorumLeaderAutoDown], quorumRole, quorumSize, downIfOutOfQuorum, shutdownActorSystem, autoDownUnreachableAfter)
}

private[autodown] class QuorumLeaderAutoDown(quorumRole: Option[String], quorumSize: Int, downIfOutOfQuorum: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration)
  extends QuorumLeaderAutoDownBase(quorumRole, quorumSize, downIfOutOfQuorum, autoDownUnreachableAfter) with ClusterCustomDowning {

  override def down(node: Address): Unit = {
    log.info("Quorum leader is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }

  override def shutdownSelf(): Unit = {
    if (shutdownActorSystem) {
      Await.result(context.system.terminate(), 10 seconds)
    } else {
      throw new SplitBrainResolvedError("QuorumLeaderAutoDowning")
    }
  }
} 
Example 8
Source File: LeaderAutoDowningRoles.scala (from akka-cluster-custom-downing, Apache License 2.0)
package tanukki.akka.cluster.autodown

import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config

import scala.collection.JavaConverters._
import scala.concurrent.duration.{FiniteDuration, _}

final class LeaderAutoDowningRoles(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  private val config: Config = system.settings.config

  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  override def downingActorProps: Option[Props] = {
    val stableAfter = system.settings.config.getDuration("custom-downing.stable-after").toMillis millis
    val roles = system.settings.config.getStringList("custom-downing.leader-auto-downing-roles.target-roles").asScala.toSet
    if (roles.isEmpty) None else Some(LeaderAutoDownRoles.props(roles, stableAfter))
  }
}


private[autodown] object LeaderAutoDownRoles {
  def props(targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration): Props = Props(classOf[LeaderAutoDownRoles], targetRoles, autoDownUnreachableAfter)
}

private[autodown] class LeaderAutoDownRoles(targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration)
  extends LeaderAutoDownRolesBase(targetRoles, autoDownUnreachableAfter) with ClusterCustomDowning {

  override def down(node: Address): Unit = {
    log.info("Leader is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }
} 
Example 9
Source File: SimpleClusterListener.scala (from constructr-consul, Apache License 2.0)
package com.tecsisa.constructr.coordination
package demo

import akka.actor.{ Actor, ActorLogging, Address, Props }
import akka.cluster.ClusterEvent.{ MemberEvent, MemberJoined, MemberRemoved, MemberUp, UnreachableMember }
import akka.cluster.Cluster

object SimpleClusterListener {

  case object GetMemberNodes

  final val Name = "clusterListener"

  def props: Props = Props(new SimpleClusterListener)
}

class SimpleClusterListener extends Actor with ActorLogging {
  import SimpleClusterListener._

  val cluster = Cluster(context.system)

  private var members = Set.empty[Address]

  override def preStart(): Unit =
    cluster.subscribe(self, classOf[MemberEvent], classOf[UnreachableMember])

  override def postStop(): Unit = cluster.unsubscribe(self)

  override def receive = {
    case GetMemberNodes =>
      sender() ! members
    case MemberJoined(member) =>
      log.info("Member joined: {}", member.address)
      members += member.address
    case MemberUp(member) =>
      log.info("Member up: {}", member.address)
      members += member.address
    case MemberRemoved(member, _) =>
      log.info("Member removed: {}", member.address)
      members -= member.address
  }
} 
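Because the listener replies to GetMemberNodes with its current Set[Address], it can be queried with the ask pattern. A hedged usage sketch (the timeout value is illustrative):

  import akka.actor.{ActorSystem, Address}
  import akka.pattern.ask
  import akka.util.Timeout
  import scala.concurrent.Await
  import scala.concurrent.duration._

  implicit val timeout: Timeout = Timeout(3.seconds)
  val system   = ActorSystem("ClusterSystem")
  val listener = system.actorOf(SimpleClusterListener.props, SimpleClusterListener.Name)
  val members  = Await.result(
    (listener ? SimpleClusterListener.GetMemberNodes).mapTo[Set[Address]],
    timeout.duration)
  println(s"Current member nodes: $members")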
Example 10
Source File: StorageNodeActor.scala (from JustinDB, Apache License 2.0)
package justin.db.actors

import akka.actor.{Actor, ActorRef, Props, RootActorPath, Terminated}
import akka.cluster.ClusterEvent.{CurrentClusterState, MemberUp}
import akka.cluster.{Cluster, Member, MemberStatus}
import com.typesafe.scalalogging.StrictLogging
import justin.db.actors.protocol.{RegisterNode, _}
import justin.db.cluster.ClusterMembers
import justin.db.cluster.datacenter.Datacenter
import justin.db.consistenthashing.{NodeId, Ring}
import justin.db.replica._
import justin.db.replica.read.{ReplicaLocalReader, ReplicaReadCoordinator, ReplicaRemoteReader}
import justin.db.replica.write.{ReplicaLocalWriter, ReplicaRemoteWriter, ReplicaWriteCoordinator}
import justin.db.storage.PluggableStorageProtocol

import scala.concurrent.ExecutionContext

class StorageNodeActor(nodeId: NodeId, datacenter: Datacenter, storage: PluggableStorageProtocol, ring: Ring, n: N) extends Actor with StrictLogging {

  private[this] implicit val ec: ExecutionContext = context.dispatcher
  private[this] val cluster = Cluster(context.system)

  private[this] var clusterMembers   = ClusterMembers.empty
  private[this] val readCoordinator  = new ReplicaReadCoordinator(nodeId, ring, n, new ReplicaLocalReader(storage), new ReplicaRemoteReader)
  private[this] val writeCoordinator = new ReplicaWriteCoordinator(nodeId, ring, n, new ReplicaLocalWriter(storage), new ReplicaRemoteWriter)

  private[this] val coordinatorRouter = context.actorOf(
    props = RoundRobinCoordinatorRouter.props(readCoordinator, writeCoordinator),
    name  = RoundRobinCoordinatorRouter.routerName
  )

  private[this] val name = self.path.name

  override def preStart(): Unit = cluster.subscribe(this.self, classOf[MemberUp])
  override def postStop(): Unit = cluster.unsubscribe(this.self)

  def receive: Receive = {
    receiveDataPF orElse receiveClusterDataPF orElse receiveRegisterNodePR orElse notHandledPF
  }

  private[this] def receiveDataPF: Receive = {
    case readReq: StorageNodeReadRequest              =>
      coordinatorRouter ! ReadData(sender(), clusterMembers, readReq)
    case writeLocalDataReq: StorageNodeWriteDataLocal =>
      coordinatorRouter ! WriteData(sender(), clusterMembers, writeLocalDataReq)
    case writeClientReplicaReq: Internal.WriteReplica =>
      coordinatorRouter ! WriteData(sender(), clusterMembers, writeClientReplicaReq)
  }

  private[this] def receiveClusterDataPF: Receive = {
    case "members"                  => sender() ! clusterMembers
    case MemberUp(member)           => register(nodeId, ring, member)
    case state: CurrentClusterState => state.members.filter(_.status == MemberStatus.Up).foreach(member => register(nodeId, ring, member))
    case Terminated(actorRef)       => clusterMembers = clusterMembers.removeByRef(StorageNodeActorRef(actorRef))
  }

  private[this] def receiveRegisterNodePR: Receive = {
    case RegisterNode(senderNodeId) if clusterMembers.notContains(senderNodeId) =>
      val senderRef = sender()
      context.watch(senderRef)
      clusterMembers = clusterMembers.add(senderNodeId, StorageNodeActorRef(senderRef))
      senderRef ! RegisterNode(nodeId)
      logger.info(s"Actor[$name]: Successfully registered node [id-${senderNodeId.id}]")
    case RegisterNode(senderNodeId) =>
      logger.info(s"Actor[$name]: Node [id-${senderNodeId.id}] is already registered")
  }

  private[this] def register(nodeId: NodeId, ring: Ring, member: Member) = {
    (member.hasRole(StorageNodeActor.role), datacenter.name == member.dataCenter) match {
      case (true, true) => register()
      case (_,   false) => logger.info(s"Actor[$name]: $member doesn't belong to datacenter [${datacenter.name}]")
      case (false,   _) => logger.info(s"Actor[$name]: $member doesn't have [${StorageNodeActor.role}] role (it has roles ${member.roles})")
    }

    def register() = for {
      ringNodeId    <- ring.nodesId
      nodeName       = StorageNodeActor.name(ringNodeId, Datacenter(member.dataCenter))
      nodeRef        = context.actorSelection(RootActorPath(member.address) / "user" / nodeName)
    } yield nodeRef ! RegisterNode(nodeId)
  }

  private[this] def notHandledPF: Receive = {
    case t => logger.warn(s"Actor[$name]: Not handled message [$t]")
  }
}

object StorageNodeActor {
  def role: String = "storagenode"
  def name(nodeId: NodeId, datacenter: Datacenter): String = s"${datacenter.name}-id-${nodeId.id}"
  def props(nodeId: NodeId, datacenter: Datacenter, storage: PluggableStorageProtocol, ring: Ring, n: N): Props = {
    Props(new StorageNodeActor(nodeId, datacenter, storage, ring, n))
  }
}

case class StorageNodeActorRef(ref: ActorRef) extends AnyVal 
Example 11
Source File: JustinDB.scala (from JustinDB, Apache License 2.0)
package justin.db

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.cluster.http.management.ClusterHttpManagement
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.stream.{ActorMaterializer, Materializer}
import buildinfo.BuildInfo
import com.typesafe.scalalogging.StrictLogging
import justin.db.actors.{StorageNodeActor, StorageNodeActorRef}
import justin.db.client.ActorRefStorageNodeClient
import justin.db.cluster.datacenter.Datacenter
import justin.db.consistenthashing.{NodeId, Ring}
import justin.db.replica.N
import justin.db.storage.PluggableStorageProtocol
import justin.db.storage.provider.StorageProvider
import justin.httpapi.{BuildInfoRouter, HealthCheckRouter, HttpRouter}

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Promise}
import scala.language.reflectiveCalls

// $COVERAGE-OFF$
final class JustinDB

object JustinDB extends StrictLogging {

  private[this] def validConfiguration(justinDBConfig: JustinDBConfig): Unit = {
    require(justinDBConfig.replication.N > 0, "replication factor N must be greater than 0")
    require(justinDBConfig.ring.`members-count` > 0, "members-count must be greater than 0")
    require(justinDBConfig.ring.partitions > 0, "ring partitions must be greater than 0")
    require(justinDBConfig.ring.partitions >= justinDBConfig.ring.`members-count`, "number of ring partitions can't be smaller than members-count")
    require(justinDBConfig.replication.N <= justinDBConfig.ring.`members-count`, "replication factor N can't be bigger than members-count")
  }

  private[this] def initStorage(justinConfig: JustinDBConfig) = {
    val provider = StorageProvider.apply(justinConfig.storage.provider)
    logger.info("Storage provider: " + provider.name)
    provider.init
  }

  def init(justinConfig: JustinDBConfig)(implicit actorSystem: ActorSystem): JustinDB = {
    validConfiguration(justinConfig)

    val processOrchestrator = Promise[JustinDB]

    implicit val executor: ExecutionContext = actorSystem.dispatcher
    implicit val materializer: Materializer = ActorMaterializer()

    val storage: PluggableStorageProtocol = initStorage(justinConfig)

    val cluster = Cluster(actorSystem)

    cluster.registerOnMemberUp {
      // STORAGE ACTOR
      val storageNodeActorRef = StorageNodeActorRef {
        val nodeId     = NodeId(justinConfig.`kubernetes-hostname`.split("-").last.toInt)
        val ring       = Ring(justinConfig.ring.`members-count`, justinConfig.ring.partitions)
        val n          = N(justinConfig.replication.N)
        val datacenter = Datacenter(justinConfig.dc.`self-data-center`)

        actorSystem.actorOf(
          props = StorageNodeActor.props(nodeId, datacenter, storage, ring, n),
          name  = StorageNodeActor.name(nodeId, datacenter)
        )
      }

      // AKKA-MANAGEMENT
      ClusterHttpManagement(cluster).start().map { _ =>
        logger.info("Cluster HTTP-Management is ready!")
      }.recover { case ex => processOrchestrator.failure(ex) }

      // HTTP API
      val routes = logRequestResult(actorSystem.name) {
        new HttpRouter(new ActorRefStorageNodeClient(storageNodeActorRef)).routes ~
          new HealthCheckRouter().routes ~
          new BuildInfoRouter().routes(BuildInfo.toJson)
      }
      Http()
        .bindAndHandle(routes, justinConfig.http.interface, justinConfig.http.port)
        .map { binding => logger.info(s"HTTP server started at ${binding.localAddress}"); processOrchestrator.trySuccess(new JustinDB) }
        .recover { case ex => logger.error("Could not start HTTP server", ex); processOrchestrator.failure(ex) }
    }

    Await.result(processOrchestrator.future, 2.minutes)
  }
}
// $COVERAGE-ON$ 
Example 12
Source File: MultiNodeClusterSpec.scala (from JustinDB, Apache License 2.0)
package justin.db

import akka.cluster.Cluster
import akka.remote.testkit.MultiNodeSpec
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.Suite

object MultiNodeClusterSpec {

  val commonBaseConfig: Config = ConfigFactory.parseString(
    s"""
       |akka.loglevel = INFO
       |akka.log-config-on-start = false
       |akka.log-dead-letters = off
       |akka.log-dead-letters-during-shutdown = off
       |akka.remote.log-remote-lifecycle-events = off
    """.stripMargin
  )
}

trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec { self: MultiNodeSpec ⇒

  def cluster: Cluster = Cluster(system)

  def initialParticipants: Int = roles.size
} 
Example 13
Source File: ClusterListener.scala (from scalachain, MIT License)
package com.elleflorio.scalachain.cluster

import akka.actor.{Actor, ActorLogging, Props}
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._

object ClusterListener {
  def props(nodeId: String, cluster: Cluster) = Props(new ClusterListener(nodeId, cluster))
}

class ClusterListener(nodeId: String, cluster: Cluster) extends Actor with ActorLogging {

  override def preStart(): Unit = {
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
      classOf[MemberEvent], classOf[UnreachableMember])
  }

  override def postStop(): Unit = cluster.unsubscribe(self)

  def receive = {
    case MemberUp(member) =>
      log.info("Node {} - Member is Up: {}", nodeId, member.address)
    case UnreachableMember(member) =>
      log.info(s"Node {} - Member detected as unreachable: {}", nodeId, member)
    case MemberRemoved(member, previousStatus) =>
      log.info(s"Node {} - Member is Removed: {} after {}",
        nodeId, member.address, previousStatus)
    case _: MemberEvent => // ignore
  }
} 
Example 14
Source File: ClusterManager.scala (from scalachain, MIT License)
package com.elleflorio.scalachain.cluster

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.{Cluster, MemberStatus}
import com.elleflorio.scalachain.cluster.ClusterManager.GetMembers

object ClusterManager {

  sealed trait ClusterMessage
  case object GetMembers extends ClusterMessage

  def props(nodeId: String) = Props(new ClusterManager(nodeId))
}

class ClusterManager(nodeId: String) extends Actor with ActorLogging {

  val cluster: Cluster = Cluster(context.system)
  val listener: ActorRef = context.actorOf(ClusterListener.props(nodeId, cluster), "clusterListener")

  override def receive: Receive = {
    case GetMembers => {
      sender() ! cluster.state.members.filter(_.status == MemberStatus.up)
        .map(_.address.toString)
        .toList
    }
  }
} 
Example 15
Source File: Main.scala (from 006877, MIT License)
package aia.cluster
package words

import com.typesafe.config.ConfigFactory
import akka.actor.{Props, ActorSystem}
import akka.cluster.Cluster

import JobReceptionist.JobRequest

object Main extends App {
  val config = ConfigFactory.load()
  val system = ActorSystem("words", config)

  println(s"Starting node with roles: ${Cluster(system).selfRoles}")

  if(system.settings.config.getStringList("akka.cluster.roles").contains("master")) {
    Cluster(system).registerOnMemberUp {
      val receptionist = system.actorOf(Props[JobReceptionist], "receptionist")
      println("Master node is ready.")

      val text = List("this is a test", "of some very naive word counting", "but what can you say", "it is what it is")
      receptionist ! JobRequest("the first job", (1 to 100000).flatMap(i => text ++ text).toList)
      system.actorOf(Props(new ClusterDomainEventListener), "cluster-listener")
    }
  }
} 
Example 16
Source File: ClusterDomainEventListener.scala (from 006877, MIT License)
package aia.cluster.words

import akka.actor.{ActorLogging, Actor}

import akka.cluster.{MemberStatus, Cluster}
import akka.cluster.ClusterEvent._

class ClusterDomainEventListener extends Actor
                    with ActorLogging {
  Cluster(context.system).subscribe(self, classOf[ClusterDomainEvent])

  def receive = {
    case MemberUp(member) =>
      log.info(s"$member UP.")
    case MemberExited(member) =>
      log.info(s"$member EXITED.")
    case MemberRemoved(member, previousState) =>
      if (previousState == MemberStatus.Exiting) {
        log.info(s"Member $member previously gracefully exited, REMOVED.")
      } else {
        log.info(s"$member previously downed after unreachable, REMOVED.")
      }
    case UnreachableMember(member) =>
      log.info(s"$member UNREACHABLE")
    case ReachableMember(member) =>
      log.info(s"$member REACHABLE")
    case state: CurrentClusterState =>
      log.info(s"Current state of the cluster: $state")
  }
  override def postStop(): Unit = {
    Cluster(context.system).unsubscribe(self)
    super.postStop()
  }
} 
Example 17
Source File: WordsClusterSpec.scala (from 006877, MIT License)
package aia.cluster
package words

import scala.concurrent.duration._

import akka.actor.Props
import akka.cluster.Cluster
import akka.cluster.ClusterEvent.{CurrentClusterState, MemberUp}

import akka.testkit.ImplicitSender
import akka.remote.testkit.MultiNodeSpec
import JobReceptionist._


class WordsClusterSpecMultiJvmNode1 extends WordsClusterSpec
class WordsClusterSpecMultiJvmNode2 extends WordsClusterSpec
class WordsClusterSpecMultiJvmNode3 extends WordsClusterSpec
class WordsClusterSpecMultiJvmNode4 extends WordsClusterSpec

class WordsClusterSpec extends MultiNodeSpec(WordsClusterSpecConfig)
with STMultiNodeSpec with ImplicitSender {

  import WordsClusterSpecConfig._

  def initialParticipants = roles.size

  val seedAddress = node(seed).address
  val masterAddress = node(master).address
  val worker1Address = node(worker1).address
  val worker2Address = node(worker2).address

  muteDeadLetters(classOf[Any])(system)

  "A Words cluster" must {

    "form the cluster" in within(10 seconds) {

      Cluster(system).subscribe(testActor, classOf[MemberUp])
      expectMsgClass(classOf[CurrentClusterState])


      Cluster(system).join(seedAddress)

      receiveN(4).map { case MemberUp(m) => m.address }.toSet must be(
        Set(seedAddress, masterAddress, worker1Address, worker2Address))

      Cluster(system).unsubscribe(testActor)

      enterBarrier("cluster-up")
    }

    "execute a words job once the cluster is running" in within(10 seconds) {
      runOn(master) {
        val receptionist = system.actorOf(Props[JobReceptionist], "receptionist")
        receptionist ! JobRequest("job-1", List("some", "some very long text", "some long text"))
        expectMsg(JobSuccess("job-1", Map("some" -> 3, "very" -> 1, "long" -> 2, "text" -> 2)))
      }
      enterBarrier("job-done")
    }

    "continue to process a job when failures occur" in within(10 seconds) {
      runOn(master) {
        val receptionist = system.actorSelection("/user/receptionist")
        receptionist ! JobRequest("job-2", List("some", "FAIL", "some very long text", "some long text"))
        expectMsg(JobSuccess("job-2", Map("some" -> 3, "very" -> 1, "long" -> 2, "text" -> 2)))
      }
      enterBarrier("job-done")
    }
  }
} 
Example 18
Source File: GatherActor.scala (from typebus, MIT License)
package io.surfkit.typebus.actors

import java.io.{PrintWriter, StringWriter}
import java.time.Instant
import java.util.UUID

import akka.actor._
import akka.cluster.Cluster
import akka.util.Timeout
import io.surfkit.typebus.bus.Publisher
import io.surfkit.typebus.{AvroByteStreams, ByteStreamReader, ByteStreamWriter}
import io.surfkit.typebus.event._

import scala.reflect.ClassTag
import scala.util.Try

object GatherActor{
  //def props[T, U](producer: Publisher[T], timeout: Timeout, writer: ByteStreamWriter[T], reader: ByteStreamReader[U]): Props = Props(classOf[GatherActor[T, U]], producer, timeout, writer)

  // Request wrapper matched in GatherActor's receive below; the original
  // definition was dropped during extraction, so it is restored here.
  case class Request[T](data: T)
}

class GatherActor[T : ClassTag, U : ClassTag](serviceIdentifier: ServiceIdentifier, producer: Publisher, timeout: Timeout, writer: ByteStreamWriter[T], reader: ByteStreamReader[U]) extends Actor with ActorLogging with AvroByteStreams{
  implicit val system = context.system
  import system.dispatcher
  
  val cluster = Cluster(context.system)
  val correlationId = UUID.randomUUID().toString

  log.debug(s"adding http actor ${self.path.toStringWithoutAddress}")
  def clusterPath = s"${cluster.selfAddress}${self.path.toStringWithoutAddress}"
  var replyTo:ActorRef = null

  val cancel = context.system.scheduler.scheduleOnce(timeout.duration){
    log.warning("GatherActor timeout ... shutting down!")
    context.stop(self)
  }

  def receive = {
    case msg: GatherActor.Request[T] =>
      replyTo = context.sender()
      val meta =  EventMeta(
        eventId = UUID.randomUUID().toString,
        eventType = EventType.parse(msg.data.getClass.getCanonicalName),
        directReply = Some(RpcClient(clusterPath, serviceIdentifier)),
        correlationId = Some(correlationId)
      )
      try {
        log.info(s"[GatherActor] publish ${msg.data}")
        val outEvent = PublishedEvent(
          meta = meta,
          payload = writer.write(msg.data)
        )
        producer.publish(outEvent)
        producer.traceEvent(OutEventTrace(producer.serviceIdentifier, outEvent), outEvent.meta)
      }catch{
        case t:Exception =>
          producer.produceErrorReport(t,meta)
          cancel.cancel()
          context.stop(self)
      }

    case x:PublishedEvent =>
      log.info(s"GatherActor got reply.... ${x.meta.eventType}")
      try{
        log.info(s"GatherActor try to deserialize reader: ${reader} for type: ${x.meta.eventType}")
        val responsePayload = Try(reader.read(x.payload)).toOption.getOrElse( ServiceExceptionReader.read(x.payload) )
        replyTo ! responsePayload
        producer.traceEvent(InEventTrace(producer.serviceIdentifier, x), x.meta)
      }catch{
        case t: Throwable =>
          t.printStackTrace()
          val tType = scala.reflect.classTag[T].runtimeClass.getCanonicalName
          log.error(s"Gather actor failed to reply for response: ${tType}", t)
          replyTo ! producer.produceErrorReport(t, x.meta)
      }finally {
        cancel.cancel()
        context.stop(self)
      }

    case x =>
      log.warning(s"GatherActor got Wrong message type ${x.getClass.getSimpleName}")
      cancel.cancel()
      context.stop(self)

  }

  override def postStop() {
    log.debug(s"GatherActor ACTOR STOP !!! ${self.path.toStringWithoutAddress}")
  }

} 
Example 19
Source File: DistributedDataApplication.scala (from Akka-Cookbook, MIT License)
package com.packt.chapter7

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.cluster.ddata.Replicator.{ReadFrom, ReadMajority}
import akka.pattern.ask
import akka.util.Timeout
import com.packt.chapter7.SubscriptionManager._

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Random

object DistributedDataApplication extends App {
  val actorSystem = ActorSystem("ClusterSystem")

  Cluster(actorSystem).registerOnMemberUp {
    val subscriptionManager = actorSystem.actorOf(SubscriptionManager.props)

    val subscription = Subscription(Random.nextInt(3), Cluster(actorSystem).selfUniqueAddress.toString, System.currentTimeMillis())
    subscriptionManager ! AddSubscription(subscription)

    //Let's simulate some time has passed. Never use Thread.sleep in production!
    Thread.sleep(10000)

    implicit val timeout = Timeout(5 seconds)

    val readMajority = ReadMajority(timeout = 5.seconds)
    val readFrom = ReadFrom(n = 2, timeout = 5.second)

    Await.result(subscriptionManager ? GetSubscriptions(readMajority), 5 seconds) match {
      case GetSubscriptionsSuccess(subscriptions) =>
        println(s"The current set of subscriptions is $subscriptions")
      case GetSubscriptionsFailure =>
        println(s"Subscription manager was not able to get subscriptions successfully.")
    }

    subscriptionManager ! RemoveSubscription(subscription)

    Await.result(subscriptionManager ? GetSubscriptions(readFrom), 5 seconds) match {
      case GetSubscriptionsSuccess(subscriptions) =>
        println(s"The current set of subscriptions is $subscriptions")
      case GetSubscriptionsFailure =>
        println(s"Subscription manager was not able to get subscriptions successfully.")
    }
  }
} 
Example 20
Source File: SubscriptionManager.scala (from Akka-Cookbook, MIT License)
package com.packt.chapter7

import akka.actor.{Actor, ActorRef, Props}
import akka.cluster.Cluster
import akka.cluster.ddata._
import akka.cluster.ddata.Replicator._

object SubscriptionManager {
  case class Subscription(id: Int, origin: String, creationTimestamp: Long)

  case class AddSubscription(subscription: Subscription)
  case class RemoveSubscription(subscription: Subscription)
  case class GetSubscriptions(consistency: ReadConsistency)

  trait GetSubscriptionsResult
  case class GetSubscriptionsSuccess(subscriptions: Set[Subscription]) extends GetSubscriptionsResult
  case object GetSubscriptionsFailure extends GetSubscriptionsResult

  def props = Props(new SubscriptionManager())

  val subscriptionKey = "subscription_key"
}

class SubscriptionManager extends Actor {
  import SubscriptionManager._

  val replicator = DistributedData(context.system).replicator
  implicit val node = Cluster(context.system)

  private val DataKey = ORSetKey[Subscription](subscriptionKey)
  replicator ! Subscribe(DataKey, self)

  def receive = {
    case AddSubscription(subscription) =>
      println(s"Adding: $subscription")
      replicator ! Update(DataKey, ORSet.empty[Subscription], WriteLocal)(_ + subscription)

    case RemoveSubscription(subscription) =>
      println(s"Removing $subscription")
      replicator ! Update(DataKey, ORSet.empty[Subscription], WriteLocal)(_ - subscription)

    case GetSubscriptions(consistency) =>
      replicator ! Get(DataKey, consistency, request = Some(sender()))
    case g @ GetSuccess(DataKey, Some(replyTo: ActorRef)) =>
      val value = g.get(DataKey).elements
      replyTo ! GetSubscriptionsSuccess(value)
    case GetFailure(DataKey, Some(replyTo: ActorRef)) =>
      replyTo ! GetSubscriptionsFailure

    case _: UpdateResponse[_] => // ignore

    case c @ Changed(DataKey) =>
      val data = c.get(DataKey)
      println(s"Current elements: ${data.elements}")
  }
} 
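The updates above use WriteLocal, which only guarantees the write on the local replica before gossip disseminates it. A hedged variant of the AddSubscription handling with stronger write consistency (WriteMajority is part of the Replicator API; the timeout is illustrative):

  import akka.cluster.ddata.Replicator.WriteMajority
  import scala.concurrent.duration._

  // inside SubscriptionManager.receive, replacing the WriteLocal update
  val writeMajority = WriteMajority(timeout = 5.seconds)
  replicator ! Update(DataKey, ORSet.empty[Subscription], writeMajority)(_ + subscription)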
Example 21
Source File: ClusterSingletonApplication.scala (from Akka-Cookbook, MIT License)
package com.packt.chapter7

import akka.actor.{ActorSystem, PoisonPill, Props}
import akka.cluster.Cluster
import akka.cluster.singleton._
import scala.concurrent.duration._

object ClusterSingletonApplication extends App {
  val actorSystem = ActorSystem("ClusterSystem")
  val cluster = Cluster(actorSystem)

  val clusterSingletonSettings = ClusterSingletonManagerSettings(actorSystem)
  val clusterSingletonManager = ClusterSingletonManager.props(Props[ClusterAwareSimpleActor], PoisonPill, clusterSingletonSettings)
  actorSystem.actorOf(clusterSingletonManager, "singletonClusterAwareSimpleActor")

  val singletonSimpleActor = actorSystem.actorOf(ClusterSingletonProxy.props(
    singletonManagerPath = "/user/singletonClusterAwareSimpleActor",
    settings = ClusterSingletonProxySettings(actorSystem)),
    name = "singletonSimpleActorProxy")

  import actorSystem.dispatcher
  actorSystem.scheduler.schedule(10 seconds, 5 seconds, singletonSimpleActor, "TEST")
} 
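ClusterAwareSimpleActor is defined elsewhere in the book's sources. A hypothetical stand-in (not the original actor) that makes the example self-contained:

  import akka.actor.{Actor, ActorLogging}
  import akka.cluster.Cluster

  // Hypothetical stand-in: logs each message with the address of the node
  // currently hosting the singleton.
  class ClusterAwareSimpleActor extends Actor with ActorLogging {
    val cluster = Cluster(context.system)

    def receive = {
      case msg => log.info("Singleton on {} received: {}", cluster.selfAddress, msg)
    }
  }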
Example 22
Source File: NotificationSubscriber.scala (from Akka-Cookbook, MIT License)
package com.packt.chapter7

import akka.actor.Actor
import akka.cluster.Cluster
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.{Subscribe, SubscribeAck}

case class Notification(title: String, body: String)

class NotificationSubscriber extends Actor {
  val mediator = DistributedPubSub(context.system).mediator
  // subscribe to the topic named "notification"
  mediator ! Subscribe("notification", self)

  val cluster = Cluster(context.system)
  val clusterAddress = cluster.selfUniqueAddress

  def receive = {
    case notification: Notification ⇒
      println(s"Got notification in node $clusterAddress => $notification")
    case SubscribeAck(Subscribe("notification", None, `self`)) ⇒
      println("subscribing");
  }
} 
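For completeness, a hedged sketch of the publishing side: any actor on any node can publish to the topic through its local mediator (the publisher class is illustrative, not part of the original recipe):

  import akka.actor.Actor
  import akka.cluster.pubsub.DistributedPubSub
  import akka.cluster.pubsub.DistributedPubSubMediator.Publish

  class NotificationPublisher extends Actor {
    val mediator = DistributedPubSub(context.system).mediator

    def receive = {
      case notification: Notification =>
        // fan out to every actor subscribed to the "notification" topic
        mediator ! Publish("notification", notification)
    }
  }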
Example 23
Source File: ActorServiceSpec.scala (from lagom, Apache License 2.0)
package docs.home.actor

import com.lightbend.lagom.docs.ServiceSupport
import scala.concurrent.duration._
import akka.actor.ActorSystem
import akka.testkit.ImplicitSender
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalactic.TypeCheckedTripleEquals
import org.scalatest.BeforeAndAfterAll
import akka.cluster.Cluster
import java.util.concurrent.TimeUnit

object ActorServiceSpec {
  def config = ConfigFactory.parseString("""
    akka.actor.provider = cluster
    akka.remote.artery.canonical.port = 0
    akka.remote.artery.canonical.hostname = 127.0.0.1
    """)
}

class ActorServiceSpec
    extends TestKit(ActorSystem("ActorServiceSpec", ActorServiceSpec.config))
    with ServiceSupport
    with BeforeAndAfterAll
    with TypeCheckedTripleEquals
    with ImplicitSender {
  val workerRoleConfig = ConfigFactory.parseString("akka.cluster.roles = [worker-node]")
  val node2            = ActorSystem("ActorServiceSpec", workerRoleConfig.withFallback(system.settings.config))
  val node3            = ActorSystem("ActorServiceSpec", workerRoleConfig.withFallback(system.settings.config))

  override def beforeAll(): Unit = {
    Cluster(system).join(Cluster(system).selfAddress)
    Cluster(node2).join(Cluster(system).selfAddress)
    Cluster(node3).join(Cluster(system).selfAddress)
    node2.actorOf(Worker.props(), "worker")
    node3.actorOf(Worker.props(), "worker")
    within(15.seconds) {
      awaitAssert {
        Cluster(system).state.members.size should ===(3)
      }
    }
  }

  override def afterAll(): Unit = {
    shutdown()
    shutdown(node2)
    shutdown(node3)
  }

  "Integration with actors" must {
    "work with for example clustered consistent hashing" in withServiceInstance[WorkerService](
      new WorkerServiceImpl(system)
    ).apply { app => client =>
      {
        val job = Job.of("123", "compute", "abc")

        // might take a while until cluster is formed and router knows about the nodes
        within(15.seconds) {
          awaitAssert {
            client.doWork().invoke(job).toCompletableFuture.get(3, TimeUnit.SECONDS) should ===(JobAccepted.of("123"))
          }
        }
      }
    }
  }
} 
Example 24
Source File: CassandraPersistenceSpec.scala (from lagom, Apache License 2.0)
package com.lightbend.lagom.javadsl.persistence.cassandra

import java.io.File

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.persistence.cassandra.testkit.CassandraLauncher
import com.lightbend.lagom.internal.persistence.testkit.AwaitPersistenceInit.awaitPersistenceInit
import com.lightbend.lagom.internal.persistence.testkit.PersistenceTestConfig._
import com.lightbend.lagom.persistence.ActorSystemSpec
import com.lightbend.lagom.persistence.PersistenceSpec
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory

class CassandraPersistenceSpec(actorSystemFactory: () => ActorSystem) extends ActorSystemSpec(actorSystemFactory) {
  def this(testName: String, config: Config) =
    this(
      () => {

        // first start Cassandra and bind the necessary ports
        val cassandraDirectory = new File("target/" + testName)
        CassandraLauncher.start(cassandraDirectory, "lagom-test-embedded-cassandra.yaml", clean = true, port = 0)

        // start the ActorSystem
        // note that we first need to bind the Cassandra port and then pass it to the ActorSystem config
        // this is needed to allow the Cassandra plugin to connect to the randomly selected port
        ActorSystem(
          testName,
          config
            .withFallback(cassandraConfig(testName, CassandraLauncher.randomPort))
        )
      }
    )

  def this(config: Config) =
    this(PersistenceSpec.testNameFromCallStack(classOf[CassandraPersistenceSpec]), config)

  def this() = this(ConfigFactory.empty())

  override def beforeAll(): Unit = {
    super.beforeAll()
    awaitPersistenceInit(system)
    // Join ourselves - needed because the Cassandra offset store uses cluster startup task
    val cluster = Cluster(system)
    cluster.join(cluster.selfAddress)
  }

  override def afterAll(): Unit = {
    CassandraLauncher.stop()
    super.afterAll()
  }
} 
Example 25
Source File: CassandraPersistenceSpec.scala (from lagom, Apache License 2.0)
package com.lightbend.lagom.scaladsl.persistence.cassandra

import java.io.File

import akka.actor.ActorSystem
import akka.actor.BootstrapSetup
import akka.actor.setup.ActorSystemSetup
import akka.cluster.Cluster
import akka.persistence.cassandra.testkit.CassandraLauncher
import com.lightbend.lagom.internal.persistence.testkit.AwaitPersistenceInit.awaitPersistenceInit
import com.lightbend.lagom.internal.persistence.testkit.PersistenceTestConfig._
import com.lightbend.lagom.persistence.ActorSystemSpec
import com.lightbend.lagom.persistence.PersistenceSpec
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory

abstract class CassandraPersistenceSpec private (actorSystemFactory: () => ActorSystem)
    extends ActorSystemSpec(actorSystemFactory) {
  def this(testName: String, config: Config, jsonSerializerRegistry: JsonSerializerRegistry) = {

    this(
      () => {
        // first start Cassandra and bind the necessary ports
        val cassandraDirectory = new File("target/" + testName)
        CassandraLauncher.start(cassandraDirectory, "lagom-test-embedded-cassandra.yaml", clean = true, port = 0)

        // start the ActorSystem
        // note that we first need to bind the Cassandra port and then pass it to the ActorSystem config
        // this is needed to allow the Cassandra plugin to connect to the randomly selected port
        ActorSystem(
          testName,
          ActorSystemSetup(
            BootstrapSetup(
              config
                .withFallback(cassandraConfig(testName, CassandraLauncher.randomPort))
            ),
            JsonSerializerRegistry.serializationSetupFor(jsonSerializerRegistry)
          )
        )
      }
    )
  }

  def this(config: Config, jsonSerializerRegistry: JsonSerializerRegistry) =
    this(PersistenceSpec.testNameFromCallStack(classOf[CassandraPersistenceSpec]), config, jsonSerializerRegistry)

  def this(jsonSerializerRegistry: JsonSerializerRegistry) = this(ConfigFactory.empty(), jsonSerializerRegistry)

  override def beforeAll(): Unit = {
    super.beforeAll()
    awaitPersistenceInit(system)
    // Join ourselves - needed because the Cassandra offset store uses cluster startup task
    val cluster = Cluster(system)
    cluster.join(cluster.selfAddress)
  }

  override def afterAll(): Unit = {
    CassandraLauncher.stop()
    super.afterAll()
  }
} 
Example 26
Source File: JdbcPersistenceSpec.scala (from lagom, Apache License 2.0)
package com.lightbend.lagom.javadsl.persistence.jdbc

import akka.actor.ActorSystem
import akka.cluster.Cluster
import com.lightbend.lagom.internal.javadsl.persistence.jdbc._
import com.lightbend.lagom.internal.persistence.ReadSideConfig
import com.lightbend.lagom.internal.persistence.jdbc.SlickDbTestProvider
import com.lightbend.lagom.internal.persistence.testkit.AwaitPersistenceInit.awaitPersistenceInit
import com.lightbend.lagom.persistence.ActorSystemSpec
import com.lightbend.lagom.persistence.PersistenceSpec
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import play.api.Configuration
import play.api.Environment

import scala.concurrent.Await
import scala.concurrent.duration._

abstract class JdbcPersistenceSpec private (actorSystemFactory: () => ActorSystem)
    extends ActorSystemSpec(actorSystemFactory) {
  def this(testName: String, config: Config) = {
    this(() => ActorSystem(testName, config.withFallback(Configuration.load(Environment.simple()).underlying)))
  }

  def this(config: Config) = this(PersistenceSpec.testNameFromCallStack(classOf[JdbcPersistenceSpec]), config)

  def this() = this(ConfigFactory.empty())

  import system.dispatcher

  protected lazy val slick = new SlickProvider(system, coordinatedShutdown)

  protected lazy val offsetStore =
    new JavadslJdbcOffsetStore(
      slick,
      system,
      new OffsetTableConfiguration(
        system.settings.config,
        ReadSideConfig()
      ),
      ReadSideConfig()
    )
  protected lazy val jdbcReadSide: JdbcReadSide = new JdbcReadSideImpl(slick, offsetStore)

  override def beforeAll(): Unit = {
    super.beforeAll()

    // Join ourselves - needed because we're using cluster singleton to create tables
    val cluster = Cluster(system)
    cluster.join(cluster.selfAddress)

    // Trigger database to be loaded and registered to JNDI
    SlickDbTestProvider.buildAndBindSlickDb(system.name, coordinatedShutdown)

    // Trigger tables to be created
    Await.ready(slick.ensureTablesCreated(), 20.seconds)

    awaitPersistenceInit(system)
  }
} 
Example 27
Source File: SlickPersistenceSpec.scala (from lagom, Apache License 2.0)
package com.lightbend.lagom.scaladsl.persistence.slick

import akka.actor.setup.ActorSystemSetup
import akka.actor.ActorSystem
import akka.actor.BootstrapSetup
import akka.cluster.Cluster
import com.lightbend.lagom.internal.persistence.ReadSideConfig
import com.lightbend.lagom.internal.persistence.jdbc.SlickDbTestProvider
import com.lightbend.lagom.internal.persistence.jdbc.SlickOffsetStore
import com.lightbend.lagom.internal.persistence.jdbc.SlickProvider
import com.lightbend.lagom.internal.persistence.testkit.AwaitPersistenceInit.awaitPersistenceInit
import com.lightbend.lagom.internal.scaladsl.persistence.jdbc.OffsetTableConfiguration
import com.lightbend.lagom.internal.scaladsl.persistence.slick.SlickReadSideImpl
import com.lightbend.lagom.persistence.ActorSystemSpec
import com.lightbend.lagom.persistence.PersistenceSpec
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import play.api.Configuration
import play.api.Environment

import scala.concurrent.Await
import scala.concurrent.duration._

abstract class SlickPersistenceSpec private (actorSystemFactory: () => ActorSystem)
    extends ActorSystemSpec(actorSystemFactory) {
  def this(testName: String, config: Config, registry: JsonSerializerRegistry) =
    this(
      () =>
        ActorSystem(
          testName,
          ActorSystemSetup(
            BootstrapSetup(
              config.withFallback(Configuration.load(Environment.simple()).underlying)
            ),
            JsonSerializerRegistry.serializationSetupFor(registry)
          )
        )
    )

  def this(config: Config, registry: JsonSerializerRegistry) =
    this(PersistenceSpec.testNameFromCallStack(classOf[SlickPersistenceSpec]), config, registry)

  def this(registry: JsonSerializerRegistry) =
    this(ConfigFactory.empty(), registry)

  import system.dispatcher

  protected lazy val slick = new SlickProvider(system, coordinatedShutdown)
  protected lazy val slickReadSide: SlickReadSide = {
    val offsetStore =
      new SlickOffsetStore(
        system,
        slick,
        new OffsetTableConfiguration(system.settings.config, ReadSideConfig())
      )
    new SlickReadSideImpl(slick, offsetStore)
  }

  override def beforeAll(): Unit = {
    super.beforeAll()

    // Join ourselves - needed because we're using cluster singleton to create tables
    val cluster = Cluster(system)
    cluster.join(cluster.selfAddress)

    // Trigger database to be loaded and registered to JNDI
    SlickDbTestProvider.buildAndBindSlickDb(system.name, coordinatedShutdown)

    // Trigger tables to be created
    Await.ready(slick.ensureTablesCreated(), 20.seconds)

    awaitPersistenceInit(system)
  }
} 
Example 28
Source File: JdbcPersistenceSpec.scala (from lagom, Apache License 2.0)
package com.lightbend.lagom.scaladsl.persistence.jdbc

import akka.actor.setup.ActorSystemSetup
import akka.actor.ActorSystem
import akka.actor.BootstrapSetup
import akka.cluster.Cluster
import com.lightbend.lagom.internal.persistence.ReadSideConfig
import com.lightbend.lagom.internal.persistence.jdbc.SlickDbTestProvider
import com.lightbend.lagom.internal.persistence.jdbc.SlickOffsetStore
import com.lightbend.lagom.internal.persistence.jdbc.SlickProvider
import com.lightbend.lagom.internal.persistence.testkit.AwaitPersistenceInit.awaitPersistenceInit
import com.lightbend.lagom.internal.scaladsl.persistence.jdbc.JdbcReadSideImpl
import com.lightbend.lagom.internal.scaladsl.persistence.jdbc.OffsetTableConfiguration
import com.lightbend.lagom.persistence.ActorSystemSpec
import com.lightbend.lagom.persistence.PersistenceSpec
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import play.api.Configuration
import play.api.Environment

import scala.concurrent.Await
import scala.concurrent.duration._

abstract class JdbcPersistenceSpec private (actorSystemFactory: () => ActorSystem)
    extends ActorSystemSpec(actorSystemFactory) {
  def this(testName: String, config: Config, registry: JsonSerializerRegistry) =
    this(
      () =>
        ActorSystem(
          testName,
          ActorSystemSetup(
            BootstrapSetup(
              config.withFallback(Configuration.load(Environment.simple()).underlying)
            ),
            JsonSerializerRegistry.serializationSetupFor(registry)
          )
        )
    )

  def this(config: Config, registry: JsonSerializerRegistry) =
    this(PersistenceSpec.testNameFromCallStack(classOf[JdbcPersistenceSpec]), config, registry)

  def this(registry: JsonSerializerRegistry) = this(ConfigFactory.empty(), registry)

  import system.dispatcher

  protected lazy val slick = new SlickProvider(system, coordinatedShutdown)
  protected lazy val jdbcReadSide: JdbcReadSide = new JdbcReadSideImpl(
    slick,
    new SlickOffsetStore(
      system,
      slick,
      new OffsetTableConfiguration(system.settings.config, ReadSideConfig())
    )
  )

  override def beforeAll(): Unit = {
    super.beforeAll()

    // Join ourselves - needed because we're using cluster singleton to create tables
    val cluster = Cluster(system)
    cluster.join(cluster.selfAddress)

    // Trigger database to be loaded and registered to JNDI
    SlickDbTestProvider.buildAndBindSlickDb(system.name, coordinatedShutdown)

    // Trigger tables to be created
    Await.ready(slick.ensureTablesCreated(), 20.seconds)

    awaitPersistenceInit(system)
  }
} 
Example 29
Source File: SlickOffsetStoreSpec.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.persistence.jdbc

import akka.cluster.Cluster
import akka.pattern.AskTimeoutException
import com.lightbend.lagom.persistence.ActorSystemSpec
import play.api.Configuration
import play.api.Environment
import slick.jdbc.meta.MTable

import scala.concurrent.Await
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.duration._

class SlickOffsetStoreSpec extends ActorSystemSpec(Configuration.load(Environment.simple()).underlying) {
  import system.dispatcher

  private lazy val slick = new SlickProvider(system, coordinatedShutdown)

  private lazy val offsetStore = new SlickOffsetStore(
    system,
    slick,
    TestOffsetStoreConfiguration()
  )

  protected override def beforeAll(): Unit = {
    super.beforeAll()
    // Trigger database to be loaded and registered to JNDI
    SlickDbTestProvider.buildAndBindSlickDb(system.name, coordinatedShutdown)
  }

  "SlickOffsetStoreSpec" when {
    "auto-creating tables is enabled" should {
      // Regression test for https://github.com/lagom/lagom/issues/1336
      "allow prepare to be retried after a failure" in {
        val exception = Await.result(offsetStore.prepare("test_read_side", "TestTag").failed, 10.seconds)
        exception shouldBe a[AskTimeoutException]

        // Join ourselves - needed because we're using cluster singleton to create tables
        val cluster = Cluster(system)
        cluster.join(cluster.selfAddress)

        Await.result(offsetStore.prepare("test_read_side", "TestTag"), 20.seconds)

        val tables = Await.result(slick.db.run(MTable.getTables("test_read_side_offsets_tbl")), 5.seconds)
        (tables should have).length(1)
      }

      
      "creates the read-side offset table when preparing" in pending
      "allows prepare to be called multiple times" in pending
      "uses configured column names" in pending
      "returns an offset DAO with the last stored offset" in pending
    }

    "auto-creating tables is disabled" in pending
  }

  private case class TestOffsetStoreConfiguration(
      tableName: String = "test_read_side_offsets_tbl",
      schemaName: Option[String] = None,
      idColumnName: String = "test_read_side_id_col",
      tagColumnName: String = "test_tag_col",
      sequenceOffsetColumnName: String = "test_sequence_offset_col",
      timeUuidOffsetColumnName: String = "test_time_uuid_offset_col",
      minBackoff: FiniteDuration = 1.second,
      maxBackoff: FiniteDuration = 1.second,
      randomBackoffFactor: Double = 0,
      globalPrepareTimeout: FiniteDuration = 5.seconds,
      role: Option[String] = None
  ) extends SlickOffsetStoreConfiguration
} 
Example 30
Source File: ClusterComponents.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.scaladsl.cluster

import akka.actor.ActorSystem
import akka.cluster.Cluster
import com.lightbend.lagom.internal.cluster.JoinClusterImpl
import com.lightbend.lagom.scaladsl.cluster.typed.ClusterShardingTypedComponents
import com.lightbend.lagom.scaladsl.playjson.RequiresJsonSerializerRegistry
import com.lightbend.lagom.scaladsl.server.AkkaManagementComponents
import play.api.Environment


trait ClusterComponents
    extends RequiresJsonSerializerRegistry
    with AkkaManagementComponents
    with ClusterShardingTypedComponents {
  def actorSystem: ActorSystem
  def environment: Environment

  // eager initialization
  val cluster: Cluster = {
    JoinClusterImpl.join(actorSystem, environment, akkaManagementTrigger)
    Cluster(actorSystem)
  }
} 
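The components trait above is consumed by mixing it into an application cake. A minimal sketch of that wiring (not part of the original file; the class name is illustrative):

import com.lightbend.lagom.scaladsl.server.{ LagomApplication, LagomApplicationContext }

// Hypothetical application cake. Mixing in ClusterComponents joins the node to
// the cluster eagerly on construction; a concrete subclass still has to provide
// the usual members such as lagomServer, plus the jsonSerializerRegistry
// demanded by RequiresJsonSerializerRegistry.
abstract class MyClusteredApplication(context: LagomApplicationContext)
    extends LagomApplication(context)
    with ClusterComponents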
Example 31
Source File: ClusteredMultiNodeUtils.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.cluster

import akka.actor.ActorRef
import akka.actor.Address
import akka.cluster.Cluster
import akka.cluster.MemberStatus
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeSpec
import akka.testkit.ImplicitSender
import com.lightbend.lagom.internal.cluster.ClusterMultiNodeConfig.node1

import scala.concurrent.duration._

abstract class ClusteredMultiNodeUtils(val numOfNodes: Int, multiNodeConfig: ClusterMultiNodeConfig)
    extends MultiNodeSpec(multiNodeConfig, ClusterMultiNodeActorSystemFactory.createActorSystem())
    with STMultiNodeSpec
    with ImplicitSender {
  override def initialParticipants: Int = roles.size

  def join(from: RoleName, to: RoleName): Unit = {
    runOn(from) {
      Cluster(system).join(node(to).address)
    }
    enterBarrier(from.name + "-joined")
  }

  def fullAddress(ref: ActorRef): Address =
    if (ref.path.address.hasLocalScope) Cluster(system).selfAddress
    else ref.path.address

  protected override def atStartup(): Unit = {
    join(node1, node1)
    roles.tail.foreach(n => join(n, node1))
    within(15.seconds) {
      awaitAssert(Cluster(system).state.members.size should be(numOfNodes))
      awaitAssert(
        Cluster(system).state.members.toIndexedSeq.map(_.status).distinct should be(IndexedSeq(MemberStatus.Up))
      )
    }

    enterBarrier("startup")
  }
} 
Example 32
Source File: ClusteredTeskit.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.cluster

import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Address
import akka.actor.BootstrapSetup
import akka.actor.setup.ActorSystemSetup
import akka.cluster.Cluster
import akka.cluster.MemberStatus
import akka.remote.testconductor.RoleName
import com.typesafe.config.ConfigFactory
import akka.remote.testkit.MultiNodeConfig
import akka.remote.testkit.MultiNodeSpec
import akka.testkit.ImplicitSender
import com.lightbend.lagom.internal.cluster.ClusterMultiNodeConfig.node1
import com.typesafe.config.Config

import scala.concurrent.duration._

object ClusterMultiNodeConfig extends ClusterMultiNodeConfig

// this is reused in multiple multi-jvm tests. There's still some copy/paste around though.
abstract class ClusterMultiNodeConfig extends MultiNodeConfig {
  val node1 = role("node1")
  val node2 = role("node2")
  val node3 = role("node3")

  protected def systemConfig: Config =
    ConfigFactory.parseString(
      """
    akka.loglevel = INFO
    akka.actor.provider = cluster
    terminate-system-after-member-removed = 60s

    # increase default timeouts to leave wider margin for Travis.
    # 30s to 60s
    akka.testconductor.barrier-timeout=60s
    akka.test.single-expect-default = 15s

    akka.cluster.sharding.waiting-for-state-timeout = 5s

    # Don't terminate the actor system when doing a coordinated shutdown
    akka.coordinated-shutdown.terminate-actor-system = off
    akka.coordinated-shutdown.run-by-jvm-shutdown-hook = off
    akka.cluster.run-coordinated-shutdown-when-down = off

    ## The settings below are incidental because this code lives in a project that depends on lagom-cluster and
    ## lagom-akka-management-core.

    # multi-jvm tests forms the cluster programmatically
    # therefore we disable Akka Cluster Bootstrap
    lagom.cluster.bootstrap.enabled = off

    # no jvm exit on tests
    lagom.cluster.exit-jvm-when-system-terminated = off
    """
    )

  commonConfig(systemConfig)
}

// heavily inspired by AbstractClusteredPersistentEntitySpec
// this is reused in multiple multi-jvm tests. There's still some copy/paste around though.
object ClusterMultiNodeActorSystemFactory {
  // Copied from MultiNodeSpec
  private def getCallerName(clazz: Class[_]): String = {
    val s = Thread.currentThread.getStackTrace.map(_.getClassName).drop(1).dropWhile(_.matches(".*MultiNodeSpec.?$"))
    val reduced = s.lastIndexWhere(_ == clazz.getName) match {
      case -1 => s
      case z  => s.drop(z + 1)
    }
    reduced.head.replaceFirst(""".*\.""", "").replaceAll("[^a-zA-Z_0-9]", "_")
  }
  def createActorSystem(): Config => ActorSystem = { config =>
    val setup = ActorSystemSetup(BootstrapSetup(ConfigFactory.load(config)))
    ActorSystem(getCallerName(classOf[MultiNodeSpec]), setup)
  }
} 
Example 33
Source File: ClusterBootstrap.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.management.cluster.bootstrap

import java.util.concurrent.atomic.AtomicReference

import akka.AkkaVersion
import scala.concurrent.{ Future, Promise, TimeoutException }
import scala.concurrent.duration._

import akka.actor.ActorSystem
import akka.actor.ClassicActorSystemProvider
import akka.actor.ExtendedActorSystem
import akka.actor.Extension
import akka.actor.ExtensionId
import akka.actor.ExtensionIdProvider
import akka.annotation.InternalApi
import akka.cluster.Cluster
import akka.discovery.{ Discovery, ServiceDiscovery }
import akka.event.Logging
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.server.Route
import akka.management.cluster.bootstrap.contactpoint.HttpClusterBootstrapRoutes
import akka.management.cluster.bootstrap.internal.BootstrapCoordinator
import akka.management.scaladsl.ManagementRouteProviderSettings
import akka.management.scaladsl.ManagementRouteProvider

final class ClusterBootstrap(implicit system: ExtendedActorSystem) extends Extension with ManagementRouteProvider {

  import ClusterBootstrap.Internal._
  import system.dispatcher

  private val log = Logging(system, classOf[ClusterBootstrap])

  private final val bootstrapStep = new AtomicReference[BootstrapStep](NotRunning)

  AkkaVersion.require("cluster-bootstrap", "2.5.27")

  val settings: ClusterBootstrapSettings = ClusterBootstrapSettings(system.settings.config, log)

  // used for initial discovery of contact points
  lazy val discovery: ServiceDiscovery =
    settings.contactPointDiscovery.discoveryMethod match {
      case "akka.discovery" =>
        val discovery = Discovery(system).discovery
        log.info("Bootstrap using default `akka.discovery` method: {}", Logging.simpleName(discovery))
        discovery

      case otherDiscoveryMechanism =>
        log.info("Bootstrap using `akka.discovery` method: {}", otherDiscoveryMechanism)
        Discovery(system).loadServiceDiscovery(otherDiscoveryMechanism)
    }

  private val joinDecider: JoinDecider = {
    system.dynamicAccess
      .createInstanceFor[JoinDecider](
        settings.joinDecider.implClass,
        List((classOf[ActorSystem], system), (classOf[ClusterBootstrapSettings], settings))
      )
      .get
  }

  private[this] val _selfContactPointUri: Promise[Uri] = Promise()

  override def routes(routeProviderSettings: ManagementRouteProviderSettings): Route = {
    log.info(s"Using self contact point address: ${routeProviderSettings.selfBaseUri}")
    this.setSelfContactPoint(routeProviderSettings.selfBaseUri)

    new HttpClusterBootstrapRoutes(settings).routes
  }

  def start(): Unit =
    if (Cluster(system).settings.SeedNodes.nonEmpty) {
      log.warning(
        "Application is configured with specific `akka.cluster.seed-nodes`: {}, bailing out of the bootstrap process! " +
        "If you want to use the automatic bootstrap mechanism, make sure to NOT set explicit seed nodes in the configuration. " +
        "This node will attempt to join the configured seed nodes.",
        Cluster(system).settings.SeedNodes.mkString("[", ", ", "]")
      )
    } else if (bootstrapStep.compareAndSet(NotRunning, Initializing)) {
      log.info("Initiating bootstrap procedure using {} method...", settings.contactPointDiscovery.discoveryMethod)

      ensureSelfContactPoint()
      val bootstrapProps = BootstrapCoordinator.props(discovery, joinDecider, settings)
      val bootstrap = system.systemActorOf(bootstrapProps, "bootstrapCoordinator")
      // Bootstrap already logs in several other execution points when it can't form a cluster, and why.
      selfContactPoint.foreach { uri =>
        bootstrap ! BootstrapCoordinator.Protocol.InitiateBootstrapping(uri)
      }
    } else log.warning("Bootstrap already initiated, yet start() method was called again. Ignoring.")

  
  private[bootstrap] def setSelfContactPoint(baseUri: Uri): Unit =
    _selfContactPointUri.trySuccess(baseUri)

  // Completed once Akka Management has been bound and routes() was invoked.
  private[akka] def selfContactPoint: Future[Uri] =
    _selfContactPointUri.future

  // Fail the self-contact-point promise after a grace period if Akka Management
  // was never started, so bootstrap fails fast instead of hanging forever.
  private[bootstrap] def ensureSelfContactPoint(): Unit = {
    system.scheduler.scheduleOnce(10.seconds) {
      if (!selfContactPoint.isCompleted)
        _selfContactPointUri.tryFailure(
          new TimeoutException(
            "Self contact point was never set. Akka Management must be started before bootstrap."))
    }
  }
}

object ClusterBootstrap extends ExtensionId[ClusterBootstrap] with ExtensionIdProvider {

  override def lookup: ClusterBootstrap.type = ClusterBootstrap

  override def get(system: ActorSystem): ClusterBootstrap = super.get(system)

  override def get(system: ClassicActorSystemProvider): ClusterBootstrap = super.get(system)

  override def createExtension(system: ExtendedActorSystem): ClusterBootstrap = new ClusterBootstrap()(system)

  private[bootstrap] object Internal {
    sealed trait BootstrapStep
    case object NotRunning extends BootstrapStep
    case object Initializing extends BootstrapStep
  }
} 
Example 34
Source File: HttpClusterBootstrapRoutes.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.management.cluster.bootstrap.contactpoint

import scala.concurrent.duration._

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.cluster.Member
import akka.event.Logging
import akka.event.LoggingAdapter
import akka.http.javadsl.server.directives.RouteAdapter
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.server.Route
import akka.management.cluster.bootstrap.ClusterBootstrapSettings
import akka.management.cluster.bootstrap.contactpoint.HttpBootstrapJsonProtocol.ClusterMember
import akka.management.cluster.bootstrap.contactpoint.HttpBootstrapJsonProtocol.SeedNodes

final class HttpClusterBootstrapRoutes(settings: ClusterBootstrapSettings) extends HttpBootstrapJsonProtocol {

  import akka.http.scaladsl.server.Directives._

  private def routeGetSeedNodes: Route = extractClientIP { clientIp =>
    extractActorSystem { implicit system =>
      import akka.cluster.MemberStatus
      val cluster = Cluster(system)

      def memberToClusterMember(m: Member): ClusterMember =
        ClusterMember(m.uniqueAddress.address, m.uniqueAddress.longUid, m.status.toString, m.roles)

      val state = cluster.state

      // TODO shuffle the members so in a big deployment nodes start joining different ones and not all the same?
      val members = state.members
        .diff(state.unreachable)
        .filter(m =>
          m.status == MemberStatus.up || m.status == MemberStatus.weaklyUp || m.status == MemberStatus.joining)
        .take(settings.contactPoint.httpMaxSeedNodesToExpose)
        .map(memberToClusterMember)

      val info = SeedNodes(cluster.selfMember.uniqueAddress.address, members)
      log.info(
        "Bootstrap request from {}: Contact Point returning {} seed-nodes [{}]",
        clientIp,
        members.size,
        members.map(_.node).mkString(", "))
      complete(info)
    }
  }

  
  def routes: Route =
    (get & path("bootstrap" / "seed-nodes"))(routeGetSeedNodes)

  def getRoutes: akka.http.javadsl.server.Route = RouteAdapter(routes)

  private def log(implicit sys: ActorSystem): LoggingAdapter =
    Logging(sys, classOf[HttpClusterBootstrapRoutes])

}

object ClusterBootstrapRequests {

  import akka.http.scaladsl.client.RequestBuilding._

  def bootstrapSeedNodes(baseUri: Uri): HttpRequest =
    Get(baseUri + "/bootstrap/seed-nodes")

} 
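For reference, a small client sketch that exercises the seed-nodes route built above (not from the original file; host and management port 8558 are assumptions):

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.Uri
import akka.stream.ActorMaterializer

object SeedNodesProbe extends App {
  implicit val system: ActorSystem = ActorSystem("probe")
  implicit val mat: ActorMaterializer = ActorMaterializer()
  import system.dispatcher

  // GET <base>/bootstrap/seed-nodes against a running contact point.
  Http()
    .singleRequest(ClusterBootstrapRequests.bootstrapSeedNodes(Uri("http://127.0.0.1:8558")))
    .foreach(response => println(s"seed-nodes response: ${response.status}"))
}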
Example 35
Source File: HttpContactPointRoutesSpec.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.management.cluster.bootstrap.contactpoint

import akka.cluster.{ Cluster, ClusterEvent }
import akka.event.NoLogging
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.management.cluster.bootstrap.ClusterBootstrapSettings
import akka.testkit.{ SocketUtil, TestProbe }
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ Matchers, WordSpecLike }

class HttpContactPointRoutesSpec
    extends WordSpecLike
    with Matchers
    with ScalatestRouteTest
    with HttpBootstrapJsonProtocol
    with Eventually {

  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = scaled(Span(3, Seconds)), interval = scaled(Span(50, Millis)))

  override def testConfigSource =
    s"""
    akka {
      remote {
        netty.tcp {
          hostname = "127.0.0.1"
          port = ${SocketUtil.temporaryServerAddress("127.0.0.1").getPort}
        }
      }
    }
    """.stripMargin

  "Http Bootstrap routes" should {

    val settings = ClusterBootstrapSettings(system.settings.config, NoLogging)
    val httpBootstrap = new HttpClusterBootstrapRoutes(settings)

    "empty list if node is not part of a cluster" in {
      ClusterBootstrapRequests.bootstrapSeedNodes("") ~> httpBootstrap.routes ~> check {
        responseAs[String] should include(""""seedNodes":[]""")
      }
    }

    "include seed nodes when part of a cluster" in {
      val cluster = Cluster(system)
      cluster.join(cluster.selfAddress)

      val p = TestProbe()
      cluster.subscribe(p.ref, ClusterEvent.InitialStateAsEvents, classOf[ClusterEvent.MemberUp])
      val up = p.expectMsgType[ClusterEvent.MemberUp]
      up.member should ===(cluster.selfMember)

      eventually {
        ClusterBootstrapRequests.bootstrapSeedNodes("") ~> httpBootstrap.routes ~> check {
          val response = responseAs[HttpBootstrapJsonProtocol.SeedNodes]
          response.seedNodes should !==(Set.empty)
          response.seedNodes.map(_.node) should contain(cluster.selfAddress)
        }
      }
    }
  }

} 
Example 36
Source File: ExampleHealthCheck.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package doc.akka.management

import akka.actor.ActorSystem
import akka.cluster.{ Cluster, MemberStatus }

import scala.concurrent.Future

//#basic
class ExampleHealthCheck(system: ActorSystem) extends (() => Future[Boolean]) {
  override def apply(): Future[Boolean] = {
    Future.successful(true)
  }
}
//#basic

//#cluster
class ClusterHealthCheck(system: ActorSystem) extends (() => Future[Boolean]) {
  private val cluster = Cluster(system)
  override def apply(): Future[Boolean] = {
    Future.successful(cluster.selfMember.status == MemberStatus.Up)
  }
}
//#cluster 
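Akka Management discovers such checks through configuration: each entry under the health-check sections maps an arbitrary label to a class with a single ActorSystem constructor argument. A sketch of the wiring (not part of the original file; the labels `cluster` and `example` are arbitrary):

import com.typesafe.config.ConfigFactory

// Apply this as a fallback on the application config before creating the ActorSystem.
val healthCheckConfig = ConfigFactory.parseString("""
  akka.management.health-checks {
    readiness-checks.cluster = "doc.akka.management.ClusterHealthCheck"
    liveness-checks.example = "doc.akka.management.ExampleHealthCheck"
  }
""")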
Example 37
Source File: Main.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.cluster.bootstrap

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import com.typesafe.config.{ Config, ConfigFactory }

//#main
object Node1 extends App {
  new Main(1)
}

object Node2 extends App {
  new Main(2)
}

object Node3 extends App {
  new Main(3)
}

class Main(nr: Int) {

  val config: Config = ConfigFactory.parseString(s"""
      akka.remote.artery.canonical.hostname = "127.0.0.$nr"
      akka.management.http.hostname = "127.0.0.$nr"
    """).withFallback(ConfigFactory.load())
  val system = ActorSystem("local-cluster", config)

  AkkaManagement(system).start()

  ClusterBootstrap(system).start()

  Cluster(system).registerOnMemberUp({
    system.log.info("Cluster is up!")
  })
}
//#main 
Example 38
Source File: DemoApp.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.cluster.bootstrap

import akka.actor.ActorSystem
import akka.cluster.{ Cluster, MemberStatus }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.scaladsl.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.stream.ActorMaterializer

object DemoApp extends App {
  implicit val system = ActorSystem("my-system")
  implicit val materializer = ActorMaterializer()

  val cluster = Cluster(system)

  def isReady() = {
    val selfNow = cluster.selfMember

    selfNow.status == MemberStatus.Up
  }

  def isHealthy() = {
    isReady()
  }

  val route =
    concat(
      path("ping")(complete("pong!")),
      path("healthy")(complete(if (isHealthy()) StatusCodes.OK else StatusCodes.ServiceUnavailable)),
      path("ready")(complete(if (isReady()) StatusCodes.OK else StatusCodes.ServiceUnavailable)))

  AkkaManagement(system).start()
  ClusterBootstrap(system).start()

  Http().bindAndHandle(
    route,
    sys.env.get("HOST").getOrElse("127.0.0.1"),
    sys.env.get("PORT_HTTP").map(_.toInt).getOrElse(8080))
} 
Example 39
Source File: DemoApp.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.cluster.bootstrap

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.cluster.bootstrap.ClusterBootstrap
//#start-akka-management
import akka.management.scaladsl.AkkaManagement

//#start-akka-management
import akka.stream.ActorMaterializer

object DemoApp extends App {

  implicit val system = ActorSystem("Appka")

  import system.log
  implicit val mat = ActorMaterializer()
  val cluster = Cluster(system)

  log.info(s"Started [$system], cluster.selfAddress = ${cluster.selfAddress}")

  //#start-akka-management
  AkkaManagement(system).start()
  //#start-akka-management
  ClusterBootstrap(system).start()

  cluster.subscribe(
    system.actorOf(Props[ClusterWatcher]),
    ClusterEvent.InitialStateAsEvents,
    classOf[ClusterDomainEvent]
  )

  // add real app routes here
  val routes =
    path("hello") {
      get {
        complete(
          HttpEntity(
            ContentTypes.`text/html(UTF-8)`,
            "<h1>Hello</h1>"
          )
        )
      }
    }
  Http().bindAndHandle(routes, "0.0.0.0", 8080)

  Cluster(system).registerOnMemberUp({
    log.info("Cluster member is up!")
  })

}

class ClusterWatcher extends Actor with ActorLogging {
  val cluster = Cluster(context.system)

  override def receive = {
    case msg => log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
  }
} 
Example 40
Source File: MarathonApiDockerDemoApp.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.cluster.bootstrap

import akka.actor.ActorSystem
import akka.cluster.{ Cluster, MemberStatus }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.stream.ActorMaterializer

object MarathonApiDockerDemoApp extends App {
  implicit val system = ActorSystem("my-system")
  implicit val materializer = ActorMaterializer()

  val cluster = Cluster(system)

  def isReady() = {
    val selfNow = cluster.selfMember

    selfNow.status == MemberStatus.Up
  }

  def isHealthy() = {
    isReady()
  }

  val route =
    concat(
      path("ping")(complete("pong!")),
      path("healthy")(complete(if (isHealthy()) StatusCodes.OK else StatusCodes.ServiceUnavailable)),
      path("ready")(complete(if (isReady()) StatusCodes.OK else StatusCodes.ServiceUnavailable))
    )

  AkkaManagement(system).start()
  ClusterBootstrap(system).start()

  Http().bindAndHandle(
    route,
    sys.env.get("HOST").getOrElse("127.0.0.1"),
    sys.env.get("PORT_HTTP").map(_.toInt).getOrElse(8080))
} 
Example 41
Source File: ClusterApp.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.cluster.bootstrap

import akka.actor.{ Actor, ActorLogging, ActorSystem, PoisonPill, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.singleton.{ ClusterSingletonManager, ClusterSingletonManagerSettings }
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.stream.ActorMaterializer

object ClusterApp {

  def main(args: Array[String]): Unit = {

    implicit val system = ActorSystem()
    implicit val materializer = ActorMaterializer()
    implicit val executionContext = system.dispatcher
    val cluster = Cluster(system)

    system.log.info("Starting Akka Management")
    AkkaManagement(system).start()
    ClusterBootstrap(system).start()

    system.actorOf(
      ClusterSingletonManager.props(
        Props[NoisySingleton],
        PoisonPill,
        ClusterSingletonManagerSettings(system)
      )
    )
    Cluster(system).subscribe(
      system.actorOf(Props[ClusterWatcher]),
      ClusterEvent.InitialStateAsEvents,
      classOf[ClusterDomainEvent]
    )

    // add real app routes here
    val routes =
      path("hello") {
        get {
          complete(
            HttpEntity(ContentTypes.`text/html(UTF-8)`, "<h1>Hello</h1>")
          )
        }
      }

    Http().bindAndHandle(routes, "0.0.0.0", 8080)

    system.log.info(
      s"Server online at http://localhost:8080/\nPress RETURN to stop..."
    )

    // registerOnMemberUp takes a by-name block; passing a () => Unit lambda
    // would register a thunk that merely constructs the lambda and never logs.
    cluster.registerOnMemberUp {
      system.log.info("Cluster member is up!")
    }
  }

  class ClusterWatcher extends Actor with ActorLogging {
    val cluster = Cluster(context.system)

    override def receive = {
      case msg => log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
    }
  }
} 
Example 42
Source File: DemoApp.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.cluster.bootstrap

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.management.scaladsl.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source
import com.typesafe.config.ConfigFactory

object DemoApp extends App {

  implicit val system = ActorSystem("simple")

  import system.log
  import system.dispatcher
  implicit val mat = ActorMaterializer()
  val cluster = Cluster(system)

  log.info("Started [{}], cluster.selfAddress = {}", system, cluster.selfAddress)

  AkkaManagement(system).start()
  ClusterBootstrap(system).start()

  cluster
    .subscribe(system.actorOf(Props[ClusterWatcher]), ClusterEvent.InitialStateAsEvents, classOf[ClusterDomainEvent])

  import akka.http.scaladsl.server.Directives._
  Http().bindAndHandle(complete("Hello world"), "0.0.0.0", 8080)

}

class ClusterWatcher extends Actor with ActorLogging {
  val cluster = Cluster(context.system)

  override def receive = {
    case msg => log.info("Cluster {} >>> {}", msg, cluster.selfAddress)
  }
} 
Example 43
Source File: ClusterHttpManagementRouteProvider.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.management.cluster

import akka.actor.ClassicActorSystemProvider
import akka.actor.{ ActorSystem, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider }
import akka.cluster.Cluster
import akka.http.scaladsl.server.Route
import akka.management.cluster.scaladsl.ClusterHttpManagementRoutes
import akka.management.scaladsl.ManagementRouteProviderSettings
import akka.management.scaladsl.ManagementRouteProvider

object ClusterHttpManagementRouteProvider
    extends ExtensionId[ClusterHttpManagementRouteProvider]
    with ExtensionIdProvider {
  override def lookup: ClusterHttpManagementRouteProvider.type = ClusterHttpManagementRouteProvider

  override def get(system: ActorSystem): ClusterHttpManagementRouteProvider = super.get(system)

  override def get(system: ClassicActorSystemProvider): ClusterHttpManagementRouteProvider = super.get(system)

  override def createExtension(system: ExtendedActorSystem): ClusterHttpManagementRouteProvider =
    new ClusterHttpManagementRouteProvider(system)

}

final class ClusterHttpManagementRouteProvider(system: ExtendedActorSystem)
    extends Extension
    with ManagementRouteProvider {

  val cluster = Cluster(system)

  /** Routes to be exposed by Akka cluster management */
  override def routes(routeProviderSettings: ManagementRouteProviderSettings): Route =
    if (routeProviderSettings.readOnly) {
      ClusterHttpManagementRoutes.readOnly(cluster)
    } else {
      ClusterHttpManagementRoutes(cluster)
    }

} 
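The spec in a later example obtains these routes through the extension; pulled out as a standalone sketch (not from the original file; the base URI is an assumption):

import akka.actor.{ ActorSystem, ExtendedActorSystem }
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.server.Route
import akka.management.scaladsl.ManagementRouteProviderSettings

object ClusterManagementRoutes {
  // readOnly = true strips mutating operations (down/leave/join) from the routes.
  def readOnly(system: ActorSystem): Route =
    ClusterHttpManagementRouteProvider(system.asInstanceOf[ExtendedActorSystem])
      .routes(ManagementRouteProviderSettings(Uri("http://localhost:8558"), readOnly = true))
}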
Example 44
Source File: ClusterMembershipCheck.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.management.cluster.scaladsl

import akka.actor.ActorSystem
import akka.annotation.InternalApi
import akka.cluster.{ Cluster, MemberStatus }
import akka.util.Helpers
import com.typesafe.config.Config

import scala.collection.JavaConverters._
import scala.concurrent.Future


@InternalApi
private[akka] object ClusterMembershipCheckSettings {
  def memberStatus(status: String): MemberStatus =
    Helpers.toRootLowerCase(status) match {
      case "weaklyup" => MemberStatus.WeaklyUp
      case "up"       => MemberStatus.Up
      case "exiting"  => MemberStatus.Exiting
      case "down"     => MemberStatus.Down
      case "joining"  => MemberStatus.Joining
      case "leaving"  => MemberStatus.Leaving
      case "removed"  => MemberStatus.Removed
      case invalid =>
        throw new IllegalArgumentException(
          s"'$invalid' is not a valid MemberStatus. See reference.conf for valid values"
        )
    }
  def apply(config: Config): ClusterMembershipCheckSettings =
    new ClusterMembershipCheckSettings(config.getStringList("ready-states").asScala.map(memberStatus).toSet)
}

final class ClusterMembershipCheckSettings(val readyStates: Set[MemberStatus])

final class ClusterMembershipCheck @InternalApi private[akka] (
    system: ActorSystem,
    selfStatus: () => MemberStatus,
    settings: ClusterMembershipCheckSettings)
    extends (() => Future[Boolean]) {

  def this(system: ActorSystem) =
    this(
      system,
      () => Cluster(system).selfMember.status,
      ClusterMembershipCheckSettings(system.settings.config.getConfig("akka.management.cluster.health-check")))

  override def apply(): Future[Boolean] = {
    Future.successful(settings.readyStates.contains(selfStatus()))
  }
} 
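Because the secondary constructor is public, the check can also be evaluated directly; a minimal usage sketch (not from the original file):

import akka.actor.ActorSystem
import akka.management.cluster.scaladsl.ClusterMembershipCheck

object ReadinessProbe extends App {
  val system = ActorSystem("probe")
  import system.dispatcher

  // Compares the self member's status against the configured
  // akka.management.cluster.health-check.ready-states set.
  val check = new ClusterMembershipCheck(system)
  check().foreach(ready => println(s"member ready: $ready"))
}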
Example 45
Source File: ClusterHttpManagementRouteProviderSpec.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.management.cluster

import akka.actor.ExtendedActorSystem
import akka.cluster.Cluster
import akka.http.scaladsl.model.{ StatusCodes, Uri }
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.management.scaladsl.ManagementRouteProviderSettings
import org.scalatest.{ Matchers, WordSpec }

object ClusterHttpManagementRouteProviderSpec {}

class ClusterHttpManagementRouteProviderSpec extends WordSpec with ScalatestRouteTest with Matchers {

  val cluster = Cluster(system)

  "Cluster HTTP Management Route" should {
    val routes = ClusterHttpManagementRouteProvider(
      system.asInstanceOf[ExtendedActorSystem]
    )
    "not expose write operations when readOnly set" in {
      val readOnlyRoutes = routes.routes(
        ManagementRouteProviderSettings(
          Uri("http://localhost"),
          readOnly = true
        )
      )
      Get("/cluster/members") ~> readOnlyRoutes ~> check {
        handled shouldEqual true
        status shouldEqual StatusCodes.OK
      }
      Post("/cluster/members") ~> readOnlyRoutes ~> check {
        status shouldEqual StatusCodes.MethodNotAllowed
      }
      Get("/cluster/members/member1") ~> readOnlyRoutes ~> check {
        handled shouldEqual true
        status shouldEqual StatusCodes.NotFound
      }
      Delete("/cluster/members/member1") ~> readOnlyRoutes ~> check {
        status shouldEqual StatusCodes.MethodNotAllowed
      }
      Put("/cluster/members/member1") ~> readOnlyRoutes ~> check {
        status shouldEqual StatusCodes.MethodNotAllowed
      }
    }

    "expose write when readOnly false" in {
      val allRoutes = routes.routes(
        ManagementRouteProviderSettings(
          Uri("http://localhost"),
          readOnly = false
        )
      )
      Get("/cluster/members") ~> allRoutes ~> check {
        handled shouldEqual true
      }
      Get("/cluster/members/member1") ~> allRoutes ~> check {
        handled shouldEqual true
        status shouldEqual StatusCodes.NotFound
      }
      Delete("/cluster/members/member1") ~> allRoutes ~> check {
        handled shouldEqual true
        status shouldEqual StatusCodes.NotFound
      }
      Put("/cluster/members/member1") ~> allRoutes ~> check {
        handled shouldEqual true
        status shouldEqual StatusCodes.NotFound
      }
    }
  }

} 
Example 46
Source File: CompileOnlySpec.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package doc.akka.cluster.http.management

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.http.scaladsl.server.Route
import akka.management.cluster.scaladsl.ClusterHttpManagementRoutes
import akka.management.scaladsl.AkkaManagement

object CompileOnlySpec {

  //#loading
  val system = ActorSystem()
  // Automatically loads Cluster Http Routes
  AkkaManagement(system).start()
  //#loading

  //#all
  val cluster = Cluster(system)
  val allRoutes: Route = ClusterHttpManagementRoutes(cluster)
  //#all

  //#read-only
  val readOnlyRoutes: Route = ClusterHttpManagementRoutes.readOnly(cluster)
  //#read-only
} 
Example 47
Source File: CouchbasePersistenceSpec.scala    From akka-persistence-couchbase   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.javadsl.persistence.couchbase

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.persistence.couchbase.CouchbaseBucketSetup
import com.lightbend.lagom.internal.persistence.couchbase.TestConfig
import com.lightbend.lagom.internal.persistence.testkit.AwaitPersistenceInit.awaitPersistenceInit
import com.lightbend.lagom.internal.persistence.testkit.PersistenceTestConfig._
import com.lightbend.lagom.persistence.{ActorSystemSpec, PersistenceSpec}
import com.typesafe.config.{Config, ConfigFactory}

class CouchbasePersistenceSpec private (system: ActorSystem) extends ActorSystemSpec(system) with CouchbaseBucketSetup {
  def this(testName: String, config: Config) =
    this(
      ActorSystem(
        testName,
        config
          .withFallback(ClusterConfig)
          .withFallback(TestConfig.persistenceConfig())
      )
    )

  def this(config: Config) =
    this(PersistenceSpec.getCallerName(getClass), config)

  def this() = this(ConfigFactory.empty())

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    awaitPersistenceInit(system)

    val cluster = Cluster(system)
    cluster.join(cluster.selfAddress)
  }
} 
Example 48
Source File: CouchbasePersistenceSpec.scala    From akka-persistence-couchbase   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.scaladsl.persistence.couchbase

import akka.actor.setup.ActorSystemSetup
import akka.actor.{ActorSystem, BootstrapSetup}
import akka.cluster.Cluster
import akka.persistence.couchbase.CouchbaseBucketSetup
import com.lightbend.lagom.internal.persistence.couchbase.TestConfig
import com.lightbend.lagom.internal.persistence.testkit.AwaitPersistenceInit.awaitPersistenceInit
import com.lightbend.lagom.internal.persistence.testkit.PersistenceTestConfig._
import com.lightbend.lagom.persistence.{ActorSystemSpec, PersistenceSpec}
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.typesafe.config.{Config, ConfigFactory}

class CouchbasePersistenceSpec private (system: ActorSystem) extends ActorSystemSpec(system) with CouchbaseBucketSetup {
  def this(testName: String, config: Config, jsonSerializerRegistry: JsonSerializerRegistry) =
    this(
      ActorSystem(
        testName,
        ActorSystemSetup(
          BootstrapSetup(
            config
              .withFallback(TestConfig.persistenceConfig())
              .withFallback(ClusterConfig)
          ),
          JsonSerializerRegistry.serializationSetupFor(jsonSerializerRegistry)
        )
      )
    )

  def this(config: Config, jsonSerializerRegistry: JsonSerializerRegistry) =
    this(PersistenceSpec.getCallerName(getClass), config, jsonSerializerRegistry)

  def this(jsonSerializerRegistry: JsonSerializerRegistry) = this(ConfigFactory.empty(), jsonSerializerRegistry)

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    awaitPersistenceInit(system)

    val cluster = Cluster(system)
    cluster.join(cluster.selfAddress)
  }
} 
Example 49
Source File: HeadquartersApp.scala    From ddd-leaven-akka-v2   with MIT License 5 votes vote down vote up
package ecommerce.headquarters.app

import akka.cluster.Cluster
import akka.cluster.http.management.ClusterHttpManagement
import akka.kernel.Bootable
import ecommerce.headquarters.processes.OrderProcessManager
import ecommerce.invoicing.InvoicingOfficeId
import ecommerce.sales.ReservationOfficeId
import ecommerce.shipping.ShippingOfficeId
import pl.newicom.dddd.cluster._
import pl.newicom.dddd.office.OfficeFactory._
import pl.newicom.dddd.office.OfficeRegistry
import pl.newicom.dddd.process.CommandSink
import pl.newicom.dddd.scheduling.Scheduler

class HeadquartersApp extends Bootable with HeadquartersConfiguration {

  lazy val offices = OfficeRegistry(system)

  override def startup(): Unit = {
    val cluster = Cluster(system)

    cluster.registerOnMemberUp {
      office[Scheduler]
      office[OrderProcessManager]
      office[CommandSink]

      List(InvoicingOfficeId, ReservationOfficeId, ShippingOfficeId).foreach {
        offices.registerOffice(_, external = true)
      }
    }

    ClusterHttpManagement(cluster).start()
  }

} 
Example 50
Source File: ClusterListener.scala    From akka-cluster-load-balancing   with MIT License 5 votes vote down vote up
package kamkor.actor

import scala.collection.immutable.HashSet
import scala.concurrent.duration.DurationInt
import akka.actor.{ Actor, Props }
import akka.cluster.Cluster
import akka.cluster.ClusterEvent.MemberUp
import akka.cluster.metrics.{ ClusterMetricsChanged, ClusterMetricsExtension, NodeMetrics }
import akka.cluster.metrics.StandardMetrics.HeapMemory
import kamkor.{ ConsumerApp }
import kamkor.metrics.{ ClusterHeapMetrics, MetricsLogger }

class ClusterListener(metricsIntervalSeconds: Int) extends Actor {

  import context.dispatcher
  context.system.scheduler.schedule(
    metricsIntervalSeconds.seconds, metricsIntervalSeconds.seconds, self, "logConsumersHeapUse")

  private[this] val cluster = Cluster(context.system)
  private[this] val metricsLogger =
    new MetricsLogger(name = cluster.selfAddress.port.getOrElse(0).toString())
  private[this] val clusterHeapMetrics = new ClusterHeapMetrics()

  private var consumers: Set[String] = HashSet.empty

  override def preStart(): Unit = {
    ClusterMetricsExtension(context.system).subscribe(self)
    cluster.subscribe(self, classOf[MemberUp])
  }
  override def postStop(): Unit = {
    ClusterMetricsExtension(context.system).unsubscribe(self)
    Cluster(context.system).unsubscribe(self)
  }

  def receive: Receive = {
    case MemberUp(m) if m.roles.contains(ConsumerApp.clusterRole) =>
      consumers += m.address.hostPort
    case ClusterMetricsChanged(clusterMetrics) =>
      clusterMetrics
        .filter(nm => consumers.contains(nm.address.hostPort))
        .foreach(updateHeapUse(_))
    case "logConsumersHeapUse" => {
      metricsLogger.log(clusterHeapMetrics.calculateAverages)
      clusterHeapMetrics.clear()
    }
  }

  private[this] def updateHeapUse(nodeMetrics: NodeMetrics): Unit = {
    nodeMetrics match {
      case HeapMemory(address, timestamp, used, committed, max) => {
        val usedMB = Math.round(used.doubleValue / 1024 / 1024)
        clusterHeapMetrics.update(address.hostPort, usedMB)
      }
      case _ => // no heap info
    }
  }

}

object ClusterListener {

  def props(metricsIntervalSeconds: Int): Props = Props(new ClusterListener(metricsIntervalSeconds: Int))

} 
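A usage sketch for the companion's props factory (not from the original file; the interval is illustrative, and the system needs akka.actor.provider = cluster configured):

import akka.actor.ActorSystem

object ConsumerMetricsMain extends App {
  val system = ActorSystem("ClusterSystem")
  // Logs averaged consumer heap use every 5 seconds.
  system.actorOf(ClusterListener.props(metricsIntervalSeconds = 5), "cluster-listener")
}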
Example 51
Source File: JournaledExchangeNodeBootable.scala    From akka-exchange   with Apache License 2.0 5 votes vote down vote up
package com.boldradius.akka_exchange.journal.util

import akka.actor.{Props, ActorSystem}
import akka.cluster.Cluster
import com.boldradius.akka_exchange.journal.{SharedJournalFinder, SharedJournal}
import com.boldradius.akka_exchange.util.ExchangeNodeBootable
import scala.collection.breakOut


abstract class JournaledExchangeNodeBootable extends ExchangeNodeBootable {

  val findJournal: Boolean = true

  val persistentJournal = {
    println("Booting up Journal Finder...")
    system.actorOf(
      Props(
        classOf[SharedJournalFinder]
      ),
      SharedJournalFinder.name
    )
  }

} 
Example 52
Source File: Status.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.routes

import akka.cluster.{Cluster, MemberStatus}
import monix.eval.Task

sealed trait Status {

  
  def check: Task[Boolean]
}

object Status {

  class ClusterStatus(cluster: Cluster) extends Status {
    override def check: Task[Boolean] =
      Task.pure(
        !cluster.isTerminated &&
          cluster.state.leader.isDefined && cluster.state.members.nonEmpty &&
          !cluster.state.members.exists(_.status != MemberStatus.Up) && cluster.state.unreachable.isEmpty
      )
  }
} 
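Since check returns a lazy monix Task, a caller has to run it explicitly; a sketch (not from the original file; assumes a monix Scheduler on the classpath):

import akka.actor.ActorSystem
import akka.cluster.Cluster
import monix.execution.Scheduler.Implicits.global

object StatusProbe extends App {
  val system = ActorSystem("probe")
  val clusterStatus = new Status.ClusterStatus(Cluster(system))

  // Task is lazy: nothing runs until runToFuture forces it on the Scheduler.
  clusterStatus.check.runToFuture.foreach(healthy => println(s"cluster healthy: $healthy"))
}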
Example 53
Source File: ActorSystemFixture.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.util

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.testkit._
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.PatienceConfiguration
import org.scalatest.concurrent.ScalaFutures._
import org.scalatest.wordspec.AnyWordSpecLike

import scala.concurrent.Promise
import scala.concurrent.duration._

class ActorSystemFixture(name: String, startCluster: Boolean = false, configs: Vector[Config] = Vector.empty)
    extends TestKit(
      ActorSystem(
        name,
        (configs :+ ConfigFactory.load("test.conf"))
          .foldLeft(ConfigFactory.empty()) { case (c, e) => c withFallback e }
          .resolve()
      )
    )
    with AnyWordSpecLike
    with PatienceConfiguration
    with BeforeAndAfterAll {

  implicit override def patienceConfig: PatienceConfig = PatienceConfig(3.seconds.dilated, 100.milliseconds.dilated)

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    if (startCluster) {
      val promise = Promise[Unit]
      Cluster(system).registerOnMemberUp(promise.success(()))
      Cluster(system).join(Cluster(system).selfAddress)
      promise.future.futureValue
    }
  }

  override protected def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }
} 
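A sketch of a spec built on the fixture (not from the original file; the spec name is illustrative). With startCluster = true, beforeAll blocks until the node has joined itself:

import akka.cluster.Cluster

class ClusterAwareSpec extends ActorSystemFixture("ClusterAwareSpec", startCluster = true) {
  "the fixture" should {
    "have formed a single-node cluster before tests run" in {
      assert(Cluster(system).state.members.nonEmpty)
    }
  }
}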
Example 54
Source File: NoScaler.scala    From cloudstate   with Apache License 2.0 5 votes vote down vote up
package io.cloudstate.proxy.autoscaler

import akka.actor.{Actor, ActorLogging, ActorRef}
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.{Cluster, MemberStatus}
import Autoscaler.{Deployment, Scale}


class NoScaler(autoscaler: ActorRef) extends Actor with ActorLogging {

  private[this] final val cluster = Cluster(context.system)

  cluster.subscribe(self, classOf[ClusterDomainEvent])
  sendDeployment()

  override def postStop(): Unit =
    cluster.unsubscribe(self)

  private def sendDeployment(): Unit =
    autoscaler ! Deployment(
      name = context.system.name,
      ready = cluster.state.members.count(c => c.status == MemberStatus.Up || c.status == MemberStatus.WeaklyUp),
      scale = cluster.state.members.size,
      upgrading = false
    )

  override def receive: Receive = {
    case Scale(_, scale) =>
      log.info(s"Autoscaler requested scale up to $scale")
    case _: ClusterDomainEvent =>
      // Don't care what the event was, just send the current deployment state to the autoscaler
      sendDeployment()
  }
} 
Example 55
Source File: Main.scala    From ForestFlow   with Apache License 2.0 5 votes vote down vote up
package ai.forestflow.event.subscribers

import java.net.InetAddress

import akka.Done
import akka.actor.ActorSystem
import akka.actor.CoordinatedShutdown.PhaseBeforeActorSystemTerminate
import akka.cluster.Cluster
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

object Main extends StrictLogging {
  def main(args: Array[String]): Unit = {
    import ai.forestflow.startup.ActorSystemStartup._

    preStartup(typeSafeConfig)

    logger.info(s"Started system: [$system], cluster.selfAddress = ${cluster.selfAddress}")

    shutdown.addTask(PhaseBeforeActorSystemTerminate, "main.cleanup") { () => cleanup(typeSafeConfig) }

    bootstrapCluster(system, cluster)

    logger.info(s"Sharding lease owner for this node will be set to: ${cluster.selfAddress.hostPort}")

    // Start application after self member joined the cluster (Up)
    cluster.registerOnMemberUp({
      logger.info(s"Cluster Member is up: ${cluster.selfMember.toString()}")
      postStartup
    })

  }

  private def bootstrapCluster(system: ActorSystem, cluster: Cluster): Unit = {
    // Akka Management hosts the HTTP routes used by bootstrap
    AkkaManagement(system).start()

    // Starting the bootstrap process needs to be done explicitly
    ClusterBootstrap(system).start()

    system.log.info(s"Akka Management hostname from InetAddress.getLocalHost.getHostAddress is: ${InetAddress.getLocalHost.getHostAddress}")
  }

  private def preStartup(config: Config): Unit = {

  }

  private def postStartup(implicit system: ActorSystem, config: Config): Unit = {
    // Kafka Prediction Logger setup
    import system.log

    val basic_topic = Try(config.getString("application.kafka-prediction-logger.basic-topic")).toOption
    val gp_topic = Try(config.getString("application.kafka-prediction-logger.graphpipe-topic")).toOption

    if (basic_topic.isDefined || gp_topic.isDefined){
      log.info(s"Setting up Kafka prediction logging with basic_topic: $basic_topic graphpipe_topic: $gp_topic")
      val predictionLogger = system.actorOf(PredictionLogger.props(basic_topic, gp_topic))
    }
  }

  private def cleanup(config: Config)(implicit executionContext: ExecutionContext) = Future {
    Done
  }
} 
Example 56
Source File: ClusterEventSubscription.scala    From ForestFlow   with Apache License 2.0 5 votes vote down vote up
package ai.forestflow.serving.cluster

import akka.actor.{Actor, ActorLogging}
import akka.cluster.ClusterEvent.{MemberEvent, MemberRemoved, MemberUp, UnreachableMember}
import akka.cluster.{Cluster, ClusterEvent}

trait ClusterEventSubscription {
  this: Actor with ActorLogging =>

  implicit val cluster: Cluster = Cluster(context.system)

  // subscribe to cluster changes, re-subscribe when restart
  override def preStart(): Unit = {
    cluster.subscribe(self, ClusterEvent.InitialStateAsEvents, classOf[MemberEvent], classOf[UnreachableMember])
  }
  override def postStop(): Unit = cluster.unsubscribe(self)

  def clusterEvents : Receive = {
    case MemberUp(member) =>
      log.info("Member is Up: {}", member.address)
    case UnreachableMember(member) =>
      log.info("Member detected as unreachable: {}", member)
    case MemberRemoved(member, previousStatus) =>
      log.info("Member is Removed: {} after {}",
        member.address, previousStatus)
    case _: MemberEvent => // ignore
  }
} 
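A sketch of an actor mixing in the trait (not from the original file): clusterEvents is just a Receive, so it composes with the application's own handling via orElse.

import akka.actor.{ Actor, ActorLogging }

class ClusterAwareWorker extends Actor with ActorLogging with ClusterEventSubscription {
  private def appEvents: Receive = {
    case msg => log.info("application message: {}", msg)
  }

  // Membership events are handled first; everything else falls through.
  override def receive: Receive = clusterEvents.orElse(appEvents)
}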
Example 57
Source File: NodeActor.scala    From ForestFlow   with Apache License 2.0 5 votes vote down vote up
package ai.forestflow.serving.cluster

import java.io.File

import akka.actor.{Actor, ActorLogging, ActorRef, Props, Timers}
import akka.cluster.Cluster
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.Subscribe
import ai.forestflow.domain.CleanupLocalStorage
import org.apache.commons.io.FileUtils
import com.typesafe.scalalogging.LazyLogging
import ai.forestflow.utils.ThrowableImplicits._

import scala.util.{Failure, Success, Try}

/***
 * This actor is responsible for node-level (host-level) stuff that should be done on a per-node basis.
 * A good example of this is file system cleanup tasks.
 */
object NodeActor extends LazyLogging {
  
  def props(): Props =
    Props(new NodeActor)
      .withDispatcher("blocking-io-dispatcher")

  def cleanupLocalStorage(path: String): Unit = {
    val localDir = new File(path)
    val localDirExists = localDir.exists()
    logger.info(s"Cleaning up local storage: Local Directory: $localDir , exists? $localDirExists")
    if (localDirExists)
      Try(FileUtils.deleteDirectory(localDir)) match {
        case Success(_) => logger.info(s"Local Directory $localDir cleaned up successfully")
        case Failure(ex) => logger.error(s"Local Directory $localDir cleanup failed! Reason: ${ex.printableStackTrace}")
      }
  }
}

class NodeActor extends Actor
  with ActorLogging
  with Timers {

  
  implicit val cluster: Cluster = Cluster(context.system)
  val mediator: ActorRef = DistributedPubSub(context.system).mediator

  mediator ! Subscribe(classOf[CleanupLocalStorage].getSimpleName, self)

  override def receive: Receive = {
    case CleanupLocalStorage(path) =>
      NodeActor.cleanupLocalStorage(path)
  }
} 
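Since NodeActor subscribes to a topic named after the message class, any node can trigger a cluster-wide cleanup by publishing to it; a sketch (not from the original file; assumes CleanupLocalStorage is a case class carrying the path):

import akka.actor.ActorSystem
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.Publish
import ai.forestflow.domain.CleanupLocalStorage

object CleanupBroadcast {
  // Every subscribed NodeActor deletes the given path on its own host.
  def broadcastCleanup(system: ActorSystem, path: String): Unit =
    DistributedPubSub(system).mediator !
      Publish(classOf[CleanupLocalStorage].getSimpleName, CleanupLocalStorage(path))
}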
Example 58
Source File: HTTPServer.scala    From ForestFlow   with Apache License 2.0 5 votes vote down vote up
package ai.forestflow.serving.restapi

import ai.forestflow.serving.config.ApplicationEnvironment
import akka.Done
import akka.actor.CoordinatedShutdown.{PhaseServiceUnbind, Reason}
import akka.actor.SupervisorStrategy._
import akka.actor.{ActorRef, ActorSystem, CoordinatedShutdown}
import akka.cluster.Cluster
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.util.Timeout
import ai.forestflow.akka.Supervisor
import ai.forestflow.domain.ServableRoutes.GetProxyRoutes
import ai.forestflow.utils.ThrowableImplicits._

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{Failure, Success}

//noinspection TypeAnnotation
object HTTPServer {

  private final case object BindFailure extends Reason

}
//noinspection TypeAnnotation
class HTTPServer(servableProxyRef: ActorRef)(implicit system: ActorSystem, cluster: Cluster, shutdown: CoordinatedShutdown) {
  import HTTPServer._
  import system.log

  private implicit val materializer = ActorMaterializer()
  private implicit val executionContext = system.dispatcher
  implicit lazy val timeout: Timeout = Timeout(ApplicationEnvironment.HTTP_COMMAND_TIMEOUT_SECS seconds)

  private val address = ApplicationEnvironment.HTTP_BIND_ADDRESS
  private val port = ApplicationEnvironment.HTTP_PORT

  val routesSupervisor = system.actorOf(Supervisor.props {
    case _: ArithmeticException => Resume
    case _: Exception => Restart
  })


  private val servableRoutesActor = Await.result(
    routesSupervisor
      .ask(ServableRoutes.props(servableProxyRef))
      .mapTo[ActorRef], ApplicationEnvironment.HTTP_COMMAND_TIMEOUT_SECS second)

  servableRoutesActor.ask(GetProxyRoutes()).onComplete {
    case Success(r: Route) =>
      val bindingFuture = Http().bindAndHandle(r, address, port)
      bindingFuture.onComplete {
        case Success(bound) =>
          log.info(s"AKKA HTTP Server online at http://${bound.localAddress.getHostString}:${bound.localAddress.getPort}/")
          shutdown.addTask(PhaseServiceUnbind, "api.unbind") { () =>
            bound.terminate(5 seconds).map(_ => Done)
          }
        case Failure(e) =>
          log.error(s"AKKA HTTP Server could not start! Shutting down... ${e.printableStackTrace}")
          shutdown.run(BindFailure)
      }

    case Failure(e) =>
      log.error(s"Couldn't get dynamic HTTP routes from ServableRoutes actor! Shutting down... ${e.printableStackTrace}")
      shutdown.run(BindFailure)
  }


} 
Example 59
Source File: DocSvr.scala    From Raphtory   with Apache License 2.0 5 votes vote down vote up
package com.raphtory.core.clustersetup

import akka.actor.ActorSystem
import akka.actor.Address
import akka.actor.ExtendedActorSystem
import akka.cluster.Cluster
import akka.cluster.Member
import akka.event.LoggingAdapter
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.javadsl.AkkaManagement
import com.raphtory.core.clustersetup.util.ConfigUtils._
import com.raphtory.core.utils.Utils
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import com.typesafe.config.ConfigValueFactory

import scala.collection.JavaConversions
import scala.collection.JavaConversions._

trait DocSvr {

  def seedLoc: String

  implicit val system: ActorSystem
  val docker = System.getenv().getOrDefault("DOCKER", "false").trim.toBoolean

  val clusterSystemName: String = Utils.clusterSystemName
  val ssn: String               = java.util.UUID.randomUUID.toString

  // Cluster members currently known to this node; kept in sync by SeedActor.
  var nodes: Set[Member] = Set.empty

  def printConfigInfo(config: Config, system: ActorSystem): Unit = {
    val log: LoggingAdapter = system.log

    val systemConfig: SystemConfig = config.parse()
    val bindAddress: SocketAddress = systemConfig.bindAddress
    val tcpAddress: SocketAddress  = systemConfig.tcpAddress

    log.info(s"Created ActorSystem with ID: $ssn")

    log.info(s"Binding ActorSystem internally to address ${bindAddress.host}:${bindAddress.port}")
    log.info(s"Binding ActorSystem externally to host ${tcpAddress.host}:${tcpAddress.port}")

    log.info(s"Registering the following seeds to ActorSystem: ${systemConfig.seeds}")
    log.info(s"Registering the following roles to ActorSystem: ${systemConfig.roles}")

    // FIXME: This is a bit unorthodox ...
    val akkaSystemUrl: Address = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress
    log.info(s"ActorSystem successfully initialised at the following Akka URL: $akkaSystemUrl")
  }
} 
Example 60
Source File: SeedActor.scala    From Raphtory   with Apache License 2.0 5 votes vote down vote up
package com.raphtory.core.components.ClusterManagement

import akka.actor.Actor
import akka.actor.ActorLogging
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._
import com.raphtory.core.clustersetup.DocSvr

class SeedActor(svr: DocSvr) extends Actor with ActorLogging {
  val cluster: Cluster = Cluster(context.system)

  override def preStart(): Unit = {
    log.debug("SeedActor is being started.")

    cluster.subscribe(self, initialStateMode = InitialStateAsEvents, classOf[MemberEvent], classOf[UnreachableMember])
  }

  override def postStop(): Unit = cluster.unsubscribe(self)

  def receive: Receive = {

    case evt: MemberUp          => processMemberUpEvent(evt)
    case evt: MemberRemoved     => processMemberRemovedEvent(evt)
    case evt: UnreachableMember => processUnreachableMemberEvent(evt)
    case evt: MemberExited      => processMemberExitedEvent(evt)
    case x                      => log.warning("SeedActor received unknown [{}] message.", x)
  }

  private def processMemberUpEvent(evt: MemberUp): Unit = {
    log.debug(s"SeedActor received [{}] event.", evt)

    svr.nodes.synchronized {
      svr.nodes += evt.member
    }
  }

  private def processMemberRemovedEvent(evt: MemberRemoved): Unit = {
    log.debug(s"SeedActor received [{}] event.", evt)

    svr.nodes.synchronized {
      svr.nodes -= evt.member
    }
  }
  private def processMemberExitedEvent(evt: MemberExited): Unit = {
    log.debug(s"SeedActor received [{}] event.", evt)

    svr.nodes.synchronized {
      svr.nodes -= evt.member
    }
  }

  private def processUnreachableMemberEvent(evt: UnreachableMember): Unit = {
    log.debug(s"SeedActor received [{}] event.", evt)

    log.warning("processUnreachableMemberEvent in SeedActor has not been implemented. Ignoring request.")
  }
} 
Example 61
Source File: EventsByTagPubsubSpec.scala    From akka-persistence-cassandra   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.cassandra.query

import java.time.{ LocalDate, ZoneOffset }

import akka.cluster.Cluster
import akka.persistence.cassandra.CassandraSpec
import akka.persistence.cassandra.journal.JournalSettings
import akka.persistence.query.{ EventEnvelope, NoOffset }
import akka.stream.testkit.scaladsl.TestSink
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration._

object EventsByTagPubsubSpec {
  val today = LocalDate.now(ZoneOffset.UTC)

  val config = ConfigFactory.parseString(s"""
    akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
    akka.actor.serialize-messages = off
    akka.actor.serialize-creators = off
    akka.remote.netty.tcp.port = 0
    akka.remote.artery.canonical.port = 0
    akka.remote.netty.tcp.hostname = "127.0.0.1"
    akka.persistence.cassandra {
      
      query.refresh-interval = 10s

      events-by-tag {
        pubsub-notification = on
        flush-interval = 0ms
        eventual-consistency-delay = 0s
      }
    }
    """).withFallback(EventsByTagSpec.config)
}

class EventsByTagPubsubSpec extends CassandraSpec(EventsByTagPubsubSpec.config) {

  val journalSettings = new JournalSettings(system, system.settings.config.getConfig("akka.persistence.cassandra"))

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    Cluster(system).join(Cluster(system).selfAddress)
  }

  "Cassandra query getEventsByTag when running clustered with pubsub enabled" must {
    "present new events to an ongoing getEventsByTag stream long before polling would kick in" in {
      val actor = system.actorOf(TestActor.props("EventsByTagPubsubSpec_a"))

      val blackSrc = queries.eventsByTag(tag = "black", offset = NoOffset)
      val probe = blackSrc.runWith(TestSink.probe[Any])
      probe.request(2)
      probe.expectNoMessage(300.millis)

      actor ! "a black car"
      probe.within(5.seconds) { // long before refresh-interval, which is 10s
        probe.expectNextPF { case e @ EventEnvelope(_, _, _, "a black car") => e }
      }
    }
  }
} 
Example 62
Source File: ClusterShardingQuickTerminationSpec.scala    From akka-persistence-cassandra   with Apache License 2.0 5 votes vote down vote up
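This test drives a persistent, sharded counter entity through repeated passivation and restart cycles, checking that its recovered state is correct each time.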
package akka.persistence.cassandra.sharding

import akka.actor.{ ActorLogging, ActorRef, Props, ReceiveTimeout }
import akka.cluster.{ Cluster, MemberStatus }
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings, ShardRegion }
import akka.persistence.PersistentActor
import akka.persistence.cassandra.CassandraSpec
import akka.testkit.TestProbe

import scala.concurrent.duration._

object ClusterShardingQuickTerminationSpec {

  case object Increment
  case object Decrement
  final case class Get(counterId: Long)
  final case class EntityEnvelope(id: Long, payload: Any)
  case object Ack

  case object Stop
  final case class CounterChanged(delta: Int)

  class Counter extends PersistentActor with ActorLogging {
    import ShardRegion.Passivate

    context.setReceiveTimeout(5.seconds)

    // self.path.name is the entity identifier (utf-8 URL-encoded)
    override def persistenceId: String = "Counter-" + self.path.name

    var count = 0

    def updateState(event: CounterChanged): Unit =
      count += event.delta

    override def receiveRecover: Receive = {
      case evt: CounterChanged => updateState(evt)
      case other               => log.debug("Other: {}", other)
    }

    override def receiveCommand: Receive = {
      case Increment      => persist(CounterChanged(+1))(updateState)
      case Decrement      => persist(CounterChanged(-1))(updateState)
      case Get(_)         => sender() ! count
      case ReceiveTimeout => context.parent ! Passivate(stopMessage = Stop)
      case Stop =>
        sender() ! Ack
        context.stop(self)
    }
  }
  val extractEntityId: ShardRegion.ExtractEntityId = {
    case EntityEnvelope(id, payload) => (id.toString, payload)
    case msg @ Get(id)               => (id.toString, msg)
  }

  val numberOfShards = 100

  val extractShardId: ShardRegion.ExtractShardId = {
    case EntityEnvelope(id, _) => (id % numberOfShards).toString
    case Get(id)               => (id % numberOfShards).toString
  }

}

class ClusterShardingQuickTerminationSpec extends CassandraSpec("""
    akka.actor.provider = cluster
  """.stripMargin) {

  import ClusterShardingQuickTerminationSpec._

  "Cassandra Plugin with Cluster Sharding" must {
    "clear state if persistent actor shuts down" in {
      Cluster(system).join(Cluster(system).selfMember.address)
      awaitAssert {
        Cluster(system).selfMember.status shouldEqual MemberStatus.Up
      }
      ClusterSharding(system).start(
        typeName = "tagging",
        entityProps = Props[Counter],
        settings = ClusterShardingSettings(system),
        extractEntityId = extractEntityId,
        extractShardId = extractShardId)

      (0 to 100).foreach { i =>
        val counterRegion: ActorRef = ClusterSharding(system).shardRegion("tagging")
        awaitAssert {
          val sender = TestProbe()
          counterRegion.tell(Get(123), sender.ref)
          sender.expectMsg(500.millis, i)
        }

        counterRegion ! EntityEnvelope(123, Increment)
        counterRegion ! Get(123)
        expectMsg(i + 1)

        counterRegion ! EntityEnvelope(123, Stop)
        expectMsg(Ack)
      }
    }
  }
} 
Example 63
Source File: ClusterSpec.scala    From akka-cqrs   with Apache License 2.0 5 votes vote down vote up
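A multi-node test base class that wipes the LevelDB storage directories, sets up a shared journal, and provides helpers for joining nodes into a cluster.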
package test.support

import java.io.{File, IOException}
import java.nio.file._
import java.nio.file.attribute.BasicFileAttributes

import akka.actor.{ActorIdentity, Identify, Props}
import akka.cluster.Cluster
import akka.persistence.Persistence
import akka.persistence.journal.leveldb.{SharedLeveldbJournal, SharedLeveldbStore}
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeSpec
import akka.testkit.ImplicitSender

import scala.util.control.NonFatal

abstract class ClusterSpec
  extends MultiNodeSpec(ClusterConfig)
  with SbtMultiNodeSpec
  with ImplicitSender {

  import ClusterConfig._

  implicit val logger = system.log

  def initialParticipants = roles.size

  def deleteDirectory(path: Path): Unit = if (Files.exists(path)) {

    Files.walkFileTree(path, new SimpleFileVisitor[Path] {

      def deleteAndContinue(file: Path): FileVisitResult = {
        Files.delete(file)
        FileVisitResult.CONTINUE
      }

      override def visitFile(file: Path, attrs: BasicFileAttributes): FileVisitResult = deleteAndContinue(file)

      override def visitFileFailed(file: Path, exc: IOException): FileVisitResult =  deleteAndContinue(file)

      override def postVisitDirectory(dir: Path, exc: IOException): FileVisitResult = {
        Option(exc).fold(deleteAndContinue(dir)) {
          case NonFatal(e) => throw e
        }
      }
    })
  }

  val storageLocations = List(
    "akka.persistence.journal.leveldb.dir",
    "akka.persistence.journal.leveldb-shared.store.dir",
    "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s)))

  override protected def atStartup() {
    on(node1) {
      storageLocations.foreach(dir => deleteDirectory(dir.toPath))
    }
  }

  override protected def afterTermination() {
    on(node1) {
      storageLocations.foreach(dir => deleteDirectory(dir.toPath))
    }
  }

  def join(startOn: RoleName, joinTo: RoleName) {
    on(startOn) {
      Cluster(system) join node(joinTo).address
    }
    enterBarrier(startOn.name + "-joined")
  }

  def setupSharedJournal() {
    Persistence(system)
    on(node1) {
      system.actorOf(Props[SharedLeveldbStore], "store")
    }
    enterBarrier("persistence-started")

    system.actorSelection(node(node1) / "user" / "store") ! Identify(None)
    val sharedStore = expectMsgType[ActorIdentity].ref.get
    SharedLeveldbJournal.setStore(sharedStore, system)

    enterBarrier("after-1")
  }

  def joinCluster() {
    join(startOn = node1, joinTo = node1)
    join(startOn = node2, joinTo = node1)
    enterBarrier("after-2")
  }

  def on(nodes: RoleName*)(thunk: => Unit): Unit = {
    runOn(nodes: _*)(thunk)
  }
} 
Example 64
Source File: SimpleDowningSpec.scala    From simple-akka-downing   with Apache License 2.0 5 votes vote down vote up
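A multi-node test base class that forms a cluster, exposes each node's membership view over HTTP, and uses the test conductor to create and heal network partitions.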
package com.ajjpj.simpleakkadowning.util

import akka.actor.Props
import akka.cluster.{Cluster, MemberStatus}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, Uri}
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeSpec
import akka.remote.transport.ThrottlerTransportAdapter.Direction
import akka.stream.ActorMaterializer
import akka.testkit.ImplicitSender

import scala.concurrent.duration._
import scala.util.control.NonFatal

abstract class SimpleDowningSpec(config: SimpleDowningConfig) extends MultiNodeSpec(config) with STMultiNodeSpec with ImplicitSender {

  def initialParticipants = roles.size

  private var portToNode = Map.empty[Int,RoleName]

  def init(): Unit = {
    if (roles.headOption contains myself) {
      enterBarrier("initialized")
    }
    else {
      val cluster = Cluster(system)
      cluster.joinSeedNodes(seedAddresses)
      system.actorOf(Props(new ClusterHttpInspector(httpPort(myself))), "http-server")

      while (cluster.state.members.count(_.status == MemberStatus.Up) < roles.tail.size) Thread.sleep(100)
      enterBarrier("initialized")
    }

    portToNode = roles.map(r => node(r).address.port.get -> r).toMap
  }

  def httpPort (node: RoleName) = {
    val nodeNo = roles.indexOf(node)
    require(nodeNo > 0)
    8080 + nodeNo
  }

  def seedAddresses = roles.tail.map(node(_).root.address)

  private def httpGetNodes(node: RoleName, path: String): Set[RoleName] = {
    try {
      import system.dispatcher
      implicit val mat = ActorMaterializer()

      val uri = Uri (s"http://localhost:${httpPort (node)}$path")
      val response = Http (system).singleRequest (HttpRequest (uri = uri)).await
      val strict = response.entity.toStrict (10.seconds).await
      strict.data.decodeString ("utf-8") match {
        case s if s.isEmpty => Set.empty
        case s => s.split (' ').map (_.toInt).map (portToNode).toSet
      }
    }
    catch {
      case NonFatal(th) =>
        th.printStackTrace()
        Set.empty
    }
  }

  def upNodesFor(node: RoleName) = httpGetNodes(node, "/cluster-members/up")
  def unreachableNodesFor (node: RoleName) = httpGetNodes(node, "/cluster-members/unreachable")

  
  def createPartition(nodes: RoleName*) = {
    val otherNodes = roles.tail.toSet -- nodes
    for (n1 <- nodes; n2 <- otherNodes) testConductor.blackhole(n1, n2, Direction.Both).await
  }

  def healPartition(): Unit = {
    for (n1 <- roles.tail; n2 <- roles.tail) testConductor.passThrough(n1, n2, Direction.Both).await
  }
} 
Example 65
Source File: ClusterHttpInspector.scala    From simple-akka-downing   with Apache License 2.0 5 votes vote down vote up
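An actor that serves a small HTTP API exposing the up and unreachable cluster members as seen from the local node.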
package com.ajjpj.simpleakkadowning.util

import akka.actor.Actor
import akka.cluster.{Cluster, MemberStatus}
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives
import akka.stream.ActorMaterializer

import scala.concurrent.Await


class ClusterHttpInspector(httpPort: Int) extends Actor {
  val cluster = Cluster.get(context.system)
  val routes = {
    import Directives._

    pathPrefix("cluster-members") {
      path("up") { complete {
        cluster.state.members.filter(_.status == MemberStatus.Up).map(_.address.port.get).mkString(" ")
      }} ~
      path("unreachable") { complete {
        cluster.state.unreachable.map(_.address.port.get).mkString(" ")
      }}
    }
  }

  import context.dispatcher
  implicit val mat = ActorMaterializer()

  val fServerBinding =
    Http(context.system)
      .bindAndHandle(routes, "localhost", httpPort)


  override def postStop () = {
    import scala.concurrent.duration._
    super.postStop ()
    fServerBinding.foreach(sb => Await.ready (sb.unbind(), 5.seconds))
  }

  override def receive = Actor.emptyBehavior

} 
Example 66
Source File: ClusterBootstrapAutostart.scala    From reactive-lib   with Apache License 2.0 5 votes vote down vote up
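An extension that starts Cluster Bootstrap automatically, unless static seed nodes are already configured or no deployment platform is active.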
package com.lightbend.rp.akkaclusterbootstrap

import akka.actor._
import akka.cluster.Cluster
import akka.management.cluster.bootstrap.ClusterBootstrap
import com.lightbend.rp.common.{ AkkaClusterBootstrapModule, Module, Platform }

final class ClusterBootstrapAutostart(system: ExtendedActorSystem) extends Extension {
  if (Module.enabled(AkkaClusterBootstrapModule)) {
    val cluster = Cluster(system)

    if (cluster.settings.SeedNodes.nonEmpty) {
      system.log.warning("ClusterBootstrapAutostart is enabled but seed nodes are defined, no action will be taken")
    } else if (Platform.active.isEmpty) {
      system.log.info("ClusterBootstrapAutostart is enabled but no active platform detected (i.e. running locally), no action will be taken")
    } else {
      ClusterBootstrap(system).start()
    }
  }
}

object ClusterBootstrapAutostart extends ExtensionId[ClusterBootstrapAutostart] with ExtensionIdProvider {
  override def lookup: ClusterBootstrapAutostart.type = ClusterBootstrapAutostart
  override def get(system: ActorSystem): ClusterBootstrapAutostart = super.get(system)
  override def createExtension(system: ExtendedActorSystem): ClusterBootstrapAutostart = new ClusterBootstrapAutostart(system)
} 
Example 67
Source File: ClusterStatusCheck.scala    From reactive-lib   with Apache License 2.0 5 votes vote down vote up
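Readiness and health checks derived from the self member's status: the node is considered healthy in any status short of Down or Removed, but ready only when WeaklyUp or Up.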
package com.lightbend.rp.akkaclusterbootstrap

import akka.actor.ExtendedActorSystem
import akka.cluster.{ Cluster, MemberStatus }
import com.lightbend.rp.status.{ HealthCheck, ReadinessCheck }
import scala.concurrent.{ ExecutionContext, Future }

class ClusterStatusCheck extends ReadinessCheck with HealthCheck {
  def healthy(actorSystem: ExtendedActorSystem)(implicit ec: ExecutionContext): Future[Boolean] =
    status(actorSystem) map {
      case MemberStatus.Joining => true
      case MemberStatus.WeaklyUp => true
      case MemberStatus.Up => true
      case MemberStatus.Leaving => true
      case MemberStatus.Exiting => true
      case MemberStatus.Down => false
      case MemberStatus.Removed => false
    }

  def ready(actorSystem: ExtendedActorSystem)(implicit ec: ExecutionContext): Future[Boolean] =
    status(actorSystem) map {
      case MemberStatus.Joining => false
      case MemberStatus.WeaklyUp => true
      case MemberStatus.Up => true
      case MemberStatus.Leaving => false
      case MemberStatus.Exiting => false
      case MemberStatus.Down => false
      case MemberStatus.Removed => false
    }

  private def status(actorSystem: ExtendedActorSystem)(implicit ec: ExecutionContext): Future[MemberStatus] = {
    val cluster = Cluster(actorSystem)
    val selfNow = cluster.selfMember
    Future.successful(selfNow.status)
  }
} 
Example 68
Source File: ClusterApp.scala    From reactive-lib   with Apache License 2.0 5 votes vote down vote up
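A sample application that runs a NoisySingleton under a ClusterSingletonManager, subscribes a watcher actor to cluster domain events, and serves a minimal HTTP route (management and bootstrap start-up are left commented out).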
package foo

import akka.actor.{ Actor, ActorLogging, ActorSystem, PoisonPill, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.singleton.{ ClusterSingletonManager, ClusterSingletonManagerSettings }
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.stream.ActorMaterializer

object ClusterApp {

  def main(args: Array[String]): Unit = {

    implicit val system = ActorSystem()
    implicit val materializer = ActorMaterializer()
    implicit val executionContext = system.dispatcher

    val cluster = Cluster(system)
    system.log.info("Starting Akka Management")
    system.log.info("something2")
    // AkkaManagement(system).start()
    // ClusterBootstrap(system).start()

    system.actorOf(
      ClusterSingletonManager.props(
        Props[NoisySingleton],
        PoisonPill,
        ClusterSingletonManagerSettings(system)))
    Cluster(system).subscribe(
      system.actorOf(Props[ClusterWatcher]),
      ClusterEvent.InitialStateAsEvents,
      classOf[ClusterDomainEvent])

    // add real app routes here
    val routes =
      path("hello") {
        get {
          complete(
            HttpEntity(ContentTypes.`text/html(UTF-8)`, "<h1>Hello</h1>"))
        }
      }

    Http().bindAndHandle(routes, "0.0.0.0", 8080)

    system.log.info(
      s"Server online at http://localhost:8080/\nPress RETURN to stop...")

    cluster.registerOnMemberUp({
      system.log.info("Cluster member is up!")
    })
  }

  class ClusterWatcher extends Actor with ActorLogging {
    val cluster = Cluster(context.system)

    override def receive = {
      case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
    }
  }
} 
Example 69
Source File: DemoApp.scala    From reactive-lib   with Apache License 2.0 5 votes vote down vote up
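A demo that starts Akka Management and Cluster Bootstrap, subscribes a watcher actor to cluster domain events, and binds a minimal HTTP route.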
package foo

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.stream.ActorMaterializer

object DemoApp extends App {

  implicit val system = ActorSystem("Appka")

  import system.log
  implicit val mat = ActorMaterializer()
  val cluster = Cluster(system)

  log.info(s"Started [$system], cluster.selfAddress = ${cluster.selfAddress}")
  log.info("something2")
  //#start-akka-management
  AkkaManagement(system).start()
  //#start-akka-management
  ClusterBootstrap(system).start()

  cluster.subscribe(
    system.actorOf(Props[ClusterWatcher]),
    ClusterEvent.InitialStateAsEvents,
    classOf[ClusterDomainEvent])

  // add real app routes here
  val routes =
    path("hello") {
      get {
        complete(
          HttpEntity(
            ContentTypes.`text/html(UTF-8)`,
            "<h1>Hello</h1>"))
      }
    }
  Http().bindAndHandle(routes, "0.0.0.0", 8080)

  Cluster(system).registerOnMemberUp({
    log.info("Cluster member is up!")
  })

}

class ClusterWatcher extends Actor with ActorLogging {
  val cluster = Cluster(context.system)

  override def receive = {
    case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
  }
} 
Example 70
Source File: ClusterStateInformer.scala    From distributed-cache-on-k8s-poc   with MIT License 5 votes vote down vote up
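An actor that logs cluster membership transitions, subscribing itself to member and reachability events on start.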
package cluster

import akka.actor.{ Actor, ActorLogging, Props }
import akka.cluster.ClusterEvent._
import akka.cluster.{ Cluster, ClusterEvent }

class ClusterStateInformer extends Actor with ActorLogging {
  val cluster = Cluster(context.system)

  override def preStart(): Unit = {
    cluster.subscribe(
      subscriber = self,
      initialStateMode = ClusterEvent.InitialStateAsEvents,
      to = classOf[MemberEvent], classOf[UnreachableMember]
    )
  }

  override def postStop(): Unit = cluster.unsubscribe(self)

  override def receive: Receive = {
    case MemberJoined(member) => log.info(s"Member ${member.address} Joined")
    case MemberUp(member) => log.info("Member is Up: {}", member.address)
    case UnreachableMember(member) => log.info("Member detected as unreachable: {}", member)
    case MemberRemoved(member, previousStatus) =>
      log.info(
        "Member is Removed: {} after {}",
        member.address, previousStatus)
    case me: MemberEvent => log.info(s"Received Member event $me for Member: ${me.member.address}")
  }
}

object ClusterStateInformer {
  def props():Props = Props(new ClusterStateInformer)
} 
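A minimal wiring sketch (not part of the original project): the system name and config below are assumptions, and it presumes the same cluster package as the informer above.

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

object ClusterStateInformerApp extends App {
  // Cluster(context.system) inside the actor requires a cluster-enabled provider.
  val config = ConfigFactory.parseString("akka.actor.provider = cluster")
    .withFallback(ConfigFactory.load())
  val system = ActorSystem("ClusterSystem", config)
  // Attach the informer; it subscribes itself to membership events in preStart.
  system.actorOf(ClusterStateInformer.props(), "cluster-state-informer")
}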
Example 71
Source File: ClusterListener.scala    From akka-cluster-playground   with MIT License 5 votes vote down vote up
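A listener actor, parameterised with a node ID, that logs member-up, unreachable, and member-removed events.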
package com.elleflorio.cluster.playground.node.cluster

import akka.actor.{Actor, ActorLogging, Props}
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._

object ClusterListener {
  def props(nodeId: String, cluster: Cluster) = Props(new ClusterListener(nodeId, cluster))
}

class ClusterListener(nodeId: String, cluster: Cluster) extends Actor with ActorLogging {

  override def preStart(): Unit = {
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
      classOf[MemberEvent], classOf[UnreachableMember])
  }

  override def postStop(): Unit = cluster.unsubscribe(self)

  def receive = {
    case MemberUp(member) =>
      log.info("Node {} - Member is Up: {}", nodeId, member.address)
    case UnreachableMember(member) =>
      log.info(s"Node {} - Member detected as unreachable: {}", nodeId, member)
    case MemberRemoved(member, previousStatus) =>
      log.info(s"Node {} - Member is Removed: {} after {}",
        nodeId, member.address, previousStatus)
    case _: MemberEvent => // ignore
  }
} 
Example 72
Source File: ClusterManager.scala    From akka-cluster-playground   with MIT License 5 votes vote down vote up
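An actor that spawns a ClusterListener and answers GetMembers requests with the addresses of all members currently Up.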
package com.elleflorio.cluster.playground.node.cluster

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.cluster.{Cluster, MemberStatus}
import com.elleflorio.cluster.playground.node.cluster.ClusterManager.GetMembers

object ClusterManager {

  sealed trait ClusterMessage
  case object GetMembers extends ClusterMessage

  def props(nodeId: String) = Props(new ClusterManager(nodeId))
}

class ClusterManager(nodeId: String) extends Actor with ActorLogging {

  val cluster: Cluster = Cluster(context.system)
  val listener: ActorRef = context.actorOf(ClusterListener.props(nodeId, cluster), "clusterListener")

  override def receive: Receive = {
    case GetMembers => {
      sender() ! cluster.state.members.filter(_.status == MemberStatus.up)
        .map(_.address.toString)
        .toList
    }
  }
} 
Example 73
Source File: NSDbActors.scala    From NSDb   with Apache License 2.0 5 votes vote down vote up
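A trait that wires up NSDb's top-level actors: Akka Management and Cluster Bootstrap, a cluster singleton with its proxy, the distributed-data replicator, and a cluster listener.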
package io.radicalbit.nsdb.cluster

import java.util.concurrent.TimeUnit

import akka.actor._
import akka.cluster.Cluster
import akka.cluster.ddata.DistributedData
import akka.cluster.singleton._
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.util.Timeout
import io.radicalbit.nsdb.cluster.actor._


trait NSDbActors {

  implicit def system: ActorSystem

  implicit lazy val timeout: Timeout =
    Timeout(system.settings.config.getDuration("nsdb.global.timeout", TimeUnit.SECONDS), TimeUnit.SECONDS)

  def initTopLevelActors(): Unit = {
    AkkaManagement(system).start()
    ClusterBootstrap(system).start()

    system.actorOf(
      ClusterSingletonManager.props(singletonProps = Props(classOf[DatabaseActorsGuardian]),
                                    terminationMessage = PoisonPill,
                                    settings = ClusterSingletonManagerSettings(system)),
      name = "databaseActorGuardian"
    )

    system.actorOf(
      ClusterSingletonProxy.props(singletonManagerPath = "/user/databaseActorGuardian",
                                  settings = ClusterSingletonProxySettings(system)),
      name = "databaseActorGuardianProxy"
    )

    DistributedData(system).replicator

    system.actorOf(ClusterListener.props(true),
                   name = s"cluster-listener_${createNodeName(Cluster(system).selfMember)}")
  }
} 
Example 74
Source File: ClusterListenerTestActor.scala    From NSDb   with Apache License 2.0 5 votes vote down vote up
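A test actor that, once its own member is up, deploys a node guardian on that member's address and subscribes it to a distributed pub-sub topic.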
package io.radicalbit.nsdb.cluster.actor

import akka.actor.{Actor, ActorLogging, Deploy}
import akka.cluster.Cluster
import akka.cluster.ClusterEvent.{InitialStateAsEvents, MemberEvent, MemberUp, UnreachableMember}
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.Subscribe
import akka.remote.RemoteScope
import io.radicalbit.nsdb.cluster.PubSubTopics.NODE_GUARDIANS_TOPIC
import io.radicalbit.nsdb.cluster.actor.ClusterListener.{GetNodeMetrics, NodeMetricsGot}
import io.radicalbit.nsdb.cluster.createNodeName

class ClusterListenerTestActor extends Actor with ActorLogging {

  private lazy val cluster  = Cluster(context.system)
  private lazy val mediator = DistributedPubSub(context.system).mediator

  override def preStart(): Unit = {
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents, classOf[MemberEvent], classOf[UnreachableMember])
  }

  override def receive: Receive = {
    case MemberUp(member) if member == cluster.selfMember =>

      val nodeName = createNodeName(member)

      val nodeActorsGuardian =
        context.system.actorOf(NodeActorsGuardian.props(self).withDeploy(Deploy(scope = RemoteScope(member.address))),
          name = s"guardian_$nodeName")

      mediator ! Subscribe(NODE_GUARDIANS_TOPIC, nodeActorsGuardian)
    case GetNodeMetrics =>
      sender() ! NodeMetricsGot(Set.empty)
  }

} 
Example 75
Source File: SplitBrainFiveNodesSpec.scala    From NSDb   with Apache License 2.0 5 votes vote down vote up
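A five-node multi-JVM test that partitions the cluster and shows that, without a resolution strategy, the partition must be healed by manually downing the minority side.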
package io.radicalbit.nsdb.split_brain

import akka.cluster.Cluster
import io.radicalbit.nsdb.split_brain.configs.SplitBrainFiveNodesSpecConfig

import scala.concurrent.duration._

class SplitBrainFiveNodesSpecMultiJvmNode1 extends SplitBrainFiveNodesSpec
class SplitBrainFiveNodesSpecMultiJvmNode2 extends SplitBrainFiveNodesSpec
class SplitBrainFiveNodesSpecMultiJvmNode3 extends SplitBrainFiveNodesSpec
class SplitBrainFiveNodesSpecMultiJvmNode4 extends SplitBrainFiveNodesSpec
class SplitBrainFiveNodesSpecMultiJvmNode5 extends SplitBrainFiveNodesSpec


class SplitBrainFiveNodesSpec extends MultiNodeBaseSpec(SplitBrainFiveNodesSpecConfig) {

  import SplitBrainFiveNodesSpecConfig._

  val side1 = Vector(node1, node2)
  val side2 = Vector(node3, node4, node5)

  "SplitBrainFiveNodesSpec" must {
    "start node-1" in within(30 seconds) {
      runOn(node1) {
        Cluster(system).join(addressOf(node1))
        awaitClusterNodesForUp(node1)
      }

      enterBarrier("node-1-up")
    }

    "start node-2" in within(30 seconds) {
      runOn(node2) {
        Cluster(system).join(addressOf(node1))
        awaitClusterNodesForUp(node1, node2)
      }
      enterBarrier("node-2-up")
    }

    "start node-3" in within(30 seconds) {
      runOn(node3) {
        Cluster(system).join(addressOf(node1))
        awaitClusterNodesForUp(node1, node2, node3)
      }
      enterBarrier("node-3-up")
    }

    "start node-4" in within(30 seconds) {
      runOn(node4) {
        Cluster(system).join(addressOf(node1))
        awaitClusterNodesForUp(node1, node2, node3, node4)
      }
      enterBarrier("node-4-up")
    }

    "start node-5" in within(30 seconds) {
      runOn(node5) {
        Cluster(system).join(addressOf(node1))
        awaitClusterNodesForUp(node1, node2, node3, node4, node5)
      }
      enterBarrier("node-5-up")
    }

    "handle split brain scenario" in within(60 seconds) {
      runOn(node1) {
        for (role1 <- side1; role2 <- side2) switchOffConnection(role1, role2)
      }
      enterBarrier("links-failed")

      runOn(side2: _*) {
        awaitClusterLeader(side2: _*)
        awaitAssert(side1.foreach(role => cluster.down(addressOf(role)))) // manually healing the cluster
        awaitExistingMembers(side2: _*) // the new cluster is composed only of side2 nodes
      }

      runOn(side1: _*) {
        an[java.lang.AssertionError] shouldBe thrownBy(awaitSelfDowningNode(5 seconds)) // demonstrating that the isolated nodes do not down themselves
      }
      enterBarrier("5 nodes split-brain")
    }
  }
} 
Example 76
Source File: SplitBrainThreeNodesSpec.scala    From NSDb   with Apache License 2.0 5 votes vote down vote up
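The three-node variant of the split-brain test above, again resolving the partition by manually downing the minority side.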
package io.radicalbit.nsdb.split_brain

import akka.cluster.Cluster
import io.radicalbit.nsdb.split_brain.configs.SplitBrainThreeNodeSpecConfig

import scala.concurrent.duration._

class SplitBrainThreeNodesSpecMultiJvmNode1 extends SplitBrainThreeNodesSpec
class SplitBrainThreeNodesSpecMultiJvmNode2 extends SplitBrainThreeNodesSpec
class SplitBrainThreeNodesSpecMultiJvmNode3 extends SplitBrainThreeNodesSpec


class SplitBrainThreeNodesSpec() extends MultiNodeBaseSpec(SplitBrainThreeNodeSpecConfig) {

  import SplitBrainThreeNodeSpecConfig._

  val side1 = Vector(node1, node2)
  val side2 = Vector(node3)

  "SplitBrainThreeNodesSpec" must {
    "start node-1" in within(30 seconds) {
      runOn(node1) {
        Cluster(system).join(addressOf(node1))
        awaitClusterNodesForUp(node1)
      }

      enterBarrier("node-1-up")
    }

    "start node-2" in within(30 seconds) {
      runOn(node2) {
        Cluster(system).join(addressOf(node1))
        awaitClusterNodesForUp(node1, node2)
      }
      enterBarrier("node-2-up")
    }

    "start node-3" in within(30 seconds) {
      runOn(node3) {
        Cluster(system).join(addressOf(node1))
        awaitClusterNodesForUp(node1, node2, node3)
      }
      enterBarrier("node-3-up")
    }

    "handle split brain scenario" in within(60 seconds) {
      runOn(node1) {
        for (role1 <- side1; role2 <- side2) switchOffConnection(role1, role2)
      }
      enterBarrier("links-failed")

      runOn(side1: _*) {
        awaitClusterLeader(side1: _*)
        awaitAssert(side2.foreach(role => cluster.down(addressOf(role)))) // manually healing the cluster
        awaitExistingMembers(side1: _*) // the new cluster is composed only of side1 nodes
      }

      runOn(side2: _*) {
        an[java.lang.AssertionError] shouldBe thrownBy(awaitSelfDowningNode(5 seconds)) // demonstrating that the isolated node does not down itself
      }
      enterBarrier("3 nodes split-brain")
    }
  }
} 
Example 77
Source File: SplitBrainThreeNodesResolutionSpec.scala    From NSDb   with Apache License 2.0 5 votes vote down vote up
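A three-node test verifying automatic split-brain resolution: the two-node majority survives while the isolated node downs itself.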
package io.radicalbit.nsdb.split_brain

import akka.cluster.Cluster
import io.radicalbit.nsdb.split_brain.configs.SplitBrainThreeNodesResolutionSpecConfig

import scala.concurrent.duration._

class SplitBrainThreeNodesResolutionSpecMultiJvmNode1 extends SplitBrainThreeNodesResolutionSpec
class SplitBrainThreeNodesResolutionSpecMultiJvmNode2 extends SplitBrainThreeNodesResolutionSpec
class SplitBrainThreeNodesResolutionSpecMultiJvmNode3 extends SplitBrainThreeNodesResolutionSpec


class SplitBrainThreeNodesResolutionSpec extends MultiNodeBaseSpec(SplitBrainThreeNodesResolutionSpecConfig) {

  import SplitBrainThreeNodesResolutionSpecConfig._

  val side1 = Vector(node1, node2)
  val side2 = Vector(node3)

  "SplitBrainThreeNodesResolutionSpec" must {
    "start node-1" in within(30 seconds) {
      runOn(node1) {
        Cluster(system).join(addressOf(node1))
        awaitClusterNodesForUp(node1)
      }

      enterBarrier("node-1-up")
    }

    "start node-2" in within(30 seconds) {
      runOn(node2) {
        Cluster(system).join(addressOf(node1))
        awaitClusterNodesForUp(node1, node2)
      }
      enterBarrier("node-2-up")
    }

    "start node-3" in within(30 seconds) {
      runOn(node3) {
        Cluster(system).join(addressOf(node1))
        awaitClusterNodesForUp(node1, node2, node3)
      }
      enterBarrier("node-3-up")
    }

    "solve split brain scenario" in within(60 seconds) {
      runOn(node1) {
        for (role1 <- side1; role2 <- side2) switchOffConnection(role1, role2)
      }
      enterBarrier("links-failed")

      runOn(side1: _*) {
        awaitSurvivorsNodes(side1: _*)
        awaitAllLeavingNodes(side2: _*)
        awaitClusterLeader(side1: _*)
      }

      runOn(side2: _*) {
        awaitSelfDowningNode()
      }
      enterBarrier("3 nodes split-brain solved")
    }
  }
} 
Example 78
Source File: SplitBrainFiveNodesResolutionSpec.scala    From NSDb   with Apache License 2.0 5 votes vote down vote up
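The five-node variant of the automatic split-brain resolution test: the three-node majority survives while the two-node minority downs itself.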
package io.radicalbit.nsdb.split_brain

import akka.cluster.Cluster
import io.radicalbit.nsdb.split_brain.configs.SplitBrainFiveNodesResolutionSpecConfig

import scala.concurrent.duration._

class SplitBrainFiveNodesResolutionSpecMultiJvmNode1 extends SplitBrainFiveNodesResolutionSpec
class SplitBrainFiveNodesResolutionSpecMultiJvmNode2 extends SplitBrainFiveNodesResolutionSpec
class SplitBrainFiveNodesResolutionSpecMultiJvmNode3 extends SplitBrainFiveNodesResolutionSpec
class SplitBrainFiveNodesResolutionSpecMultiJvmNode4 extends SplitBrainFiveNodesResolutionSpec
class SplitBrainFiveNodesResolutionSpecMultiJvmNode5 extends SplitBrainFiveNodesResolutionSpec


class SplitBrainFiveNodesResolutionSpec extends MultiNodeBaseSpec(SplitBrainFiveNodesResolutionSpecConfig) {

  import SplitBrainFiveNodesResolutionSpecConfig._

  val side1 = Vector(node1, node2)
  val side2 = Vector(node3, node4, node5)

  "SplitBrainFiveNodesResolutionSpec" must {
    "start node-1" in within(30 seconds) {
      runOn(node1) {
        Cluster(system).join(addressOf(node1))
        awaitClusterNodesForUp(node1)
      }

      enterBarrier("node-1-up")
    }

    "start node-2" in within(30 seconds) {
      runOn(node2) {
        Cluster(system).join(addressOf(node1))
        awaitClusterNodesForUp(node1, node2)
      }
      enterBarrier("node-2-up")
    }

    "start node-3" in within(30 seconds) {
      runOn(node3) {
        Cluster(system).join(addressOf(node1))
        awaitClusterNodesForUp(node1, node2, node3)
      }
      enterBarrier("node-3-up")
    }

    "start node-4" in within(30 seconds) {
      runOn(node4) {
        Cluster(system).join(addressOf(node1))
        awaitClusterNodesForUp(node1, node2, node3, node4)
      }
      enterBarrier("node-4-up")
    }

    "start node-5" in within(30 seconds) {
      runOn(node5) {
        Cluster(system).join(addressOf(node1))
        awaitClusterNodesForUp(node1, node2, node3, node4, node5)
      }
      enterBarrier("node-5-up")
    }

    "solve split brain scenario" in within(60 seconds) {
      runOn(node1) {
        for (role1 <- side1; role2 <- side2) switchOffConnection(role1, role2)
      }
      enterBarrier("links-failed")

      runOn(side2: _*) {
        awaitSurvivorsNodes(side2: _*)
        awaitAllLeavingNodes(side1: _*)
        awaitClusterLeader(side2: _*)
      }

      runOn(side1: _*) {
        awaitSelfDowningNode()
      }

      enterBarrier("5 nodes split-brain solved")
    }
  }
} 
Example 79
Source File: MemberListenerActor.scala    From asura   with MIT License 5 votes vote down vote up
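An actor that tracks the set of Up members from membership events (including the initial CurrentClusterState snapshot) and returns it on request.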
package asura.cluster.actor

import akka.actor.Props
import akka.cluster.ClusterEvent._
import akka.cluster.{Cluster, Member, MemberStatus}
import asura.cluster.actor.MemberListenerActor.GetAllMembers
import asura.cluster.model.MemberInfo
import asura.common.actor.BaseActor

class MemberListenerActor extends BaseActor {

  val cluster = Cluster(context.system)
  var nodes = Set.empty[Member]

  override def preStart(): Unit = {
    cluster.subscribe(self, classOf[MemberEvent])
  }

  override def postStop(): Unit = {
    cluster.unsubscribe(self)
  }

  override def receive: Receive = {
    case state: CurrentClusterState =>
      nodes = state.members.collect {
        case m if m.status == MemberStatus.Up => m
      }
    case MemberUp(member) =>
      log.info("Member({}) is Up: {}", member.roles.mkString(","), member.address)
      nodes += member
    case MemberRemoved(member, previousStatus) =>
      log.info("Member is Removed: {} after {}", member.address, previousStatus)
      nodes -= member
    case _: MemberEvent =>
    case GetAllMembers =>
      sender() ! nodes.map(MemberInfo.fromMember(_))
  }

}

object MemberListenerActor {
  def props() = Props(new MemberListenerActor())

  case class GetAllMembers()

} 
Example 80
Source File: ExecutorClusterListener.scala    From marvin-engine-executor   with Apache License 2.0 5 votes vote down vote up
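A listener that joins the cluster via seed nodes on start, logs membership events, and leaves the cluster cleanly when stopped.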
package org.marvin.executor.manager

import akka.cluster.Cluster
import akka.cluster.ClusterEvent._
import akka.actor.{Actor, ActorLogging, Address}

import scala.collection.immutable

class ExecutorClusterListener(seedNodes: immutable.Seq[Address]) extends Actor with ActorLogging {

  var cluster: Cluster = _

  override def preStart(): Unit = {
    cluster = Cluster(context.system)

    log.info(s"Joining to the cluster ${context.system.name} ...")
    cluster.joinSeedNodes(seedNodes)

    log.info(s"Subscribing to the cluster ${context.system.name} ...")
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents, classOf[MemberUp], classOf[MemberEvent], classOf[UnreachableMember])

    log.info(s"Cluster configuration done! :-P")
    log.info(s"Cluster Node Address is ${cluster.selfAddress}")
  }

  override def postStop(): Unit = {
    log.info(s"Leaving cluster ${context.system.name} :-( ...")
    cluster.unsubscribe(self)
    cluster.leave(cluster.selfAddress)
    log.info("Left cluster with success!")
  }

  def receive = {
    case MemberUp(member) =>
      log.info("Member is Up: {}", member.address)

    case UnreachableMember(member) =>
      log.info("Member detected as unreachable: {}", member)

    case MemberRemoved(member, previousStatus) =>
      log.info("Member is Removed: {} after {}", member.address, previousStatus)

    case _: MemberEvent =>
      log.info("Unknown message received ...")
  }
} 
Example 81
Source File: AkkaKubernetes.scala    From akka-kubernetes-tests   with Apache License 2.0 5 votes vote down vote up
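A Kubernetes sample wiring together bootstrap, a singleton boss actor, a sharded team-member region, and HTTP routes for talking to them.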
package akka.kubernetes.sample

import akka.actor.{Actor, ActorLogging, ActorSystem, PoisonPill, Props}
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.sharding.{ClusterSharding, ClusterShardingSettings}
import akka.cluster.singleton.{
  ClusterSingletonManager,
  ClusterSingletonManagerSettings,
  ClusterSingletonProxy,
  ClusterSingletonProxySettings
}
import akka.cluster.{Cluster, ClusterEvent}
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.stream.ActorMaterializer

object DemoApp extends App {

  implicit val system = ActorSystem("KubernetesTest")

  import system.{dispatcher, log}

  implicit val mat = ActorMaterializer()
  implicit val cluster = Cluster(system)

  log.info("Running with [{}]", new Resources())
  log.info(s"Started [$system], cluster.selfAddress = ${cluster.selfAddress}")

  AkkaManagement(system).start()
  ClusterBootstrap(system).start()

  system.actorOf(
    ClusterSingletonManager.props(singletonProps = Props(new AkkaBoss("patriknw")),
                                  terminationMessage = PoisonPill,
                                  settings = ClusterSingletonManagerSettings(system)),
    "boss"
  )

  val bossProxy = system.actorOf(
    ClusterSingletonProxy.props(singletonManagerPath = "/user/boss", settings = ClusterSingletonProxySettings(system)),
    name = "bossProxy"
  )

  val teamMembers = ClusterSharding(system).start(
    "team-member",
    Props(new AkkaMember()),
    ClusterShardingSettings(system),
    AkkaMember.extractEntityId,
    AkkaMember.extractShardId
  )

  cluster.subscribe(system.actorOf(Props[ClusterWatcher]),
                    ClusterEvent.InitialStateAsEvents,
                    classOf[ClusterDomainEvent])

  val talkToTheBoss = new TalkToTheBossRouteRoute(bossProxy)
  val talkToATeamMember = new TalkToATeamMemberRoute(teamMembers)

  Http().bindAndHandle(
    concat(talkToTheBoss.route(),
           talkToATeamMember.route(),
           ClusterStateRoute.routeGetMembers(cluster),
           VersionRoute.versionRoute),
    "0.0.0.0",
    8080
  )

  Cluster(system).registerOnMemberUp({
    log.info("Cluster member is up!")
  })

}

class ClusterWatcher extends Actor with ActorLogging {
  implicit val cluster = Cluster(context.system)

  override def receive = {
    case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
  }
} 
Example 82
Source File: AkkaBoss.scala    From akka-kubernetes-tests   with Apache License 2.0 5 votes vote down vote up
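The singleton boss actor from the sample above: it answers greetings and hands out job specs for known team members.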
package akka.kubernetes.sample

import akka.actor.Actor
import akka.cluster.Cluster
import akka.event.Logging
import akka.kubernetes.sample.AkkaBoss.{GoToJobCentre, JobSpec, WhatCanIDo}

object AkkaBoss {
  case class WhatCanIDo(name: String)
  case class JobSpec(roles: Set[String])
  case class GoToJobCentre(name: String)
}

class AkkaBoss(name: String) extends Actor {

  val log = Logging(this)
  val cluster = Cluster(context.system)

  log.info("The boss is up and running on Cluster Node [{}]", cluster.selfMember)

  val teamMembers = Map(
    "virtualvoid" -> Set("Any and everything"),
    "johan" -> Set("radiation", "streams"),
    "raboof" -> Set("Buy a house", "HTTP"),
    "ktoso" -> Set("Stop fruiting and come home"),
    "helena" -> Set("Stopped fruiting and came home", "Drink complexi tea"),
    "chbatey" -> Set("Anything but Cassandra", "Drink coffee")
  )

  override def receive: Receive = {
    case "hello" =>
      log.info("Boss to say hello to [{}]", sender())
      sender() ! s"Hello from $name"
    case WhatCanIDo(n) =>
      teamMembers.get(n) match {
        case Some(roles) => sender() ! JobSpec(roles)
        case None => sender() ! GoToJobCentre(n)
      }
    case msg =>
      log.info("Boss just says hello, what is this? [{}]", msg)
  }
} 
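A hedged usage sketch (not from the original repository): querying the boss with the ask pattern. The config, actor names, and timeout are illustrative only.

import akka.actor.{ ActorSystem, Props }
import akka.kubernetes.sample.AkkaBoss.{ GoToJobCentre, JobSpec, WhatCanIDo }
import akka.pattern.ask
import akka.util.Timeout
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration._

object AkkaBossClient extends App {
  // The boss calls Cluster(context.system), so a cluster-enabled provider is needed.
  val config = ConfigFactory.parseString("akka.actor.provider = cluster")
    .withFallback(ConfigFactory.load())
  implicit val system = ActorSystem("KubernetesTest", config)
  implicit val timeout: Timeout = 3.seconds
  import system.dispatcher

  val boss = system.actorOf(Props(new AkkaBoss("patriknw")), "boss")
  // Replies with JobSpec(roles) for known members, GoToJobCentre otherwise.
  (boss ? WhatCanIDo("johan")).foreach {
    case JobSpec(roles)   => println(s"Roles: $roles")
    case GoToJobCentre(n) => println(s"No work for $n")
  }
}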
Example 83
Source File: ClusterStateRoute.scala    From akka-kubernetes-tests   with Apache License 2.0 5 votes vote down vote up
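An HTTP route that renders the cluster's internal read view: members, unreachable observations, the leader, and the oldest members per role.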
package akka.kubernetes.sample

import akka.cluster.{Cluster, Member, MemberStatus}
import akka.management.cluster.{
  ClusterHttpManagementJsonProtocol,
  ClusterMembers,
  ClusterReadViewAccess,
  ClusterUnreachableMember
}

// Just want the read view
object ClusterStateRoute extends ClusterHttpManagementJsonProtocol {

  import akka.http.scaladsl.server.Directives._
  import akka.management.cluster.ClusterHttpManagementHelper._

  def routeGetMembers(cluster: Cluster) =
    path("cluster" / "members") {
      get {
        complete {
          val readView = ClusterReadViewAccess.internalReadView(cluster)
          val members = readView.state.members.map(memberToClusterMember)

          val unreachable = readView.reachability.observersGroupedByUnreachable.toSeq
            .sortBy(_._1)
            .map {
              case (subject, observers) ⇒
                ClusterUnreachableMember(s"${subject.address}", observers.toSeq.sorted.map(m ⇒ s"${m.address}").toList)
            }
            .toList

          val thisDcMembers =
            cluster.state.members.toSeq
              .filter(node => node.status == MemberStatus.Up && node.dataCenter == cluster.selfDataCenter)

          val leader = readView.leader.map(_.toString)

          val oldest = if (thisDcMembers.isEmpty) None else Some(thisDcMembers.min(Member.ageOrdering).address.toString)

          ClusterMembers(s"${readView.selfAddress}", members, unreachable, leader, oldest, oldestPerRole(thisDcMembers))
        }
      }
    }

} 
Example 84
Source File: CqrsApp.scala    From akka-kubernetes-tests   with Apache License 2.0 5 votes vote down vote up
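A CQRS sample that, once the member is up, starts sharded switch entities and, depending on the node's roles, generates load or runs a simple events-by-tag query.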
package akka.kubernetes.couchbase

import java.util.UUID
import java.util.concurrent.atomic.AtomicInteger

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.persistence.couchbase.scaladsl.CouchbaseReadJournal
import akka.persistence.query.{NoOffset, PersistenceQuery}
import akka.stream.ActorMaterializer

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.util.{Random, Try}

object CqrsApp {

  def main(args: Array[String]): Unit = {
    val system = ActorSystem("CouchbaseSystem")
    val materializer = ActorMaterializer()(system)
    val ec: ExecutionContext = system.dispatcher
    val log = system.log

    AkkaManagement(system).start()
    ClusterBootstrap(system).start()

    Cluster(system).registerOnMemberUp {

      val selfRoles = Cluster(system).selfRoles

      log.info("Running with roles {}", selfRoles)

      val shardedSwitchEntity = ShardedSwitchEntity(system)
      shardedSwitchEntity.start()
      EventProcessorWrapper(system).start()

      if (selfRoles.contains("load-generation")) {
        log.info("Starting load generation")
        testIt(system, shardedSwitchEntity)
      }

      if (selfRoles.contains("simple-query")) {
        log.info("Starting simple query")
        verySimpleRead(system, materializer, ec)
      }
    }

    def verySimpleRead(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext): Unit = {
      val query = PersistenceQuery(system).readJournalFor[CouchbaseReadJournal](CouchbaseReadJournal.Identifier)
      val startTime = System.currentTimeMillis()
      query
        .currentEventsByTag("tag1", NoOffset)
        .runFold(0)((count, _) => count + 1)
        .onComplete { t: Try[Int] =>
          system.log.info("Query finished for tag1 in {}. Read {} rows",
                          (System.currentTimeMillis() - startTime).millis.toSeconds,
                          t)
        }
    }

    // Every instance will add 100 persistent actors and send 2 messages to each every 2 seconds
    def testIt(system: ActorSystem, shardedSwitch: ShardedSwitchEntity): Unit = {
      val uuid = UUID.randomUUID()
      val nrSwitches = 100
      def switchName(nr: Int) = s"switch-$uuid-$nr"
      log.info("Creating {} switches with uuid {}", nrSwitches, uuid)
      (0 until nrSwitches) foreach { s =>
        shardedSwitch.tell(switchName(s), SwitchEntity.CreateSwitch(6))
      }
      import system.dispatcher
      system.scheduler.schedule(3.seconds, 2.second) {
        (0 until nrSwitches) foreach { s =>
          val switch = switchName(s)
          log.debug("Sending messages to switch {}", switch)
          shardedSwitch.tell(switch, SwitchEntity.SetPortStatus(Random.nextInt(6), portEnabled = true))
          shardedSwitch.tell(switch, SwitchEntity.SendPortStatus)
        }
      }
    }
  }

} 
Example 85
Source File: ChaosMain.scala    From akka-kubernetes-tests   with Apache License 2.0 5 votes vote down vote up
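A chaos-testing entry point whose /hello route halts the JVM, simulating an abrupt node crash.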
package akka.kubernetes.chaos
import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.remote.RARP
import akka.stream.ActorMaterializer

object ChaosMain extends App {

  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  val log = system.log
  val management = AkkaManagement(system).start()
  val bootstrap = ClusterBootstrap(system).start()

  val route =
    path("hello") {
      get {
        println("Time to say good bye")
        Runtime.getRuntime.halt(-1)
        complete(HttpEntity(ContentTypes.`text/html(UTF-8)`, "<h1>You won't get this</h1>"))
      }
    }

  val listeningOn = RARP(system).provider.getDefaultAddress.host.getOrElse("0.0.0.0")
  log.info("Listening on {}", listeningOn)

  Http().bindAndHandle(route, listeningOn, 8080)

  Cluster(system).registerOnMemberUp({
    log.info("Cluster member is up!")
  })

} 
Example 86
Source File: SoakMain.scala    From akka-kubernetes-tests   with Apache License 2.0 5 votes vote down vote up
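A soak-test entry point that logs JVM and memory details, measures cluster joining time, and starts ping-pong actors with a stats endpoint once the member is up.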
package akka.kubernetes.soak

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.http.scaladsl.Http
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.remote.RARP
import akka.stream.ActorMaterializer
import com.sun.management.OperatingSystemMXBean
import scala.concurrent.duration._
import akka.util.PrettyDuration._

object SoakMain extends App {

  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  val log = system.log

  import java.lang.management.ManagementFactory
  log.info("Java version: {}", sys.props("java.version"))
  log.info("Cores: " + Runtime.getRuntime.availableProcessors)
  log.info("Total Memory: " + Runtime.getRuntime.totalMemory / 1000000 + "Mb")
  log.info("Max Memory: " + Runtime.getRuntime.maxMemory / 1000000 + "Mb")
  log.info("Free Memory: " + Runtime.getRuntime.freeMemory / 1000000 + "Mb")

  val memorySize =
    ManagementFactory.getOperatingSystemMXBean.asInstanceOf[OperatingSystemMXBean].getTotalPhysicalMemorySize
  log.info("RAM: " + memorySize / 1000000 + "Mb")

  log.info("JAVA env vars: {}", sys.env.filterKeys(_.contains("JAVA")))
  log.info("JVM env vars: {}", sys.env.filterKeys(_.contains("JVM")))

  val management = AkkaManagement(system).start()
  val bootstrapStart = System.nanoTime()
  val bootstrap = ClusterBootstrap(system).start()

  val listeningOn = RARP(system).provider.getDefaultAddress.host.getOrElse("0.0.0.0")
  log.info("Listening on {}", listeningOn)

  Cluster(system).registerOnMemberUp({
    val joiningTime = (System.nanoTime() - bootstrapStart).nano
    system.actorOf(PingPong.serverProps(), "server")
    val client = system.actorOf(PingPong.clientProps(joiningTime), "client")
    val clusterStats = new StatsEndpoint(system, client)
    log.info("Cluster member is up! Starting tests and binding http server. Joining time: {}", joiningTime.pretty)
    Http().bindAndHandle(clusterStats.route, listeningOn, 8080)
  })

} 
Example 87
Source File: AkkaUdpClusterMain.scala    From akka-kubernetes-tests   with Apache License 2.0 5 votes vote down vote up
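A minimal entry point that forms a cluster over Artery UDP using Akka Management and Cluster Bootstrap.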
package akka.kubernetes

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.stream.ActorMaterializer

object AkkaUdpClusterMain extends App {
  implicit val system = ActorSystem("ArteryUdp")

  implicit val mat = ActorMaterializer()
  implicit val cluster = Cluster(system)

  AkkaManagement(system).start()
  ClusterBootstrap(system).start()

  Cluster(system).registerOnMemberUp({
    system.log.info("Cluster member is up!")
  })

} 
Example 88
Source File: FactorialFrontend.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
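A frontend that streams factorial jobs to a configured backend router once the cluster is up; on member removal the JVM is shut down, forcefully if termination takes longer than 10 seconds.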
package sample.cluster.factorial

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props, ReceiveTimeout }
import akka.cluster.Cluster
import akka.routing.FromConfig
import com.typesafe.config.ConfigFactory

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try

class FactorialFrontend(upToN: Int, repeat: Boolean) extends Actor with ActorLogging {
  val backend =
    context.actorOf(FromConfig.props(), name = "factorialBackendRouter")

  override def preStart(): Unit = {
    sendJobs()
    if (repeat) {
      context.setReceiveTimeout(10.seconds)
    }
  }

  def receive = {
    case (n: Int, factorial: BigInt) =>
      log.info("{}! = {}", n, factorial)
      if (n == upToN) {
        if (repeat) sendJobs()
        else context.stop(self)
      }
    case ReceiveTimeout =>
      log.info("Timeout")
      sendJobs()
  }

  def sendJobs(): Unit =
    //    log.info("Starting batch of factorials up to [{}]", upToN)
    1 to upToN foreach { backend ! _ }
}

object FactorialFrontend {
  def main(args: Array[String]): Unit = {
    val upToN = 200

    val config =
      ConfigFactory.parseString("akka.cluster.roles = [frontend]").withFallback(ConfigFactory.load("factorial"))

    val system = ActorSystem("ClusterSystem", config)
    system.log.info("Factorials will start when 2 backend members in the cluster.")

    Cluster(system) registerOnMemberUp {
      system.actorOf(Props(classOf[FactorialFrontend], upToN, false), name = "factorialFrontend")
    }

    Cluster(system).registerOnMemberRemoved {
      // exit JVM when ActorSystem has been terminated
      system.registerOnTermination(System.exit(0))
      // shut down ActorSystem
      system.terminate()

      // In case ActorSystem shutdown takes longer than 10 seconds,
      // exit the JVM forcefully anyway.
      // We must spawn a separate thread to not block current thread,
      // since that would have blocked the shutdown of the ActorSystem.
      new Thread {
        override def run(): Unit =
          if (Try(Await.ready(system.whenTerminated, 10.seconds)).isFailure)
            System.exit(-1)
      }.start()
    }
  }
} 
Example 89
Source File: MetricsListener.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
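An actor that subscribes to cluster metrics and logs heap usage and system load for the local node.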
package sample.cluster.factorial

import akka.actor.{ Actor, ActorLogging }
import akka.cluster.Cluster
import akka.cluster.ClusterEvent.CurrentClusterState
import akka.cluster.metrics.{ ClusterMetricsChanged, ClusterMetricsExtension, NodeMetrics }
import akka.cluster.metrics.StandardMetrics.{ Cpu, HeapMemory }

class MetricsListener extends Actor with ActorLogging {
  val selfAddress = Cluster(context.system).selfAddress
  val extension = ClusterMetricsExtension(context.system)

  // Subscribe to ClusterMetricsEvent events.
  override def preStart(): Unit = extension.subscribe(self)

  // Unsubscribe from ClusterMetricsEvent events.
  override def postStop(): Unit = extension.unsubscribe(self)

  def receive = {
    case ClusterMetricsChanged(clusterMetrics) =>
      clusterMetrics.filter(_.address == selfAddress) foreach { nodeMetrics =>
        logHeap(nodeMetrics)
        logCpu(nodeMetrics)
      }
    case state: CurrentClusterState => // Ignore.
  }

  def logHeap(nodeMetrics: NodeMetrics): Unit = nodeMetrics match {
    case HeapMemory(address, timestamp, used, committed, max) =>
      log.info("Used heap: {} MB", used.doubleValue / 1024 / 1024)
    case _ => // No heap info.
  }

  def logCpu(nodeMetrics: NodeMetrics): Unit = nodeMetrics match {
    case Cpu(address, timestamp, Some(systemLoadAverage), cpuCombined, cpuStolen, processors) =>
      log.info("Load: {} ({} processors)", systemLoadAverage, processors)
    case _ => // No cpu info.
  }
} 
Example 90
Source File: SimpleClusterListener.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
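A listener that logs membership events for the local node and leaves the cluster when it receives a "Leave" message.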
package sample.cluster.sample

import java.util.concurrent.TimeUnit

import akka.actor.Actor
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._
import com.typesafe.scalalogging.StrictLogging

import scala.concurrent.ExecutionContext.Implicits
import scala.concurrent.Future

class SimpleClusterListener extends Actor with StrictLogging {
  val cluster = Cluster(context.system)

  override def preStart(): Unit =
    cluster.subscribe(self, InitialStateAsEvents, classOf[MemberEvent], classOf[UnreachableMember])

  override def postStop(): Unit =
    cluster.unsubscribe(self)

  override def receive: Receive = {
    case MemberUp(member) =>
      logger.info(s"${cluster.selfAddress} 节点已可使用:$member")
    case UnreachableMember(member) =>
      logger.warn(s"${cluster.selfAddress} 节点分离不可达:$member")
    case MemberRemoved(member, previousStatus) =>
      logger.info(s"${cluster.selfAddress} 成员已移除:$member,之前状态:$previousStatus")
    case me: MemberEvent =>
      logger.debug(s"${cluster.selfAddress} ignore event: $me")
    case "Leave" =>
      cluster.leave(cluster.selfAddress)
      Future {
        TimeUnit.SECONDS.sleep(3)
      }(Implicits.global)
  }
} 
Example 91
Source File: SimpleClusterApp.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
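An entry point that starts three actor systems on different ports, each with a listener that is later asked to leave the cluster on a staggered schedule.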
package sample.cluster.sample

import java.util.concurrent.TimeUnit

import akka.actor.{ ActorSystem, Props }
import akka.cluster.Cluster
import com.typesafe.config.ConfigFactory

import scala.concurrent.{ ExecutionContext, Future }

object SimpleClusterApp extends App {
  var tm = 0
  Seq("2551", "2552", "0").foreach { port =>
    // Override the configuration of the port
    val config = ConfigFactory.parseString(s"""
        akka.remote.netty.tcp.port=$port
        akka.remote.artery.canonical.port=$port
        """).withFallback(ConfigFactory.load("simple-cluster"))

    // Create an Akka system
    val system = ActorSystem("ClusterSystem", config)
    val cluster = Cluster(system)
    // Create an actor that handles cluster domain events
    val simpleClusterListener =
      system.actorOf(Props[SimpleClusterListener], name = "clusterListener")

    tm += 5
    Future {
      TimeUnit.SECONDS.sleep(7 + tm)
      simpleClusterListener ! "Leave"
    }(ExecutionContext.Implicits.global)
  }
} 
Example 92
Source File: StatsSample.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
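A stats sample that runs worker and service actors on compute nodes, plus a client that tracks compute members and periodically sends one of them a job.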
package sample.cluster.stats

import java.util.concurrent.ThreadLocalRandom

import akka.actor.{ Actor, ActorSystem, Address, Props, RelativeActorPath, RootActorPath }
import akka.cluster.ClusterEvent._
import akka.cluster.{ Cluster, MemberStatus }
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration._

object StatsSample {
  def main(args: Array[String]): Unit =
    if (args.isEmpty) {
      startup(Seq("2551", "2552", "0"))
      StatsSampleClient.main(Array.empty)
    } else {
      startup(args)
    }

  def startup(ports: Seq[String]): Unit =
    ports foreach { port =>
      // Override the configuration of the port when specified as program argument
      val config = ConfigFactory
        .parseString(s"""
        akka.remote.netty.tcp.port=$port
        akka.remote.artery.canonical.port=$port
        """)
        .withFallback(ConfigFactory.parseString("akka.cluster.roles = [compute]"))
        .withFallback(ConfigFactory.load("stats1"))

      val system = ActorSystem("ClusterSystem", config)

      system.actorOf(Props[StatsWorker], name = "statsWorker")
      system.actorOf(Props[StatsService], name = "statsService")
    }
}

object StatsSampleClient {
  def main(args: Array[String]): Unit = {
    // note that client is not a compute node, role not defined
    val system = ActorSystem("ClusterSystem")
    system.actorOf(Props(classOf[StatsSampleClient], "/user/statsService"), "client")
  }
}

class StatsSampleClient(servicePath: String) extends Actor {
  val cluster = Cluster(context.system)

  val servicePathElements = servicePath match {
    case RelativeActorPath(elements) => elements
    case _ =>
      throw new IllegalArgumentException("servicePath [%s] is not a valid relative actor path" format servicePath)
  }
  import context.dispatcher

  val tickTask =
    context.system.scheduler.schedule(2.seconds, 2.seconds, self, "tick")

  var nodes = Set.empty[Address]

  override def preStart(): Unit =
    cluster.subscribe(self, classOf[MemberEvent], classOf[ReachabilityEvent])
  override def postStop(): Unit = {
    cluster.unsubscribe(self)
    tickTask.cancel()
  }

  def receive = {
    case "tick" if nodes.nonEmpty =>
      // just pick any one
      val address =
        nodes.toIndexedSeq(ThreadLocalRandom.current.nextInt(nodes.size))
      val service =
        context.actorSelection(RootActorPath(address) / servicePathElements)
      service ! StatsJob("this is the text that will be analyzed")
    case result: StatsResult =>
      println(result)
    case failed: JobFailed =>
      println(failed)
    case state: CurrentClusterState =>
      nodes = state.members.collect {
        case m if m.hasRole("compute") && m.status == MemberStatus.Up =>
          m.address
      }
    case MemberUp(m) if m.hasRole("compute")        => nodes += m.address
    case other: MemberEvent                         => nodes -= other.member.address
    case UnreachableMember(m)                       => nodes -= m.address
    case ReachableMember(m) if m.hasRole("compute") => nodes += m.address
  }
} 
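StatsJob, StatsResult and JobFailed are defined elsewhere in the sample; a minimal sketch consistent with how the client sends and prints them (the field names beyond StatsJob's text are assumptions):

final case class StatsJob(text: String)
final case class StatsResult(meanWordLength: Double)
final case class JobFailed(reason: String)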
Example 93
Source File: TransformationBackend.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package sample.cluster.transformation

import java.util.concurrent.TimeUnit

import akka.actor.{ Actor, ActorSystem, Props, RootActorPath }
import akka.cluster.ClusterEvent.{ CurrentClusterState, MemberUp }
import akka.cluster.{ Cluster, Member, MemberStatus }
import com.typesafe.config.ConfigFactory

import scala.concurrent.ExecutionContext.Implicits
import scala.concurrent.Future
import scala.util.Random

//#backend
class TransformationBackend extends Actor {
  val cluster = Cluster(context.system)

  // subscribe to cluster changes, MemberUp
  // re-subscribe when restart
  override def preStart(): Unit = cluster.subscribe(self, classOf[MemberUp])
  override def postStop(): Unit = cluster.unsubscribe(self)

  def receive = {
    case TransformationJob(text) =>
      sender() ! TransformationResult(text.toUpperCase)
    case state: CurrentClusterState =>
      state.members.filter(_.status == MemberStatus.Up) foreach register
    case MemberUp(m) => register(m)
  }

  def register(member: Member): Unit =
    if (member.hasRole("frontend"))
      context.actorSelection(RootActorPath(member.address) / "user" / "frontend") !
      BackendRegistration
}
//#backend

object TransformationBackend {
  def main(args: Array[String]): Unit = {
    // Override the configuration of the port when specified as program argument
    val port = if (args.isEmpty) "0" else args(0)
    val config = ConfigFactory
      .parseString(s"""
        akka.remote.netty.tcp.port=$port
        akka.remote.artery.canonical.port=$port
        """)
      .withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]"))
      .withFallback(ConfigFactory.load("simple-cluster"))

    val system = ActorSystem("ClusterSystem", config)
    system.actorOf(Props[TransformationBackend], name = "backend")
    Future {
      TimeUnit.SECONDS.sleep(10)
      TimeUnit.SECONDS.sleep(Random.nextInt(50))
      system.terminate()
    }(Implicits.global)
  }
} 
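The transformation protocol is likewise defined elsewhere; a sketch matching how this backend replies and registers (shapes follow the pattern matches above):

final case class TransformationJob(text: String)
final case class TransformationResult(text: String)
case object BackendRegistration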
Example 94
Source File: Status.scala    From nexus-kg   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.routes

import akka.Done
import akka.actor.ActorSystem
import akka.cluster.{Cluster, MemberStatus}
import akka.event.Logging
import akka.persistence.cassandra.CassandraPluginConfig
import akka.persistence.cassandra.session.scaladsl.CassandraSession
import ch.epfl.bluebrain.nexus.kg.config.AppConfig.PersistenceConfig
import monix.eval.Task

import scala.concurrent.Future

sealed trait Status {

  def check: Task[Boolean]
}

object Status {

  class CassandraStatus(implicit as: ActorSystem, persistence: PersistenceConfig) extends Status {
    implicit val ec     = as.dispatcher
    private val log     = Logging(as, "CassandraHealthCheck")
    private val config  = new CassandraPluginConfig(as, as.settings.config.getConfig(persistence.journalPlugin))
    private val (p, s)  = (config.sessionProvider, config.sessionSettings)
    private val session = new CassandraSession(as, p, s, ec, log, "health", _ => Future.successful(Done.done()))
    private val query   = s"SELECT now() FROM ${config.keyspace}.messages;"

    override def check: Task[Boolean] =
      Task.deferFuture(session.selectOne(query).map(_ => true).recover {
        case err =>
          log.error("Error while attempting to query for health check", err)
          false
      })
  }

  class ClusterStatus(cluster: Cluster) extends Status {
    override def check: Task[Boolean] =
      Task.pure(
        !cluster.isTerminated &&
          cluster.state.leader.isDefined && cluster.state.members.nonEmpty &&
          !cluster.state.members.exists(_.status != MemberStatus.Up) && cluster.state.unreachable.isEmpty
      )
  }
} 
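Since both implementations return Task[Boolean], they compose directly. A sketch of an aggregate readiness check, assuming monix's Task.sequence is acceptable in this project:

import monix.eval.Task

def overall(statuses: List[Status]): Task[Boolean] =
  Task.sequence(statuses.map(_.check)).map(_.forall(identity))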
Example 95
Source File: ChatApp.scala    From gabbler   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.gabbler.chat

import akka.NotUsed
import akka.actor.{ Actor, ActorLogging, ActorSystem, Props, SupervisorStrategy, Terminated }
import akka.cluster.Cluster
import akka.cluster.singleton.{
  ClusterSingletonManager,
  ClusterSingletonManagerSettings,
  ClusterSingletonProxy,
  ClusterSingletonProxySettings
}
import akka.http.scaladsl.model.Uri
import scala.concurrent.Await
import scala.concurrent.duration.Duration

object ChatApp {

  private final class Root extends Actor with ActorLogging {

    override val supervisorStrategy = SupervisorStrategy.stoppingStrategy

    private val userRepository = {
      val userEvents =
        Uri(context.system.settings.config.getString("gabbler-chat.user-repository.user-events"))
      val userRepository =
        context.actorOf(
          ClusterSingletonManager.props(UserRepository(userEvents),
                                        NotUsed,
                                        ClusterSingletonManagerSettings(context.system)),
          UserRepository.Name
        )
      context.actorOf(
        ClusterSingletonProxy.props(userRepository.path.elements.mkString("/", "/", ""),
                                    ClusterSingletonProxySettings(context.system)),
        s"${UserRepository.Name}-proxy"
      )
    }

    context.watch(userRepository)
    log.info("gabbler-chat up and running")

    override def receive = {
      case Terminated(actor) =>
        log.error("Terminating the system because {} terminated!", actor.path)
        context.system.terminate()
    }
  }

  def main(args: Array[String]): Unit = {
    val system = ActorSystem("gabbler-chat")
    Cluster(system).registerOnMemberUp(system.actorOf(Props(new Root), "root"))
    // Keep the main thread parked until the actor system terminates.
    Await.ready(system.whenTerminated, Duration.Inf)
  }
} 
Example 96
Source File: UserApp.scala    From gabbler   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.gabbler.user

import akka.NotUsed
import akka.actor.{ Actor, ActorLogging, ActorSystem, Props, SupervisorStrategy, Terminated }
import akka.cluster.Cluster
import akka.cluster.singleton.{
  ClusterSingletonManager,
  ClusterSingletonManagerSettings,
  ClusterSingletonProxy,
  ClusterSingletonProxySettings
}
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery

object UserApp {

  final class Root extends Actor with ActorLogging {

    override val supervisorStrategy = SupervisorStrategy.stoppingStrategy

    private val userRepository = {
      val readJournal =
        PersistenceQuery(context.system)
          .readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
      val userRepository =
        context.actorOf(
          ClusterSingletonManager.props(UserRepository(readJournal),
                                        NotUsed,
                                        ClusterSingletonManagerSettings(context.system)),
          UserRepository.Name
        )
      context.actorOf(
        ClusterSingletonProxy.props(userRepository.path.elements.mkString("/", "/", ""),
                                    ClusterSingletonProxySettings(context.system)),
        s"${UserRepository.Name}-proxy"
      )
    }

    private val userApi = {
      val config  = context.system.settings.config
      val address = config.getString("gabbler-user.user-api.address")
      val port    = config.getInt("gabbler-user.user-api.port")
      val timeout = config.getDuration("gabbler-user.user-api.user-repository-timeout").asScala
      context.actorOf(UserApi(address, port, userRepository, timeout), UserApi.Name)
    }

    context.watch(userRepository)
    context.watch(userApi)
    log.info("gabbler-user up and running")

    override def receive = {
      case Terminated(actor) =>
        log.error("Terminating the system because {} terminated!", actor.path)
        context.system.terminate()
    }
  }

  def main(args: Array[String]): Unit = {
    val system = ActorSystem("gabbler-user")
    Cluster(system).registerOnMemberUp(system.actorOf(Props(new Root), "root"))
  }
} 
Example 97
Source File: Constructr.scala    From constructr   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.constructr

import akka.actor.{ Actor, ActorLogging, ActorRef, Props, SupervisorStrategy, Terminated }
import akka.cluster.{ Cluster, Member }
import akka.cluster.ClusterEvent.{ InitialStateAsEvents, MemberExited, MemberLeft, MemberRemoved }
import akka.cluster.MemberStatus.Up
import de.heikoseeberger.constructr.coordination.Coordination
import scala.concurrent.duration.{ FiniteDuration, NANOSECONDS }

object Constructr {

  final val Name = "constructr"

  def props: Props =
    Props(new Constructr)
}

final class Constructr private extends Actor with ActorLogging {

  override val supervisorStrategy = SupervisorStrategy.stoppingStrategy

  private val cluster = Cluster(context.system)

  if (cluster.settings.SeedNodes.isEmpty) {
    log.info("Creating constructr-machine, because no seed-nodes defined")
    cluster.subscribe(self,
                      InitialStateAsEvents,
                      classOf[MemberLeft],
                      classOf[MemberExited],
                      classOf[MemberRemoved])
    context.become(active(context.watch(createConstructrMachine())))
  } else {
    log.info("Stopping self, because seed-nodes defined")
    context.stop(self)
  }

  override def receive = Actor.emptyBehavior

  private def active(machine: ActorRef): Receive = {
    case Terminated(`machine`) =>
      val selfAddress = cluster.selfAddress
      def isSelfAndUp(member: Member) =
        member.address == selfAddress && member.status == Up
      if (cluster.state.members.exists(isSelfAndUp)) {
        log.error("Leaving, because constructr-machine terminated!")
        cluster.leave(selfAddress)
      } else {
        log.error("Terminating system, because constructr-machine terminated!")
        context.system.terminate()
      }

    case MemberRemoved(member, _) if member.address == cluster.selfAddress =>
      log.error("Terminating system, because member has been removed!")
      context.system.terminate()
  }

  private def createConstructrMachine() = {
    val config = context.system.settings.config
    def getDuration(key: String) =
      FiniteDuration(config.getDuration(key).toNanos, NANOSECONDS)

    val coordinationTimeout   = getDuration("constructr.coordination-timeout")
    val nrOfRetries           = config.getInt("constructr.nr-of-retries")
    val retryDelay            = getDuration("constructr.retry-delay")
    val refreshInterval       = getDuration("constructr.refresh-interval")
    val ttlFactor             = config.getDouble("constructr.ttl-factor")
    val maxNrOfSeedNodes      = config.getInt("constructr.max-nr-of-seed-nodes")
    val joinTimeout           = getDuration("constructr.join-timeout")
    val abortOnJoinTimeout    = config.getBoolean("constructr.abort-on-join-timeout")
    val ignoreRefreshFailures = config.getBoolean("constructr.ignore-refresh-failures")

    context.actorOf(
      ConstructrMachine.props(
        cluster.selfAddress,
        Coordination(context.system.name, context.system),
        coordinationTimeout,
        nrOfRetries,
        retryDelay,
        refreshInterval,
        ttlFactor,
        if (maxNrOfSeedNodes <= 0) Int.MaxValue else maxNrOfSeedNodes,
        joinTimeout,
        abortOnJoinTimeout,
        ignoreRefreshFailures
      ),
      ConstructrMachine.Name
    )
  }
} 
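createConstructrMachine reads a fixed set of keys from the "constructr" section. A reference block with illustrative values (the durations and counts below are placeholders, not the library's defaults):

import com.typesafe.config.ConfigFactory

val constructrSettings = ConfigFactory.parseString("""
  constructr {
    coordination-timeout    = 3 seconds
    nr-of-retries           = 2
    retry-delay             = 3 seconds
    refresh-interval        = 30 seconds
    ttl-factor              = 2.0
    max-nr-of-seed-nodes    = 0          # <= 0 is treated as unbounded above
    join-timeout            = 15 seconds
    abort-on-join-timeout   = false
    ignore-refresh-failures = false
  }
  """)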
Example 98
Source File: Leadership.scala    From cave   with MIT License 5 votes vote down vote up
package actors

import akka.actor.{Actor, ActorLogging, Address}
import akka.cluster.ClusterEvent._
import akka.cluster.{Cluster, Member}

object Leadership {
  object IsLeader
}

class Leadership(address: Address) extends Actor with ActorLogging {

  private val cluster = Cluster(context.system)
  private var members = Set.empty[Member]

  private var isLeader = false

  override def preStart(): Unit =
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
      classOf[MemberEvent],
      classOf[UnreachableMember],
      classOf[ClusterDomainEvent])

  override def postStop(): Unit = cluster.unsubscribe(self)

  import actors.Leadership._

  def receive = {

    case IsLeader =>
      sender ! isLeader

    case state: CurrentClusterState =>
      log.warning("Initial state: " + state.leader)
      setLeader(state.leader)

    case MemberUp(member) =>
      log.warning(s"Member up($member)")
      members += member

    case MemberRemoved(member, previousStatus) =>
      log.warning(s"Member removed($member)")
      members.find(_.address == member.address) foreach (members -= _)

    case LeaderChanged(member) =>
      log.warning("Leader changed, now: " + member)
      setLeader(member)

    case e: MemberEvent =>
      log.warning(s"Member event($e)")
  }

  private def setLeader(leader: Option[Address]): Unit = {
    isLeader = leader exists (_ == address)
  }
} 
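Callers query the actor with a plain ask; a usage sketch (the timeout value is illustrative):

import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.Future
import scala.concurrent.duration._

implicit val timeout: Timeout = Timeout(2.seconds)

def amILeader(leadership: ActorRef): Future[Boolean] =
  (leadership ? Leadership.IsLeader).mapTo[Boolean]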
Example 99
Source File: KMClusterListener.scala    From Swallow   with Apache License 2.0 5 votes vote down vote up
package examples



import swallow.core._
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._
import akka.actor.ActorLogging
import akka.actor.Actor


object KMClusterListener {
  final case class SuperviseFlow(flow: KMFlow)
}

class KMClusterListener extends Actor with ActorLogging {
  import KMClusterListener._

  val cluster = Cluster(context.system)

  // subscribe to cluster changes, re-subscribe when restart
  override def preStart(): Unit = {
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
      classOf[MemberEvent], classOf[UnreachableMember])
  }
  override def postStop(): Unit = cluster.unsubscribe(self)

  def receive = {
    case state: CurrentClusterState =>
      log.info("****** Current members: {} ******", state.members.mkString(", "))
    case MemberUp(member) =>
      log.info("****** Member is Up: {} ******", member.address)
    case UnreachableMember(member) =>
      log.info("****** Member detected as unreachable: {} ******", member)
    case MemberRemoved(member, previousStatus) =>
      log.info("****** Member is Removed: {} after {} ******",
        member.address, previousStatus)
    case _: MemberEvent => // ignore

    case SuperviseFlow(flow: KMFlow) =>
      log.info(s"[KMClusterListener] superviseFlow; [From sender]: $sender")
      log.info(s"[Flow Info] from: ${flow.flowInfo.from}; to: ${flow.flowInfo.to}; content: ${flow.flowInfo.content}")
  }
} 
Example 100
Source File: KMCLusterSupervisor.scala    From Swallow   with Apache License 2.0 5 votes vote down vote up
package swallow.cluster



import akka.actor.Props
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._
import swallow.core.{KMFlow, KMNode}
import swallow.core.KMActorMessages._


object KMClusterSupervisor {
  def props: Props = Props(new KMClusterSupervisor)
}

class KMClusterSupervisor extends KMNode {

  val cluster = Cluster(context.system)

  // subscribe to cluster changes, re-subscribe when restart
  override def preStart(): Unit = {
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
      classOf[MemberEvent], classOf[UnreachableMember])
  }
  override def postStop(): Unit = cluster.unsubscribe(self)

  override def receive = {
    case state: CurrentClusterState =>
      log.info("****** Current members: {} ******", state.members.mkString(", "))
    case MemberUp(member) =>
      log.info("****** Member is Up: {} ******", member.address)
    case UnreachableMember(member) =>
      log.info("****** Member detected as unreachable: {} ******", member)
    case MemberRemoved(member, previousStatus) =>
      log.info("****** Member is Removed: {} after {} ******",
        member.address, previousStatus)
    case _: MemberEvent => // ignore

    case ClusterSuperviseFlow(flow: KMFlow) =>
      log.info(s"[KMClusterListener] superviseFlow; [From sender]: $sender")
      log.info(s"[Flow Info] from: ${flow.flowInfo.from}; to: ${flow.flowInfo.to}; content: ${flow.flowInfo.content}")
  }
} 
Example 101
Source File: HelloServiceImpl.scala    From scala-tutorials   with MIT License 5 votes vote down vote up
package com.baeldung.hello.impl

import akka.NotUsed
import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.cluster.routing.{ClusterRouterGroup, ClusterRouterGroupSettings}
import akka.pattern.ask
import akka.routing.ConsistentHashingGroup
import akka.stream.scaladsl.Source
import akka.util.Timeout
import com.baeldung.hello.akka.{Job, JobAccepted, JobStatus, Worker}
import com.baeldung.hello.api.HelloService
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.pubsub.{PubSubRegistry, TopicId}

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._

class HelloServiceImpl(system: ActorSystem, pubSub: PubSubRegistry)(implicit ec: ExecutionContext)
  extends HelloService {

  if (Cluster.get(system).selfRoles("worker-node")) {
    system.actorOf(Worker.props(pubSub), "worker")
  }

  val workerRouter = {
    val paths = List("/user/worker")
    val groupConf = ConsistentHashingGroup(paths, hashMapping = {
      case Job(_, task, _) => task
    })
    val routerProps = ClusterRouterGroup(
      groupConf,
      ClusterRouterGroupSettings(
        totalInstances = 1000,
        routeesPaths = paths,
        allowLocalRoutees = true,
        useRoles = Set("worker-node")
      )
    ).props
    system.actorOf(routerProps, "workerRouter")
  }

  override def submit(): ServiceCall[Job, JobAccepted] = ServiceCall {
    job =>
      implicit val timeout = Timeout(5.seconds)
      (workerRouter ? job).mapTo[JobAccepted]
  }

  override def status(): ServiceCall[NotUsed, Source[JobStatus, NotUsed]] = ServiceCall {
    _ =>
      val topic = pubSub.refFor(TopicId[JobStatus]("job-status"))
      Future.successful(topic.subscriber)
  }

} 
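The hashMapping above sends every Job with the same task value to the same worker, which is what makes the ConsistentHashingGroup useful here. Job lives in com.baeldung.hello.akka; a shape consistent with the pattern match (field names and types are assumptions):

final case class Job(jobId: String, task: String, payload: String)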
Example 102
Source File: ThreeNodeSpec.scala    From lithium   with Apache License 2.0 5 votes vote down vote up
package com.swissborg.lithium

import akka.cluster.Cluster

import scala.concurrent.duration._

abstract class ThreeNodeSpec(name: String, config: ThreeNodeSpecConfig) extends LithiumMultiNodeSpec(config) {

  def assertions(): Unit

  final protected val node1 = config.node1
  final protected val node2 = config.node2
  final protected val node3 = config.node3

  override def initialParticipants: Int = roles.size

  // Starts the cluster in order so that the oldest
  // node can always be statically determined in the
  // tests.
  s"$name" must {
    "start node-1" in within(30 seconds) {
      runOn(node1) {
        Cluster(system).join(addressOf(node1))
        waitForUp(node1)
      }

      enterBarrier("node-1-up")
    }

    "start node-2" in within(30 seconds) {
      runOn(node2) {
        Cluster(system).join(addressOf(node1))
        waitForUp(node1, node2)
      }

      enterBarrier("node-2-up")
    }

    "start node-3" in within(30 seconds) {
      runOn(node3) {
        Cluster(system).join(addressOf(node1))
        waitForUp(node1, node2, node3)
      }

      enterBarrier("node-3-up")
    }

    assertions()
  }
} 
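ThreeNodeSpecConfig is not shown in this excerpt; a minimal sketch of such a config using the standard multi-node testkit API (role names and any shared settings are assumptions):

import akka.remote.testkit.MultiNodeConfig

class ThreeNodeSpecConfig extends MultiNodeConfig {
  val node1 = role("node1")
  val node2 = role("node2")
  val node3 = role("node3")
}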
Example 103
Source File: FiveNodeSpec.scala    From lithium   with Apache License 2.0 5 votes vote down vote up
package com.swissborg.lithium

import akka.cluster.Cluster
import akka.remote.testconductor.RoleName

import scala.concurrent.duration._

abstract class FiveNodeSpec(name: String, config: FiveNodeSpecConfig) extends LithiumMultiNodeSpec(config) {
  def assertions(): Unit

  protected val node1: RoleName = config.node1
  protected val node2: RoleName = config.node2
  protected val node3: RoleName = config.node3
  protected val node4: RoleName = config.node4
  protected val node5: RoleName = config.node5

  override def initialParticipants: Int = roles.size

  // Starts the cluster in order so that the oldest
  // node can always be statically determined in the
  // tests.
  s"$name" must {
    "start node-1" in within(30 seconds) {
      runOn(node1) {
        Cluster(system).join(addressOf(node1))
        waitForUp(node1)
      }

      enterBarrier("node-1-up")
    }

    "start node-2" in within(30 seconds) {
      runOn(node2) {
        Cluster(system).join(addressOf(node1))
        waitForUp(node1, node2)
      }

      enterBarrier("node-2-up")
    }

    "start node-3" in within(30 seconds) {
      runOn(node3) {
        Cluster(system).join(addressOf(node1))
        waitForUp(node1, node2, node3)
      }

      enterBarrier("node-3-up")
    }

    "start node-4" in within(30 seconds) {
      runOn(node4) {
        Cluster(system).join(addressOf(node1))
        waitForUp(node1, node2, node3, node4)
      }

      enterBarrier("node-4-up")
    }

    "start node-5" in within(30 seconds) {
      runOn(node5) {
        Cluster(system).join(addressOf(node1))
        waitForUp(node1, node2, node3, node4, node5)
      }

      enterBarrier("node-5-up")
    }

    assertions()
  }
} 
Example 104
Source File: LithiumMultiNodeSpec.scala    From lithium   with Apache License 2.0 5 votes vote down vote up
package com.swissborg.lithium

import akka.actor.{ActorSystem, Address}
import akka.cluster.Cluster
import akka.cluster.MemberStatus._
import akka.remote.testconductor.RoleName
import akka.remote.testkit.{MultiNodeConfig, MultiNodeSpec, MultiNodeSpecCallbacks}
import akka.testkit.ImplicitSender
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

abstract class LithiumMultiNodeSpec(val config: MultiNodeConfig)
    extends MultiNodeSpec(config)
    with MultiNodeSpecCallbacks
    with AnyWordSpecLike
    with Matchers
    with BeforeAndAfterAll
    with ImplicitSender
    with Eventually
    with IntegrationPatience {
  override def beforeAll(): Unit = multiNodeSpecBeforeAll()
  override def afterAll(): Unit  = multiNodeSpecAfterAll()

  private val addresses: Map[RoleName, Address] = roles.map(r => r -> node(r).address).toMap

  protected def addressOf(roleName: RoleName): Address = addresses(roleName)

  protected def waitToBecomeUnreachable(roleNames: RoleName*): Unit =
    awaitCond(allUnreachable(roleNames: _*))

  protected def waitForSurvivors(roleNames: RoleName*): Unit =
    awaitCond(allSurvivors(roleNames: _*))

  protected def waitForUp(roleNames: RoleName*): Unit = awaitCond(allUp(roleNames: _*))

  protected def waitForSelfDowning(implicit system: ActorSystem): Unit = awaitCond(downedItself)

  protected def waitForAllLeaving(roleNames: RoleName*): Unit =
    awaitCond(allLeaving(roleNames: _*))

  protected def waitExistsAllDownOrGone(groups: Seq[Seq[RoleName]]): Unit =
    awaitCond(existsAllDownOrGone(groups))

  private def allUnreachable(roleNames: RoleName*): Boolean =
    roleNames.forall(
      role => Cluster(system).state.unreachable.exists(_.address === addressOf(role))
    )

  private def allSurvivors(roleNames: RoleName*): Boolean =
    roleNames.forall(role => Cluster(system).state.members.exists(_.address === addressOf(role)))

  private def allUp(roleNames: RoleName*): Boolean =
    roleNames.forall(
      role => Cluster(system).state.members.exists(m => m.address === addressOf(role) && m.status === Up)
    )

  private def existsAllDownOrGone(groups: Seq[Seq[RoleName]]): Boolean =
    groups.exists(group => allLeaving(group: _*))

  private def downedItself(implicit system: ActorSystem): Boolean = {
    val selfAddress = Cluster(system).selfAddress
    Cluster(system).state.members
      .exists(
        m => m.address === selfAddress && (m.status === Exiting || m.status === Down || m.status === Removed)
      )
  }

  private def allLeaving(roleNames: RoleName*): Boolean =
    roleNames.forall { role =>
      val members     = Cluster(system).state.members
      val unreachable = Cluster(system).state.unreachable

      val address = addressOf(role)

      unreachable.isEmpty &&                                                                        // no unreachable members
      (members.exists(m => m.address === address && (m.status === Down || m.status === Exiting)) || // member is down
      !members.exists(_.address === address))                                                       // member is not in the cluster
    }
} 
Example 105
Source File: ClusterApp.scala    From reactive-cli   with Apache License 2.0 5 votes vote down vote up
package foo

import akka.actor.{ Actor, ActorLogging, ActorSystem, PoisonPill, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.singleton.{ ClusterSingletonManager, ClusterSingletonManagerSettings }
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer

object ClusterApp {

  def main(args: Array[String]): Unit = {

    implicit val system = ActorSystem()
    implicit val materializer = ActorMaterializer()
    implicit val executionContext = system.dispatcher

    val cluster = Cluster(system)
    system.log.info("Starting Akka Management")
    system.log.info("something2")
    // AkkaManagement(system).start()
    // ClusterBootstrap(system).start()

    system.actorOf(
      ClusterSingletonManager.props(
        Props[NoisySingleton],
        PoisonPill,
        ClusterSingletonManagerSettings(system)))
    Cluster(system).subscribe(
      system.actorOf(Props[ClusterWatcher]),
      ClusterEvent.InitialStateAsEvents,
      classOf[ClusterDomainEvent])

    // add real app routes here
    val routes =
      path("hello") {
        get {
          complete(
            HttpEntity(ContentTypes.`text/html(UTF-8)`, "<h1>Hello</h1>"))
        }
      }

    Http().bindAndHandle(routes, "0.0.0.0", 8080)

    system.log.info(
      s"Server online at http://localhost:8080/\nPress RETURN to stop...")

    // registerOnMemberUp takes its callback by name; wrapping the body in a
    // `() => ...` lambda can register the function value itself instead of
    // running it, so pass the block directly.
    cluster.registerOnMemberUp {
      system.log.info("Cluster member is up!")
    }
  }

  class ClusterWatcher extends Actor with ActorLogging {
    val cluster = Cluster(context.system)

    override def receive = {
      case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
    }
  }
} 
Example 106
Source File: Router.scala    From akka-cluster-manager   with MIT License 5 votes vote down vote up
package io.orkestra.cluster.routing

import io.orkestra.cluster.protocol.Response.Success.RouteeDeleted
import io.orkestra.cluster.routing.ClusterListener.DeleteRoutee

import scala.collection.immutable.Queue
import akka.actor._
import akka.cluster.{Member, Cluster}

class RouterRR(memberId: String, cluster: Cluster)
    extends Actor
    with ActorLogging {

  import RouterRR._

  var members: Queue[ActorRef] = Queue()

  var quarantineMembers: List[ActorRef] = List.empty[ActorRef]

  def receive = {

    case GetRoutee(role) =>
      sender ! Routee(getMember)

    case GetRoutees =>
      sender ! members.toList

    case RegisterRoutee(path) =>
      if (isQuarantine(path))
        recoverMember(path)
      else
        probeRoutee(path)

    case RemoveRoutee(path) =>
      removeMember(path)

    case DeleteRoutee(role, path) =>
      members = members.filterNot(_.path.toString == path)
      sender ! RouteeDeleted(role, path)

    case QuarantineRoutee(path) =>
      quarantineMember(path)

    case RecoverRoutee(path) =>
      recoverMember(path)

    case CleanQuarantine(path) =>
      quarantineCleaner(path)

    case ActorIdentity(`memberId`, Some(routeeRef)) =>
      registerMember(routeeRef)

    case ActorIdentity(`memberId`, None) =>
      log.warning(s"member with id $memberId not found")

    case Terminated(memberRef) =>
      log.info(s"Member ${memberRef.path} was Terminated")
      removeMember(memberRef.path)

  }

  def quarantineCleaner(path: ActorPath) = {
    log.debug(s"Quarantine is being cleaned of $path...")
    quarantineMembers.filter(_.path == path).foreach { m =>
      log.warning(s"Removing quarantined member ${m.path.address}")
      cluster.down(m.path.address)
    }
  }
}

object RouterRR {
  case class RegisterRoutee(x: ActorPath)
  case class RemoveRoutee(x: ActorPath)
  case class QuarantineRoutee(x: ActorPath)
  case class RecoverRoutee(x: ActorPath)
  case class GetRoutee(role: String)
  case object GetRoutees
  case class Routee(ref: Option[ActorRef])
  case class CleanQuarantine(path: ActorPath)
} 
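Several helpers referenced from receive (getMember, probeRoutee, registerMember, removeMember, quarantineMember, recoverMember, isQuarantine) are elided from this excerpt. A plausible reconstruction of two of them, placed inside RouterRR and consistent with the ActorIdentity and Terminated cases above (bodies are assumptions, not the project's actual code):

  // Resolve a routee by path; the reply arrives as ActorIdentity(memberId, ...).
  def probeRoutee(path: ActorPath): Unit =
    context.actorSelection(path) ! Identify(memberId)

  // Watch the resolved routee so Terminated fires if it dies, then enqueue it.
  def registerMember(ref: ActorRef): Unit = {
    context.watch(ref)
    members = members.enqueue(ref)
  }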
Example 107
Source File: HttpManagementServer.scala    From akka-cluster-manager   with MIT License 5 votes vote down vote up
package io.orkestra.cluster.management

import java.util.concurrent.atomic.AtomicReference

import akka.Done
import akka.actor.{ActorSystem, ActorRef}
import akka.cluster.Cluster
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.stream.{ActorMaterializer, Materializer}
import io.orkestra.cluster.protocol.Response.{Failure, Success}
import io.orkestra.cluster.routing.ClusterListener._
import akka.pattern.ask
import play.api.libs.json.Json
import scala.concurrent.{Promise, Future, ExecutionContext}
import scala.concurrent.duration._

class HttpManagementServer(clusterListener: ActorRef, hostName: String = "127.0.0.1", port: Int = 33333)(
    implicit
    val system:                ActorSystem,
    implicit val materializer: Materializer,
    implicit val executer:     ExecutionContext
) {

  import PlayJsonSupport._

  def handleOrkestraRequest(req: ManagementReguest) =
    (clusterListener ? req)(3.seconds).map {
      case res: Success =>
        res.httpStatusCode -> res.asJson
      case res: Failure =>
        res.httpStatusCode -> res.asJson
    }

  def orkestraRoutes =
    pathPrefix("orkestra" / "routers") {
      pathEndOrSingleSlash {
        get {
          complete(handleOrkestraRequest(GetRouters))
        }
      } ~
        path(Segment ~ Slash.?) { role =>
          get {
            complete(handleOrkestraRequest(GetRouter(role)))
          }
        } ~
        path(Segment / Remaining ~ Slash.?) { (role, routeePath) =>
          delete {
            complete(handleOrkestraRequest(DeleteRoutee(role, routeePath)))
          }
        }
    }

  private val bindingFuture = new AtomicReference[Future[Http.ServerBinding]]()

  def start() = {
    val serverBindingPromise = Promise[Http.ServerBinding]()
    if (bindingFuture.compareAndSet(null, serverBindingPromise.future)) {
      // Complete the promise with the actual binding; otherwise `shutdown`
      // would wait forever on a future that never finishes.
      serverBindingPromise.completeWith(Http().bindAndHandle(orkestraRoutes, hostName, port))
      println(Console.CYAN + s"cluster http management server online at http://${hostName}:${port}/" + Console.WHITE)
    }
  }

  def shutdown =
    if (bindingFuture.get() == null) {
      Future(Done)
    } else {
      val stopFuture = bindingFuture.get().flatMap(_.unbind()).map(_ => Done)
      bindingFuture.set(null)
      stopFuture
    }

} 
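Starting the server only needs an ActorSystem, a Materializer and the cluster-listener ref; a usage sketch (actor names and Props are illustrative):

import akka.actor.{ ActorRef, ActorSystem, Props }
import akka.stream.ActorMaterializer
import scala.concurrent.ExecutionContext

implicit val system: ActorSystem = ActorSystem("orkestra")
implicit val mat: ActorMaterializer = ActorMaterializer()
implicit val ec: ExecutionContext = system.dispatcher

// The ClusterListener actor comes from io.orkestra.cluster.routing; the Props
// used here are a placeholder for however the project constructs it.
val listener: ActorRef = system.actorOf(Props[ClusterListener], "cluster-listener")

new HttpManagementServer(listener, hostName = "127.0.0.1", port = 33333).start()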
Example 108
Source File: Main.scala    From whirlwind-tour-akka-typed   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.wtat

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props, Terminated }
import akka.actor.typed.SupervisorStrategy.restartWithBackoff
import akka.actor.typed.scaladsl.Actor.supervise
import akka.cluster.Cluster
import akka.cluster.bootstrap.ClusterBootstrap
import akka.cluster.http.management.ClusterHttpManagement
import akka.cluster.typed.{ ClusterSingleton, ClusterSingletonSettings }
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery
import akka.stream.{ ActorMaterializer, Materializer }
import pureconfig.loadConfigOrThrow

object Main {
  import akka.actor.typed.scaladsl.adapter._

  final class Root(config: Config) extends Actor with ActorLogging {

    private implicit val mat: Materializer = ActorMaterializer()

    private val clusterSingletonSettings = ClusterSingletonSettings(context.system.toTyped)

    private val userRepository =
      ClusterSingleton(context.system.toTyped).spawn(UserRepository(),
                                                     UserRepository.Name,
                                                     akka.actor.typed.Props.empty,
                                                     clusterSingletonSettings,
                                                     UserRepository.Stop)

    private val userView = context.spawn(UserView(), UserView.Name)

    private val userProjection = {
      import config.userProjection._
      val readJournal =
        PersistenceQuery(context.system)
          .readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
      val userProjection =
        supervise(UserProjection(readJournal, userView, askTimeout))
          .onFailure[UserProjection.EventStreamCompleteException](
            restartWithBackoff(minBackoff, maxBackoff, 0)
          )
      ClusterSingleton(context.system.toTyped).spawn(userProjection,
                                                     UserProjection.Name,
                                                     akka.actor.typed.Props.empty,
                                                     clusterSingletonSettings,
                                                     UserProjection.Stop)
    }

    private val api = {
      import config.api._
      context.spawn(Api(address, port, userRepository, userView, askTimeout), Api.Name)
    }

    context.watch(userRepository)
    context.watch(userView)
    context.watch(userProjection)
    context.watch(api)
    log.info(s"${context.system.name} up and running")

    override def receive = {
      case Terminated(actor) =>
        log.error(s"Shutting down, because actor ${actor.path} terminated!")
        context.system.terminate()
    }
  }

  def main(args: Array[String]): Unit = {
    sys.props += "log4j2.contextSelector" -> "org.apache.logging.log4j.core.async.AsyncLoggerContextSelector"

    val config  = loadConfigOrThrow[Config]("wtat")
    val system  = ActorSystem("wtat")
    val cluster = Cluster(system)

    if (config.useClusterBootstrap) {
      ClusterHttpManagement(cluster).start()
      ClusterBootstrap(system).start()
    }

    cluster.registerOnMemberUp(system.actorOf(Props(new Root(config)), "root"))
  }
} 
Example 109
Source File: UserProjection.scala    From whirlwind-tour-akka-typed   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.wtat

import akka.actor.Scheduler
import akka.actor.typed.{ ActorRef, Behavior }
import akka.actor.typed.scaladsl.Actor
import akka.actor.typed.scaladsl.AskPattern.Askable
import akka.cluster.Cluster
import akka.cluster.ddata.{ ORSet, ORSetKey }
import akka.cluster.ddata.Replicator.WriteLocal
import akka.cluster.ddata.typed.scaladsl.{ DistributedData, Replicator }
import akka.persistence.query.EventEnvelope
import akka.persistence.query.scaladsl.EventsByPersistenceIdQuery
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import akka.util.Timeout
import cats.instances.string._
import cats.syntax.eq._
import org.apache.logging.log4j.scala.Logging
import scala.concurrent.duration.FiniteDuration

object UserProjection extends Logging {
  import akka.actor.typed.scaladsl.adapter._

  sealed trait Command
  final case object Stop                              extends Command
  private final case object HandleEventStreamComplete extends Command

  abstract class EventStreamCompleteException
      extends IllegalStateException("Event stream completed unexpectedly!")
  private final case object EventStreamCompleteException extends EventStreamCompleteException

  final val Name = "user-projection"

  final val usersKey: ORSetKey[User] =
    ORSetKey("users")

  def apply(readJournal: EventsByPersistenceIdQuery,
            userView: ActorRef[UserView.Command],
            askTimeout: FiniteDuration)(implicit mat: Materializer): Behavior[Command] =
    Actor.deferred { context =>
      implicit val c: Cluster   = Cluster(context.system.toUntyped)
      implicit val s: Scheduler = context.system.scheduler
      implicit val t: Timeout   = askTimeout
      val replicator            = DistributedData(context.system).replicator
      val self                  = context.self

      readJournal
        .eventsByPersistenceId(UserRepository.Name, 0, Long.MaxValue)
        .collect { case EventEnvelope(_, _, _, event: UserRepository.Event) => event }
        .mapAsync(1) {
          case UserRepository.UserAdded(user) =>
            replicator ? Replicator.Update(usersKey, ORSet.empty[User], WriteLocal)(_ + user)

          case UserRepository.UserRemoved(username) =>
            replicator ? Replicator.Update(usersKey, ORSet.empty[User], WriteLocal) { users =>
              users.elements.find(_.username.value === username).fold(users)(users - _)
            }
        }
        .runWith(Sink.onComplete(_ => self ! HandleEventStreamComplete))
      logger.debug("Running event stream")

      Actor.immutable {
        case (_, Stop)                      => Actor.stopped
        case (_, HandleEventStreamComplete) => throw EventStreamCompleteException
      }
    }
} 
Example 110
Source File: ActorSystemFixture.scala    From nexus   with Apache License 2.0 4 votes vote down vote up
package ch.epfl.bluebrain.nexus.sourcing.projections

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.testkit._
import com.typesafe.config.ConfigFactory
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.PatienceConfiguration
import org.scalatest.concurrent.ScalaFutures._
import org.scalatest.wordspec.AnyWordSpecLike

import scala.concurrent.Promise
import scala.concurrent.duration._

class ActorSystemFixture(name: String, startCluster: Boolean = false)
    extends TestKit(ActorSystem(name, ConfigFactory.load("service-test.conf")))
    with AnyWordSpecLike
    with PatienceConfiguration
    with BeforeAndAfterAll {

  implicit override def patienceConfig: PatienceConfig = PatienceConfig(3.seconds.dilated, 100.milliseconds.dilated)

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    if (startCluster) {
      val promise = Promise[Unit]()
      Cluster(system).registerOnMemberUp(promise.success(()))
      Cluster(system).join(Cluster(system).selfAddress)
      promise.future.futureValue
    }
  }

  override protected def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }
}
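A spec opts into a running single-node cluster by passing startCluster = true; a usage sketch (the spec name and assertion are illustrative):

import akka.cluster.Cluster

class ProjectionsSpec extends ActorSystemFixture("ProjectionsSpec", startCluster = true) {
  "a projection" should {
    "run against a cluster whose self member has joined" in {
      assert(Cluster(system).state.members.nonEmpty)
    }
  }
}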