akka.cluster.MemberStatus Scala Examples

The following examples show how to use akka.cluster.MemberStatus. Each example is taken from an open-source project; the project and source file are noted above each listing.
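As quick orientation: MemberStatus enumerates the phases of a node's membership lifecycle (Joining, WeaklyUp, Up, Leaving, Exiting, Down, Removed). Before the examples, here is a minimal sketch, assuming an ActorSystem configured with the cluster provider, that inspects the current node's own status:

import akka.actor.ActorSystem
import akka.cluster.{ Cluster, MemberStatus }

object SelfStatusSketch extends App {
  // assumes akka.actor.provider = "cluster" in application.conf
  val system  = ActorSystem("sketch")
  val cluster = Cluster(system)

  // selfMember is this node's view of its own membership
  cluster.selfMember.status match {
    case MemberStatus.Up => println("node is Up")
    case other           => println(s"node is still $other")
  }
}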
Example 1
Source File: ClusterMembershipCheckSpec.scala    From akka-management    with Apache License 2.0
package akka.management.cluster.scaladsl

import akka.actor.ActorSystem
import akka.cluster.MemberStatus
import akka.testkit.TestKit
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{ Matchers, WordSpecLike }

class ClusterMembershipCheckSpec
    extends TestKit(ActorSystem("ClusterHealthCheck"))
    with WordSpecLike
    with Matchers
    with ScalaFutures {

  "Cluster Health" should {
    "be unhealthy if current state not one of healthy states" in {
      val chc = new ClusterMembershipCheck(
        system,
        () => MemberStatus.joining,
        new ClusterMembershipCheckSettings(Set(MemberStatus.Up)))

      chc().futureValue shouldEqual false
    }
    "be unhealthy if current state is one of healthy states" in {
      val chc =
        new ClusterMembershipCheck(
          system,
          () => MemberStatus.Up,
          new ClusterMembershipCheckSettings(Set(MemberStatus.Up)))

      chc().futureValue shouldEqual true
    }
  }
} 
Example 2
Source File: TestMember.scala    From lithium    with Apache License 2.0
package akka.cluster.swissborg

import akka.actor.Address
import akka.cluster.ClusterSettings.DataCenter
import akka.cluster.{ClusterSettings, Member, MemberStatus, UniqueAddress}

object TestMember {

  def apply(address: Address, status: MemberStatus): Member =
    apply(address, status, Set.empty[String], ClusterSettings.DefaultDataCenter)

  def apply(address: Address, status: MemberStatus, dataCenter: DataCenter): Member =
    apply(address, status, Set.empty[String], dataCenter)

  def apply(address: Address, status: MemberStatus, roles: Set[String]): Member =
    apply(address, status, roles, dataCenter = ClusterSettings.DefaultDataCenter)

  def apply(address: Address, status: MemberStatus, roles: Set[String], dataCenter: DataCenter): Member =
    withUniqueAddress(UniqueAddress(address, 0L), status, roles, dataCenter)

  def withUniqueAddress(uniqueAddress: UniqueAddress,
                        status: MemberStatus,
                        roles: Set[String],
                        dataCenter: DataCenter): Member =
    new Member(uniqueAddress, Int.MaxValue, status, roles + (ClusterSettings.DcRolePrefix + dataCenter))
} 
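A hypothetical use of this factory in a test, constructing synthetic members in different lifecycle states (the addresses are illustrative):

import akka.actor.Address
import akka.cluster.MemberStatus
import akka.cluster.swissborg.TestMember

val upMember      = TestMember(Address("akka.tcp", "ClusterSystem", "host1", 2552), MemberStatus.Up)
val leavingMember = TestMember(Address("akka.tcp", "ClusterSystem", "host2", 2552), MemberStatus.Leaving, Set("worker"))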
Example 3
Source File: KeepReferee.scala    From lithium    with Apache License 2.0
package com.swissborg.lithium

package strategy

import akka.cluster.MemberStatus
import akka.cluster.MemberStatus._
import cats.Applicative
import cats.implicits._


private[lithium] class KeepReferee[F[_]: Applicative](config: KeepRefereeConfig) extends Strategy[F] {

  import config._

  override def takeDecision(worldView: WorldView): F[Decision] =
    worldView.reachableNodes
      .find(_.member.address.toString === referee)
      .fold(Decision.downReachable(worldView)) { _ =>
        val nbrOfConsideredReachableNodes = worldView.reachableNodes.count { node =>
          Set[MemberStatus](Up, Leaving).contains(node.member.status)
        }

        if (nbrOfConsideredReachableNodes < downAllIfLessThanNodes)
          Decision.downReachable(worldView)
        else
          Decision.downUnreachable(worldView)
      }
      .pure[F]

  override def toString: String = s"KeepReferee($config)"
} 
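The rule above is compact: if the referee is not among the reachable nodes, the reachable partition downs itself; otherwise it survives only while it retains enough Up or Leaving nodes. A hypothetical restatement as a pure function (not the library API), just to make the branching explicit:

// illustrative only: mirrors the branching in KeepReferee.takeDecision
def keepRefereeDecision(refereeReachable: Boolean,
                        consideredReachableNodes: Int,
                        downAllIfLessThanNodes: Int): String =
  if (!refereeReachable) "down reachable side"
  else if (consideredReachableNodes < downAllIfLessThanNodes) "down reachable side"
  else "down unreachable side"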
Example 4
Source File: Status.scala    From nexus-kg    with Apache License 2.0
package ch.epfl.bluebrain.nexus.kg.routes

import akka.Done
import akka.actor.ActorSystem
import akka.cluster.{Cluster, MemberStatus}
import akka.event.Logging
import akka.persistence.cassandra.CassandraPluginConfig
import akka.persistence.cassandra.session.scaladsl.CassandraSession
import ch.epfl.bluebrain.nexus.kg.config.AppConfig.PersistenceConfig
import monix.eval.Task

import scala.concurrent.Future

sealed trait Status {

  /** Performs the availability check. */
  def check: Task[Boolean]
}

object Status {

  class CassandraStatus(implicit as: ActorSystem, persistence: PersistenceConfig) extends Status {
    implicit val ec     = as.dispatcher
    private val log     = Logging(as, "CassandraHealthCheck")
    private val config  = new CassandraPluginConfig(as, as.settings.config.getConfig(persistence.journalPlugin))
    private val (p, s)  = (config.sessionProvider, config.sessionSettings)
    private val session = new CassandraSession(as, p, s, ec, log, "health", _ => Future.successful(Done.done()))
    private val query   = s"SELECT now() FROM ${config.keyspace}.messages;"

    override def check: Task[Boolean] =
      Task.deferFuture(session.selectOne(query).map(_ => true).recover {
        case err =>
          log.error("Error while attempting to query for health check", err)
          false
      })
  }

  class ClusterStatus(cluster: Cluster) extends Status {
    override def check: Task[Boolean] =
      Task.pure(
        !cluster.isTerminated &&
          cluster.state.leader.isDefined && cluster.state.members.nonEmpty &&
          !cluster.state.members.exists(_.status != MemberStatus.Up) && cluster.state.unreachable.isEmpty
      )
  }
} 
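A usage sketch for ClusterStatus; names are illustrative, and running the Task assumes a monix 3.x Scheduler in scope:

import akka.actor.ActorSystem
import akka.cluster.Cluster
import monix.execution.Scheduler.Implicits.global

val system = ActorSystem("status-check")
val status = new Status.ClusterStatus(Cluster(system))

// Task is lazy; runToFuture actually evaluates the check
status.check.runToFuture.foreach(ok => println(s"cluster healthy: $ok"))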
Example 5
Source File: TransformationBackend.scala    From fusion-data    with Apache License 2.0
package sample.cluster.transformation

import java.util.concurrent.TimeUnit

import akka.actor.{ Actor, ActorSystem, Props, RootActorPath }
import akka.cluster.ClusterEvent.{ CurrentClusterState, MemberUp }
import akka.cluster.{ Cluster, Member, MemberStatus }
import com.typesafe.config.ConfigFactory

import scala.concurrent.ExecutionContext.Implicits
import scala.concurrent.Future
import scala.util.Random

//#backend
class TransformationBackend extends Actor {
  val cluster = Cluster(context.system)

  // subscribe to cluster changes (MemberUp);
  // re-subscribe when the actor restarts
  override def preStart(): Unit = cluster.subscribe(self, classOf[MemberUp])
  override def postStop(): Unit = cluster.unsubscribe(self)

  def receive = {
    case TransformationJob(text) =>
      sender() ! TransformationResult(text.toUpperCase)
    case state: CurrentClusterState =>
      state.members.filter(_.status == MemberStatus.Up) foreach register
    case MemberUp(m) => register(m)
  }

  def register(member: Member): Unit =
    if (member.hasRole("frontend"))
      context.actorSelection(RootActorPath(member.address) / "user" / "frontend") !
      BackendRegistration
}
//#backend

object TransformationBackend {
  def main(args: Array[String]): Unit = {
    // Override the configuration of the port when specified as program argument
    val port = if (args.isEmpty) "0" else args(0)
    val config = ConfigFactory
      .parseString(s"""
        akka.remote.netty.tcp.port=$port
        akka.remote.artery.canonical.port=$port
        """)
      .withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]"))
      .withFallback(ConfigFactory.load("simple-cluster"))

    val system = ActorSystem("ClusterSystem", config)
    system.actorOf(Props[TransformationBackend], name = "backend")
    Future {
      // keep the node alive for 10 seconds plus a random period, then shut it down
      TimeUnit.SECONDS.sleep(10)
      TimeUnit.SECONDS.sleep(Random.nextInt(50))
      system.terminate()
    }(Implicits.global)
  }
} 
Example 6
Source File: StatsSample.scala    From fusion-data    with Apache License 2.0
package sample.cluster.stats

import java.util.concurrent.ThreadLocalRandom

import akka.actor.{ Actor, ActorSystem, Address, Props, RelativeActorPath, RootActorPath }
import akka.cluster.ClusterEvent._
import akka.cluster.{ Cluster, MemberStatus }
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration._

object StatsSample {
  def main(args: Array[String]): Unit =
    if (args.isEmpty) {
      startup(Seq("2551", "2552", "0"))
      StatsSampleClient.main(Array.empty)
    } else {
      startup(args)
    }

  def startup(ports: Seq[String]): Unit =
    ports foreach { port =>
      // Override the configuration of the port when specified as program argument
      val config = ConfigFactory
        .parseString(s"""
        akka.remote.netty.tcp.port=$port
        akka.remote.artery.canonical.port=$port
        """)
        .withFallback(ConfigFactory.parseString("akka.cluster.roles = [compute]"))
        .withFallback(ConfigFactory.load("stats1"))

      val system = ActorSystem("ClusterSystem", config)

      system.actorOf(Props[StatsWorker], name = "statsWorker")
      system.actorOf(Props[StatsService], name = "statsService")
    }
}

object StatsSampleClient {
  def main(args: Array[String]): Unit = {
    // note that the client is not a compute node: its role is not defined
    val system = ActorSystem("ClusterSystem")
    system.actorOf(Props(classOf[StatsSampleClient], "/user/statsService"), "client")
  }
}

class StatsSampleClient(servicePath: String) extends Actor {
  val cluster = Cluster(context.system)

  val servicePathElements = servicePath match {
    case RelativeActorPath(elements) => elements
    case _ =>
      throw new IllegalArgumentException("servicePath [%s] is not a valid relative actor path" format servicePath)
  }
  import context.dispatcher

  val tickTask =
    context.system.scheduler.schedule(2.seconds, 2.seconds, self, "tick")

  var nodes = Set.empty[Address]

  override def preStart(): Unit =
    cluster.subscribe(self, classOf[MemberEvent], classOf[ReachabilityEvent])
  override def postStop(): Unit = {
    cluster.unsubscribe(self)
    tickTask.cancel()
  }

  def receive = {
    case "tick" if nodes.nonEmpty =>
      // just pick any one
      val address =
        nodes.toIndexedSeq(ThreadLocalRandom.current.nextInt(nodes.size))
      val service =
        context.actorSelection(RootActorPath(address) / servicePathElements)
      service ! StatsJob("this is the text that will be analyzed")
    case result: StatsResult =>
      println(result)
    case failed: JobFailed =>
      println(failed)
    case state: CurrentClusterState =>
      nodes = state.members.collect {
        case m if m.hasRole("compute") && m.status == MemberStatus.Up =>
          m.address
      }
    case MemberUp(m) if m.hasRole("compute")        => nodes += m.address
    case other: MemberEvent                         => nodes -= other.member.address
    case UnreachableMember(m)                       => nodes -= m.address
    case ReachableMember(m) if m.hasRole("compute") => nodes += m.address
  }
} 
Example 7
Source File: ClusterStateRoute.scala    From akka-kubernetes-tests    with Apache License 2.0
package akka.kubernetes.sample

import akka.cluster.{Cluster, Member, MemberStatus}
import akka.management.cluster.{
  ClusterHttpManagementJsonProtocol,
  ClusterMembers,
  ClusterReadViewAccess,
  ClusterUnreachableMember
}

// Just want the read view
object ClusterStateRoute extends ClusterHttpManagementJsonProtocol {

  import akka.http.scaladsl.server.Directives._
  import akka.management.cluster.ClusterHttpManagementHelper._

  def routeGetMembers(cluster: Cluster) =
    path("cluster" / "members") {
      get {
        complete {
          val readView = ClusterReadViewAccess.internalReadView(cluster)
          val members = readView.state.members.map(memberToClusterMember)

          val unreachable = readView.reachability.observersGroupedByUnreachable.toSeq
            .sortBy(_._1)
            .map {
              case (subject, observers) ⇒
                ClusterUnreachableMember(s"${subject.address}", observers.toSeq.sorted.map(m ⇒ s"${m.address}").toList)
            }
            .toList

          val thisDcMembers =
            cluster.state.members.toSeq
              .filter(node => node.status == MemberStatus.Up && node.dataCenter == cluster.selfDataCenter)

          val leader = readView.leader.map(_.toString)

          val oldest = if (thisDcMembers.isEmpty) None else Some(thisDcMembers.min(Member.ageOrdering).address.toString)

          ClusterMembers(s"${readView.selfAddress}", members, unreachable, leader, oldest, oldestPerRole(thisDcMembers))
        }
      }
    }

} 
Example 8
Source File: MemberListenerActor.scala    From asura    with MIT License
package asura.cluster.actor

import akka.actor.Props
import akka.cluster.ClusterEvent._
import akka.cluster.{Cluster, Member, MemberStatus}
import asura.cluster.actor.MemberListenerActor.GetAllMembers
import asura.cluster.model.MemberInfo
import asura.common.actor.BaseActor

class MemberListenerActor extends BaseActor {

  val cluster = Cluster(context.system)
  var nodes = Set.empty[Member]

  override def preStart(): Unit = {
    cluster.subscribe(self, classOf[MemberEvent])
  }

  override def postStop(): Unit = {
    cluster.unsubscribe(self)
  }

  override def receive: Receive = {
    case state: CurrentClusterState =>
      nodes = state.members.collect {
        case m if m.status == MemberStatus.Up => m
      }
    case MemberUp(member) =>
      log.info("Member({}) is Up: {}", member.roles.mkString(","), member.address)
      nodes += member
    case MemberRemoved(member, previousStatus) =>
      log.info("Member is Removed: {} after {}", member.address, previousStatus)
      nodes -= member
    case _: MemberEvent =>
    case GetAllMembers =>
      sender() ! nodes.map(MemberInfo.fromMember(_))
  }

}

object MemberListenerActor {
  def props() = Props(new MemberListenerActor())

  case class GetAllMembers()

} 
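A hedged usage sketch: since the actor answers GetAllMembers with its current set of Up members, it can be queried with the ask pattern (the timeout value is illustrative):

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.util.Timeout
import asura.cluster.actor.MemberListenerActor
import asura.cluster.actor.MemberListenerActor.GetAllMembers

import scala.concurrent.duration._

implicit val timeout: Timeout = Timeout(3.seconds)
val system   = ActorSystem("cluster-demo")
val listener = system.actorOf(MemberListenerActor.props())

// replies with the MemberInfo set of all members currently seen as Up
(listener ? GetAllMembers()).foreach(println)(system.dispatcher)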
Example 9
Source File: ClusterManager.scala    From akka-cluster-playground    with MIT License
package com.elleflorio.cluster.playground.node.cluster

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.cluster.{Cluster, MemberStatus}
import com.elleflorio.cluster.playground.Server.system
import com.elleflorio.cluster.playground.node.cluster.ClusterManager.GetMembers

object ClusterManager {

  sealed trait ClusterMessage
  case object GetMembers extends ClusterMessage

  def props(nodeId: String) = Props(new ClusterManager(nodeId))
}

class ClusterManager(nodeId: String) extends Actor with ActorLogging {

  val cluster: Cluster = Cluster(context.system)
  val listener: ActorRef = context.actorOf(ClusterListener.props(nodeId, cluster), "clusterListener")

  override def receive: Receive = {
    case GetMembers => {
      sender() ! cluster.state.members.filter(_.status == MemberStatus.up)
        .map(_.address.toString)
        .toList
    }
  }
} 
Example 10
Source File: ClusterStatusCheck.scala    From reactive-lib    with Apache License 2.0
package com.lightbend.rp.akkaclusterbootstrap

import akka.actor.ExtendedActorSystem
import akka.cluster.{ Cluster, MemberStatus }
import com.lightbend.rp.status.{ HealthCheck, ReadinessCheck }
import scala.concurrent.{ ExecutionContext, Future }

class ClusterStatusCheck extends ReadinessCheck with HealthCheck {
  def healthy(actorSystem: ExtendedActorSystem)(implicit ec: ExecutionContext): Future[Boolean] =
    status(actorSystem) map {
      case MemberStatus.Joining => true
      case MemberStatus.WeaklyUp => true
      case MemberStatus.Up => true
      case MemberStatus.Leaving => true
      case MemberStatus.Exiting => true
      case MemberStatus.Down => false
      case MemberStatus.Removed => false
    }

  def ready(actorSystem: ExtendedActorSystem)(implicit ec: ExecutionContext): Future[Boolean] =
    status(actorSystem) map {
      case MemberStatus.Joining => false
      case MemberStatus.WeaklyUp => true
      case MemberStatus.Up => true
      case MemberStatus.Leaving => false
      case MemberStatus.Exiting => false
      case MemberStatus.Down => false
      case MemberStatus.Removed => false
    }

  private def status(actorSystem: ExtendedActorSystem)(implicit ec: ExecutionContext): Future[MemberStatus] = {
    val cluster = Cluster(actorSystem)
    val selfNow = cluster.selfMember
    Future.successful(selfNow.status)
  }
} 
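Note the asymmetry between the two checks: Joining, Leaving and Exiting count as healthy (the node is alive and converging) but not as ready for traffic. A sketch of exercising both, assuming the default ActorSystem implementation (which is an ExtendedActorSystem):

import akka.actor.{ ActorSystem, ExtendedActorSystem }
import com.lightbend.rp.akkaclusterbootstrap.ClusterStatusCheck

val system = ActorSystem("check").asInstanceOf[ExtendedActorSystem]
implicit val ec = system.dispatcher
val check = new ClusterStatusCheck

check.healthy(system).foreach(h => println(s"healthy: $h")) // true while Joining
check.ready(system).foreach(r => println(s"ready:   $r"))   // false until WeaklyUp/Up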
Example 11
Source File: ClusterHttpInspector.scala    From simple-akka-downing    with Apache License 2.0
package com.ajjpj.simpleakkadowning.util

import akka.actor.Actor
import akka.cluster.{Cluster, MemberStatus}
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives
import akka.stream.ActorMaterializer

import scala.concurrent.Await


class ClusterHttpInspector(httpPort: Int) extends Actor {
  val cluster = Cluster.get(context.system)
  val routes = {
    import Directives._

    pathPrefix("cluster-members") {
      path("up") { complete {
        cluster.state.members.filter(_.status == MemberStatus.Up).map(_.address.port.get).mkString(" ")
      }} ~
      path("unreachable") { complete {
        cluster.state.unreachable.map(_.address.port.get).mkString(" ")
      }}
    }
  }

  import context.dispatcher
  implicit val mat = ActorMaterializer()

  val fServerBinding =
    Http(context.system)
      .bindAndHandle(routes, "localhost", httpPort)


  override def postStop () = {
    import scala.concurrent.duration._
    super.postStop ()
    fServerBinding.foreach(sb => Await.ready (sb.unbind(), 5.seconds))
  }

  override def receive = Actor.emptyBehavior

} 
Example 12
Source File: SimpleDowningSpec.scala    From simple-akka-downing    with Apache License 2.0
package com.ajjpj.simpleakkadowning.util

import akka.actor.Props
import akka.cluster.{Cluster, MemberStatus}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, Uri}
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeSpec
import akka.remote.transport.ThrottlerTransportAdapter.Direction
import akka.stream.ActorMaterializer
import akka.testkit.ImplicitSender

import scala.concurrent.duration._
import scala.util.control.NonFatal

abstract class SimpleDowningSpec(config: SimpleDowningConfig) extends MultiNodeSpec(config) with STMultiNodeSpec with ImplicitSender {

  def initialParticipants = roles.size

  private var portToNode = Map.empty[Int,RoleName]

  def init(): Unit = {
    if (roles.headOption contains myself) {
      enterBarrier("initialized")
    }
    else {
      val cluster = Cluster(system)
      cluster.joinSeedNodes(seedAddresses)
      system.actorOf(Props(new ClusterHttpInspector(httpPort(myself))), "http-server")

      while (cluster.state.members.count(_.status == MemberStatus.Up) < roles.tail.size) Thread.sleep(100)
      enterBarrier("initialized")
    }

    portToNode = roles.map(r => node(r).address.port.get -> r).toMap
  }

  def httpPort (node: RoleName) = {
    val nodeNo = roles.indexOf(node)
    require(nodeNo > 0)
    8080 + nodeNo
  }

  def seedAddresses = roles.tail.map(node(_).root.address)

  private def httpGetNodes(node: RoleName, path: String): Set[RoleName] = {
    try {
      import system.dispatcher
      implicit val mat = ActorMaterializer()

      val uri = Uri (s"http://localhost:${httpPort (node)}$path")
      val response = Http (system).singleRequest (HttpRequest (uri = uri)).await
      val strict = response.entity.toStrict (10.seconds).await
      strict.data.decodeString ("utf-8") match {
        case s if s.isEmpty => Set.empty
        case s => s.split (' ').map (_.toInt).map (portToNode).toSet
      }
    }
    catch {
      case NonFatal(th) =>
        th.printStackTrace()
        Set.empty
    }
  }

  def upNodesFor(node: RoleName) = httpGetNodes(node, "/cluster-members/up")
  def unreachableNodesFor(node: RoleName) = httpGetNodes(node, "/cluster-members/unreachable")

  /** Cuts traffic in both directions between `nodes` and all other cluster nodes. */
  def createPartition(nodes: RoleName*) = {
    val otherNodes = roles.tail.toSet -- nodes
    for (n1 <- nodes; n2 <- otherNodes) testConductor.blackhole(n1, n2, Direction.Both).await
  }

  def healPartition(): Unit = {
    for (n1 <- roles.tail; n2 <- roles.tail) testConductor.passThrough(n1, n2, Direction.Both).await
  }
} 
Example 13
Source File: ClusterShardingQuickTerminationSpec.scala    From akka-persistence-cassandra    with Apache License 2.0
package akka.persistence.cassandra.sharding

import akka.actor.{ ActorLogging, ActorRef, Props, ReceiveTimeout }
import akka.cluster.{ Cluster, MemberStatus }
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings, ShardRegion }
import akka.persistence.PersistentActor
import akka.persistence.cassandra.CassandraSpec
import akka.testkit.TestProbe

import scala.concurrent.duration._

object ClusterShardingQuickTerminationSpec {

  case object Increment
  case object Decrement
  final case class Get(counterId: Long)
  final case class EntityEnvelope(id: Long, payload: Any)
  case object Ack

  case object Stop
  final case class CounterChanged(delta: Int)

  class Counter extends PersistentActor with ActorLogging {
    import ShardRegion.Passivate

    context.setReceiveTimeout(5.seconds)

    // self.path.name is the entity identifier (utf-8 URL-encoded)
    override def persistenceId: String = "Counter-" + self.path.name

    var count = 0

    def updateState(event: CounterChanged): Unit =
      count += event.delta

    override def receiveRecover: Receive = {
      case evt: CounterChanged => updateState(evt)
      case other               => log.debug("Other: {}", other)
    }

    override def receiveCommand: Receive = {
      case Increment      => persist(CounterChanged(+1))(updateState)
      case Decrement      => persist(CounterChanged(-1))(updateState)
      case Get(_)         => sender() ! count
      case ReceiveTimeout => context.parent ! Passivate(stopMessage = Stop)
      case Stop =>
        sender() ! Ack
        context.stop(self)
    }
  }
  val extractEntityId: ShardRegion.ExtractEntityId = {
    case EntityEnvelope(id, payload) => (id.toString, payload)
    case msg @ Get(id)               => (id.toString, msg)
  }

  val numberOfShards = 100

  val extractShardId: ShardRegion.ExtractShardId = {
    case EntityEnvelope(id, _) => (id % numberOfShards).toString
    case Get(id)               => (id % numberOfShards).toString
  }

}

class ClusterShardingQuickTerminationSpec extends CassandraSpec("""
    akka.actor.provider = cluster
  """.stripMargin) {

  import ClusterShardingQuickTerminationSpec._

  "Cassandra Plugin with Cluster Sharding" must {
    "clear state if persistent actor shuts down" in {
      Cluster(system).join(Cluster(system).selfMember.address)
      awaitAssert {
        Cluster(system).selfMember.status shouldEqual MemberStatus.Up
      }
      ClusterSharding(system).start(
        typeName = "tagging",
        entityProps = Props[Counter],
        settings = ClusterShardingSettings(system),
        extractEntityId = extractEntityId,
        extractShardId = extractShardId)

      (0 to 100).foreach { i =>
        val counterRegion: ActorRef = ClusterSharding(system).shardRegion("tagging")
        awaitAssert {
          val sender = TestProbe()
          counterRegion.tell(Get(123), sender.ref)
          sender.expectMsg(500.millis, i)
        }

        counterRegion ! EntityEnvelope(123, Increment)
        counterRegion ! Get(123)
        expectMsg(i + 1)

        counterRegion ! EntityEnvelope(123, Stop)
        expectMsg(Ack)
      }
    }
  }
} 
Example 14
Source File: NoScaler.scala    From cloudstate    with Apache License 2.0
package io.cloudstate.proxy.autoscaler

import akka.actor.{Actor, ActorLogging, ActorRef}
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.{Cluster, MemberStatus}
import Autoscaler.{Deployment, Scale}


class NoScaler(autoscaler: ActorRef) extends Actor with ActorLogging {

  private[this] final val cluster = Cluster(context.system)

  cluster.subscribe(self, classOf[ClusterDomainEvent])
  sendDeployment()

  override def postStop(): Unit =
    cluster.unsubscribe(self)

  private def sendDeployment(): Unit =
    autoscaler ! Deployment(
      name = context.system.name,
      ready = cluster.state.members.count(c => c.status == MemberStatus.Up || c.status == MemberStatus.WeaklyUp),
      scale = cluster.state.members.size,
      upgrading = false
    )

  override def receive: Receive = {
    case Scale(_, scale) =>
      log.info(s"Autoscaler requested scale up to $scale")
    case _: ClusterDomainEvent =>
      // Don't care what the event was, just send the current deployment state to the autoscaler
      sendDeployment()
  }
} 
Example 15
Source File: Status.scala    From nexus    with Apache License 2.0
package ch.epfl.bluebrain.nexus.kg.routes

import akka.cluster.{Cluster, MemberStatus}
import monix.eval.Task

sealed trait Status {

  /** Performs the availability check. */
  def check: Task[Boolean]
}

object Status {

  class ClusterStatus(cluster: Cluster) extends Status {
    override def check: Task[Boolean] =
      Task.pure(
        !cluster.isTerminated &&
          cluster.state.leader.isDefined && cluster.state.members.nonEmpty &&
          !cluster.state.members.exists(_.status != MemberStatus.Up) && cluster.state.unreachable.isEmpty
      )
  }
} 
Example 16
Source File: OldestAutoDownBase.scala    From akka-cluster-custom-downing    with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.cluster.MemberStatus.Down
import akka.cluster.{MemberStatus, Member}

import scala.concurrent.duration.FiniteDuration

abstract class OldestAutoDownBase(oldestMemberRole: Option[String], downIfAlone: Boolean, autoDownUnreachableAfter: FiniteDuration)
  extends OldestAwareCustomAutoDownBase(autoDownUnreachableAfter){

  override def onMemberRemoved(member: Member, previousStatus: MemberStatus): Unit = {
    if (isOldestOf(oldestMemberRole))
      downPendingUnreachableMembers()
  }

  override def downOrAddPending(member: Member): Unit = {
    if (isOldestOf(oldestMemberRole)) {
      down(member.address)
      replaceMember(member.copy(Down))
    } else {
      pendingAsUnreachable(member)
    }
  }

  def downOnSecondary(member: Member): Unit = {
    if (isSecondaryOldest(oldestMemberRole)) {
      down(member.address)
      replaceMember(member.copy(Down))
    }
  }

  override def downOrAddPendingAll(members: Set[Member]): Unit = {
    val oldest = oldestMember(oldestMemberRole)
    if (downIfAlone && isOldestAlone(oldestMemberRole)) {
      if (isOldestOf(oldestMemberRole)) {
        shutdownSelf()
      } else if (isSecondaryOldest(oldestMemberRole)) {
        members.foreach(downOnSecondary)
      } else {
        members.foreach(downOrAddPending)
      }
    } else {
      if (oldest.fold(true)(o => members.contains(o))) {
        shutdownSelf()
      } else {
        members.foreach(downOrAddPending)
      }
    }
  }

  def downAloneOldest(member: Member): Unit = {
    val oldest = oldestMember(oldestMemberRole)
    if (isOldestOf(oldestMemberRole)) {
      shutdownSelf()
    } else if (isSecondaryOldest(oldestMemberRole) && oldest.contains(member)) {
      oldest.foreach { m =>
        down(m.address)
        replaceMember(m.copy(Down))
      }
    } else {
      pendingAsUnreachable(member)
    }
  }
} 
Example 17
Source File: ClusterMembershipCheckSettingsSpec.scala    From akka-management    with Apache License 2.0
package akka.management.cluster.scaladsl

import akka.cluster.MemberStatus
import org.scalatest.{ Matchers, WordSpec }

class ClusterMembershipCheckSettingsSpec extends WordSpec with Matchers {

  "Member status parsing" must {
    "be case insensitive" in {
      ClusterMembershipCheckSettings.memberStatus("WeaklyUp") shouldEqual MemberStatus.WeaklyUp
      ClusterMembershipCheckSettings.memberStatus("Weaklyup") shouldEqual MemberStatus.WeaklyUp
      ClusterMembershipCheckSettings.memberStatus("weaklyUp") shouldEqual MemberStatus.WeaklyUp
      ClusterMembershipCheckSettings.memberStatus("Up") shouldEqual MemberStatus.Up
      ClusterMembershipCheckSettings.memberStatus("Exiting") shouldEqual MemberStatus.Exiting
      ClusterMembershipCheckSettings.memberStatus("down") shouldEqual MemberStatus.Down
      ClusterMembershipCheckSettings.memberStatus("joininG") shouldEqual MemberStatus.Joining
      ClusterMembershipCheckSettings.memberStatus("leaving") shouldEqual MemberStatus.Leaving
      ClusterMembershipCheckSettings.memberStatus("removed") shouldEqual MemberStatus.Removed
    }

    "have a useful error message for invalid values" in {

      intercept[IllegalArgumentException] {
        ClusterMembershipCheckSettings.memberStatus("cats") shouldEqual MemberStatus.Removed
      }.getMessage shouldEqual "'cats' is not a valid MemberStatus. See reference.conf for valid values"
    }
  }

} 
Example 18
Source File: ClusterMembershipCheck.scala    From akka-management    with Apache License 2.0
package akka.management.cluster.scaladsl

import akka.actor.ActorSystem
import akka.annotation.InternalApi
import akka.cluster.{ Cluster, MemberStatus }
import akka.util.Helpers
import com.typesafe.config.Config

import scala.collection.JavaConverters._
import scala.concurrent.Future


@InternalApi
private[akka] object ClusterMembershipCheckSettings {
  def memberStatus(status: String): MemberStatus =
    Helpers.toRootLowerCase(status) match {
      case "weaklyup" => MemberStatus.WeaklyUp
      case "up"       => MemberStatus.Up
      case "exiting"  => MemberStatus.Exiting
      case "down"     => MemberStatus.Down
      case "joining"  => MemberStatus.Joining
      case "leaving"  => MemberStatus.Leaving
      case "removed"  => MemberStatus.Removed
      case invalid =>
        throw new IllegalArgumentException(
          s"'$invalid' is not a valid MemberStatus. See reference.conf for valid values"
        )
    }
  def apply(config: Config): ClusterMembershipCheckSettings =
    new ClusterMembershipCheckSettings(config.getStringList("ready-states").asScala.map(memberStatus).toSet)
}

final class ClusterMembershipCheckSettings(val readyStates: Set[MemberStatus])

final class ClusterMembershipCheck @InternalApi private[akka] (
    system: ActorSystem,
    selfStatus: () => MemberStatus,
    settings: ClusterMembershipCheckSettings)
    extends (() => Future[Boolean]) {

  def this(system: ActorSystem) =
    this(
      system,
      () => Cluster(system).selfMember.status,
      ClusterMembershipCheckSettings(system.settings.config.getConfig("akka.management.cluster.health-check")))

  override def apply(): Future[Boolean] = {
    Future.successful(settings.readyStates.contains(selfStatus()))
  }
} 
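Given the public `this(system)` constructor above, the check can be exercised directly. A minimal sketch, assuming the akka-management reference.conf that provides akka.management.cluster.health-check is on the classpath:

import akka.actor.ActorSystem
import akka.management.cluster.scaladsl.ClusterMembershipCheck

val system = ActorSystem("health")
val check  = new ClusterMembershipCheck(system)

// completes with true when the self member's status is in ready-states
check().foreach(ready => println(s"ready: $ready"))(system.dispatcher)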
Example 19
Source File: MarathonApiDockerDemoApp.scala    From akka-management    with Apache License 2.0
package akka.cluster.bootstrap

import akka.actor.ActorSystem
import akka.cluster.{ Cluster, MemberStatus }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.stream.ActorMaterializer

object MarathonApiDockerDemoApp extends App {
  implicit val system = ActorSystem("my-system")
  implicit val materializer = ActorMaterializer()

  val cluster = Cluster(system)

  def isReady() = {
    val selfNow = cluster.selfMember

    selfNow.status == MemberStatus.Up
  }

  def isHealthy() = {
    isReady()
  }

  val route =
    concat(
      path("ping")(complete("pong!")),
      path("healthy")(complete(if (isHealthy()) StatusCodes.OK else StatusCodes.ServiceUnavailable)),
      path("ready")(complete(if (isReady()) StatusCodes.OK else StatusCodes.ServiceUnavailable))
    )

  AkkaManagement(system).start()
  ClusterBootstrap(system).start()

  Http().bindAndHandle(
    route,
    sys.env.get("HOST").getOrElse("127.0.0.1"),
    sys.env.get("PORT_HTTP").map(_.toInt).getOrElse(8080))
} 
Example 20
Source File: DemoApp.scala    From akka-management    with Apache License 2.0
package akka.cluster.bootstrap

import akka.actor.ActorSystem
import akka.cluster.{ Cluster, MemberStatus }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.scaladsl.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.stream.ActorMaterializer

object DemoApp extends App {
  implicit val system = ActorSystem("my-system")
  implicit val materializer = ActorMaterializer()

  val cluster = Cluster(system)

  def isReady() = {
    val selfNow = cluster.selfMember

    selfNow.status == MemberStatus.Up
  }

  def isHealthy() = {
    isReady()
  }

  val route =
    concat(
      path("ping")(complete("pong!")),
      path("healthy")(complete(if (isHealthy()) StatusCodes.OK else StatusCodes.ServiceUnavailable)),
      path("ready")(complete(if (isReady()) StatusCodes.OK else StatusCodes.ServiceUnavailable)))

  AkkaManagement(system).start()
  ClusterBootstrap(system).start()

  Http().bindAndHandle(
    route,
    sys.env.get("HOST").getOrElse("127.0.0.1"),
    sys.env.get("PORT_HTTP").map(_.toInt).getOrElse(8080))
} 
Example 21
Source File: ExampleHealthCheck.scala    From akka-management    with Apache License 2.0
package doc.akka.management

import akka.actor.ActorSystem
import akka.cluster.{ Cluster, MemberStatus }

import scala.concurrent.Future

//#basic
class ExampleHealthCheck(system: ActorSystem) extends (() => Future[Boolean]) {
  override def apply(): Future[Boolean] = {
    Future.successful(true)
  }
}
//#basic

//#cluster
class ClusterHealthCheck(system: ActorSystem) extends (() => Future[Boolean]) {
  private val cluster = Cluster(system)
  override def apply(): Future[Boolean] = {
    Future.successful(cluster.selfMember.status == MemberStatus.Up)
  }
}
//#cluster 
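For such a check to be invoked, Akka Management has to know about it. A sketch of the registration via the akka.management.health-checks config section (the config path follows akka-management's documented conventions; the check name is illustrative):

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

val config = ConfigFactory.parseString(
  """
  akka.management.health-checks.readiness-checks {
    cluster-health = "doc.akka.management.ClusterHealthCheck"
  }
  """).withFallback(ConfigFactory.load())

val system = ActorSystem("health-checks", config)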
Example 22
Source File: HttpClusterBootstrapRoutes.scala    From akka-management    with Apache License 2.0
package akka.management.cluster.bootstrap.contactpoint

import scala.concurrent.duration._

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.cluster.Member
import akka.event.Logging
import akka.event.LoggingAdapter
import akka.http.javadsl.server.directives.RouteAdapter
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.server.Route
import akka.management.cluster.bootstrap.ClusterBootstrapSettings
import akka.management.cluster.bootstrap.contactpoint.HttpBootstrapJsonProtocol.ClusterMember
import akka.management.cluster.bootstrap.contactpoint.HttpBootstrapJsonProtocol.SeedNodes

final class HttpClusterBootstrapRoutes(settings: ClusterBootstrapSettings) extends HttpBootstrapJsonProtocol {

  import akka.http.scaladsl.server.Directives._

  private def routeGetSeedNodes: Route = extractClientIP { clientIp =>
    extractActorSystem { implicit system =>
      import akka.cluster.MemberStatus
      val cluster = Cluster(system)

      def memberToClusterMember(m: Member): ClusterMember =
        ClusterMember(m.uniqueAddress.address, m.uniqueAddress.longUid, m.status.toString, m.roles)

      val state = cluster.state

      // TODO shuffle the members so in a big deployment nodes start joining different ones and not all the same?
      val members = state.members
        .diff(state.unreachable)
        .filter(m =>
          m.status == MemberStatus.up || m.status == MemberStatus.weaklyUp || m.status == MemberStatus.joining)
        .take(settings.contactPoint.httpMaxSeedNodesToExpose)
        .map(memberToClusterMember)

      val info = SeedNodes(cluster.selfMember.uniqueAddress.address, members)
      log.info(
        "Bootstrap request from {}: Contact Point returning {} seed-nodes [{}]",
        clientIp,
        members.size,
        members.map(_.node).mkString(", "))
      complete(info)
    }
  }

  /** All routes of this contact point; in this excerpt, just the seed-nodes route. */
  def routes: Route = routeGetSeedNodes

  /** Java API */
  def getRoutes: akka.http.javadsl.server.Route = RouteAdapter(routes)

  private def log(implicit sys: ActorSystem): LoggingAdapter =
    Logging(sys, classOf[HttpClusterBootstrapRoutes])

}

object ClusterBootstrapRequests {

  import akka.http.scaladsl.client.RequestBuilding._

  def bootstrapSeedNodes(baseUri: Uri): HttpRequest =
    Get(baseUri + "/bootstrap/seed-nodes")

} 
Example 23
Source File: ClusteredTeskit.scala    From lagom    with Apache License 2.0
package com.lightbend.lagom.internal.cluster

import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Address
import akka.actor.BootstrapSetup
import akka.actor.setup.ActorSystemSetup
import akka.cluster.Cluster
import akka.cluster.MemberStatus
import akka.remote.testconductor.RoleName
import com.typesafe.config.ConfigFactory
import akka.remote.testkit.MultiNodeConfig
import akka.remote.testkit.MultiNodeSpec
import akka.testkit.ImplicitSender
import com.lightbend.lagom.internal.cluster.ClusterMultiNodeConfig.node1
import com.typesafe.config.Config

import scala.concurrent.duration._

object ClusterMultiNodeConfig extends ClusterMultiNodeConfig

// this is reused in multiple multi-jvm tests. There's still some copy/paste around though.
abstract class ClusterMultiNodeConfig extends MultiNodeConfig {
  val node1 = role("node1")
  val node2 = role("node2")
  val node3 = role("node3")

  protected def systemConfig: Config =
    ConfigFactory.parseString(
      """
    akka.loglevel = INFO
    akka.actor.provider = cluster
    terminate-system-after-member-removed = 60s

    # increase default timeouts to leave wider margin for Travis.
    # 30s to 60s
    akka.testconductor.barrier-timeout=60s
    akka.test.single-expect-default = 15s

    akka.cluster.sharding.waiting-for-state-timeout = 5s

    # Don't terminate the actor system when doing a coordinated shutdown
    akka.coordinated-shutdown.terminate-actor-system = off
    akka.coordinated-shutdown.run-by-jvm-shutdown-hook = off
    akka.cluster.run-coordinated-shutdown-when-down = off

    ## The settings below are incidental because this code lives in a project that depends on lagom-cluster and
    ## lagom-akka-management-core.

    # multi-jvm tests forms the cluster programmatically
    # therefore we disable Akka Cluster Bootstrap
    lagom.cluster.bootstrap.enabled = off

    # no jvm exit on tests
    lagom.cluster.exit-jvm-when-system-terminated = off
    """
    )

  commonConfig(systemConfig)
}

// heavily inspired by AbstractClusteredPersistentEntitySpec
// this is reused in multiple multi-jvm tests. There's still some copy/paste around though.
object ClusterMultiNodeActorSystemFactory {
  // Copied from MultiNodeSpec
  private def getCallerName(clazz: Class[_]): String = {
    val s = Thread.currentThread.getStackTrace.map(_.getClassName).drop(1).dropWhile(_.matches(".*MultiNodeSpec.?$"))
    val reduced = s.lastIndexWhere(_ == clazz.getName) match {
      case -1 => s
      case z  => s.drop(z + 1)
    }
    reduced.head.replaceFirst(""".*\.""", "").replaceAll("[^a-zA-Z_0-9]", "_")
  }
  def createActorSystem(): Config => ActorSystem = { config =>
    val setup = ActorSystemSetup(BootstrapSetup(ConfigFactory.load(config)))
    ActorSystem(getCallerName(classOf[MultiNodeSpec]), setup)
  }
} 
Example 24
Source File: ClusteredMultiNodeUtils.scala    From lagom    with Apache License 2.0
package com.lightbend.lagom.internal.cluster

import akka.actor.ActorRef
import akka.actor.Address
import akka.cluster.Cluster
import akka.cluster.MemberStatus
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeSpec
import akka.testkit.ImplicitSender
import com.lightbend.lagom.internal.cluster.ClusterMultiNodeConfig.node1

import scala.concurrent.duration._

abstract class ClusteredMultiNodeUtils(val numOfNodes: Int, multiNodeConfig: ClusterMultiNodeConfig)
    extends MultiNodeSpec(multiNodeConfig, ClusterMultiNodeActorSystemFactory.createActorSystem())
    with STMultiNodeSpec
    with ImplicitSender {
  override def initialParticipants: Int = roles.size

  def join(from: RoleName, to: RoleName): Unit = {
    runOn(from) {
      Cluster(system).join(node(to).address)
    }
    enterBarrier(from.name + "-joined")
  }

  def fullAddress(ref: ActorRef): Address =
    if (ref.path.address.hasLocalScope) Cluster(system).selfAddress
    else ref.path.address

  protected override def atStartup(): Unit = {
    join(node1, node1)
    roles.tail.foreach(n => join(n, node1))
    within(15.seconds) {
      awaitAssert(Cluster(system).state.members.size should be(numOfNodes))
      awaitAssert(
        Cluster(system).state.members.toIndexedSeq.map(_.status).distinct should be(IndexedSeq(MemberStatus.Up))
      )
    }

    enterBarrier("startup")
  }
} 
Example 25
Source File: ClusterDomainEventListener.scala    From 006877    with MIT License
package aia.cluster.words

import akka.actor.{ActorLogging, Actor}

import akka.cluster.{MemberStatus, Cluster}
import akka.cluster.ClusterEvent._

class ClusterDomainEventListener extends Actor with ActorLogging {
  Cluster(context.system).subscribe(self, classOf[ClusterDomainEvent])

  def receive = {
    case MemberUp(member) =>
      log.info(s"$member UP.")
    case MemberExited(member) =>
      log.info(s"$member EXITED.")
    case MemberRemoved(member, previousState) =>
      if (previousState == MemberStatus.Exiting) {
        log.info(s"Member $member previously gracefully exited, REMOVED.")
      } else {
        log.info(s"$member previously downed after unreachable, REMOVED.")
      }
    case UnreachableMember(member) =>
      log.info(s"$member UNREACHABLE")
    case ReachableMember(member) =>
      log.info(s"$member REACHABLE")
    case state: CurrentClusterState =>
      log.info(s"Current state of the cluster: $state")
  }
  override def postStop(): Unit = {
    Cluster(context.system).unsubscribe(self)
    super.postStop()
  }
} 
Example 26
Source File: ClusterManager.scala    From scalachain    with MIT License
package com.elleflorio.scalachain.cluster

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.{Cluster, MemberStatus}
import com.elleflorio.scalachain.cluster.ClusterManager.GetMembers

object ClusterManager {

  sealed trait ClusterMessage
  case object GetMembers extends ClusterMessage

  def props(nodeId: String) = Props(new ClusterManager(nodeId))
}

class ClusterManager(nodeId: String) extends Actor with ActorLogging {

  val cluster: Cluster = Cluster(context.system)
  val listener: ActorRef = context.actorOf(ClusterListener.props(nodeId, cluster), "clusterListener")

  override def receive: Receive = {
    case GetMembers => {
      sender() ! cluster.state.members.filter(_.status == MemberStatus.up)
        .map(_.address.toString)
        .toList
    }
  }
} 
Example 27
Source File: StorageNodeActor.scala    From JustinDB    with Apache License 2.0
package justin.db.actors

import akka.actor.{Actor, ActorRef, Props, RootActorPath, Terminated}
import akka.cluster.ClusterEvent.{CurrentClusterState, MemberUp}
import akka.cluster.{Cluster, Member, MemberStatus}
import com.typesafe.scalalogging.StrictLogging
import justin.db.actors.protocol.{RegisterNode, _}
import justin.db.cluster.ClusterMembers
import justin.db.cluster.datacenter.Datacenter
import justin.db.consistenthashing.{NodeId, Ring}
import justin.db.replica._
import justin.db.replica.read.{ReplicaLocalReader, ReplicaReadCoordinator, ReplicaRemoteReader}
import justin.db.replica.write.{ReplicaLocalWriter, ReplicaRemoteWriter, ReplicaWriteCoordinator}
import justin.db.storage.PluggableStorageProtocol

import scala.concurrent.ExecutionContext

class StorageNodeActor(nodeId: NodeId, datacenter: Datacenter, storage: PluggableStorageProtocol, ring: Ring, n: N) extends Actor with StrictLogging {

  private[this] implicit val ec: ExecutionContext = context.dispatcher
  private[this] val cluster = Cluster(context.system)

  private[this] var clusterMembers   = ClusterMembers.empty
  private[this] val readCoordinator  = new ReplicaReadCoordinator(nodeId, ring, n, new ReplicaLocalReader(storage), new ReplicaRemoteReader)
  private[this] val writeCoordinator = new ReplicaWriteCoordinator(nodeId, ring, n, new ReplicaLocalWriter(storage), new ReplicaRemoteWriter)

  private[this] val coordinatorRouter = context.actorOf(
    props = RoundRobinCoordinatorRouter.props(readCoordinator, writeCoordinator),
    name  = RoundRobinCoordinatorRouter.routerName
  )

  private[this] val name = self.path.name

  override def preStart(): Unit = cluster.subscribe(this.self, classOf[MemberUp])
  override def postStop(): Unit = cluster.unsubscribe(this.self)

  def receive: Receive = {
    receiveDataPF orElse receiveClusterDataPF orElse receiveRegisterNodePR orElse notHandledPF
  }

  private[this] def receiveDataPF: Receive = {
    case readReq: StorageNodeReadRequest              =>
      coordinatorRouter ! ReadData(sender(), clusterMembers, readReq)
    case writeLocalDataReq: StorageNodeWriteDataLocal =>
      coordinatorRouter ! WriteData(sender(), clusterMembers, writeLocalDataReq)
    case writeClientReplicaReq: Internal.WriteReplica =>
      coordinatorRouter ! WriteData(sender(), clusterMembers, writeClientReplicaReq)
  }

  private[this] def receiveClusterDataPF: Receive = {
    case "members"                  => sender() ! clusterMembers
    case MemberUp(member)           => register(nodeId, ring, member)
    case state: CurrentClusterState => state.members.filter(_.status == MemberStatus.Up).foreach(member => register(nodeId, ring, member))
    case Terminated(actorRef)       => clusterMembers = clusterMembers.removeByRef(StorageNodeActorRef(actorRef))
  }

  private[this] def receiveRegisterNodePR: Receive = {
    case RegisterNode(senderNodeId) if clusterMembers.notContains(senderNodeId) =>
      val senderRef = sender()
      context.watch(senderRef)
      clusterMembers = clusterMembers.add(senderNodeId, StorageNodeActorRef(senderRef))
      senderRef ! RegisterNode(nodeId)
      logger.info(s"Actor[$name]: Successfully registered node [id-${senderNodeId.id}]")
    case RegisterNode(senderNodeId) =>
      logger.info(s"Actor[$name]: Node [id-${senderNodeId.id}] is already registered")
  }

  private[this] def register(nodeId: NodeId, ring: Ring, member: Member) = {
    (member.hasRole(StorageNodeActor.role), datacenter.name == member.dataCenter) match {
      case (true, true) => register()
      case (_,   false) => logger.info(s"Actor[$name]: $member doesn't belong to datacenter [${datacenter.name}]")
      case (false,   _) => logger.info(s"Actor[$name]: $member doesn't have [${StorageNodeActor.role}] role (it has roles ${member.roles})")
    }

    def register() = for {
      ringNodeId    <- ring.nodesId
      nodeName       = StorageNodeActor.name(ringNodeId, Datacenter(member.dataCenter))
      nodeRef        = context.actorSelection(RootActorPath(member.address) / "user" / nodeName)
    } yield nodeRef ! RegisterNode(nodeId)
  }

  private[this] def notHandledPF: Receive = {
    case t => logger.warn(s"Actor[$name]: Not handled message [$t]")
  }
}

object StorageNodeActor {
  def role: String = "storagenode"
  def name(nodeId: NodeId, datacenter: Datacenter): String = s"${datacenter.name}-id-${nodeId.id}"
  def props(nodeId: NodeId, datacenter: Datacenter, storage: PluggableStorageProtocol, ring: Ring, n: N): Props = {
    Props(new StorageNodeActor(nodeId, datacenter, storage, ring, n))
  }
}

case class StorageNodeActorRef(ref: ActorRef) extends AnyVal 
Example 28
Source File: MajorityLeaderAutoDownBase.scala    From akka-cluster-custom-downing    with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.MemberStatus.Down
import akka.cluster.{MemberStatus, Member}

import scala.concurrent.duration.FiniteDuration

abstract class MajorityLeaderAutoDownBase(majorityMemberRole: Option[String], downIfInMinority: Boolean, autoDownUnreachableAfter: FiniteDuration)
    extends MajorityAwareCustomAutoDownBase(autoDownUnreachableAfter) {

  override def onLeaderChanged(leader: Option[Address]): Unit = {
    if (majorityMemberRole.isEmpty && isLeader) downPendingUnreachableMembers()
  }

  override def onRoleLeaderChanged(role: String, leader: Option[Address]): Unit = {
    majorityMemberRole.foreach { r =>
      if (r == role && isRoleLeaderOf(r)) downPendingUnreachableMembers()
    }
  }

  override def onMemberRemoved(member: Member, previousStatus: MemberStatus): Unit = {
    if (isMajority(majorityMemberRole)) {
      if (isLeaderOf(majorityMemberRole)) {
        downPendingUnreachableMembers()
      }
    } else {
      down(selfAddress)
    }
    super.onMemberRemoved(member, previousStatus)
  }

  override def downOrAddPending(member: Member): Unit = {
    if (isLeaderOf(majorityMemberRole)) {
      down(member.address)
      replaceMember(member.copy(Down))
    } else {
      pendingAsUnreachable(member)
    }
  }

  override def downOrAddPendingAll(members: Set[Member]): Unit = {
    if (isMajorityAfterDown(members, majorityMemberRole)) {
      members.foreach(downOrAddPending)
    } else if (downIfInMinority) {
      shutdownSelf()
    }
  }
} 
Example 29
Source File: QuorumLeaderAutoDownBase.scala    From akka-cluster-custom-downing    with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.{MemberStatus, Member}
import akka.cluster.MemberStatus.Down

import scala.concurrent.duration.FiniteDuration

abstract class QuorumLeaderAutoDownBase(quorumRole: Option[String], quorumSize: Int, downIfOutOfQuorum: Boolean, autoDownUnreachableAfter: FiniteDuration)
  extends QuorumAwareCustomAutoDownBase(quorumSize, autoDownUnreachableAfter) {

  override def onLeaderChanged(leader: Option[Address]): Unit = {
    if (quorumRole.isEmpty && isLeader) downPendingUnreachableMembers()
  }

  override def onRoleLeaderChanged(role: String, leader: Option[Address]): Unit = {
    quorumRole.foreach { r =>
      if (r == role && isRoleLeaderOf(r)) downPendingUnreachableMembers()
    }
  }


  override def onMemberRemoved(member: Member, previousStatus: MemberStatus): Unit = {
    if (isQuorumMet(quorumRole)) {
      if (isLeaderOf(quorumRole)) {
        downPendingUnreachableMembers()
      }
    } else {
      down(selfAddress)
    }
    super.onMemberRemoved(member, previousStatus)
  }

  override def downOrAddPending(member: Member): Unit = {
    if (isLeaderOf(quorumRole)) {
      down(member.address)
      replaceMember(member.copy(Down))
    } else {
      pendingAsUnreachable(member)
    }
  }

  override def downOrAddPendingAll(members: Set[Member]): Unit = {
    if (isQuorumMetAfterDown(members, quorumRole)) {
      members.foreach(downOrAddPending)
    } else if (downIfOutOfQuorum) {
      shutdownSelf()
    }
  }
}