akka.cluster.sharding.ClusterSharding Scala Examples

The following examples show how to use akka.cluster.sharding.ClusterSharding. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
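Before the project examples, here is a minimal, self-contained sketch of the typical classic-sharding setup. It assumes a hypothetical Greeter entity actor and an ActorSystem configured with the cluster provider and joined to a cluster; the names Greeter, Envelope, and ShardingSketch are illustrative only and do not appear in any project below.

package example

import akka.actor.{ Actor, ActorSystem, Props }
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings, ShardRegion }

// Hypothetical entity actor: one instance per entity id, addressed through the shard region.
object Greeter {
  final case class Envelope(id: Long, payload: Any)
  final case class Greet(name: String)

  val shardName = "greeter"
  val numberOfShards = 100

  // Extracts the entity id and the message to deliver to that entity.
  val extractEntityId: ShardRegion.ExtractEntityId = {
    case Envelope(id, payload) => (id.toString, payload)
  }

  // Maps an incoming message to a shard id; entities with the same shard id are co-located.
  val extractShardId: ShardRegion.ExtractShardId = {
    case Envelope(id, _) => (id % numberOfShards).toString
  }
}

class Greeter extends Actor {
  def receive: Receive = {
    case Greeter.Greet(name) =>
      sender() ! s"Hello, $name from entity ${self.path.name}"
  }
}

object ShardingSketch extends App {
  // Assumes akka.actor.provider = cluster and that the node joins a cluster (e.g. via seed nodes).
  val system = ActorSystem("sketch")

  // Registers the entity type and returns the shard region ActorRef for this node.
  val region = ClusterSharding(system).start(
    typeName = Greeter.shardName,
    entityProps = Props[Greeter],
    settings = ClusterShardingSettings(system),
    extractEntityId = Greeter.extractEntityId,
    extractShardId = Greeter.extractShardId
  )

  // All messages go through the shard region, which routes them to the correct entity.
  region ! Greeter.Envelope(42L, Greeter.Greet("world"))
}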
Example 1
Source File: ShardedShoppers.scala    From 006877    with MIT License
package aia.persistence.sharded

import aia.persistence._
import akka.actor._
import akka.cluster.sharding.{ClusterSharding, ClusterShardingSettings}

object ShardedShoppers {
  def props = Props(new ShardedShoppers)
  def name = "sharded-shoppers"
}

class ShardedShoppers extends Actor {

  ClusterSharding(context.system).start(
    ShardedShopper.shardName,
    ShardedShopper.props,
    ClusterShardingSettings(context.system),
    ShardedShopper.extractEntityId,
    ShardedShopper.extractShardId
  )

  def shardedShopper = {
    ClusterSharding(context.system).shardRegion(ShardedShopper.shardName)
  }

  def receive = {
    case cmd: Shopper.Command =>
      shardedShopper forward cmd
  }
} 
Example 2
Source File: ClusterShardingApplication.scala    From Akka-Cookbook    with MIT License
package com.packt.chapter7

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.cluster.sharding.{ClusterSharding, ClusterShardingSettings}
import com.packt.chapter7.TemperatureActor.{GetCurrentTemperature, Location, UpdateTemperature}
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._

object ClusterShardingApplication extends App {
  val actorSystem = ActorSystem("ClusterSystem")

  import actorSystem.dispatcher

  val temperatureActor: ActorRef = ClusterSharding(actorSystem).start(
    typeName = TemperatureActor.shardName,
    entityProps = Props[TemperatureActor],
    settings = ClusterShardingSettings(actorSystem),
    extractEntityId = TemperatureActor.extractEntityId,
    extractShardId = TemperatureActor.extractShardId)

  // Give the cluster some time to form before sending messages. Never use Thread.sleep in production!
  Thread.sleep(30000)

  val locations = Vector(
    Location("USA", "Chicago"),
    Location("ESP", "Madrid"),
    Location("FIN", "Helsinki")
  )

  temperatureActor ! UpdateTemperature(locations(0), 1.0)
  temperatureActor ! UpdateTemperature(locations(1), 20.0)
  temperatureActor ! UpdateTemperature(locations(2), -10.0)

  implicit val timeout = Timeout(5 seconds)

  locations.foreach { location =>
    (temperatureActor ? GetCurrentTemperature(location)).onSuccess {
      case x: Double =>
        println(s"Current temperature in $location is $x")
    }
  }
} 
Example 3
Source File: EventSourcedSupportFactory.scala    From cloudstate    with Apache License 2.0
package io.cloudstate.proxy.eventsourced

import akka.NotUsed
import akka.actor.{ActorRef, ActorSystem}
import akka.cluster.sharding.ShardRegion.HashCodeMessageExtractor
import akka.cluster.sharding.{ClusterSharding, ClusterShardingSettings}
import akka.event.Logging
import akka.grpc.GrpcClientSettings
import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import com.google.protobuf.Descriptors.ServiceDescriptor
import io.cloudstate.protocol.entity.{Entity, Metadata}
import io.cloudstate.protocol.event_sourced.EventSourcedClient
import io.cloudstate.proxy._
import io.cloudstate.proxy.entity.{EntityCommand, UserFunctionReply}

import scala.concurrent.{ExecutionContext, Future}
import scala.collection.JavaConverters._

class EventSourcedSupportFactory(system: ActorSystem,
                                 config: EntityDiscoveryManager.Configuration,
                                 grpcClientSettings: GrpcClientSettings,
                                 concurrencyEnforcer: ActorRef,
                                 statsCollector: ActorRef)(implicit ec: ExecutionContext, mat: Materializer)
    extends EntityTypeSupportFactory {

  private final val log = Logging.getLogger(system, this.getClass)

  private val eventSourcedClient = EventSourcedClient(grpcClientSettings)(system)

  override def buildEntityTypeSupport(entity: Entity,
                                      serviceDescriptor: ServiceDescriptor,
                                      methodDescriptors: Map[String, EntityMethodDescriptor]): EntityTypeSupport = {
    validate(serviceDescriptor, methodDescriptors)

    val stateManagerConfig = EventSourcedEntity.Configuration(entity.serviceName,
                                                              entity.persistenceId,
                                                              config.passivationTimeout,
                                                              config.relayOutputBufferSize)

    log.debug("Starting EventSourcedEntity for {}", entity.persistenceId)
    val clusterSharding = ClusterSharding(system)
    val clusterShardingSettings = ClusterShardingSettings(system)
    val eventSourcedEntity = clusterSharding.start(
      typeName = entity.persistenceId,
      entityProps =
        EventSourcedEntitySupervisor.props(eventSourcedClient, stateManagerConfig, concurrencyEnforcer, statsCollector),
      settings = clusterShardingSettings,
      messageExtractor = new EntityIdExtractor(config.numberOfShards),
      allocationStrategy = new DynamicLeastShardAllocationStrategy(1, 10, 2, 0.0),
      handOffStopMessage = EventSourcedEntity.Stop
    )

    new EventSourcedSupport(eventSourcedEntity, config.proxyParallelism, config.relayTimeout)
  }

  private def validate(serviceDescriptor: ServiceDescriptor,
                       methodDescriptors: Map[String, EntityMethodDescriptor]): Unit = {
    val streamedMethods =
      methodDescriptors.values.filter(m => m.method.toProto.getClientStreaming || m.method.toProto.getServerStreaming)
    if (streamedMethods.nonEmpty) {
      val offendingMethods = streamedMethods.map(_.method.getName).mkString(",")
      throw EntityDiscoveryException(
        s"Event sourced entities do not support streamed methods, but ${serviceDescriptor.getFullName} has the following streamed methods: ${offendingMethods}"
      )
    }
    val methodsWithoutKeys = methodDescriptors.values.filter(_.keyFieldsCount < 1)
    if (methodsWithoutKeys.nonEmpty) {
      val offendingMethods = methodsWithoutKeys.map(_.method.getName).mkString(",")
      throw EntityDiscoveryException(
        s"Event sourced entities do not support methods whose parameters do not have at least one field marked as entity_key, " +
        s"but ${serviceDescriptor.getFullName} has the following methods without keys: ${offendingMethods}"
      )
    }
  }
}

private class EventSourcedSupport(eventSourcedEntity: ActorRef,
                                  parallelism: Int,
                                  private implicit val relayTimeout: Timeout)
    extends EntityTypeSupport {
  import akka.pattern.ask

  override def handler(method: EntityMethodDescriptor,
                       metadata: Metadata): Flow[EntityCommand, UserFunctionReply, NotUsed] =
    Flow[EntityCommand].mapAsync(parallelism)(
      command =>
        (eventSourcedEntity ? EntityTypeSupport.mergeStreamLevelMetadata(metadata, command))
          .mapTo[UserFunctionReply]
    )

  override def handleUnary(command: EntityCommand): Future[UserFunctionReply] =
    (eventSourcedEntity ? command).mapTo[UserFunctionReply]
}

private final class EntityIdExtractor(shards: Int) extends HashCodeMessageExtractor(shards) {
  override final def entityId(message: Any): String = message match {
    case command: EntityCommand => command.entityId
  }
} 
Example 4
Source File: ClusterShardingQuickTerminationSpec.scala    From akka-persistence-cassandra    with Apache License 2.0
package akka.persistence.cassandra.sharding

import akka.actor.{ ActorLogging, ActorRef, Props, ReceiveTimeout }
import akka.cluster.{ Cluster, MemberStatus }
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings, ShardRegion }
import akka.persistence.PersistentActor
import akka.persistence.cassandra.CassandraSpec
import akka.testkit.TestProbe

import scala.concurrent.duration._

object ClusterShardingQuickTerminationSpec {

  case object Increment
  case object Decrement
  final case class Get(counterId: Long)
  final case class EntityEnvelope(id: Long, payload: Any)
  case object Ack

  case object Stop
  final case class CounterChanged(delta: Int)

  class Counter extends PersistentActor with ActorLogging {
    import ShardRegion.Passivate

    context.setReceiveTimeout(5.seconds)

    // self.path.name is the entity identifier (utf-8 URL-encoded)
    override def persistenceId: String = "Counter-" + self.path.name

    var count = 0

    def updateState(event: CounterChanged): Unit =
      count += event.delta

    override def receiveRecover: Receive = {
      case evt: CounterChanged => updateState(evt)
      case other               => log.debug("Other: {}", other)
    }

    override def receiveCommand: Receive = {
      case Increment      => persist(CounterChanged(+1))(updateState)
      case Decrement      => persist(CounterChanged(-1))(updateState)
      case Get(_)         => sender() ! count
      case ReceiveTimeout => context.parent ! Passivate(stopMessage = Stop)
      case Stop =>
        sender() ! Ack
        context.stop(self)
    }
  }
  val extractEntityId: ShardRegion.ExtractEntityId = {
    case EntityEnvelope(id, payload) => (id.toString, payload)
    case msg @ Get(id)               => (id.toString, msg)
  }

  val numberOfShards = 100

  val extractShardId: ShardRegion.ExtractShardId = {
    case EntityEnvelope(id, _) => (id % numberOfShards).toString
    case Get(id)               => (id % numberOfShards).toString
  }

}

class ClusterShardingQuickTerminationSpec extends CassandraSpec("""
    akka.actor.provider = cluster
  """.stripMargin) {

  import ClusterShardingQuickTerminationSpec._

  "Cassandra Plugin with Cluster Sharding" must {
    "clear state if persistent actor shuts down" in {
      Cluster(system).join(Cluster(system).selfMember.address)
      awaitAssert {
        Cluster(system).selfMember.status shouldEqual MemberStatus.Up
      }
      ClusterSharding(system).start(
        typeName = "tagging",
        entityProps = Props[Counter],
        settings = ClusterShardingSettings(system),
        extractEntityId = extractEntityId,
        extractShardId = extractShardId)

      (0 to 100).foreach { i =>
        val counterRegion: ActorRef = ClusterSharding(system).shardRegion("tagging")
        awaitAssert {
          val sender = TestProbe()
          counterRegion.tell(Get(123), sender.ref)
          sender.expectMsg(500.millis, i)
        }

        counterRegion ! EntityEnvelope(123, Increment)
        counterRegion ! Get(123)
        expectMsg(i + 1)

        counterRegion ! EntityEnvelope(123, Stop)
        expectMsg(Ack)
      }
    }
  }
} 
Example 5
Source File: DistributedProcessing.scala    From aecor    with MIT License
package aecor.distributedprocessing

import java.net.URLEncoder
import java.nio.charset.StandardCharsets

import aecor.distributedprocessing.DistributedProcessing.{ KillSwitch, Process }
import aecor.distributedprocessing.DistributedProcessingWorker.KeepRunning
import aecor.util.effect._
import akka.actor.ActorSystem
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings }
import akka.pattern.{ BackoffOpts, BackoffSupervisor, ask }
import akka.util.Timeout
import cats.effect.Effect
import cats.implicits._

import scala.concurrent.duration.{ FiniteDuration, _ }

final class DistributedProcessing private (system: ActorSystem) {

  def start[F[_]: Effect](name: String,
                          processes: List[Process[F]],
                          settings: DistributedProcessingSettings =
                            DistributedProcessingSettings.default(system)): F[KillSwitch[F]] =
    Effect[F].delay {
      val opts = BackoffOpts
        .onFailure(
          DistributedProcessingWorker.props(processes, name),
          "worker",
          settings.minBackoff,
          settings.maxBackoff,
          settings.randomFactor
        )

      val props = BackoffSupervisor.props(opts)

      val region = ClusterSharding(system).start(
        typeName = name,
        entityProps = props,
        settings = settings.clusterShardingSettings,
        extractEntityId = {
          case c @ KeepRunning(workerId) => (workerId.toString, c)
        },
        extractShardId = {
          case KeepRunning(workerId) => (workerId % settings.numberOfShards).toString
          case other                 => throw new IllegalArgumentException(s"Unexpected message [$other]")
        }
      )

      val regionSupervisor = system.actorOf(
        DistributedProcessingSupervisor
          .props(processes.size, region, settings.heartbeatInterval),
        "DistributedProcessingSupervisor-" + URLEncoder
          .encode(name, StandardCharsets.UTF_8.name())
      )
      implicit val timeout = Timeout(settings.shutdownTimeout)
      KillSwitch {
        Effect[F].fromFuture {
          regionSupervisor ? DistributedProcessingSupervisor.GracefulShutdown
        }.void
      }
    }
}

object DistributedProcessing {
  def apply(system: ActorSystem): DistributedProcessing = new DistributedProcessing(system)
  final case class KillSwitch[F[_]](shutdown: F[Unit]) extends AnyVal
  final case class RunningProcess[F[_]](watchTermination: F[Unit], shutdown: F[Unit])
  final case class Process[F[_]](run: F[RunningProcess[F]]) extends AnyVal
}

final case class DistributedProcessingSettings(minBackoff: FiniteDuration,
                                               maxBackoff: FiniteDuration,
                                               randomFactor: Double,
                                               shutdownTimeout: FiniteDuration,
                                               numberOfShards: Int,
                                               heartbeatInterval: FiniteDuration,
                                               clusterShardingSettings: ClusterShardingSettings)

object DistributedProcessingSettings {
  def default(clusterShardingSettings: ClusterShardingSettings): DistributedProcessingSettings =
    DistributedProcessingSettings(
      minBackoff = 3.seconds,
      maxBackoff = 10.seconds,
      randomFactor = 0.2,
      shutdownTimeout = 10.seconds,
      numberOfShards = 100,
      heartbeatInterval = 2.seconds,
      clusterShardingSettings = clusterShardingSettings
    )

  def default(system: ActorSystem): DistributedProcessingSettings =
    default(ClusterShardingSettings(system))
} 
Example 6
Source File: GenericAkkaRuntime.scala    From aecor    with MIT License
package aecor.runtime.akkageneric

import aecor.encoding.WireProtocol.Encoded
import aecor.encoding.syntax._
import aecor.encoding.{ KeyDecoder, KeyEncoder, WireProtocol }
import aecor.runtime.akkageneric.GenericAkkaRuntime.KeyedCommand
import aecor.runtime.akkageneric.GenericAkkaRuntimeActor.CommandResult
import aecor.runtime.akkageneric.serialization.Message
import aecor.util.effect._
import akka.actor.ActorSystem
import akka.cluster.sharding.{ ClusterSharding, ShardRegion }
import akka.pattern._
import akka.util.Timeout
import cats.effect.Effect
import cats.implicits._
import cats.tagless.FunctorK
import cats.tagless.syntax.functorK._
import cats.~>
import scodec.bits.BitVector

object GenericAkkaRuntime {
  def apply(system: ActorSystem): GenericAkkaRuntime =
    new GenericAkkaRuntime(system)
  private[akkageneric] final case class KeyedCommand(key: String, bytes: BitVector) extends Message
}

final class GenericAkkaRuntime private (system: ActorSystem) {
  def runBehavior[K: KeyEncoder: KeyDecoder, M[_[_]]: FunctorK, F[_]](
    typeName: String,
    createBehavior: K => F[M[F]],
    settings: GenericAkkaRuntimeSettings = GenericAkkaRuntimeSettings.default(system)
  )(implicit M: WireProtocol[M], F: Effect[F]): F[K => M[F]] =
    F.delay {
      val props = GenericAkkaRuntimeActor.props[K, M, F](createBehavior, settings.idleTimeout)

      val extractEntityId: ShardRegion.ExtractEntityId = {
        case KeyedCommand(entityId, c) =>
          (entityId, GenericAkkaRuntimeActor.Command(c))
      }

      val numberOfShards = settings.numberOfShards

      val extractShardId: ShardRegion.ExtractShardId = {
        case KeyedCommand(key, _) =>
          String.valueOf(scala.math.abs(key.hashCode) % numberOfShards)
        case other => throw new IllegalArgumentException(s"Unexpected message [$other]")
      }

      val shardRegion = ClusterSharding(system).start(
        typeName = typeName,
        entityProps = props,
        settings = settings.clusterShardingSettings,
        extractEntityId = extractEntityId,
        extractShardId = extractShardId
      )

      val keyEncoder = KeyEncoder[K]

      key =>
        M.encoder.mapK(new (Encoded ~> F) {

          implicit val askTimeout: Timeout = Timeout(settings.askTimeout)

          override def apply[A](fa: Encoded[A]): F[A] = F.suspend {
            val (bytes, decoder) = fa
            F.fromFuture {
                shardRegion ? KeyedCommand(keyEncoder(key), bytes)
              }
              .flatMap {
                case result: CommandResult =>
                  decoder.decodeValue(result.bytes).lift[F]
                case other =>
                  F.raiseError(
                    new IllegalArgumentException(s"Unexpected response [$other] from shard region")
                  )
              }
          }
        })
    }
} 
Example 7
Source File: AkkaPersistenceRuntime.scala    From aecor    with MIT License
package aecor.runtime.akkapersistence

import aecor.data.{ EventsourcedBehavior, Tagging }
import aecor.encoding.WireProtocol.Encoded
import aecor.encoding.syntax._
import aecor.encoding.{ KeyDecoder, KeyEncoder, WireProtocol }
import aecor.runtime.akkapersistence.AkkaPersistenceRuntime._
import aecor.runtime.akkapersistence.AkkaPersistenceRuntimeActor.CommandResult
import aecor.runtime.akkapersistence.readside.{ AkkaPersistenceEventJournalQuery, JournalQuery }
import aecor.runtime.akkapersistence.serialization.{ Message, PersistentDecoder, PersistentEncoder }
import aecor.util.effect._
import akka.actor.ActorSystem
import akka.cluster.sharding.{ ClusterSharding, ShardRegion }
import akka.pattern.ask
import akka.util.Timeout
import cats.effect.Effect
import cats.implicits._
import cats.tagless.FunctorK
import cats.tagless.syntax.functorK._
import cats.~>
import scodec.bits.BitVector

object AkkaPersistenceRuntime {
  def apply[O](system: ActorSystem, journalAdapter: JournalAdapter[O]): AkkaPersistenceRuntime[O] =
    new AkkaPersistenceRuntime(system, journalAdapter)

  private[akkapersistence] final case class EntityCommand(entityKey: String,
                                                          commandBytes: BitVector)
      extends Message
}

class AkkaPersistenceRuntime[O] private[akkapersistence] (system: ActorSystem,
                                                          journalAdapter: JournalAdapter[O]) {
  def deploy[M[_[_]]: FunctorK, F[_], State, Event: PersistentEncoder: PersistentDecoder, K: KeyEncoder: KeyDecoder](
    typeName: String,
    behavior: EventsourcedBehavior[M, F, State, Event],
    tagging: Tagging[K],
    snapshotPolicy: SnapshotPolicy[State] = SnapshotPolicy.never,
    settings: AkkaPersistenceRuntimeSettings = AkkaPersistenceRuntimeSettings.default(system)
  )(implicit M: WireProtocol[M], F: Effect[F]): F[K => M[F]] =
    F.delay {
      val props =
        AkkaPersistenceRuntimeActor.props(
          typeName,
          behavior,
          snapshotPolicy,
          tagging,
          settings.idleTimeout,
          journalAdapter.writeJournalId,
          snapshotPolicy.pluginId
        )

      val extractEntityId: ShardRegion.ExtractEntityId = {
        case EntityCommand(entityId, bytes) =>
          (entityId, AkkaPersistenceRuntimeActor.HandleCommand(bytes))
      }

      val numberOfShards = settings.numberOfShards

      val extractShardId: ShardRegion.ExtractShardId = {
        case EntityCommand(entityId, _) =>
          (scala.math.abs(entityId.hashCode) % numberOfShards).toString
        case other => throw new IllegalArgumentException(s"Unexpected message [$other]")
      }

      val shardRegion = ClusterSharding(system).start(
        typeName = typeName,
        entityProps = props,
        settings = settings.clusterShardingSettings,
        extractEntityId = extractEntityId,
        extractShardId = extractShardId
      )

      val keyEncoder = KeyEncoder[K]

      key =>
        M.encoder.mapK(new (Encoded ~> F) {

          implicit val askTimeout: Timeout = Timeout(settings.askTimeout)

          override def apply[A](fa: Encoded[A]): F[A] = F.suspend {
            val (bytes, decoder) = fa
            F.fromFuture {
                shardRegion ? EntityCommand(keyEncoder(key), bytes)
              }
              .flatMap {
                case CommandResult(resultBytes) =>
                  decoder.decodeValue(resultBytes).lift[F]
                case other =>
                  F.raiseError(
                    new IllegalArgumentException(s"Unexpected response [$other] from shard region")
                  )
              }
          }
        })
    }

  def journal[K: KeyDecoder, E: PersistentDecoder]: JournalQuery[O, K, E] =
    AkkaPersistenceEventJournalQuery[O, K, E](journalAdapter)
} 
Example 8
Source File: Route.scala    From distributed-cache-on-k8s-poc    with MIT License
package http

import akka.actor.ActorSystem
import akka.cluster.sharding.ClusterSharding
import akka.http.scaladsl.marshalling.Marshaller.StringMarshaller
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.pattern.ask
import akka.util.Timeout
import cluster.CacheDataActor.Get
import cluster.ClusterShardRegion

import scala.concurrent.duration._
import scala.util.{ Failure, Success }

class Route(system: ActorSystem) {
  implicit val timeout: Timeout = 3.seconds
  val shardRegionActor = new ClusterShardRegion(system).clusterShardRegion

  def routes: akka.http.scaladsl.server.Route = path("health") {
    get {
      complete(StatusCodes.OK)
    }
  } ~ path("cache-data" / JavaUUID) { id =>
    get {
      onComplete((shardRegionActor ? Get(id)).mapTo[String]) {
        case Success(s) => complete(s)
        case Failure(f) => complete(f)
      }
    }
  }

} 
Example 9
Source File: ClusterShardRegion.scala    From distributed-cache-on-k8s-poc    with MIT License
package cluster

import akka.actor.{ ActorRef, ActorSystem }
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings }

class ClusterShardRegion(actorSystem: ActorSystem) {
  val clusterShardRegion: ActorRef = ClusterSharding(actorSystem).start(
    typeName = ClusterShardRegion.SHARD_REGION_NAME,
    entityProps = CacheDataActor.props,
    settings = ClusterShardingSettings(actorSystem),
    extractEntityId = CacheDataActor.extractEntityId,
    extractShardId = CacheDataActor.extractShardId
  )
}

object ClusterShardRegion {
  val SHARD_REGION_NAME = "cache-data"
} 
Example 10
Source File: ConnectedCarCluster.scala    From cloudflow    with Apache License 2.0
package connectedcar.streamlets

import akka.actor.{ ActorRef, Props }
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings }
import akka.util.Timeout
import cloudflow.akkastream.scaladsl.{ FlowWithCommittableContext, RunnableGraphStreamletLogic }
import cloudflow.streamlets.StreamletShape
import cloudflow.streamlets.avro.{ AvroInlet, AvroOutlet }
import connectedcar.actors.ConnectedCarActor

import scala.concurrent.duration._
import akka.pattern.ask
import cloudflow.akkastream.{ AkkaStreamlet, Clustering }
import connectedcar.data.{ ConnectedCarAgg, ConnectedCarERecord }

object ConnectedCarCluster extends AkkaStreamlet with Clustering {
  val in    = AvroInlet[ConnectedCarERecord]("in")
  val out   = AvroOutlet[ConnectedCarAgg]("out", m ⇒ m.driver.toString)
  val shape = StreamletShape(in).withOutlets(out)

  override def createLogic = new RunnableGraphStreamletLogic() {
    def runnableGraph = sourceWithCommittableContext(in).via(flow).to(committableSink(out))

    val carRegion: ActorRef = ClusterSharding(context.system).start(
      typeName = "Counter",
      entityProps = Props[ConnectedCarActor],
      settings = ClusterShardingSettings(context.system),
      extractEntityId = ConnectedCarActor.extractEntityId,
      extractShardId = ConnectedCarActor.extractShardId
    )

    implicit val timeout: Timeout = 3.seconds
    def flow =
      FlowWithCommittableContext[ConnectedCarERecord]
        .mapAsync(5)(msg ⇒ (carRegion ? msg).mapTo[ConnectedCarAgg])
  }
} 
Example 11
Source File: AkkaKubernetes.scala    From akka-kubernetes-tests    with Apache License 2.0
package akka.kubernetes.sample

import akka.actor.{Actor, ActorLogging, ActorSystem, PoisonPill, Props}
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.sharding.{ClusterSharding, ClusterShardingSettings}
import akka.cluster.singleton.{
  ClusterSingletonManager,
  ClusterSingletonManagerSettings,
  ClusterSingletonProxy,
  ClusterSingletonProxySettings
}
import akka.cluster.{Cluster, ClusterEvent}
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.stream.ActorMaterializer

object DemoApp extends App {

  implicit val system = ActorSystem("KubernetesTest")

  import system.{dispatcher, log}

  implicit val mat = ActorMaterializer()
  implicit val cluster = Cluster(system)

  log.info("Running with [{}]", new Resources())
  log.info(s"Started [$system], cluster.selfAddress = ${cluster.selfAddress}")

  AkkaManagement(system).start()
  ClusterBootstrap(system).start()

  system.actorOf(
    ClusterSingletonManager.props(singletonProps = Props(new AkkaBoss("patriknw")),
                                  terminationMessage = PoisonPill,
                                  settings = ClusterSingletonManagerSettings(system)),
    "boss"
  )

  val bossProxy = system.actorOf(
    ClusterSingletonProxy.props(singletonManagerPath = "/user/boss", settings = ClusterSingletonProxySettings(system)),
    name = "bossProxy"
  )

  val teamMembers = ClusterSharding(system).start(
    "team-member",
    Props(new AkkaMember()),
    ClusterShardingSettings(system),
    AkkaMember.extractEntityId,
    AkkaMember.extractShardId
  )

  cluster.subscribe(system.actorOf(Props[ClusterWatcher]),
                    ClusterEvent.InitialStateAsEvents,
                    classOf[ClusterDomainEvent])

  val talkToTheBoss = new TalkToTheBossRouteRoute(bossProxy)
  val talkToATeamMember = new TalkToATeamMemberRoute(teamMembers)

  Http().bindAndHandle(
    concat(talkToTheBoss.route(),
           talkToATeamMember.route(),
           ClusterStateRoute.routeGetMembers(cluster),
           VersionRoute.versionRoute),
    "0.0.0.0",
    8080
  )

  Cluster(system).registerOnMemberUp({
    log.info("Cluster member is up!")
  })

}

class ClusterWatcher extends Actor with ActorLogging {
  implicit val cluster = Cluster(context.system)

  override def receive = {
    case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
  }
} 
Example 12
Source File: ShardedBankAccountAggregates.scala    From akka-ddd-cqrs-es-example    with MIT License
package com.github.j5ik2o.bank.adaptor.aggregate

import akka.actor.{ Actor, ActorLogging, ActorRef, ActorSystem, Props }
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings }
import com.github.j5ik2o.bank.adaptor.aggregate.BankAccountAggregate.Protocol.BankAccountCommandRequest

object ShardedBankAccountAggregates {
  def props: Props = Props(new ShardedBankAccountAggregates())
  def name: String = "sharded-bank-accounts"

  def start(system: ActorSystem): ActorRef = {
    system.log.debug("ShardedBankAccounts#start: start")
    val actorRef = ClusterSharding(system).start(
      ShardedBankAccountAggregate.shardName,
      ShardedBankAccountAggregate.props,
      ClusterShardingSettings(system),
      ShardedBankAccountAggregate.extractEntityId,
      ShardedBankAccountAggregate.extractShardId
    )
    system.log.debug("ShardedBankAccounts#start: finish")
    actorRef
  }

  def shardRegion(system: ActorSystem): ActorRef =
    ClusterSharding(system).shardRegion(ShardedBankAccountAggregate.shardName)

}

class ShardedBankAccountAggregates extends Actor with ActorLogging {

  ShardedBankAccountAggregates.start(context.system)

  override def receive: Receive = {
    case cmd: BankAccountCommandRequest =>
      ShardedBankAccountAggregates.shardRegion(context.system) forward cmd
  }

} 
Example 13
Source File: ShardedApp.scala    From akka-sharding-example    with MIT License
package com.michalplachta.shoesorter.api

import akka.actor.{Props, ActorSystem}
import akka.cluster.sharding.{ClusterSharding, ClusterShardingSettings}
import com.michalplachta.shoesorter.SortingDecider
import com.typesafe.config.ConfigFactory

object ShardedApp extends App {
  val config = ConfigFactory.load("sharded")
  implicit val system = ActorSystem(config getString "application.name", config)

  ClusterSharding(system).start(
    typeName = SortingDecider.name,
    entityProps = SortingDecider.props,
    settings = ClusterShardingSettings(system),
    extractShardId = SortingDecider.extractShardId,
    extractEntityId = SortingDecider.extractEntityId
  )

  val decider = ClusterSharding(system).shardRegion(SortingDecider.name)
  system.actorOf(Props(new RestInterface(decider, config getInt "application.exposed-port")))
}