scala.concurrent.duration._ Scala Examples
The following examples show how to use scala.concurrent.duration._.
The examples are drawn from open-source projects; the source file, project, and license are noted above each example.
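
As background for the examples below: scala.concurrent.duration._ enriches numeric literals with duration factories and re-exports the java.util.concurrent.TimeUnit constants (SECONDS, MILLISECONDS, HOURS, ...). A minimal sketch of the idioms that recur throughout the examples (the object name DurationBasics is just for illustration):

import scala.concurrent.duration._

object DurationBasics extends App {
  // Numeric literals acquire duration factories via implicit classes.
  val tick: FiniteDuration = 500.millis
  val timeout: FiniteDuration = 30.seconds

  // Durations support arithmetic, comparison and unit conversion.
  println(tick * 4)         // 2000 milliseconds
  println(timeout.toMillis) // 30000
  println(tick < timeout)   // true

  // The package also re-exports TimeUnit constants, used with the
  // explicit constructors seen in several examples below.
  println(Duration(1, HOURS))
  println(FiniteDuration(2, MILLISECONDS))

  // Duration.Inf is an infinite Duration (not a FiniteDuration),
  // commonly passed to Await.result.
  val unbounded: Duration = Duration.Inf

  // The bare postfix form (5 seconds) used by some examples additionally
  // requires: import scala.language.postfixOps
}
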
Example 1
Source File: IOJSTests.scala From cats-effect with Apache License 2.0
package cats.effect

import org.scalatest.matchers.should.Matchers
import org.scalatest.funsuite.AsyncFunSuite

import scala.concurrent.duration.{FiniteDuration, _}
import scala.concurrent.{ExecutionContext, Future}
import scala.scalajs.js.timers.setTimeout

class IOJSTests extends AsyncFunSuite with Matchers {
  implicit override def executionContext = ExecutionContext.global

  def delayed[A](duration: FiniteDuration)(f: => A): IO[A] =
    IO.async { callback =>
      setTimeout(duration.toMillis.toDouble)(callback(Right(f)))
    }

  test("unsafeToFuture works") {
    delayed(100.millis)(10).unsafeToFuture().map { r =>
      r shouldEqual 10
    }
  }

  test("unsafeRunSync is unsupported for async stuff") {
    Future {
      try {
        delayed(100.millis)(10).unsafeRunSync()
        fail("Expected UnsupportedOperationException")
      } catch {
        case _: UnsupportedOperationException => succeed
      }
    }
  }
}
Example 2
Source File: PeriodicProcessRuntime.scala From aecor with MIT License
package aecor.schedule.process

import aecor.distributedprocessing.{ AkkaStreamProcess, DistributedProcessing }
import aecor.util.effect._
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import cats.effect.{ ContextShift, Effect }

import scala.collection.immutable._
import scala.concurrent.duration.{ FiniteDuration, _ }

object PeriodicProcessRuntime {
  def apply[F[_]: Effect: ContextShift](
    name: String,
    tickInterval: FiniteDuration,
    processCycle: F[Unit]
  )(implicit materializer: Materializer): PeriodicProcessRuntime[F] =
    new PeriodicProcessRuntime[F](name, tickInterval, processCycle)
}

class PeriodicProcessRuntime[F[_]: Effect: ContextShift](
  name: String,
  tickInterval: FiniteDuration,
  processCycle: F[Unit]
)(implicit materializer: Materializer) {

  private def source =
    Source
      .tick(0.seconds, tickInterval, processCycle)
      .mapAsync(1)(_.unsafeToFuture())
      .mapMaterializedValue(_ => NotUsed)

  def run(system: ActorSystem): F[DistributedProcessing.KillSwitch[F]] =
    DistributedProcessing(system)
      .start[F](s"$name-Process", List(AkkaStreamProcess[F](source)))
}
Example 3
Source File: DistributedProcessing.scala From aecor with MIT License
package aecor.distributedprocessing

import java.net.URLEncoder
import java.nio.charset.StandardCharsets

import aecor.distributedprocessing.DistributedProcessing.{ KillSwitch, Process }
import aecor.distributedprocessing.DistributedProcessingWorker.KeepRunning
import aecor.util.effect._
import akka.actor.ActorSystem
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings }
import akka.pattern.{ BackoffOpts, BackoffSupervisor, ask }
import akka.util.Timeout
import cats.effect.Effect
import cats.implicits._

import scala.concurrent.duration.{ FiniteDuration, _ }

final class DistributedProcessing private (system: ActorSystem) {

  def start[F[_]: Effect](name: String,
                          processes: List[Process[F]],
                          settings: DistributedProcessingSettings =
                            DistributedProcessingSettings.default(system)): F[KillSwitch[F]] =
    Effect[F].delay {
      val opts = BackoffOpts
        .onFailure(
          DistributedProcessingWorker.props(processes, name),
          "worker",
          settings.minBackoff,
          settings.maxBackoff,
          settings.randomFactor
        )

      val props = BackoffSupervisor.props(opts)

      val region = ClusterSharding(system).start(
        typeName = name,
        entityProps = props,
        settings = settings.clusterShardingSettings,
        extractEntityId = {
          case c @ KeepRunning(workerId) => (workerId.toString, c)
        },
        extractShardId = {
          case KeepRunning(workerId) => (workerId % settings.numberOfShards).toString
          case other                 => throw new IllegalArgumentException(s"Unexpected message [$other]")
        }
      )

      val regionSupervisor = system.actorOf(
        DistributedProcessingSupervisor.props(processes.size, region, settings.heartbeatInterval),
        "DistributedProcessingSupervisor-" + URLEncoder.encode(name, StandardCharsets.UTF_8.name())
      )

      implicit val timeout = Timeout(settings.shutdownTimeout)
      KillSwitch {
        Effect[F].fromFuture {
          regionSupervisor ? DistributedProcessingSupervisor.GracefulShutdown
        }.void
      }
    }
}

object DistributedProcessing {
  def apply(system: ActorSystem): DistributedProcessing = new DistributedProcessing(system)

  final case class KillSwitch[F[_]](shutdown: F[Unit]) extends AnyVal
  final case class RunningProcess[F[_]](watchTermination: F[Unit], shutdown: F[Unit])
  final case class Process[F[_]](run: F[RunningProcess[F]]) extends AnyVal
}

final case class DistributedProcessingSettings(minBackoff: FiniteDuration,
                                               maxBackoff: FiniteDuration,
                                               randomFactor: Double,
                                               shutdownTimeout: FiniteDuration,
                                               numberOfShards: Int,
                                               heartbeatInterval: FiniteDuration,
                                               clusterShardingSettings: ClusterShardingSettings)

object DistributedProcessingSettings {
  def default(clusterShardingSettings: ClusterShardingSettings): DistributedProcessingSettings =
    DistributedProcessingSettings(
      minBackoff = 3.seconds,
      maxBackoff = 10.seconds,
      randomFactor = 0.2,
      shutdownTimeout = 10.seconds,
      numberOfShards = 100,
      heartbeatInterval = 2.seconds,
      clusterShardingSettings = clusterShardingSettings
    )

  def default(system: ActorSystem): DistributedProcessingSettings =
    default(ClusterShardingSettings(system))
}
Example 4
Source File: DistributedProcessingSupervisor.scala From aecor with MIT License
package aecor.distributedprocessing

import aecor.distributedprocessing.DistributedProcessingSupervisor.{ GracefulShutdown, ShutdownCompleted, Tick }
import aecor.distributedprocessing.DistributedProcessingWorker.KeepRunning
import akka.actor.{ Actor, ActorLogging, ActorRef, Props, Terminated }
import akka.cluster.sharding.ShardRegion

import scala.concurrent.duration.{ FiniteDuration, _ }

object DistributedProcessingSupervisor {
  private final case object Tick
  final case object GracefulShutdown
  final case object ShutdownCompleted

  def props(processCount: Int, shardRegion: ActorRef, heartbeatInterval: FiniteDuration): Props =
    Props(new DistributedProcessingSupervisor(processCount, shardRegion, heartbeatInterval))
}

final class DistributedProcessingSupervisor(processCount: Int,
                                            shardRegion: ActorRef,
                                            heartbeatInterval: FiniteDuration)
    extends Actor
    with ActorLogging {

  import context.dispatcher

  private val heartbeat =
    context.system.scheduler.schedule(0.seconds, heartbeatInterval, self, Tick)

  context.watch(shardRegion)

  override def postStop(): Unit = {
    heartbeat.cancel()
    ()
  }

  override def receive: Receive = {
    case Tick =>
      (0 until processCount).foreach { processId =>
        shardRegion ! KeepRunning(processId)
      }
    case Terminated(`shardRegion`) =>
      context.stop(self)
    case GracefulShutdown =>
      log.info(s"Performing graceful shutdown of [$shardRegion]")
      shardRegion ! ShardRegion.GracefulShutdown
      val replyTo = sender()
      context.become {
        case Terminated(`shardRegion`) =>
          log.info(s"Graceful shutdown completed for [$shardRegion]")
          context.stop(self)
          replyTo ! ShutdownCompleted
      }
  }
}
Example 5
Source File: TerminatorTest.scala From ingraph with Eclipse Public License 1.0
package ingraph.ire.nodes

import akka.actor.{ActorSystem, Props, actorRef2Scala}
import akka.testkit.{ImplicitSender, TestActors, TestKit}
import ingraph.ire.messages.{ChangeSet, Primary, Secondary, Terminator}
import ingraph.ire.nodes.binary.JoinNode
import ingraph.ire.nodes.unary.{ProductionNode, SelectionNode}
import ingraph.ire.util.TestUtil._
import ingraph.ire.util.Utils.conversions._
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.Await
import scala.concurrent.duration.{Duration, _}

class TerminatorTest(_system: ActorSystem) extends TestKit(_system) with ImplicitSender
  with WordSpecLike with Matchers with BeforeAndAfterAll {

  def this() = this(ActorSystem("MySpec"))

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }

  "Unary nodes" must {
    "propagate terminator messages" in {
      val echoActor = system.actorOf(TestActors.echoActorProps)
      val production = system.actorOf(Props(new ProductionNode("alpha test", 2)))
      val intermediary = system.actorOf(Props(new SelectionNode(production ! _, c => true, expectedTerminatorCount = 2))) // TODO wtf
      // val intermediary = system.actorOf(Props(new SelectionNode(production ! _, c => true)))

      val input1 = system.actorOf(Props(new SelectionNode(production ! _, c => true)))
      input1 ! ChangeSet(positive = tupleBag(tuple(15)))
      input1 ! ChangeSet(positive = tupleBag(tuple(19)))
      val input2 = system.actorOf(Props(new SelectionNode(intermediary ! _, c => true)))
      input2 ! ChangeSet(positive = tupleBag(tuple(25)))
      input2 ! ChangeSet(positive = tupleBag(tuple(29)))
      val input3 = system.actorOf(Props(new SelectionNode(intermediary ! _, c => true)))

      val terminator = Terminator(List(input1 ! _, input2 ! _, input3 ! _), production)
      val future = terminator.send()
      input1 ! ChangeSet(positive = tupleBag(tuple(16)))
      input1 ! ChangeSet(positive = tupleBag(tuple(17)))
      input2 ! ChangeSet(positive = tupleBag(tuple(26)))
      input2 ! ChangeSet(positive = tupleBag(tuple(27)))
      val expected = Set(tuple(15), tuple(19), tuple(25), tuple(29))
      assert(Await.result(future, Duration(1, HOURS)).toSet == expected)
    }
  }

  "Binary nodes" must {
    "propagate terminator messages" in {
      val echoActor = system.actorOf(TestActors.echoActorProps)
      val production = system.actorOf(Props(new ProductionNode("")), "Production")
      val checker = system.actorOf(Props(new SelectionNode(production ! _, c => true)), "checker")
      val intermediary = system.actorOf(Props(new JoinNode(checker ! _, 1, 1, mask(0), mask(0))), "intermediary")
      val input1 = system.actorOf(Props(new JoinNode(intermediary ! Primary(_), 1, 1, mask(0), mask(0))), "inputBeta")

      val msg15 = ChangeSet(positive = tupleBag(tuple(15)))
      input1 ! Primary(msg15)
      input1 ! Secondary(msg15)
      intermediary ! Secondary(msg15)
      val msg25 = ChangeSet(positive = tupleBag(tuple(25)))
      input1 ! Primary(msg25)
      input1 ! Secondary(msg25)
      intermediary ! Secondary(msg25)

      val terminator = Terminator(List(input1.primary, input1.secondary, intermediary.secondary), production)
      val future = terminator.send()
      input1 ! Primary(ChangeSet(positive = tupleBag(tuple(16))))
      input1 ! Secondary(ChangeSet(positive = tupleBag(tuple(16))))
      intermediary ! Secondary(ChangeSet(positive = tupleBag(tuple(16))))
      assert(Await.result(future, Duration(1, HOURS)).toSet == Set(tuple(15), tuple(25)))
      assert(Await.result(terminator.send(), Duration(1, HOURS)).toSet == Set(tuple(15), tuple(25), tuple(16)))

      (1 to 500).foreach(i => {
        input1 ! Secondary(ChangeSet(negative = tupleBag(tuple(16))))
        assert(Await.result(terminator.send(), Duration(1, HOURS)).toSet == Set(tuple(15), tuple(25)))
        input1 ! Secondary(ChangeSet(positive = tupleBag(tuple(16))))
        intermediary ! Secondary(ChangeSet(negative = tupleBag(tuple(15))))
        assert(Await.result(terminator.send(), Duration(1, HOURS)).toSet == Set(tuple(25), tuple(16)))
        intermediary ! Secondary(ChangeSet(positive = tupleBag(tuple(15))))
        assert(Await.result(terminator.send(), Duration(1, HOURS)).toSet == Set(tuple(15), tuple(25), tuple(16)))
      })
    }
  }

  "Node splitting" should {
    "work" in {
    }
  }
}
Example 6
Source File: TestingUtil.scala From infinispan-spark with Apache License 2.0
package org.infinispan.spark.test

import java.util.function.BooleanSupplier

import scala.annotation.tailrec
import scala.concurrent.duration.{Duration, _}
import scala.language.postfixOps
import scala.util.{Failure, Success, Try}

object TestingUtil {

  val DefaultDuration = 60 seconds
  val waitBetweenRetries = 500

  def waitForCondition(command: () => Boolean, duration: Duration): Unit = {
    val NumTimes = duration.toMillis.toInt / waitBetweenRetries

    @tailrec
    def waitForCondition(numTimes: Int, sleep: Boolean): Unit = {
      if (sleep) Thread.sleep(waitBetweenRetries)
      Try(command.apply()) match {
        case Success(true) =>
        case Success(false) if numTimes == 0 =>
          throw new Exception("Timeout waiting for condition.")
        case Failure(e) if numTimes == 0 =>
          throw new Exception("Given up trying to execute command.", e)
        case _ => waitForCondition(numTimes - 1, sleep = true)
      }
    }

    waitForCondition(NumTimes, sleep = false)
  }

  def waitForCondition(command: () => Boolean): Unit =
    waitForCondition(command, DefaultDuration)

  def waitForCondition(command: BooleanSupplier): Unit =
    waitForCondition(toScala(command), DefaultDuration)

  private def toScala(f: BooleanSupplier) = new (() => Boolean) {
    override def apply() = f.getAsBoolean
  }
}
Example 7
Source File: MetronomeFacade.scala From metronome with Apache License 2.0
package dcos.metronome.integrationtest.utils

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.client.RequestBuilding.{Get, Post}
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, HttpRequest, HttpResponse}
import akka.stream.Materializer
import com.mesosphere.utils.http.RestResult

import scala.concurrent.Await.result
import scala.concurrent.Future
import scala.concurrent.duration.{FiniteDuration, _}

class MetronomeFacade(val url: String, implicit val waitTime: FiniteDuration = 30.seconds)(implicit
    val system: ActorSystem,
    mat: Materializer
) {

  import scala.concurrent.ExecutionContext.Implicits.global

  //info --------------------------------------------------
  def info(): RestResult[HttpResponse] = {
    result(request(Get(s"$url/info")), waitTime)
  }

  def createJob(jobDef: String): RestResult[HttpResponse] = {
    val e = HttpEntity(ContentTypes.`application/json`, jobDef)
    result(request(Post(s"$url/v1/jobs", e)), waitTime)
  }

  def startRun(jobId: String): RestResult[HttpResponse] = {
    val e = HttpEntity(ContentTypes.`application/json`, "")
    result(request(Post(s"$url/v1/jobs/${jobId}/runs", e)), waitTime)
  }

  def getJob(jobId: String): RestResult[HttpResponse] = {
    result(request(Get(s"$url/v1/jobs/${jobId}")), waitTime)
  }

  def getJobs(): RestResult[HttpResponse] = {
    result(request(Get(s"$url/v1/jobs")), waitTime)
  }

  def getRuns(jobId: String): RestResult[HttpResponse] = {
    result(request(Get(s"$url/v1/jobs/${jobId}/runs")), waitTime)
  }

  private[this] def request(request: HttpRequest): Future[RestResult[HttpResponse]] = {
    Http(system).singleRequest(request).flatMap { response =>
      response.entity.toStrict(waitTime).map(_.data.decodeString("utf-8")).map(RestResult(response, _))
    }
  }
}
Example 8
Source File: DogStatsDReporterSpec.scala From service-container with Apache License 2.0
package com.github.vonnagy.service.container.metrics.reporting

import java.util.concurrent.TimeUnit

import com.github.vonnagy.service.container.AkkaTestkitSpecs2Support
import com.typesafe.config.ConfigFactory
import org.coursera.metrics.datadog.transport.Transport
import org.specs2.mock.Mockito
import org.specs2.mutable.SpecificationLike

import scala.concurrent.duration.{FiniteDuration, _}

class DogStatsDReporterSpec extends AkkaTestkitSpecs2Support with SpecificationLike with Mockito {

  "The DatadogReporter reporter" should {

    "report metrics when triggered by the scheduler" in {
      implicit val conf = ConfigFactory.parseString(
        """
        {
          enabled=on
          host="localhost"
          port=8125
          reporting-interval=10ms
          metric-prefix = "pref"
          tags = ["boo", "hoo"]
          api-key = "abc123"
        }
        """)

      val dogStatsDReporter = spy(new DogStatsDReporter)

      val transport = mock[Transport]
      dogStatsDReporter.getTransport returns transport

      val rptr = mock[org.coursera.metrics.datadog.DatadogReporter]
      dogStatsDReporter.getReporter returns rptr

      dogStatsDReporter.start(FiniteDuration(2, TimeUnit.MILLISECONDS))
      there was after(100.millisecond).atLeastOne(dogStatsDReporter).report()

      dogStatsDReporter.tags must containAllOf(Seq("boo", "hoo", "app:container-service", "version:1.0.0.N/A"))
      dogStatsDReporter.prefix must be equalTo "pref"

      dogStatsDReporter.stop
      there was one(transport).close()
    }
  }
}
Example 9
Source File: CatalogDatabase.scala From modelmatrix with Apache License 2.0
package com.collective.modelmatrix.catalog

import java.util.concurrent.Executors

import com.collective.modelmatrix.db.SchemaInstaller
import org.scalatest.BeforeAndAfterAll
import slick.driver.JdbcProfile

import scala.concurrent.duration.{FiniteDuration, _}
import scala.concurrent.{Await, ExecutionContext, Future}
import scalaz.Tag

trait CatalogDatabase {
  def driver: JdbcProfile

  import com.collective.modelmatrix.db.GenericSlickDriver.api.Database
  def db: Database

  lazy val catalog = new ModelMatrixCatalog(driver)

  protected implicit val catalogExecutionContext =
    Tag[ExecutionContext, ModelMatrixCatalog](ExecutionContext.fromExecutor(Executors.newFixedThreadPool(10)))

  protected def await[T](f: Future[T], duration: FiniteDuration = 10.seconds): T = {
    Await.result(f, duration)
  }
}

trait InstallSchemaBefore extends SchemaInstaller {
  self: BeforeAndAfterAll with CatalogDatabase =>

  private[this] var schemaInstalled: Boolean = false

  override protected def beforeAll(): Unit = {
    this.synchronized {
      if (!schemaInstalled) {
        installOrMigrate
        schemaInstalled = true
      }
    }
  }
}
Example 10
Source File: HttpUtil.scala From CM-Well with Apache License 2.0
package cmwell.analytics.util

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.RequestEntityAcceptance.Tolerated
import akka.http.scaladsl.model.{HttpMethod, HttpRequest, HttpResponse}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import akka.util.ByteString
import com.fasterxml.jackson.databind.{JsonNode, ObjectMapper}
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration.{MILLISECONDS, _}
import scala.concurrent.{Await, ExecutionContextExecutor, Future}

object HttpUtil {

  private val mapper = new ObjectMapper()

  private val config = ConfigFactory.load
  private val ReadTimeout = FiniteDuration(config.getDuration("extract-index-from-es.read-timeout").toMillis, MILLISECONDS)

  // Elasticsearch uses the POST verb in some places where the request is actually idempotent.
  // Requests that use POST, but are known to be idempotent can use this method.
  // The presence of any non-idempotent request in-flight causes Akka to not retry, and that will tend to result in
  // entire downloads failing more often.
  val SAFE_POST = HttpMethod(
    value = "POST",
    isSafe = true,
    isIdempotent = true,
    requestEntityAcceptance = Tolerated)

  def resultAsync(request: HttpRequest, action: String)
                 (implicit system: ActorSystem,
                  executionContext: ExecutionContextExecutor,
                  actorMaterializer: ActorMaterializer): Future[ByteString] =
    Http().singleRequest(request).map {
      case HttpResponse(status, _, entity, _) if status.isSuccess =>
        entity.dataBytes
          .fold(ByteString.empty)(_ ++ _)
          .runWith(Sink.head)
      case HttpResponse(status, _, entity, _) =>
        val message = Await.result(entity.toStrict(10.seconds).map(_.data), 10.seconds).utf8String
        throw new RuntimeException(s"HTTP request for $action failed. Status code: $status, message:$message")
    }
      .flatMap(identity)

  def result(request: HttpRequest, action: String, timeout: FiniteDuration = ReadTimeout)
            (implicit system: ActorSystem,
             executionContext: ExecutionContextExecutor,
             actorMaterializer: ActorMaterializer): ByteString =
    Await.result(resultAsync(request, action), timeout)

  def jsonResult(request: HttpRequest, action: String, timeout: FiniteDuration = ReadTimeout)
                (implicit system: ActorSystem,
                 executionContext: ExecutionContextExecutor,
                 actorMaterializer: ActorMaterializer): JsonNode =
    mapper.readTree(result(request, action, timeout).utf8String)

  def jsonResultAsync(request: HttpRequest, action: String)
                     (implicit system: ActorSystem,
                      executionContext: ExecutionContextExecutor,
                      actorMaterializer: ActorMaterializer): Future[JsonNode] =
    resultAsync(request, action).map((bytes: ByteString) => mapper.readTree(bytes.utf8String))
}
Example 11
Source File: ElasticWriteConfig.scala From elastic-indexer4s with MIT License
package com.yannick_cw.elastic_indexer4s.elasticsearch.elasic_config

import com.sksamuel.elastic4s.http.{ElasticClient, ElasticNodeEndpoint}
import org.apache.http.HttpHost
import org.elasticsearch.client.RestClient
import org.elasticsearch.client.sniff.Sniffer
import org.joda.time.DateTime

import scala.concurrent.duration.{FiniteDuration, _}

case class ElasticWriteConfig(
    elasticNodeEndpoints: List[ElasticNodeEndpoint],
    indexPrefix: String,
    docType: String,
    mappingSetting: MappingSetting = TypedMappingSetting(),
    writeBatchSize: Int = 50,
    writeConcurrentRequest: Int = 10,
    writeMaxAttempts: Int = 5,
    logWriteSpeedEvery: FiniteDuration = 1 minute,
    waitForElasticTimeout: FiniteDuration = 5 seconds,
    sniffCluster: Boolean = false
) {
  val indexName: String = indexPrefix + "_" + new DateTime().toString("yyyy-MM-dd't'HH:mm:ss")

  lazy val restClient: RestClient = RestClient
    .builder(elasticNodeEndpoints.map(e => new HttpHost(e.host, e.port, "http")): _*)
    .build()

  lazy val client: ElasticClient = {
    if (sniffCluster) {
      // sniffs every 5 minutes for the best hosts to connect to
      Sniffer.builder(restClient).build()
    }
    ElasticClient.fromRestClient(restClient)
  }
}

object ElasticWriteConfig {
  def apply(
      esNodeEndpoints: List[ElasticNodeEndpoint],
      esTargetIndexPrefix: String,
      esTargetType: String
  ): ElasticWriteConfig =
    new ElasticWriteConfig(esNodeEndpoints, esTargetIndexPrefix, esTargetType)
}
Example 12
Source File: SseConnector.scala From vamp with Apache License 2.0
package io.vamp.common.http

import akka.Done
import akka.actor.ActorSystem
import akka.event.LoggingAdapter
import akka.http.scaladsl.model.HttpHeader.ParsingResult.Ok
import akka.http.scaladsl.model.sse.ServerSentEvent
import akka.http.scaladsl.model.{ HttpHeader, HttpRequest, HttpResponse, Uri }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
import io.vamp.common.http.EventSource.EventSource

import scala.collection.mutable
import scala.concurrent.Future
import scala.concurrent.duration.{ FiniteDuration, _ }
import scala.language.postfixOps
import scala.util.{ Failure, Success }

private case class SseConnectionConfig(url: String, headers: List[(String, String)], tlsCheck: Boolean)

private case class SseConnectionEntryValue(source: EventSource)

trait SseListener {
  def onEvent(event: ServerSentEvent): Unit
}

object SseConnector {

  private val retryDelay: FiniteDuration = 5 second
  private val listeners: mutable.Map[SseConnectionConfig, Set[SseListener]] = mutable.Map()
  private val connections: mutable.Map[SseConnectionConfig, Future[Done]] = mutable.Map()

  def open(url: String, headers: List[(String, String)] = Nil, tlsCheck: Boolean)(listener: SseListener)(implicit system: ActorSystem, logger: LoggingAdapter): Unit = synchronized {
    val config = SseConnectionConfig(url, headers, tlsCheck)
    implicit val materializer: ActorMaterializer = ActorMaterializer()

    listeners.update(config, listeners.getOrElse(config, Set()) + listener)

    connections.getOrElseUpdate(config, {
      logger.info(s"Opening SSE connection: $url")
      EventSource(Uri(url), send(config), None, retryDelay).takeWhile { event ⇒
        event.eventType.foreach(t ⇒ logger.info(s"SSE: $t"))
        val receivers = listeners.getOrElse(config, Set())
        receivers.foreach(_.onEvent(event))
        val continue = receivers.nonEmpty
        if (!continue) logger.info(s"Closing SSE connection: $url")
        continue
      }.runWith(Sink.ignore)
    })
  }

  def close(listener: SseListener): Unit = synchronized {
    listeners.transform((_, v) ⇒ v - listener)
  }

  private def send(config: SseConnectionConfig)(request: HttpRequest)(implicit system: ActorSystem, materializer: ActorMaterializer): Future[HttpResponse] = {
    val httpHeaders = config.headers.map { case (k, v) ⇒ HttpHeader.parse(k, v) } collect { case Ok(h, _) ⇒ h } filterNot request.headers.contains
    Source.single(request.withHeaders(request.headers ++ httpHeaders) → 1).via(HttpClient.pool[Any](config.url, config.tlsCheck)).map {
      case (Success(response: HttpResponse), _) ⇒ response
      case (Failure(f), _)                      ⇒ throw new RuntimeException(f.getMessage)
    }.runWith(Sink.head)
  }
}
Example 13
Source File: BlockchainSettings.scala From stream-reactor with Apache License 2.0
package com.datamountaineer.streamreactor.connect.blockchain.config

import org.apache.kafka.common.config.AbstractConfig

import scala.concurrent.duration.{FiniteDuration, _}

case class BlockchainSettings(url: String,
                              kafkaTopic: String,
                              addresses: Set[String],
                              openConnectionTimeout: FiniteDuration = 10.seconds,
                              keepAlive: FiniteDuration = 25.seconds,
                              bufferSize: Int = 150000)

object BlockchainSettings {
  def apply(config: AbstractConfig): BlockchainSettings = {
    val url = config.getString(BlockchainConfigConstants.CONNECTION_URL)
    require(url != null && !url.trim.isEmpty, s"No ${BlockchainConfigConstants.CONNECTION_URL} provided!")

    val addresses = Option(config.getString(BlockchainConfigConstants.ADDRESS_SUBSCRIPTION))
      .map(_.split(",").map(_.trim).toSet)
      .getOrElse(Set.empty)

    val kafkaTopic = config.getString(BlockchainConfigConstants.KAFKA_TOPIC)
    require(kafkaTopic != null && kafkaTopic.trim.nonEmpty, s"No ${BlockchainConfigConstants.KAFKA_TOPIC} provided")

    BlockchainSettings(url, kafkaTopic, addresses)
  }
}
Example 14
Source File: SparkImplicits.scala From apache-spark-test with Apache License 2.0
package com.github.dnvriend.spark.datasources

import java.util.Properties

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.{ Sink, Source }
import org.apache.spark.sql._
import org.apache.spark.sql.streaming.DataStreamReader

import scala.collection.immutable._
import scala.concurrent.duration.{ FiniteDuration, _ }
import scala.concurrent.{ Await, Future }
import scala.reflect.runtime.universe._
import slick.driver.PostgresDriver.api._

object SparkImplicits {
  implicit class DataSourceOps(dfr: DataFrameReader) {
    def helloworld(path: String): DataFrame = dfr.format("helloworld").load(path)
    def person(path: String): DataFrame = dfr.format("person").load(path)
    def jdbc(table: String)(implicit jdbcOptions: Map[String, String]): DataFrame =
      dfr.format("jdbc").options(jdbcOptions ++ Map("dbtable" -> table)).load()
  }

  implicit class DataStreamReaderOps(dsr: DataStreamReader) {
    def currentPersistenceIds(path: String = "jdbc-read-journal"): DataFrame =
      dsr.format("current-persistence-id").load(path)
    def eventsByPersistenceId(path: String = "jdbc-read-journal"): DataFrame =
      dsr.format("current-events-by-persistence-id").load(path)
  }

  implicit class DataFrameWriterOps[T](dfw: DataFrameWriter[T]) {
    def ignore = dfw.mode(SaveMode.Ignore)

    def jdbc(table: String)(implicit jdbcOptions: Map[String, String]) = {
      val properties = jdbcOptions.foldLeft(new Properties) { case (prop, (k, v)) => prop.put(k, v); prop }
      dfw.jdbc(jdbcOptions("url"), table, properties)
      // does not (yet) work see: https://issues.apache.org/jira/browse/SPARK-7646
      // dfw.format("jdbc").mode(SaveMode.Overwrite).options(jdbcOptions ++ Map("dbtable" -> table))
    }
  }

  trait DataFrameQueryGenerator[A] {
    def upsert: String
  }

  implicit class DatasetOps(df: DataFrame) {
    def withSession[A](db: Database)(f: Session => A): A = {
      val session = db.createSession()
      try f(session) finally session.close()
    }

    def withStatement[A](db: Database)(f: java.sql.Statement => A): A =
      withSession(db)(session ⇒ session.withStatement()(f))

    def upsert[A](table: String)(implicit db: Database, dfq: DataFrameQueryGenerator[A]): DataFrame =
      withStatement(db) { stmt =>
        stmt.executeUpdate(dfq.upsert)
        df
      }
  }

  implicit class SparkSessionOps(spark: SparkSession) {
    def fromFuture[A <: Product: TypeTag](data: Future[Seq[A]])(implicit _timeout: FiniteDuration = null): DataFrame =
      spark.createDataFrame(Await.result(data, Option(_timeout).getOrElse(15.minutes)))

    def fromSource[A <: Product: TypeTag](data: Source[A, NotUsed])(implicit _timeout: FiniteDuration = null, mat: Materializer): DataFrame =
      fromFuture(data.runWith(Sink.seq))
  }
}
Example 15
Source File: ReadJournalSource.scala From apache-spark-test with Apache License 2.0
package akka.persistence.jdbc.spark.sql.execution.streaming

import akka.actor.{ ActorSystem, ExtendedActorSystem }
import akka.persistence.query.PersistenceQuery
import akka.persistence.query.scaladsl.{ CurrentEventsByPersistenceIdQuery, CurrentEventsByTagQuery, CurrentPersistenceIdsQuery, ReadJournal }
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.extension.{ Sink => Snk }
import akka.stream.{ ActorMaterializer, Materializer }
import org.apache.spark.sql._
import org.apache.spark.sql.execution.streaming.{ LongOffset, Offset, Source }
import org.apache.spark.sql.types.StructType

import scala.collection.immutable._
import scala.concurrent.duration.{ FiniteDuration, _ }
import scala.concurrent.{ Await, ExecutionContext, Future }

trait ReadJournalSource {
  _: Source =>

  def readJournalPluginId: String
  def sqlContext: SQLContext

  // some machinery
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher

  // read journal, only interested in the Current queries, as Spark isn't asynchronous
  lazy val readJournal = PersistenceQuery(system).readJournalFor(readJournalPluginId)
    .asInstanceOf[ReadJournal with CurrentPersistenceIdsQuery with CurrentEventsByPersistenceIdQuery with CurrentEventsByTagQuery]

  implicit class FutureOps[A](f: Future[A])(implicit ec: ExecutionContext, timeout: FiniteDuration = null) {
    def futureValue: A = Await.result(f, Option(timeout).getOrElse(10.seconds))
  }

  def maxPersistenceIds: Long =
    readJournal.currentPersistenceIds().runWith(Snk.count).futureValue

  def persistenceIds(start: Long, end: Long) =
    readJournal.currentPersistenceIds().drop(start).take(end).runWith(Sink.seq).futureValue

  def maxEventsByPersistenceId(pid: String): Long =
    readJournal.currentEventsByPersistenceId(pid, 0, Long.MaxValue).runWith(Snk.count).futureValue

  def eventsByPersistenceId(pid: String, start: Long, end: Long, eventMapperFQCN: String): Seq[Row] = {
    readJournal.currentEventsByPersistenceId(pid, start, end)
      .map(env => getMapper(eventMapperFQCN).get.row(env, sqlContext)).runWith(Sink.seq).futureValue
  }

  implicit def mapToDataFrame(rows: Seq[Row]): DataFrame = {
    import scala.collection.JavaConversions._
    sqlContext.createDataFrame(rows, schema)
  }

  def getStartEnd(_start: Option[Offset], _end: Offset): (Long, Long) = (_start, _end) match {
    case (Some(LongOffset(start)), LongOffset(end)) => (start, end)
    case (None, LongOffset(end))                    => (0L, end)
  }

  def getMapper(eventMapperFQCN: String): Option[EventMapper] =
    system.asInstanceOf[ExtendedActorSystem].dynamicAccess.createInstanceFor[EventMapper](eventMapperFQCN, List.empty)
      .recover { case cause => cause.printStackTrace(); null }.toOption

  override def stop(): Unit = {
    println("Stopping jdbc read journal")
    system.terminate()
  }
}
Example 16
Source File: BotPluginTestKit.scala From sumobot with Apache License 2.0
package com.sumologic.sumobot.test

import akka.actor.ActorSystem
import akka.testkit.{TestKit, TestProbe}
import com.sumologic.sumobot.core.model.{IncomingMessage, InstantMessageChannel, OutgoingMessage, UserSender}
import org.scalatest.BeforeAndAfterAll
import slack.models.User

import scala.concurrent.duration.{FiniteDuration, _}

@deprecated("use com.sumologic.sumobot.test.annotated.BotPluginTestKit", "1.0.2")
class BotPluginTestKit(_system: ActorSystem)
  extends TestKit(_system)
  with SumoBotSpec
  with BeforeAndAfterAll {

  protected val outgoingMessageProbe = TestProbe()
  system.eventStream.subscribe(outgoingMessageProbe.ref, classOf[OutgoingMessage])

  protected def confirmOutgoingMessage(test: OutgoingMessage => Unit, timeout: FiniteDuration = 1.second): Unit = {
    outgoingMessageProbe.expectMsgClass(timeout, classOf[OutgoingMessage]) match {
      case msg: OutgoingMessage =>
        test(msg)
    }
  }

  protected def instantMessage(text: String, user: User = mockUser("123", "jshmoe")): IncomingMessage = {
    IncomingMessage(text, true, InstantMessageChannel("125", user), "1527239216000090", sentBy = UserSender(user))
  }

  protected def mockUser(id: String, name: String): User = {
    User(id, name, None, None, None, None, None, None, None, None, None, None, None, None, None, None)
  }

  protected def send(message: IncomingMessage): Unit = {
    system.eventStream.publish(message)
  }

  override protected def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
  }
}
Example 17
Source File: BotPluginTestKit.scala From sumobot with Apache License 2.0
package com.sumologic.sumobot.test.annotated

import akka.actor.ActorSystem
import akka.testkit.{TestKit, TestProbe}
import com.sumologic.sumobot.core.model.{IncomingMessage, InstantMessageChannel, OutgoingMessage, UserSender}
import org.junit.runner.RunWith
import org.scalatest.concurrent.Eventually
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import slack.models.User

import scala.concurrent.duration.{FiniteDuration, _}

@RunWith(classOf[JUnitRunner])
abstract class BotPluginTestKit(actorSystem: ActorSystem)
  extends TestKit(actorSystem)
  with WordSpecLike
  with Eventually
  with Matchers
  with BeforeAndAfterAll {

  protected val outgoingMessageProbe = TestProbe()
  system.eventStream.subscribe(outgoingMessageProbe.ref, classOf[OutgoingMessage])

  protected def confirmOutgoingMessage(test: OutgoingMessage => Unit, timeout: FiniteDuration = 1.second): Unit = {
    outgoingMessageProbe.expectMsgClass(timeout, classOf[OutgoingMessage]) match {
      case msg: OutgoingMessage =>
        test(msg)
    }
  }

  protected def instantMessage(text: String, user: User = mockUser("123", "jshmoe")): IncomingMessage = {
    IncomingMessage(text, true, InstantMessageChannel("125", user), "1527239216000090", sentBy = UserSender(user))
  }

  protected def mockUser(id: String, name: String): User = {
    User(id, name, None, None, None, None, None, None, None, None, None, None, None, None, None, None)
  }

  protected def send(message: IncomingMessage): Unit = {
    system.eventStream.publish(message)
  }

  override protected def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
  }
}
Example 18
Source File: MongoDsl.scala From gatling-mongodb-protocol with MIT License
package com.ringcentral.gatling.mongo

import com.ringcentral.gatling.mongo.action.MongoActionBuilder
import com.ringcentral.gatling.mongo.check.MongoCheckSupport
import com.ringcentral.gatling.mongo.command.{MongoCommandBuilder, MongoDslBuilder}
import com.ringcentral.gatling.mongo.feeder.MongoFeederSource
import com.ringcentral.gatling.mongo.protocol.{MongoProtocol, MongoProtocolFieldsBuilder, MongoProtocolUriBuilder}
import io.gatling.core.action.builder.ActionBuilder
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.feeder.RecordSeqFeederBuilder
import io.gatling.core.session.Expression
import play.api.libs.json.JsObject

import scala.concurrent.duration.{FiniteDuration, _}

trait MongoDsl extends MongoCheckSupport {

  def mongo(implicit configuration: GatlingConfiguration) = MongoProtocol

  def mongo(requestName: Expression[String])(implicit configuration: GatlingConfiguration) =
    new MongoDslBuilder(requestName, configuration)

  def mongoFeeder(url: String,
                  collection: String,
                  query: String,
                  limit: Int = 100,
                  batchSize: Int = 0,
                  connectionTimeout: FiniteDuration = 5 seconds,
                  receiveTimeout: FiniteDuration = 30 seconds,
                  postProcessor: JsObject => Map[String, Any] = MongoFeederSource.defaultPostProcessor): RecordSeqFeederBuilder[Any] =
    RecordSeqFeederBuilder(MongoFeederSource(url, collection, query, limit, batchSize, connectionTimeout, receiveTimeout, postProcessor))

  implicit def mongoProtocolUriBuilder2mongoProtocol(builder: MongoProtocolUriBuilder): MongoProtocol = builder.build()

  implicit def mongoProtocolBuilder2mongoProtocol(builder: MongoProtocolFieldsBuilder): MongoProtocol = builder.build()

  implicit def mongoCommandBuilder2ActionBuilder(commandBuilder: MongoCommandBuilder)(implicit configuration: GatlingConfiguration): ActionBuilder = {
    new MongoActionBuilder(commandBuilder.build(), configuration)
  }
}
Example 19
Source File: HmacAuthMiddleware.scala From iotchain with MIT License
package jbok.network.http.server.middleware

import java.time.{Duration, Instant}

import cats.data.{Kleisli, OptionT}
import cats.effect.Sync
import jbok.network.http.server.authentication.HMAC
import org.http4s.headers.Authorization
import org.http4s.util.CaseInsensitiveString
import org.http4s.{AuthScheme, Credentials, HttpRoutes, Request, Response, Status}
import tsec.mac.jca.{HMACSHA256, MacSigningKey}

import scala.concurrent.duration.{FiniteDuration, _}

sealed abstract class HmacAuthError(val message: String) extends Exception(message)
object HmacAuthError {
  case object NoAuthHeader     extends HmacAuthError("Could not find an Authorization header")
  case object NoDatetimeHeader extends HmacAuthError("Could not find an X-Datetime header")
  case object BadMAC           extends HmacAuthError("Bad MAC")
  case object InvalidMacFormat extends HmacAuthError("The MAC is not a valid Base64 string")
  case object InvalidDatetime  extends HmacAuthError("The datetime is not a valid UTC datetime string")
  case object Timeout          extends HmacAuthError("The request time window is closed")
}

object HmacAuthMiddleware {
  val defaultDuration: FiniteDuration = 5.minutes

  private def verifyFromHeader[F[_]](
      req: Request[F],
      key: MacSigningKey[HMACSHA256],
      duration: FiniteDuration
  ): Either[HmacAuthError, Unit] =
    for {
      authHeader <- req.headers
        .get(Authorization)
        .flatMap { t =>
          t.credentials match {
            case Credentials.Token(scheme, token) if scheme == AuthScheme.Bearer =>
              Some(token)
            case _ => None
          }
        }
        .toRight(HmacAuthError.NoAuthHeader)
      datetimeHeader <- req.headers
        .get(CaseInsensitiveString("X-Datetime"))
        .toRight(HmacAuthError.NoDatetimeHeader)
      instant <- HMAC.http.verifyFromHeader(
        req.method.name,
        req.uri.renderString,
        datetimeHeader.value,
        authHeader,
        key
      )
      _ <- Either.cond(
        Instant.now().isBefore(instant.plus(Duration.ofNanos(duration.toNanos))),
        (),
        HmacAuthError.Timeout
      )
    } yield ()

  def apply[F[_]: Sync](key: MacSigningKey[HMACSHA256], duration: FiniteDuration = defaultDuration)(routes: HttpRoutes[F]): HttpRoutes[F] =
    Kleisli { req: Request[F] =>
      verifyFromHeader(req, key, duration) match {
        case Left(error) =>
          OptionT.some[F](Response[F](Status.Forbidden).withEntity(error.message))
        case Right(_) => routes(req)
      }
    }
}
Example 20
Source File: HasJwt.scala From matcher with MIT License
package com.wavesplatform.dex.it.api.websockets

import java.security
import java.security.KeyPairGenerator
import java.util.Base64

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.api.ws.protocol.WsAddressSubscribe.JwtPayload
import com.wavesplatform.dex.auth.JwtUtils
import com.wavesplatform.dex.domain.account.KeyPair
import play.api.libs.json.Json

import scala.concurrent.duration.{FiniteDuration, _}

trait HasJwt extends JwtUtils {

  protected val authServiceKeyPair: security.KeyPair = {
    val kpg = KeyPairGenerator.getInstance("RSA")
    kpg.initialize(2048)
    kpg.generateKeyPair()
  }

  protected def jwtPublicKeyConfig: Config = ConfigFactory.parseString(
    s"""waves.dex.web-sockets.external-client-handler.jwt-public-key = \"\"\"-----BEGIN PUBLIC KEY-----
       |${Base64.getEncoder.encodeToString(authServiceKeyPair.getPublic.getEncoded).grouped(64).mkString("\n")}
       |-----END PUBLIC KEY-----\"\"\"
       |""".stripMargin
  )

  protected def mkJwt(payload: JwtPayload): String = mkJwt(authServiceKeyPair, Json.toJsObject(payload))

  protected def mkJwt(clientKeyPair: KeyPair, lifetime: FiniteDuration = 1.hour): String = {
    mkJwt(mkJwtSignedPayload(clientKeyPair, lifetime = lifetime))
  }
}
Example 21
Source File: FOps.scala From matcher with MIT License
package com.wavesplatform.dex.it.fp

import java.nio.charset.StandardCharsets

import cats.syntax.apply._
import cats.syntax.either._
import cats.syntax.flatMap._
import cats.syntax.functor._
import com.softwaremill.sttp.{DeserializationError, Response}
import play.api.libs.json._

import scala.concurrent.duration.{FiniteDuration, _}
import scala.util.control.NonFatal

case class RepeatRequestOptions(delayBetweenRequests: FiniteDuration, maxAttempts: Int) {
  def decreaseAttempts: RepeatRequestOptions = copy(maxAttempts = maxAttempts - 1)
}

class FOps[F[_]](implicit M: ThrowableMonadError[F], W: CanWait[F]) {

  def repeatUntil[T](f: => F[T], options: RepeatRequestOptions = RepeatRequestOptions(1.second, 30))(stopCond: T => Boolean): F[T] =
    f.flatMap { firstResp =>
        (firstResp, options).tailRecM[F, (T, RepeatRequestOptions)] {
          case (resp, currOptions) =>
            if (stopCond(resp)) M.pure((resp, currOptions).asRight)
            else if (currOptions.maxAttempts <= 0) M.raiseError(new RuntimeException(s"All attempts are out! The last response is: $resp"))
            else W.wait(options.delayBetweenRequests).productR(f).map(x => (x, currOptions.decreaseAttempts).asLeft)
        }
      }
      .map(_._1)

  def repeatUntil[T](f: => F[T], delay: FiniteDuration)(pred: T => Boolean): F[T] =
    f.flatMap {
      _.tailRecM[F, T] { x =>
        if (pred(x)) M.pure(x.asRight)
        else W.wait(delay).productR(f).map(_.asLeft)
      }
    }

  def repeatUntilResponse[T](f: => F[Response[Either[DeserializationError[JsError], T]]], delay: FiniteDuration)(
      pred: Response[Either[DeserializationError[JsError], T]] => Boolean): F[T] =
    repeatUntil(f, delay)(pred).flatMap(parseResponse)

  def parseResponse[T](resp: Response[Either[DeserializationError[JsError], T]]): F[T] =
    resp.rawErrorBody match {
      case Left(e) =>
        M.raiseError[T](
          new RuntimeException(s"The server returned an error. HTTP code is ${resp.code}, body: ${new String(e, StandardCharsets.UTF_8)}"))
      case Right(Left(error)) => M.raiseError[T](new RuntimeException(s"Can't parse the response: $error"))
      case Right(Right(r))    => M.pure(r)
    }

  def parseTryResponse[E: Reads, T](resp: Response[T]): F[Either[E, T]] = resp.rawErrorBody match {
    case Right(r) => M.pure(Right(r))
    case Left(bytes) =>
      try Json.parse(bytes).validate[E] match {
        case JsSuccess(x, _) => M.pure(Left(x))
        case JsError(e)      => M.raiseError[Either[E, T]](JsResultException(e))
      } catch {
        case NonFatal(e) =>
          M.raiseError[Either[E, T]](new RuntimeException(s"The server returned an error: ${resp.code}, also can't parse as MatcherError", e))
      }
  }

  def parseTryResponseEither[E: Reads, T](resp: Response[Either[DeserializationError[JsError], T]]): F[Either[E, T]] = resp.rawErrorBody match {
    case Right(Right(r)) => M.pure(Right(r))
    case Right(Left(e))  => M.raiseError[Either[E, T]](new RuntimeException(s"The server returned success, but can't parse response: $e"))
    case Left(bytes) =>
      try Json.parse(bytes).validate[E] match {
        case JsSuccess(x, _) => M.pure(Left(x))
        case JsError(e)      => M.raiseError[Either[E, T]](JsResultException(e))
      } catch {
        case NonFatal(e) =>
          M.raiseError[Either[E, T]](new RuntimeException(s"The server returned an error: ${resp.code}, also can't parse as MatcherError", e))
      }
  }
}

object FOps {
  def apply[F[_]: CanWait: ThrowableMonadError]: FOps[F] = new FOps[F]
}
Example 22
Source File: IngestorRegistryEndpoint.scala From hydra with Apache License 2.0
package hydra.ingest.http

import akka.actor.ActorSystem
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.util.Timeout
import hydra.common.config.ConfigSupport
import ConfigSupport._
import hydra.core.http.RouteSupport
import hydra.ingest.bootstrap.HydraIngestorRegistryClient
import hydra.ingest.services.IngestorRegistry.{FindAll, LookupResult}

import scala.concurrent.duration.{FiniteDuration, _}

class IngestorRegistryEndpoint()(implicit system: ActorSystem)
    extends RouteSupport
    with HydraIngestJsonSupport
    with ConfigSupport {

  private val registryLookupTimeout = applicationConfig
    .getDurationOpt("ingest.service-lookup.timeout")
    .getOrElse(5.seconds)

  lazy val registry = HydraIngestorRegistryClient(applicationConfig).registry

  private implicit val timeout = Timeout(registryLookupTimeout)

  override val route: Route =
    path("ingestors" ~ Slash.?) {
      get {
        onSuccess(registry ? FindAll) {
          case response: LookupResult => complete(response.ingestors)
        }
      }
    }
}
Example 23
Source File: RoleLeaderAutoDowningRoles.scala From akka-cluster-custom-downing with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config

import scala.collection.JavaConverters._
import scala.concurrent.duration.{FiniteDuration, _}

final class RoleLeaderAutoDowningRoles(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  private val config: Config = system.settings.config

  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  override def downingActorProps: Option[Props] = {
    val stableAfter = system.settings.config.getDuration("custom-downing.stable-after").toMillis millis
    val leaderRole = system.settings.config.getString("custom-downing.role-leader-auto-downing-roles.leader-role")
    val roles = system.settings.config.getStringList("custom-downing.role-leader-auto-downing-roles.target-roles").asScala.toSet
    if (roles.isEmpty) None
    else Some(RoleLeaderAutoDownRoles.props(leaderRole, roles, stableAfter))
  }
}

private[autodown] object RoleLeaderAutoDownRoles {
  def props(leaderRole: String, targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration): Props =
    Props(classOf[RoleLeaderAutoDownRoles], leaderRole, targetRoles, autoDownUnreachableAfter)
}

private[autodown] class RoleLeaderAutoDownRoles(leaderRole: String, targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration)
    extends RoleLeaderAutoDownRolesBase(leaderRole, targetRoles, autoDownUnreachableAfter)
    with ClusterCustomDowning {

  override def down(node: Address): Unit = {
    log.info("RoleLeader is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }
}
Example 24
Source File: LeaderAutoDowningRoles.scala From akka-cluster-custom-downing with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config

import scala.collection.JavaConverters._
import scala.concurrent.duration.{FiniteDuration, _}

final class LeaderAutoDowningRoles(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  private val config: Config = system.settings.config

  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  override def downingActorProps: Option[Props] = {
    val stableAfter = system.settings.config.getDuration("custom-downing.stable-after").toMillis millis
    val roles = system.settings.config.getStringList("custom-downing.leader-auto-downing-roles.target-roles").asScala.toSet
    if (roles.isEmpty) None else Some(LeaderAutoDownRoles.props(roles, stableAfter))
  }
}

private[autodown] object LeaderAutoDownRoles {
  def props(targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration): Props =
    Props(classOf[LeaderAutoDownRoles], targetRoles, autoDownUnreachableAfter)
}

private[autodown] class LeaderAutoDownRoles(targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration)
    extends LeaderAutoDownRolesBase(targetRoles, autoDownUnreachableAfter)
    with ClusterCustomDowning {

  override def down(node: Address): Unit = {
    log.info("Leader is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }
}
Example 25
Source File: CarbonClient.scala From akka-http-metrics with Apache License 2.0
package fr.davit.akka.http.metrics.graphite

import java.time.{Clock, Instant}

import akka.NotUsed
import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.scaladsl.{Flow, Keep, RestartFlow, Sink, Source, Tcp}
import akka.stream.{OverflowStrategy, QueueOfferResult}
import akka.util.ByteString
import fr.davit.akka.http.metrics.core.Dimension

import scala.concurrent.Await
import scala.concurrent.duration.{Duration, _}

object CarbonClient {
  def apply(host: String, port: Int)(implicit system: ActorSystem): CarbonClient =
    new CarbonClient(host, port)
}

class CarbonClient(host: String, port: Int)(implicit system: ActorSystem) extends AutoCloseable {

  private val logger = Logging(system.eventStream, classOf[CarbonClient])
  protected val clock: Clock = Clock.systemUTC()

  private def serialize[T](name: String, value: T, dimensions: Seq[Dimension], ts: Instant): ByteString = {
    val tags = dimensions.map(d => d.key + "=" + d.value).toList
    val taggedMetric = (name :: tags).mkString(";")
    ByteString(s"$taggedMetric $value ${ts.getEpochSecond}\n")
  }

  // TODO read backoff from config
  private def connection: Flow[ByteString, ByteString, NotUsed] =
    RestartFlow.withBackoff(
      minBackoff = 3.seconds,
      maxBackoff = 30.seconds,
      randomFactor = 0.2, // adds 20% "noise" to vary the intervals slightly
      maxRestarts = -1    // keep retrying forever
    )(() => Tcp().outgoingConnection(host, port))

  private val queue = Source
    .queue[ByteString](19, OverflowStrategy.dropHead)
    .via(connection)
    .toMat(Sink.ignore)(Keep.left)
    .run()

  def publish[T](
      name: String,
      value: T,
      dimensions: Seq[Dimension] = Seq.empty,
      ts: Instant = Instant.now(clock)
  ): Unit = {
    // it's reasonable to block until the message is enqueued
    Await.result(queue.offer(serialize(name, value, dimensions, ts)), Duration.Inf) match {
      case QueueOfferResult.Enqueued    => logger.debug("Metric {} enqueued", name)
      case QueueOfferResult.Dropped     => logger.debug("Metric {} dropped", name)
      case QueueOfferResult.Failure(e)  => logger.error(e, s"Failed publishing metric $name")
      case QueueOfferResult.QueueClosed => throw new Exception("Failed publishing metric to closed carbon client")
    }
  }

  override def close(): Unit = {
    queue.complete()
    Await.result(queue.watchCompletion(), Duration.Inf)
  }
}
Example 26
Source File: BackOffSupervision.scala From schedoscope with Apache License 2.0
package org.schedoscope.scheduler.utils

import akka.actor.{ActorRef, ActorSystem}
import org.slf4j.LoggerFactory

import scala.collection.mutable.HashMap
import scala.concurrent.duration.{FiniteDuration, _}

// NOTE: this snippet is excerpted; the enclosing BackOffSupervision class,
// which defines `managerName`, `log` and the `actorBackOffWaitTime` HashMap
// used below, was dropped during extraction.
def manageActorLifecycle(managedActor: ActorRef,
                         backOffSlotTime: FiniteDuration = null,
                         backOffMinimumDelay: FiniteDuration = null): FiniteDuration = {
  val managedActorName = managedActor.path.toStringWithoutAddress

  if (actorBackOffWaitTime.contains(managedActorName)) {
    val newBackOff = actorBackOffWaitTime(managedActorName).nextBackOff
    actorBackOffWaitTime.put(managedActorName, newBackOff)
    log.warn(s"$managerName: Set new back-off waiting " +
      s"time to value ${newBackOff.backOffWaitTime} for rebooted actor ${managedActorName}; " +
      s"(retries=${newBackOff.retries}, resets=${newBackOff.resets}, total-retries=${newBackOff.totalRetries})")
    // schedule tick response based on backoff
    newBackOff.backOffWaitTime
  } else {
    val backOff = ExponentialBackOff(backOffSlotTime = backOffSlotTime, constantDelay = backOffMinimumDelay)
    log.debug(s"$managerName: Set initial back-off waiting " +
      s"time to value ${backOff.backOffWaitTime} for booted actor ${managedActorName}; " +
      s"(retries=${backOff.retries}, resets=${backOff.resets}, total-retries=${backOff.totalRetries})")
    actorBackOffWaitTime.put(managedActorName, backOff)
    // schedule immediate tick response
    0 millis
  }
}
Example 27
Source File: TrackerMap.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services.tracking

import java.util.concurrent.atomic.AtomicReference

import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.v1.command_service.SubmitAndWaitRequest
import com.daml.ledger.api.v1.completion.Completion
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import org.slf4j.LoggerFactory

import scala.collection.immutable.HashMap
import scala.concurrent.duration.{FiniteDuration, _}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

final class AsyncResource[T <: AutoCloseable](future: Future[T]) {
  private val logger = LoggerFactory.getLogger(this.getClass)

  // Must progress Waiting => Ready => Closed or Waiting => Closed.
  val state: AtomicReference[AsyncResourceState[T]] = new AtomicReference(Waiting)

  future.andThen({
    case Success(t) =>
      if (!state.compareAndSet(Waiting, Ready(t))) {
        // This is the punch line of AsyncResource.
        // If we've been closed in the meantime, we must close the underlying resource also.
        // This "on-failure-to-complete" behavior is not present in scala or java Futures.
        t.close()
      }
    // Someone should be listening to this failure downstream
    // TODO(mthvedt): Refactor so at least one downstream listener is always present,
    // and exceptions are never dropped.
    case Failure(ex) =>
      logger.error("failure to get async resource", ex)
      state.set(Closed)
  })(DirectExecutionContext)

  def flatMap[U](f: T => Future[U])(implicit ex: ExecutionContext): Future[U] = {
    state.get() match {
      case Waiting  => future.flatMap(f)
      case Closed   => throw new IllegalStateException()
      case Ready(t) => f(t)
    }
  }

  def map[U](f: T => U)(implicit ex: ExecutionContext): Future[U] =
    flatMap(t => Future.successful(f(t)))

  def ifPresent[U](f: T => U): Option[U] = state.get() match {
    case Ready(t) => Some(f(t))
    case _        => None
  }

  def close(): Unit = state.getAndSet(Closed) match {
    case Ready(t) => t.close()
    case _        =>
  }
}

// NOTE: excerpted; this factory belongs to the companion `object TrackerMap`,
// which (along with the TrackerMap class itself) was dropped during extraction.
def apply(retentionPeriod: FiniteDuration)(implicit logCtx: LoggingContext): TrackerMap =
  new TrackerMap(retentionPeriod)
Example 28
Source File: GlobalAppConfig.scala From mqtt-mongo with MIT License
package com.izmailoff.mm.config

import java.util.concurrent.TimeUnit

import com.izmailoff.mm.util.HoconMap
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration.{FiniteDuration, _}

object GlobalAppConfig {

  val config = ConfigFactory.load()

  object Application {

    object MqttBroker {
      private lazy val brokerConf = config.getConfig("application.mqttBroker")
      lazy val url = brokerConf.getString("url")
      lazy val userName = brokerConf.getString("userName")
      lazy val password = brokerConf.getString("password")
      lazy val stashTimeToLive: FiniteDuration =
        brokerConf.getDuration("stashTimeToLive", TimeUnit.SECONDS) seconds
      lazy val stashCapacity = brokerConf.getInt("stashCapacity")
      lazy val reconnectDelayMin: FiniteDuration =
        brokerConf.getDuration("reconnectDelayMin", TimeUnit.SECONDS) seconds
      lazy val reconnectDelayMax: FiniteDuration =
        brokerConf.getDuration("reconnectDelayMax", TimeUnit.SECONDS) seconds
    }

    object Mongo {
      private lazy val mongoConf = config.getConfig("application.mongo")
      lazy val host = mongoConf.getString("host")
      lazy val port = mongoConf.getInt("port")
      lazy val dbName = mongoConf.getString("dbName")
    }

    object MqttMongo {
      private lazy val mqttMongoConf = config.getConfig("application.mqttMongo")
      lazy val topicsToCollectionsMappings: Map[String, Set[String]] =
        HoconMap.getMap(identity(_), getElems, mqttMongoConf, "topicsToCollectionsMappings").withDefaultValue(Set.empty)
      val getElems: String => Set[String] =
        _.split(";").toList.map(_.trim).filter(!_.isEmpty).toSet
      lazy val serializationFormat =
        SerializationFormat.withName(mqttMongoConf.getString("serializationFormat"))
    }
  }
}
Example 29
Source File: KubernetesSettings.scala From akka-management with Apache License 2.0
package akka.coordination.lease.kubernetes

import akka.actor.ActorSystem
import akka.annotation.InternalApi
import akka.coordination.lease.TimeoutSettings
import akka.util.JavaDurationConverters._
import com.typesafe.config.Config

import scala.concurrent.duration.{ FiniteDuration, _ }

@InternalApi
private[akka] class KubernetesSettings(
    val apiCaPath: String,
    val apiTokenPath: String,
    val apiServerHost: String,
    val apiServerPort: Int,
    val namespace: Option[String],
    val namespacePath: String,
    val apiServerRequestTimeout: FiniteDuration,
    val secure: Boolean = true,
    val bodyReadTimeout: FiniteDuration = 1.second)
Example 30
Source File: Retry.scala From akka-cloudpubsub with Apache License 2.0
package com.qubit.pubsub

import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration.{FiniteDuration, _}
import scala.util.{Random, Try}

object Retry extends LazyLogging {
  def apply[T](operationName: String,
               operation: () => Try[T],
               maxRetries: Int,
               retryDelay: FiniteDuration,
               minJitterSecs: Int,
               maxJitterSecs: Int): Try[T] = {
    var successful: Boolean = false
    var attemptNum: Int = 1
    var attemptResult: Try[T] = operation()

    while ((!successful) && (attemptNum <= maxRetries)) {
      if (attemptResult.isFailure) {
        logger.warn(s"Attempt failed for operation [$operationName]", attemptResult.failed.get)
        val retryWaitTime = (retryDelay * attemptNum) +
          (Random.nextInt(maxJitterSecs - minJitterSecs) + minJitterSecs).seconds
        attemptNum += 1
        logger.info(s"Retrying [$operationName] in ${retryWaitTime.toSeconds} seconds (attempt $attemptNum)")
        Try(Thread.sleep(retryWaitTime.toMillis))
        attemptResult = operation()
      } else {
        successful = true
      }
    }

    attemptResult
  }
}
Example 31
Source File: ServerExecutor.scala From graphcool-framework with Apache License 2.0 | 5 votes |
package cool.graph.akkautil.http

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer
import ch.megard.akka.http.cors.scaladsl.CorsDirectives
import ch.megard.akka.http.cors.scaladsl.CorsDirectives._

import scala.concurrent.duration.{Duration, _}
import scala.concurrent.{Await, Future}

case class ServerExecutor(port: Int, servers: Server*)(implicit system: ActorSystem, materializer: ActorMaterializer) {
  import system.dispatcher

  val routes: Route = {
    handleRejections(CorsDirectives.corsRejectionHandler) {
      cors() {
        val routes = servers.map(_.routes) :+ statusRoute
        routes.reduceLeft(_ ~ _)
      }
    }
  }

  def statusRoute: Route = (get & path("status")) {
    val checks = Future.sequence(servers.map(_.healthCheck))
    onSuccess(checks) { _ =>
      complete("OK")
    }
  }

  lazy val serverBinding: Future[ServerBinding] = {
    val binding = Http().bindAndHandle(Route.handlerFlow(routes), "0.0.0.0", port)
    binding.onSuccess { case b => println(s"Server running on :${b.localAddress.getPort}") }
    binding
  }

  def start: Future[_] = Future.sequence[Any, Seq](servers.map(_.onStart) :+ serverBinding)
  def stop: Future[_]  = Future.sequence[Any, Seq](servers.map(_.onStop) :+ serverBinding.map(_.unbind))

  // Starts the server and blocks the calling thread until the underlying actor system terminates.
  def startBlocking(duration: Duration = 15.seconds): Unit = {
    start
    Await.result(system.whenTerminated, Duration.Inf)
  }

  def stopBlocking(duration: Duration = 15.seconds): Unit = Await.result(stop, duration)
}
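The Server trait itself is not shown in this snippet; its members below are inferred from the calls above, so treat this stub as an assumption. It shows the minimum needed to wire a route into ServerExecutor.

import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route

import scala.concurrent.Future

// Inferred shape of Server (hypothetical; derived from how ServerExecutor uses it).
trait Server {
  def routes: Route
  def healthCheck: Future[_]
  def onStart: Future[_]
  def onStop: Future[_]
}

case class PingServer() extends Server {
  val routes: Route = path("ping")(complete("pong"))
  def healthCheck: Future[_] = Future.successful("OK")
  def onStart: Future[_] = Future.successful(())
  def onStop: Future[_] = Future.successful(())
}

// ServerExecutor(8080, PingServer()).startBlocking()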
Example 32
Source File: TitForTatThrottle.scala From CM-Well with Apache License 2.0 | 5 votes |
package cmwell.tools.data.utils.akka

import akka.stream.ThrottleMode.{Enforcing, Shaping}
import akka.stream.stage.GraphStage
import akka.stream.stage._
import akka.stream.{Attributes, RateExceededException, ThrottleMode, _}
import akka.util.NanoTimeTokenBucket

import scala.concurrent.duration.{FiniteDuration, _}

class TitForTatThrottle[T] extends GraphStage[FlowShape[T, T]] {

  val in = Inlet[T]("TitForTatThrottle.in")
  val out = Outlet[T]("TitForTatThrottle.out")

  override val shape = FlowShape(in, out)

  // There is some loss of precision here because of rounding, but this only happens if nanosBetweenTokens is very
  // small, which is usually at rates where that precision is highly unlikely anyway, as the overhead of this stage
  // is likely higher than the required accuracy interval.
  // private val nanosBetweenTokens = per.toNanos / cost

  private val timerName: String = "ThrottleTimer"

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimerGraphStageLogic(shape) {
    // private val tokenBucket = new NanoTimeTokenBucket(maximumBurst, nanosBetweenTokens)
    var willStop = false
    var currentElement: T = _

    // This scope is here just to not retain an extra reference to the handler below.
    // We can't put this code into preRestart() because setHandler() must be called before that.
    {
      val handler = new InHandler with OutHandler {
        var timeOfPreviousElement = System.currentTimeMillis()

        override def onUpstreamFinish(): Unit =
          if (isAvailable(out) && isTimerActive(timerName)) willStop = true
          else completeStage()

        override def onPush(): Unit = {
          val elem = grab(in)
          val now = System.currentTimeMillis()
          // Replay upstream's cadence: delay each element by the gap observed
          // between it and its predecessor.
          val delayMillis = now - timeOfPreviousElement
          timeOfPreviousElement = now

          if (delayMillis == 0L) push(out, elem)
          else {
            currentElement = elem
            System.err.println(s"scheduled push in ${delayMillis.milliseconds}")
            scheduleOnce(timerName, delayMillis.milliseconds)
          }
        }

        override def onPull(): Unit = pull(in)
      }

      setHandler(in, handler)
      setHandler(out, handler)
      // After this point, we no longer need the `handler` so it can just fall out of scope.
    }

    override protected def onTimer(key: Any): Unit = {
      push(out, currentElement)
      currentElement = null.asInstanceOf[T]
      if (willStop) completeStage()
    }
  }

  override def toString = "Throttle"
}
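A wiring sketch under invented stream contents: the stage re-emits elements with roughly the same inter-arrival gaps they had upstream, so a 250ms tick source comes out at about 250ms spacing.

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Sink, Source}

import scala.concurrent.duration._

implicit val system: ActorSystem = ActorSystem("tit-for-tat")
implicit val materializer: ActorMaterializer = ActorMaterializer()

Source.tick(0.seconds, 250.millis, "tick")
  .via(Flow.fromGraph(new TitForTatThrottle[String]))
  .runWith(Sink.foreach(println))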
Example 33
Source File: S3ObjectUploader.scala From CM-Well with Apache License 2.0 | 5 votes |
package cmwell.tools.neptune.export

import java.io._
import java.util
import java.util.concurrent.{Executors, TimeoutException}
import java.util.stream.Collectors
import java.util.{Collections, Vector}

import com.amazonaws.auth.profile.ProfileCredentialsProvider
import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.amazonaws.services.s3.model.{ObjectMetadata, PutObjectRequest}
import com.amazonaws.{AmazonServiceException, ClientConfiguration, Protocol, SdkClientException}
import org.apache.commons.io.{FileUtils, IOUtils}
import org.slf4j.LoggerFactory

import scala.concurrent.duration.{FiniteDuration, _}
import scala.concurrent.{Await, ExecutionContext, Future}

object S3ObjectUploader {

  val executor = Executors.newFixedThreadPool(1)
  implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.fromExecutor(executor)
  protected lazy val logger = LoggerFactory.getLogger("s3_uploader")

  def init(proxyHost: Option[String], proxyPort: Option[Int]) = {
    val clientRegion = "us-east-1"
    val config = new ClientConfiguration
    config.setProtocol(Protocol.HTTPS)
    proxyHost.foreach(host => config.setProxyHost(host))
    proxyPort.foreach(port => config.setProxyPort(port))
    val s3Client = AmazonS3ClientBuilder.standard()
      .withRegion(clientRegion)
      .withClientConfiguration(config)
      .withCredentials(new ProfileCredentialsProvider())
      .build()
    s3Client
  }

  def persistChunkToS3Bucket(chunkData: String,
                             fileName: String,
                             proxyHost: Option[String],
                             proxyPort: Option[Int],
                             s3Directory: String) =
    try {
      init(proxyHost, proxyPort).putObject(s3Directory, fileName, chunkData)
    } catch {
      case e: AmazonServiceException =>
        e.printStackTrace()
        throw e
      case e: SdkClientException =>
        e.printStackTrace()
        throw e
    }

  def persistChunkToS3Bucket(tmpFile: File,
                             proxyHost: Option[String],
                             proxyPort: Option[Int],
                             s3Directory: String,
                             retryCount: Int = 3): Unit =
    try {
      // Bound the upload to 5 minutes; a TimeoutException triggers a retry below.
      val s3UploadTask = Future { init(proxyHost, proxyPort).putObject(s3Directory, tmpFile.getName, tmpFile) }(ec)
      Await.result(s3UploadTask, 5.minutes)
      tmpFile.delete()
    } catch {
      case e: TimeoutException =>
        if (retryCount > 0) {
          logger.error("S3 upload task ran for more than 5 minutes. Going to retry.")
          persistChunkToS3Bucket(tmpFile, proxyHost, proxyPort, s3Directory, retryCount - 1)
        } else {
          throw new Exception("S3 upload task duration was more than 5 minutes")
        }
      case e: AmazonServiceException =>
        e.printStackTrace()
        throw e
      case e: SdkClientException =>
        e.printStackTrace()
        throw e
    }
}
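An invocation sketch; the bucket name, proxy, and file paths are made up. The first overload uploads an in-memory chunk directly; the second uploads a temp file under the built-in three-attempt timeout retry.

import java.io.File

S3ObjectUploader.persistChunkToS3Bucket(
  chunkData = """{"subject":"example"}""",
  fileName = "chunk-000001.json",
  proxyHost = Some("proxy.internal"),
  proxyPort = Some(8080),
  s3Directory = "my-export-bucket"
)

S3ObjectUploader.persistChunkToS3Bucket(
  tmpFile = new File("/tmp/chunk-000002.json"),
  proxyHost = None,
  proxyPort = None,
  s3Directory = "my-export-bucket"
)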
Example 34
Source File: RollingFileLogger.scala From odin with Apache License 2.0 | 4 votes |
package io.odin.loggers

import java.nio.file.{Files, Path, Paths}
import java.time.format.DateTimeFormatter
import java.time.{Instant, LocalDateTime}
import java.util.TimeZone
import java.util.concurrent.TimeUnit

import cats.Monad
import cats.effect.concurrent.Ref
import cats.effect.{Concurrent, ContextShift, Fiber, Resource, Timer}
import cats.syntax.all._
import io.odin.formatter.Formatter
import io.odin.{Level, Logger, LoggerMessage}

import scala.concurrent.duration.{FiniteDuration, _}

object RollingFileLogger {

  def apply[F[_]](
      fileNamePattern: LocalDateTime => String,
      maxFileSizeInBytes: Option[Long],
      rolloverInterval: Option[FiniteDuration],
      formatter: Formatter,
      minLevel: Level
  )(implicit F: Concurrent[F], timer: Timer[F], cs: ContextShift[F]): Resource[F, Logger[F]] = {
    new RollingFileLoggerFactory(
      fileNamePattern,
      maxFileSizeInBytes,
      rolloverInterval,
      formatter,
      minLevel,
      FileLogger.apply[F]
    ).mk
  }

  private[odin] class RefLogger[F[_]: Timer: Monad](
      current: Ref[F, Logger[F]],
      override val minLevel: Level
  ) extends DefaultLogger[F](minLevel) {

    def log(msg: LoggerMessage): F[Unit] = current.get.flatMap(_.log(msg))

    override def log(msgs: List[LoggerMessage]): F[Unit] = current.get.flatMap(_.log(msgs))
  }

  private[odin] class RollingFileLoggerFactory[F[_]](
      fileNamePattern: LocalDateTime => String,
      maxFileSizeInBytes: Option[Long],
      rolloverInterval: Option[FiniteDuration],
      formatter: Formatter,
      minLevel: Level,
      underlyingLogger: (String, Formatter, Level) => Resource[F, Logger[F]],
      fileSizeCheck: Path => Long = Files.size
  )(implicit F: Concurrent[F], timer: Timer[F], cs: ContextShift[F]) {

    val df: DateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd-HH-mm-ss")

    def mk: Resource[F, Logger[F]] = {
      val logger = for {
        ((logger, watcherFiber), release) <- allocate.allocated
        refLogger <- Ref.of(logger)
        refRelease <- Ref.of(release)
        _ <- F.start(rollingLoop(watcherFiber, refLogger, refRelease))
      } yield {
        (new RefLogger(refLogger, minLevel), refRelease)
      }
      Resource.make(logger)(_._2.get.flatten).map {
        case (logger, _) => logger
      }
    }

    def now: F[Long] = timer.clock.realTime(TimeUnit.MILLISECONDS)

    def rollingLoop(watcher: Fiber[F, Unit], logger: Ref[F, Logger[F]], release: Ref[F, F[Unit]]): F[Unit] =
      for {
        _ <- watcher.join
        oldRelease <- release.get
        ((newLogger, newWatcher), newRelease) <- allocate.allocated
        _ <- logger.set(newLogger)
        _ <- release.set(newRelease)
        _ <- oldRelease
        _ <- rollingLoop(newWatcher, logger, release)
      } yield ()
  }
}
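A usage sketch with cats-effect IO, assuming the cats-effect 2 Timer/ContextShift setup this snippet targets. The file name pattern, size limit, and interval are invented: roll the file hourly or once it exceeds about 10 MiB.

import cats.effect.{ContextShift, IO, Timer}
import io.odin.Level
import io.odin.formatter.Formatter

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

val loggerResource = RollingFileLogger[IO](
  fileNamePattern = dt => s"app-${dt.toLocalDate}.log",
  maxFileSizeInBytes = Some(10L * 1024 * 1024),
  rolloverInterval = Some(1.hour),
  formatter = Formatter.default,
  minLevel = Level.Info
)

// loggerResource.use(logger => logger.info("rolled and rotated"))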