scala.concurrent.duration.FiniteDuration Scala Examples

The following examples show how to use scala.concurrent.duration.FiniteDuration. Each example is taken from an open-source project; the project and source file are noted in the heading above the code.
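Before the project examples, here is a quick self-contained sketch of the two usual ways to build a FiniteDuration: the DSL that DurationInt enables, and the direct constructor taking a length and a TimeUnit.

import java.util.concurrent.TimeUnit

import scala.concurrent.duration.{DurationInt, FiniteDuration}

object FiniteDurationDemo extends App {
  val viaDsl: FiniteDuration = 30.seconds // DurationInt adds .seconds, .millis, .minutes, ...
  val viaConstructor = FiniteDuration(500, TimeUnit.MILLISECONDS)
  println(viaDsl + viaConstructor) // durations support exact arithmetic: 30500 milliseconds
  println(viaDsl.toMillis)         // convert back to a plain Long: 30000
}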
Example 1
Source File: NoOpHttpClient.scala    From cornichon   with Apache License 2.0 6 votes
package com.github.agourlay.cornichon.http.client

import cats.Show
import cats.data.EitherT
import cats.syntax.either._
import com.github.agourlay.cornichon.core.Done
import com.github.agourlay.cornichon.http.{ CornichonHttpResponse, HttpRequest, HttpStreamedRequest }
import monix.eval.Task
import org.http4s.EntityEncoder

import scala.concurrent.duration.FiniteDuration

class NoOpHttpClient extends HttpClient {

  def runRequest[A: Show](cReq: HttpRequest[A], t: FiniteDuration)(implicit ee: EntityEncoder[Task, A]) =
    EitherT.apply(Task.now(CornichonHttpResponse(200, Nil, "NoOpBody").asRight))

  def openStream(req: HttpStreamedRequest, t: FiniteDuration) =
    Task.now(CornichonHttpResponse(200, Nil, "NoOpBody").asRight)

  def shutdown() =
    Done.taskDone

  def paramsFromUrl(url: String) =
    Right(Nil)
} 
Example 2
Source File: TimeBoundObserver.scala    From daml   with Apache License 2.0 6 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.testing

import com.daml.timer.Delayed
import io.grpc.Context
import io.grpc.stub.StreamObserver

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future, Promise}

final class TimeBoundObserver[T](duration: FiniteDuration)(
    implicit executionContext: ExecutionContext)
    extends StreamObserver[T] {

  private val promise = Promise[Vector[T]]
  private val buffer = Vector.newBuilder[T]

  Delayed.by(duration)(onCompleted())

  def result: Future[Vector[T]] = promise.future

  override def onNext(value: T): Unit = {
    buffer += value
  }

  override def onError(t: Throwable): Unit = {
    val _ = promise.tryFailure(t)
  }

  override def onCompleted(): Unit = {
    val _succeeded = promise.trySuccess(buffer.result())
    val _cancelled = Context.current().withCancellation().cancel(null)
  }
} 
Example 3
Source File: Components.scala    From gbf-raidfinder   with MIT License 6 votes
package walfie.gbf.raidfinder.server

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.trueaccord.scalapb.json.JsonFormat
import monix.execution.Scheduler
import play.api.BuiltInComponents
import play.api.http.{ContentTypes, DefaultHttpErrorHandler}
import play.api.libs.json.Json
import play.api.Mode.Mode
import play.api.mvc._
import play.api.routing.Router
import play.api.routing.sird._
import play.core.server._
import play.filters.cors.{CORSConfig, CORSFilter}
import play.filters.gzip.GzipFilterComponents
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.Future
import walfie.gbf.raidfinder.protocol.{RaidBossesResponse, BinaryProtobuf}
import walfie.gbf.raidfinder.RaidFinder
import walfie.gbf.raidfinder.server.controller._
import walfie.gbf.raidfinder.server.syntax.ProtocolConverters.RaidBossDomainOps

class Components(
  raidFinder:                 RaidFinder[BinaryProtobuf],
  translator:                 BossNameTranslator,
  port:                       Int,
  mode:                       Mode,
  websocketKeepAliveInterval: FiniteDuration,
  metricsCollector:           MetricsCollector
) extends NettyServerComponents
  with BuiltInComponents with GzipFilterComponents with Controller {

  override lazy val serverConfig = ServerConfig(port = Some(port), mode = mode)

  private val corsFilter = new CORSFilter(corsConfig = CORSConfig().withAnyOriginAllowed)
  override lazy val httpFilters = List(gzipFilter, corsFilter)

  lazy val websocketController = new WebsocketController(
    raidFinder, translator, websocketKeepAliveInterval, metricsCollector
  )(actorSystem, materializer, Scheduler.Implicits.global)

  // The charset isn't necessary, but without it, Chrome displays Japanese incorrectly
  // if you try to view the JSON directly.
  // https://bugs.chromium.org/p/chromium/issues/detail?id=438464
  private val ContentTypeJsonWithUtf8 = "application/json; charset=utf-8"

  lazy val router = Router.from {
    case GET(p"/") =>
      controllers.Assets.at(path = "/public", "index.html")

    case GET(p"/api/bosses.json" ? q_s"name=$names") =>
      val bosses = if (names.nonEmpty) {
        val knownBossesMap = raidFinder.getKnownBosses
        names.collect(knownBossesMap)
      } else raidFinder.getKnownBosses.values

      val responseProtobuf = RaidBossesResponse(
        raidBosses = bosses.map(_.toProtocol(translator)).toSeq
      )
      val responseJson = JsonFormat.toJsonString(responseProtobuf)
      Action(Ok(responseJson).as(ContentTypeJsonWithUtf8))

    case GET(p"/api/metrics.json") =>
      val activeUsers = metricsCollector.getActiveWebSocketCount()
      val json = Json.obj("activeUsers" -> activeUsers)
      Action(Ok(json))

    case GET(p"/ws/raids" ? q_o"keepAlive=${ bool(keepAlive) }") =>
      websocketController.raids(keepAlive = keepAlive.getOrElse(false))

    case GET(p"/$file*") =>
      controllers.Assets.at(path = "/public", file = file)
  }

  override lazy val httpErrorHandler = new ErrorHandler

  override def serverStopHook = () => Future.successful {
    actorSystem.terminate()
  }
} 
Example 4
Source File: JScheduledExecutorServiceWrapper.scala    From gfc-concurrent   with Apache License 2.0 5 votes
package com.gilt.gfc.concurrent

import java.util.concurrent.{TimeUnit, ScheduledFuture, Delayed, Callable, ScheduledExecutorService => JScheduledExecutorService}
import scala.concurrent.{Future, ExecutionContext}
import scala.concurrent.duration.{Duration, FiniteDuration}


trait JScheduledExecutorServiceWrapper extends JExecutorServiceWrapper with AsyncScheduledExecutorService {
  override def executorService: JScheduledExecutorService

  override def scheduleWithFixedDelay(r: Runnable, initialDelay: Long, delay: Long, timeUnit: TimeUnit): ScheduledFuture[_] = executorService.scheduleWithFixedDelay(r, initialDelay, delay, timeUnit)
  override def scheduleAtFixedRate(r: Runnable, initialDelay: Long, period: Long, timeUnit: TimeUnit): ScheduledFuture[_] = executorService.scheduleAtFixedRate(r, initialDelay, period, timeUnit)
  override def schedule[V](c: Callable[V], delay: Long, timeUnit: TimeUnit): ScheduledFuture[V] = executorService.schedule(c, delay, timeUnit)
  override def schedule(r: Runnable, delay: Long, timeUnit: TimeUnit): ScheduledFuture[_] = executorService.schedule(r, delay, timeUnit)

  override def scheduleWithFixedDelay(initialDelay: FiniteDuration, delay: FiniteDuration)(f: => Unit): ScheduledFuture[_] = {
    scheduleWithFixedDelay(initialDelay.toMillis, delay.toMillis, TimeUnit.MILLISECONDS)(f)
  }

  override def scheduleWithFixedDelay(initialDelay: Long, delay: Long, timeUnit: TimeUnit)(f: => Unit): ScheduledFuture[_] = {
    scheduleWithFixedDelay(asRunnable(f), initialDelay, delay, timeUnit)
  }

  override def scheduleAtFixedRate(initialDelay: FiniteDuration, period: FiniteDuration)(f: => Unit): ScheduledFuture[_] = {
    scheduleAtFixedRate(initialDelay.toMillis, period.toMillis, TimeUnit.MILLISECONDS)(f)
  }

  override def scheduleAtFixedRate(initialDelay: Long, period: Long, timeUnit: TimeUnit)(f: => Unit): ScheduledFuture[_] = {
    scheduleAtFixedRate(asRunnable(f), initialDelay, period, timeUnit)
  }

  override def schedule[V](delay: FiniteDuration)(f: => V): ScheduledFuture[V] = {
    schedule(delay.toMillis, TimeUnit.MILLISECONDS)(f)
  }

  override def schedule[V](delay: Long, timeUnit: TimeUnit)(f: => V): ScheduledFuture[V] = {
    schedule(asCallable(f), delay, timeUnit)
  }

  override def asyncSchedule(initialDelay: FiniteDuration, delayUntilNext: FiniteDuration => FiniteDuration)
                            (futureTask: => Future[_])
                            (implicit executor: ExecutionContext): ScheduledFuture[_] = {
    val wrapper: ScheduledFutureWrapper[Unit] = new ScheduledFutureWrapper()
    def doSchedule(delay: FiniteDuration): Unit = {
      if (!wrapper.isCancelled) {
        // Clamp negative inter-run delays to zero before handing off to the scheduler.
        val future: ScheduledFuture[Unit] = schedule(delay.max(Duration.Zero)) {
          val start = System.currentTimeMillis()
          try {
            futureTask.onComplete { _ =>
              // Task complete: Schedule again
              doSchedule(delayUntilNext(FiniteDuration(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS)))
            }
          } catch {
            case e: Throwable =>
              // Exception in futureTask(): Schedule again
              doSchedule(delayUntilNext(FiniteDuration(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS)))
              throw e
          }
        }
        // store future in wrapper so that it can be cancelled
        wrapper.set(future)
      }
    }
    doSchedule(initialDelay)
    wrapper
  }

  private class ScheduledFutureWrapper[V] extends ScheduledFuture[V] {
    @volatile private var delegate: ScheduledFuture[V] = _
    @volatile private var cancelled: Boolean = false

    def set(future: ScheduledFuture[V]): Unit = this.synchronized {
      if (!cancelled) {
        delegate = future
      } else {
        future.cancel(true)
      }
    }

    override def getDelay(p1: TimeUnit): Long = delegate.getDelay(p1)

    override def isCancelled: Boolean = cancelled

    override def get(): V = delegate.get

    override def get(p1: Long, p2: TimeUnit): V = delegate.get(p1, p2)

    override def cancel(p1: Boolean): Boolean = this.synchronized {
      cancelled = true
      delegate.cancel(p1)
    }

    override def isDone: Boolean = cancelled && delegate.isDone

    override def compareTo(p1: Delayed): Int = delegate.compareTo(p1)
  }
} 
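The FiniteDuration overloads above are thin adapters that convert to the JDK's (Long, TimeUnit) pair. A self-contained sketch of that same conversion over a plain JDK scheduler (not the library's own factory):

import java.util.concurrent.{Executors, TimeUnit}

import scala.concurrent.duration.{DurationInt, FiniteDuration}

object FixedRateSketch extends App {
  private val underlying = Executors.newSingleThreadScheduledExecutor()

  // The same conversion the wrapper performs: FiniteDuration in, (Long, TimeUnit) out.
  def scheduleAtFixedRate(initialDelay: FiniteDuration, period: FiniteDuration)(f: => Unit) =
    underlying.scheduleAtFixedRate(() => f, initialDelay.toMillis, period.toMillis, TimeUnit.MILLISECONDS)

  val tick = scheduleAtFixedRate(0.seconds, 1.second)(println("tick"))
  Thread.sleep(3500) // let it fire a few times
  tick.cancel(false)
  underlying.shutdown()
}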
Example 5
Source File: StreamConsumer.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.testing

import com.daml.dec.DirectExecutionContext
import io.grpc.stub.StreamObserver

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.FiniteDuration

final class StreamConsumer[A](attach: StreamObserver[A] => Unit) {

  // Completes with the first `n` elements accepted by `p`; fails fast on a negative `n`.
  def filterTake(p: A => Boolean)(n: Int): Future[Vector[A]] =
    if (n < 0) {
      Future.failed(new IllegalArgumentException(s"Bad argument $n, non-negative integer required"))
    } else if (n == 0) {
      Future.successful(Vector.empty[A])
    } else {
      val observer = new SizeBoundObserver[A](n, p)
      attach(observer)
      observer.result
    }

  def take(n: Int): Future[Vector[A]] = filterTake(_ => true)(n)

  def find(p: A => Boolean): Future[Option[A]] =
    filterTake(p)(1).map(_.headOption)(DirectExecutionContext)

  def first(): Future[Option[A]] = find(_ => true)

  def within(duration: FiniteDuration)(implicit ec: ExecutionContext): Future[Vector[A]] = {
    val observer = new TimeBoundObserver[A](duration)
    attach(observer)
    observer.result
  }

} 
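A minimal usage sketch for the consumer above, assuming the observer classes it references (SizeBoundObserver, and TimeBoundObserver from Example 2) are on the classpath; the attach function here simply feeds three values to whichever observer is attached:

import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.DurationInt

val consumer = new StreamConsumer[Int](observer => Seq(1, 2, 3).foreach(observer.onNext))

val firstTwo = Await.result(consumer.take(2), 5.seconds)            // Vector(1, 2)
val all      = Await.result(consumer.within(100.millis), 5.seconds) // Vector(1, 2, 3), once the time window closes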
Example 6
Source File: InfiniteRetries.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.perf

import akka.actor.ActorSystem

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success}

trait InfiniteRetries {

  protected def retry[T](action: => Future[T], delay: FiniteDuration = 10.millis)(
      implicit system: ActorSystem): Future[T] = {
    implicit val ec: ExecutionContext = system.dispatcher
    action.transformWith {
      case Success(v) =>
        Future.successful(v)
      case Failure(t) =>
        val p = Promise[T]()
        system.scheduler.scheduleOnce(
          delay,
          () =>
            retry[T](action, delay).onComplete {
              case Success(s) => p.success(s)
              case Failure(throwable) => p.failure(throwable)
          }
        )
        p.future
    }
  }
} 
Example 7
Source File: CommandConfiguration.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.configuration

import scala.concurrent.duration.{DurationInt, FiniteDuration}


final case class CommandConfiguration(
    inputBufferSize: Int,
    maxParallelSubmissions: Int,
    maxCommandsInFlight: Int,
    limitMaxCommandsInFlight: Boolean,
    retentionPeriod: FiniteDuration
)

object CommandConfiguration {
  lazy val default: CommandConfiguration =
    CommandConfiguration(
      inputBufferSize = 512,
      maxParallelSubmissions = 512,
      maxCommandsInFlight = 256,
      limitMaxCommandsInFlight = true,
      retentionPeriod = 24.hours
    )
} 
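Since the configuration is a plain case class, callers can start from the default and override a single FiniteDuration field; a short sketch:

import scala.concurrent.duration.DurationInt

val shortRetention = CommandConfiguration.default.copy(retentionPeriod = 30.minutes)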
Example 8
Source File: TrackerMap.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services.tracking

import java.util.concurrent.atomic.AtomicReference

import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.v1.command_service.SubmitAndWaitRequest
import com.daml.ledger.api.v1.completion.Completion
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import org.slf4j.LoggerFactory

import scala.collection.immutable.HashMap
import scala.concurrent.duration.{FiniteDuration, _}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}


object TrackerMap {
  // Note: the original file also defines `class TrackerMap` itself; this listing only
  // preserves its companion object. The small state ADT below is restated here because
  // AsyncResource pattern-matches on it (Waiting => Ready => Closed, per the comment).
  sealed trait AsyncResourceState[+T]
  case object Waiting extends AsyncResourceState[Nothing]
  case object Closed extends AsyncResourceState[Nothing]
  final case class Ready[T](t: T) extends AsyncResourceState[T]

  final class AsyncResource[T <: AutoCloseable](future: Future[T]) {
    private val logger = LoggerFactory.getLogger(this.getClass)

    // Must progress Waiting => Ready => Closed or Waiting => Closed.
    val state: AtomicReference[AsyncResourceState[T]] = new AtomicReference(Waiting)

    future.andThen({
      case Success(t) =>
        if (!state.compareAndSet(Waiting, Ready(t))) {
          // This is the punch line of AsyncResource.
          // If we've been closed in the meantime, we must close the underlying resource also.
          // This "on-failure-to-complete" behavior is not present in scala or java Futures.
          t.close()
        }
      // Someone should be listening to this failure downstream
      // TODO(mthvedt): Refactor so at least one downstream listener is always present,
      // and exceptions are never dropped.
      case Failure(ex) =>
        logger.error("failure to get async resource", ex)
        state.set(Closed)
    })(DirectExecutionContext)

    def flatMap[U](f: T => Future[U])(implicit ex: ExecutionContext): Future[U] = {
      state.get() match {
        case Waiting => future.flatMap(f)
        case Closed => throw new IllegalStateException()
        case Ready(t) => f(t)
      }
    }

    def map[U](f: T => U)(implicit ex: ExecutionContext): Future[U] =
      flatMap(t => Future.successful(f(t)))

    def ifPresent[U](f: T => U): Option[U] = state.get() match {
      case Ready(t) => Some(f(t))
      case _ => None
    }

    def close(): Unit = state.getAndSet(Closed) match {
      case Ready(t) => t.close()
      case _ =>
    }
  }

  def apply(retentionPeriod: FiniteDuration)(implicit logCtx: LoggingContext): TrackerMap =
    new TrackerMap(retentionPeriod)
} 
Example 9
Source File: IndexerConfig.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.indexer

import com.daml.ledger.participant.state.v1.ParticipantId
import com.daml.platform.configuration.IndexConfiguration
import com.daml.platform.indexer.IndexerConfig._

import scala.concurrent.duration.{DurationInt, FiniteDuration}

case class IndexerConfig(
    participantId: ParticipantId,
    jdbcUrl: String,
    startupMode: IndexerStartupMode,
    restartDelay: FiniteDuration = DefaultRestartDelay,
    eventsPageSize: Int = IndexConfiguration.DefaultEventsPageSize,
    allowExistingSchema: Boolean = false,
)

object IndexerConfig {

  val DefaultRestartDelay: FiniteDuration = 10.seconds

} 
Example 10
Source File: GrpcHealthService.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.server.api.services.grpc

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.health.HealthChecks
import com.daml.platform.api.grpc.GrpcApiService
import com.daml.platform.server.api.DropRepeated
import com.daml.platform.server.api.services.grpc.GrpcHealthService._
import io.grpc.health.v1.health.{
  HealthAkkaGrpc,
  HealthCheckRequest,
  HealthCheckResponse,
  HealthGrpc
}
import io.grpc.{ServerServiceDefinition, Status, StatusException}

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

class GrpcHealthService(
    healthChecks: HealthChecks,
    maximumWatchFrequency: FiniteDuration = 1.second,
)(
    implicit protected val esf: ExecutionSequencerFactory,
    protected val mat: Materializer,
    executionContext: ExecutionContext,
) extends HealthAkkaGrpc
    with GrpcApiService {
  override def bindService(): ServerServiceDefinition =
    HealthGrpc.bindService(this, DirectExecutionContext)

  override def check(request: HealthCheckRequest): Future[HealthCheckResponse] =
    Future.fromTry(matchResponse(serviceFrom(request)))

  override def watchSource(request: HealthCheckRequest): Source[HealthCheckResponse, NotUsed] =
    Source
      .fromIterator(() => Iterator.continually(matchResponse(serviceFrom(request)).get))
      .throttle(1, per = maximumWatchFrequency)
      .via(DropRepeated())

  private def matchResponse(componentName: Option[String]): Try[HealthCheckResponse] =
    if (!componentName.forall(healthChecks.hasComponent))
      Failure(new StatusException(Status.NOT_FOUND))
    else if (healthChecks.isHealthy(componentName))
      Success(servingResponse)
    else
      Success(notServingResponse)
}

object GrpcHealthService {
  private[grpc] val servingResponse =
    HealthCheckResponse(HealthCheckResponse.ServingStatus.SERVING)

  private[grpc] val notServingResponse =
    HealthCheckResponse(HealthCheckResponse.ServingStatus.NOT_SERVING)

  private def serviceFrom(request: HealthCheckRequest): Option[String] = {
    Option(request.service).filter(_.nonEmpty)
  }
} 
Example 11
Source File: FutureTimeouts.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.akkastreams
import akka.actor.ActorSystem
import com.daml.dec.DirectExecutionContext
import org.scalatest.{Assertion, AsyncWordSpec}

import scala.concurrent.{Future, Promise, TimeoutException}
import scala.concurrent.duration.FiniteDuration
import scala.util.Try
import scala.util.control.NoStackTrace

trait FutureTimeouts { self: AsyncWordSpec =>

  protected def system: ActorSystem

  protected def expectTimeout(f: Future[Any], duration: FiniteDuration): Future[Assertion] = {
    val promise: Promise[Any] = Promise[Any]()

    val cancellable = system.scheduler.scheduleOnce(duration, { () =>
      promise.failure(
        new TimeoutException(s"Future timed out after $duration as expected.") with NoStackTrace)
      ()
    })(system.dispatcher)

    f.onComplete((_: Try[Any]) => cancellable.cancel())(DirectExecutionContext)

    recoverToSucceededIf[TimeoutException](
      Future.firstCompletedOf[Any](List[Future[Any]](f, promise.future))(DirectExecutionContext))
  }
} 
Example 12
Source File: OwnedResource.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import com.daml.resources

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{Await, ExecutionContext}
import scala.reflect.ClassTag

final class OwnedResource[T: ClassTag](
    owner: resources.ResourceOwner[T],
    acquisitionTimeout: FiniteDuration = 30.seconds,
    releaseTimeout: FiniteDuration = 30.seconds,
)(implicit executionContext: ExecutionContext)
    extends ManagedResource[T] {
  private var resource: resources.Resource[T] = _

  override def construct(): T = {
    resource = owner.acquire()
    Await.result(resource.asFuture, acquisitionTimeout)
  }

  override def destruct(value: T): Unit = {
    Await.result(resource.release(), releaseTimeout)
  }
} 
Example 13
Source File: AkkaExecutionSequencer.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter

import akka.Done
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, ExtendedActorSystem, Props}
import akka.pattern.{AskTimeoutException, ask}
import akka.util.Timeout
import com.daml.grpc.adapter.RunnableSequencingActor.ShutdownRequest

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
import com.daml.dec.DirectExecutionContext


// The class header is truncated in this listing. A minimal declaration consistent with
// the companion object below (it wraps an ActorRef and needs an implicit ask timeout;
// in the original it also implements the ExecutionSequencer trait) would be:
class AkkaExecutionSequencer private (actorRef: ActorRef)(implicit askTimeout: Timeout) {

  def closeAsync(implicit ec: ExecutionContext): Future[Done] =
    (actorRef ? ShutdownRequest).mapTo[Done].recover {
      case askTimeoutException: AskTimeoutException if actorIsTerminated(askTimeoutException) =>
        Done
    }

  private def actorIsTerminated(askTimeoutException: AskTimeoutException) = {
    AkkaExecutionSequencer.actorTerminatedRegex.findFirstIn(askTimeoutException.getMessage).nonEmpty
  }
}

object AkkaExecutionSequencer {
  def apply(name: String, terminationTimeout: FiniteDuration)(
      implicit system: ActorSystem): AkkaExecutionSequencer = {
    system match {
      case extendedSystem: ExtendedActorSystem =>
        new AkkaExecutionSequencer(
          extendedSystem.systemActorOf(Props[RunnableSequencingActor], name))(
          Timeout.durationToTimeout(terminationTimeout))
      case _ =>
        new AkkaExecutionSequencer(system.actorOf(Props[RunnableSequencingActor], name))(
          Timeout.durationToTimeout(terminationTimeout))

    }
  }

  private val actorTerminatedRegex = """Recipient\[.*]\] had already been terminated.""".r
}

private[grpc] class RunnableSequencingActor extends Actor with ActorLogging {
  @SuppressWarnings(Array("org.wartremover.warts.Any"))
  override val receive: Receive = {
    case runnable: Runnable =>
      try {
        runnable.run()
      } catch {
        case NonFatal(t) => log.error("Unexpected exception while executing Runnable", t)
      }
    case ShutdownRequest =>
      context.stop(self) // processing of the current message will continue
      sender() ! Done
  }
}

private[grpc] object RunnableSequencingActor {
  case object ShutdownRequest
} 
Example 14
Source File: AkkaExecutionSequencerPool.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter

import java.util.concurrent.atomic.AtomicInteger

import akka.Done
import akka.actor.ActorSystem

import scala.collection.breakOut
import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{Await, ExecutionContext, Future}

class AkkaExecutionSequencerPool(
    poolName: String,
    actorCount: Int = AkkaExecutionSequencerPool.defaultActorCount,
    terminationTimeout: FiniteDuration = 30.seconds,
)(implicit system: ActorSystem)
    extends ExecutionSequencerFactory {
  require(actorCount > 0)

  private val counter = new AtomicInteger()

  private val pool =
    Array.fill(actorCount)(
      AkkaExecutionSequencer(s"$poolName-${counter.getAndIncrement()}", terminationTimeout))

  override def getExecutionSequencer: ExecutionSequencer =
    pool(counter.getAndIncrement() % actorCount)

  override def close(): Unit =
    Await.result(closeAsync(), terminationTimeout)

  def closeAsync(): Future[Unit] = {
    implicit val ec: ExecutionContext = system.dispatcher
    val eventuallyClosed: Future[Seq[Done]] = Future.sequence(pool.map(_.closeAsync)(breakOut))
    Future.firstCompletedOf(
      Seq(
        system.whenTerminated.map(_ => ()), //  Cut it short if the ActorSystem stops.
        eventuallyClosed.map(_ => ()),
      )
    )
  }
}

object AkkaExecutionSequencerPool {

  // Default: eight sequencer actors per available processor.
  private val defaultActorCount: Int = Runtime.getRuntime.availableProcessors() * 8
} 
Example 15
Source File: RetryHelper.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.binding.retrying

import java.lang.Math.floor

import akka.actor.Scheduler
import akka.pattern.after
import com.daml.ledger.client.binding.config.IRetryConfig
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

object RetryHelper extends LazyLogging {

  // `RetryStrategy` is used below as a partial function from Throwable to Boolean.
  // The alias itself is elided in this listing, so it is restated here:
  type RetryStrategy = PartialFunction[Throwable, Boolean]

  // Retry on any non-fatal error.
  val always: RetryStrategy = {
    case NonFatal(_) => true
  }

  def retry[T](retryConfig: Option[(Scheduler, IRetryConfig)])(retryStrategy: RetryStrategy)(
      f: => Future[T])(implicit ec: ExecutionContext): Future[T] = {
    retryConfig match {
      case None =>
        f
      case Some(rc) =>
        implicit val scheduler: Scheduler = rc._1
        retry(Option(rc._2))(retryStrategy)(f)
    }
  }

  def retry[T](retryConfig: Option[IRetryConfig])(retryStrategy: RetryStrategy)(
      f: => Future[T])(implicit ec: ExecutionContext, s: Scheduler): Future[T] = {
    retryConfig match {
      case None =>
        f
      case Some(rc) =>
        val maxAttempts = floor(rc.timeout / rc.interval).toInt
        retry(maxAttempts, rc.interval)(retryStrategy)(f)
    }
  }

  def retry[T](maxAttempts: Int, delay: FiniteDuration)(retryStrategy: RetryStrategy)(
      f: => Future[T])(implicit ec: ExecutionContext, s: Scheduler): Future[T] = {

    def shouldRetry(n: Int, e: Throwable): Boolean =
      n > 0 && retryStrategy.applyOrElse(e, (_: Throwable) => false)

    val remainingAttempts = maxAttempts - 1 // the next line will trigger a future evaluation

    f.recoverWith {
      case NonFatal(e) if shouldRetry(remainingAttempts, e) =>
        logWarning(remainingAttempts, e)
        after(delay, s)(retry(remainingAttempts, delay)(retryStrategy)(f))
    }
  }

  private def logWarning(remainingAttempts: Int, e: Throwable): Unit = {
    logger.warn(
      s"Retrying after failure. Attempts remaining: $remainingAttempts. Error: ${e.getMessage}")
  }
} 
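A minimal usage sketch for the three-argument overload, assuming flakyCall stands in for some transiently failing operation:

import akka.actor.{ActorSystem, Scheduler}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration.DurationInt

object RetryDemo extends App {
  implicit val scheduler: Scheduler = ActorSystem("retry-demo").scheduler

  def flakyCall(): Future[Int] = Future(42) // hypothetical operation that may fail transiently

  // Up to 3 attempts, 100 ms apart, retrying on any non-fatal error.
  val result: Future[Int] =
    RetryHelper.retry(maxAttempts = 3, delay = 100.millis)(RetryHelper.always)(flakyCall())
}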
Example 16
Source File: package.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.engine

import java.time.Duration
import java.util.UUID

import com.daml.lf.data.Ref.Identifier
import com.daml.platform.services.time.TimeProviderType

import scala.concurrent.duration.FiniteDuration

package object trigger {

  case class LedgerConfig(
      host: String,
      port: Int,
      timeProvider: TimeProviderType,
      commandTtl: Duration,
      maxInboundMessageSize: Int,
  )

  case class TriggerRestartConfig(
      minRestartInterval: FiniteDuration,
      maxRestartInterval: FiniteDuration,
      restartIntervalRandomFactor: Double = 0.2,
  )

  final case class SecretKey(value: String)
  final case class UserCredentials(token: EncryptedToken)

  final case class RunningTrigger(
      triggerInstance: UUID,
      triggerName: Identifier,
      credentials: UserCredentials,
      // TODO(SF, 2020-0610): Add access token field here in the
      // presence of authentication.
  )
} 
Example 17
Source File: ProgramResource.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.resources

import java.util.concurrent.{Executors, TimeUnit}

import com.daml.logging.ContextualizedLogger
import com.daml.logging.LoggingContext.newLoggingContext
import com.daml.resources.ProgramResource._

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{Await, ExecutionContext}
import scala.util.Try
import scala.util.control.{NoStackTrace, NonFatal}

class ProgramResource[T](
    owner: => ResourceOwner[T],
    tearDownTimeout: FiniteDuration = 10.seconds,
) {
  private val logger = ContextualizedLogger.get(getClass)

  private val executorService = Executors.newCachedThreadPool()

  def run(): Unit = {
    newLoggingContext { implicit logCtx =>
      val resource = {
        implicit val executionContext: ExecutionContext =
          ExecutionContext.fromExecutor(executorService)
        Try(owner.acquire()).fold(Resource.failed, identity)
      }

      def stop(): Unit = {
        Await.result(resource.release(), tearDownTimeout)
        executorService.shutdown()
        executorService.awaitTermination(tearDownTimeout.toMillis, TimeUnit.MILLISECONDS)
        ()
      }

      sys.runtime.addShutdownHook(new Thread(() => {
        try {
          stop()
        } catch {
          case NonFatal(exception) =>
            logger.error("Failed to stop successfully.", exception)
        }
      }))

      // On failure, shut down immediately.
      resource.asFuture.failed.foreach { exception =>
        exception match {
          // The error is suppressed; we don't need to print anything more.
          case _: SuppressedStartupException =>
          case _: StartupException =>
            logger.error(
              s"Shutting down because of an initialization error.\n${exception.getMessage}")
          case NonFatal(_) =>
            logger.error("Shutting down because of an initialization error.", exception)
        }
        sys.exit(1) // `stop` will be triggered by the shutdown hook.
      }(ExecutionContext.global) // Run on the global execution context to avoid deadlock.
    }
  }
}

object ProgramResource {

  trait StartupException extends NoStackTrace {
    self: Exception =>
  }

  trait SuppressedStartupException {
    self: Exception =>
  }
} 
Example 18
Source File: DelayedReleaseResourceOwner.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.resources

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}

object DelayedReleaseResourceOwner {
  def apply[T](value: T, releaseDelay: FiniteDuration)(
      implicit executionContext: ExecutionContext
  ): TestResourceOwner[T] =
    new TestResourceOwner(
      Future.successful(value),
      _ => Future(Thread.sleep(releaseDelay.toMillis))(ExecutionContext.global))
} 
Example 19
Source File: HmacAuthMiddleware.scala    From iotchain   with MIT License 5 votes
package jbok.network.http.server.middleware

import java.time.{Duration, Instant}

import cats.data.{Kleisli, OptionT}
import cats.effect.Sync
import jbok.network.http.server.authentication.HMAC
import org.http4s.headers.Authorization
import org.http4s.util.CaseInsensitiveString
import org.http4s.{AuthScheme, Credentials, HttpRoutes, Request, Response, Status}
import tsec.mac.jca.{HMACSHA256, MacSigningKey}

import scala.concurrent.duration.{FiniteDuration, _}

sealed abstract class HmacAuthError(val message: String) extends Exception(message)
object HmacAuthError {
  case object NoAuthHeader     extends HmacAuthError("Could not find an Authorization header")
  case object NoDatetimeHeader extends HmacAuthError("Could not find an X-Datetime header")
  case object BadMAC           extends HmacAuthError("Bad MAC")
  case object InvalidMacFormat extends HmacAuthError("The MAC is not a valid Base64 string")
  case object InvalidDatetime  extends HmacAuthError("The datetime is not a valid UTC datetime string")
  case object Timeout          extends HmacAuthError("The request time window is closed")
}

object HmacAuthMiddleware {
  val defaultDuration: FiniteDuration = 5.minutes

  private def verifyFromHeader[F[_]](
      req: Request[F],
      key: MacSigningKey[HMACSHA256],
      duration: FiniteDuration
  ): Either[HmacAuthError, Unit] =
    for {
      authHeader <- req.headers
        .get(Authorization)
        .flatMap { t =>
          t.credentials match {
            case Credentials.Token(scheme, token) if scheme == AuthScheme.Bearer =>
              Some(token)
            case _ => None
          }
        }
        .toRight(HmacAuthError.NoAuthHeader)
      datetimeHeader <- req.headers
        .get(CaseInsensitiveString("X-Datetime"))
        .toRight(HmacAuthError.NoDatetimeHeader)
      instant <- HMAC.http.verifyFromHeader(
        req.method.name,
        req.uri.renderString,
        datetimeHeader.value,
        authHeader,
        key
      )
      _ <- Either.cond(
        Instant.now().isBefore(instant.plus(Duration.ofNanos(duration.toNanos))),
        (),
        HmacAuthError.Timeout
      )
    } yield ()

  def apply[F[_]: Sync](key: MacSigningKey[HMACSHA256], duration: FiniteDuration = defaultDuration)(routes: HttpRoutes[F]): HttpRoutes[F] =
    Kleisli { req: Request[F] =>
      verifyFromHeader(req, key, duration) match {
        case Left(error) => OptionT.some[F](Response[F](Status.Forbidden).withEntity(error.message))
        case Right(_)    => routes(req)
      }
    }
} 
Example 20
Source File: implicits.scala    From iotchain   with MIT License 5 votes
package jbok.codec.json

import io.circe._
import io.circe.generic.extras._
import jbok.codec.rlp.RlpEncoded
import scodec.bits.ByteVector
import shapeless._
import spire.math.SafeLong

import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.util.Try

@SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf"))
trait implicits {
  implicit val jsonConfig: Configuration = Configuration.default

  implicit val bytesJsonEncoder: Encoder[ByteVector] = Encoder.encodeString.contramap[ByteVector](_.toHex)
  implicit val bytesJsonDecoder: Decoder[ByteVector] = Decoder.decodeString.emap[ByteVector](ByteVector.fromHexDescriptive(_))

  implicit val rlpJsonEncoder: Encoder[RlpEncoded] = bytesJsonEncoder.contramap(_.bytes)
  implicit val rlpJsonDecoder: Decoder[RlpEncoded] = bytesJsonDecoder.map(bytes => RlpEncoded.coerce(bytes.bits))

  implicit val finiteDurationJsonEncoder: Encoder[FiniteDuration] = Encoder.encodeString.contramap[FiniteDuration](d => s"${d.length} ${d.unit.toString.toLowerCase}")
  implicit val finiteDurationJsonDecoder: Decoder[FiniteDuration] = Decoder.decodeString.emapTry[FiniteDuration](s => Try(Duration.apply(s).asInstanceOf[FiniteDuration]))

  implicit val bigIntJsonEncoder: Encoder[BigInt] = Encoder.encodeString.contramap[BigInt](_.toString(10))
  implicit val bigIntJsonDecoder: Decoder[BigInt] = Decoder.decodeString.emapTry[BigInt](s => Try(BigInt(s)))

  implicit val safeLongJsonEncoder: Encoder[SafeLong] = Encoder.encodeString.contramap[SafeLong](_.toString)
  implicit val safeLongJsonDecoder: Decoder[SafeLong] = Decoder.decodeString.emapTry[SafeLong](
    s =>
      if (s.startsWith("0x")) Try(SafeLong(BigInt(s.drop(2), 16)))
      else Try(SafeLong(BigInt(s)))
  )

  // key codecs
  implicit val bigIntKeyEncoder: KeyEncoder[BigInt] = KeyEncoder.encodeKeyString.contramap[BigInt](_.toString(10))
  implicit val bigIntKeyDecoder: KeyDecoder[BigInt] = KeyDecoder.decodeKeyString.map[BigInt](BigInt.apply)

  implicit val safeLongKeyEncoder: KeyEncoder[SafeLong] = KeyEncoder.encodeKeyString.contramap[SafeLong](_.toString)
  implicit val safeLongKeyDecoder: KeyDecoder[SafeLong] = KeyDecoder.decodeKeyString.map[SafeLong](s => SafeLong(BigInt(s)))

  // codec for value classes
  implicit def decoderJsonValueClass[T <: AnyVal, V](
      implicit
      g: Lazy[Generic.Aux[T, V :: HNil]],
      d: Decoder[V]
  ): Decoder[T] = Decoder.instance { cursor ⇒
    d(cursor).map { value ⇒
      g.value.from(value :: HNil)
    }
  }

  implicit def encoderJsonValueClass[T <: AnyVal, V](
      implicit
      g: Lazy[Generic.Aux[T, V :: HNil]],
      e: Encoder[V]
  ): Encoder[T] = Encoder.instance { value ⇒
    e(g.value.to(value).head)
  }
}

object implicits extends implicits 
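A round-trip sketch for the FiniteDuration codec defined above: the encoder renders "<length> <unit>", which Duration.apply parses back.

import io.circe.syntax._
import jbok.codec.json.implicits._

import scala.concurrent.duration._

val json = (10.seconds: FiniteDuration).asJson // "10 seconds"
val back = json.as[FiniteDuration]             // Right(10 seconds)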
Example 21
Source File: SyncConfig.scala    From iotchain   with MIT License 5 votes
package jbok.core.config

import io.circe.generic.extras.ConfiguredJsonCodec

import scala.concurrent.duration.FiniteDuration
import jbok.codec.json.implicits._
import scala.concurrent.duration._

@ConfiguredJsonCodec
final case class SyncConfig(
    maxBlockHeadersPerRequest: Int,
    maxBlockBodiesPerRequest: Int,
    offset: Int,
    checkInterval: FiniteDuration,
    banDuration: FiniteDuration,
    requestTimeout: FiniteDuration,
    keepaliveInterval: FiniteDuration = 30.seconds
) 
Example 22
Source File: MiningConfig.scala    From iotchain   with MIT License 5 votes
package jbok.core.config

import cats.effect.Sync
import cats.implicits._
import io.circe.generic.extras.ConfiguredJsonCodec
import jbok.core.keystore.KeyStore
import jbok.core.models.Address
import jbok.crypto.signature.KeyPair
import jbok.codec.json.implicits._

import scala.concurrent.duration.FiniteDuration

@ConfiguredJsonCodec
final case class MiningConfig(
    enabled: Boolean,
    address: Address,
    passphrase: String,
    coinbase: Address,
    period: FiniteDuration,
    epoch: Int,
    minBroadcastPeers: Int
)

object MiningConfig {
  def getKeyPair[F[_]](config: MiningConfig, keyStore: KeyStore[F])(implicit F: Sync[F]): F[Option[KeyPair]] =
    if (config.enabled) {
      keyStore.unlockAccount(config.coinbase, config.passphrase).map(_.keyPair.some)
    } else F.pure(None)
} 
Example 23
Source File: PoolConfig.scala    From iotchain   with MIT License 5 votes
package jbok.core.config
import io.circe.generic.extras.ConfiguredJsonCodec

import scala.concurrent.duration.FiniteDuration
import jbok.codec.json.implicits._

@ConfiguredJsonCodec
final case class TxPoolConfig(
    poolSize: Int,
    transactionTimeout: FiniteDuration
)

@ConfiguredJsonCodec
final case class BlockPoolConfig(
    maxBlockAhead: Int,
    maxBlockBehind: Int
) 
Example 24
Source File: BatchProducerIT.scala    From Scala-Programming-Projects   with MIT License 5 votes
package coinyser

import java.sql.Timestamp
import java.time.Instant
import java.util.concurrent.TimeUnit

import cats.effect.{IO, Timer}
import org.apache.spark.sql.test.SharedSparkSession
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.duration.FiniteDuration


class BatchProducerIT extends WordSpec with Matchers with SharedSparkSession {

  import testImplicits._

  "BatchProducer.save" should {
    "save a Dataset[Transaction] to parquet" in withTempDir { tmpDir =>
      val transaction1 = Transaction(timestamp = new Timestamp(1532365695000L), tid = 70683282, price = 7740.00, sell = false, amount = 0.10041719)
      val transaction2 = Transaction(timestamp = new Timestamp(1532365693000L), tid = 70683281, price = 7739.99, sell = false, amount = 0.00148564)
      val sourceDS = Seq(transaction1, transaction2).toDS()

      val uri = tmpDir.toURI
      BatchProducer.save(sourceDS, uri).unsafeRunSync()
      tmpDir.list() should contain("date=2018-07-23")
      val readDS = spark.read.parquet(uri.toString).as[Transaction]
      spark.read.parquet(uri + "/date=2018-07-23").show()
      sourceDS.collect() should contain theSameElementsAs readDS.collect()
    }
  }

  "BatchProducer.processOneBatch" should {
    "filter and save a batch of transaction, wait 59 mn, fetch the next batch" in withTempDir { tmpDir =>
      implicit object FakeTimer extends Timer[IO] {
        private var clockRealTimeInMillis: Long = Instant.parse("2018-08-02T01:00:00Z").toEpochMilli

        def clockRealTime(unit: TimeUnit): IO[Long] =
          IO(unit.convert(clockRealTimeInMillis, TimeUnit.MILLISECONDS))

        def sleep(duration: FiniteDuration): IO[Unit] = IO {
          clockRealTimeInMillis = clockRealTimeInMillis + duration.toMillis
        }

        def shift: IO[Unit] = ???

        def clockMonotonic(unit: TimeUnit): IO[Long] = ???
      }
      implicit val appContext: AppContext = new AppContext(transactionStorePath = tmpDir.toURI)

      implicit def toTimestamp(str: String): Timestamp = Timestamp.from(Instant.parse(str))
      val tx1 = Transaction("2018-08-01T23:00:00Z", 1, 7657.58, true, 0.021762)
      val tx2 = Transaction("2018-08-02T01:00:00Z", 2, 7663.85, false, 0.01385517)
      val tx3 = Transaction("2018-08-02T01:58:30Z", 3, 7663.85, false, 0.03782426)
      val tx4 = Transaction("2018-08-02T01:58:59Z", 4, 7663.86, false, 0.15750809)
      val tx5 = Transaction("2018-08-02T02:30:00Z", 5, 7661.49, true, 0.1)

      // Start at 01:00; tx2 is ignored (too soon)
      val txs0 = Seq(tx1)
      // Fetch at 01:59: gets tx2 and tx3, but misses tx4 because of API lag
      val txs1 = Seq(tx2, tx3)
      // Fetch at 02:58: gets tx3, tx4, tx5
      val txs2 = Seq(tx3, tx4, tx5)
      // Fetch at 03:57: gets nothing
      val txs3 = Seq.empty[Transaction]

      val start0 = Instant.parse("2018-08-02T00:00:00Z")
      val end0 = Instant.parse("2018-08-02T00:59:55Z")
      val threeBatchesIO =
        for {
          tuple1 <- BatchProducer.processOneBatch(IO(txs1.toDS()), txs0.toDS(), start0, end0) // end - Api lag
          (ds1, start1, end1) = tuple1

          tuple2 <- BatchProducer.processOneBatch(IO(txs2.toDS()), ds1, start1, end1)
          (ds2, start2, end2) = tuple2

          _ <- BatchProducer.processOneBatch(IO(txs3.toDS()), ds2, start2, end2)
        } yield (ds1, start1, end1, ds2, start2, end2)

      val (ds1, start1, end1, ds2, start2, end2) = threeBatchesIO.unsafeRunSync()
      ds1.collect() should contain theSameElementsAs txs1
      start1 should ===(end0)
      end1 should ===(Instant.parse("2018-08-02T01:58:55Z")) // initialClock + 1mn - 15s - 5s

      ds2.collect() should contain theSameElementsAs txs2
      start2 should ===(end1)
      end2 should ===(Instant.parse("2018-08-02T02:57:55Z")) // initialClock + 1mn -15s + 1mn -15s -5s = end1 + 45s

      val lastClock = Instant.ofEpochMilli(
        FakeTimer.clockRealTime(TimeUnit.MILLISECONDS).unsafeRunSync())
      lastClock should === (Instant.parse("2018-08-02T03:57:00Z"))

      val savedTransactions = spark.read.parquet(tmpDir.toString).as[Transaction].collect()
      val expectedTxs = Seq(tx2, tx3, tx4, tx5)
      savedTransactions should contain theSameElementsAs expectedTxs
    }
  }


} 
Example 25
Source File: Field.scala    From scarango   with MIT License 5 votes
package com.outr.arango

import scala.concurrent.duration.FiniteDuration

case class Field[F](fieldName: String) {
  protected def field[T](name: String): Field[T] = Field(s"$fieldName.$name")

  object index {
    def persistent(sparse: Boolean = false,
                   unique: Boolean = false): Index = {
      Index(IndexType.Persistent, List(fieldName), sparse, unique)
    }
    def geo(geoJson: Boolean = true): Index = {
      Index(IndexType.Geo, List(fieldName), geoJson = geoJson)
    }
    def fullText(minLength: Long = 3L): Index = {
      Index(IndexType.FullText, List(fieldName), minLength = minLength)
    }
    def ttl(expireAfter: FiniteDuration): Index = {
      val seconds = expireAfter.toSeconds.toInt
      Index(IndexType.TTL, List(fieldName), expireAfterSeconds = seconds)
    }
  }

  lazy val opt: Field[Option[F]] = Field[Option[F]](fieldName)
} 
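A short sketch of the TTL index builder, which converts the FiniteDuration to whole seconds; createdAt is a hypothetical field name:

import scala.concurrent.duration.DurationInt

val createdAt = Field[Long]("createdAt")
val ttlIndex  = createdAt.index.ttl(30.days) // expireAfterSeconds = 2592000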
Example 26
Source File: WebsocketController.scala    From gbf-raidfinder   with MIT License 5 votes
package walfie.gbf.raidfinder.server.controller

import akka.actor._
import akka.stream.scaladsl.Flow
import akka.stream.{Materializer, OverflowStrategy}
import monix.execution.Scheduler
import play.api.http.websocket.Message
import play.api.libs.streams._
import play.api.mvc._
import play.api.mvc.WebSocket.MessageFlowTransformer
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.Future
import walfie.gbf.raidfinder.domain._
import walfie.gbf.raidfinder.protocol._
import walfie.gbf.raidfinder.RaidFinder
import walfie.gbf.raidfinder.server.actor.WebsocketRaidsHandler
import walfie.gbf.raidfinder.server.util.MessageFlowTransformerUtil
import walfie.gbf.raidfinder.server.{BossNameTranslator, MetricsCollector}

class WebsocketController(
  raidFinder:        RaidFinder[BinaryProtobuf],
  translator:        BossNameTranslator,
  keepAliveInterval: FiniteDuration,
  metricsCollector:  MetricsCollector
)(implicit system: ActorSystem, materializer: Materializer, scheduler: Scheduler) extends Controller {
  private val jsonTransformer = MessageFlowTransformerUtil.protobufJsonMessageFlowTransformer
  private val binaryTransformer = MessageFlowTransformerUtil.protobufBinaryMessageFlowTransformer
  private val defaultTransformer = jsonTransformer


  // The opening of this method is truncated in the listing. The lines below are a
  // plausible reconstruction using only names already visible in the fragment; the
  // protocol negotiation and the WebsocketRaidsHandler.props argument list are
  // assumptions, not the project's exact code.
  def raids(keepAlive: Boolean): WebSocket = WebSocket.acceptOrResult { request =>
    val requestedProtocols = request.headers.getAll("Sec-WebSocket-Protocol")
    val transformerOpt =
      if (requestedProtocols.isEmpty) Some(defaultTransformer)
      else requestedProtocols.collectFirst {
        case "binary" => binaryTransformer
        case "json"   => jsonTransformer
      }

    val result = transformerOpt match {
      case Some(transformer) => Right {
        val props = WebsocketRaidsHandler.props(raidFinder, translator, keepAliveInterval, metricsCollector) // assumed arguments
        val flow = ActorFlow.actorRef(props = props)
        transformer.transform(flow)
      }
      case None => Left {
        val unsupportedProtocols = requestedProtocols.mkString("[", ", ", "]")
        Results.BadRequest("Unsupported websocket subprotocols " + unsupportedProtocols)
      }
    }

    Future.successful(result)
  }
} 
Example 27
Source File: DiscoveryConfig.scala    From mantis   with Apache License 2.0 5 votes
package io.iohk.ethereum.network.discovery

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.duration._

case class DiscoveryConfig(
    discoveryEnabled: Boolean,
    interface: String,
    port: Int,
    bootstrapNodes: Set[Node],
    nodesLimit: Int,
    scanMaxNodes: Int,
    scanInitialDelay: FiniteDuration,
    scanInterval: FiniteDuration,
    messageExpiration: FiniteDuration)

object DiscoveryConfig {
  def apply(etcClientConfig: com.typesafe.config.Config): DiscoveryConfig = {
    import scala.collection.JavaConverters._
    val discoveryConfig = etcClientConfig.getConfig("network.discovery")
    val bootstrapNodes = NodeParser.parseNodes(discoveryConfig.getStringList("bootstrap-nodes").asScala.toSet)

    DiscoveryConfig(
      discoveryEnabled = discoveryConfig.getBoolean("discovery-enabled"),
      interface = discoveryConfig.getString("interface"),
      port = discoveryConfig.getInt("port"),
      bootstrapNodes = bootstrapNodes,
      nodesLimit = discoveryConfig.getInt("nodes-limit"),
      scanMaxNodes = discoveryConfig.getInt("scan-max-nodes"),
      scanInitialDelay = discoveryConfig.getDuration("scan-initial-delay").toMillis.millis,
      scanInterval = discoveryConfig.getDuration("scan-interval").toMillis.millis,
      messageExpiration = discoveryConfig.getDuration("message-expiration").toMillis.millis)
  }

} 
Example 28
Source File: BlacklistSupport.scala    From mantis   with Apache License 2.0 5 votes
package io.iohk.ethereum.blockchain.sync

import scala.concurrent.duration.FiniteDuration
import akka.actor.{Actor, ActorLogging, Cancellable, Scheduler}
import io.iohk.ethereum.network.PeerId

import scala.concurrent.ExecutionContext.Implicits.global

trait BlacklistSupport {
  selfActor: Actor with ActorLogging =>

  import BlacklistSupport._

  def scheduler: Scheduler

  var blacklistedPeers: Seq[(PeerId, Cancellable)] = Nil

  def blacklist(peerId: PeerId, duration: FiniteDuration, reason: String): Unit = {
    undoBlacklist(peerId)
    log.debug(s"Blacklisting peer ($peerId), $reason")
    val unblacklistCancellable = scheduler.scheduleOnce(duration, self, UnblacklistPeer(peerId))
    blacklistedPeers :+= (peerId, unblacklistCancellable)
  }

  def undoBlacklist(peerId: PeerId): Unit = {
    blacklistedPeers.find(_._1 == peerId).foreach(_._2.cancel())
    blacklistedPeers = blacklistedPeers.filterNot(_._1 == peerId)
  }

  def isBlacklisted(peerId: PeerId): Boolean =
    blacklistedPeers.exists(_._1 == peerId)

  def handleBlacklistMessages: Receive = {
    case UnblacklistPeer(ref) => undoBlacklist(ref)
  }
}

object BlacklistSupport {
  private case class UnblacklistPeer(peerId: PeerId)
} 
Example 29
Source File: PeerRequestHandler.scala    From mantis   with Apache License 2.0 5 votes
package io.iohk.ethereum.blockchain.sync

import scala.concurrent.ExecutionContext.Implicits.global
import scala.reflect.ClassTag
import akka.actor._
import io.iohk.ethereum.network.{EtcPeerManagerActor, Peer}
import io.iohk.ethereum.network.PeerEventBusActor.PeerEvent.{MessageFromPeer, PeerDisconnected}
import io.iohk.ethereum.network.PeerEventBusActor.SubscriptionClassifier.{MessageClassifier, PeerDisconnectedClassifier}
import io.iohk.ethereum.network.PeerEventBusActor.{PeerSelector, Subscribe, Unsubscribe}
import io.iohk.ethereum.network.p2p.{Message, MessageSerializable}

import scala.concurrent.duration.FiniteDuration

class PeerRequestHandler[RequestMsg <: Message, ResponseMsg <: Message : ClassTag]
    (peer: Peer, responseTimeout: FiniteDuration, etcPeerManager: ActorRef, peerEventBus: ActorRef, requestMsg: RequestMsg, responseMsgCode: Int)
    (implicit scheduler: Scheduler, toSerializable: RequestMsg => MessageSerializable)
  extends Actor with ActorLogging {

  import PeerRequestHandler._

  val initiator: ActorRef = context.parent

  val timeout: Cancellable = scheduler.scheduleOnce(responseTimeout, self, Timeout)

  val startTime: Long = System.currentTimeMillis()

  private def subscribeMessageClassifier = MessageClassifier(Set(responseMsgCode), PeerSelector.WithId(peer.id))

  def timeTakenSoFar(): Long = System.currentTimeMillis() - startTime

  override def preStart(): Unit = {
    etcPeerManager ! EtcPeerManagerActor.SendMessage(toSerializable(requestMsg), peer.id)
    peerEventBus ! Subscribe(PeerDisconnectedClassifier(PeerSelector.WithId(peer.id)))
    peerEventBus ! Subscribe(subscribeMessageClassifier)
  }

  override def receive: Receive = {
    case MessageFromPeer(responseMsg: ResponseMsg, _) => handleResponseMsg(responseMsg)
    case Timeout => handleTimeout()
    case PeerDisconnected(peerId) if peerId == peer.id => handleTerminated()
  }

  def handleResponseMsg(responseMsg: ResponseMsg): Unit = {
    cleanupAndStop()
    initiator ! ResponseReceived(peer, responseMsg, timeTaken = timeTakenSoFar())
  }

  def handleTimeout(): Unit = {
    cleanupAndStop()
    initiator ! RequestFailed(peer, "request timeout")
  }

  def handleTerminated(): Unit = {
    cleanupAndStop()
    initiator ! RequestFailed(peer, "connection closed")
  }

  def cleanupAndStop(): Unit = {
    timeout.cancel()
    peerEventBus ! Unsubscribe()
    context stop self
  }
}

object PeerRequestHandler {
  def props[RequestMsg <: Message,
            ResponseMsg <: Message : ClassTag]
  (peer: Peer, responseTimeout: FiniteDuration, etcPeerManager: ActorRef, peerEventBus: ActorRef, requestMsg: RequestMsg, responseMsgCode: Int)
  (implicit scheduler: Scheduler, toSerializable: RequestMsg => MessageSerializable): Props =
    Props(new PeerRequestHandler(peer, responseTimeout, etcPeerManager, peerEventBus, requestMsg, responseMsgCode))

  case class RequestFailed(peer: Peer, reason: String)
  case class ResponseReceived[T](peer: Peer, response: T, timeTaken: Long)

  private case object Timeout
} 
Example 30
Source File: ProgressLogger.scala    From mantis   with Apache License 2.0 5 votes
package io.iohk.ethereum.snappy

import io.iohk.ethereum.utils.Logger

import scala.concurrent.duration.FiniteDuration

class ProgressLogger(startN: BigInt, targetN: BigInt, interval: FiniteDuration) extends Logger {

  private val startTimestamp = System.currentTimeMillis()
  private var lastUpdateMillis: Long = 0

  def start(): Unit =
    log.info(s"About to execute blocks $startN through $targetN (${targetN - startN + 1} total)")

  def update(n: BigInt): Unit = {
    val now = System.currentTimeMillis()
    if (now - lastUpdateMillis > interval.toMillis || n == targetN) {
      lastUpdateMillis = now
      val percent = n.toDouble / targetN.toDouble * 100
      log.info(f"Executed blocks up to $n ($percent%.1f%%). ETA: ${eta(now, n)}")
    }
  }

  private def eta(now: Long, n: BigInt): String = {
    val elapsed = (now - startTimestamp) / 1000
    if (n - startN > 0 && elapsed > 0) {
      val r = (targetN - startN + 1).toDouble / (n - startN + 1).toDouble
      val estimated = elapsed * (r - 1)
      val h = (estimated / 3600).toInt
      val m = ((estimated % 3600) / 60).toInt
      val s = (estimated % 60).toInt
      f"$h%02d:$m%02d:$s%02d"
    } else "N/A"
  }
} 
Example 31
Source File: SilhouetteConfiguration.scala    From crm-seed   with Apache License 2.0 5 votes
package com.dataengi.crm.configurations

import com.google.inject.{Inject, Singleton}
import com.mohiva.play.silhouette.api.util.Clock
import com.typesafe.config.Config
import play.api.Configuration
import net.ceedubs.ficus.Ficus._
import com.mohiva.play.silhouette.api.Authenticator.Implicits._

import scala.concurrent.duration.FiniteDuration

@Singleton
class SilhouetteConfiguration @Inject()(configuration: Configuration, clock: Clock) {

  val underlying: Config = configuration.underlying

  def authenticatorExpiry: FiniteDuration =
    underlying.as[FiniteDuration]("silhouette.authenticator.rememberMe.authenticatorExpiry")
  def authenticatorIdleTimeout: Option[FiniteDuration] =
    underlying.getAs[FiniteDuration]("silhouette.authenticator.rememberMe.authenticatorIdleTimeout")

  def recoverPasswordTimeout: Long =
    (clock.now + underlying.as[FiniteDuration]("silhouette.authenticator.recoverPasswordExpiry")).getMillis

} 
Example 32
Source File: IdentitiesArbitrary.scala    From crm-seed   with Apache License 2.0 5 votes
package com.dataengi.crm.identities.arbitraries

import java.util.UUID
import java.util.concurrent.TimeUnit

import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.impl.authenticators.JWTAuthenticator
import com.dataengi.crm.common.arbitraries.CommonArbitrary
import com.dataengi.crm.identities.models.Actions.Action
import com.dataengi.crm.identities.models.InviteStatuses.InviteStatus
import com.dataengi.crm.identities.models._
import com.dataengi.crm.identities.models.PermissionStates.PermissionState
import com.mohiva.play.silhouette.api.util.PasswordInfo
import org.joda.time.DateTime
import org.scalacheck.{Arbitrary, Gen}
import play.api.libs.json.Json

import scala.concurrent.duration.FiniteDuration

trait IdentitiesArbitrary extends CommonArbitrary {

  lazy val companyArbitrary: Arbitrary[Company] = Arbitrary(Gen.resultOf(Company))

  implicit val actionArbitrary: Arbitrary[Action]                   = Arbitrary(Gen.oneOf(Actions.values.toList))
  implicit val permissionStateArbitrary: Arbitrary[PermissionState] = Arbitrary(Gen.oneOf(PermissionStates.values.toList))
  implicit val permissionArbitrary: Arbitrary[Permission]           = Arbitrary(Gen.resultOf(Permission))
  implicit val roleArbitrary: Arbitrary[Role]                       = Arbitrary(Gen.resultOf(Role))
  implicit val inviteStatusArbitrary: Arbitrary[InviteStatus]       = Arbitrary(Gen.oneOf(InviteStatuses.values.toList))
  implicit val uuidArbitrary: Arbitrary[UUID]                       = Arbitrary(Gen.uuid)
  implicit val inviteArbitrary: Arbitrary[Invite]                   = Arbitrary(Gen.resultOf(Invite))

  val dateTimeGen = for {
    value <- Gen.Choose.chooseLong.choose(0, Long.MaxValue)
  } yield new DateTime(value)

  val finiteDurationGen = for {
    value <- Gen.Choose.chooseLong.choose(0, Long.MaxValue)
  } yield new FiniteDuration(value, TimeUnit.NANOSECONDS)

  val jsObject = Gen.oneOf(List(Some(Json.obj("a" -> "b")), None))

  implicit val jsObjectArbitrary       = Arbitrary(jsObject)
  implicit val dateTimeArbitrary       = Arbitrary(dateTimeGen)
  implicit val finiteDurationArbitrary = Arbitrary(finiteDurationGen)
  implicit val loginInfoArbitrary      = Arbitrary(Gen.resultOf(LoginInfo))
  implicit val authenticatorArbitrary  = Arbitrary(Gen.resultOf(JWTAuthenticator.apply _))
  implicit val passwordInfoArbitrary   = Arbitrary(Gen.resultOf(PasswordInfo))
} 
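Usage note (not part of the original file): with the trait mixed in, ScalaCheck resolves the implicit Arbitrary[FiniteDuration] defined above. A minimal sketch, assuming the surrounding test helpers (CommonArbitrary etc.) are on the classpath; the property itself is illustrative.

import org.scalacheck.Prop.forAll
import org.scalacheck.Properties
import scala.concurrent.duration.FiniteDuration

object FiniteDurationSpec extends Properties("FiniteDuration") with IdentitiesArbitrary {
  // The generator draws nanosecond counts from [0, Long.MaxValue],
  // so every produced duration is non-negative.
  property("non-negative") = forAll { (d: FiniteDuration) => d.toNanos >= 0 }
}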
Example 33
Source File: AskActor.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.actors

import akka.actor.{Actor, ActorRef, ActorSystem, Props, Status}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise, TimeoutException}
import scala.reflect.ClassTag

class AskActor[T](p: Promise[T], timeout: FiniteDuration)(implicit ct: ClassTag[T]) extends Actor {
  import context.dispatcher
  private val timeoutCancelable = context.system.scheduler.scheduleOnce(timeout, self, AskActor.timeoutMessage)

  override val receive: Receive = {
    case x => // Fix in Scala 2.13
      timeoutCancelable.cancel()
      context.stop(self)
      x match {
        case x: T if x.getClass == ct.runtimeClass => p.trySuccess(x)
        case e: Status.Failure                     => p.tryFailure(e.cause)
        case _                                     => p.tryFailure(new IllegalArgumentException(s"Expected ${ct.runtimeClass.getName}, but got $x"))
      }
  }
}

object AskActor {
  private val timeoutMessage = {
    val reason = new TimeoutException("Typed ask is timed out!")
    reason.setStackTrace(Array.empty)
    Status.Failure(reason)
  }

  def props[T](p: Promise[T], timeout: FiniteDuration)(implicit ct: ClassTag[T]) = Props(new AskActor(p, timeout))
  def mk[T](timeout: FiniteDuration)(implicit ct: ClassTag[T], system: ActorSystem): (ActorRef, Future[T]) = {
    val p   = Promise[T]()
    val ref = system.actorOf(props(p, timeout))
    (ref, p.future)
  }
} 
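Usage note (not part of the original file): a minimal sketch of the typed-ask pattern, assuming an implicit ActorSystem. A throwaway echo actor answers through the reference returned by mk, which completes the promise.

import akka.actor.{Actor, ActorSystem, Props}
import scala.concurrent.Await
import scala.concurrent.duration.DurationInt

implicit val system: ActorSystem = ActorSystem("ask-demo")
val echo = system.actorOf(Props(new Actor { def receive = { case m => sender() ! m } }))

val (askRef, answer) = AskActor.mk[String](5.seconds)
echo.tell("ping", askRef)                         // the reply completes the promise
assert(Await.result(answer, 6.seconds) == "ping")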
Example 34
Source File: WatchDistributedCompletionActor.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.actors

import akka.actor.{Actor, ActorRef, Cancellable, Props, Terminated}
import com.wavesplatform.dex.domain.utils.ScorexLogging

import scala.concurrent.duration.FiniteDuration

class WatchDistributedCompletionActor(workers: Set[ActorRef],
                                      completionReceiver: ActorRef,
                                      startWorkCommand: Any,
                                      workCompleted: Any,
                                      timeout: FiniteDuration)
    extends Actor
    with ScorexLogging {

  import context.dispatcher

  if (workers.isEmpty) stop(Cancellable.alreadyCancelled)
  else
    workers.foreach { x =>
      context.watch(x)
      x ! startWorkCommand
    }

  override def receive: Receive = state(workers, context.system.scheduler.scheduleOnce(timeout, self, TimedOut))

  private def state(rest: Set[ActorRef], timer: Cancellable): Receive = {
    case `workCompleted` =>
      switchTo(rest - sender(), timer)
      context.unwatch(sender())

    case Terminated(ref) =>
      switchTo(rest - ref, timer)

    case TimedOut =>
      val workerPairs = workers.iterator.map(_.path.name).mkString(", ")
      log.error(s"$startWorkCommand is timed out! Workers those didn't respond: $workerPairs")
      stop(timer)
  }

  private def switchTo(updatedRest: Set[ActorRef], timer: Cancellable): Unit =
    if (updatedRest.isEmpty) stop(timer) else context.become(state(updatedRest, timer))

  private def stop(timer: Cancellable): Unit = {
    timer.cancel()
    completionReceiver ! workCompleted
    context.stop(self)
  }
}

object WatchDistributedCompletionActor {
  def props(workers: Set[ActorRef], completionReceiver: ActorRef, startWorkCommand: Any, workCompleted: Any, timeout: FiniteDuration): Props =
    Props(new WatchDistributedCompletionActor(workers, completionReceiver, startWorkCommand, workCompleted, timeout))
} 
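Usage note (not part of the original file): a hedged sketch in which a hypothetical parent actor fans the start command out to its workers and gets the completion marker back once all of them replied, or after the timeout. The "start"/"done" markers are illustrative.

import akka.actor.{Actor, ActorRef}
import scala.concurrent.duration.DurationInt

class Coordinator(workers: Set[ActorRef]) extends Actor {
  override def preStart(): Unit = {
    // The watcher replies with "done" to this actor when every worker answered or the timeout fired.
    context.actorOf(WatchDistributedCompletionActor.props(workers, self, "start", "done", 10.seconds))
  }

  override def receive: Receive = {
    case "done" => context.stop(self) // all workers finished, or the watcher timed out
  }
}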
Example 35
Source File: BatchOrderCancelActor.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.actors.address

import akka.actor.{Actor, ActorRef, Cancellable, Props}
import com.wavesplatform.dex.actors.TimedOut
import com.wavesplatform.dex.actors.address.AddressActor.Command.CancelOrder
import com.wavesplatform.dex.actors.address.AddressActor.Event
import com.wavesplatform.dex.actors.address.BatchOrderCancelActor.CancelResponse.OrderCancelResult
import com.wavesplatform.dex.domain.order.Order
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.error

import scala.concurrent.duration.FiniteDuration

class BatchOrderCancelActor private (
    orderIds: Set[Order.Id],
    processorActor: ActorRef,
    clientActor: ActorRef,
    timeout: FiniteDuration,
    initResponse: Map[Order.Id, OrderCancelResult]
) extends Actor
    with ScorexLogging {

  import BatchOrderCancelActor._
  import context.dispatcher

  orderIds.foreach(processorActor ! CancelOrder(_))

  override def receive: Receive = state(orderIds, initResponse, context.system.scheduler.scheduleOnce(timeout, self, TimedOut))

  private def state(restOrderIds: Set[Order.Id], response: Map[Order.Id, OrderCancelResult], timer: Cancellable): Receive = {
    case CancelResponse(id, x) =>
      val updatedRestOrderIds = restOrderIds - id
      val updatedResponse     = response.updated(id, x)

      if (updatedRestOrderIds.isEmpty) stop(Event.BatchCancelCompleted(updatedResponse), timer)
      else context.become(state(updatedRestOrderIds, updatedResponse, timer))

    // case Terminated(ref) => // Can't terminate before processorActor, because processorActor is a parent

    case TimedOut =>
      log.error(s"CancelOrder is timed out for orders: ${restOrderIds.mkString(", ")}")
      stop(Event.BatchCancelCompleted(response), timer)
  }

  private def stop(response: Event.BatchCancelCompleted, timer: Cancellable): Unit = {
    timer.cancel()
    clientActor ! response
    context.stop(self)
  }
}

object BatchOrderCancelActor {
  def props(orderIds: Set[Order.Id],
            processorActor: ActorRef,
            clientActor: ActorRef,
            timeout: FiniteDuration,
            initResponse: Map[Order.Id, OrderCancelResult] = Map.empty): Props = {
    require(orderIds.nonEmpty, "orderIds is empty")
    Props(new BatchOrderCancelActor(orderIds, processorActor, clientActor, timeout, initResponse))
  }

  object CancelResponse {

    type OrderCancelResult = Either[error.MatcherError, Event.OrderCanceled]

    def unapply(arg: Any): Option[(Order.Id, OrderCancelResult)] = helper.lift(arg)

    private val helper: PartialFunction[Any, (Order.Id, OrderCancelResult)] = {
      case x @ Event.OrderCanceled(id)     => (id, Right(x))
      case x @ error.OrderNotFound(id)     => (id, Left(x))
      case x @ error.OrderCanceled(id)     => (id, Left(x))
      case x @ error.OrderFull(id)         => (id, Left(x))
      case x @ error.MarketOrderCancel(id) => (id, Left(x))
    }
  }
} 
Example 36
Source File: OrderBookAskAdapter.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.actors

import java.util.concurrent.atomic.AtomicReference

import akka.actor._
import akka.actor.typed.scaladsl.adapter._
import akka.http.scaladsl.model.HttpResponse
import cats.syntax.either._
import cats.syntax.option._
import com.wavesplatform.dex.actors.orderbook.AggregatedOrderBookActor.{Depth, Query}
import com.wavesplatform.dex.actors.orderbook.OrderBookActor.MarketStatus
import com.wavesplatform.dex.domain.asset.AssetPair
import com.wavesplatform.dex.error
import com.wavesplatform.dex.error.MatcherError
import com.wavesplatform.dex.model.MatcherModel.DecimalsFormat
import com.wavesplatform.dex.model.OrderBookAggregatedSnapshot

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.reflect.ClassTag

// Will be removed after a migration to typed actors
class OrderBookAskAdapter(orderBooks: AtomicReference[Map[AssetPair, Either[Unit, ActorRef]]], askTimeout: FiniteDuration)(
    implicit system: ActorSystem) {
  import system.dispatcher

  type Result[T] = Future[Either[MatcherError, Option[T]]]

  def getMarketStatus(assetPair: AssetPair): Result[MarketStatus] = get[Query.GetMarketStatus, MarketStatus](assetPair, Query.GetMarketStatus(_))

  def getAggregatedSnapshot(assetPair: AssetPair): Result[OrderBookAggregatedSnapshot] =
    get[Query.GetAggregatedSnapshot, OrderBookAggregatedSnapshot](assetPair, Query.GetAggregatedSnapshot(_))

  def getHttpView(assetPair: AssetPair, format: DecimalsFormat, depth: Depth): Result[HttpResponse] =
    get[Query.GetHttpView, HttpResponse](assetPair, Query.GetHttpView(format, depth, _))

  private val default = Future.successful(Right(None))

  private def get[M <: Query, R: ClassTag](assetPair: AssetPair, message: ActorRef => M): Result[R] = orderBooks.get().get(assetPair) match {
    case None => default
    case Some(ob) =>
      ob match {
        case Left(_) => Future.successful(error.OrderBookBroken(assetPair).asLeft)
        case Right(ob) =>
          val (askRef, r) = AskActor.mk[R](askTimeout)
          ob ! message(askRef)
          r.map(_.some.asRight)
      }
  }
} 
Example 37
Source File: JwtUtils.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.auth

import java.security

import com.wavesplatform.dex.api.ws.protocol.WsAddressSubscribe.JwtPayload
import com.wavesplatform.dex.domain.account.{AddressScheme, KeyPair, PublicKey}
import com.wavesplatform.dex.domain.bytes.ByteStr
import pdi.jwt.{JwtAlgorithm, JwtJson}
import play.api.libs.json.{JsObject, Json}

import scala.concurrent.duration.{DurationInt, FiniteDuration}

trait JwtUtils {

  def mkJwt(authServiceKeyPair: security.KeyPair, payload: JwtPayload): String = mkJwt(authServiceKeyPair, Json.toJsObject(payload))
  def mkJwt(authServiceKeyPrivateKey: security.PrivateKey, payload: JsObject): String =
    JwtJson.encode(payload, authServiceKeyPrivateKey, JwtAlgorithm.RS256)
  def mkJwt(authServiceKeyPair: security.KeyPair, payload: JsObject): String =
    JwtJson.encode(payload, authServiceKeyPair.getPrivate, JwtAlgorithm.RS256)

  def mkJwtSignedPayload(clientKeyPair: KeyPair, networkByte: Byte = AddressScheme.current.chainId, lifetime: FiniteDuration = 1.hour): JwtPayload =
    mkJwtNotSignedPayload(clientKeyPair, networkByte, lifetime).signed(clientKeyPair)

  def mkJwtNotSignedPayload(clientPublicKey: PublicKey,
                            networkByte: Byte = AddressScheme.current.chainId,
                            lifetime: FiniteDuration = 1.hour): JwtPayload = {
    val exp = System.currentTimeMillis() / 1000 + lifetime.toSeconds
    JwtPayload(
      signature = ByteStr(Array.emptyByteArray),
      publicKey = clientPublicKey,
      networkByte = networkByte.toChar.toString,
      clientId = "test",
      firstTokenExpirationInSeconds = exp,
      activeTokenExpirationInSeconds = exp,
      scope = List("general")
    )
  }
}

object JwtUtils extends JwtUtils 
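Usage note (not part of the original file): a minimal sketch of producing a token with the helpers above. The freshly generated RSA pair stands in for the auth service's key, and the seed-based KeyPair constructor is an assumption.

import java.security.KeyPairGenerator
import com.wavesplatform.dex.domain.account.KeyPair
import com.wavesplatform.dex.domain.bytes.ByteStr

val kpg = KeyPairGenerator.getInstance("RSA")
kpg.initialize(2048)
val authServiceKeyPair = kpg.generateKeyPair()

val clientKeyPair = KeyPair(ByteStr("seed".getBytes)) // assumed seed-based constructor
val jwt: String   = JwtUtils.mkJwt(authServiceKeyPair, JwtUtils.mkJwtSignedPayload(clientKeyPair))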
Example 38
Source File: EventsQueueSettings.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.settings

import com.wavesplatform.dex.queue.{KafkaMatcherQueue, LocalMatcherQueue}
import com.wavesplatform.dex.settings.EventsQueueSettings.CircuitBreakerSettings
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.ArbitraryTypeReader.arbitraryTypeValueReader
import net.ceedubs.ficus.readers.{NameMapper, ValueReader}

import scala.concurrent.duration.FiniteDuration

case class EventsQueueSettings(tpe: String,
                               local: LocalMatcherQueue.Settings,
                               kafka: KafkaMatcherQueue.Settings,
                               circuitBreaker: CircuitBreakerSettings)

object EventsQueueSettings {

  implicit val chosenCase: NameMapper = MatcherSettings.chosenCase

  implicit val eventsQueueSettingsReader: ValueReader[EventsQueueSettings] = { (cfg, path) =>
    EventsQueueSettings(
      tpe = cfg.getString(s"$path.type"),
      local = cfg.as[LocalMatcherQueue.Settings](s"$path.local"),
      kafka = cfg.as[KafkaMatcherQueue.Settings](s"$path.kafka"),
      circuitBreaker = cfg.as[CircuitBreakerSettings](s"$path.circuit-breaker")
    )
  }

  case class CircuitBreakerSettings(maxFailures: Int, callTimeout: FiniteDuration, resetTimeout: FiniteDuration)
} 
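Usage note (not part of the original file): a hedged sketch of the configuration shape this reader expects. Values are illustrative, the kebab-case field names assume the matcher's usual NameMapper, and the local/kafka sub-blocks (whose fields are defined by their own Settings readers) are left empty here.

import com.typesafe.config.ConfigFactory

val shape = ConfigFactory.parseString(
  """events-queue {
    |  type = "kafka"
    |  local {}
    |  kafka {}
    |  circuit-breaker {
    |    max-failures = 10
    |    call-timeout = 5s
    |    reset-timeout = 30s
    |  }
    |}""".stripMargin)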
Example 39
Source File: MatcherQueue.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.queue

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

trait MatcherQueue {
  def startConsume(fromOffset: QueueEventWithMeta.Offset, process: Seq[QueueEventWithMeta] => Future[Unit]): Unit

  // Offset of the most recently stored event.
  def lastEventOffset: Future[QueueEventWithMeta.Offset]
  def close(timeout: FiniteDuration): Unit
}

object MatcherQueue {
  private val stored: Future[Option[QueueEventWithMeta]] = Future.successful(None)

  private[queue] trait Producer {
    def storeEvent(event: QueueEvent): Future[Option[QueueEventWithMeta]]
    def close(timeout: FiniteDuration): Unit
  }

  private[queue] object IgnoreProducer extends Producer {
    override def storeEvent(event: QueueEvent): Future[Option[QueueEventWithMeta]] = stored
    override def close(timeout: FiniteDuration): Unit                              = {}
  }
} 
Example 40
Source File: TestTime.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.time

import scala.concurrent.duration.FiniteDuration

class TestTime(var t: Long = System.currentTimeMillis()) extends Time {

  def setTime(tt: Long): this.type = {
    t = tt
    this
  }

  def advance(d: FiniteDuration): this.type = {
    t += d.toMillis
    this
  }

  override def correctedTime(): Long = t

  override def getTimestamp(): Long = {
    t += 1
    t
  }
} 
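Usage note (not part of the original file): a minimal sketch showing the deterministic test clock in action.

import scala.concurrent.duration.DurationInt

val time = new TestTime(1000L)
time.advance(2.seconds)
assert(time.correctedTime() == 3000L)
assert(time.getTimestamp() == 3001L) // each getTimestamp() call advances the clock by 1 ms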
Example 41
Source File: WsSuiteBase.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.it

import com.softwaremill.diffx.{Derived, Diff}
import com.wavesplatform.dex.api.ws.connection.WsConnection
import com.wavesplatform.dex.api.ws.entities.WsFullOrder
import com.wavesplatform.dex.api.ws.protocol.{WsError, WsServerMessage}
import com.wavesplatform.dex.it.api.websockets.HasWebSockets

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.reflect.ClassTag

trait WsSuiteBase extends MatcherSuiteBase with HasWebSockets {

  protected implicit val wsErrorDiff: Diff[WsError] = Derived[Diff[WsError]].ignore[WsError, Long](_.timestamp)
  protected implicit val wsCompleteOrderDiff: Diff[WsFullOrder] =
    Derived[Diff[WsFullOrder]].ignore[WsFullOrder, Long](_.timestamp).ignore[WsFullOrder, Long](_.eventTimestamp)

  final implicit class WsConnectionOps(val self: WsConnection) {
    def receiveAtLeastN[T <: WsServerMessage: ClassTag](n: Int): List[T] = {
      val r = eventually {
        val xs = self.collectMessages[T]
        xs.size should be >= n
        xs
      }
      Thread.sleep(200) // Waiting for additional messages
      r
    }

    def receiveNoMessages(duration: FiniteDuration = 1.second): Unit = {
      val sizeBefore = self.messages.size
      Thread.sleep(duration.toMillis)
      self.messages.size shouldBe sizeBefore
    }

    def receiveNoMessagesOf[T <: WsServerMessage: ClassTag](duration: FiniteDuration = 1.second): Unit = {
      val sizeBefore = self.collectMessages[T].size
      Thread.sleep(duration.toMillis)
      self.collectMessages[T].size shouldBe sizeBefore
    }
  }
} 
Example 42
Source File: it.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform

import com.wavesplatform.dex.domain.account.{KeyPair, PublicKey}
import com.wavesplatform.dex.domain.asset.AssetPair
import com.wavesplatform.dex.domain.order.{Order, OrderType}
import com.wavesplatform.dex.waves.WavesFeeConstants._
import com.wavesplatform.it.api.MatcherCommand
import org.scalacheck.Gen

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{Await, Future}
import scala.util.Random
import scala.util.control.NonFatal

package object it {

  // Executes the given commands concurrently and returns the number of successful ones.
  def executeCommands(xs: Seq[MatcherCommand], ignoreErrors: Boolean = true, timeout: FiniteDuration = 3.minutes): Int = {
    Await.result(Future.sequence(xs.map(executeCommand(_, ignoreErrors))), timeout).sum
  }

  private def executeCommand(x: MatcherCommand, ignoreErrors: Boolean): Future[Int] =
    try x match {
      case MatcherCommand.Place(api, order) => api.tryPlace(order).map(_.fold(_ => 0, _ => 1))
      case MatcherCommand.Cancel(api, owner, order) =>
        api.tryCancel(owner, order).map(_.fold(_ => 0, _ => 1))
    } catch {
      case NonFatal(e) =>
        if (ignoreErrors) Future.successful(0)
        else Future.failed(e)
    }

  def orderGen(matcher: PublicKey,
               trader: KeyPair,
               assetPairs: Seq[AssetPair],
               types: Seq[OrderType] = Seq(OrderType.BUY, OrderType.SELL)): Gen[Order] = {
    val ts = System.currentTimeMillis()
    for {
      assetPair      <- Gen.oneOf(assetPairs)
      tpe            <- Gen.oneOf(types)
      amount         <- Gen.choose(10, 100)
      price          <- Gen.choose(10, 100)
      orderVersion   <- Gen.choose[Byte](1, 3)
      expirationDiff <- Gen.choose(600000, 6000000)
    } yield {
      if (tpe == OrderType.BUY)
        Order.buy(
          trader,
          matcher,
          assetPair,
          amount,
          price * Order.PriceConstant,
          ts,
          ts + expirationDiff,
          matcherFee,
          orderVersion
        )
      else
        Order.sell(
          trader,
          matcher,
          assetPair,
          amount,
          price * Order.PriceConstant,
          ts,
          ts + expirationDiff,
          matcherFee,
          orderVersion
        )
    }
  }

  def choose[T](xs: IndexedSeq[T]): T = xs(Random.nextInt(xs.size))
} 
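Usage note (not part of the original file): a one-line sketch of sampling a random order; matcherPk, aliceKeyPair and wavesUsdPair are hypothetical fixtures.

val maybeOrder: Option[Order] = orderGen(matcherPk, aliceKeyPair, Seq(wavesUsdPair)).sample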
Example 43
Source File: WavesBlockchainCachingClient.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.grpc.integration.clients

import java.net.InetAddress
import java.time.Duration

import com.wavesplatform.dex.domain.account.Address
import com.wavesplatform.dex.domain.asset.Asset
import com.wavesplatform.dex.domain.bytes.ByteStr
import com.wavesplatform.dex.domain.order.Order
import com.wavesplatform.dex.domain.transaction.ExchangeTransaction
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.grpc.integration.caches.{AssetDescriptionsCache, FeaturesCache}
import com.wavesplatform.dex.grpc.integration.clients.WavesBlockchainClient.SpendableBalanceChanges
import com.wavesplatform.dex.grpc.integration.dto.BriefAssetDescription
import monix.execution.Scheduler
import monix.reactive.Observable

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}

class WavesBlockchainCachingClient(underlying: WavesBlockchainClient[Future], defaultCacheExpiration: FiniteDuration, monixScheduler: Scheduler)(
    implicit grpcExecutionContext: ExecutionContext)
    extends WavesBlockchainClient[Future]
    with ScorexLogging {

  private val cacheExpiration: Duration = Duration.ofMillis(defaultCacheExpiration.toMillis)

  private val featuresCache          = new FeaturesCache(underlying.isFeatureActivated, invalidationPredicate = !_) // we don't keep knowledge about unactivated features
  private val assetDescriptionsCache = new AssetDescriptionsCache(underlying.assetDescription, cacheExpiration)

  // TODO remove after release 2.1.3
  override def spendableBalance(address: Address, asset: Asset): Future[Long] = underlying.spendableBalance(address, asset)

  override def spendableBalanceChanges: Observable[SpendableBalanceChanges]                      = underlying.spendableBalanceChanges
  override def realTimeBalanceChanges: Observable[WavesBlockchainClient.BalanceChanges]          = underlying.realTimeBalanceChanges
  override def spendableBalances(address: Address, assets: Set[Asset]): Future[Map[Asset, Long]] = underlying.spendableBalances(address, assets)
  override def allAssetsSpendableBalance(address: Address): Future[Map[Asset, Long]]             = underlying.allAssetsSpendableBalance(address)

  override def isFeatureActivated(id: Short): Future[Boolean] = featuresCache.get(id) map Boolean2boolean

  override def assetDescription(asset: Asset.IssuedAsset): Future[Option[BriefAssetDescription]] = assetDescriptionsCache.get(asset)

  override def hasScript(asset: Asset.IssuedAsset): Future[Boolean]                                     = underlying.hasScript(asset)
  override def runScript(asset: Asset.IssuedAsset, input: ExchangeTransaction): Future[RunScriptResult] = underlying.runScript(asset, input)

  override def hasScript(address: Address): Future[Boolean]                       = underlying.hasScript(address)
  override def runScript(address: Address, input: Order): Future[RunScriptResult] = underlying.runScript(address, input)

  override def wereForged(txIds: Seq[ByteStr]): Future[Map[ByteStr, Boolean]] = underlying.wereForged(txIds)
  override def broadcastTx(tx: ExchangeTransaction): Future[Boolean]          = underlying.broadcastTx(tx)

  override def forgedOrder(orderId: ByteStr): Future[Boolean] = underlying.forgedOrder(orderId)

  override def getNodeAddress: Future[InetAddress] = underlying.getNodeAddress

  override def close(): Future[Unit] = underlying.close()
} 
Example 44
Source File: GrpcClientSettings.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.grpc.integration.settings

import com.wavesplatform.dex.grpc.integration.settings.GrpcClientSettings.ChannelOptionsSettings
import io.grpc.netty.NettyChannelBuilder
import io.netty.channel.ChannelOption

import scala.concurrent.duration.FiniteDuration

case class GrpcClientSettings(target: String,
                              maxHedgedAttempts: Int,
                              maxRetryAttempts: Int,
                              keepAliveWithoutCalls: Boolean,
                              keepAliveTime: FiniteDuration,
                              keepAliveTimeout: FiniteDuration,
                              idleTimeout: FiniteDuration,
                              channelOptions: ChannelOptionsSettings) {
  def toNettyChannelBuilder: NettyChannelBuilder =
    NettyChannelBuilder
      .forTarget(target)
      .maxHedgedAttempts(maxHedgedAttempts)
      .maxRetryAttempts(maxRetryAttempts)
      .keepAliveWithoutCalls(keepAliveWithoutCalls)
      .keepAliveTime(keepAliveTime.length, keepAliveTime.unit)
      .keepAliveTimeout(keepAliveTimeout.length, keepAliveTimeout.unit)
      .idleTimeout(idleTimeout.length, idleTimeout.unit)
      .withOption[Integer](ChannelOption.CONNECT_TIMEOUT_MILLIS, channelOptions.connectTimeout.toMillis.toInt)
}

object GrpcClientSettings {
  case class ChannelOptionsSettings(connectTimeout: FiniteDuration)
} 
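Usage note (not part of the original file): a construction sketch with illustrative values; tuning is deployment-specific, and the target address is made up.

import scala.concurrent.duration.DurationInt

val grpcSettings = GrpcClientSettings(
  target                = "dns:///waves-node:6887",
  maxHedgedAttempts     = 5,
  maxRetryAttempts      = 5,
  keepAliveWithoutCalls = true,
  keepAliveTime         = 10.seconds,
  keepAliveTimeout      = 5.seconds,
  idleTimeout           = 5.minutes,
  channelOptions        = GrpcClientSettings.ChannelOptionsSettings(connectTimeout = 5.seconds)
)
val channel = grpcSettings.toNettyChannelBuilder.usePlaintext().build()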
Example 45
Source File: HasJwt.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.it.api.websockets

import java.security
import java.security.KeyPairGenerator
import java.util.Base64

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.api.ws.protocol.WsAddressSubscribe.JwtPayload
import com.wavesplatform.dex.auth.JwtUtils
import com.wavesplatform.dex.domain.account.KeyPair
import play.api.libs.json.Json

import scala.concurrent.duration.{FiniteDuration, _}

trait HasJwt extends JwtUtils {

  protected val authServiceKeyPair: security.KeyPair = {
    val kpg = KeyPairGenerator.getInstance("RSA")
    kpg.initialize(2048)
    kpg.generateKeyPair()
  }

  protected def jwtPublicKeyConfig: Config = ConfigFactory.parseString(
    s"""waves.dex.web-sockets.external-client-handler.jwt-public-key = \"\"\"-----BEGIN PUBLIC KEY-----
       |${Base64.getEncoder.encodeToString(authServiceKeyPair.getPublic.getEncoded).grouped(64).mkString("\n")}
       |-----END PUBLIC KEY-----\"\"\"
       |""".stripMargin
  )

  protected def mkJwt(payload: JwtPayload): String = mkJwt(authServiceKeyPair, Json.toJsObject(payload))

  protected def mkJwt(clientKeyPair: KeyPair, lifetime: FiniteDuration = 1.hour): String = {
    mkJwt(mkJwtSignedPayload(clientKeyPair, lifetime = lifetime))
  }
} 
Example 46
Source File: GlobalTimer.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.it.time

import io.netty.util.{HashedWheelTimer, Timer}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise}
import scala.util.control.NonFatal

object GlobalTimer {

  val instance: Timer = new HashedWheelTimer()

  sys.addShutdownHook {
    instance.stop()
  }

  implicit class TimerOpsImplicits(val timer: Timer) extends AnyVal {
    def schedule[A](f: => Future[A], delay: FiniteDuration): Future[A] = {
      val p = Promise[A]
      try {
        timer.newTimeout(_ => p.completeWith(f), delay.length, delay.unit)
      } catch {
        case NonFatal(e) => p.failure(e)
      }
      p.future
    }

    def sleep(term: FiniteDuration): Future[Unit] = schedule(Future.successful(()), term)
  }
} 
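Usage note (not part of the original file): a minimal sketch of the implicit ops, scheduling work and sleeping without blocking a thread.

import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import com.wavesplatform.dex.it.time.GlobalTimer.TimerOpsImplicits

val delayed: Future[Int] = GlobalTimer.instance.schedule(Future.successful(42), 1.second)
val pause: Future[Unit]  = GlobalTimer.instance.sleep(500.millis)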
Example 47
Source File: FOps.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.it.fp

import java.nio.charset.StandardCharsets

import cats.syntax.apply._
import cats.syntax.either._
import cats.syntax.flatMap._
import cats.syntax.functor._
import com.softwaremill.sttp.{DeserializationError, Response}
import play.api.libs.json._

import scala.concurrent.duration.{FiniteDuration, _}
import scala.util.control.NonFatal

case class RepeatRequestOptions(delayBetweenRequests: FiniteDuration, maxAttempts: Int) {
  def decreaseAttempts: RepeatRequestOptions = copy(maxAttempts = maxAttempts - 1)
}

class FOps[F[_]](implicit M: ThrowableMonadError[F], W: CanWait[F]) {

  def repeatUntil[T](f: => F[T], options: RepeatRequestOptions = RepeatRequestOptions(1.second, 30))(stopCond: T => Boolean): F[T] =
    f.flatMap { firstResp =>
        (firstResp, options).tailRecM[F, (T, RepeatRequestOptions)] {
          case (resp, currOptions) =>
            if (stopCond(resp)) M.pure((resp, currOptions).asRight)
            else if (currOptions.maxAttempts <= 0) M.raiseError(new RuntimeException(s"All attempts are out! The last response is: $resp"))
            else W.wait(options.delayBetweenRequests).productR(f).map(x => (x, currOptions.decreaseAttempts).asLeft)
        }
      }
      .map(_._1)

  def repeatUntil[T](f: => F[T], delay: FiniteDuration)(pred: T => Boolean): F[T] =
    f.flatMap {
      _.tailRecM[F, T] { x =>
        if (pred(x)) M.pure(x.asRight)
        else W.wait(delay).productR(f).map(_.asLeft)
      }
    }

  def repeatUntilResponse[T](f: => F[Response[Either[DeserializationError[JsError], T]]], delay: FiniteDuration)(
      pred: Response[Either[DeserializationError[JsError], T]] => Boolean): F[T] =
    repeatUntil(f, delay)(pred).flatMap(parseResponse)

  def parseResponse[T](resp: Response[Either[DeserializationError[JsError], T]]): F[T] =
    resp.rawErrorBody match {
      case Left(e) =>
        M.raiseError[T](
          new RuntimeException(s"The server returned an error. HTTP code is ${resp.code}, body: ${new String(e, StandardCharsets.UTF_8)}"))
      case Right(Left(error)) => M.raiseError[T](new RuntimeException(s"Can't parse the response: $error"))
      case Right(Right(r))    => M.pure(r)
    }

  def parseTryResponse[E: Reads, T](resp: Response[T]): F[Either[E, T]] = resp.rawErrorBody match {
    case Right(r) => M.pure(Right(r))
    case Left(bytes) =>
      try Json.parse(bytes).validate[E] match {
        case JsSuccess(x, _) => M.pure(Left(x))
        case JsError(e)      => M.raiseError[Either[E, T]](JsResultException(e))
      } catch {
        case NonFatal(e) =>
          M.raiseError[Either[E, T]](new RuntimeException(s"The server returned an error: ${resp.code}, also can't parse as MatcherError", e))
      }
  }

  def parseTryResponseEither[E: Reads, T](resp: Response[Either[DeserializationError[JsError], T]]): F[Either[E, T]] = resp.rawErrorBody match {
    case Right(Right(r)) => M.pure(Right(r))
    case Right(Left(e))  => M.raiseError[Either[E, T]](new RuntimeException(s"The server returned success, but can't parse response: $e"))
    case Left(bytes) =>
      try Json.parse(bytes).validate[E] match {
        case JsSuccess(x, _) => M.pure(Left(x))
        case JsError(e)      => M.raiseError[Either[E, T]](JsResultException(e))
      } catch {
        case NonFatal(e) =>
          M.raiseError[Either[E, T]](new RuntimeException(s"The server returned an error: ${resp.code}, also can't parse as MatcherError", e))
      }
  }
}

object FOps {
  def apply[F[_]: CanWait: ThrowableMonadError]: FOps[F] = new FOps[F]
} 
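Usage note (not part of the original file): a hedged polling sketch, assuming ThrowableMonadError[Future] and CanWait[Future] instances are in scope (cats provides a MonadError for Future given an ExecutionContext; the CanWait instance appears in the next example). The polled call is a stub.

import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.DurationInt

def currentHeight: Future[Int] = Future.successful(100) // stub standing in for a polled HTTP call
val atLeast100: Future[Int]    = FOps[Future].repeatUntil(currentHeight, 1.second)(_ >= 100)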
Example 48
Source File: CanWait.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.it.fp

import com.wavesplatform.dex.it.time.GlobalTimer
import com.wavesplatform.dex.it.time.GlobalTimer.TimerOpsImplicits

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.util.{Success, Try}

trait CanWait[F[_]] {
  def wait(duration: FiniteDuration): F[Unit]
}

object CanWait {

  implicit val future: CanWait[Future] = (duration: FiniteDuration) => GlobalTimer.instance.sleep(duration)

  implicit val tryCanWait: CanWait[Try] = (duration: FiniteDuration) => {
    Thread.sleep(duration.toMillis)
    Success(())
  }
} 
Example 49
Source File: TAC.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.example2.trustaccountcreation

import java.util.concurrent.TimeUnit

import akka.actor.Status.Failure
import akka.actor.{ActorSystem, Props, ActorPath}
import no.nextgentel.oss.akkatools.aggregate._
import no.nextgentel.oss.akkatools.example2.other.{DoCreateTrustAccount, DoPerformESigning, DoSendEmailToCustomer}

import scala.concurrent.duration.FiniteDuration

class TACAggregate
(
  dmSelf:ActorPath,
  eSigningSystem:ActorPath,
  emailSystem:ActorPath,
  trustAccountSystem:ActorPath
) extends GeneralAggregateDMViaEvent[TACEvent, TACState](dmSelf) {

  override def persistenceIdBase() = TACAggregate.persistenceIdBase

  // Override this one to set different timeout
  override def idleTimeout() = FiniteDuration(60, TimeUnit.SECONDS)

  override var state = TACState.empty() // Our initial (empty) state

  // transform command to event
  override def cmdToEvent = {
    case c:CreateNewTACCmd        =>
      ResultingEvent( RegisteredEvent(c.info) )
        .onSuccess{ sender() ! "ok" }
        .onError{   (e) => sender() ! Failure(new Exception(s"Failed: $e"))}

    case c:ESigningFailedCmd      => ResultingEvent( ESigningFailedEvent() )
    case c:ESigningCompletedCmd   => ResultingEvent( ESigningCompletedEvent() )
    case c:CompletedCmd           => ResultingEvent( CreatedEvent(c.trustAccountId) )
    case c:DeclinedCmd            => ResultingEvent( DeclinedEvent(c.cause) )
  }

  override def generateDMs = {
    case e:RegisteredEvent  =>
      // We must send a message to the eSigningSystem
      val msg = DoPerformESigning(dispatchId, e.info.customerNo)
      ResultingDMs( msg, eSigningSystem)

    case e:ESigningCompletedEvent =>
      // ESigning is completed, so we should init creation of the TrustAccount
      val info = state.info.get
      val msg = DoCreateTrustAccount(dispatchId, info.customerNo, info.trustAccountType)
      ResultingDMs(msg, trustAccountSystem)


    case e:DeclinedEvent =>
      // The TrustAccountCreation-process failed - must notify customer
      val msg = DoSendEmailToCustomer(state.info.get.customerNo, s"Sorry.. TAC-failed: ${e.cause}")
      ResultingDMs(msg, emailSystem)

    case e:CreatedEvent =>
      // The TrustAccountCreation-process was success - must notify customer
      val msg = DoSendEmailToCustomer(state.info.get.customerNo, s"Your TrustAccount '${e.trustAccountId}' has been created!")
      ResultingDMs(msg, emailSystem)

  }
}

object TACAggregate {

  val persistenceIdBase = "TAC-"

  def props(dmSelf:ActorPath,
            eSigningSystem:ActorPath,
            emailSystem:ActorPath,
            trustAccountSystem:ActorPath) = Props(new TACAggregate(dmSelf, eSigningSystem, emailSystem ,trustAccountSystem))
}


class TACStarter(system:ActorSystem) extends AggregateStarter("tac", system) with AggregateViewStarter {

  def config(eSigningSystem:ActorPath,
             emailSystem:ActorPath,
             trustAccountSystem:ActorPath):TACStarter = {
    setAggregatePropsCreator{
      dmSelf =>
        TACAggregate.props(dmSelf, eSigningSystem, emailSystem, trustAccountSystem)
    }
    this
  }

  override def createViewProps(aggregateId: String): Props =
    Props( new GeneralAggregateView[TACEvent, TACState](TACAggregate.persistenceIdBase, aggregateId, TACState.empty(), true))
} 
Example 50
Source File: SeedNodesListOrderingResolver.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.cluster

import java.util.concurrent.TimeUnit

import org.slf4j.LoggerFactory

import scala.concurrent.duration.FiniteDuration

// Must be used together with ClusterListener
object SeedNodesListOrderingResolver {
  val log = LoggerFactory.getLogger(getClass)
  def resolveSeedNodesList(repo:ClusterNodeRepo, clusterConfig:AkkaClusterConfig, maxAliveAge:FiniteDuration = FiniteDuration(20, TimeUnit.SECONDS)):AkkaClusterConfig = {

    val ourNode = clusterConfig.thisHostnameAndPort()

    // Since we're starting up, just make sure that we do not find info about ourselves from our last run
    log.debug(s"removeClusterNodeAlive for $ourNode")
    repo.removeClusterNodeAlive(ourNode)

    val allSeedNodes = clusterConfig.seedNodes

    val weAreSeedNode = allSeedNodes.contains(ourNode)
    if ( !weAreSeedNode) {
      log.info("We are NOT a seedNode")
    }

    val aliveNodes = repo.findAliveClusterNodes(maxAliveAge, onlyJoined = false).map {
      node =>
        // alive nodes are listed on this form:
        //    akka.tcp://SomeAkkaSystem@host1:9999
        // We must remove everything before hostname:port
        val index = node.indexOf('@')
        if ( index >= 0) node.substring(index+1) else node
    }

    val seedNodeListToUse = if ( aliveNodes.isEmpty ) {
      if (weAreSeedNode) {
        val allNodesExceptOur = allSeedNodes.filter(n => n != ourNode)
        val list = List(ourNode) ++ allNodesExceptOur

        log.info("No other clusterNodes found as alive - We must be first seed node - seedNodeListToUse: " + list)
        list
      } else {
        log.info("No other clusterNodes found as alive - Since we're not a seedNode, we're using the list as is - seedNodeListToUse: " + allSeedNodes)
        allSeedNodes
      }
    } else {

      if (weAreSeedNode) {
        val allNodesExceptOurAndAliveOnes = allSeedNodes.filter(n => n != ourNode && !aliveNodes.contains(n))

        val list = aliveNodes ++ List(ourNode) ++ allNodesExceptOurAndAliveOnes

        log.info("Found other alive clusterNodes - we should not be first seed node. Alive cluster nodes: " + aliveNodes.mkString(",") + " - seedNodeListToUse: " + list)
        list
      } else {
        val allNodesExceptAliveOnes = allSeedNodes.filter(n => !aliveNodes.contains(n))

        val list = aliveNodes ++ allNodesExceptAliveOnes

        log.info("Found other alive clusterNodes - Alive cluster nodes: " + aliveNodes.mkString(",") + " - seedNodeListToUse: " + list)
        list

      }
    }

    clusterConfig.withSeedNodeList(seedNodeListToUse)
  }
} 
Example 51
Source File: SeedNodesListOrderingResolverTest.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.cluster

import java.time.OffsetDateTime

import org.scalatest.{Matchers, FunSuite}

import scala.concurrent.duration.FiniteDuration

class SeedNodesListOrderingResolverTest extends FunSuite with Matchers {

  test("no live nodes") {
    val repo = new OurClusterNodeRepo(List())
    assert(AkkaClusterConfig(Some("host1"), 9999, List("host1:9999", "host2:9999")) ==
      SeedNodesListOrderingResolver.resolveSeedNodesList(repo, AkkaClusterConfig(Some("host1"), 9999, List("host1:9999", "host2:9999"))))

    assert(AkkaClusterConfig(Some("host1"), 9999, List("host1:9999", "host2:9999")) ==
      SeedNodesListOrderingResolver.resolveSeedNodesList(repo, AkkaClusterConfig(Some("host1"), 9999, List("host2:9999", "host1:9999"))))
  }

  test("alive nodes found") {
    val repo = new OurClusterNodeRepo(List(NodeInfo("akka.tcp://MobilityService@host1:9999", true)))
    assert(AkkaClusterConfig(Some("host2"), 9999, List("host1:9999", "host2:9999")) ==
      SeedNodesListOrderingResolver.resolveSeedNodesList(repo, AkkaClusterConfig(Some("host2"), 9999, List("host1:9999", "host2:9999"))))

    assert(AkkaClusterConfig(Some("host2"), 9999, List("host1:9999", "host2:9999")) ==
      SeedNodesListOrderingResolver.resolveSeedNodesList(repo, AkkaClusterConfig(Some("host2"), 9999, List("host2:9999", "host1:9999"))))
  }

  test("alive node (not joined yet) found ") {
    val repo = new OurClusterNodeRepo(List(NodeInfo("akka.tcp://MobilityService@host1:9999", false)))
    assert(AkkaClusterConfig(Some("host2"), 9999, List("host1:9999", "host2:9999")) ==
      SeedNodesListOrderingResolver.resolveSeedNodesList(repo, AkkaClusterConfig(Some("host2"), 9999, List("host1:9999", "host2:9999"))))

    assert(AkkaClusterConfig(Some("host2"), 9999, List("host1:9999", "host2:9999")) ==
      SeedNodesListOrderingResolver.resolveSeedNodesList(repo, AkkaClusterConfig(Some("host2"), 9999, List("host2:9999", "host1:9999"))))
  }

  test("This node is not a seedNode - with alive Nodes") {
    val repo = new OurClusterNodeRepo(List(NodeInfo("akka.tcp://MobilityService@host1:9999", true), NodeInfo("akka.tcp://MobilityService@host2:9999", true)))
    assert(AkkaClusterConfig(Some("host3"), 9999, List("host1:9999", "host2:9999")) ==
      SeedNodesListOrderingResolver.resolveSeedNodesList(repo, AkkaClusterConfig(Some("host3"), 9999, List("host1:9999", "host2:9999"))))
  }

  test("This node is not a seedNode - with no alive Nodes") {
    val repo = new OurClusterNodeRepo(List())
    assert(AkkaClusterConfig(Some("host3"), 9999, List("host2:9999", "host1:9999")) ==
      SeedNodesListOrderingResolver.resolveSeedNodesList(repo, AkkaClusterConfig(Some("host3"), 9999, List("host2:9999", "host1:9999"))))
  }

  case class NodeInfo(host:String, joined:Boolean)

  class OurClusterNodeRepo(aliveClusterNodes:List[NodeInfo]) extends ClusterNodeRepo {
    // Writes to db that this clusterNode is alive
    override def writeClusterNodeAlive(nodeNameAndPort: String, timestamp: OffsetDateTime, joined:Boolean): Unit = {}

    override def removeClusterNodeAlive(nodeNameAndPort: String): Unit = {}

    // Returns list of all nodeNameAndPorts that has written it is alive since aliveAfter
    override def findAliveClusterNodes(clusterNodesAliveSinceCheck: FiniteDuration, onlyJoined:Boolean): List[String] = {
      if ( onlyJoined) {
        aliveClusterNodes.filter(_.joined).map(_.host)
      } else {
        aliveClusterNodes.map(_.host)
      }
    }
  }
} 
Example 52
Source File: ActorCache.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.utils

import java.util.concurrent.TimeUnit

import akka.actor._
import com.google.common.cache._

import scala.concurrent.duration.{FiniteDuration, Duration}
import scala.reflect.ClassTag

case class CheckCache()
case class ForwardToCachedActor[K](key:K, msg:AnyRef)

object ActorCache {
  def props[K:ClassTag](cacheLoader:(K)=>Props, expireAfter: Duration = Duration(2, TimeUnit.MINUTES)) = Props(new ActorCache[K](cacheLoader, expireAfter))
}

class ActorCache[K:ClassTag](cacheLoader:(K)=>Props, expireAfter: Duration) extends Actor with ActorLogging {

  implicit val ec = context.dispatcher

  val removalListener = new RemovalListener[AnyRef, ActorRef] {
    override def onRemoval(notification: RemovalNotification[AnyRef, ActorRef]): Unit = {
      val key = notification.getKey.asInstanceOf[K]
      log.debug("Stopping actor for " + key)
      val actor = notification.getValue
      actor ! PoisonPill
    }
  }

  val realCacheLoader = new CacheLoader[AnyRef,ActorRef] {
    override def load(key: AnyRef): ActorRef = {
      log.debug("Creating actor for " + key)
      val props:Props = cacheLoader(key.asInstanceOf[K])
      context.actorOf( props )
    }
  }

  val cache:LoadingCache[AnyRef, ActorRef] = CacheBuilder.newBuilder
    .expireAfterAccess(expireAfter.toMillis, TimeUnit.MILLISECONDS)
    .removalListener(removalListener)
    .build(realCacheLoader)

  val waitPeriod = FiniteDuration.apply(expireAfter.toMillis / 2, TimeUnit.MILLISECONDS)

  scheduleNextCacheCheck()

  def scheduleNextCacheCheck(): Unit = {
    context.system.scheduler.scheduleOnce(waitPeriod, self, CheckCache())
  }

  def receive = {
    case CheckCache() => {
      cache.cleanUp()
      scheduleNextCacheCheck()
    }
    case ForwardToCachedActor(key:K, msg) =>
      try {
        val actor = cache.get(key.asInstanceOf[AnyRef])
        log.debug("Forwarding message for " + key + " to " + actor)
        actor forward msg
      } catch {
        case e:Exception =>
          log.error(e, "Error forwarding message (with key "+key+") " + msg)
      }
    case x:AnyRef =>
      log.warning("Droping unknown msg: " + x)
  }

  @throws(classOf[Exception])
  override def postStop(): Unit = {
    super.postStop()
    cache.invalidateAll()
    cache.cleanUp()
  }

} 
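Usage note (not part of the original file): a minimal sketch with one child actor per key, created on first use and expired after the default two minutes of inactivity.

import akka.actor.{Actor, ActorSystem, Props}

class SessionActor(key: String) extends Actor {
  def receive = { case msg => println(s"[$key] got $msg") }
}

val system = ActorSystem("cache-demo")
val cache  = system.actorOf(ActorCache.props[String](key => Props(new SessionActor(key))))
cache ! ForwardToCachedActor("user-42", "hello") // creates the actor for "user-42", then forwards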
Example 53
Source File: ActorWithDMSupportTest.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.persistence

import java.util.concurrent.TimeUnit

import akka.actor.{Props, ActorSystem}
import akka.testkit.{TestProbe, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Matchers, FunSuiteLike}

import scala.concurrent.duration.FiniteDuration

class ActorWithDMSupportTest(_system:ActorSystem) extends TestKit(_system) with FunSuiteLike with Matchers with BeforeAndAfterAll with BeforeAndAfter {
  def this() = this(ActorSystem("ActorWithDMSupportTest", ConfigFactory.load("application-test.conf")))

  test("success with dm") {
    val a = system.actorOf(Props(new TestActorWithDMSupport()))
    val s = TestProbe()

    // send raw
    s.send(a, "sendok")
    s.expectMsg("ok")

    // send via dm and withNewPayload
    val dm = DurableMessage(1L, "sendok", s.ref.path)
    s.send(a, dm)
    s.expectMsg(dm.withNewPayload("ok"))

    // send raw - do nothing
    s.send(a, "silent")


    // send silent - wait for confirm
    s.send(a, DurableMessage(1L, "silent", s.ref.path))
    s.expectMsg( DurableMessageReceived(1,None) )


    // send noconfirm - with dm
    s.send(a, DurableMessage(1L, "no-confirm", s.ref.path))
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

    // send noconfirm - with dm
    s.send(a, DurableMessage(1L, "no-confirm-custom", s.ref.path))
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

    // send noconfirm - without dm
    s.send(a, "no-confirm")
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

    // send noconfirm - without dm
    s.send(a, "no-confirm-custom")
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

  }


}

class TestActorWithDMSupport extends ActorWithDMSupport {
  // All raw messages or payloads in DMs are passed to this function.
  override def receivePayload = {
    case "sendok" =>
      send(sender.path, "ok")
    case "silent" =>
      Unit
    case "no-confirm" =>
      throw new LogWarningAndSkipDMConfirmException("something went wrong")
    case "no-confirm-custom" =>
      throw new CustomLogWarningAndSkipDMConfirm()
  }
}

class CustomLogWarningAndSkipDMConfirm extends Exception("") with LogWarningAndSkipDMConfirm 
Example 54
Source File: DynamoDBClientV2Config.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.config.client.v2

import com.github.j5ik2o.akka.persistence.dynamodb.client.v1.ExecutionInterceptorsProvider
import com.github.j5ik2o.akka.persistence.dynamodb.client.v2.RetryPolicyProvider
import com.github.j5ik2o.akka.persistence.dynamodb.utils.ConfigOps._
import com.github.j5ik2o.akka.persistence.dynamodb.utils.{ ClassCheckUtils, LoggingSupport }
import com.typesafe.config.{ Config, ConfigFactory }
import net.ceedubs.ficus.Ficus._
import software.amazon.awssdk.core.interceptor.ExecutionInterceptor
import software.amazon.awssdk.core.retry.RetryMode

import scala.collection.immutable._
import scala.concurrent.duration.FiniteDuration

object DynamoDBClientV2Config extends LoggingSupport {

  val dispatcherNameKey                        = "dispatcher-name"
  val asyncKey                                 = "async"
  val syncKey                                  = "sync"
  val headersKey                               = "headers"
  val retryModeKey                             = "retry-mode"
  val retryPolicyProviderClassNameKey          = "retry-policy-provider-class-name"
  val executionInterceptorClassNamesKey        = "execution-interceptor-class-names"
  val executionInterceptorProviderClassNameKey = "execution-interceptor-provider-class-name"
  val apiCallTimeoutKey                        = "api-call-timeout"
  val apiCallAttemptTimeoutKey                 = "api-call-attempt-timeout"

  val keyNames: Seq[String] =
    Seq(dispatcherNameKey, asyncKey, syncKey, retryModeKey, apiCallTimeoutKey, apiCallAttemptTimeoutKey)

  def existsKeyNames(config: Config): Map[String, Boolean] = {
    keyNames.map(v => (v, config.exists(v))).toMap
  }

  def fromConfig(config: Config, legacy: Boolean): DynamoDBClientV2Config = {
    logger.debug("config = {}", config)
    val result = DynamoDBClientV2Config(
      sourceConfig = config,
      dispatcherName = config.getAs[String](dispatcherNameKey),
      asyncClientConfig = {
        if (legacy) {
          logger.warn(
            "<<<!!!CAUTION: PLEASE MIGRATE TO NEW CONFIG FORMAT!!!>>>\n" +
            "\tThe configuration items of AWS-SDK V2 client remain with the old key names: (j5ik2o.dynamo-db-journal.dynamo-db-client).\n" +
            "\tPlease change current key name to the new key name: (j5ik2o.dynamo-db-journal.dynamo-db-client.v2.async). \n\t" +
            AsyncClientConfig.existsKeyNames(config).filter(_._2).keys.mkString("child-keys = [ ", ", ", " ]")
          )
          AsyncClientConfig.fromConfig(config)
        } else
          AsyncClientConfig.fromConfig(config.getOrElse[Config](asyncKey, ConfigFactory.empty()))
      },
      syncClientConfig = SyncClientConfig.fromConfig(config.getOrElse[Config](syncKey, ConfigFactory.empty())),
      headers = config.getOrElse[Map[String, Seq[String]]](headersKey, Map.empty),
      retryMode = config.getAs[String](retryModeKey).map(s => RetryMode.valueOf(s)),
      retryPolicyProviderClassName = {
        val className = config
          .getAs[String](retryPolicyProviderClassNameKey).orElse(Some(classOf[RetryPolicyProvider.Default].getName))
        ClassCheckUtils.requireClass(classOf[RetryPolicyProvider], className)
      },
      executionInterceptorClassNames = {
        val classNames = config.getOrElse[Seq[String]](executionInterceptorClassNamesKey, Seq.empty)
        classNames.map(s => ClassCheckUtils.requireClass(classOf[ExecutionInterceptor], s))
      },
      executionInterceptorsProviderClassName = {
        val className = config.getOrElse[String](
          executionInterceptorProviderClassNameKey,
          classOf[ExecutionInterceptorsProvider.Default].getName
        )
        ClassCheckUtils.requireClass(classOf[ExecutionInterceptorsProvider], className)
      },
      apiCallTimeout = config.getAs[FiniteDuration](apiCallTimeoutKey),
      apiCallAttemptTimeout = config.getAs[FiniteDuration](apiCallAttemptTimeoutKey)
    )
    logger.debug("result = {}", result)
    result
  }
}

case class DynamoDBClientV2Config(
    sourceConfig: Config,
    dispatcherName: Option[String],
    asyncClientConfig: AsyncClientConfig,
    syncClientConfig: SyncClientConfig,
    headers: Map[String, Seq[String]],
    retryMode: Option[RetryMode],
    retryPolicyProviderClassName: Option[String],
    executionInterceptorClassNames: Seq[String],
    executionInterceptorsProviderClassName: String,
    apiCallTimeout: Option[FiniteDuration],
    apiCallAttemptTimeout: Option[FiniteDuration]
) 
Example 55
Source File: WriteJournalDao.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.journal.dao

import akka.NotUsed
import akka.actor.Scheduler
import akka.persistence.PersistentRepr
import akka.stream.scaladsl.Source
import com.github.j5ik2o.akka.persistence.dynamodb.journal.JournalRow
import com.github.j5ik2o.akka.persistence.dynamodb.model.{ PersistenceId, SequenceNumber }

import scala.concurrent.duration.FiniteDuration
import scala.util.Try

trait WriteJournalDao extends JournalDaoWithReadMessages {

  def deleteMessages(
      persistenceId: PersistenceId,
      toSequenceNr: SequenceNumber
  ): Source[Long, NotUsed]

  def highestSequenceNr(persistenceId: PersistenceId, fromSequenceNr: SequenceNumber): Source[Long, NotUsed]

  def putMessages(messages: Seq[JournalRow]): Source[Long, NotUsed]

}

trait JournalDaoWithUpdates extends WriteJournalDao {

  def updateMessage(journalRow: JournalRow): Source[Unit, NotUsed]

}

trait JournalDaoWithReadMessages {

  def getMessagesAsPersistentRepr(
      persistenceId: PersistenceId,
      fromSequenceNr: SequenceNumber,
      toSequenceNr: SequenceNumber,
      max: Long,
      deleted: Option[Boolean] = Some(false)
  ): Source[Try[PersistentRepr], NotUsed]

  def getMessagesAsPersistentReprWithBatch(
      persistenceId: String,
      fromSequenceNr: Long,
      toSequenceNr: Long,
      batchSize: Int,
      refreshInterval: Option[(FiniteDuration, Scheduler)]
  ): Source[Try[PersistentRepr], NotUsed]

} 
Example 56
Source File: Philosopher.scala    From didactic-computing-machine   with GNU Affero General Public License v3.0 5 votes vote down vote up
package DiningPhilosophers

import DiningPhilosophers.ForkMessages._
import DiningPhilosophers.PhilosopherMessages._
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.ExecutionContext.Implicits.global

class Philosopher(val leftFork: ActorRef, val rightFork: ActorRef) extends Actor with ActorLogging {

  def name = self.path.name

  private val eatingTime = 2500.millis
  private val thinkingTime = 5000.millis
  private val retryTime = 10.millis


  def thinkFor(duration: FiniteDuration) = {
    context.system.scheduler.scheduleOnce(duration, self, Eat)
    context.become(thinking)
  }

  def thinking: Receive = {
    case Eat =>
      log.info(s"Philosopher ${self.path.name} wants to eat")
      leftFork ! Take
      rightFork ! Take
      context.become(hungry)
  }

  def hungry: Receive = {
    case ForkBeingUsed => handleForkBeingUsed()
    case ForkTaken =>
      log.info(s"Philosopher ${self.path.name} found one fork to be taken by other philosopher")
      context.become(waitingForOtherFork)
  }

  def waitingForOtherFork: Receive = {
    case ForkBeingUsed => handleForkBeingUsed()
    case ForkTaken =>
      log.info(s"Philosopher ${self.path.name} starts to eat")
      context.system.scheduler.scheduleOnce(eatingTime, self, Think)
      context.become(eating)
  }

  def eating: Receive = {
    case Think =>
      log.info(s"Philosopher ${self.path.name} starts to think")
      leftFork ! Put
      rightFork ! Put
      thinkFor(thinkingTime)
  }

  def handleForkBeingUsed(): Unit = {
    log.info(s"Philosopher ${self.path.name} found one fork to be in use")
    
    leftFork ! Put
    rightFork ! Put
    thinkFor(retryTime)
  }

  def receive = {
    case Think =>
      log.info(s"Philosopher ${self.path.name} started thinking")
      thinkFor(thinkingTime)

  }
} 
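Usage note (not part of the original file): a hedged wiring sketch for the classic five-philosopher table. The Fork actor (handling Take/Put) is not shown in this example and is assumed to exist.

import akka.actor.{ActorSystem, Props}

val system = ActorSystem("dining")
val forks  = Vector.tabulate(5)(i => system.actorOf(Props(new Fork), s"fork-$i")) // hypothetical Fork actor
val philosophers = Vector.tabulate(5) { i =>
  system.actorOf(Props(new Philosopher(forks(i), forks((i + 1) % 5))), s"philosopher-$i")
}
philosophers.foreach(_ ! Think) // kick off the think/eat cycle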
Example 57
Source File: DateTimeBuilder.scala    From nyaya   with GNU Lesser General Public License v2.1 5 votes vote down vote up
package nyaya.gen

import java.util.Date
import scala.concurrent.duration.FiniteDuration
import DateTimeBuilder._
import Gen.Now

object DateTimeBuilder {

  sealed abstract class TimeSpec {
    final type Spec = Either[Long, Long => Long]
    protected def fixed(l: Long): Spec = Left(l)
    protected def fn(f: Long => Long): Spec = Right(f)
    val past, future: Spec
  }
  case class Delta(ms: Long) extends TimeSpec {
    override val past = fn(_ - ms)
    override val future = fn(_ + ms)
  }
  case class Fixed(epochMs: Long) extends TimeSpec {
    override val past = fixed(epochMs)
    override val future = past
  }
  case object Unlimited extends TimeSpec {
    override val past = fixed(0)
    override val future = fixed(Long.MaxValue - 1)
  }

  def default(implicit genNow: Gen[Now]): DateTimeBuilder =
    new DateTimeBuilder(genNow, Unlimited, Unlimited)

  val DayMs = 86400000L.toDouble
  val YearMs = DayMs * 365.25
  val MonthMs = YearMs / 12
  val WeekMs = YearMs / 52
}

final class DateTimeBuilder(genNow: Gen[Now], past: TimeSpec, future: TimeSpec) extends DateTimeBuilderJava8 {

  protected def copy(genNow: Gen[Now] = genNow, past: TimeSpec = past, future: TimeSpec = future): DateTimeBuilder =
    new DateTimeBuilder(genNow, past = past, future = future)

  def fromEpochMs(e: Long)            = copy(past = Fixed(e))
  def fromNowMinusMs(d: Long)         = copy(past = Delta(d))
  def fromDate(d: Date)               = fromEpochMs(d.getTime)
  def fromNow                         = fromNowMinusMs(0)
  def fromNowMinus(d: FiniteDuration) = fromNowMinusMs(d.toMillis)
  def fromNowMinusYears(d: Double)    = fromNowMinusMs((YearMs * d).toLong)
  def fromNowMinusMonths(d: Double)   = fromNowMinusMs((MonthMs * d).toLong)
  def fromNowMinusWeeks(d: Double)    = fromNowMinusMs((WeekMs * d).toLong)
  def fromNowMinusDays(d: Double)     = fromNowMinusMs((DayMs * d).toLong)

  def untilEpochMs(e: Long)           = copy(future = Fixed(e))
  def untilNowPlusMs(d: Long)         = copy(future = Delta(d))
  def untilDate(d: Date)              = untilEpochMs(d.getTime)
  def untilNow                        = untilNowPlusMs(0)
  def untilNowPlus(d: FiniteDuration) = untilNowPlusMs(d.toMillis)
  def untilNowPlusYears(d: Double)    = untilNowPlusMs((YearMs * d).toLong)
  def untilNowPlusMonths(d: Double)   = untilNowPlusMs((MonthMs * d).toLong)
  def untilNowPlusWeeks(d: Double)    = untilNowPlusMs((WeekMs * d).toLong)
  def untilNowPlusDays(d: Double)     = untilNowPlusMs((DayMs * d).toLong)

  def aroundNow(d: FiniteDuration) = fromNowMinus(d).untilNowPlus(d)
  def aroundNowMs(d: Long)         = fromNowMinusMs(d).untilNowPlusMs(d)
  def aroundNowDays(d: Double)     = fromNowMinusDays(d).untilNowPlusDays(d)
  def aroundNowMonths(d: Double)   = fromNowMinusMonths(d).untilNowPlusMonths(d)
  def aroundNowWeeks(d: Double)    = fromNowMinusWeeks(d).untilNowPlusWeeks(d)
  def aroundNowYears(d: Double)    = fromNowMinusYears(d).untilNowPlusYears(d)

  def withNowGen(g: Gen[Now]): DateTimeBuilder =
    copy(genNow = g)

  def withNow(now: => Now): DateTimeBuilder =
    withNowGen(Gen point now)

  def withNowMs(nowMs: => Long): DateTimeBuilder =
    withNowGen(Gen point Now(nowMs))

  // Re-reads the live clock each time a value is generated.
  def withNowLive: DateTimeBuilder =
    withNowGen(Now.genNowByName)

  // ===================================================================================================================

  lazy val asEpochMs: Gen[Long] = {
    def specToFn(s: TimeSpec#Spec): Long => Long = s.fold(Function const, identity)
    (past.past, future.future) match {
      case (Left(a), Left(b)) =>
        Gen.chooseLong(a, b)
      case (x, y) =>
        val a = specToFn(x)
        val b = specToFn(y)
        genNow.flatMap(now => Gen.chooseLong(a(now.millisSinceEpoch), b(now.millisSinceEpoch)))
    }
  }

  def asDate: Gen[Date] =
    asEpochMs.map(new Date(_))
} 
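
A minimal usage sketch for the builder above (assuming Gen.Now.genNowByName, referenced in withNowLive, supplies the implicit Gen[Now]; the sampling side of nyaya's Gen API is not shown here):

import java.util.Date
import scala.concurrent.duration._
import nyaya.gen.{DateTimeBuilder, Gen}

// Dates within a week of "now", where "now" is re-read each time a value is generated.
implicit val genNow: Gen[Gen.Now] = Gen.Now.genNowByName
val dates: Gen[Date] = DateTimeBuilder.default.aroundNow(7.days).asDate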
Example 58
Source File: IndefiniteStreamParquetSink.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s
import akka.stream.FlowShape
import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, Keep, Sink, ZipWith}
import com.github.mjakubowski84.parquet4s.ParquetWriter.ParquetWriterFactory
import org.apache.hadoop.fs.Path
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.duration.FiniteDuration


private[parquet4s] object IndefiniteStreamParquetSink extends IOOps {

  protected val logger: Logger = LoggerFactory.getLogger(this.getClass)

  def apply[In, ToWrite: ParquetWriterFactory, Mat](path: Path,
                                                    maxChunkSize: Int,
                                                    chunkWriteTimeWindow: FiniteDuration,
                                                    buildChunkPath: ChunkPathBuilder[In] = ChunkPathBuilder.default,
                                                    preWriteTransformation: In => ToWrite = identity[In] _,
                                                    postWriteSink: Sink[Seq[In], Mat] = Sink.ignore,
                                                    options: ParquetWriter.Options = ParquetWriter.Options()
                                            ): Sink[In, Mat] = {
    validateWritePath(path, options)

    val internalFlow = Flow.fromGraph(GraphDSL.create() { implicit b =>
      import GraphDSL.Implicits._
    
      val inChunkFlow = b.add(Flow[In].groupedWithin(maxChunkSize, chunkWriteTimeWindow))
      val broadcastChunks = b.add(Broadcast[Seq[In]](outputPorts = 2))
      val writeFlow = Flow[Seq[In]].map { chunk =>
        val toWrite = chunk.map(preWriteTransformation)
        val chunkPath = buildChunkPath(path, chunk)
        if (logger.isDebugEnabled()) logger.debug(s"Writing ${toWrite.size} records to $chunkPath")
        ParquetWriter.writeAndClose(chunkPath.toString, toWrite, options)
      }
      val zip = b.add(ZipWith[Seq[In], Unit, Seq[In]]((chunk, _) => chunk))
      
      inChunkFlow ~> broadcastChunks ~> writeFlow ~> zip.in1
                     broadcastChunks ~> zip.in0

      FlowShape(inChunkFlow.in, zip.out)               
    })

    internalFlow.toMat(postWriteSink)(Keep.right)
  }

} 
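
The time-window batching above comes from Akka Streams' groupedWithin: a chunk is emitted once it holds maxChunkSize elements or once chunkWriteTimeWindow elapses, whichever happens first. A standalone sketch of just that operator:

import akka.NotUsed
import akka.stream.scaladsl.Flow
import scala.concurrent.duration._

// Batches of at most 64 elements, flushed at least every 30 seconds.
val batching: Flow[Int, Seq[Int], NotUsed] = Flow[Int].groupedWithin(64, 30.seconds)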
Example 59
Source File: MDCPropagatingDispatcherConfigurator.scala    From daf   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package it.gov.daf.common.monitoring

import java.util.concurrent.TimeUnit

import akka.dispatch._
import com.typesafe.config.Config
import org.slf4j.MDC

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.{Duration, FiniteDuration}


class MDCPropagatingDispatcher(_configurator: MessageDispatcherConfigurator,
                               id: String,
                               throughput: Int,
                               throughputDeadlineTime: Duration,
                               executorServiceFactoryProvider: ExecutorServiceFactoryProvider,
                               shutdownTimeout: FiniteDuration)
  extends Dispatcher(_configurator, id, throughput, throughputDeadlineTime, executorServiceFactoryProvider, shutdownTimeout ) {

  self =>

  override def prepare(): ExecutionContext = new ExecutionContext {
    // capture the MDC
    val mdcContext = MDC.getCopyOfContextMap
    //val parent = Thread.currentThread().getId

    def execute(r: Runnable) = self.execute(new Runnable {
      def run() = {
        // backup the callee MDC context
        val oldMDCContext = MDC.getCopyOfContextMap

        // Run the runnable with the captured context
        setContextMap(mdcContext)
        //println(s"setto ${Thread.currentThread().getId} - $mdcContext - from $parent")
        try {
          r.run()
        } finally {
          // restore the callee MDC context

          setContextMap(oldMDCContext)
          //println(s"ripristino ${Thread.currentThread().getId} - $oldMDCContext - from $parent")
        }
      }
    })
    def reportFailure(t: Throwable) = self.reportFailure(t)
  }

  private[this] def setContextMap(context: java.util.Map[String, String]):Unit = {
    if (context == null) {
      MDC.clear()
    } else {
      MDC.setContextMap(context)
    }
  }

} 
Example 60
Source File: Chronometer.scala    From lila-ws   with GNU Affero General Public License v3.0 5 votes vote down vote up
package lila.ws.util

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ ExecutionContext, Future }

object Chronometer {

  case class Lap[A](result: A, nanos: Long) {

    def millis = (nanos / 1000000).toInt
    def micros = (nanos / 1000).toInt

    def logIfSlow(threshold: Int)(msg: A => String) = {
      if (millis >= threshold) println(s"<${millis}ms> ${msg(result)}")
      this
    }

    def pp: A = {
      println(s"chrono $showDuration")
      result
    }

    def pp(msg: String): A = {
      println(s"chrono $msg - $showDuration")
      result
    }
    def ppIfGt(msg: String, duration: FiniteDuration): A =
      if (nanos > duration.toNanos) pp(msg)
      else result

    def showDuration: String = if (millis >= 1) f"$millis%.2f ms" else s"$micros micros"
  }

  case class FuLap[A](lap: Future[Lap[A]]) extends AnyVal {

    def logIfSlow(threshold: Int)(msg: A => String)(implicit ec: ExecutionContext) = {
      lap.foreach(_.logIfSlow(threshold)(msg))
      this
    }

    def pp(implicit ec: ExecutionContext): Future[A]              = lap map (_.pp)
    def pp(msg: String)(implicit ec: ExecutionContext): Future[A] = lap map (_ pp msg)
    def ppIfGt(msg: String, duration: FiniteDuration)(implicit ec: ExecutionContext): Future[A] =
      lap map (_.ppIfGt(msg, duration))

    def result(implicit ec: ExecutionContext) = lap.map(_.result)
  }

  def apply[A](f: => Future[A])(implicit ec: ExecutionContext): FuLap[A] = {
    val start = nowNanos
    FuLap(f map { Lap(_, nowNanos - start) })
  }

  def sync[A](f: => A): Lap[A] = {
    val start = nowNanos
    val res   = f
    Lap(res, nowNanos - start)
  }

  def syncEffect[A](f: => A)(effect: Lap[A] => Unit): A = {
    val lap = sync(f)
    effect(lap)
    lap.result
  }

  def nowNanos: Long = System.nanoTime()
} 
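
A usage sketch (expensiveComputation is a hypothetical stand-in defined only for this example):

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._
import lila.ws.util.Chronometer

def expensiveComputation(): Int = (1 to 1000000).sum  // stand-in workload

val timed: Future[Int] =
  Chronometer(Future(expensiveComputation()))      // wrap the Future and start timing
    .logIfSlow(100)(r => s"slow computation: $r")  // print if it took at least 100 ms
    .ppIfGt("computation", 50.millis)              // also print if it took over 50 ms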
Example 61
Source File: GroupedWithin.scala    From lila-ws   with GNU Affero General Public License v3.0 5 votes vote down vote up
package lila.ws
package util

import akka.actor.typed.Scheduler
import akka.actor.Cancellable
import scala.collection.immutable.VectorBuilder
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.ExecutionContext

final class GroupedWithin()(implicit scheduler: Scheduler, ec: ExecutionContext) {

  def apply[A](nb: Int, interval: FiniteDuration)(emit: Emit[Vector[A]]) =
    new GroupedWithinStage[A](nb, interval, emit)
}

final class GroupedWithinStage[A](
    nb: Int,
    interval: FiniteDuration,
    emit: Emit[Vector[A]]
)(implicit
    scheduler: Scheduler,
    ec: ExecutionContext
) {

  private val buffer: VectorBuilder[A] = new VectorBuilder

  private var scheduledFlush: Cancellable = scheduler.scheduleOnce(interval, () => flush)

  def apply(elem: A): Unit =
    synchronized {
      buffer += elem
      if (buffer.size >= nb) unsafeFlush
    }

  private def flush(): Unit = synchronized { unsafeFlush }

  private def unsafeFlush(): Unit = {
    if (buffer.nonEmpty) {
      emit(buffer.result)
      buffer.clear
    }
    scheduledFlush.cancel
    scheduledFlush = scheduler.scheduleOnce(interval, () => flush)
  }
} 
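
A usage sketch, assuming an akka-typed Scheduler and an ExecutionContext are implicitly in scope, and that Emit[A] is lila-ws's alias for A => Unit:

import scala.concurrent.duration._

val groupedWithin = new GroupedWithin()
val batcher = groupedWithin[String](nb = 50, interval = 1.second) { batch =>
  println(s"flushing ${batch.size} messages")  // receives a Vector[String]
}
batcher("hello")  // buffered; flushed after 50 elements or 1 second, whichever comes first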
Example 62
Source File: DedupEmit.scala    From lila-ws   with GNU Affero General Public License v3.0 5 votes vote down vote up
package lila.ws
package util

import akka.actor.typed.Scheduler
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.ExecutionContext

final class DedupEmit[A](interval: FiniteDuration)(emit: Emit[A])(implicit
    scheduler: Scheduler,
    ec: ExecutionContext
) {

  // don't care about race conditions,
  // this is just about not sending the same message too many times
  private var seen = Set.empty[A]

  def apply(a: A): Unit =
    if (!seen(a)) {
      seen = seen + a
      emit(a)
    }

  scheduler.scheduleWithFixedDelay(interval, interval) { () => seen = Set.empty }
} 
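
A usage sketch (again assuming an implicit Scheduler and ExecutionContext; the emit target here just prints):

import scala.concurrent.duration._

val dedup = new DedupEmit[String](5.seconds)(msg => println(s"emitting $msg"))
dedup("user-connected")
dedup("user-connected")  // dropped: already seen in the current 5-second window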
Example 63
Source File: DOMScheduler.scala    From suzaku   with Apache License 2.0 5 votes vote down vote up
package suzaku.platform.web

import org.scalajs.dom
import suzaku.platform.{Cancellable, Scheduler}

import scala.concurrent.duration.FiniteDuration
import scala.scalajs.js.timers._

class DOMScheduler extends Scheduler {

  private class TimeoutCancellable(handle: SetTimeoutHandle) extends Cancellable {
    var isCancelled = false

    override def cancel(): Unit = {
      isCancelled = true
      clearTimeout(handle)
    }
  }

  private class IntervalCancellable(handle: SetIntervalHandle) extends Cancellable {
    var isCancelled = false

    override def cancel(): Unit = {
      isCancelled = true
      clearInterval(handle)
    }
  }

  private class FrameCancellable extends Cancellable {
    var isCancelled = false

    override def cancel(): Unit = {
      isCancelled = true
    }
  }

  override def scheduleOnce(after: FiniteDuration, callback: ScheduleCB): Cancellable = {
    new TimeoutCancellable(setTimeout(after) {
      val time = (dom.window.performance.now() * 1e6).toLong
      callback(time)
    })
  }

  override def schedule(interval: FiniteDuration, callback: ScheduleCB): Cancellable = {
    new IntervalCancellable(setInterval(interval) {
      val time = (dom.window.performance.now() * 1e6).toLong
      callback(time)
    })
  }

  private def frameCB(cancellable: FrameCancellable, callback: ScheduleCB)(time: Double): Unit = {
    if (!cancellable.isCancelled) {
      callback((time * 1e6).toLong)
      dom.window.requestAnimationFrame(frameCB(cancellable, callback) _)
    }
  }

  override def scheduleFrame(callback: ScheduleCB): Cancellable = {
    val cancellable = new FrameCancellable
    dom.window.requestAnimationFrame(frameCB(cancellable, callback) _)
    cancellable
  }
} 
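
A usage sketch (Scala.js), assuming ScheduleCB is a Long => Unit callback receiving the current time in nanoseconds, as the conversions above suggest:

import scala.concurrent.duration._

val scheduler = new DOMScheduler
val tick = scheduler.schedule(500.millis, t => println(s"tick at $t ns"))
// ... later, stop the recurring callback:
tick.cancel()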
Example 64
Source File: Stream.scala    From pulse   with Apache License 2.0 5 votes vote down vote up
package io.phdata.pulse

import io.phdata.pulse.log.{ HttpManager, JsonParser }
import monix.reactive.subjects.ConcurrentSubject
import monix.execution.Scheduler.Implicits.global
import monix.reactive.OverflowStrategy
import org.apache.log4j.helpers.LogLog
import org.apache.log4j.spi.LoggingEvent

import scala.concurrent.duration.FiniteDuration
import scala.util.{ Failure, Success, Try }

abstract class Stream[E](flushDuration: FiniteDuration, flushSize: Int, maxBuffer: Int) {

  val overflowStrategy = OverflowStrategy.DropNewAndSignal(maxBuffer, (_: Long) => None)
  val subject          = ConcurrentSubject.publish[E](overflowStrategy)

  subject
    .bufferTimedAndCounted(flushDuration, flushSize)
    .map(save)
    .subscribe()

  def append(value: E): Unit =
    Try { subject.onNext(value) } match {
      case Success(_) => ()
      case Failure(e) => LogLog.error("Error appending to stream", e)
    }

  def save(values: Seq[E])

}

class HttpStream(flushDuration: FiniteDuration,
                 flushSize: Int,
                 maxBuffer: Int,
                 httpManager: HttpManager)
    extends Stream[LoggingEvent](flushDuration, flushSize, maxBuffer) {

  val jsonParser = new JsonParser

  override def save(values: Seq[LoggingEvent]): Unit = {
    val logArray = values.toArray
    LogLog.debug(s"Flushing ${logArray.length} messages")
    val logMessage = jsonParser.marshallArray(logArray)

    httpManager.send(logMessage)
  }
} 
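
A minimal concrete subclass for illustration: it buffers strings and flushes them to stdout every 2 seconds or every 100 entries, whichever comes first.

import scala.concurrent.duration._

class ConsoleStream extends Stream[String](flushDuration = 2.seconds, flushSize = 100, maxBuffer = 10000) {
  override def save(values: Seq[String]): Unit = values.foreach(println)
}

new ConsoleStream().append("application started")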
Example 65
Source File: AmqpConnectionFactory.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.transport.amqp

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import com.rabbitmq.client.{Connection, ConnectionFactory}
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.ArbitraryTypeReader

import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.concurrent._
import scala.util._

object AmqpConnectionFactory {
  private final val DEFAULT_RETRY_CONFIG = AmqpConnectionRetry(
    count = 10,
    delay = Duration(5, TimeUnit.SECONDS)
  )

  def connect(actorSystem: ActorSystem): Future[Connection] = {
    import ArbitraryTypeReader._
    import actorSystem.dispatcher
    val config = actorSystem.settings.config.as[AmqpConfig]("amqp")
    connect(config)
  }

  def connect(config: AmqpConfig)
             (implicit executionContext: ExecutionContext): Future[Connection] =
    Future {
      blocking {
        val factory = new ConnectionFactory()
        config.port.foreach(factory.setPort)
        config.virtualHost.foreach(factory.setVirtualHost)
        config.userName.foreach(factory.setUsername)
        config.password.foreach(factory.setPassword)
        factory.setAutomaticRecoveryEnabled(true)
        val retryConfig = config.retry.getOrElse(DEFAULT_RETRY_CONFIG)
        retry(
          n = retryConfig.count,
          delay = retryConfig.delay.toMillis) {

          Try {
            // Could be IOException or TimeoutException
            val addresses = config.hosts.map(com.rabbitmq.client.Address.parseAddress).toArray
            factory.newConnection(addresses)
          }
        }
      }
    }

  private def retry[T](n: Int, delay: Long)(fn: => Try[T]): T = {
    fn match {
      case Success(x) => x
      case _ if n > 1 =>
        Thread.sleep(delay)
        retry(n - 1, delay)(fn)
      case Failure(e) => throw e
    }
  }
}

case class AmqpConfig(hosts: Seq[String],
                      port: Option[Int],
                      virtualHost: Option[String] = None,
                      userName: Option[String] = None,
                      password: Option[String] = None,
                      retry: Option[AmqpConnectionRetry] = None)

case class AmqpConnectionRetry(count: Int,
                               delay: FiniteDuration) 
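
A usage sketch connecting with an explicit config and a custom retry policy:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

val config = AmqpConfig(
  hosts = Seq("rabbitmq-1:5672", "rabbitmq-2:5672"),
  port = None,  // ports are taken from the host strings here
  retry = Some(AmqpConnectionRetry(count = 5, delay = 2.seconds))
)
val connection = AmqpConnectionFactory.connect(config)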
Example 66
Source File: QueueActor.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.transport.inmem

import akka.pattern._
import akka.actor.{Actor, ActorLogging, ActorRef, Props, Stash}
import akka.routing.{RoundRobinRoutingLogic, Routee, Router}
import akka.util.Timeout
import rhttpc.transport.{Message, RejectingMessage}

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal

private class QueueActor(consumeTimeout: FiniteDuration,
                         retryDelay: FiniteDuration) extends Actor with Stash with ActorLogging {

  import context.dispatcher

  private var consumers = Map.empty[ActorRef, AskingActorRefRouteeWithSpecifiedMessageType]

  private var router = Router(RoundRobinRoutingLogic(), collection.immutable.IndexedSeq.empty)

  override def receive: Receive = {
    case RegisterConsumer(consumer, fullMessage) =>
      val routee = AskingActorRefRouteeWithSpecifiedMessageType(consumer, consumeTimeout, handleResponse, fullMessage)
      consumers += consumer -> routee
      router = router.addRoutee(routee)
      log.debug(s"${self.path.name}: registered consumer, unstashing")
      unstashAll()
    case UnregisterConsumer(consumer) =>
      log.debug(s"${self.path.name}: unregistered consumer")
      consumers.get(consumer).foreach { routee =>
        consumers -= consumer
        router = router.removeRoutee(routee)
      }
      sender() ! ((): Unit)
    case msg: Message[_] =>
      if (consumers.isEmpty) {
        log.debug(s"${self.path.name}: got message when no consumer registered, stashing")
        stash()
        implicit val timeout = Timeout(consumeTimeout)
        sender() ! ((): Unit)
      } else {
        router.route(msg, sender())
      }
  }

  private def handleResponse(future: Future[Any], msg: Message[_]): Unit =
    future.recover {
      case ex: AskTimeoutException =>
        log.error(ex, s"${self.path.name}: REJECT [${msg.content.getClass.getName}] because of ask timeout")
      case ex: Exception with RejectingMessage =>
        log.error(ex, s"${self.path.name}: REJECT [${msg.content.getClass.getName}] because of rejecting failure")
      case NonFatal(ex) =>
        log.error(ex, s"${self.path.name}: will RETRY [${msg.content.getClass.getName}] after $retryDelay because of failure")
        context.system.scheduler.scheduleOnce(retryDelay, self, msg)
    }

}

object QueueActor {
  def props(consumeTimeout: FiniteDuration,
            retryDelay: FiniteDuration): Props = Props(
    new QueueActor(
      consumeTimeout = consumeTimeout,
      retryDelay = retryDelay))
}

private[inmem] case class AskingActorRefRouteeWithSpecifiedMessageType(ref: ActorRef,
                                                                       askTimeout: FiniteDuration,
                                                                       handleResponse: (Future[Any], Message[_]) => Unit,
                                                                       fullMessage: Boolean)
  extends Routee {

  override def send(message: Any, sender: ActorRef): Unit = {
    val typedMessage = message.asInstanceOf[Message[_]]
    val msgToSend = if (fullMessage) message else typedMessage.content
    handleResponse(ref.ask(msgToSend)(askTimeout, sender), typedMessage)
  }
}

private[inmem] case class RegisterConsumer(consumer: ActorRef, fullMessage: Boolean)

private[inmem] case class UnregisterConsumer(consumer: ActorRef) 
Example 67
Source File: InMemTransport.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.transport.inmem

import akka.actor.{ActorRef, ActorSystem}
import akka.pattern._
import akka.util.Timeout
import rhttpc.transport.{InboundQueueData, Publisher, _}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, Future}

private[inmem] class InMemTransport(transportActor: ActorRef) // TODO: stopping of transports / actors
                                   (createTimeout: FiniteDuration,
                                    stopConsumingTimeout: FiniteDuration,
                                    stopTimeout: FiniteDuration)
                                   (implicit system: ActorSystem) extends PubSubTransport {

  import system.dispatcher

  override def publisher[PubMsg: Serializer](queueData: OutboundQueueData): Publisher[PubMsg] = {
    val queueActor = getOrCreateQueueActor(queueData.name)
    new InMemPublisher[PubMsg](queueActor)
  }

  override def subscriber[SubMsg: Deserializer](queueData: InboundQueueData, consumer: ActorRef): Subscriber[SubMsg] = {
    val queueActor = getOrCreateQueueActor(queueData.name)
    new InMemSubscriber[SubMsg](queueActor, consumer, fullMessage = false)(stopConsumingTimeout)
  }

  override def fullMessageSubscriber[SubMsg: Deserializer](queueData: InboundQueueData, consumer: ActorRef): Subscriber[SubMsg] = {
    val queueActor = getOrCreateQueueActor(queueData.name)
    new InMemSubscriber[SubMsg](queueActor, consumer, fullMessage = true)(stopConsumingTimeout)
  }

  private def getOrCreateQueueActor(name: String): ActorRef = {
    implicit val timeout = Timeout(createTimeout)
    Await.result((transportActor ? GetOrCreateQueue(name)).mapTo[ActorRef], createTimeout)
  }

  override def stop(): Future[Unit] = gracefulStop(transportActor, stopTimeout).map(_ => ())
}

object InMemTransport {
  def apply(createTimeout: FiniteDuration = InMemDefaults.createTimeout,
            consumeTimeout: FiniteDuration = InMemDefaults.consumeTimeout,
            retryDelay: FiniteDuration = InMemDefaults.retryDelay,
            stopConsumingTimeout: FiniteDuration = InMemDefaults.stopConsumingTimeout,
            stopTimeout: FiniteDuration = InMemDefaults.stopTimeout)
           (implicit system: ActorSystem): PubSubTransport = {
    val actor = system.actorOf(TransportActor.props(
      QueueActor.props(
        consumeTimeout = consumeTimeout,
        retryDelay = retryDelay
      )))
    new InMemTransport(actor)(
      createTimeout = createTimeout,
      stopConsumingTimeout = stopConsumingTimeout,
      stopTimeout = stopTimeout)
  }
} 
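
A usage sketch creating the transport with a custom creation timeout (the remaining timeouts keep their InMemDefaults values):

import akka.actor.ActorSystem
import scala.concurrent.duration._

implicit val system: ActorSystem = ActorSystem("transport-demo")
val transport = InMemTransport(createTimeout = 3.seconds)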
Example 68
Source File: InMemSubscriber.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.transport.inmem

import akka.actor.ActorRef
import akka.pattern._
import akka.util.Timeout
import rhttpc.transport.Subscriber

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

class InMemSubscriber[Msg](queueActor: ActorRef,
                           consumer: ActorRef,
                           fullMessage: Boolean)
                          (stopTimeout: FiniteDuration) extends Subscriber[Msg] {

  override def start(): Unit = {
    queueActor ! RegisterConsumer(consumer, fullMessage)
  }

  override def stop(): Future[Unit] = {
    implicit val timeout = Timeout(stopTimeout)
    (queueActor ? UnregisterConsumer(consumer)).mapTo[Unit]
  }

} 
Example 69
Source File: AmqpJdbcScheduler.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.transport.amqpjdbc

import akka.actor.{Cancellable, Scheduler}
import org.slf4j.LoggerFactory
import rhttpc.transport.SerializingPublisher.SerializedMessage
import rhttpc.transport._

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}

private[amqpjdbc] trait AmqpJdbcScheduler[PubMsg] {

  def schedule(msg: Message[PubMsg], delay: FiniteDuration): Future[Unit]

  def start(): Unit

  def stop(): Future[Unit]

}

private[amqpjdbc] class AmqpJdbcSchedulerImpl[PubMsg](scheduler: Scheduler,
                                                      checkInterval: FiniteDuration,
                                                      repo: ScheduledMessagesRepository,
                                                      queueName: String,
                                                      batchSize: Int,
                                                      publisher: SerializingPublisher[PubMsg])
                                                     (implicit ec: ExecutionContext,
                                                      serializer: Serializer[PubMsg]) extends AmqpJdbcScheduler[PubMsg] {
  private val logger = LoggerFactory.getLogger(getClass)

  private var ran: Boolean = false
  private var scheduledCheck: Option[Cancellable] = None
  private var currentPublishedFetchedFuture: Future[Int] = Future.successful(0)

  override def schedule(msg: Message[PubMsg], delay: FiniteDuration): Future[Unit] = {
    val serialized = serializer.serialize(msg.content)
    repo.save(MessageToSchedule(queueName, serialized, msg.properties, delay))
  }

  override def start(): Unit = {
    synchronized {
      if (!ran) {
        ran = true
        publishFetchedMessagesThenReschedule()
      }
    }
  }

  private def publishFetchedMessagesThenReschedule(): Unit = {
    synchronized {
      if (ran) {
        val publishedFetchedFuture = repo.fetchMessagesShouldByRun(queueName, batchSize)(publish)
        currentPublishedFetchedFuture = publishedFetchedFuture
        publishedFetchedFuture onComplete handlePublicationResult
      }
    }
  }

  private def publish(messages: Seq[ScheduledMessage]): Future[Seq[Unit]] = {
    if (messages.nonEmpty) {
      logger.debug(s"Fetched ${messages.size}, publishing")
    }
    val handlingFutures = messages.map { message =>
      publisher.publishSerialized(SerializedMessage(message.content.getBytes(), message.properties))
    }
    Future.sequence(handlingFutures)
  }

  private def handlePublicationResult(tryResult: Try[Int]): Unit = {
    tryResult match {
      case Failure(ex) =>
        logger.error("Exception while publishing fetched messages", ex)
      case _ =>
    }
    synchronized {
      if (ran) {
        scheduledCheck = Some(scheduler.scheduleOnce(checkInterval)(publishFetchedMessagesThenReschedule()))
      } else {
        logger.debug(s"Scheduler is stopping, next check will be skipped")
      }
    }
  }

  override def stop(): Future[Unit] = {
    synchronized {
      scheduledCheck.foreach(_.cancel())
      ran = false
      currentPublishedFetchedFuture.map(_ => ())
    }
  }

} 
Example 70
Source File: FallbackPublisher.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.transport.fallback

import akka.actor.{ActorSystem, Scheduler}
import akka.pattern.CircuitBreaker
import org.slf4j.LoggerFactory
import rhttpc.transport.{Message, Publisher}

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal

private[fallback] class FallbackPublisher[Msg](main: Publisher[Msg],
                                               fallback: Publisher[Msg])
                                              (maxFailures: Int,
                                               callTimeout: FiniteDuration,
                                               resetTimeout: FiniteDuration)
                                              (implicit system: ActorSystem) extends Publisher[Msg] {

  import system.dispatcher

  private val logger = LoggerFactory.getLogger(getClass)

  private val circuitBreaker = new CircuitBreaker(system.scheduler, maxFailures, callTimeout, resetTimeout)
    .onOpen(logger.debug("Circuit opened"))
    .onHalfOpen(logger.debug("Circuit half-opened"))
    .onClose(logger.debug("Circuit closed"))

  override def publish(msg: Message[Msg]): Future[Unit] = {
    circuitBreaker.withCircuitBreaker(main.publish(msg)).recoverWith {
      case NonFatal(ex) =>
        logger.debug(s"Main transport failed (${ex.getMessage}); sending message [${msg.getClass.getName}] to fallback transport")
        fallback.publish(msg)
    }
  }

  override def start(): Unit = {
    main.start()
    fallback.start()
  }

  override def stop(): Future[Unit] = {
    import rhttpc.utils.Recovered._
    recoveredFuture("stopping main publisher", main.stop())
      .flatMap(_ => recoveredFuture("stopping fallback publisher", fallback.stop()))
  }
} 
Example 71
Source File: FallbackTransport.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.transport.fallback

import akka.actor.{ActorRef, ActorSystem}
import rhttpc.transport._

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

class FallbackTransport(main: PubSubTransport,
                        fallback: PubSubTransport)
                       (maxFailures: Int,
                        callTimeout: FiniteDuration,
                        resetTimeout: FiniteDuration)
                       (implicit system: ActorSystem) extends PubSubTransport {

  import system.dispatcher

  override def publisher[PubMsg: Serializer](queueData: OutboundQueueData): Publisher[PubMsg] =
    new FallbackPublisher[PubMsg](
      main = main.publisher(queueData),
      fallback = fallback.publisher(queueData))(
      maxFailures = maxFailures,
      callTimeout = callTimeout,
      resetTimeout = resetTimeout
    )

  override def subscriber[SubMsg: Deserializer](queueData: InboundQueueData, consumer: ActorRef): Subscriber[SubMsg] =
    new SubscriberAggregate[SubMsg](Seq(
      main.subscriber(queueData, consumer),
      fallback.subscriber(queueData, consumer)
    ))

  override def fullMessageSubscriber[SubMsg: Deserializer](queueData: InboundQueueData, consumer: ActorRef): Subscriber[SubMsg] =
    new SubscriberAggregate[SubMsg](Seq(
      main.fullMessageSubscriber(queueData, consumer),
      fallback.fullMessageSubscriber(queueData, consumer)
    ))

  override def stop(): Future[Unit] = {
    import rhttpc.utils.Recovered._
    recoveredFuture("stopping main transport", main.stop())
      .flatMap(_ => recoveredFuture("stopping fallback transport", fallback.stop()))
  }

} 
Example 72
Source File: InMemDelayedEchoClient.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.sample

import java.util.UUID

import akka.actor.{ActorRef, ActorSystem}
import akka.util.Timeout
import rhttpc.client.subscription._

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}

class InMemDelayedEchoClient(delay: FiniteDuration)(implicit system: ActorSystem) extends DelayedEchoClient {
  import system.dispatcher

  private val subOnMsg: collection.concurrent.Map[SubscriptionOnResponse, String] = collection.concurrent.TrieMap()

  val subscriptionManager: SubscriptionManager =
    new SubscriptionManager {

      override def confirmOrRegister(subscription: SubscriptionOnResponse, consumer: ActorRef): Unit = {
        system.scheduler.scheduleOnce(delay) {
          subOnMsg.remove(subscription).foreach { msg =>
            consumer ! msg
          }
        }
      }

      override def start(): Unit = {}

      override def stop(): Future[Unit] = Future.successful(())
    }

  override def requestResponse(msg: String): ReplyFuture = {
    val uniqueSubOnResponse = SubscriptionOnResponse(UUID.randomUUID().toString)
    subOnMsg.put(uniqueSubOnResponse, msg)
    new ReplyFuture {
      override def pipeTo(listener: PublicationListener)
                         (implicit ec: ExecutionContext): Unit = {
        listener.subscriptionPromiseRegistered(uniqueSubOnResponse)
        listener.self ! RequestPublished(uniqueSubOnResponse)
      }

      override def toFuture(implicit system: ActorSystem, timeout: Timeout): Future[Any] = ???
    }
  }
} 
Example 73
Source File: Request.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.client.protocol

import java.time.{Instant, LocalDateTime}

import scala.concurrent.duration.FiniteDuration

case class Request[+T](correlated: Correlated[T], attempt: Int, lastPlannedDelay: Option[FiniteDuration], firstAttemptTimestamp: Instant) {
  def msg = correlated.msg

  def correlationId = correlated.correlationId

  def isFirstAttempt: Boolean = attempt == 1

  def nextAttempt: Request[T] =
    copy(attempt = attempt + 1)
}

object Request {
  def apply[T](correlated: Correlated[T], attempt: Int, lastPlannedDelay: FiniteDuration, firstAttemptTimestamp: Instant): Request[T] = {
    Request(
      correlated = correlated,
      attempt = attempt,
      lastPlannedDelay = Some(lastPlannedDelay),
      firstAttemptTimestamp = firstAttemptTimestamp
    )
  }

  def firstAttempt[T](correlated: Correlated[T], firstAttemptTimestamp: Instant): Request[T] = {
    Request(
      correlated = correlated,
      attempt = 1,
      lastPlannedDelay = None,
      firstAttemptTimestamp = firstAttemptTimestamp
    )
  }
} 
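
A usage sketch, assuming Correlated pairs a message with a correlation id (its definition is not shown here):

import java.time.Instant
import scala.concurrent.duration._

val first  = Request.firstAttempt(Correlated("ping", "corr-1"), Instant.now())
val second = first.nextAttempt.copy(lastPlannedDelay = Some(30.seconds))
// second.attempt == 2, second.isFirstAttempt == false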
Example 74
Source File: Tools.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package kvstore

import akka.actor.ActorSystem
import scala.concurrent.duration.FiniteDuration
import akka.testkit.TestProbe
import akka.actor.{ ActorRef, Actor }
import org.scalatest.Matchers
import org.scalatest.FunSuiteLike
import akka.actor.Props
import akka.testkit.TestKit
import akka.testkit.ImplicitSender
import scala.concurrent.duration._

object Tools {
  class TestRefWrappingActor(val probe: TestProbe) extends Actor {
    def receive = { case msg => probe.ref forward msg }
  }
}


trait Tools { this: TestKit with FunSuiteLike with Matchers with ImplicitSender =>

  import Arbiter._
  import Tools._

  def probeProps(probe: TestProbe): Props = Props(classOf[TestRefWrappingActor], probe)

  class Session(val probe: TestProbe, val replica: ActorRef) {
    import Replica._

    @volatile private var seq = 0L
    private def nextSeq: Long = {
      val next = seq
      seq += 1
      next
    }

    @volatile private var referenceMap = Map.empty[String, String]

    def waitAck(s: Long): Unit = probe.expectMsg(OperationAck(s))

    def waitFailed(s: Long): Unit = probe.expectMsg(OperationFailed(s))

    def set(key: String, value: String): Long = {
      referenceMap += key -> value
      val s = nextSeq
      probe.send(replica, Insert(key, value, s))
      s
    }

    def setAcked(key: String, value: String): Unit = waitAck(set(key, value))

    def remove(key: String): Long = {
      referenceMap -= key
      val s = nextSeq
      probe.send(replica, Remove(key, s))
      s
    }

    def removeAcked(key: String): Unit = waitAck(remove(key))

    def getAndVerify(key: String): Unit = {
      val s = nextSeq
      probe.send(replica, Get(key, s))
      probe.expectMsg(GetResult(key, referenceMap.get(key), s))
    }

    def get(key: String): Option[String] = {
      val s = nextSeq
      probe.send(replica, Get(key, s))
      probe.expectMsgType[GetResult].valueOption
    }

    def nothingHappens(duration: FiniteDuration): Unit = probe.expectNoMsg(duration)
  }

  def session(replica: ActorRef)(implicit system: ActorSystem) = new Session(TestProbe(), replica)


} 
Example 75
Source File: SidechainDeliveryTracker.scala    From Sidechains-SDK   with MIT License 5 votes vote down vote up
package com.horizen.network

import akka.actor.{ActorRef, ActorSystem}
import scorex.core.network.ModifiersStatus.{Received, Requested}
import scorex.core.network.{ConnectedPeer, DeliveryTracker, ModifiersStatus}
import scorex.util.ModifierId

import scala.concurrent.duration.FiniteDuration

class SidechainDeliveryTracker(system: ActorSystem,
                               deliveryTimeout: FiniteDuration,
                               maxDeliveryChecks: Int,
                               nvsRef: ActorRef)
  extends DeliveryTracker(system, deliveryTimeout, maxDeliveryChecks, nvsRef) {

  def peerInfo(id: ModifierId): Option[ConnectedPeer] = {
    val modifierStatus: ModifiersStatus = status(id)
    modifierStatus match {
      case Requested =>
        requested.get(id).flatMap(_.peer)
      case Received =>
        received.get(id)
      case _ =>
        None
    }
  }
} 
Example 76
Source File: DefaultWebSocketReconnectionHandler.scala    From Sidechains-SDK   with MIT License 5 votes vote down vote up
package com.horizen.websocket

import scorex.util.ScorexLogging
import com.horizen.WebSocketSettings

import scala.concurrent.duration.FiniteDuration

class DefaultWebSocketReconnectionHandler(conf: WebSocketSettings) extends WebSocketReconnectionHandler with ScorexLogging {

  var onConnectFailureCounter = 0

  override def onConnectionFailed(cause: Throwable): Boolean = {
    onConnectFailureCounter = onConnectFailureCounter + 1
    if (onConnectFailureCounter <= conf.reconnectionMaxAttempts) {
      log.info("onConnectFailure. Reconnecting... (attempt " + onConnectFailureCounter + ") " + cause.getMessage)
      true
    }
    else false
  }

  override def onDisconnection(code: DisconnectionCode.Value, reason: String): Boolean = {
    onConnectFailureCounter = onConnectFailureCounter + 1
    if (onConnectFailureCounter <= conf.reconnectionMaxAttempts && code != DisconnectionCode.ON_SUCCESS) {
      log.info("onDisconnect. Reconnecting... (attempt " + onConnectFailureCounter + ")")
      true
    } else false
  }

  override def getDelay: FiniteDuration = conf.reconnectionDelay

  override def onConnectionSuccess(): Unit = onConnectFailureCounter = 0

} 
Example 77
Source File: WebSocketConnectorImpl.scala    From Sidechains-SDK   with MIT License 5 votes vote down vote up
package com.horizen.websocket

import java.net.URI

import javax.websocket.{ClientEndpoint, CloseReason, MessageHandler, SendHandler, SendResult, Session}
import org.glassfish.tyrus.client.{ClientManager, ClientProperties}
import scorex.util.ScorexLogging

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise}
import scala.util.Try

@ClientEndpoint
class WebSocketConnectorImpl(bindAddress: String, connectionTimeout: FiniteDuration, messageHandler: WebSocketMessageHandler, reconnectionHandler: WebSocketReconnectionHandler) extends WebSocketConnector with WebSocketChannel with ScorexLogging {

  private var userSession: Session = _
  private val client = ClientManager.createClient()
  private val reconnectHandler: ClientManager.ReconnectHandler = new ClientManager.ReconnectHandler() {
    
    override def getDelay: Long = {
      reconnectionHandler.getDelay.toSeconds
    }

    // will be executed whenever @OnClose annotated method (or Endpoint.onClose(..)) is executed on client side.
    // this should happen when established connection is lost for any reason
    override def onDisconnect(closeReason: CloseReason): Boolean = {
      log.info("onDisconnect. Reason: " + closeReason.toString)
      if (closeReason.getCloseCode.getCode == 1000)
        reconnectionHandler.onDisconnection(DisconnectionCode.ON_SUCCESS, closeReason.getReasonPhrase)
      else
        reconnectionHandler.onDisconnection(DisconnectionCode.UNEXPECTED, closeReason.getReasonPhrase)
    }

    // is invoked when client fails to connect to remote endpoint
    override def onConnectFailure(exception: Exception): Boolean = reconnectionHandler.onConnectionFailed(exception)
  }

  override def isStarted: Boolean =
    userSession != null && userSession.isOpen

  override def start(): Try[Unit] = Try {

    if (isStarted) throw new IllegalStateException("Connector is already started.")

    client.getProperties.put(ClientProperties.RECONNECT_HANDLER, reconnectHandler)
    client.getProperties.put(ClientProperties.HANDSHAKE_TIMEOUT, String.valueOf(connectionTimeout.toMillis))
    log.info("Starting web socket connector...")
    userSession = client.connectToServer(this, new URI(bindAddress))
    reconnectionHandler.onConnectionSuccess()
    log.info("Web socket connector started.")

    userSession.addMessageHandler(new MessageHandler.Whole[String]() {
      override def onMessage(t: String): Unit = {
        log.info("Message received from server: " + t)
        messageHandler.onReceivedMessage(t)
      }
    })

  }

  override def asyncStart(): Future[Try[Unit]] = {
    val promise: Promise[Try[Unit]] = Promise[Try[Unit]]

    new Thread(new Runnable {
      override def run(): Unit = {
        promise.success(start())
      }
    }).start()

    promise.future
  }

  override def stop(): Try[Unit] = Try {
    log.info("Stopping web socket connector...")
    userSession.close()
    log.info("Web socket connector stopped.")
  }

  override def sendMessage(message: String): Unit = {
    try {
      userSession.getAsyncRemote().sendText(message, new SendHandler {
        override def onResult(sendResult: SendResult): Unit = {
          if (!sendResult.isOK) {
            log.info("Send message failed.")
            messageHandler.onSendMessageErrorOccurred(message, sendResult.getException)
          }
          else log.info("Message sent")
        }
      }
      )
    } catch {
      case e: Throwable => messageHandler.onSendMessageErrorOccurred(message, e)
    }

  }

} 
Example 78
Source File: SidechainSettings.scala    From Sidechains-SDK   with MIT License 5 votes vote down vote up
package com.horizen

import scorex.core.settings.ScorexSettings

import scala.concurrent.duration.FiniteDuration


case class WebSocketSettings(address: String,
                             connectionTimeout: FiniteDuration,
                             reconnectionDelay: FiniteDuration,
                             reconnectionMaxAttempts: Int,
                             zencliCommandLine: String,
                             allowNoConnectionInRegtest: Boolean = true // In Regtest allow to forge new blocks without connection to MC node, for example.
                            )

case class GenesisDataSettings(scGenesisBlockHex: String,
                               scId: String,
                               mcBlockHeight: Int,
                               powData: String,
                               mcNetwork: String,
                               withdrawalEpochLength: Int
                              )

case class withdrawalEpochCertificateSettings(submitterIsEnabled: Boolean,
                                              signersPublicKeys: Seq[String],
                                              signersThreshold: Int,
                                              signersSecrets: Seq[String],
                                              provingKeyFilePath: String,
                                              verificationKeyFilePath: String)

case class WalletSettings(seed: String,
                          genesisSecrets: Seq[String])

case class MainchainSettings(
                              path: String
                            )

case class SidechainSettings(
                              scorexSettings: ScorexSettings,
                              genesisData: GenesisDataSettings,
                              websocket: WebSocketSettings,
                              withdrawalEpochCertificateSettings: withdrawalEpochCertificateSettings,
                              wallet: WalletSettings
                            ) 
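
A usage sketch showing how the FiniteDuration-valued websocket settings are typically constructed:

import scala.concurrent.duration._

val websocketSettings = WebSocketSettings(
  address = "ws://localhost:8888",
  connectionTimeout = 10.seconds,
  reconnectionDelay = 5.seconds,
  reconnectionMaxAttempts = 3,
  zencliCommandLine = "zen-cli"
)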
Example 79
Source File: NodeSettingsReader.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.settings

import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.ValueReader
import scala.concurrent.duration.FiniteDuration

trait NodeSettingsReader {

  implicit val nodeSettingsReader: ValueReader[NodeSettings] = (cfg, path) =>
    NodeSettings(
      cfg.as[Int](s"$path.blocksToKeep"),
      cfg.as[Int](s"$path.modifiersCacheSize"),
      cfg.as[Boolean](s"$path.mining"),
      cfg.as[Int](s"$path.numberOfMiningWorkers"),
      cfg.as[FiniteDuration](s"$path.miningDelay"),
      cfg.as[Boolean](s"$path.offlineGeneration"),
      cfg.as[Boolean](s"$path.useCli")
    )
} 
Example 80
Source File: PowLinearController.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.consensus

import org.encryfoundation.common.modifiers.history.Header
import org.encryfoundation.common.utils.TaggedTypes.{Difficulty, Height}
import supertagged.@@

import scala.concurrent.duration.FiniteDuration

object PowLinearController {

  val PrecisionConstant: Int = 1000000000

  def getDifficulty(previousHeaders: Seq[(Int, Header)], epochLength: Int, desiredBlockInterval: FiniteDuration,
                    initialDifficulty: Difficulty): Difficulty =
    if (previousHeaders.lengthCompare(1) == 0 || previousHeaders.head._2.timestamp >= previousHeaders.last._2.timestamp)
      previousHeaders.head._2.difficulty
    else {
      val data: Seq[(Int, Difficulty)] = previousHeaders.sliding(2).toList.map { d =>
        val start: (Int, Header) = d.head
        val end: (Int, Header) = d.last
        require(end._1 - start._1 == epochLength, s"Incorrect heights interval for $d")
        val diff: @@[BigInt, Difficulty.Tag] = Difficulty @@ (end._2.requiredDifficulty * desiredBlockInterval.toMillis *
          epochLength / (end._2.timestamp - start._2.timestamp))
        (end._1, diff)
      }
      val diff: Difficulty = interpolate(data, epochLength)
      if (diff >= initialDifficulty) diff else initialDifficulty
    }

  
  def getHeightsForRetargetingAt(height: Height, epochLength: Int, retargetingEpochsQty: Int): Seq[Height] = {
    if ((height - 1) % epochLength == 0 && height > epochLength * retargetingEpochsQty)
      (0 to retargetingEpochsQty).reverse.map(i => (height - 1) - i * epochLength)
    else Seq(height - 1)
  }.map(i => Height @@ i)

  // y = a + bx
  private def interpolate(data: Seq[(Int, Difficulty)], epochLength: Int): Difficulty = {
    val size: Int = data.size
    val xy: Iterable[BigInt] = data.map(d => d._1 * d._2)
    val x: Iterable[BigInt] = data.map(d => BigInt(d._1))
    val x2: Iterable[BigInt] = data.map(d => BigInt(d._1) * d._1)
    val y: Iterable[BigInt] = data.map(d => d._2)
    val xySum: BigInt = xy.sum
    val x2Sum: BigInt = x2.sum
    val ySum: BigInt = y.sum
    val xSum: BigInt = x.sum

    val k: BigInt = (xySum * size - xSum * ySum) * PrecisionConstant / (x2Sum * size - xSum * xSum)
    val b: BigInt = (ySum * PrecisionConstant - k * xSum) / size / PrecisionConstant

    val point: Int = data.map(_._1).max + epochLength
    Difficulty @@ (b + k * point / PrecisionConstant)
  }
} 
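
A usage sketch: on an epoch boundary the controller looks back over whole epochs, otherwise it only needs the previous height.

import org.encryfoundation.common.utils.TaggedTypes.Height

val heights: Seq[Height] =
  PowLinearController.getHeightsForRetargetingAt(Height @@ 1001, epochLength = 100, retargetingEpochsQty = 4)
// == Seq(600, 700, 800, 900, 1000), since (1001 - 1) % 100 == 0 and 1001 > 4 * 100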
Example 81
Source File: GlobalTimer.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.it.util

import io.netty.util.{HashedWheelTimer, Timer}
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal

object GlobalTimer {

  val timer: Timer = new HashedWheelTimer()
  sys.addShutdownHook {
    timer.stop()
  }

  implicit class TimerExt(val timer: Timer) extends AnyVal {
    def schedule[A](f: => Future[A], delay: FiniteDuration): Future[A] = {
      val p = Promise[A]
      try {
        timer.newTimeout(_ => p.completeWith(f), delay.length, delay.unit)
      } catch {
        case NonFatal(e) => p.failure(e)
      }
      p.future
    }

    def sleep(term: FiniteDuration): Future[Unit] = schedule(Future.successful(()), term)

    def retryUntil[A](f: => Future[A], cond: A => Boolean, retryInterval: FiniteDuration)(implicit ec: ExecutionContext): Future[A] =
      f.flatMap(v => if (cond(v)) Future.successful(v) else schedule(retryUntil(f, cond, retryInterval), retryInterval))
  }
} 
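
A usage sketch polling until a condition holds (fetchHeight is a hypothetical remote call, stubbed here):

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._
import encry.it.util.GlobalTimer._

def fetchHeight(): Future[Int] = Future.successful(100)  // stand-in for a real node query

val synced: Future[Int] =
  timer.retryUntil(fetchHeight(), (h: Int) => h >= 100, retryInterval = 2.seconds)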
Example 82
Source File: ConfigSpec.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness

import java.io.{BufferedWriter, File, FileWriter}
import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.TestProbe
import com.typesafe.config.ConfigFactory
import com.webtrends.harness.app.HarnessActor.ConfigChange
import com.webtrends.harness.config.ConfigWatcherActor
import com.webtrends.harness.health.{ComponentState, HealthComponent}
import com.webtrends.harness.service.messages.CheckHealth
import org.specs2.mutable.SpecificationWithJUnit

import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration.FiniteDuration
import scala.reflect.io.{Directory, Path}

class ConfigSpec extends SpecificationWithJUnit {
  implicit val dur = FiniteDuration(2, TimeUnit.SECONDS)
  new File("services/test/conf").mkdirs()
  implicit val sys = ActorSystem("system", ConfigFactory.parseString( """
    akka.actor.provider = "akka.actor.LocalActorRefProvider"
    services { path = "services" }
    """).withFallback(ConfigFactory.load))

  implicit val ec: ExecutionContextExecutor =  sys.dispatcher

  val probe = TestProbe()
  val parent = sys.actorOf(Props(new Actor {
    val child = context.actorOf(ConfigWatcherActor.props, "child")
    def receive = {
      case x if sender == child => probe.ref forward x
      case x => child forward x
    }
  }))

  sequential

  "config " should {
    "be in good health" in {
      probe.send(parent, CheckHealth)
      val msg = probe.expectMsgClass(classOf[HealthComponent])
      msg.state equals ComponentState.NORMAL
    }

    "detect changes in config" in {
      val file = new File("services/test/conf/test.conf")
      val bw = new BufferedWriter(new FileWriter(file))
      bw.write("test = \"value\"")
      bw.close()
      val msg = probe.expectMsgClass(classOf[ConfigChange])
      msg.isInstanceOf[ConfigChange]
    }
  }

  step {
    sys.terminate().onComplete { _ =>
        Directory(Path(new File("services"))).deleteRecursively()
    }
  }
} 
Example 83
Source File: InternalHttpSpec.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.http

import java.net.{HttpURLConnection, URL}
import java.util.concurrent.TimeUnit
import akka.actor.{Props, ActorSystem}
import akka.testkit.TestKit
import akka.util.Timeout
import com.webtrends.harness.TestKitSpecificationWithJUnit
import com.webtrends.harness.service.messages.CheckHealth
import scala.concurrent.Await
import akka.pattern.ask
import scala.concurrent.duration.FiniteDuration

class InternalHttpSpec extends TestKitSpecificationWithJUnit(ActorSystem("test")) with InternalHttpClient {
  val port = 8123
  val path = "http://127.0.0.1:" + port + "/"
  val httpActor = system.actorOf(Props(classOf[SimpleHttpServer], port))

  // We need to make sure the httpActor has started up before trying to connect.
  implicit val timeout = Timeout(FiniteDuration(5, TimeUnit.SECONDS))
  Await.result(httpActor ? CheckHealth, timeout.duration)

  "Test handlers" should {
    "handle the get path /ping" in {
      val url = new URL(path + "ping")
      val conn = url.openConnection().asInstanceOf[HttpURLConnection]
      val resp = getResponseContent(conn)

      resp.status mustEqual "200"
      resp.content.length must be > 0
      resp.content.substring(0, 5) mustEqual "pong:"
    }
  }

  step {
    TestKit.shutdownActorSystem(system)
  }

} 
Example 84
Source File: IngestorRegistryEndpoint.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.http

import akka.actor.ActorSystem
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.util.Timeout
import hydra.common.config.ConfigSupport
import ConfigSupport._
import hydra.core.http.RouteSupport
import hydra.ingest.bootstrap.HydraIngestorRegistryClient
import hydra.ingest.services.IngestorRegistry.{FindAll, LookupResult}

import scala.concurrent.duration._


class IngestorRegistryEndpoint()(implicit system:ActorSystem) extends RouteSupport
    with HydraIngestJsonSupport
    with ConfigSupport {

  private val registryLookupTimeout = applicationConfig
    .getDurationOpt("ingest.service-lookup.timeout")
    .getOrElse(5.seconds)

  lazy val registry = HydraIngestorRegistryClient(applicationConfig).registry

  private implicit val timeout = Timeout(registryLookupTimeout)

  override val route: Route =
    path("ingestors" ~ Slash.?) {
      get {
        onSuccess(registry ? FindAll) {
          case response: LookupResult => complete(response.ingestors)
        }
      }
    }
} 
Example 85
Source File: IngestionHandler.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.services

import akka.actor.SupervisorStrategy.Stop
import akka.actor.{Actor, ActorRef, OneForOneStrategy, ReceiveTimeout}
import akka.http.scaladsl.model.{StatusCode, StatusCodes}
import hydra.core.ingest.{HydraRequest, IngestionReport, RequestParams}
import hydra.ingest.services.IngestorRegistry.{
  FindAll,
  FindByName,
  LookupResult
}

import scala.concurrent.duration.FiniteDuration

trait IngestionHandler {
  this: Actor =>

  def timeout: FiniteDuration

  def request: HydraRequest

  //we require an actorref here for performance reasons
  def registry: ActorRef

  private val targetIngestor =
    request.metadataValue(RequestParams.HYDRA_INGESTOR_PARAM)

  targetIngestor match {
    case Some(ingestor) => registry ! FindByName(ingestor)
    case None           => registry ! FindAll
  }

  override def receive: Receive = {
    case LookupResult(Nil) =>
      val errorCode = targetIngestor
        .map(i =>
          StatusCodes
            .custom(404, s"No ingestor named $i was found in the registry.")
        )
        .getOrElse(StatusCodes.BadRequest)

      complete(errorWith(errorCode))

    case LookupResult(ingestors) =>
      context.actorOf(
        IngestionSupervisor.props(request, self, ingestors, timeout)
      )

    case report: IngestionReport =>
      complete(report)

  }

  override val supervisorStrategy =
    OneForOneStrategy() {
      case e: Exception =>
        fail(e)
        Stop
    }

  private def errorWith(statusCode: StatusCode) = {
    IngestionReport(request.correlationId, Map.empty, statusCode.intValue())
  }

  def complete(report: IngestionReport)

  def fail(e: Throwable)
} 
Example 86
Source File: ConfigSupport.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.common.config

import java.util.Properties
import java.util.concurrent.TimeUnit

import cats.implicits._
import com.typesafe.config.{Config, ConfigFactory, ConfigObject}

import scala.concurrent.duration.FiniteDuration
import scala.language.implicitConversions


trait ConfigSupport extends ConfigComponent {

  private val defaultConfig = ConfigFactory.load()

  val applicationName: String = defaultConfig.getString("application.name")

  val rootConfig: Config = defaultConfig

  val applicationConfig: Config = rootConfig.getConfig(applicationName)

}

object ConfigSupport {

  import scala.collection.JavaConverters._

  implicit def toMap(cfg: ConfigObject): Map[String, Object] = {
    cfg.toConfig
      .entrySet()
      .asScala
      .map({ entry => entry.getKey -> entry.getValue.unwrapped() })(
        collection.breakOut
      )
  }

  implicit def toMap(cfg: Config): Map[String, Object] = {
    cfg
      .entrySet()
      .asScala
      .map({ entry => entry.getKey -> entry.getValue.unwrapped() })(
        collection.breakOut
      )
  }

  implicit def toProps(map: Map[String, AnyRef]): Properties = {
    map.foldLeft(new Properties) {
      case (a, (k, v)) =>
        a.put(k, v)
        a
    }
  }

  implicit class ConfigImplicits(config: Config) {
    def getDurationOpt(path: String): Option[FiniteDuration] =
      getOptional(path, config.getDuration).map(d => FiniteDuration(d.toNanos, TimeUnit.NANOSECONDS))

    def getStringOpt(path: String): Option[String] =
      getOptional(path, config.getString)

    def getConfigOpt(path: String): Option[Config] =
      getOptional(path, config.getConfig)

    def getIntOpt(path: String): Option[Int] =
      getOptional(path, config.getInt)

    def getBooleanOpt(path: String): Option[Boolean] =
      getOptional(path, config.getBoolean)

    def getStringListOpt(path: String): Option[List[String]] =
      getOptional(path, config.getStringList).map(_.asScala.toList)

    private def getOptional[A](path: String, method: String => A): Option[A] = {
      if (config.hasPath(path)) {
        method(path).some
      } else {
        none
      }
    }
  }

} 
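
A usage sketch reading an optional timeout with a default, as the hydra endpoint above does:

import com.typesafe.config.ConfigFactory
import hydra.common.config.ConfigSupport._
import scala.concurrent.duration._

val cfg = ConfigFactory.parseString("ingest.timeout = 500ms")
val timeout: FiniteDuration = cfg.getDurationOpt("ingest.timeout").getOrElse(5.seconds)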
Example 87
Source File: FeyGenericActorTest.scala    From incubator-retired-iota   with Apache License 2.0 5 votes vote down vote up
package org.apache.iota.fey

import akka.actor.ActorRef
import scala.concurrent.duration.{DurationInt, FiniteDuration}

class FeyGenericActorTest(override val params: Map[String,String] = Map.empty,
               override val backoff: FiniteDuration = 1.minutes,
               override val connectTo: Map[String,ActorRef] = Map.empty,
               override val schedulerTimeInterval: FiniteDuration = 2.seconds,
               override val orchestrationName: String = "",
               override val orchestrationID: String = "",
               override val autoScale: Boolean = false) extends FeyGenericActor {

  var count = 0
  var started = false
  var processed = false
  var executing = false
  var stopped = false
  var restarted = false

  override def onStart(): Unit = {
    started = true
  }

  override def processMessage[T](message: T, sender: ActorRef): Unit = {
    processed = true
    log.info(s"Processing message ${message.toString}")
    propagateMessage(s"PROPAGATING FROM ${self.path.name} - Message: ${message.toString}")
    startBackoff()
  }

  override def execute(): Unit = {
    log.info(s"Executing action in ${self.path.name}")
    executing = true
  }

  override def customReceive: Receive = {
    case "TEST_CUSTOM" => count+=1
  }

  override def onStop(): Unit = {
    log.info(s"Actor ${self.path.name} stopped.")
    stopped = true
  }

  override def onRestart(reason: Throwable): Unit = {
    restarted = true
  }
} 
Example 88
Source File: JsonReceiverSpec.scala    From incubator-retired-iota   with Apache License 2.0 5 votes vote down vote up
package org.apache.iota.fey

import java.nio.file.{Files, Paths}

import akka.actor.ActorRef
import akka.testkit.{EventFilter, TestProbe}
import ch.qos.logback.classic.Level
import scala.concurrent.duration.{DurationInt, FiniteDuration}

class JsonReceiverSpec extends BaseAkkaSpec with LoggingTest{


  class ReceiverTest(verifyActor: ActorRef) extends JsonReceiver{

    override def execute(): Unit = {
      verifyActor ! "EXECUTED"
      Thread.sleep(500)
    }

    override def exceptionOnRun(e: Exception): Unit = {
      verifyActor ! "INTERRUPTED"
    }

  }

  val verifyTB = TestProbe("RECEIVER-TEST")
  val receiver = new ReceiverTest(verifyTB.ref)

  "Executing validJson in JsonReceiver" should {
    "return false when json schema is not right" in {
      receiver.validJson(getJSValueFromString(Utils_JSONTest.test_json_schema_invalid)) should be(false)
    }
    "log message to Error" in {
      ("Incorrect JSON schema \n/ensembles/0 \n\tErrors: Property command missing") should beLoggedAt(Level.ERROR)
    }
    "return true when Json schema is valid" in {
      receiver.validJson(getJSValueFromString(Utils_JSONTest.create_json_test)) should be(true)
    }
  }

  "Executing checkForLocation in JsonReceiver" should {
    "log message at Debug level" in {
      receiver.checkForLocation(getJSValueFromString(Utils_JSONTest.test_json_schema_invalid))
      "Location not defined in JSON" should beLoggedAt(Level.DEBUG)
    }
    "download jar dynamically from URL" in {
      receiver.checkForLocation(getJSValueFromString(Utils_JSONTest.location_test))
      Files.exists(Paths.get(s"${CONFIG.DYNAMIC_JAR_REPO}/fey-stream.jar")) should be(true)
    }
  }

  var watchThread: Thread = _
  "Start a Thread with the JSON receiver" should {
    "Start Thread" in {
      watchThread = new Thread(receiver, "TESTING-RECEIVERS-IN-THREAD")
      watchThread.setDaemon(true)
      watchThread.start()
      TestProbe().isThreadRunning("TESTING-RECEIVERS-IN-THREAD") should be(true)
    }
    "execute execute() method inside run" in {
      verifyTB.expectMsgAllOf(600.milliseconds,"EXECUTED","EXECUTED")
    }
  }

  "Interrupting the receiver Thread" should {
    "Throw Interrupted exception" in {
      EventFilter[InterruptedException]() intercept {
        watchThread.interrupt()
        watchThread.join()
      }
    }
    "execute exceptionOnRun method" in {
      verifyTB.receiveWhile(1200.milliseconds) {
        case "EXECUTED" =>
      }
      verifyTB.expectMsg("INTERRUPTED")
    }
  }


} 
Example 89
Source File: WatchServiceReceiverSpec.scala    From incubator-retired-iota   with Apache License 2.0 5 votes vote down vote up
package org.apache.iota.fey

import java.nio.file.{Files, Paths}
import java.nio.charset.StandardCharsets

import akka.testkit.{EventFilter, TestProbe}

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import java.io.File

import ch.qos.logback.classic.Level

class WatchServiceReceiverSpec extends BaseAkkaSpec{

  val watcherTB = TestProbe("WATCH-SERVICE")
  var watchFileTask:WatchServiceReceiver = _
  val watchTestDir = s"${CONFIG.JSON_REPOSITORY}/watchtest"

  "Creating WatchServiceReceiver" should {
    "process initial files in the JSON repository" in {
      CONFIG.JSON_EXTENSION = "json.not"
      watchFileTask = new WatchServiceReceiver(watcherTB.ref)
      watcherTB.expectMsgAllClassOf(classOf[JsonReceiverActor.JSON_RECEIVED])
      CONFIG.JSON_EXTENSION = "json.test"
    }
  }

  var watchThread: Thread = _
  "Start a Thread with WatchServiceReceiver" should {
    "Start Thread" in {
      watchThread = new Thread(watchFileTask, "TESTING-WATCHER-IN-THREAD")
      watchThread.setDaemon(true)
      watchThread.start()
      TestProbe().isThreadRunning("TESTING-WATCHER-IN-THREAD") should be(true)
    }
  }

  "Start watching directory" should {
    "Starting receiving CREATED event" taggedAs(SlowTest) in {
      watchFileTask.watch(Paths.get(watchTestDir))
      Files.write(Paths.get(s"$watchTestDir/watched.json.test"), Utils_JSONTest.create_json_test.getBytes(StandardCharsets.UTF_8))
      watcherTB.expectMsgAllClassOf(20.seconds, classOf[JsonReceiverActor.JSON_RECEIVED])
    }
    "Starting receiving UPDATE event" taggedAs(SlowTest) in {
      Files.write(Paths.get(s"$watchTestDir/watched-update.json.test"), Utils_JSONTest.delete_json_test.getBytes(StandardCharsets.UTF_8))
      Thread.sleep(200)
      Files.write(Paths.get(s"$watchTestDir/watched-update.json.test"), Utils_JSONTest.create_json_test.getBytes(StandardCharsets.UTF_8))
      watcherTB.expectMsgAllClassOf(20.seconds, classOf[JsonReceiverActor.JSON_RECEIVED])
    }
  }

  "processJson" should {
    "log to warn level when json has invalid schema" in {
      Files.write(Paths.get(s"$watchTestDir/watched-invalid.json.test"), Utils_JSONTest.test_json_schema_invalid.getBytes(StandardCharsets.UTF_8))
      watchFileTask.processJson(s"$watchTestDir/watched-invalid.json.test",new File(s"$watchTestDir/watched-invalid.json.test"))
      s"File $watchTestDir/watched-invalid.json.test not processed. Incorrect JSON schema" should beLoggedAt(Level.WARN)
    }
  }

  "interrupt watchservice" should{
    "interrupt thread" in {
      watchThread.interrupt()
    }
  }

} 
Example 90
Source File: BaseAkkaSpec.scala    From incubator-retired-iota   with Apache License 2.0 5 votes vote down vote up
package org.apache.iota.fey

import java.nio.file.Paths

import akka.actor.{ActorIdentity, ActorRef, ActorSystem, Identify, Props}
import akka.testkit.{EventFilter, TestEvent, TestProbe}
import com.typesafe.config.ConfigFactory
import org.scalatest.BeforeAndAfterAll
import play.api.libs.json._

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.Await

class BaseAkkaSpec extends BaseSpec with BeforeAndAfterAll with LoggingTest{

  //Load default configuration for Fey when running tests
  resetCapturedLogs()
  CONFIG.loadUserConfiguration(Paths.get(TestSetup.configTest.toURI()).toFile().getAbsolutePath)
  TestSetup.setup()

  val systemName = "FEY-TEST"
  implicit val system = ActorSystem(systemName, ConfigFactory.parseString("""akka.loggers = ["akka.testkit.TestEventListener"]"""))
  system.eventStream.publish(TestEvent.Mute(EventFilter.debug()))
  system.eventStream.publish(TestEvent.Mute(EventFilter.info()))
  system.eventStream.publish(TestEvent.Mute(EventFilter.warning()))
  system.eventStream.publish(TestEvent.Mute(EventFilter.error()))

  val globalIdentifierName = "GLOBAL-IDENTIFIER"
  val globalIdentifierRef = system.actorOf(Props[IdentifyFeyActors],globalIdentifierName)

  override protected def afterAll(): Unit = {
    //Force reload of GenericActor's jar
    Utils.loadedJars.remove("fey-test-actor.jar")
    Monitor.events.removeAllNodes()
    Await.ready(system.terminate(), 20.seconds)
  }

  implicit class TestProbeOps(probe: TestProbe) {

    def expectActor(path: String, max: FiniteDuration = 3.seconds): ActorRef = {
      probe.within(max) {
        var actor = null: ActorRef
        probe.awaitAssert {
          (probe.system actorSelection path).tell(Identify(path), probe.ref)
          probe.expectMsgPF(100 milliseconds) {
            case ActorIdentity(`path`, Some(ref)) => actor = ref
          }
        }
        actor
      }
    }

    def expectActorInSystem(path: String, lookInSystem: ActorSystem, max: FiniteDuration = 3.seconds): ActorRef = {
      probe.within(max) {
        var actor = null: ActorRef
        probe.awaitAssert {
          (lookInSystem actorSelection path).tell(Identify(path), probe.ref)
          probe.expectMsgPF(100 milliseconds) {
            case ActorIdentity(`path`, Some(ref)) => actor = ref
          }
        }
        actor
      }
    }

    def verifyActorTermination(actor: ActorRef)(implicit system: ActorSystem): Unit = {
      val watcher = TestProbe()
      watcher.watch(actor)
      watcher.expectTerminated(actor)
    }

    def notExpectActor(path: String, max: FiniteDuration = 3.seconds): Unit = {
      probe.within(max) {
        probe.awaitAssert {
          (probe.system actorSelection path).tell(Identify(path), probe.ref)
          probe.expectMsgPF(100 milliseconds) {
            case ActorIdentity(`path`, None) =>
          }
        }
      }
    }

    def isThreadRunning(threadName: String): Boolean = {
      Thread.getAllStackTraces.keySet().toArray
        .map(_.asInstanceOf[Thread])
        .find(_.getName == threadName) match {
        case Some(thread) =>
          if(thread.isAlive) true else false
        case None => false
      }
    }
  }

  //Utils Functions
  def getJSValueFromString(json: String): JsValue = {
    Json.parse(json)
  }

} 
Example 91
Source File: FeyGenericActorReceiverTest.scala    From incubator-retired-iota   with Apache License 2.0 5 votes vote down vote up
package org.apache.iota.fey

import akka.actor.ActorRef
import scala.concurrent.duration.{DurationInt, FiniteDuration}

class FeyGenericActorReceiverTest(override val params: Map[String,String] = Map.empty,
                          override val backoff: FiniteDuration = 1.minutes,
                          override val connectTo: Map[String,ActorRef] = Map.empty,
                          override val schedulerTimeInterval: FiniteDuration = 2.seconds,
                          override val orchestrationName: String = "",
                          override val orchestrationID: String = "",
                          override val autoScale: Boolean = false) extends FeyGenericActorReceiver {

  override def customReceive:Receive = {
    case "PROPAGATE" => propagateMessage("PROPAGATE-CALLED")
    case x => log.debug(s"Message not treated: $x")
  }

  override def getJSONString[T](input: T): String = {
    input match{
      case "VALID_JSON" => Utils_JSONTest.create_json_test
      case "INVALID_JSON" => Utils_JSONTest.test_json_schema_invalid
      case "JSON_LOCATION" => Utils_JSONTest.location_test_2
    }
  }

  var count = 0
  var started = false
  var executing = false
  var stopped = false
  var restarted = false

  override def onStart(): Unit = {
    started = true
  }

  override def execute(): Unit = {
    log.info(s"Executing action in ${self.path.name}")
    executing = true
  }

  override def onStop(): Unit = {
    log.info(s"Actor ${self.path.name} stopped.")
    stopped = true
  }

  override def onRestart(reason: Throwable): Unit = {
    restarted = true
  }

} 
Example 92
Source File: NotificationChannel.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.log

import java.util.concurrent.TimeUnit

import akka.actor.Actor
import akka.actor.ActorRef
import com.rbmhtechnology.eventuate._
import com.rbmhtechnology.eventuate.ReplicationProtocol._
import com.typesafe.config.Config

import scala.collection.immutable.Seq
import scala.concurrent.duration.DurationLong
import scala.concurrent.duration.FiniteDuration

class NotificationChannelSettings(config: Config) {
  val registrationExpirationDuration: FiniteDuration =
    config.getDuration("eventuate.log.replication.retry-delay", TimeUnit.MILLISECONDS).millis
}

object NotificationChannel {
  case class Updated(events: Seq[DurableEvent])

  private case class Registration(replicator: ActorRef, currentTargetVersionVector: VectorTime, filter: ReplicationFilter, registrationTime: Long)

  private object Registration {
    def apply(read: ReplicationRead): Registration =
      new Registration(read.replicator, read.currentTargetVersionVector, read.filter, System.nanoTime())
  }
}


class NotificationChannel(logId: String) extends Actor {
  import NotificationChannel._

  private val settings = new NotificationChannelSettings(context.system.settings.config)

  // targetLogId -> subscription
  private var registry: Map[String, Registration] = Map.empty

  // targetLogIds for which a read operation is in progress
  private var reading: Set[String] = Set.empty

  def receive = {
    case Updated(events) =>
      val currentTime = System.nanoTime()
      registry.foreach {
        case (targetLogId, reg) =>
          if (!reading.contains(targetLogId)
            && events.exists(_.replicable(reg.currentTargetVersionVector, reg.filter))
            && currentTime - reg.registrationTime <= settings.registrationExpirationDuration.toNanos)
            reg.replicator ! ReplicationDue
      }
    case r: ReplicationRead =>
      registry += (r.targetLogId -> Registration(r))
      reading += r.targetLogId
    case r: ReplicationReadSuccess =>
      reading -= r.targetLogId
    case r: ReplicationReadFailure =>
      reading -= r.targetLogId
    case w: ReplicationWrite =>
      for {
        id <- w.sourceLogIds
        rr <- registry.get(id)
      } registry += (id -> rr.copy(currentTargetVersionVector = w.metadata(id).currentVersionVector))
  }
} 
Example 93
Source File: EventProducerConfig.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.adapter.vertx.api

import akka.actor.ActorRef

import scala.concurrent.duration.FiniteDuration

sealed trait EventProducerConfig {
  def id: String
  def log: ActorRef
}

sealed trait VertxProducerConfig extends EventProducerConfig {
  def endpointRouter: EndpointRouter
}

case class VertxPublisherConfig(id: String, log: ActorRef, endpointRouter: EndpointRouter) extends VertxProducerConfig
case class VertxSenderConfig(id: String, log: ActorRef, endpointRouter: EndpointRouter, deliveryMode: DeliveryMode) extends VertxProducerConfig
case class LogProducerConfig(id: String, log: ActorRef, endpoints: Set[String], filter: PartialFunction[Any, Boolean]) extends EventProducerConfig

sealed trait ConfirmationType
case object Single extends ConfirmationType
case class Batch(size: Int) extends ConfirmationType

sealed trait DeliveryMode
case object AtMostOnce extends DeliveryMode
case class AtLeastOnce(confirmationType: ConfirmationType, confirmationTimeout: FiniteDuration) extends DeliveryMode

object EndpointRouter {

  def route(f: PartialFunction[Any, String]): EndpointRouter =
    new EndpointRouter(f)

  def routeAllTo(s: String): EndpointRouter =
    new EndpointRouter({ case _ => s })
}

class EndpointRouter(f: PartialFunction[Any, String]) {
  val endpoint: Any => Option[String] = f.lift
}


  // Note: the enclosing factory object (supplying `endpoints` and the
  // CompletableEventProducerConfigFactory type) was elided by the example
  // extractor along with its doc comment; the fragment below completes a
  // LogProducerConfig that writes filtered events to the given target log.
  def writeTo(log: ActorRef, filter: PartialFunction[Any, Boolean] = { case _ => true }) = new CompletableEventProducerConfigFactory {
    override def as(id: String): LogProducerConfig =
      LogProducerConfig(id, log, endpoints, filter)
  }
} 
Example 94
Source File: VertxProducer.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.adapter.vertx

import com.rbmhtechnology.eventuate.DurableEvent
import com.rbmhtechnology.eventuate.adapter.vertx.api.EventMetadata
import io.vertx.core.Vertx
import io.vertx.core.eventbus.{ DeliveryOptions, Message }

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ ExecutionContext, Future, Promise }

trait VertxProducer {
  def vertx: Vertx

  protected def deliveryOptions(event: DurableEvent): DeliveryOptions =
    new DeliveryOptions().setHeaders(EventMetadata(event).toHeaders)
}

trait VertxPublisher extends VertxProducer {
  def publish(address: String, evt: DurableEvent): Unit =
    vertx.eventBus().publish(address, evt.payload, deliveryOptions(evt))
}

trait VertxSender extends VertxProducer {

  import VertxHandlerConverters._

  def send[A](address: String, evt: DurableEvent, timeout: FiniteDuration)(implicit ec: ExecutionContext): Future[A] = {
    val promise = Promise[Message[A]]
    vertx.eventBus().send(address, evt.payload, deliveryOptions(evt).setSendTimeout(timeout.toMillis), promise.asVertxHandler)
    promise.future.map(_.body)
  }

  def send(address: String, evt: DurableEvent): Unit =
    vertx.eventBus().send(address, evt.payload, deliveryOptions(evt))
} 
Example 95
Source File: VertxSingleConfirmationSender.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ ActorLogging, ActorRef, Props }
import akka.pattern.pipe
import com.rbmhtechnology.eventuate.adapter.vertx.api.EndpointRouter
import com.rbmhtechnology.eventuate.{ ConfirmedDelivery, EventsourcedActor }
import io.vertx.core.Vertx

import scala.concurrent.duration.FiniteDuration
import scala.util.{ Failure, Success }

private[vertx] object VertxSingleConfirmationSender {

  case class DeliverEvent(evt: EventEnvelope, deliveryId: String)
  case class Confirm(deliveryId: String)
  case class DeliverFailed(evt: EventEnvelope, deliveryId: String, err: Throwable)
  case object Redeliver

  case class DeliveryConfirmed()

  def props(id: String, eventLog: ActorRef, endpointRouter: EndpointRouter, vertx: Vertx, confirmationTimeout: FiniteDuration): Props =
    Props(new VertxSingleConfirmationSender(id, eventLog, endpointRouter, vertx, confirmationTimeout))
}

private[vertx] class VertxSingleConfirmationSender(val id: String, val eventLog: ActorRef, val endpointRouter: EndpointRouter, val vertx: Vertx, confirmationTimeout: FiniteDuration)
  extends EventsourcedActor with ConfirmedDelivery with VertxSender with ActorLogging {

  import VertxSingleConfirmationSender._
  import context.dispatcher

  context.system.scheduler.schedule(confirmationTimeout, confirmationTimeout, self, Redeliver)

  override def onCommand: Receive = {
    case DeliverEvent(envelope, deliveryId) =>
      send[Any](envelope.address, envelope.evt, confirmationTimeout)
        .map(_ => Confirm(deliveryId))
        .recover {
          case err => DeliverFailed(envelope, deliveryId, err)
        }
        .pipeTo(self)

    case Confirm(deliveryId) if unconfirmed.contains(deliveryId) =>
      persistConfirmation(DeliveryConfirmed(), deliveryId) {
        case Success(evt) =>
        case Failure(err) => log.error(s"Confirmation for delivery with id '$deliveryId' could not be persisted.", err)
      }

    case Redeliver =>
      redeliverUnconfirmed()

    case DeliverFailed(evt, deliveryId, err) =>
      log.warning(s"Delivery with id '$deliveryId' for event [$evt] failed with $err. The delivery will be retried.")
  }

  override def onEvent: Receive = {
    case DeliveryConfirmed() =>
    // confirmations should not be published
    case ev =>
      endpointRouter.endpoint(ev) match {
        case Some(endpoint) =>
          val deliveryId = lastSequenceNr.toString
          deliver(deliveryId, DeliverEvent(EventEnvelope(endpoint, lastHandledEvent), deliveryId), self.path)
        case None =>
      }
  }
} 
Example 96
Source File: KinesisTestConsumer.scala    From reactive-kinesis   with Apache License 2.0 5 votes vote down vote up
package com.weightwatchers.reactive.kinesis.common

import java.util.Collections

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord
import com.amazonaws.services.kinesis.model._
import com.amazonaws.services.kinesis.{AmazonKinesisAsyncClient, _}
import com.weightwatchers.reactive.kinesis.consumer.KinesisConsumer.ConsumerConf

import scala.collection.JavaConverters._
import scala.concurrent.duration.FiniteDuration

object KinesisTestConsumer {

  // The client construction in the original file was elided by the example
  // extractor along with its doc comment; a default client (assumption)
  // keeps the snippet self-contained.
  private val client: AmazonKinesis = AmazonKinesisClientBuilder.defaultClient()

  def retrieveRecords(streamName: String, batchSize: Int): List[String] = {
    getShards(streamName)
      .flatMap { shard =>
        val getRecordsRequest = new GetRecordsRequest
        getRecordsRequest.setShardIterator(getShardIterator(streamName, shard))
        getRecordsRequest.setLimit(batchSize)
        client.getRecords(getRecordsRequest).getRecords.asScala.toList
      }
      .flatMap { record: Record =>
        UserRecord
          .deaggregate(Collections.singletonList(record))
          .asScala
          .map { ur =>
            new String(ur.getData.array(), java.nio.charset.StandardCharsets.UTF_8)
          }
      }
  }

  private def getShardIterator(streamName: String, shard: Shard) = {
    client
      .getShardIterator(streamName, shard.getShardId, "TRIM_HORIZON")
      .getShardIterator
  }

  private def getShards(streamName: String) = {
    client
      .describeStream(streamName)
      .getStreamDescription
      .getShards
      .asScala
      .toList
  }

  def shutdown(): Unit = client.shutdown()

} 
Example 97
Source File: LeaderAwareCustomAutoDownBase.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.ClusterEvent._
import akka.event.Logging

import scala.concurrent.duration.FiniteDuration

abstract class LeaderAwareCustomAutoDownBase(autoDownUnreachableAfter: FiniteDuration) extends CustomAutoDownBase(autoDownUnreachableAfter) {

  private val log = Logging(context.system, this)

  private var leader = false

  def onLeaderChanged(leader: Option[Address]): Unit = {}

  def isLeader: Boolean = leader

  override def receiveEvent: Receive = {
    case LeaderChanged(leaderOption) =>
      leader = leaderOption.contains(selfAddress)
      if (isLeader) {
        log.info("This node is the new Leader")
      }
      onLeaderChanged(leaderOption)
    case UnreachableMember(m) =>
      log.info("{} is unreachable", m)
      unreachableMember(m)
    case ReachableMember(m)   =>
      log.info("{} is reachable", m)
      remove(m)
    case MemberRemoved(m, _)  =>
      log.info("{} was removed from the cluster", m)
      remove(m)
  }

  override def initialize(state: CurrentClusterState): Unit = {
    leader = state.leader.exists(_ == selfAddress)
    super.initialize(state)
  }
} 
Example 98
Source File: RoleLeaderAutoDowningRoles.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config

import scala.collection.JavaConverters._
import scala.concurrent.duration.{FiniteDuration, _}

final class RoleLeaderAutoDowningRoles(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  private val config: Config = system.settings.config

  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  override def downingActorProps: Option[Props] = {
    val stableAfter = system.settings.config.getDuration("custom-downing.stable-after").toMillis millis
    val leaderRole = system.settings.config.getString("custom-downing.role-leader-auto-downing-roles.leader-role")
    val roles = system.settings.config.getStringList("custom-downing.role-leader-auto-downing-roles.target-roles").asScala.toSet
    if (roles.isEmpty) None else Some(RoleLeaderAutoDownRoles.props(leaderRole, roles, stableAfter))
  }
}


private[autodown] object RoleLeaderAutoDownRoles {
  def props(leaderRole: String, targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration): Props = Props(classOf[RoleLeaderAutoDownRoles], leaderRole, targetRoles, autoDownUnreachableAfter)
}

private[autodown] class RoleLeaderAutoDownRoles(leaderRole: String, targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration)
  extends RoleLeaderAutoDownRolesBase(leaderRole, targetRoles, autoDownUnreachableAfter) with ClusterCustomDowning {

  override def down(node: Address): Unit = {
    log.info("RoleLeader is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }
} 
Example 99
Source File: CustomAutoDownBase.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.actor.{Cancellable, Scheduler, Address, Actor}
import akka.cluster.ClusterEvent._
import akka.cluster.MemberStatus.{Exiting, Down}
import akka.cluster._
import scala.concurrent.duration.{Duration, FiniteDuration}

object CustomDowning {
  case class UnreachableTimeout(member: Member)
}

abstract class CustomAutoDownBase(autoDownUnreachableAfter: FiniteDuration) extends Actor {

  import CustomDowning._

  def selfAddress: Address

  def down(node: Address): Unit

  def downOrAddPending(member: Member): Unit

  def downOrAddPendingAll(members: Set[Member]): Unit

  def scheduler: Scheduler

  import context.dispatcher

  val skipMemberStatus = Set[MemberStatus](Down, Exiting)

  private var scheduledUnreachable: Map[Member, Cancellable] = Map.empty
  private var pendingUnreachable: Set[Member] = Set.empty
  private var unstableUnreachable: Set[Member] = Set.empty

  override def postStop(): Unit = {
    scheduledUnreachable.values foreach { _.cancel }
    super.postStop()
  }

  def receiveEvent: Receive

  def receive: Receive = receiveEvent orElse predefinedReceiveEvent

  def predefinedReceiveEvent: Receive = {
    case state: CurrentClusterState =>
      initialize(state)
      state.unreachable foreach unreachableMember

    case UnreachableTimeout(member) =>
      if (scheduledUnreachable contains member) {
        scheduledUnreachable -= member
        if (scheduledUnreachable.isEmpty) {
          unstableUnreachable += member
          downOrAddPendingAll(unstableUnreachable)
          unstableUnreachable = Set.empty
        } else {
          unstableUnreachable += member
        }
      }

    case _: ClusterDomainEvent =>
  }

  def initialize(state: CurrentClusterState) = {}

  def unreachableMember(m: Member): Unit =
    if (!skipMemberStatus(m.status) && !scheduledUnreachable.contains(m))
      scheduleUnreachable(m)

  def scheduleUnreachable(m: Member): Unit = {
    if (autoDownUnreachableAfter == Duration.Zero) {
      downOrAddPending(m)
    } else {
      val task = scheduler.scheduleOnce(autoDownUnreachableAfter, self, UnreachableTimeout(m))
      scheduledUnreachable += (m -> task)
    }
  }

  def remove(member: Member): Unit = {
    scheduledUnreachable.get(member) foreach { _.cancel }
    scheduledUnreachable -= member
    pendingUnreachable -= member
    unstableUnreachable -= member
  }

  def scheduledUnreachableMembers: Map[Member, Cancellable] = scheduledUnreachable

  def pendingUnreachableMembers: Set[Member] = pendingUnreachable

  def pendingAsUnreachable(member: Member): Unit = pendingUnreachable += member

  def downPendingUnreachableMembers(): Unit = {
    pendingUnreachable.foreach(member => down(member.address))
    pendingUnreachable = Set.empty
  }

  def unstableUnreachableMembers: Set[Member] = unstableUnreachable
} 
Example 100
Source File: RoleLeaderAutoDownRolesBase.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.Member
import scala.concurrent.duration.FiniteDuration

abstract class RoleLeaderAutoDownRolesBase(leaderRole: String, targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration)
  extends RoleLeaderAwareCustomAutoDownBase(autoDownUnreachableAfter){


  override def onRoleLeaderChanged(role: String, leader: Option[Address]): Unit = {
    if (leaderRole == role && isRoleLeaderOf(leaderRole)) downPendingUnreachableMembers()
  }

  override def downOrAddPending(member: Member): Unit = {
    if (targetRoles.exists(role => member.hasRole(role))) {
      if (isRoleLeaderOf(leaderRole)) {
        down(member.address)
      } else {
        pendingAsUnreachable(member)
      }
    }
  }

  override def downOrAddPendingAll(members: Set[Member]): Unit = {
    members.foreach(downOrAddPending)
  }
} 
Example 101
Source File: OldestAutoDownBase.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.cluster.MemberStatus.Down
import akka.cluster.{MemberStatus, Member}

import scala.concurrent.duration.FiniteDuration

abstract class OldestAutoDownBase(oldestMemberRole: Option[String], downIfAlone: Boolean, autoDownUnreachableAfter: FiniteDuration)
  extends OldestAwareCustomAutoDownBase(autoDownUnreachableAfter){

  override def onMemberRemoved(member: Member, previousStatus: MemberStatus): Unit = {
    if (isOldestOf(oldestMemberRole))
      downPendingUnreachableMembers()
  }

  override def downOrAddPending(member: Member): Unit = {
    if (isOldestOf(oldestMemberRole)) {
      down(member.address)
      replaceMember(member.copy(Down))
    } else {
      pendingAsUnreachable(member)
    }
  }

  def downOnSecondary(member: Member): Unit = {
    if (isSecondaryOldest(oldestMemberRole)) {
      down(member.address)
      replaceMember(member.copy(Down))
    }
  }

  override def downOrAddPendingAll(members: Set[Member]): Unit = {
    val oldest = oldestMember(oldestMemberRole)
    if (downIfAlone && isOldestAlone(oldestMemberRole)) {
      if (isOldestOf(oldestMemberRole)) {
        shutdownSelf()
      } else if (isSecondaryOldest(oldestMemberRole)) {
        members.foreach(downOnSecondary)
      } else {
        members.foreach(downOrAddPending)
      }
    } else {
      if (oldest.fold(true)(o => members.contains(o))) {
        shutdownSelf()
      } else {
        members.foreach(downOrAddPending)
      }
    }
  }

  def downAloneOldest(member: Member): Unit = {
    val oldest = oldestMember(oldestMemberRole)
    if (isOldestOf(oldestMemberRole)) {
      shutdownSelf()
    } else if (isSecondaryOldest(oldestMemberRole) && oldest.contains(member)) {
      oldest.foreach { m =>
        down(m.address)
        replaceMember(m.copy(Down))
      }
    } else {
      pendingAsUnreachable(member)
    }
  }
} 
Example 102
Source File: QuorumLeaderAutoDownBase.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.{MemberStatus, Member}
import akka.cluster.MemberStatus.Down

import scala.concurrent.duration.FiniteDuration

abstract class QuorumLeaderAutoDownBase(quorumRole: Option[String], quorumSize: Int, downIfOutOfQuorum: Boolean, autoDownUnreachableAfter: FiniteDuration)
  extends QuorumAwareCustomAutoDownBase(quorumSize, autoDownUnreachableAfter) {

  override def onLeaderChanged(leader: Option[Address]): Unit = {
    if (quorumRole.isEmpty && isLeader) downPendingUnreachableMembers()
  }

  override def onRoleLeaderChanged(role: String, leader: Option[Address]): Unit = {
    quorumRole.foreach { r =>
      if (r == role && isRoleLeaderOf(r)) downPendingUnreachableMembers()
    }
  }


  override def onMemberRemoved(member: Member, previousStatus: MemberStatus): Unit = {
    if (isQuorumMet(quorumRole)) {
      if (isLeaderOf(quorumRole)) {
        downPendingUnreachableMembers()
      }
    } else {
      down(selfAddress)
    }
    super.onMemberRemoved(member, previousStatus)
  }

  override def downOrAddPending(member: Member): Unit = {
    if (isLeaderOf(quorumRole)) {
      down(member.address)
      replaceMember(member.copy(Down))
    } else {
      pendingAsUnreachable(member)
    }
  }

  override def downOrAddPendingAll(members: Set[Member]): Unit = {
    if (isQuorumMetAfterDown(members, quorumRole)) {
      members.foreach(downOrAddPending)
    } else if (downIfOutOfQuorum) {
      shutdownSelf()
    }
  }
} 
Example 103
Source File: LeaderAutoDownRolesBase.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.Member

import scala.concurrent.duration.FiniteDuration


abstract class LeaderAutoDownRolesBase(targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration)
  extends LeaderAwareCustomAutoDownBase(autoDownUnreachableAfter){


  override def onLeaderChanged(leader: Option[Address]): Unit = {
    if (isLeader) downPendingUnreachableMembers()
  }

  override def downOrAddPending(member: Member): Unit = {
    if (targetRoles.exists(role => member.hasRole(role))) {
      if (isLeader) {
        down(member.address)
      } else {
        pendingAsUnreachable(member)
      }
    }
  }

  override def downOrAddPendingAll(members: Set[Member]): Unit = {
    members.foreach(downOrAddPending)
  }
} 
Example 104
Source File: RoleLeaderAwareCustomAutoDownBase.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.ClusterEvent._
import akka.event.Logging

import scala.concurrent.duration.FiniteDuration

abstract class RoleLeaderAwareCustomAutoDownBase(autoDownUnreachableAfter: FiniteDuration) extends CustomAutoDownBase(autoDownUnreachableAfter) {

  private val log = Logging(context.system, this)

  private var roleLeader: Map[String, Boolean] = Map.empty

  def isRoleLeaderOf(role: String): Boolean = roleLeader.getOrElse(role, false)

  def onRoleLeaderChanged(role: String, leader: Option[Address]): Unit = {}

  override def receiveEvent: Receive = {
    case RoleLeaderChanged(role, leaderOption) =>
      roleLeader = roleLeader + (role -> leaderOption.contains(selfAddress))
      if (isRoleLeaderOf(role)) {
        log.info("This node is the new role leader for role {}", role)
      }
      onRoleLeaderChanged(role, leaderOption)
    case UnreachableMember(m) =>
      log.info("{} is unreachable", m)
      unreachableMember(m)
    case ReachableMember(m)   =>
      log.info("{} is reachable", m)
      remove(m)
    case MemberRemoved(m, _)  =>
      log.info("{} was removed from the cluster", m)
      remove(m)
  }

  override def initialize(state: CurrentClusterState): Unit = {
    roleLeader = state.roleLeaderMap.mapValues(_.exists(_ == selfAddress)).toMap
    super.initialize(state)
  }
} 
Example 105
Source File: LeaderAutoDowningRoles.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config

import scala.collection.JavaConverters._
import scala.concurrent.duration.{FiniteDuration, _}

final class LeaderAutoDowningRoles(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  private val config: Config = system.settings.config

  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  override def downingActorProps: Option[Props] = {
    val stableAfter = system.settings.config.getDuration("custom-downing.stable-after").toMillis millis
    val roles = system.settings.config.getStringList("custom-downing.leader-auto-downing-roles.target-roles").asScala.toSet
    if (roles.isEmpty) None else Some(LeaderAutoDownRoles.props(roles, stableAfter))
  }
}


private[autodown] object LeaderAutoDownRoles {
  def props(targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration): Props = Props(classOf[LeaderAutoDownRoles], targetRoles, autoDownUnreachableAfter)
}

private[autodown] class LeaderAutoDownRoles(targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration)
  extends LeaderAutoDownRolesBase(targetRoles, autoDownUnreachableAfter) with ClusterCustomDowning {

  override def down(node: Address): Unit = {
    log.info("Leader is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }
} 
Example 106
Source File: MajorityLeaderAutoDownBase.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.MemberStatus.Down
import akka.cluster.{MemberStatus, Member}

import scala.concurrent.duration.FiniteDuration

abstract class MajorityLeaderAutoDownBase(majorityMemberRole: Option[String], downIfInMinority: Boolean, autoDownUnreachableAfter: FiniteDuration)
    extends MajorityAwareCustomAutoDownBase(autoDownUnreachableAfter) {

  override def onLeaderChanged(leader: Option[Address]): Unit = {
    if (majorityMemberRole.isEmpty && isLeader) downPendingUnreachableMembers()
  }

  override def onRoleLeaderChanged(role: String, leader: Option[Address]): Unit = {
    majorityMemberRole.foreach { r =>
      if (r == role && isRoleLeaderOf(r)) downPendingUnreachableMembers()
    }
  }

  override def onMemberRemoved(member: Member, previousStatus: MemberStatus): Unit = {
    if (isMajority(majorityMemberRole)) {
      if (isLeaderOf(majorityMemberRole)) {
        downPendingUnreachableMembers()
      }
    } else {
      down(selfAddress)
    }
    super.onMemberRemoved(member, previousStatus)
  }

  override def downOrAddPending(member: Member): Unit = {
    if (isLeaderOf(majorityMemberRole)) {
      down(member.address)
      replaceMember(member.copy(Down))
    } else {
      pendingAsUnreachable(member)
    }
  }

  override def downOrAddPendingAll(members: Set[Member]): Unit = {
    if (isMajorityAfterDown(members, majorityMemberRole)) {
      members.foreach(downOrAddPending)
    } else if (downIfInMinority) {
      shutdownSelf()
    }
  }
} 
Example 107
Source File: ConsulCoordinationSpec.scala    From constructr-consul   with Apache License 2.0 5 votes vote down vote up
package com.tecsisa.constructr.coordination.consul

import akka.Done
import akka.actor.{ ActorSystem, AddressFromURIString }
import akka.testkit.{ TestDuration, TestProbe }
import com.typesafe.config.ConfigFactory
import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpec }
import scala.concurrent.duration.{ Duration, DurationInt, FiniteDuration }
import scala.concurrent.{ Await, Awaitable }
import scala.util.Random

object ConsulCoordinationSpec {

  private val coordinationHost = {
    val dockerHostPattern = """tcp://(\S+):\d{1,5}""".r
    sys.env
      .get("DOCKER_HOST")
      .collect { case dockerHostPattern(address) => address }
      .getOrElse("127.0.0.1")
  }
}

class ConsulCoordinationSpec extends WordSpec with Matchers with BeforeAndAfterAll {
  import ConsulCoordinationSpec._

  private implicit val system = {
    val config =
      ConfigFactory
        .parseString(s"constructr.coordination.host = $coordinationHost")
        .withFallback(ConfigFactory.load())
    ActorSystem("default", config)
  }

  private val address1 = AddressFromURIString("akka.tcp://default@a:2552")
  private val address2 = AddressFromURIString("akka.tcp://default@b:2552")

  "ConsulCoordination" should {
    "correctly interact with consul" in {
      val coordination = new ConsulCoordination(randomString(), system)

      // Getting nodes
      resultOf(coordination.getNodes()) shouldBe 'empty

      // Lock (ttl >= 10s)
      resultOf(coordination.lock(address1, 10.seconds)) shouldBe true
      resultOf(coordination.lock(address1, 10.seconds)) shouldBe true
      resultOf(coordination.lock(address2, 10.seconds)) shouldBe false

      // Add self
      resultOf(coordination.addSelf(address1, 10.seconds)) shouldBe Done
      resultOf(coordination.getNodes()) shouldBe Set(address1)

      // Refresh
      resultOf(coordination.refresh(address1, 10.seconds)) shouldBe Done
      resultOf(coordination.getNodes()) shouldBe Set(address1)

      val probe = TestProbe()
      import probe._
      awaitAssert(
        resultOf(coordination.getNodes()) shouldBe 'empty,
        25.seconds // Wait until open sessions expire
      )
    }
  }

  override protected def afterAll() = {
    Await.ready(system.terminate(), Duration.Inf)
    super.afterAll()
  }

  private def resultOf[A](awaitable: Awaitable[A], max: FiniteDuration = 3.seconds.dilated) =
    Await.result(awaitable, max)

  private def randomString() = Random.nextInt(Int.MaxValue).toString // always non-negative, unlike math.abs(Random.nextInt)
} 
Example 108
Source File: AuthToken.scala    From tsec   with MIT License 5 votes vote down vote up
package tsec.authentication

import java.time.Instant

import scala.concurrent.duration.FiniteDuration


trait AuthToken[A] {
  def expiry(a: A): Instant
  def lastTouched(a: A): Option[Instant]

  def isExpired(a: A, now: Instant): Boolean = expiry(a).isBefore(now)
  def isTimedOut(a: A, now: Instant, timeOut: FiniteDuration): Boolean =
    lastTouched(a).exists(
      _.plusSeconds(timeOut.toSeconds)
        .isBefore(now)
    )
} 
Example 109
Source File: StatefulJWTAuth.scala    From tsec   with MIT License 5 votes vote down vote up
package tsec.authentication.internal

import java.time.Instant

import cats.data.OptionT
import cats.effect.Sync
import cats.syntax.all._
import org.http4s._
import tsec.authentication._
import tsec.common._
import tsec.jws.mac._
import tsec.jwt._
import tsec.jwt.algorithms.JWTMacAlgo
import tsec.mac.jca._

import scala.concurrent.duration.FiniteDuration


private[tsec] abstract class StatefulJWTAuth[F[_], I, V, A: JWTMacAlgo](
    val expiry: FiniteDuration,
    val maxIdle: Option[FiniteDuration],
    tokenStore: BackingStore[F, SecureRandomId, AugmentedJWT[A, I]],
    identityStore: IdentityStore[F, I, V],
    signingKey: MacSigningKey[A]
)(implicit F: Sync[F], cv: JWSMacCV[F, A])
    extends JWTAuthenticator[F, I, V, A] {

  private[tsec] def verifyAndRefresh(
      raw: String,
      retrieved: AugmentedJWT[A, I],
      now: Instant
  ): F[AugmentedJWT[A, I]]

  def parseRaw(raw: String, request: Request[F]): OptionT[F, SecuredRequest[F, V, AugmentedJWT[A, I]]] =
    OptionT(
      (for {
        now       <- F.delay(Instant.now())
        extracted <- cv.verifyAndParse(raw, signingKey, now)
        id        <- cataOption(extracted.id)
        retrieved <- tokenStore.get(SecureRandomId(id)).orAuthFailure
        refreshed <- verifyAndRefresh(raw, retrieved, now)
        identity  <- identityStore.get(retrieved.identity).orAuthFailure
      } yield SecuredRequest(request, identity, refreshed).some)
        .handleError(_ => None)
    )

  def create(body: I): F[AugmentedJWT[A, I]] =
    for {
      cookieId <- F.delay(SecureRandomId.Interactive.generate)
      now      <- F.delay(Instant.now())
      newExpiry = now.plusSeconds(expiry.toSeconds)
      claims = JWTClaims(
        issuedAt = Some(now),
        jwtId = Some(cookieId),
        expiration = Some(newExpiry)
      )
      signed  <- JWTMac.build[F, A](claims, signingKey)
      created <- tokenStore.put(AugmentedJWT(cookieId, signed, body, newExpiry, touch(now)))
    } yield created

  def renew(authenticator: AugmentedJWT[A, I]): F[AugmentedJWT[A, I]] =
    F.delay(Instant.now()).flatMap { now =>
      val updatedExpiry = now.plusSeconds(expiry.toSeconds)
      val newBody       = authenticator.jwt.body.withExpiry(updatedExpiry)
      for {
        reSigned <- JWTMac.build[F, A](newBody, signingKey)
        updated <- tokenStore
          .update(authenticator.copy(jwt = reSigned, expiry = updatedExpiry, lastTouched = touch(now)))
      } yield updated
    }

  def update(authenticator: AugmentedJWT[A, I]): F[AugmentedJWT[A, I]] =
    tokenStore.update(authenticator)

  def discard(authenticator: AugmentedJWT[A, I]): F[AugmentedJWT[A, I]] =
    tokenStore.delete(SecureRandomId.coerce(authenticator.id)).map(_ => authenticator)

  def afterBlock(response: Response[F], authenticator: AugmentedJWT[A, I]): OptionT[F, Response[F]] =
    OptionT.pure[F](response)
} 
Example 110
Source File: GrantHandler.scala    From tsec   with MIT License 5 votes vote down vote up
package tsec.oauth2.provider
package grantHandler

import cats.data.EitherT
import cats.effect.Sync
import cats.implicits._

import scala.concurrent.duration.FiniteDuration

sealed abstract class GrantType extends Product with Serializable {
  def name: String
}

object GrantType {
  val header = "grant_type"

  case object AuthorizationCode extends GrantType {
    def name: String = "authorization_code"
  }

  case object RefreshToken extends GrantType {
    def name: String = "refresh_token"
  }

  case object ClientCrendentials extends GrantType {
    def name: String = "client_credentials"
  }

  case object Password extends GrantType {
    def name: String = "password"
  }

  case object Implicit extends GrantType {
    def name: String = "implicit"
  }

  val strToGrantType = Map(
    AuthorizationCode.name  -> AuthorizationCode,
    RefreshToken.name       -> RefreshToken,
    ClientCrendentials.name -> ClientCrendentials,
    Password.name           -> Password,
    Implicit.name           -> Implicit
  )
}

final case class GrantHandlerResult[U](
    authInfo: AuthInfo[U],
    tokenType: String,
    accessToken: String,
    expiresIn: Option[FiniteDuration],
    refreshToken: Option[String],
    scope: Option[String],
    params: Map[String, String]
)

trait GrantHandler[F[_], U] {
  type A
  def handleRequest(req: A)(implicit F: Sync[F]): EitherT[F, OAuthError, GrantHandlerResult[U]]

  
  // Doc comment elided by the example extractor: issues a fresh access token
  // for the given auth info from a previously granted refresh token.
  def refreshAccessToken(authInfo: AuthInfo[U], refreshToken: String): F[AccessToken]
} 
Example 111
Source File: DurationGeneratorsSpec.scala    From play-json-ops   with MIT License 5 votes vote down vote up
package play.api.libs.json.scalacheck

import org.scalatest.WordSpec
import org.scalatest.prop.GeneratorDrivenPropertyChecks._
import play.api.libs.json.scalacheck.DurationGenerators._

import scala.concurrent.duration.{Duration, FiniteDuration}

class DurationGeneratorsSpec extends WordSpec {

  "Arbitrary[FiniteDuration]" should {
    "always produce a valid finite value" in {
      forAll() { (duration: FiniteDuration) =>
        assert(duration.isFinite())
      }
    }
  }

  "Arbitrary[Duration]" should {
    "always produce a valid value" in {
      forAll() { (duration: Duration) =>
        assert(duration ne null)
      }
    }
  }
} 
Example 112
Source File: DurationGeneratorsSpec.scala    From play-json-ops   with MIT License 5 votes vote down vote up
package play.api.libs.json.scalacheck

import org.scalatest.wordspec.AnyWordSpec
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks._
import play.api.libs.json.scalacheck.DurationGenerators._

import scala.concurrent.duration.{Duration, FiniteDuration}

class DurationGeneratorsSpec extends AnyWordSpec {

  "Arbitrary[FiniteDuration]" should {
    "always produce a valid finite value" in {
      forAll() { (duration: FiniteDuration) =>
        assert(duration.isFinite())
      }
    }
  }

  "Arbitrary[Duration]" should {
    "always produce a valid value" in {
      forAll() { (duration: Duration) =>
        assert(duration ne null)
      }
    }
  }
} 
Example 113
Source File: DurationGeneratorsSpec.scala    From play-json-ops   with MIT License 5 votes vote down vote up
package play.api.libs.json.scalacheck

import org.scalatest.wordspec.AnyWordSpec
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
import play.api.libs.json.scalacheck.DurationGenerators._

import scala.concurrent.duration.{Duration, FiniteDuration}

class DurationGeneratorsSpec extends AnyWordSpec
  with ScalaCheckDrivenPropertyChecks {

  "Arbitrary[FiniteDuration]" should {
    "always produce a valid finite value" in {
      forAll() { (duration: FiniteDuration) =>
        assert(duration.isFinite)
      }
    }
  }

  "Arbitrary[Duration]" should {
    "always produce a valid value" in {
      forAll() { (duration: Duration) =>
        assert(duration ne null)
      }
    }
  }
} 
Example 114
Source File: DropwizardMetrics.scala    From akka-http-metrics   with Apache License 2.0 5 votes vote down vote up
package fr.davit.akka.http.metrics.dropwizard

import fr.davit.akka.http.metrics.core.{Counter, Dimension, Gauge, Histogram, Timer}
import io.dropwizard.metrics5.{MetricName, MetricRegistry}

import scala.concurrent.duration.FiniteDuration

object DropwizardMetrics {

  implicit class RichMetricsName(val metricName: MetricName) extends AnyVal {

    def tagged(dimensions: Seq[Dimension]): MetricName =
      metricName.tagged(dimensions.flatMap(d => Seq(d.key, d.value)): _*)

  }
}

abstract class DropwizardMetrics(namespace: String, name: String) {
  protected lazy val metricName: MetricName = MetricName.build(namespace, name)
}

class DropwizardCounter(namespace: String, name: String)(implicit registry: MetricRegistry)
    extends DropwizardMetrics(namespace, name)
    with Counter {

  import DropwizardMetrics._

  override def inc(dimensions: Seq[Dimension] = Seq.empty): Unit = {
    registry.counter(metricName.tagged(dimensions)).inc()
  }
}

class DropwizardGauge(namespace: String, name: String)(implicit registry: MetricRegistry)
    extends DropwizardMetrics(namespace, name)
    with Gauge {

  import DropwizardMetrics._

  override def inc(dimensions: Seq[Dimension] = Seq.empty): Unit = {
    registry.counter(metricName.tagged(dimensions)).inc()
  }

  override def dec(dimensions: Seq[Dimension] = Seq.empty): Unit = {
    registry.counter(metricName.tagged(dimensions)).dec()
  }
}

class DropwizardTimer(namespace: String, name: String)(implicit registry: MetricRegistry)
    extends DropwizardMetrics(namespace, name)
    with Timer {

  import DropwizardMetrics._

  override def observe(duration: FiniteDuration, dimensions: Seq[Dimension] = Seq.empty): Unit = {
    registry.timer(metricName.tagged(dimensions)).update(duration.length, duration.unit)
  }
}

class DropwizardHistogram(namespace: String, name: String)(implicit registry: MetricRegistry)
    extends DropwizardMetrics(namespace, name)
    with Histogram {

  import DropwizardMetrics._

  override def update[T](value: T, dimensions: Seq[Dimension] = Seq.empty)(implicit numeric: Numeric[T]): Unit = {
    registry.histogram(metricName.tagged(dimensions)).update(numeric.toLong(value))
  }
} 
Example 115
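A wiring sketch for the adapters above against a fresh metrics5 registry (namespace and metric names are illustrative):

import io.dropwizard.metrics5.MetricRegistry
import scala.concurrent.duration.DurationInt

implicit val registry: MetricRegistry = new MetricRegistry()

val timer = new DropwizardTimer("http", "request_duration")
timer.observe(250.millis) // recorded as 250 with unit MILLISECONDS

val counter = new DropwizardCounter("http", "requests")
counter.inc()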
Source File: CarbonMetrics.scala    From akka-http-metrics   with Apache License 2.0 5 votes vote down vote up
package fr.davit.akka.http.metrics.graphite

import fr.davit.akka.http.metrics.core.{Counter, Dimension, Gauge, Histogram, Timer}

import scala.concurrent.duration.FiniteDuration

abstract class CarbonMetrics(namespace: String, name: String) {
  protected lazy val metricName: String = s"$namespace.$name"
}

class CarbonCounter(namespace: String, name: String)(implicit client: CarbonClient)
    extends CarbonMetrics(namespace, name)
    with Counter {
  override def inc(dimensions: Seq[Dimension] = Seq.empty): Unit = {
    client.publish(metricName, 1, dimensions)
  }
}

class CarbonGauge(namespace: String, name: String)(implicit client: CarbonClient)
    extends CarbonMetrics(namespace, name)
    with Gauge {
  override def inc(dimensions: Seq[Dimension] = Seq.empty): Unit = {
    client.publish(metricName, 1, dimensions)
  }

  override def dec(dimensions: Seq[Dimension] = Seq.empty): Unit = {
    client.publish(metricName, -1, dimensions)
  }
}

class CarbonTimer(namespace: String, name: String)(implicit client: CarbonClient)
    extends CarbonMetrics(namespace, name)
    with Timer {
  override def observe(duration: FiniteDuration, dimensions: Seq[Dimension] = Seq.empty): Unit = {
    client.publish(metricName, duration.toMillis, dimensions)
  }
}

class CarbonHistogram(namespace: String, name: String)(implicit client: CarbonClient)
    extends CarbonMetrics(namespace, name)
    with Histogram {
  override def update[T: Numeric](value: T, dimensions: Seq[Dimension] = Seq.empty): Unit = {
    client.publish(metricName, value, dimensions)
  }
} 
Example 116
Source File: PrometheusMetrics.scala    From akka-http-metrics   with Apache License 2.0 5 votes vote down vote up
package fr.davit.akka.http.metrics.prometheus

import fr.davit.akka.http.metrics.core._

import scala.concurrent.duration.FiniteDuration

class PrometheusCounter(counter: io.prometheus.client.Counter) extends Counter {
  override def inc(dimensions: Seq[Dimension]): Unit = {
    counter.labels(dimensions.map(_.value): _*).inc()
  }
}

class PrometheusGauge(gauge: io.prometheus.client.Gauge) extends Gauge {
  override def inc(dimensions: Seq[Dimension] = Seq.empty): Unit = {
    gauge.labels(dimensions.map(_.value): _*).inc()
  }

  override def dec(dimensions: Seq[Dimension] = Seq.empty): Unit = {
    gauge.labels(dimensions.map(_.value): _*).dec()
  }
}

class PrometheusSummaryTimer(summary: io.prometheus.client.Summary) extends Timer {
  override def observe(duration: FiniteDuration, dimensions: Seq[Dimension] = Seq.empty): Unit = {
    summary.labels(dimensions.map(_.value): _*).observe(duration.toMillis.toDouble / 1000.0)
  }
}

class PrometheusHistogramTimer(summary: io.prometheus.client.Histogram) extends Timer {
  override def observe(duration: FiniteDuration, dimensions: Seq[Dimension] = Seq.empty): Unit = {
    summary.labels(dimensions.map(_.value): _*).observe(duration.toMillis.toDouble / 1000.0)
  }
}

class PrometheusSummary(summary: io.prometheus.client.Summary) extends Histogram {
  override def update[T](value: T, dimensions: Seq[Dimension] = Seq.empty)(implicit numeric: Numeric[T]): Unit = {
    summary.labels(dimensions.map(_.value): _*).observe(numeric.toDouble(value))
  }
}

class PrometheusHistogram(histogram: io.prometheus.client.Histogram) extends Histogram {
  override def update[T](value: T, dimensions: Seq[Dimension] = Seq.empty)(implicit numeric: Numeric[T]): Unit = {
    histogram.labels(dimensions.map(_.value): _*).observe(numeric.toDouble(value))
  }
} 
Example 117
Source File: Metrics.scala    From akka-http-metrics   with Apache License 2.0 5 votes vote down vote up
package fr.davit.akka.http.metrics.core

import scala.concurrent.duration.FiniteDuration

trait Dimension {
  def key: String
  def value: String
}

trait Counter {
  def inc(dimensions: Seq[Dimension] = Seq.empty): Unit
}

trait Gauge {
  def inc(dimensions: Seq[Dimension] = Seq.empty): Unit

  def dec(dimensions: Seq[Dimension] = Seq.empty): Unit
}

trait Timer {
  def observe(duration: FiniteDuration, dimensions: Seq[Dimension] = Seq.empty): Unit
}

trait Histogram {
  def update[T: Numeric](value: T, dimensions: Seq[Dimension] = Seq.empty): Unit
} 
Example 118
Source File: TestRegistry.scala    From akka-http-metrics   with Apache License 2.0 5 votes vote down vote up
package fr.davit.akka.http.metrics.core

import akka.http.scaladsl.marshalling.{Marshaller, ToEntityMarshaller}
import akka.http.scaladsl.model.HttpEntity

import scala.collection.mutable
import scala.concurrent.duration.FiniteDuration

object TestRegistry {
  implicit val marshaller: ToEntityMarshaller[TestRegistry] = Marshaller.opaque(_ => HttpEntity.Empty)

  private def keyer(dimensions: Seq[Dimension]): String = dimensions.mkString(":")

  class TestCounter extends Counter {
    protected val acc = mutable.Map[String, Long]()

    override def inc(dimensions: Seq[Dimension] = Seq.empty): Unit = {
      val key = keyer(dimensions)
      acc.get(key) match {
        case Some(v) => acc += (key -> (v + 1))
        case None    => acc += (key -> 1)
      }
    }

    def value(dimensions: Seq[Dimension] = Seq.empty): Long = acc.getOrElse(keyer(dimensions), 0)
  }

  class TestGauge extends TestCounter with Gauge {
    override def dec(dimensions: Seq[Dimension] = Seq.empty): Unit = {
      val key = keyer(dimensions)
      acc.get(key) match {
        case Some(v) => acc += (key -> (v - 1))
        case None    => acc += (key -> -1)
      }
    }
  }

  class TestTimer extends Timer {
    protected val acc = mutable.Map[String, List[FiniteDuration]]()

    override def observe(duration: FiniteDuration, dimensions: Seq[Dimension] = Seq.empty): Unit = {
      val key = keyer(dimensions)
      acc.get(key) match {
        case Some(vs) => acc += (key -> (duration :: vs))
        case None     => acc += (key -> (duration :: Nil))
      }
    }

    def values(dimensions: Seq[Dimension] = Seq.empty): List[FiniteDuration] = acc.getOrElse(keyer(dimensions), Nil)
  }

  final class TestHistogram extends Histogram {
    protected val acc = mutable.Map[String, List[Long]]()

    override def update[T](value: T, dimensions: Seq[Dimension] = Seq.empty)(implicit numeric: Numeric[T]): Unit = {
      val key = keyer(dimensions)
      acc.get(key) match {
        case Some(vs) => acc += (key -> (numeric.toLong(value) :: vs))
        case None     => acc += (key -> (numeric.toLong(value) :: Nil))
      }
    }

    def values(dimensions: Seq[Dimension] = Seq.empty): List[Long] = acc.getOrElse(keyer(dimensions), Nil)
  }

}

final class TestRegistry(settings: HttpMetricsSettings = HttpMetricsSettings.default)
    extends HttpMetricsRegistry(settings) {

  import TestRegistry._

  override val active = new TestGauge

  override val requests = new TestCounter

  override val receivedBytes = new TestHistogram

  override val responses = new TestCounter

  override val errors = new TestCounter

  override val duration = new TestTimer

  override val sentBytes = new TestHistogram

  override val connected = new TestGauge

  override val connections = new TestCounter
} 
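A quick usage sketch of the registry above; it relies only on the members defined in this example and on the default HttpMetricsSettings used by the constructor:

val registry = new TestRegistry()

registry.requests.inc()
registry.requests.inc()
assert(registry.requests.value() == 2)

registry.active.inc()
registry.active.dec()
assert(registry.active.value() == 0)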
Example 119
Source File: StatsDMetrics.scala    From akka-http-metrics   with Apache License 2.0
package fr.davit.akka.http.metrics.datadog

import com.timgroup.statsd.StatsDClient
import fr.davit.akka.http.metrics.core.{Counter, Dimension, Gauge, Histogram, Timer}

import scala.concurrent.duration.FiniteDuration

object StatsDMetrics {
  def dimensionToTag(dimension: Dimension): String = s"${dimension.key}:${dimension.value}"
}

abstract class StatsDMetrics(namespace: String, name: String) {
  protected lazy val metricName: String = s"$namespace.$name"
}

class StatsDCounter(namespace: String, name: String)(implicit client: StatsDClient)
    extends StatsDMetrics(namespace: String, name: String)
    with Counter {
  override def inc(dimensions: Seq[Dimension] = Seq.empty): Unit = {
    client.increment(metricName, dimensions.map(StatsDMetrics.dimensionToTag): _*)
  }
}

class StatsDGauge(namespace: String, name: String)(implicit client: StatsDClient)
    extends StatsDMetrics(namespace: String, name: String)
    with Gauge {
  override def inc(dimensions: Seq[Dimension] = Seq.empty): Unit = {
    client.increment(metricName, dimensions.map(StatsDMetrics.dimensionToTag): _*)
  }

  override def dec(dimensions: Seq[Dimension] = Seq.empty): Unit = {
    client.decrement(metricName, dimensions.map(StatsDMetrics.dimensionToTag): _*)
  }
}

class StatsDTimer(namespace: String, name: String)(implicit client: StatsDClient)
    extends StatsDMetrics(namespace: String, name: String)
    with Timer {
  override def observe(duration: FiniteDuration, dimensions: Seq[Dimension] = Seq.empty): Unit = {
    client.distribution(metricName, duration.toMillis, dimensions.map(StatsDMetrics.dimensionToTag): _*)
  }
}

class StatsDHistogram(namespace: String, name: String)(implicit client: StatsDClient)
    extends StatsDMetrics(namespace: String, name: String)
    with Histogram {
  override def update[T](value: T, dimensions: Seq[Dimension] = Seq.empty)(implicit numeric: Numeric[T]): Unit = {
    client.distribution(metricName, numeric.toDouble(value), dimensions.map(StatsDMetrics.dimensionToTag): _*)
  }
} 
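Wiring these StatsD implementations to a concrete client might look like the sketch below. NonBlockingStatsDClient is the client shipped with the com.timgroup library; the (prefix, host, port) constructor used here is an assumption about that library, not something shown in this excerpt:

import com.timgroup.statsd.{NonBlockingStatsDClient, StatsDClient}
import scala.concurrent.duration._

// Assumed constructor shape: (metric prefix, StatsD host, StatsD port).
implicit val client: StatsDClient = new NonBlockingStatsDClient("akka.http", "localhost", 8125)

val requests = new StatsDCounter("server", "requests")
val duration = new StatsDTimer("server", "duration")

requests.inc()            // emitted as "akka.http.server.requests"
duration.observe(42.millis) // emitted as a distribution in milliseconds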
Example 120
Source File: GuideStyleUtils.scala    From udash-core   with Apache License 2.0
package io.udash.web.guide.styles.utils

import io.udash.css.{CssBase, CssStyle}
import io.udash.web.commons.styles.utils.StyleConstants
import scalacss.internal.Macros.Color
import scalacss.internal.{AV, Attr, Length}

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.language.postfixOps

object GuideStyleUtils extends CssBase {
  import dsl._

  val relativeMiddle: CssStyle = mixin(
    top(50 %%),
    transform := "translateY(-50%)",
    position.relative
  )

  def transition(property: Attr = all, duration: FiniteDuration = 250 milliseconds): CssStyle = mixin(
    transitionProperty := property.toString(),
    transitionDuration(duration),
    transitionTimingFunction.easeInOut
  )

  def border(bColor: Color = StyleConstants.Colors.GreyExtra, bWidth: Length[Double] = 1.0 px, bStyle: AV = borderStyle.solid): CssStyle = mixin(
    borderWidth(bWidth),
    bStyle,
    borderColor(bColor)
  )
} 
Example 121
Source File: CommonStyleUtils.scala    From udash-core   with Apache License 2.0
package io.udash.web.commons.styles.utils

import io.udash.css.{CssBase, CssStyle}
import scalacss.internal.Macros.Color
import scalacss.internal.{AV, Attr, Length}

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.language.postfixOps

object CommonStyleUtils extends CssBase {
  import dsl._

  val middle: CssStyle = mixin(
    top(50 %%),
    transform := "translateY(-50%)"
  )

  val center: CssStyle = mixin(
    top(50 %%),
    left(50 %%),
    transform := "translateY(-50%) translateX(-50%)"
  )

  val relativeMiddle: CssStyle = mixin(
    middle,
    position.relative
  )

  val absoluteMiddle: CssStyle = mixin(
    middle,
    position.absolute
  )

  val absoluteCenter: CssStyle = mixin(
    center,
    position.absolute
  )

  def transition(property: Attr = all, duration: FiniteDuration = 250 milliseconds): CssStyle = style(
    transitionProperty := property.toString(),
    transitionDuration(duration),
    transitionTimingFunction.easeInOut
  )

  def border(bColor: Color = StyleConstants.Colors.GreyExtra, bWidth: Length[Double] = 1.0 px, bStyle: AV = borderStyle.solid): CssStyle = style(
    borderWidth(bWidth),
    bStyle,
    borderColor(bColor)
  )
} 
Example 122
Source File: RemoteTranslationProvider.scala    From udash-core   with Apache License 2.0
package io.udash.i18n

import java.{util => ju}

import com.avsystem.commons._
import org.scalajs.dom.ext.Storage

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.scalajs.js
import scala.util.Try

  // The enclosing class declaration and its private members (translationsEndpoint, ttl,
  // reloading, and the storageKey/cacheHashKey/cacheTTLKey helpers) are elided in this excerpt.
  private def reloadCache(storage: Storage)(implicit lang: Lang): Future[Boolean] = {
    def isCacheValid(timestamp: String): Boolean =
      Try(timestamp.toLong > now()).getOrElse(false)

    storage(storageKey(cacheTTLKey)) match {
      case Some(value) if isCacheValid(value) =>
        Future.successful(false)
      case _ if reloading != null =>
        reloading.mapNow(_ => true)
      case _ =>
        reloading = translationsEndpoint.loadTranslations(BundleHash(storage(storageKey(cacheHashKey)).getOrElse("")))
        reloading.mapNow {
          case Some(Bundle(hash, translations)) =>
            translations.foreach {
              case (key, value) =>
                storage(storageKey(key)) = value
            }
            storage(storageKey(cacheHashKey)) = hash.hash
            storage(storageKey(cacheTTLKey)) = (js.Date.now() + ttl.fromNow.timeLeft.toMillis).toString
            reloading = null
            true
          case None =>
            storage(storageKey(cacheTTLKey)) = (js.Date.now() + ttl.fromNow.timeLeft.toMillis).toString
            reloading = null
            true
        }
    }
  }

  private def now() = new ju.Date().getTime
} 
Example 123
Source File: instances.scala    From cats-retry   with Apache License 2.0
package retry
package alleycats

import cats.{Eval, Id}
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise}
import java.util.concurrent.{ThreadFactory, Executors}

object instances {
  implicit val threadSleepId: Sleep[Id] = new Sleep[Id] {
    def sleep(delay: FiniteDuration): Id[Unit] = Thread.sleep(delay.toMillis)
  }

  implicit val threadSleepEval: Sleep[Eval] = new Sleep[Eval] {
    def sleep(delay: FiniteDuration): Eval[Unit] =
      Eval.later(Thread.sleep(delay.toMillis))
  }

  private lazy val scheduler =
    Executors.newSingleThreadScheduledExecutor(new ThreadFactory {
      override def newThread(runnable: Runnable) = {
        val t = new Thread(runnable)
        t.setDaemon(true)
        t.setName("cats-retry scheduler")
        t
      }
    })

  implicit val threadSleepFuture: Sleep[Future] =
    new Sleep[Future] {
      def sleep(delay: FiniteDuration): Future[Unit] = {
        val promise = Promise[Unit]()
        scheduler.schedule(new Runnable {
          def run: Unit = {
            promise.success(())
            ()
          }
        }, delay.length, delay.unit)
        promise.future
      }
    }
} 
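A small sketch of these instances in action: the Id instance blocks the calling thread, while the Future instance completes asynchronously on the shared daemon scheduler defined above. It assumes only the code in this example plus the Sleep summoner from Example 126:

import cats.Id
import retry.Sleep
import retry.alleycats.instances._
import scala.concurrent.Future
import scala.concurrent.duration._

// Blocks the current thread for 100 ms.
Sleep[Id].sleep(100.millis)

// Completes a Future after 100 ms without blocking any thread.
val delayed: Future[Unit] = Sleep[Future].sleep(100.millis)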
Example 124
Source File: RetryDetails.scala    From cats-retry   with Apache License 2.0
package retry

import scala.concurrent.duration.FiniteDuration

sealed trait RetryDetails {
  def retriesSoFar: Int
  def cumulativeDelay: FiniteDuration
  def givingUp: Boolean
  def upcomingDelay: Option[FiniteDuration]
}

object RetryDetails {
  final case class GivingUp(
      totalRetries: Int,
      totalDelay: FiniteDuration
  ) extends RetryDetails {
    val retriesSoFar: Int                     = totalRetries
    val cumulativeDelay: FiniteDuration       = totalDelay
    val givingUp: Boolean                     = true
    val upcomingDelay: Option[FiniteDuration] = None
  }

  final case class WillDelayAndRetry(
      nextDelay: FiniteDuration,
      retriesSoFar: Int,
      cumulativeDelay: FiniteDuration
  ) extends RetryDetails {
    val givingUp: Boolean                     = false
    val upcomingDelay: Option[FiniteDuration] = Some(nextDelay)
  }
} 
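The two cases are typically inspected in an error handler. A sketch of a logging callback built only from the fields defined above:

import retry.RetryDetails
import retry.RetryDetails.{GivingUp, WillDelayAndRetry}

def logRetry(error: Throwable, details: RetryDetails): Unit = details match {
  case WillDelayAndRetry(nextDelay, retriesSoFar, cumulativeDelay) =>
    println(s"Attempt ${retriesSoFar + 1} failed (${error.getMessage}); " +
      s"retrying in $nextDelay after $cumulativeDelay of accumulated delay")
  case GivingUp(totalRetries, totalDelay) =>
    println(s"Giving up after $totalRetries retries and $totalDelay (${error.getMessage})")
}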
Example 125
Source File: RetryPolicies.scala    From cats-retry   with Apache License 2.0
package retry

import java.util.concurrent.TimeUnit

import cats.Applicative
import cats.syntax.functor._
import cats.syntax.show._
import cats.instances.finiteDuration._
import cats.instances.int._
import retry.PolicyDecision._

import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.util.Random

object RetryPolicies {
  private val LongMax: BigInt = BigInt(Long.MaxValue)

  
  def limitRetriesByCumulativeDelay[M[_]: Applicative](
      threshold: FiniteDuration,
      policy: RetryPolicy[M]
  ): RetryPolicy[M] = {
    def decideNextRetry(status: RetryStatus): M[PolicyDecision] =
      policy.decideNextRetry(status).map {
        case r @ DelayAndRetry(delay) =>
          if (status.cumulativeDelay + delay >= threshold) GiveUp else r
        case GiveUp => GiveUp
      }

    RetryPolicy.withShow[M](
      decideNextRetry,
      show"limitRetriesByCumulativeDelay(threshold=$threshold, $policy)"
    )
  }
} 
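A usage sketch combining the combinator above with constantDelay, another policy constructor from the same library that is not shown in this excerpt:

import cats.effect.IO
import retry.RetryPolicies
import scala.concurrent.duration._

// Retry every 100 ms, but give up once the accumulated delay reaches 2 seconds.
val policy = RetryPolicies.limitRetriesByCumulativeDelay[IO](
  threshold = 2.seconds,
  policy    = RetryPolicies.constantDelay[IO](100.millis)
)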
Example 126
Source File: Sleep.scala    From cats-retry   with Apache License 2.0
package retry

import cats.effect.Timer
import scala.concurrent.duration.FiniteDuration

trait Sleep[M[_]] {
  def sleep(delay: FiniteDuration): M[Unit]
}

object Sleep {
  def apply[M[_]](implicit sleep: Sleep[M]): Sleep[M] = sleep

  implicit def sleepUsingTimer[F[_]](implicit timer: Timer[F]): Sleep[F] =
    new Sleep[F] {
      def sleep(delay: FiniteDuration): F[Unit] = timer.sleep(delay)
    }
} 
Example 127
Source File: RetryStatus.scala    From cats-retry   with Apache License 2.0
package retry

import scala.concurrent.duration.{Duration, FiniteDuration}

final case class RetryStatus(
    retriesSoFar: Int,
    cumulativeDelay: FiniteDuration,
    previousDelay: Option[FiniteDuration]
) {
  def addRetry(delay: FiniteDuration): RetryStatus = RetryStatus(
    retriesSoFar = this.retriesSoFar + 1,
    cumulativeDelay = this.cumulativeDelay + delay,
    previousDelay = Some(delay)
  )
}

object RetryStatus {
  val NoRetriesYet = RetryStatus(0, Duration.Zero, None)
} 
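The accumulation logic is easy to check by hand:

import scala.concurrent.duration._

val status = RetryStatus.NoRetriesYet
  .addRetry(100.millis)
  .addRetry(200.millis)

// status.retriesSoFar    == 2
// status.cumulativeDelay == 300.millis
// status.previousDelay   == Some(200.millis)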
Example 128
Source File: LeanMessagingProvider.scala    From openwhisk   with Apache License 2.0
package org.apache.openwhisk.connector.lean

import java.util.concurrent.BlockingQueue
import java.util.concurrent.LinkedBlockingQueue

import scala.collection.mutable.Map
import scala.collection.concurrent.TrieMap
import scala.concurrent.duration.FiniteDuration
import scala.util.Success
import scala.util.Try

import akka.actor.ActorSystem
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.core.WhiskConfig
import org.apache.openwhisk.core.connector.MessageConsumer
import org.apache.openwhisk.core.connector.MessageProducer
import org.apache.openwhisk.core.connector.MessagingProvider
import org.apache.openwhisk.core.entity.ByteSize

// The enclosing declaration was dropped from this excerpt; it is assumed to be the
// provider object implementing the MessagingProvider SPI (see Example 130).
object LeanMessagingProvider extends MessagingProvider {
  val queues: Map[String, BlockingQueue[Array[Byte]]] =
    new TrieMap[String, BlockingQueue[Array[Byte]]]

  def getConsumer(config: WhiskConfig, groupId: String, topic: String, maxPeek: Int, maxPollInterval: FiniteDuration)(
    implicit logging: Logging,
    actorSystem: ActorSystem): MessageConsumer = {

    val queue = queues.getOrElseUpdate(topic, new LinkedBlockingQueue[Array[Byte]]())

    new LeanConsumer(queue, maxPeek)
  }

  def getProducer(config: WhiskConfig, maxRequestSize: Option[ByteSize] = None)(
    implicit logging: Logging,
    actorSystem: ActorSystem): MessageProducer =
    new LeanProducer(queues)

  def ensureTopic(config: WhiskConfig, topic: String, topicConfigKey: String, maxMessageBytes: Option[ByteSize] = None)(
    implicit logging: Logging): Try[Unit] = {
    if (queues.contains(topic)) {
      Success(logging.info(this, s"topic $topic already existed"))
    } else {
      queues.put(topic, new LinkedBlockingQueue[Array[Byte]]())
      Success(logging.info(this, s"topic $topic created"))
    }
  }
} 
Example 129
Source File: ContainerFactory.scala    From openwhisk   with Apache License 2.0
package org.apache.openwhisk.core.containerpool

import akka.actor.ActorSystem
import org.apache.openwhisk.common.{Logging, TransactionId}
import org.apache.openwhisk.core.WhiskConfig
import org.apache.openwhisk.core.entity.{ByteSize, ExecManifest, ExecutableWhiskAction, InvokerInstanceId}
import org.apache.openwhisk.spi.Spi

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.math.max

case class ContainerArgsConfig(network: String,
                               dnsServers: Seq[String] = Seq.empty,
                               dnsSearch: Seq[String] = Seq.empty,
                               dnsOptions: Seq[String] = Seq.empty,
                               extraEnvVars: Seq[String] = Seq.empty,
                               extraArgs: Map[String, Set[String]] = Map.empty) {

  val extraEnvVarMap: Map[String, String] =
    extraEnvVars.flatMap {
      _.split("=", 2) match {
        case Array(key)        => Some(key -> "")
        case Array(key, value) => Some(key -> value)
        case _                 => None
      }
    }.toMap
}

case class ContainerPoolConfig(userMemory: ByteSize,
                               concurrentPeekFactor: Double,
                               akkaClient: Boolean,
                               prewarmExpirationCheckInterval: FiniteDuration) {
  require(
    concurrentPeekFactor > 0 && concurrentPeekFactor <= 1.0,
    s"concurrentPeekFactor must be > 0 and <= 1.0; was $concurrentPeekFactor")

  require(prewarmExpirationCheckInterval.toSeconds > 0, "prewarmExpirationCheckInterval must be > 0")
}

trait ContainerFactoryProvider extends Spi {
  def instance(actorSystem: ActorSystem,
               logging: Logging,
               config: WhiskConfig,
               instance: InvokerInstanceId,
               parameters: Map[String, Set[String]]): ContainerFactory
} 
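The split("=", 2) in extraEnvVarMap keeps everything after the first = in the value and maps a bare key to the empty string, as a quick check using only the case class above shows:

val args = ContainerArgsConfig(
  network      = "bridge",
  extraEnvVars = Seq("FOO=bar", "URL=http://host:8080?a=b", "EMPTY")
)

// Map(FOO -> bar, URL -> http://host:8080?a=b, EMPTY -> "")
println(args.extraEnvVarMap)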
Example 130
Source File: MessagingProvider.scala    From openwhisk   with Apache License 2.0
package org.apache.openwhisk.core.connector

import akka.actor.ActorSystem

import scala.concurrent.duration.DurationInt
import scala.concurrent.duration.FiniteDuration
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.core.WhiskConfig
import org.apache.openwhisk.core.entity.ByteSize
import org.apache.openwhisk.spi.Spi

import scala.util.Try


trait MessagingProvider extends Spi {
  def getConsumer(
    config: WhiskConfig,
    groupId: String,
    topic: String,
    maxPeek: Int = Int.MaxValue,
    maxPollInterval: FiniteDuration = 5.minutes)(implicit logging: Logging, actorSystem: ActorSystem): MessageConsumer
  def getProducer(config: WhiskConfig, maxRequestSize: Option[ByteSize] = None)(
    implicit logging: Logging,
    actorSystem: ActorSystem): MessageProducer
  def ensureTopic(config: WhiskConfig, topic: String, topicConfig: String, maxMessageBytes: Option[ByteSize] = None)(
    implicit logging: Logging): Try[Unit]
} 
Example 131
Source File: OpenWhiskEvents.scala    From openwhisk   with Apache License 2.0
package org.apache.openwhisk.core.monitoring.metrics

import akka.actor.{ActorSystem, CoordinatedShutdown}
import akka.event.slf4j.SLF4JLogging
import akka.http.scaladsl.Http
import akka.kafka.ConsumerSettings
import akka.stream.ActorMaterializer
import com.typesafe.config.Config
import kamon.Kamon
import kamon.prometheus.PrometheusReporter
import org.apache.kafka.common.serialization.StringDeserializer
import pureconfig._
import pureconfig.generic.auto._

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}

object OpenWhiskEvents extends SLF4JLogging {

  case class MetricConfig(port: Int,
                          enableKamon: Boolean,
                          ignoredNamespaces: Set[String],
                          renameTags: Map[String, String],
                          retry: RetryConfig)

  case class RetryConfig(minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, maxRestarts: Int)

  def start(config: Config)(implicit system: ActorSystem,
                            materializer: ActorMaterializer): Future[Http.ServerBinding] = {
    implicit val ec: ExecutionContext = system.dispatcher

    val prometheusReporter = new PrometheusReporter()
    Kamon.registerModule("prometheus", prometheusReporter)
    Kamon.init(config)

    val metricConfig = loadConfigOrThrow[MetricConfig](config, "whisk.user-events")

    val prometheusRecorder = PrometheusRecorder(prometheusReporter, metricConfig)
    val recorders = if (metricConfig.enableKamon) Seq(prometheusRecorder, KamonRecorder) else Seq(prometheusRecorder)
    val eventConsumer = EventConsumer(eventConsumerSettings(defaultConsumerConfig(config)), recorders, metricConfig)

    CoordinatedShutdown(system).addTask(CoordinatedShutdown.PhaseBeforeServiceUnbind, "shutdownConsumer") { () =>
      eventConsumer.shutdown()
    }
    val port = metricConfig.port
    val api = new PrometheusEventsApi(eventConsumer, prometheusRecorder)
    val httpBinding = Http().bindAndHandle(api.routes, "0.0.0.0", port)
    httpBinding.foreach(_ => log.info(s"Started the http server on http://localhost:$port"))(system.dispatcher)
    httpBinding
  }

  def eventConsumerSettings(config: Config): ConsumerSettings[String, String] =
    ConsumerSettings(config, new StringDeserializer, new StringDeserializer)

  def defaultConsumerConfig(globalConfig: Config): Config = globalConfig.getConfig("akka.kafka.consumer")
} 
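MetricConfig is loaded with pureconfig from the whisk.user-events path. Under pureconfig's default kebab-case field mapping, the corresponding HOCON would look roughly like the sketch below; the key names are inferred from the case classes, not taken from the project's reference.conf, and the values are invented:

import com.typesafe.config.ConfigFactory

// Hypothetical configuration matching MetricConfig/RetryConfig above.
val config = ConfigFactory.parseString("""
  whisk.user-events {
    port = 9095
    enable-kamon = false
    ignored-namespaces = ["guest"]
    rename-tags { "namespace" = "ow_namespace" }
    retry {
      min-backoff = 1 second
      max-backoff = 30 seconds
      random-factor = 0.2
      max-restarts = 10
    }
  }
""")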
Example 132
Source File: KafkaSpecBase.scala    From openwhisk   with Apache License 2.0
package org.apache.openwhisk.core.monitoring.metrics

import akka.kafka.testkit.scaladsl.{EmbeddedKafkaLike, ScalatestKafkaSpec}
import akka.stream.ActorMaterializer
import net.manub.embeddedkafka.EmbeddedKafka
import org.scalatest._
import org.scalatest.concurrent.{Eventually, IntegrationPatience, ScalaFutures}

import scala.concurrent.duration.{DurationInt, FiniteDuration}

abstract class KafkaSpecBase
    extends ScalatestKafkaSpec(6065)
    with Matchers
    with ScalaFutures
    with FlatSpecLike
    with EmbeddedKafka
    with EmbeddedKafkaLike
    with IntegrationPatience
    with Eventually
    with EventsTestHelper { this: Suite =>
  implicit val timeoutConfig: PatienceConfig = PatienceConfig(1.minute)
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  override val sleepAfterProduce: FiniteDuration = 10.seconds
  override protected val topicCreationTimeout = 60.seconds
} 
Example 133
Source File: PostActionActivation.scala    From openwhisk   with Apache License 2.0
package org.apache.openwhisk.core.controller.actions

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

import akka.http.scaladsl.model.StatusCodes.BadRequest

import spray.json._

import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.controller.RejectRequest
import org.apache.openwhisk.core.controller.WhiskServices
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.http.Messages

protected[core] trait PostActionActivation extends PrimitiveActions with SequenceActions {
  
  protected[controller] def invokeAction(
    user: Identity,
    action: WhiskActionMetaData,
    payload: Option[JsObject],
    waitForResponse: Option[FiniteDuration],
    cause: Option[ActivationId])(implicit transid: TransactionId): Future[Either[ActivationId, WhiskActivation]] = {
    action.toExecutableWhiskAction match {
      // this is a topmost sequence
      case None =>
        val SequenceExecMetaData(components) = action.exec
        invokeSequence(user, action, components, payload, waitForResponse, cause, topmost = true, 0).map(r => r._1)
      // a non-deprecated ExecutableWhiskAction
      case Some(executable) if !executable.exec.deprecated =>
        invokeSingleAction(user, executable, payload, waitForResponse, cause)
      // a deprecated exec
      case _ =>
        Future.failed(RejectRequest(BadRequest, Messages.runtimeDeprecated(action.exec)))
    }
  }
} 
Example 134
Source File: CacheIntegration.scala    From http4s-poc-api   with MIT License
package integration

import cats.effect.syntax.concurrent._
import cats.effect.{Concurrent, ContextShift, IO, Timer}
import cats.syntax.flatMap._
import errors.PriceServiceError.{CacheLookupError, CacheStoreError}
import external.TeamThreeCacheApi
import external.library.IoAdapt.-->
import external.library.syntax.errorAdapt._
import external.library.syntax.ioAdapt._
import model.DomainModel._

import scala.concurrent.duration.FiniteDuration

sealed trait CacheIntegration[F[_]] {
  def cachedProduct: ProductId => F[Option[Product]]
  def storeProductToCache: ProductId => Product => F[Unit]
}

object CacheIntegration {
  @inline def apply[F[_]: Concurrent: Timer: IO --> *[_]](
    cache: TeamThreeCacheApi[ProductId, Product],
    t: FiniteDuration
  )(
    implicit CS: ContextShift[F]
  ): CacheIntegration[F] =
    new CacheIntegration[F] {
      def cachedProduct: ProductId => F[Option[Product]] =
        pId => CS.shift >> cache.get(pId).adaptedTo[F].timeout(t).narrowFailureTo[CacheLookupError]

      def storeProductToCache: ProductId => Product => F[Unit] =
        pId => p => CS.shift >> cache.put(pId)(p).adaptedTo[F].timeout(t).narrowFailureTo[CacheStoreError]
    }
} 
Example 135
Source File: Api.scala    From whirlwind-tour-akka-typed   with Apache License 2.0
package de.heikoseeberger.wtat

import akka.actor.{ ActorSystem, Scheduler }
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.model.StatusCodes.{ Conflict, Created, NoContent, NotFound }
import akka.http.scaladsl.server.{ Directives, Route }
import akka.stream.Materializer
import akka.actor.typed.scaladsl.Actor
import akka.actor.typed.scaladsl.AskPattern.Askable
import akka.actor.typed.{ ActorRef, Behavior }
import akka.util.Timeout
import de.heikoseeberger.akkahttpcirce.ErrorAccumulatingCirceSupport
import java.net.InetSocketAddress
import org.apache.logging.log4j.scala.Logging
import scala.concurrent.duration.FiniteDuration
import scala.util.{ Failure, Success }

object Api extends Logging {

  sealed trait Command
  private final case object HandleBindFailure                      extends Command
  private final case class HandleBound(address: InetSocketAddress) extends Command

  final val Name = "api"

  def apply(address: String,
            port: Int,
            userRepository: ActorRef[UserRepository.Command],
            userView: ActorRef[UserView.Command],
            askTimeout: FiniteDuration)(implicit mat: Materializer): Behavior[Command] =
    Actor.deferred { context =>
      import akka.actor.typed.scaladsl.adapter._
      import context.executionContext
      implicit val s: ActorSystem = context.system.toUntyped

      val self = context.self
      Http()
        .bindAndHandle(route(userRepository, userView)(askTimeout, context.system.scheduler),
                       address,
                       port)
        .onComplete {
          case Failure(_)                      => self ! HandleBindFailure
          case Success(ServerBinding(address)) => self ! HandleBound(address)
        }

      Actor.immutable {
        case (_, HandleBindFailure) =>
          logger.error(s"Stopping, because cannot bind to $address:$port!")
          Actor.stopped

        case (_, HandleBound(address)) =>
          logger.info(s"Bound to $address")
          Actor.ignore
      }
    }

  def route(
      userRepository: ActorRef[UserRepository.Command],
      userView: ActorRef[UserView.Command]
  )(implicit askTimeout: Timeout, scheduler: Scheduler): Route = {
    import Directives._
    import ErrorAccumulatingCirceSupport._
    import io.circe.generic.auto._
    import io.circe.refined._

    pathEndOrSingleSlash {
      get {
        complete {
          import UserView._
          (userView ? GetUsers).mapTo[Users]
        }
      } ~
      post {
        entity(as[User]) { user =>
          import UserRepository._
          onSuccess(userRepository ? addUser(user)) {
            case UsernameTaken(_) => complete(Conflict)
            case UserAdded(_)     => complete(Created)
          }
        }
      }
    } ~
    path(Segment) { username =>
      delete {
        import UserRepository._
        onSuccess(userRepository ? removeUser(username)) {
          case UsernameUnknown(_) => complete(NotFound)
          case UserRemoved(_)     => complete(NoContent)
        }
      }
    }
  }
} 
Example 136
Source File: UserProjection.scala    From whirlwind-tour-akka-typed   with Apache License 2.0
package de.heikoseeberger.wtat

import akka.actor.Scheduler
import akka.actor.typed.{ ActorRef, Behavior }
import akka.actor.typed.scaladsl.Actor
import akka.actor.typed.scaladsl.AskPattern.Askable
import akka.cluster.Cluster
import akka.cluster.ddata.{ ORSet, ORSetKey }
import akka.cluster.ddata.Replicator.WriteLocal
import akka.cluster.ddata.typed.scaladsl.{ DistributedData, Replicator }
import akka.persistence.query.EventEnvelope
import akka.persistence.query.scaladsl.EventsByPersistenceIdQuery
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import akka.util.Timeout
import cats.instances.string._
import cats.syntax.eq._
import org.apache.logging.log4j.scala.Logging
import scala.concurrent.duration.FiniteDuration

object UserProjection extends Logging {
  import akka.actor.typed.scaladsl.adapter._

  sealed trait Command
  final case object Stop                              extends Command
  private final case object HandleEventStreamComplete extends Command

  abstract class EventStreamCompleteException
      extends IllegalStateException("Event stream completed unexpectedly!")
  private final case object EventStreamCompleteException extends EventStreamCompleteException

  final val Name = "user-projection"

  final val usersKey: ORSetKey[User] =
    ORSetKey("users")

  def apply(readJournal: EventsByPersistenceIdQuery,
            userView: ActorRef[UserView.Command],
            askTimeout: FiniteDuration)(implicit mat: Materializer): Behavior[Command] =
    Actor.deferred { context =>
      implicit val c: Cluster   = Cluster(context.system.toUntyped)
      implicit val s: Scheduler = context.system.scheduler
      implicit val t: Timeout   = askTimeout
      val replicator            = DistributedData(context.system).replicator
      val self                  = context.self

      readJournal
        .eventsByPersistenceId(UserRepository.Name, 0, Long.MaxValue)
        .collect { case EventEnvelope(_, _, _, event: UserRepository.Event) => event }
        .mapAsync(1) {
          case UserRepository.UserAdded(user) =>
            replicator ? Replicator.Update(usersKey, ORSet.empty[User], WriteLocal)(_ + user)

          case UserRepository.UserRemoved(username) =>
            replicator ? Replicator.Update(usersKey, ORSet.empty[User], WriteLocal) { users =>
              users.elements.find(_.username.value === username).fold(users)(users - _)
            }
        }
        .runWith(Sink.onComplete(_ => self ! HandleEventStreamComplete))
      logger.debug("Running event stream")

      Actor.immutable {
        case (_, Stop)                      => Actor.stopped
        case (_, HandleEventStreamComplete) => throw EventStreamCompleteException
      }
    }
} 
Example 137
Source File: BotPluginTestKit.scala    From sumobot   with Apache License 2.0
package com.sumologic.sumobot.test.annotated

import akka.actor.ActorSystem
import akka.testkit.{TestKit, TestProbe}
import com.sumologic.sumobot.core.model.{IncomingMessage, InstantMessageChannel, OutgoingMessage, UserSender}
import org.junit.runner.RunWith
import org.scalatest.concurrent.Eventually
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import slack.models.User

import scala.concurrent.duration.{FiniteDuration, _}

@RunWith(classOf[JUnitRunner])
abstract class BotPluginTestKit(actorSystem: ActorSystem)
  extends TestKit(actorSystem)
    with WordSpecLike with Eventually with Matchers
    with BeforeAndAfterAll {

  protected val outgoingMessageProbe = TestProbe()
  system.eventStream.subscribe(outgoingMessageProbe.ref, classOf[OutgoingMessage])

  protected def confirmOutgoingMessage(test: OutgoingMessage => Unit, timeout: FiniteDuration = 1.second): Unit = {
    outgoingMessageProbe.expectMsgClass(timeout, classOf[OutgoingMessage]) match {
      case msg: OutgoingMessage =>
        test(msg)
    }
  }

  protected def instantMessage(text: String, user: User = mockUser("123", "jshmoe")): IncomingMessage = {
    IncomingMessage(text, true, InstantMessageChannel("125", user), "1527239216000090", sentBy = UserSender(user))
  }

  protected def mockUser(id: String, name: String): User = {
    User(id, name, None, None, None, None, None, None, None, None, None, None, None, None, None, None)
  }

  protected def send(message: IncomingMessage): Unit = {
    system.eventStream.publish(message)
  }

  override protected def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
  }
} 
Example 138
Source File: BotPluginTestKit.scala    From sumobot   with Apache License 2.0
package com.sumologic.sumobot.test

import akka.actor.ActorSystem
import akka.testkit.{TestKit, TestProbe}
import com.sumologic.sumobot.core.model.{IncomingMessage, InstantMessageChannel, OutgoingMessage, UserSender}
import org.scalatest.BeforeAndAfterAll
import slack.models.User

import scala.concurrent.duration.{FiniteDuration, _}

@deprecated("use com.sumologic.sumobot.test.annotated.BotPluginTestKit", "1.0.2")
class BotPluginTestKit(_system: ActorSystem)
  extends TestKit(_system)
  with SumoBotSpec
  with BeforeAndAfterAll {

  protected val outgoingMessageProbe = TestProbe()
  system.eventStream.subscribe(outgoingMessageProbe.ref, classOf[OutgoingMessage])

  protected def confirmOutgoingMessage(test: OutgoingMessage => Unit, timeout: FiniteDuration = 1.second): Unit = {
    outgoingMessageProbe.expectMsgClass(timeout, classOf[OutgoingMessage]) match {
      case msg: OutgoingMessage =>
        test(msg)
    }
  }

  protected def instantMessage(text: String, user: User = mockUser("123", "jshmoe")): IncomingMessage = {
    IncomingMessage(text, true, InstantMessageChannel("125", user), "1527239216000090", sentBy = UserSender(user))
  }

  protected def mockUser(id: String, name: String): User = {
    User(id, name, None, None, None, None, None, None, None, None, None, None, None, None, None, None)
  }

  protected def send(message: IncomingMessage): Unit = {
    system.eventStream.publish(message)
  }

  override protected def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
  }
} 
Example 139
Source File: SessionWorkflowExecutorActorProvider.scala    From seahorse-workflow-executor   with Apache License 2.0
package io.deepsense.workflowexecutor

import scala.concurrent.duration.FiniteDuration

import akka.actor.{ActorContext, ActorRef}

import io.deepsense.commons.utils.Logging
import io.deepsense.deeplang.CommonExecutionContext
import io.deepsense.models.workflows.Workflow

class SessionWorkflowExecutorActorProvider(
  executionContext: CommonExecutionContext,
  workflowManagerClientActor: ActorRef,
  heartbeatPublisher: ActorRef,
  notebookTopicPublisher: ActorRef,
  workflowManagerTimeout: Int,
  publisher: ActorRef,
  sessionId: String,
  heartbeatInterval: FiniteDuration
) extends Logging {
  def provide(context: ActorContext, workflowId: Workflow.Id): ActorRef = {
    context.actorOf(
      SessionWorkflowExecutorActor.props(
        executionContext,
        workflowManagerClientActor,
        publisher,
        heartbeatPublisher,
        notebookTopicPublisher,
        workflowManagerTimeout,
        sessionId,
        heartbeatInterval),
      workflowId.toString)
  }
} 
Example 140
Source File: NotebookPoller.scala    From seahorse-workflow-executor   with Apache License 2.0
package io.deepsense.commons.rest.client

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}

import akka.actor.ActorSystem
import akka.util.Timeout
import spray.client.pipelining._
import spray.http.StatusCodes

import io.deepsense.commons.models.Id
import io.deepsense.commons.utils.Retry
import io.deepsense.commons.utils.RetryActor.RetriableException

class NotebookPoller private (
    notebookRestClient: NotebookRestClient,
    pollInterval: FiniteDuration,
    retryCountLimit: Int,
    workflowId: Id,
    nodeId: Id,
    endpointPath: String)(
    implicit override val actorSystem: ActorSystem,
    override val timeout: Timeout)
  extends Retry[Array[Byte]] {

  override val retryInterval: FiniteDuration = pollInterval

  override val retryLimit: Int = retryCountLimit

  override val workDescription: Option[String] = Some("notebook data retrieval")

  override def work: Future[Array[Byte]] = {
    implicit val ec: ExecutionContext = actorSystem.dispatcher

    notebookRestClient.fetchHttpResponse(Get(endpointPath)).flatMap { resp =>
      resp.status match {
        case StatusCodes.NotFound =>
          Future.failed(RetriableException(s"File containing output data for workflow " +
            s"$workflowId and node $nodeId not found", None))
        case StatusCodes.OK =>
          Future.successful(resp.entity.data.toByteArray)
        case statusCode =>
          Future.failed(NotebookHttpException(resp, s"Notebook server responded with $statusCode " +
            s"when asked for file for workflow $workflowId and node $nodeId"))
      }
    }
  }
}

object NotebookPoller {
  def apply(
      notebookRestClient: NotebookRestClient,
      pollInterval: FiniteDuration,
      retryCountLimit: Int,
      workflowId: Id,
      nodeId: Id,
      endpointPath: String
  )(implicit as: ActorSystem, tout: Timeout): Retry[Array[Byte]] = new NotebookPoller(
    notebookRestClient,
    pollInterval,
    retryCountLimit,
    workflowId,
    nodeId,
    endpointPath
  )
} 
Example 141
Source File: Retry.scala    From seahorse-workflow-executor   with Apache License 2.0
package io.deepsense.commons.utils

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout

trait Retry[T] {

  def work: Future[T]

  def retryInterval: FiniteDuration
  def retryLimit: Int
  def actorSystem: ActorSystem
  // the timeout should exceed the retryLimit * retryInterval + (retryLimit + 1) * avgWorkDuration
  // otherwise the ask in tryWork method may timeout before all the retries have been attempted
  implicit def timeout: Timeout
  def workDescription: Option[String]

  private lazy val retryActor = actorSystem.actorOf(Props(new RetryActor[T](
    retryInterval,
    retryLimit,
    work,
    workDescription
  )))

  def tryWork: Future[T] = (retryActor ? RetryActor.Trigger).asInstanceOf[Future[T]]

} 
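A minimal concrete Retry might look like the sketch below. The work body and the numbers are invented for illustration; the timeout follows the rule of thumb in the comment above (it must cover retryLimit * retryInterval plus headroom for the work itself):

import akka.actor.ActorSystem
import akka.util.Timeout
import scala.concurrent.Future
import scala.concurrent.duration._

class PingRetry(val actorSystem: ActorSystem) extends Retry[String] {
  // Placeholder work; a real implementation would perform an async call here.
  override def work: Future[String] = Future.successful("pong")
  override val retryInterval: FiniteDuration = 1.second
  override val retryLimit: Int = 5
  // 5 retries * 1 s interval, plus headroom for the work itself.
  override implicit val timeout: Timeout = Timeout(10.seconds)
  override val workDescription: Option[String] = Some("ping call")
}

// new PingRetry(system).tryWork  // yields a Future[String]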
Example 142
Source File: RetryActor.scala    From seahorse-workflow-executor   with Apache License 2.0
package io.deepsense.commons.utils

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.FiniteDuration
import scala.util.{Failure, Success, Try}

import akka.actor.{Actor, ActorRef, Status}

class RetryActor[T](
    retryInterval: FiniteDuration,
    retryCountLimit: Int,
    workCode: => Future[T],
    workDescription: Option[String]) extends Actor
    with Logging {

  import RetryActor._

  private implicit val ec: ExecutionContext = context.system.dispatcher

  override def receive: Receive = {
    case Trigger => doWork(sender, 0)
    case Retry(initialSender, retryCount) => doWork(initialSender, retryCount)
  }

  val workDescriptionForLogs: String = workDescription.map(" " + _).getOrElse(" some work")

  private def doWork(initialSender: ActorRef, retryCount: Int): Unit = {
    workCode.onComplete {
      case Success(t) => initialSender ! t
      case Failure(RetriableException(msg, cause)) if retryCount < retryCountLimit =>
        logFailure(msg, cause)
        logger.info(s"Will retry$workDescriptionForLogs in $retryInterval.")
        context.system.scheduler.scheduleOnce(retryInterval, self, Retry(initialSender, retryCount + 1))
      case Failure(RetriableException(msg, cause)) if retryCount >= retryCountLimit =>
        logFailure(msg, cause)
        val retryLimitReachedException =
          RetryLimitReachedException(s"Retry limit of $retryCountLimit reached, last error was $cause", cause)
        logger.error(s"Retry limit reached for$workDescriptionForLogs.", retryLimitReachedException)
        initialSender ! Status.Failure(retryLimitReachedException)
      case Failure(f) =>
        logFailure(f.getMessage, Some(f))
        logger.error(s"Unexpected exception when performing$workDescriptionForLogs.", f)
        initialSender ! Status.Failure(f)
    }
  }

  private def logFailure(msg: String, tOpt: Option[Throwable]): Unit = {
    val msgText = s"Exception when performing$workDescriptionForLogs. The message was: $msg"
    tOpt match {
      case Some(t) => logger.info(msgText, t)
      case None => logger.info(msgText)
    }
  }
}

object RetryActor {
  sealed trait Message
  case object Trigger extends Message
  case class Retry(initialSender: ActorRef, retryCount: Int) extends Message

  case class RetryLimitReachedException(msg: String, lastError: Option[Throwable]) extends Exception(msg)
  case class RetriableException(msg: String, cause: Option[Throwable]) extends Exception(msg, cause.orNull)

} 
Example 143
Source File: ConsumerConfig.scala    From scala-server-toolkit   with MIT License
package com.avast.sst.fs2kafka

import java.util.concurrent.TimeUnit.{MILLISECONDS, SECONDS}

import com.avast.sst.fs2kafka.ConsumerConfig._
import com.github.ghik.silencer.silent
import fs2.kafka.{AutoOffsetReset, CommitRecovery, IsolationLevel}
import org.apache.kafka.clients.consumer.{ConsumerConfig => ApacheConsumerConfig}

import scala.concurrent.duration.FiniteDuration
import scala.jdk.CollectionConverters._

@silent("dead code")
final case class ConsumerConfig(
    bootstrapServers: List[String],
    groupId: String,
    groupInstanceId: Option[String] = None,
    clientId: Option[String] = None,
    clientRack: Option[String] = None,
    autoOffsetReset: AutoOffsetReset = AutoOffsetReset.None,
    enableAutoCommit: Boolean = false,
    autoCommitInterval: FiniteDuration = defaultMillis(ApacheConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG),
    allowAutoCreateTopics: Boolean = default(ApacheConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG),
    closeTimeout: FiniteDuration = FiniteDuration(20, SECONDS),
    commitRecovery: CommitRecovery = CommitRecovery.Default,
    commitTimeout: FiniteDuration = FiniteDuration(15, SECONDS),
    defaultApiTimeout: FiniteDuration = defaultMillis(ApacheConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG),
    heartbeatInterval: FiniteDuration = defaultMillis(ApacheConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG),
    isolationLevel: IsolationLevel = defaultIsolationLevel,
    maxPrefetchBatches: Int = 2,
    pollInterval: FiniteDuration = FiniteDuration(50, MILLISECONDS),
    pollTimeout: FiniteDuration = FiniteDuration(50, MILLISECONDS),
    maxPollInterval: FiniteDuration = defaultMillis(ApacheConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG),
    maxPollRecords: Int = default(ApacheConsumerConfig.MAX_POLL_RECORDS_CONFIG),
    requestTimeout: FiniteDuration = defaultMillis(ApacheConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG),
    sessionTimeout: FiniteDuration = defaultMillis(ApacheConsumerConfig.SESSION_TIMEOUT_MS_CONFIG),
    properties: Map[String, String] = Map.empty
)

object ConsumerConfig {

  private val officialDefaults = ApacheConsumerConfig.configDef().defaultValues().asScala

  private def default[A](key: String): A = officialDefaults(key).asInstanceOf[A]

  private def defaultMillis(key: String): FiniteDuration = FiniteDuration(default[Int](key).toLong, MILLISECONDS)

  private val defaultIsolationLevel = default[String](ApacheConsumerConfig.ISOLATION_LEVEL_CONFIG) match {
    case "read_uncommitted" => IsolationLevel.ReadUncommitted
    case "read_committed"   => IsolationLevel.ReadCommitted
  }

} 
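Only the two fields without defaults have to be supplied; everything else falls back to the official Apache defaults resolved at class-load time. A usage sketch:

import scala.concurrent.duration._

// bootstrapServers and groupId are the only required fields.
val consumer = ConsumerConfig(
  bootstrapServers = List("localhost:9092"),
  groupId          = "my-service",
  pollTimeout      = 100.millis // any defaulted field can still be overridden
)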
Example 144
Source File: ProducerConfig.scala    From scala-server-toolkit   with MIT License
package com.avast.sst.fs2kafka

import java.util.concurrent.TimeUnit.{MILLISECONDS, SECONDS}

import com.avast.sst.fs2kafka.ProducerConfig._
import fs2.kafka.Acks
import org.apache.kafka.clients.producer.{ProducerConfig => ApacheProducerConfig}

import scala.concurrent.duration.FiniteDuration
import scala.jdk.CollectionConverters._

final case class ProducerConfig(
    bootstrapServers: List[String],
    clientId: Option[String] = None,
    acks: Acks = defaultAcks,
    batchSize: Int = default[Int](ApacheProducerConfig.BATCH_SIZE_CONFIG),
    closeTimeout: FiniteDuration = FiniteDuration(60, SECONDS),
    deliveryTimeout: FiniteDuration = defaultMillis(ApacheProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG),
    requestTimeout: FiniteDuration = defaultMillis(ApacheProducerConfig.REQUEST_TIMEOUT_MS_CONFIG),
    linger: FiniteDuration = defaultMillisLong(ApacheProducerConfig.LINGER_MS_CONFIG),
    enableIdempotence: Boolean = default[Boolean](ApacheProducerConfig.ENABLE_IDEMPOTENCE_CONFIG),
    maxInFlightRequestsPerConnection: Int = default[Int](ApacheProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION),
    parallelism: Int = 100,
    retries: Int = 0,
    properties: Map[String, String] = Map.empty
)

object ProducerConfig {

  private val officialDefaults = ApacheProducerConfig.configDef().defaultValues().asScala

  private def default[A](key: String): A = officialDefaults(key).asInstanceOf[A]

  private def defaultMillis(key: String): FiniteDuration = FiniteDuration(default[Int](key).toLong, MILLISECONDS)
  private def defaultMillisLong(key: String): FiniteDuration = FiniteDuration(default[Long](key), MILLISECONDS)

  private val defaultAcks = default[String](ApacheProducerConfig.ACKS_CONFIG) match {
    case "all" => Acks.All
    case "0"   => Acks.Zero
    case "1"   => Acks.One
  }

} 
Example 145
Source File: DoobieHikariConfig.scala    From scala-server-toolkit   with MIT License
package com.avast.sst.doobie

import java.util.concurrent.TimeUnit

import doobie.enum.TransactionIsolation

import scala.concurrent.duration.FiniteDuration

final case class DoobieHikariConfig(
    driver: String,
    url: String,
    username: String,
    password: String,
    autoCommit: Boolean = true,
    connectionTimeout: FiniteDuration = FiniteDuration(30, TimeUnit.SECONDS),
    idleTimeout: FiniteDuration = FiniteDuration(10, TimeUnit.MINUTES),
    maxLifeTime: FiniteDuration = FiniteDuration(30, TimeUnit.MINUTES),
    minimumIdle: Int = 10,
    maximumPoolSize: Int = 10,
    readOnly: Boolean = false,
    leakDetectionThreshold: Option[FiniteDuration] = None,
    allowPoolSuspension: Boolean = false,
    initializationFailTimeout: Option[FiniteDuration] = None,
    isolateInternalQueries: Boolean = false,
    poolName: Option[String] = None,
    registerMBeans: Boolean = false,
    validationTimeout: Option[FiniteDuration] = None,
    transactionIsolation: Option[TransactionIsolation] = None,
    dataSourceProperties: Map[String, String] = Map.empty
) 
Example 146
Source File: Http4sBlazeServerConfig.scala    From scala-server-toolkit   with MIT License
package com.avast.sst.http4s.server

import java.util.concurrent.TimeUnit

import com.avast.sst.http4s.server.Http4sBlazeServerConfig.SocketOptions
import org.http4s.blaze.channel
import org.http4s.server.defaults

import scala.concurrent.duration.{Duration, FiniteDuration}

final case class Http4sBlazeServerConfig(
    listenAddress: String,
    listenPort: Int,
    nio2Enabled: Boolean = true,
    webSocketsEnabled: Boolean = false,
    http2Enabled: Boolean = false,
    responseHeaderTimeout: FiniteDuration = Duration(defaults.ResponseTimeout.toNanos, TimeUnit.NANOSECONDS),
    idleTimeout: FiniteDuration = Duration(defaults.IdleTimeout.toNanos, TimeUnit.NANOSECONDS),
    bufferSize: Int = 64 * 1024,
    maxRequestLineLength: Int = 4 * 1024,
    maxHeadersLength: Int = 40 * 1024,
    chunkBufferMaxSize: Int = 1024 * 1024,
    connectorPoolSize: Int = channel.DefaultPoolSize,
    socketOptions: SocketOptions = SocketOptions()
)

object Http4sBlazeServerConfig {

  def localhost8080: Http4sBlazeServerConfig = Http4sBlazeServerConfig("127.0.0.1", 8080)

  final case class SocketOptions(tcpNoDelay: Boolean = true)

} 
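Being a plain case class, the config composes with copy. A sketch starting from the provided localhost8080 shortcut:

import scala.concurrent.duration._

val serverConfig = Http4sBlazeServerConfig.localhost8080.copy(
  webSocketsEnabled = true,
  idleTimeout       = 2.minutes,
  socketOptions     = Http4sBlazeServerConfig.SocketOptions(tcpNoDelay = false)
)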
Example 147
Source File: Http4sBlazeClientConfig.scala    From scala-server-toolkit   with MIT License
package com.avast.sst.http4s.client

import java.util.concurrent.TimeUnit

import org.http4s.BuildInfo
import org.http4s.client.blaze.ParserMode
import org.http4s.client.defaults
import org.http4s.headers.{`User-Agent`, AgentComment, AgentProduct}

import scala.concurrent.duration.{Duration, FiniteDuration}

final case class Http4sBlazeClientConfig(
    responseHeaderTimeout: Duration = Duration.Inf,
    idleTimeout: FiniteDuration = Duration(1, TimeUnit.MINUTES),
    requestTimeout: FiniteDuration = defaults.RequestTimeout,
    connectTimeout: FiniteDuration = defaults.ConnectTimeout,
    userAgent: `User-Agent` = `User-Agent`(AgentProduct("http4s-blaze-client", Some(BuildInfo.version)), List(AgentComment("Server"))),
    maxTotalConnections: Int = 10,
    maxWaitQueueLimit: Int = 256,
    maxConnectionsPerRequestkey: Int = 256,
    checkEndpointIdentification: Boolean = true,
    maxResponseLineSize: Int = 4 * 1024,
    maxHeaderLength: Int = 40 * 1024,
    maxChunkSize: Int = Int.MaxValue,
    chunkBufferMaxSize: Int = 1024 * 1024,
    parserMode: ParserMode = ParserMode.Strict,
    bufferSize: Int = 8192
) 
Example 148
Source File: UserIntegration.scala    From http4s-poc-api   with MIT License
package integration

import cats.effect.syntax.concurrent._
import cats.effect.{Concurrent, ContextShift, IO, Timer}
import cats.syntax.flatMap._
import errors.PriceServiceError.{PreferenceErr, UserErr}
import external._
import external.library.IoAdapt.-->
import external.library.syntax.errorAdapt._
import external.library.syntax.ioAdapt._
import model.DomainModel._

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

sealed trait UserIntegration[F[_]] {
  def user: UserId => F[User]
  def usersPreferences: UserId => F[UserPreferences]
}

object UserIntegration {
  @inline def apply[F[_]: Concurrent: Timer: IO --> *[_]: Future --> *[_]](
    userDep: TeamTwoHttpApi,
    preferencesDep: TeamOneHttpApi,
    t: FiniteDuration
  )(
    implicit CS: ContextShift[F]
  ): UserIntegration[F] =
    new UserIntegration[F] {
      def user: UserId => F[User] = { id =>
        CS.shift >> userDep.user(id).adaptedTo[F].timeout(t).narrowFailureTo[UserErr]
      }

      def usersPreferences: UserId => F[UserPreferences] = { id =>
        CS.shift >> preferencesDep.usersPreferences(id).adaptedTo[F].timeout(t).narrowFailureTo[PreferenceErr]
      }
    }
} 
Example 149
Source File: tokens.scala    From pfps-shopping-cart   with Apache License 2.0
package shop.algebras

import cats.effect.Sync
import cats.implicits._
import dev.profunktor.auth.jwt._
import io.circe.syntax._
import pdi.jwt._
import scala.concurrent.duration.FiniteDuration
import shop.config.data._

trait Tokens[F[_]] {
  def create: F[JwtToken]
}

object LiveTokens {
  def make[F[_]: Sync](
      tokenConfig: JwtSecretKeyConfig,
      tokenExpiration: TokenExpiration
  ): F[Tokens[F]] =
    Sync[F].delay(java.time.Clock.systemUTC).map { implicit jClock =>
      new LiveTokens[F](tokenConfig, tokenExpiration.value)
    }
}

final class LiveTokens[F[_]: GenUUID: Sync] private (
    config: JwtSecretKeyConfig,
    exp: FiniteDuration
)(implicit val ev: java.time.Clock)
    extends Tokens[F] {
  def create: F[JwtToken] =
    for {
      uuid <- GenUUID[F].make
      claim <- Sync[F].delay(JwtClaim(uuid.asJson.noSpaces).issuedNow.expiresIn(exp.toMillis))
      secretKey = JwtSecretKey(config.value.value.value)
      token <- jwtEncode[F](claim, secretKey, JwtAlgorithm.HS256)
    } yield token
} 
Example 150
Source File: ProductIntegration.scala    From http4s-poc-api   with MIT License
package integration

import cats.effect.syntax.concurrent._
import cats.effect.{Concurrent, ContextShift, IO, Timer}
import cats.syntax.flatMap._
import errors.PriceServiceError.{ProductErr, ProductPriceErr}
import external._
import external.library.IoAdapt.-->
import external.library.syntax.errorAdapt._
import external.library.syntax.ioAdapt._
import model.DomainModel._

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

sealed trait ProductIntegration[F[_]] {
  def product: ProductId => F[Option[Product]]
  def productPrice: Product => UserPreferences => F[Price]
}

object ProductIntegration {
  @inline def apply[F[_]: Concurrent: Timer: IO --> *[_]: Future --> *[_]](
    productDep: TeamTwoHttpApi,
    pricesDep: TeamOneHttpApi,
    t: FiniteDuration
  )(
    implicit CS: ContextShift[F]
  ): ProductIntegration[F] =
    new ProductIntegration[F] {
      def product: ProductId => F[Option[Product]] = { ps =>
        CS.shift >> productDep.product(ps).adaptedTo[F].timeout(t).narrowFailureTo[ProductErr]
      }

      def productPrice: Product => UserPreferences => F[Price] = { p => pref =>
        CS.shift >> pricesDep.productPrice(p)(pref).adaptedTo[F].timeout(t).narrowFailureTo[ProductPriceErr]
      }
    }
} 
Example 151
Source File: ReadJournalSource.scala    From apache-spark-test   with Apache License 2.0
package akka.persistence.jdbc.spark.sql.execution.streaming

import akka.actor.{ ActorSystem, ExtendedActorSystem }
import akka.persistence.query.PersistenceQuery
import akka.persistence.query.scaladsl.{ CurrentEventsByPersistenceIdQuery, CurrentEventsByTagQuery, CurrentPersistenceIdsQuery, ReadJournal }
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.extension.{ Sink => Snk }
import akka.stream.{ ActorMaterializer, Materializer }
import org.apache.spark.sql._
import org.apache.spark.sql.execution.streaming.{ LongOffset, Offset, Source }
import org.apache.spark.sql.types.StructType

import scala.collection.immutable._
import scala.concurrent.duration.{ FiniteDuration, _ }
import scala.concurrent.{ Await, ExecutionContext, Future }

trait ReadJournalSource {
  _: Source =>
  def readJournalPluginId: String
  def sqlContext: SQLContext

  // some machinery
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher

  // read journal, only interested in the Current queries, as Spark isn't asynchronous
  lazy val readJournal = PersistenceQuery(system).readJournalFor(readJournalPluginId)
    .asInstanceOf[ReadJournal with CurrentPersistenceIdsQuery with CurrentEventsByPersistenceIdQuery with CurrentEventsByTagQuery]

  implicit class FutureOps[A](f: Future[A])(implicit ec: ExecutionContext, timeout: FiniteDuration = null) {
    def futureValue: A = Await.result(f, Option(timeout).getOrElse(10.seconds))
  }

  def maxPersistenceIds: Long =
    readJournal.currentPersistenceIds().runWith(Snk.count).futureValue

  def persistenceIds(start: Long, end: Long) =
    readJournal.currentPersistenceIds().drop(start).take(end).runWith(Sink.seq).futureValue

  def maxEventsByPersistenceId(pid: String): Long =
    readJournal.currentEventsByPersistenceId(pid, 0, Long.MaxValue).runWith(Snk.count).futureValue

  def eventsByPersistenceId(pid: String, start: Long, end: Long, eventMapperFQCN: String): Seq[Row] = {
    readJournal.currentEventsByPersistenceId(pid, start, end)
      .map(env => getMapper(eventMapperFQCN).get.row(env, sqlContext)).runWith(Sink.seq).futureValue
  }

  implicit def mapToDataFrame(rows: Seq[Row]): DataFrame = {
    import scala.collection.JavaConversions._
    sqlContext.createDataFrame(rows, schema)
  }

  def getStartEnd(_start: Option[Offset], _end: Offset): (Long, Long) = (_start, _end) match {
    case (Some(LongOffset(start)), LongOffset(end)) => (start, end)
    case (None, LongOffset(end))                    => (0L, end)
  }

  def getMapper(eventMapperFQCN: String): Option[EventMapper] =
    system.asInstanceOf[ExtendedActorSystem].dynamicAccess.createInstanceFor[EventMapper](eventMapperFQCN, List.empty)
      .recover { case cause => cause.printStackTrace(); null }.toOption

  override def stop(): Unit = {
    println("Stopping jdbc read journal")
    system.terminate()
  }
} 
Example 152
Source File: SparkImplicits.scala    From apache-spark-test   with Apache License 2.0
package com.github.dnvriend.spark.datasources

import java.util.Properties

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.{ Sink, Source }
import org.apache.spark.sql._
import org.apache.spark.sql.streaming.DataStreamReader

import scala.collection.immutable._
import scala.concurrent.duration.{ FiniteDuration, _ }
import scala.concurrent.{ Await, Future }
import scala.reflect.runtime.universe._
import slick.driver.PostgresDriver.api._

object SparkImplicits {
  implicit class DataSourceOps(dfr: DataFrameReader) {
    def helloworld(path: String): DataFrame = dfr.format("helloworld").load(path)
    def person(path: String): DataFrame = dfr.format("person").load(path)
    def jdbc(table: String)(implicit jdbcOptions: Map[String, String]): DataFrame =
      dfr.format("jdbc").options(jdbcOptions ++ Map("dbtable" -> table)).load()
  }

  implicit class DataStreamReaderOps(dsr: DataStreamReader) {
    def currentPersistenceIds(path: String = "jdbc-read-journal"): DataFrame = dsr.format("current-persistence-id").load(path)
    def eventsByPersistenceId(path: String = "jdbc-read-journal"): DataFrame = dsr.format("current-events-by-persistence-id").load(path)
  }

  implicit class DataFrameWriterOps[T](dfw: DataFrameWriter[T]) {
    
    def ignore = dfw.mode(SaveMode.Ignore)

    def jdbc(table: String)(implicit jdbcOptions: Map[String, String]) = {
      val properties = jdbcOptions.foldLeft(new Properties) { case (prop, (k, v)) => prop.put(k, v); prop }
      dfw.jdbc(jdbcOptions("url"), table, properties)
      // does not (yet) work see: https://issues.apache.org/jira/browse/SPARK-7646
      // dfw.format("jdbc").mode(SaveMode.Overwrite).options(jdbcOptions ++ Map("dbtable" -> table))
    }
  }

  trait DataFrameQueryGenerator[A] {
    def upsert: String
  }

  implicit class DatasetOps(df: DataFrame) {
    def withSession[A](db: Database)(f: Session => A): A = {
      val session = db.createSession()
      try f(session) finally session.close()
    }

    def withStatement[A](db: Database)(f: java.sql.Statement => A): A =
      withSession(db)(session ⇒ session.withStatement()(f))

    def upsert[A](table: String)(implicit db: Database, dfq: DataFrameQueryGenerator[A]): DataFrame = withStatement(db) { stmt =>
      stmt.executeUpdate(dfq.upsert)
      df
    }
  }

  implicit class SparkSessionOps(spark: SparkSession) {
    def fromFuture[A <: Product: TypeTag](data: Future[Seq[A]])(implicit _timeout: FiniteDuration = null): DataFrame =
      spark.createDataFrame(Await.result(data, Option(_timeout).getOrElse(15.minutes)))

    def fromSource[A <: Product: TypeTag](data: Source[A, NotUsed])(implicit _timeout: FiniteDuration = null, mat: Materializer): DataFrame =
      fromFuture(data.runWith(Sink.seq))
  }
} 
Example 153
Source File: DataFetcher.scala    From cave   with MIT License
package com.cave.metrics.data.evaluator

import com.cave.metrics.data.ExponentialBackOff
import com.cave.metrics.data.influxdb.InfluxClientFactory
import org.joda.time.DateTime

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal

class DataFetcher(clientFactory: InfluxClientFactory) extends ExponentialBackOff {

  // parameters for Exponential BackOff
  protected val MaxBackOffTimeInMillis = 1000L
  protected val ShouldLogErrors = true
  def maxTries: Int = 3

  def fetchData(clusterName: Option[String], databaseName: String, metricName: String, metricTags: Map[String, String],
                repeats: Int, delay: FiniteDuration, end: DateTime)(implicit ec: ExecutionContext): Future[Option[List[Double]]] =
    try {
      val delayedEnd = end.minusMinutes(delay.toMinutes.toInt)
      val (client, context) = clientFactory.getClient(clusterName)

      retryUpTo(maxTries) {
        client.getMetricData(
          database = databaseName,
          metric = metricName,
          tags = metricTags,
          start = None,
          end = Some(delayedEnd),
          limit = Some(repeats))(context)
      } map {
        case scala.util.Success(data) => data.map(_.metrics.map(_.value).toList)

        case scala.util.Failure(t) => sys.error(t.getMessage)
      }
    } catch {
      case e: RuntimeException =>
        Future.failed(e)
    }

  def fetchData(clusterName: Option[String], databaseName: String, metricName: String, metricTags: Map[String, String],
                duration: FiniteDuration, end: DateTime)(implicit ec: ExecutionContext): Future[Option[List[Double]]] =
    try {
      val (client, context) = clientFactory.getClient(clusterName)
      retryUpTo(maxTries) {
        client.getMetricData(
          database = databaseName,
          metric = metricName,
          tags = metricTags,
          start = Some(end.minusSeconds(duration.toSeconds.toInt)),
          end = Some(end),
          limit = None)(context)
      } map {
        case scala.util.Success(data) => data.map(_.metrics.map(_.value).toList)

        case scala.util.Failure(t) => sys.error(t.getMessage)
      }
    } catch {
      case e: RuntimeException =>
        Future.failed(e)
    }

  def fetchData(clusterName: Option[String], databaseName: String, aggregator: String, period: FiniteDuration,
                metric: String, tags: Map[String, String], repeats: Int, delay: FiniteDuration, end: DateTime)
               (implicit ec: ExecutionContext) = {
    try {
      val delayedEnd = end.minusMinutes(delay.toMinutes.toInt)
      val (client, context) = clientFactory.getClient(clusterName)
      retryUpTo(maxTries) {
        client.getAggregatedData(
          databaseName,
          aggregator, period,
          metric, tags,
          start = None,
          end = Some(delayedEnd),
          limit = Some(repeats)
        )(context)
      } map {
        case scala.util.Success(data) => data.map(_.metrics.map(_.value).toList)
        case scala.util.Failure(t) => sys.error(t.getMessage)
      }
    } catch {
      case NonFatal(e) =>
        Future.failed(e)
    }
  }
} 
Example 154
Source File: AbstractEvaluator.scala    From cave   with MIT License 5 votes vote down vote up
package com.cave.metrics.data.evaluator

import org.joda.time.DateTime

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try

abstract class AbstractEvaluator(conditionStr: String) extends AlertParser {

  private val condition = parseAll(anyAlert, conditionStr) match {
    case Success(SimpleAlert(leftOperand, operator, rightOperand, repeatCount, delay), _) =>
      Left((leftOperand, operator, rightOperand, repeatCount, delay))

    case Success(MissingDataAlert(metricSource, duration), _) =>
      Right((metricSource, duration))

    case _ => sys.error("Unsupported check condition: " + conditionStr)
  }

  def evaluateRange(clusterName: Option[String], databaseName: String, end: DateTime)
                   (implicit fetcher: DataFetcher, ec: ExecutionContext): Future[Try[Boolean]] = {
    condition match {
      case Left((left, operator, right, repeats, delay)) =>
        val results = for {
          leftResult <- evaluateSource(clusterName, databaseName, end, left, repeats, delay)(fetcher, ec)
          rightResult <- evaluateSource(clusterName, databaseName, end, right, repeats, delay)(fetcher, ec)
        } yield (leftResult, rightResult)

        results map {
          case (Some(l), Some(r)) =>
            val zipped = l.zip(r)
            implicit val op = operator
            scala.util.Success((zipped.size == repeats) && (zipped forall evaluatePair))

          case _ =>
            scala.util.Failure(new RuntimeException("Failed to evaluate: at least one series does not exist."))
        }

      case Right((metricSrc, duration)) =>
        getData(clusterName, databaseName, metricSrc.metric, metricSrc.tags, duration, end)(fetcher, ec) map {
          case Some(values) =>
            scala.util.Success(values.size == 0)

          case None => scala.util.Failure(new RuntimeException("Cannot evaluate: series does not exist!"))
        }
    }
  }

  def evaluateSource(clusterName: Option[String], databaseName: String, end: DateTime,
                     source: Source, repeats: Int, delay: FiniteDuration)
                    (implicit fetcher: DataFetcher, ec: ExecutionContext): Future[Option[List[Double]]] =
    source match {
      case ValueSource(num) => Future.successful(Some(List.fill(repeats)(num)))
      case MetricSource(name, tags) => getData(clusterName, databaseName, name, tags, repeats, delay, end)(fetcher, ec)
      case a: AggregatedSource => getData(clusterName, databaseName, a, repeats, delay, end)(fetcher, ec)

      case FactoredSource(src, factor) => src match {
        case ValueSource(num) =>
          Future.successful(Some(List.fill(repeats)(num * factor)))

        case MetricSource(name, tags) =>
          getData(clusterName, databaseName, name, tags, repeats, delay, end)(fetcher, ec) map(_.map(_.map(_ * factor)))

        case a: AggregatedSource =>
          getData(clusterName, databaseName, a, repeats, delay, end)(fetcher, ec) map(_.map(_.map(_ * factor)))

        case _ => Future.failed(new RuntimeException("Impossible to evaluate."))
      }
    }

  def getData(clusterName: Option[String], databaseName: String, metricName: String,
                       metricTags: Map[String, String], repeats: Int, delay: FiniteDuration, end: DateTime)
                      (implicit fetcher: DataFetcher, ec: ExecutionContext): Future[Option[List[Double]]]

  def getData(clusterName: Option[String], databaseName: String, metricName: String,
                       metricTags: Map[String, String], duration: FiniteDuration, end: DateTime)
                      (implicit fetcher: DataFetcher, ec: ExecutionContext): Future[Option[List[Double]]]

  def getData(clusterName: Option[String], databaseName: String, agg: AggregatedSource, repeats: Int, delay: FiniteDuration, end: DateTime)
                      (implicit fetcher: DataFetcher, ec: ExecutionContext): Future[Option[List[Double]]]

  def evaluatePair(values: (Double, Double))(implicit op: Operator.Value): Boolean = op match {
    case Operator.LessThan            => values._1 <  values._2
    case Operator.LessThanOrEqual     => values._1 <= values._2
    case Operator.GreaterThan         => values._1 >  values._2
    case Operator.GreaterThanOrEqual  => values._1 >= values._2
    case Operator.Equal               => values._1 == values._2
    case Operator.NotEqual            => values._1 != values._2
  }
} 
Example 155
Source File: CheckEvaluator.scala    From cave   with MIT License 5 votes vote down vote up
package com.cave.metrics.data.evaluator

import com.cave.metrics.data.Check
import org.joda.time.DateTime

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try

class CheckEvaluator(check: Check) extends AbstractEvaluator(check.schedule.alert.condition) {

  def evaluate(fetcher: DataFetcher)(implicit ec: ExecutionContext): Future[Try[Boolean]] = {
    evaluateRange(clusterName = check.schedule.clusterName,
                  databaseName = check.schedule.databaseName,
                  end = check.timestamp)(fetcher, ec)
  }

  override def getData(clusterName: Option[String], databaseName: String, metricName: String,
                       metricTags: Map[String, String], repeats: Int, delay: FiniteDuration, end: DateTime)
                      (implicit fetcher: DataFetcher, ec: ExecutionContext): Future[Option[List[Double]]] =
    fetcher.fetchData(clusterName, databaseName, metricName, metricTags, repeats, delay, end)(ec)

  override def getData(clusterName: Option[String], databaseName: String, metricName: String,
                       metricTags: Map[String, String], duration: FiniteDuration, end: DateTime)
                      (implicit fetcher: DataFetcher, ec: ExecutionContext): Future[Option[List[Double]]] =
    fetcher.fetchData(clusterName, databaseName, metricName, metricTags, duration, end)(ec)

  override def getData(clusterName: Option[String], databaseName: String, agg: AggregatedSource, repeats: Int, delay: FiniteDuration, end: DateTime)
                      (implicit fetcher: DataFetcher, ec: ExecutionContext): Future[Option[List[Double]]] =
    fetcher.fetchData(clusterName, databaseName, agg.toString, Map.empty[String, String], repeats, delay, end)(ec)
} 
Example 156
Source File: ConditionEvaluator.scala    From cave   with MIT License 5 votes vote down vote up
package com.cave.metrics.data.evaluator

import com.cave.metrics.data.influxdb.InfluxClientFactory
import com.cave.metrics.data.{MetricCheckRequest, MetricData, MetricDataBulk}
import org.joda.time.{DateTime, Period}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}

class ConditionEvaluator(clusterName: Option[String], databaseName: String, request: MetricCheckRequest) extends AbstractEvaluator(request.condition) {

  def evaluate(clientFactory: InfluxClientFactory)(implicit ec: ExecutionContext): Future[Option[MetricDataBulk]] = {

    val fetcher = new DataFetcher(clientFactory)
    val step = Period.minutes(request.interval)
    val dateRange = Iterator.iterate(request.start)(_.plus(step)).takeWhile(!_.isAfter(request.end))

    def evaluateOnce(rangeStartDate: DateTime): Future[Option[MetricData]] = {
      val rangeEndDate = rangeStartDate.plus(step).minusSeconds(1)

      evaluateRange(clusterName, databaseName, rangeEndDate)(fetcher, ec) map {
        case scala.util.Success(value) =>
          Some(MetricData(rangeStartDate, if (value) 1.0 else 0.0))
        case _ => None
      }
    }

    // If the result of one evaluation is None, it means the metric does not exist!
    // In that case, there's no point in evaluating any other dates in the range.
    evaluateOnce(dateRange.next()) flatMap {
      case Some(value) =>
        val results = dateRange map evaluateOnce
        Future.sequence(results) map(seq => Some(MetricDataBulk(value +: seq.flatten.toSeq)))

      case None =>
        Future.successful(None)
    }
  }

  override def getData(clusterName: Option[String], databaseName: String, metricName: String,
                       metricTags: Map[String, String], repeats: Int, delay: FiniteDuration, end: DateTime)
                      (implicit fetcher: DataFetcher, ec: ExecutionContext): Future[Option[List[Double]]] =
    fetcher.fetchData(clusterName, databaseName, metricName, metricTags, repeats, delay, end)(ec)

  override def getData(clusterName: Option[String], databaseName: String, metricName: String,
                       metricTags: Map[String, String], duration: FiniteDuration, end: DateTime)
                      (implicit fetcher: DataFetcher, ec: ExecutionContext): Future[Option[List[Double]]] =
    fetcher.fetchData(clusterName, databaseName, metricName, metricTags, duration, end)(ec)

  override def getData(clusterName: Option[String], databaseName: String, agg: AggregatedSource, repeats: Int, delay: FiniteDuration, end: DateTime)
                      (implicit fetcher: DataFetcher, ec: ExecutionContext): Future[Option[List[Double]]] =
    fetcher.fetchData(clusterName, databaseName, Aggregator.toInflux(agg.aggregator),
      agg.duration, agg.metricSource.metric, agg.metricSource.tags, repeats, delay, end)(ec)
} 
Example 157
Source File: FriendServiceTest.scala    From activator-lagom-scala-chirper   with Apache License 2.0 5 votes vote down vote up
package sample.chirper.friend.impl

import java.util.concurrent.TimeUnit.SECONDS

import scala.collection.immutable.Seq
import scala.concurrent.duration.FiniteDuration

import org.junit.Assert.assertEquals
import org.junit.Test

import com.lightbend.lagom.javadsl.testkit.ServiceTest.defaultSetup
import com.lightbend.lagom.javadsl.testkit.ServiceTest.eventually
import com.lightbend.lagom.javadsl.testkit.ServiceTest.withServer

import akka.NotUsed
import sample.chirper.friend.api.FriendId
import sample.chirper.friend.api.FriendService
import sample.chirper.friend.api.User

class FriendServiceTest {


  @throws(classOf[Exception])
  @Test
  def shouldBeAbleToCreateUsersAndConnectFriends() {
    withServer(defaultSetup, server => {
      val friendService = server.client(classOf[FriendService])
      val usr1 = new User("usr1", "User 1");
      friendService.createUser().invoke(usr1).toCompletableFuture().get(10, SECONDS)
      val usr2 = new User("usr2", "User 2");
      friendService.createUser().invoke(usr2).toCompletableFuture().get(3, SECONDS)
      val usr3 = new User("usr3", "User 3");
      friendService.createUser().invoke(usr3).toCompletableFuture().get(3, SECONDS)

      friendService.addFriend("usr1").invoke(FriendId(usr2.userId)).toCompletableFuture().get(3, SECONDS)
      friendService.addFriend("usr1").invoke(FriendId(usr3.userId)).toCompletableFuture().get(3, SECONDS)

      val fetchedUsr1 = friendService.getUser("usr1").invoke(NotUsed).toCompletableFuture().get(3,
          SECONDS)
      assertEquals(usr1.userId, fetchedUsr1.userId)
      assertEquals(usr1.name, fetchedUsr1.name)
      assertEquals(Seq("usr2", "usr3"), fetchedUsr1.friends)

      eventually(FiniteDuration(10, SECONDS), () => {
        val followers = friendService.getFollowers("usr2").invoke()
            .toCompletableFuture().get(3, SECONDS)
        assertEquals(Seq("usr1"), followers)
      })

    })
  }

} 
Example 158
Source File: TimerLogic.scala    From flink-parameter-server   with Apache License 2.0 5 votes vote down vote up
package hu.sztaki.ilab.ps.common

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.duration.FiniteDuration

class TimerLogic[W](intervalLength: FiniteDuration)
  extends Combinable[W]
    with Serializable {

  var timerThread: Option[Thread] = None
  var containsData = false

  def runTimer(callback: (Array[W] => Unit) => Unit, collect: Array[W] => Unit): Unit = {
    timerThread = Some(new Thread {
      override def run {
        while (true) {
          Thread sleep intervalLength.toMillis
          if (sendCondition()) {
            send(callback, collect)
            containsData = false
          }
        }
      }
    })
    timerThread.get.start()
  }

  
  override def logic(adder: (ArrayBuffer[W]) => Unit,
                     callback: (Array[W] => Unit) => Unit,
                     collectAnswerMsg: Array[W] => Unit): Unit = {
    containsData = true

    if (!timerThread.isDefined) {
      runTimer(callback, collectAnswerMsg)
    }
  }

} 
Example 159
Source File: Constructr.scala    From constructr   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.constructr

import akka.actor.{ Actor, ActorLogging, ActorRef, Props, SupervisorStrategy, Terminated }
import akka.cluster.{ Cluster, Member }
import akka.cluster.ClusterEvent.{ InitialStateAsEvents, MemberExited, MemberLeft, MemberRemoved }
import akka.cluster.MemberStatus.Up
import de.heikoseeberger.constructr.coordination.Coordination
import scala.concurrent.duration.{ FiniteDuration, NANOSECONDS }

object Constructr {

  final val Name = "constructr"

  def props: Props =
    Props(new Constructr)
}

final class Constructr private extends Actor with ActorLogging {

  override val supervisorStrategy = SupervisorStrategy.stoppingStrategy

  private val cluster = Cluster(context.system)

  if (cluster.settings.SeedNodes.isEmpty) {
    log.info("Creating constructr-machine, because no seed-nodes defined")
    cluster.subscribe(self,
                      InitialStateAsEvents,
                      classOf[MemberLeft],
                      classOf[MemberExited],
                      classOf[MemberRemoved])
    context.become(active(context.watch(createConstructrMachine())))
  } else {
    log.info("Stopping self, because seed-nodes defined")
    context.stop(self)
  }

  override def receive = Actor.emptyBehavior

  private def active(machine: ActorRef): Receive = {
    case Terminated(`machine`) =>
      val selfAddress = cluster.selfAddress
      def isSelfAndUp(member: Member) =
        member.address == selfAddress && member.status == Up
      if (cluster.state.members.exists(isSelfAndUp)) {
        log.error("Leaving, because constructr-machine terminated!")
        cluster.leave(selfAddress)
      } else {
        log.error("Terminating system, because constructr-machine terminated!")
        context.system.terminate()
      }

    case MemberRemoved(member, _) if member.address == cluster.selfAddress =>
      log.error("Terminating system, because member has been removed!")
      context.system.terminate()
  }

  private def createConstructrMachine() = {
    val config = context.system.settings.config
    def getDuration(key: String) =
      FiniteDuration(config.getDuration(key).toNanos, NANOSECONDS)

    val coordinationTimeout   = getDuration("constructr.coordination-timeout")
    val nrOfRetries           = config.getInt("constructr.nr-of-retries")
    val retryDelay            = getDuration("constructr.retry-delay")
    val refreshInterval       = getDuration("constructr.refresh-interval")
    val ttlFactor             = config.getDouble("constructr.ttl-factor")
    val maxNrOfSeedNodes      = config.getInt("constructr.max-nr-of-seed-nodes")
    val joinTimeout           = getDuration("constructr.join-timeout")
    val abortOnJoinTimeout    = config.getBoolean("constructr.abort-on-join-timeout")
    val ignoreRefreshFailures = config.getBoolean("constructr.ignore-refresh-failures")

    context.actorOf(
      ConstructrMachine.props(
        cluster.selfAddress,
        Coordination(context.system.name, context.system),
        coordinationTimeout,
        nrOfRetries,
        retryDelay,
        refreshInterval,
        ttlFactor,
        if (maxNrOfSeedNodes <= 0) Int.MaxValue else maxNrOfSeedNodes,
        joinTimeout,
        abortOnJoinTimeout,
        ignoreRefreshFailures
      ),
      ConstructrMachine.Name
    )
  }
} 
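Starting the actor is the only wiring needed; a sketch, assuming a coordination backend is configured under constructr.coordination.class-name:

import akka.actor.ActorSystem
import de.heikoseeberger.constructr.Constructr

object ConstructrMain extends App {
  val system = ActorSystem("default")
  // joins the cluster via the configured coordination backend,
  // unless akka.cluster.seed-nodes are already set
  system.actorOf(Constructr.props, Constructr.Name)
}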
Example 160
Source File: Coordination.scala    From constructr   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.constructr.coordination

import akka.Done
import akka.actor.{ ActorSystem, Address }
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

object Coordination {

  def apply(clusterName: String, system: ActorSystem): Coordination =
    try {
      val className =
        system.settings.config.getString("constructr.coordination.class-name")
      Class
        .forName(className)
        .getConstructor(classOf[String], classOf[ActorSystem])
        .newInstance(clusterName, system)
        .asInstanceOf[Coordination]
    } catch {
      case _: NoSuchMethodException =>
        throw new Exception(
          """|A Coordination implementation must provide a constructor with the following signature:
             |(clusterName: String, system: ActorSystem)""".stripMargin
        )
    }
}


trait Coordination {

  def getNodes(): Future[Set[Address]]

  def lock(self: Address, ttl: FiniteDuration): Future[Boolean]

  def addSelf(self: Address, ttl: FiniteDuration): Future[Done]

  def refresh(self: Address, ttl: FiniteDuration): Future[Done]
}
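The factory above reflectively invokes a (clusterName: String, system: ActorSystem) constructor, so any plug-in backend must expose one. A minimal in-memory sketch of the contract (illustrative only, not usable for real clustering):

import akka.Done
import akka.actor.{ ActorSystem, Address }
import de.heikoseeberger.constructr.coordination.Coordination
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

class InMemoryCoordination(clusterName: String, system: ActorSystem) extends Coordination {
  @volatile private var nodes = Set.empty[Address]
  @volatile private var lockHolder = Option.empty[Address]

  override def getNodes(): Future[Set[Address]] = Future.successful(nodes)

  override def lock(self: Address, ttl: FiniteDuration): Future[Boolean] =
    Future.successful(lockHolder.forall(_ == self) && { lockHolder = Some(self); true })

  override def addSelf(self: Address, ttl: FiniteDuration): Future[Done] = {
    nodes += self
    Future.successful(Done)
  }

  override def refresh(self: Address, ttl: FiniteDuration): Future[Done] =
    Future.successful(Done) // a real store would extend the entry's TTL here
}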
Example 161
Source File: EtcdCoordinationSpec.scala    From constructr   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.constructr.coordination.etcd

import akka.Done
import akka.actor.{ ActorSystem, AddressFromURIString }
import akka.testkit.{ TestDuration, TestProbe }
import com.typesafe.config.ConfigFactory
import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpec }
import scala.concurrent.duration.{ Duration, DurationInt, FiniteDuration }
import scala.concurrent.{ Await, Awaitable }
import scala.util.Random

object EtcdCoordinationSpec {

  private val coordinationHost = {
    val dockerHostPattern = """tcp://(\S+):\d{1,5}""".r
    sys.env
      .get("DOCKER_HOST")
      .collect { case dockerHostPattern(address) => address }
      .getOrElse("127.0.0.1")
  }
}

class EtcdCoordinationSpec extends WordSpec with Matchers with BeforeAndAfterAll {
  import EtcdCoordinationSpec._

  private implicit val system = {
    val config =
      ConfigFactory
        .parseString(s"constructr.coordination.host = $coordinationHost")
        .withFallback(ConfigFactory.load())
    ActorSystem("default", config)
  }

  private val address  = AddressFromURIString("akka.tcp://default@a:2552")
  private val address2 = AddressFromURIString("akka.tcp://default@b:2552")

  "EtcdCoordination" should {
    "correctly interact with etcd" in {
      val coordination = new EtcdCoordination(randomString(), system)

      resultOf(coordination.getNodes()) shouldBe 'empty

      resultOf(coordination.lock(address, 10.seconds.dilated)) shouldBe true
      resultOf(coordination.lock(address, 10.seconds.dilated)) shouldBe true
      resultOf(coordination.lock(address2, 10.seconds.dilated)) shouldBe false

      resultOf(coordination.addSelf(address, 10.seconds.dilated)) shouldBe Done
      resultOf(coordination.getNodes()) shouldBe Set(address)

      resultOf(coordination.refresh(address, 1.second.dilated)) shouldBe Done
      resultOf(coordination.getNodes()) shouldBe Set(address)

      val probe = TestProbe()
      probe.within(5.seconds.dilated) { // 2 seconds should be enough, but who knows ...
        probe.awaitAssert {
          resultOf(coordination.getNodes()) shouldBe 'empty
        }
      }
    }
  }

  override protected def afterAll() = {
    Await.ready(system.terminate(), Duration.Inf)
    super.afterAll()
  }

  private def resultOf[A](awaitable: Awaitable[A], max: FiniteDuration = 3.seconds.dilated) =
    Await.result(awaitable, max)

  private def randomString() = Random.nextInt(Int.MaxValue).toString // bounded to stay non-negative
} 
Example 162
Source File: Background.scala    From pfps-shopping-cart   with Apache License 2.0 5 votes vote down vote up
package shop.effects

import cats.effect._
import cats.effect.implicits._
import cats.implicits._
import scala.concurrent.duration.FiniteDuration

trait Background[F[_]] {
  def schedule[A](
      fa: F[A],
      duration: FiniteDuration
  ): F[Unit]
}

object Background {
  def apply[F[_]](implicit ev: Background[F]): Background[F] = ev

  implicit def concurrentBackground[F[_]: Concurrent: Timer]: Background[F] =
    new Background[F] {

      def schedule[A](
          fa: F[A],
          duration: FiniteDuration
      ): F[Unit] =
        (Timer[F].sleep(duration) *> fa).start.void

    }

} 
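A sketch of scheduling a fire-and-forget effect through this type class, assuming cats-effect 2 and the implicit instances an IOApp brings into scope:

import cats.effect.{ ExitCode, IO, IOApp }
import cats.implicits._
import scala.concurrent.duration._
import shop.effects.Background

object BackgroundDemo extends IOApp {
  def run(args: List[String]): IO[ExitCode] =
    // the scheduled effect runs on its own fiber after the delay
    Background[IO].schedule(IO(println("fired later")), 1.second) *>
      IO.sleep(2.seconds).as(ExitCode.Success)
}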
Example 163
Source File: DockerClientWithFileAccess.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.containerpool.docker

import java.io.File
import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.stream.alpakka.file.scaladsl.FileTailSource
import akka.stream.scaladsl.{FileIO, Source => AkkaSource}
import akka.util.ByteString

import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.blocking
import spray.json.DefaultJsonProtocol._
import spray.json._
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.containerpool.ContainerId
import org.apache.openwhisk.core.containerpool.ContainerAddress

import scala.io.Source
import scala.concurrent.duration.FiniteDuration

class DockerClientWithFileAccess(dockerHost: Option[String] = None,
                                 containersDirectory: File = Paths.get("containers").toFile)(
  executionContext: ExecutionContext)(implicit log: Logging, as: ActorSystem)
    extends DockerClient(dockerHost)(executionContext)
    with DockerApiWithFileAccess {

  implicit private val ec = executionContext

  
  def rawContainerLogs(containerId: ContainerId,
                       fromPos: Long,
                       pollInterval: Option[FiniteDuration]): AkkaSource[ByteString, Any] =
    try {
      // assumed layout: Docker keeps a JSON log per container under <containersDirectory>/<id>/<id>-json.log
      val logFile = containersDirectory.toPath.resolve(containerId.asString).resolve(s"${containerId.asString}-json.log")
      pollInterval match {
        case Some(interval) => FileTailSource(logFile, maxChunkSize = 4096, startingPosition = fromPos, pollingInterval = interval)
        case None           => FileIO.fromPath(logFile, chunkSize = 4096, startPosition = fromPos)
      }
    } catch {
      case t: Throwable => AkkaSource.failed(t)
    }
} 
Example 164
Source File: ExecutorConfig.scala    From mleap   with Apache License 2.0 5 votes vote down vote up
package ml.combust.mleap.executor

import java.util.concurrent.TimeUnit

import akka.stream.ThrottleMode
import com.typesafe.config.Config

import scala.concurrent.duration.FiniteDuration

object ExecutorConfig {
  def throttleFromConfig(config: Config): Throttle = {
    val mode = config.getString("mode") match {
      case "shaping" => ThrottleMode.shaping
      case "enforcing" => ThrottleMode.enforcing
    }

    Throttle(
      elements = config.getInt("elements"),
      maxBurst = config.getInt("max-burst"),
      duration = FiniteDuration(config.getDuration("duration").toMillis, TimeUnit.MILLISECONDS),
      mode = mode
    )
  }
}

class ExecutorStreamConfig(config: Config) {
  val defaultIdleTimeout: Option[FiniteDuration] = if (config.hasPath("default-idle-timeout")) {
    Some(FiniteDuration(config.getDuration("default-idle-timeout").toMillis, TimeUnit.MILLISECONDS))
  } else { None }

  val defaultTransformDelay: Option[FiniteDuration] = if (config.hasPath("default-transform-delay")) {
    Some(FiniteDuration(config.getDuration("default-transform-delay").toMillis, TimeUnit.MILLISECONDS))
  } else { None }

  val defaultThrottle: Option[Throttle] = if (config.hasPath("default-throttle")) {
    Some(ExecutorConfig.throttleFromConfig(config.getConfig("default-throttle")))
  } else { None }

  val defaultParallelism: Parallelism = config.getInt("default-parallelism")
  val defaultBufferSize: Int = config.getInt("default-buffer-size")
}

class ExecutorFlowConfig(config: Config) {
  val defaultIdleTimeout: Option[FiniteDuration] = if (config.hasPath("default-idle-timeout")) {
    Some(FiniteDuration(config.getDuration("default-idle-timeout").toMillis, TimeUnit.MILLISECONDS))
  } else { None }

  val defaultTransformDelay: Option[FiniteDuration] = if (config.hasPath("default-transform-delay")) {
    Some(FiniteDuration(config.getDuration("default-transform-delay").toMillis, TimeUnit.MILLISECONDS))
  } else { None }

  val defaultThrottle: Option[Throttle] = if (config.hasPath("default-throttle")) {
    Some(ExecutorConfig.throttleFromConfig(config.getConfig("default-throttle")))
  } else { None }

  val defaultParallelism: Parallelism = config.getInt("default-parallelism")
}

class ExecutorConfig(config: Config) {
  val defaultMemoryTimeout: FiniteDuration = {
    FiniteDuration(config.getDuration("default-memory-timeout").toMillis, TimeUnit.MILLISECONDS)
  }

  val defaultDiskTimeout: FiniteDuration = {
    FiniteDuration(config.getDuration("default-disk-timeout").toMillis, TimeUnit.MILLISECONDS)
  }

  val stream: ExecutorStreamConfig = new ExecutorStreamConfig(config.getConfig("stream"))
  val flow: ExecutorFlowConfig = new ExecutorFlowConfig(config.getConfig("flow"))
} 
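A sketch of the HOCON shape these classes read, parsed from an inline string for illustration; the key names mirror the getters above, the values are arbitrary:

import com.typesafe.config.ConfigFactory
import ml.combust.mleap.executor.ExecutorConfig

object ExecutorConfigDemo extends App {
  val config = ConfigFactory.parseString(
    """default-memory-timeout = 15m
      |default-disk-timeout = 15m
      |stream {
      |  default-parallelism = 4
      |  default-buffer-size = 1024
      |}
      |flow {
      |  default-parallelism = 4
      |}""".stripMargin)

  val executorConfig = new ExecutorConfig(config)
  println(executorConfig.defaultMemoryTimeout) // 15 minutes as a FiniteDuration
}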
Example 165
Source File: TypeConverters.scala    From mleap   with Apache License 2.0 5 votes vote down vote up
package ml.combust.mleap.springboot

import java.net.URI
import java.util.concurrent.TimeUnit

import com.google.protobuf.ProtocolStringList
import ml.combust.mleap.executor
import ml.combust.mleap.pb._

import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.duration._
import scala.util.Try
import ml.combust.mleap.runtime.types.BundleTypeConverters._

object TypeConverters {
  import scala.language.implicitConversions

  implicit def getTimeout(ms: Int): FiniteDuration = FiniteDuration(ms, TimeUnit.MILLISECONDS)

  implicit def pbToExecutorLoadModelRequest(request: LoadModelRequest): executor.LoadModelRequest =
    executor.LoadModelRequest(modelName = request.modelName,
                              uri = URI.create(request.uri),
                              config = request.config.map(pbToExecutorModelConfig),
                              force = request.force)

  implicit def javaPbToExecutorLoadModelRequest(request: Mleap.LoadModelRequest): executor.LoadModelRequest = {
    val modelConfig = Option(request.getConfig).map(javaPbToExecutorModelConfig)

    executor.LoadModelRequest(modelName = request.getModelName,
      uri = URI.create(request.getUri),
      config = modelConfig,
      force = request.getForce)
  }

  implicit def pbToExecutorModelConfig(config: ModelConfig): executor.ModelConfig = {
    executor.ModelConfig(memoryTimeout = config.memoryTimeout.map(_.millis),
      diskTimeout = config.diskTimeout.map(_.millis))
  }

  implicit def javaPbToExecutorModelConfig(config: Mleap.ModelConfig): executor.ModelConfig = {
    executor.ModelConfig(memoryTimeout = Option(config.getMemoryTimeout).map(_.getValue.millis),
      diskTimeout = Option(config.getDiskTimeout).map(_.getValue.millis))
  }

  implicit def executorToPbModelConfig(config: executor.ModelConfig): ModelConfig =
    ModelConfig(memoryTimeout = config.memoryTimeout.map(_.toMillis),
      diskTimeout = config.diskTimeout.map(_.toMillis))

  implicit def executorToPbModel(model: executor.Model): Model =
    Model(name = model.name, uri = model.uri.toString, config = Some(model.config))

  implicit def pbToExecutorModel(model: Model): executor.Model =
    executor.Model(name = model.name, uri = URI.create(model.uri), config = model.config.get)

  implicit def executorToPbBundleMeta(meta: executor.BundleMeta): BundleMeta =
    BundleMeta(bundle = Some(meta.info.asBundle), inputSchema = Some(meta.inputSchema), outputSchema = Some(meta.outputSchema))

  implicit def pbToExecutorTransformOptions(options: TransformOptions): executor.TransformOptions =
    executor.TransformOptions(select = options.select, selectMode = options.selectMode)

  implicit def javaPbToExecutorTransformOptions(options: Mleap.TransformOptions): executor.TransformOptions =
    executor.TransformOptions(select = options.getSelectList, selectMode = options.getSelectMode)

  implicit def javaPbToExecutorSelectMode(sm: Mleap.SelectMode): executor.SelectMode =
    if (sm == Mleap.SelectMode.SELECT_MODE_RELAXED)
      executor.SelectMode.Relaxed
    else
      executor.SelectMode.Strict


  implicit def javaPbToExecutorSelect(select: ProtocolStringList): Option[Seq[String]] =
    if (select.isEmpty) None else Some(select.toArray().map(_.asInstanceOf[String]).toSeq)

  implicit def pbToExecutorSelectMode(sm: SelectMode): executor.SelectMode =
    if (sm.isSelectModeRelaxed)
      executor.SelectMode.Relaxed
    else
      executor.SelectMode.Strict

  implicit def pbToExecutorSelect(select: Seq[String]): Option[Seq[String]] = if (select.isEmpty) None else Some(select)

  implicit class RichFuture[T](f: Future[T]) {
    def mapAll[U](pf: PartialFunction[Try[T], U])(implicit executor: ExecutionContext): Future[U] = {
      val p = Promise[U]()
      f.onComplete(r => p.complete(Try(pf(r))))(executor)
      p.future
    }
  }
} 
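Two of these conveniences in a short sketch; the division by zero is only there to force a Failure:

import ml.combust.mleap.springboot.TypeConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.util.{ Failure, Success }

object TypeConvertersDemo extends App {
  // Int to FiniteDuration via the implicit getTimeout conversion
  val timeout: FiniteDuration = 500

  // mapAll folds both outcomes of a Future into a single value
  val outcome: Future[String] = Future(42 / 0).mapAll {
    case Success(v) => s"got $v"
    case Failure(e) => s"failed: ${e.getMessage}"
  }
}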
Example 166
Source File: AuditSrv.scala    From Cortex   with GNU Affero General Public License v3.0 5 votes vote down vote up
package org.thp.cortex.services

import javax.inject.{Inject, Singleton}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration

import play.api.Logger

import akka.actor.{Actor, ActorRef}
import org.thp.cortex.models.JobStatus

import org.elastic4play.models.BaseEntity
import org.elastic4play.services._

object AuditActor {
  case class Register(jobId: String, timeout: FiniteDuration)
  case class Unregister(jobId: String, actorRef: ActorRef)
  case class JobEnded(jobId: String, status: JobStatus.Type)
}

@Singleton
class AuditActor @Inject()(eventSrv: EventSrv, implicit val ec: ExecutionContext) extends Actor {

  import AuditActor._

  object EntityExtractor {
    def unapply(e: BaseEntity) = Some((e.model, e.id, e.routing))
  }
  var registration                    = Map.empty[String, Seq[ActorRef]]
  private[AuditActor] lazy val logger = Logger(getClass)

  override def preStart(): Unit = {
    eventSrv.subscribe(self, classOf[EventMessage])
    super.preStart()
  }

  override def postStop(): Unit = {
    eventSrv.unsubscribe(self)
    super.postStop()
  }

  override def receive: Receive = {
    case Register(jobId, timeout) ⇒
      logger.info(s"Register new listener for job $jobId ($sender)")
      val newActorList = registration.getOrElse(jobId, Nil) :+ sender
      registration += (jobId → newActorList)
      context.system.scheduler.scheduleOnce(timeout, self, Unregister(jobId, sender))

    case Unregister(jobId, actorRef) ⇒
      logger.info(s"Unregister listener for job $jobId ($actorRef)")
      val newActorList = registration.getOrElse(jobId, Nil).filterNot(_ == actorRef)
      registration += (jobId → newActorList)

    case AuditOperation(EntityExtractor(model, id, routing), action, details, authContext, date) ⇒
      if (model.modelName == "job" && action == AuditableAction.Update) {
        logger.info(s"Job $id has be updated (${details \ "status"})")
        val status = (details \ "status").asOpt[JobStatus.Type].getOrElse(JobStatus.InProgress)
        if (status != JobStatus.InProgress) registration.getOrElse(id, Nil).foreach { aref ⇒
          aref ! JobEnded(id, status)
        }
      }
  }
} 
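A sketch of how a caller might wait for a job's terminal status, assuming it holds an ActorRef to the AuditActor; JobWatcher is a hypothetical name:

import akka.actor.{ Actor, ActorRef }
import scala.concurrent.duration._
import org.thp.cortex.services.AuditActor.{ JobEnded, Register }

class JobWatcher(auditActor: ActorRef, jobId: String) extends Actor {

  override def preStart(): Unit =
    // subscribe; the registration expires after the timeout
    auditActor ! Register(jobId, timeout = 1.minute)

  override def receive: Receive = {
    case JobEnded(id, status) =>
      // react to the terminal status, then stop watching
      context.stop(self)
  }
}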
Example 167
Source File: ProcessJobRunnerSrv.scala    From Cortex   with GNU Affero General Public License v3.0 5 votes vote down vote up
package org.thp.cortex.services

import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Path, Paths}

import akka.actor.ActorSystem
import javax.inject.{Inject, Singleton}
import org.elastic4play.utils.RichFuture
import org.thp.cortex.models._
import play.api.Logger
import play.api.libs.json.Json

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.sys.process.{Process, ProcessLogger, _}
import scala.util.Try

@Singleton
class ProcessJobRunnerSrv @Inject()(implicit val system: ActorSystem) {

  lazy val logger = Logger(getClass)

  private val pythonPackageVersionRegex = "^Version: ([0-9]*)\\.([0-9]*)\\.([0-9]*)".r

  def checkCortexUtilsVersion(pythonVersion: String): Option[(Int, Int, Int)] =
    Try {
      (s"pip$pythonVersion" :: "show" :: "cortexutils" :: Nil)
        .lineStream
        .collectFirst {
          case pythonPackageVersionRegex(major, minor, patch) ⇒ (major.toInt, minor.toInt, patch.toInt)
        }
    }.getOrElse(None)

  def run(jobDirectory: Path, command: String, job: Job, timeout: Option[FiniteDuration])(implicit ec: ExecutionContext): Future[Unit] = {
    val baseDirectory = Paths.get(command).getParent.getParent
    val output        = StringBuilder.newBuilder
    logger.info(s"Execute $command in $baseDirectory, timeout is ${timeout.fold("none")(_.toString)}")
    val process = Process(Seq(command, jobDirectory.toString), baseDirectory.toFile)
      .run(ProcessLogger { s ⇒
        logger.info(s"  Job ${job.id}: $s")
        output ++= s
      })
    val execution = Future
      .apply {
        process.exitValue()
        ()
      }
      .map { _ ⇒
        val outputFile = jobDirectory.resolve("output").resolve("output.json")
        if (!Files.exists(outputFile) || Files.size(outputFile) == 0) {
          val report = Json.obj("success" → false, "errorMessage" → output.toString)
          Files.write(outputFile, report.toString.getBytes(StandardCharsets.UTF_8))
        }
        ()
      }
      .recoverWith {
        case error ⇒
          logger.error(s"Execution of command $command failed", error)
          Future.apply {
            val report = Json.obj("success" → false, "errorMessage" → s"${error.getMessage}\n$output")
            Files.write(jobDirectory.resolve("output").resolve("output.json"), report.toString.getBytes(StandardCharsets.UTF_8))
            ()
          }
      }
    timeout.fold(execution)(t ⇒ execution.withTimeout(t, killProcess(process)))
  }

  def killProcess(process: Process): Unit = {
    logger.info("Timeout reached, killing process")
    process.destroy()
  }
} 
Example 168
Source File: BackOffSupervision.scala    From schedoscope   with Apache License 2.0 5 votes vote down vote up
package org.schedoscope.scheduler.utils

import akka.actor.{ActorRef, ActorSystem}
import org.slf4j.LoggerFactory

import scala.collection.mutable.HashMap
import scala.concurrent.duration.{FiniteDuration, _}


class BackOffSupervision(managerName: String, system: ActorSystem) {

  val log = LoggerFactory.getLogger(getClass)

  val actorBackOffWaitTime = HashMap[String, ExponentialBackOff]()

  def manageActorLifecycle(managedActor: ActorRef, backOffSlotTime: FiniteDuration = null, backOffMinimumDelay: FiniteDuration = null): FiniteDuration = {
    val managedActorName = managedActor.path.toStringWithoutAddress

    if (actorBackOffWaitTime.contains(managedActorName)) {
      val newBackOff = actorBackOffWaitTime(managedActorName).nextBackOff
      actorBackOffWaitTime.put(managedActorName, newBackOff)
      log.warn(s"$managerName: Set new back-off waiting " +
        s"time to value ${newBackOff.backOffWaitTime} for rebooted actor ${managedActorName}; " +
        s"(retries=${newBackOff.retries}, resets=${newBackOff.resets}, total-retries=${newBackOff.totalRetries})")

      //schedule tick response based on backoff
      newBackOff.backOffWaitTime
    } else {
      val backOff = ExponentialBackOff(backOffSlotTime = backOffSlotTime, constantDelay = backOffMinimumDelay)
      log.debug(s"$managerName: Set initial back-off waiting " +
        s"time to value ${backOff.backOffWaitTime} for booted actor ${managedActorName}; " +
        s"(retries=${backOff.retries}, resets=${backOff.resets}, total-retries=${backOff.totalRetries})")
      actorBackOffWaitTime.put(managedActorName, backOff)

      //schedule immediate tick response
      0 millis
    }
  }

} 
Example 169
Source File: ExponentialBackOff.scala    From schedoscope   with Apache License 2.0 5 votes vote down vote up
package org.schedoscope.scheduler.utils

import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.util.Random


case class ExponentialBackOff(backOffSlotTime: FiniteDuration,
                              backOffSlot: Int = 1,
                              backOffWaitTime: FiniteDuration = Duration.Zero,
                              constantDelay: FiniteDuration = Duration.Zero,
                              ceiling: Int = 10,
                              resetOnCeiling: Boolean = false,
                              retries: Int = 0,
                              resets: Int = 0,
                              totalRetries: Long = 0) {

  private def updateTime = backOffSlotTime * expectedBackOff(backOffSlot) + constantDelay

  private def expectedBackOff(backOffSlot: Int) = {
    val rand = new Random().nextInt(backOffSlot + 1)
    math.round(math.pow(2, rand) - 1)
  }

  def nextBackOff: ExponentialBackOff = {
    if (backOffSlot >= ceiling && resetOnCeiling)
    // reset
      copy(backOffSlot = 1,
        backOffWaitTime = Duration.Zero,
        resets = resets + 1,
        retries = 0,
        totalRetries = totalRetries + 1)
    else {
      val newBackOffSlot = if (backOffSlot >= ceiling) ceiling else backOffSlot + 1
      // increase 1 collision
      copy(backOffSlot = newBackOffSlot,
        backOffWaitTime = updateTime,
        retries = retries + 1,
        totalRetries = totalRetries + 1)
    }
  }
} 
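Driving the case class by hand shows the randomized growth of the wait time; a sketch with illustrative slot times:

import org.schedoscope.scheduler.utils.ExponentialBackOff
import scala.concurrent.duration._

object BackOffDemo extends App {
  var backOff = ExponentialBackOff(backOffSlotTime = 100.millis, constantDelay = 50.millis)
  (1 to 5).foreach { _ =>
    backOff = backOff.nextBackOff
    println(s"retry ${backOff.retries}: wait ${backOff.backOffWaitTime}")
  }
}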
Example 170
Source File: Mixer.scala    From Learn-Scala-Programming   with MIT License 5 votes vote down vote up
package ch12

import akka.actor.typed.{ActorRef, Behavior, SupervisorStrategy}
import akka.actor.typed.scaladsl.Behaviors
import ch12.Bakery.{Groceries, Dough}
import ch12.Chef.Collect

import scala.concurrent.duration.FiniteDuration
import scala.util.Random

object Mixer {
  class MotorOverheatException extends Exception
  class SlowRotationSpeedException extends Exception
  class StrongVibrationException extends Exception

  final case class Mix(groceries: Groceries, sender: ActorRef[Collect])

  def mix(mixTime: FiniteDuration): Behavior[Mix] = Behaviors.receive[Mix] {
    case (ctx, Mix(Groceries(eggs, flour, sugar, chocolate), sender)) =>
      if (Random.nextBoolean()) throw new MotorOverheatException
      Thread.sleep(mixTime.toMillis)
      sender ! Collect(Dough(eggs * 50 + flour + sugar + chocolate), ctx.self)
      Behaviors.stopped
  }

  def controlledMix(mixTime: FiniteDuration): Behavior[Mix] =
    Behaviors
      .supervise(
        Behaviors
          .supervise(Behaviors
            .supervise(mix(mixTime))
            .onFailure[MotorOverheatException](SupervisorStrategy.stop))
          .onFailure[SlowRotationSpeedException](SupervisorStrategy.restart))
      .onFailure[StrongVibrationException](SupervisorStrategy.resume)
} 
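A sketch of spawning the triple-supervised behavior from a parent actor; spawnMixer and the grocery quantities are illustrative:

import akka.actor.typed.ActorRef
import akka.actor.typed.scaladsl.ActorContext
import ch12.Bakery.Groceries
import ch12.Chef.Collect
import ch12.Mixer
import scala.concurrent.duration._

def spawnMixer(ctx: ActorContext[_], chef: ActorRef[Collect]): ActorRef[Mixer.Mix] = {
  // overheats stop the child, slow rotation restarts it, strong vibration is resumed past
  val mixer = ctx.spawn(Mixer.controlledMix(mixTime = 3.seconds), "mixer-1")
  mixer ! Mixer.Mix(Groceries(eggs = 2, flour = 500, sugar = 200, chocolate = 100), chef)
  mixer
}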
Example 171
Source File: compiletime.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package shapeless.test

import scala.language.experimental.macros

import scala.concurrent.duration.FiniteDuration
import scala.reflect.macros.blackbox


object compileTime {
  def apply(code: String): FiniteDuration = macro CompileTimeMacros.applyImpl
}

@macrocompat.bundle
class CompileTimeMacros(val c: blackbox.Context) {
  import c.universe._

  def applyImpl(code: Tree): Tree = {
    def wallClock(codeStr: String): Long = {
      try {
        val t1 = System.nanoTime()
        c.typecheck(c.parse(codeStr))
        val t2 = System.nanoTime()
        t2 - t1
      } catch {
        case ex: Exception => c.abort(c.enclosingPosition, ex.getMessage)
      }
    }

    val Literal(Constant(codeStr: String)) = code
    val elapsedTime = wallClock(codeStr)

    q"_root_.scala.concurrent.duration.Duration.fromNanos($elapsedTime)"
  }
} 
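Invoked with a string literal, the macro typechecks the snippet during the caller's compilation and expands to the elapsed time; a sketch:

import shapeless.test.compileTime
import scala.concurrent.duration.FiniteDuration

object CompileTimeDemo extends App {
  // measures how long the quoted snippet takes to typecheck
  val elapsed: FiniteDuration = compileTime("""List(1, 2, 3).map(_ + 1)""")
  println(s"typechecking took $elapsed")
}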
Example 172
Source File: ReceiveTimeout.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package akka.actor.dungeon

import ReceiveTimeout.emptyReceiveTimeoutData
import akka.actor.ActorCell
import akka.actor.ActorCell.emptyCancellable
import akka.actor.Cancellable
import scala.concurrent.duration.Duration
import scala.concurrent.duration.FiniteDuration

private[akka] object ReceiveTimeout {
  final val emptyReceiveTimeoutData: (Duration, Cancellable) = (Duration.Undefined, ActorCell.emptyCancellable)
}

private[akka] trait ReceiveTimeout { this: ActorCell ⇒

  import ReceiveTimeout._
  import ActorCell._

  private var receiveTimeoutData: (Duration, Cancellable) = emptyReceiveTimeoutData

  final def receiveTimeout: Duration = receiveTimeoutData._1

  final def setReceiveTimeout(timeout: Duration): Unit = receiveTimeoutData = receiveTimeoutData.copy(_1 = timeout)

  final def checkReceiveTimeout() {
    val recvtimeout = receiveTimeoutData
    //Only reschedule if desired and there are currently no more messages to be processed
    if (!mailbox.hasMessages) recvtimeout._1 match {
      case f: FiniteDuration ⇒
        recvtimeout._2.cancel() //Cancel any ongoing future
        val task = system.scheduler.scheduleOnce(f, self, akka.actor.ReceiveTimeout)(this.dispatcher)
        receiveTimeoutData = (f, task)
      case _ ⇒ cancelReceiveTimeout()
    }
    else cancelReceiveTimeout()

  }

  final def cancelReceiveTimeout(): Unit =
    if (receiveTimeoutData._2 ne emptyCancellable) {
      receiveTimeoutData._2.cancel()
      receiveTimeoutData = (receiveTimeoutData._1, emptyCancellable)
    }

} 
Example 173
Source File: TimerSchedulerImpl.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package akka.actor

import scala.concurrent.duration.FiniteDuration

import akka.annotation.InternalApi
import akka.event.Logging
import akka.util.OptionVal


@InternalApi private[akka] class TimerSchedulerImpl(ctx: ActorContext) extends TimerScheduler {
  import TimerSchedulerImpl._

  private val log = Logging(ctx.system, classOf[TimerScheduler])
  private var timers: Map[Any, Timer] = Map.empty
  private var timerGen = 0
  private def nextTimerGen(): Int = {
    timerGen += 1
    timerGen
  }

  override def startPeriodicTimer(key: Any, msg: Any, interval: FiniteDuration): Unit =
    startTimer(key, msg, interval, repeat = true)

  override def startSingleTimer(key: Any, msg: Any, timeout: FiniteDuration): Unit =
    startTimer(key, msg, timeout, repeat = false)

  private def startTimer(key: Any, msg: Any, timeout: FiniteDuration, repeat: Boolean): Unit = {
    timers.get(key) match {
      case Some(t) ⇒ cancelTimer(t)
      case None ⇒
    }
    val nextGen = nextTimerGen()

    val timerMsg = TimerMsg(key, nextGen, this)
    val task =
      if (repeat)
        ctx.system.scheduler.schedule(timeout, timeout, ctx.self, timerMsg)(ctx.dispatcher)
      else
        ctx.system.scheduler.scheduleOnce(timeout, ctx.self, timerMsg)(ctx.dispatcher)

    val nextTimer = Timer(key, msg, repeat, nextGen, task)
    log.debug("Start timer [{}] with generation [{}]", key, nextGen)
    timers = timers.updated(key, nextTimer)
  }

  override def isTimerActive(key: Any): Boolean =
    timers.contains(key)

  override def cancel(key: Any): Unit = {
    timers.get(key) match {
      case None ⇒ // already removed/canceled
      case Some(t) ⇒ cancelTimer(t)
    }
  }

  private def cancelTimer(timer: Timer): Unit = {
    log.debug("Cancel timer [{}] with generation [{}]", timer.key, timer.generation)
    timer.task.cancel()
    timers -= timer.key
  }

  override def cancelAll(): Unit = {
    log.debug("Cancel all timers")
    timers.valuesIterator.foreach { timer ⇒
      timer.task.cancel()
    }
    timers = Map.empty
  }

  def interceptTimerMsg(timerMsg: TimerMsg): OptionVal[AnyRef] = {
    timers.get(timerMsg.key) match {
      case None ⇒
        // it was from canceled timer that was already enqueued in mailbox
        log.debug("Received timer [{}] that has been removed, discarding", timerMsg.key)
        OptionVal.None // message should be ignored
      case Some(t) ⇒
        if (timerMsg.owner ne this) {
          // after restart, it was from an old instance that was enqueued in mailbox before canceled
          log.debug("Received timer [{}] from old restarted instance, discarding", timerMsg.key)
          OptionVal.None // message should be ignored
        } else if (timerMsg.generation == t.generation) {
          // valid timer
          if (!t.repeat)
            timers -= t.key
          OptionVal.Some(t.msg.asInstanceOf[AnyRef])
        } else {
          // it was from an old timer that was enqueued in mailbox before canceled
          log.debug(
            "Received timer [{}] from from old generation [{}], expected generation [{}], discarding",
            timerMsg.key, timerMsg.generation, t.generation)
          OptionVal.None // message should be ignored
        }
    }
  }

} 
Example 174
Source File: GracefulStopSupport.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package akka.pattern

import akka.actor._
import akka.util.{ Timeout }
import akka.dispatch.sysmsg.{ Unwatch, Watch }
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

trait GracefulStopSupport {
  
  def gracefulStop(target: ActorRef, timeout: FiniteDuration, stopMessage: Any = PoisonPill): Future[Boolean] = {
    val internalTarget = target.asInstanceOf[InternalActorRef]
    val ref = PromiseActorRef(internalTarget.provider, Timeout(timeout), target, stopMessage.getClass.getName)
    internalTarget.sendSystemMessage(Watch(internalTarget, ref))
    target.tell(stopMessage, Actor.noSender)
    ref.result.future.transform(
      {
        case Terminated(t) if t.path == target.path ⇒ true
        case _ ⇒ { internalTarget.sendSystemMessage(Unwatch(target, ref)); false }
      },
      t ⇒ { internalTarget.sendSystemMessage(Unwatch(target, ref)); t })(ref.internalCallingThreadExecutionContext)
  }
} 
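Typical call-site usage goes through the akka.pattern package object, which mixes this trait in; the worker reference is an assumption:

import akka.actor.ActorRef
import akka.pattern.gracefulStop
import scala.concurrent.Await
import scala.concurrent.duration._

def shutDown(worker: ActorRef): Boolean =
  // resolves to true once Terminated arrives, or fails with an AskTimeoutException
  Await.result(gracefulStop(worker, 5.seconds), 6.seconds)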
Example 175
Source File: FutureTimeoutSupport.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package akka.pattern

import scala.concurrent.{ ExecutionContext, Promise, Future }
import akka.actor._
import scala.util.control.NonFatal
import scala.concurrent.duration.FiniteDuration
import java.util.concurrent.CompletionStage
import java.util.concurrent.CompletableFuture
import akka.dispatch.Futures
import java.util.function.BiConsumer

trait FutureTimeoutSupport {
  
  def afterCompletionStage[T](duration: FiniteDuration, using: Scheduler)(value: ⇒ CompletionStage[T])(implicit ec: ExecutionContext): CompletionStage[T] =
    if (duration.isFinite() && duration.length < 1) {
      try value catch { case NonFatal(t) ⇒ Futures.failedCompletionStage(t) }
    } else {
      val p = new CompletableFuture[T]
      using.scheduleOnce(duration) {
        try {
          val future = value
          future.whenComplete(new BiConsumer[T, Throwable] {
            override def accept(t: T, ex: Throwable): Unit = {
              if (t != null) p.complete(t)
              if (ex != null) p.completeExceptionally(ex)
            }
          })
        } catch {
          case NonFatal(ex) ⇒ p.completeExceptionally(ex)
        }
      }
      p
    }
} 
Example 176
Source File: PinnedDispatcher.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package akka.dispatch

import akka.actor.ActorCell
import scala.concurrent.duration.Duration
import scala.concurrent.duration.FiniteDuration


class PinnedDispatcher(
  _configurator: MessageDispatcherConfigurator,
  _actor: ActorCell,
  _id: String,
  _shutdownTimeout: FiniteDuration,
  _threadPoolConfig: ThreadPoolConfig)
  extends Dispatcher(
    _configurator,
    _id,
    Int.MaxValue,
    Duration.Zero,
    _threadPoolConfig.copy(corePoolSize = 1, maxPoolSize = 1),
    _shutdownTimeout) {

  @volatile
  private var owner: ActorCell = _actor

  //Relies on an external lock provided by MessageDispatcher.attach
  protected[akka] override def register(actorCell: ActorCell) = {
    val actor = owner
    if ((actor ne null) && actorCell != actor) throw new IllegalArgumentException("Cannot register to anyone but " + actor)
    owner = actorCell
    super.register(actorCell)
  }
  //Relies on an external lock provided by MessageDispatcher.detach
  protected[akka] override def unregister(actor: ActorCell) = {
    super.unregister(actor)
    owner = null
  }
} 
Example 177
Source File: LedPulser.scala    From Pi-Akka-Cluster   with Apache License 2.0 5 votes vote down vote up
package akkapi.cluster

import akka.actor.typed.scaladsl.{ActorContext, Behaviors, TimerScheduler}
import akka.actor.typed.{ActorRef, Behavior}
import org.neopixel.Neopixel

import scala.concurrent.duration.FiniteDuration

object LedPulser {
  sealed trait Command
  final case class PulseLed(ledNumber: Int,
                            color: Long,
                            flashDuration: FiniteDuration,
                            overRunColor: Option[Long]) extends Command
  private final case class StopPulse(ledNumber: Int) extends Command

  def apply(settings: Settings, ledStripDriver: ActorRef[LedStripDriver.Command]): Behavior[Command] = Behaviors.setup { context =>
    Behaviors.withTimers { timers =>
      new LedPulser(settings, context, timers, ledStripDriver).run(Neopixel.Black)
    }
  }
}

class LedPulser(settings: Settings,
                context: ActorContext[LedPulser.Command],
                timers: TimerScheduler[LedPulser.Command],
                ledStripDriver: ActorRef[LedStripDriver.Command]) {
  import LedPulser._

  def run(currentColor: Long): Behavior[Command] = Behaviors.receiveMessagePartial {
    case PulseLed(ledNumber, color, flashDuration, overRunColor) if color != currentColor =>
      timers.startTimerWithFixedDelay(StopPulse(ledNumber), flashDuration)
      ledStripDriver ! LedStripDriver.SetLedState(ledNumber, color, None)
      run(color)
    case PulseLed(ledNumber, color, flashDuration, overRunColor) =>
      // If the new color is the same as the current color, it implies that
      // the timer is still running. Obviously, no need to update the color
      // on the LED. Running `startTimerWithFixedDelay` will cancel the current
      // timer and start a "fresh" one
      timers.startTimerWithFixedDelay(StopPulse(ledNumber), flashDuration)
      run(color)
    case StopPulse(ledNumber) =>
      ledStripDriver ! LedStripDriver.SetLedState(ledNumber, Neopixel.Black, None)
      run(Neopixel.Black)
  }
} 
Example 178
Source File: package.scala    From embedded-kafka   with MIT License 5 votes vote down vote up
package net.manub

import scala.concurrent.duration.FiniteDuration

package object embeddedkafka {
  implicit private[embeddedkafka] class ServerOps(
      servers: Seq[EmbeddedServer]
  ) {
    def toFilteredSeq[T <: EmbeddedServer](
        filter: EmbeddedServer => Boolean
    ): Seq[T] =
      servers.filter(filter).asInstanceOf[Seq[T]]
  }

  def duration2JavaDuration(d: FiniteDuration): java.time.Duration =
    java.time.Duration.ofNanos(d.toNanos)

  def loanAndClose[A <: AutoCloseable, B](a: A)(f: A => B): B = {
    try {
      f(a)
    } finally {
      a.close()
    }
  }
} 
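Both public helpers in a short sketch; data.txt is an illustrative path:

import net.manub.embeddedkafka._
import scala.concurrent.duration._

object HelpersDemo extends App {
  // FiniteDuration to java.time.Duration
  val javaDuration: java.time.Duration = duration2JavaDuration(30.seconds)

  // loan pattern: the reader is closed even if the body throws
  val firstLine = loanAndClose(new java.io.BufferedReader(new java.io.FileReader("data.txt"))) { reader =>
    reader.readLine()
  }
  println(s"$javaDuration / $firstLine")
}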
Example 179
Source File: ConnectionStatus.scala    From mqttd   with MIT License 5 votes vote down vote up
package plantae.citrus.mqtt.actors.session

import java.util.concurrent.TimeUnit

import akka.actor._
import plantae.citrus.mqtt.actors.SystemRoot
import plantae.citrus.mqtt.actors.topic.Publish
import plantae.citrus.mqtt.dto.connect.Will

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration


case class ConnectionStatus(will: Option[Will], keepAliveTime: Int, session: ActorRef, sessionContext: ActorContext, socket: ActorRef) {
  implicit val ec: ExecutionContext = ExecutionContext.Implicits.global

  private var keepAliveTimer: Option[Cancellable] = if (keepAliveTime > 0)
    Some(SystemRoot.system.scheduler.scheduleOnce(FiniteDuration(keepAliveTime, TimeUnit.SECONDS), session, SessionKeepAliveTimeOut))
  else None

  def cancelTimer = {
    keepAliveTimer match {
      case Some(x) => x.cancel()
      case None =>
    }

  }

  def resetTimer = {
    cancelTimer
    keepAliveTimer = if (keepAliveTime > 0)
      Some(SystemRoot.system.scheduler.scheduleOnce(FiniteDuration(keepAliveTime, TimeUnit.SECONDS), session, SessionKeepAliveTimeOut))
    else None
  }

  private def publishWill = {
    will match {
      case Some(x) =>
        // TODO: will QoS
        SystemRoot.topicManager ! Publish(x.topic, x.message, x.retain, None)
      case None =>
    }
  }

  def destroyProperly = {
    sessionContext.stop(socket)
    cancelTimer
  }

  def destroyAbnormally = {
    sessionContext.stop(socket)
    publishWill
    cancelTimer
  }


} 
Example 180
Source File: SeqFileStreamProcessor.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.streaming.examples.fsio

import java.io.File
import java.time.Instant
import java.util.concurrent.TimeUnit

import scala.concurrent.duration.FiniteDuration
import akka.actor.Cancellable
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.io.SequenceFile._
import org.apache.hadoop.io.{SequenceFile, Text}
import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.examples.fsio.HadoopConfig._
import org.apache.gearpump.streaming.examples.fsio.SeqFileStreamProcessor._
import org.apache.gearpump.streaming.task.{Task, TaskContext}

class SeqFileStreamProcessor(taskContext: TaskContext, config: UserConfig)
  extends Task(taskContext, config) {

  import taskContext.taskId

  val outputPath = new Path(config.getString(OUTPUT_PATH).get + File.separator + taskId)
  var writer: SequenceFile.Writer = null
  val textClass = new Text().getClass
  val key = new Text()
  val value = new Text()
  val hadoopConf = config.hadoopConf

  private var msgCount: Long = 0
  private var snapShotKVCount: Long = 0
  private var snapShotTime: Long = 0
  private var scheduler: Cancellable = null

  override def onStart(startTime: Instant): Unit = {

    val fs = FileSystem.get(hadoopConf)
    fs.deleteOnExit(outputPath)
    writer = SequenceFile.createWriter(hadoopConf, Writer.file(outputPath),
      Writer.keyClass(textClass), Writer.valueClass(textClass))

    scheduler = taskContext.schedule(new FiniteDuration(5, TimeUnit.SECONDS),
      new FiniteDuration(5, TimeUnit.SECONDS))(reportStatus())
    snapShotTime = System.currentTimeMillis()
    LOG.info("sequence file bolt initiated")
  }

  override def onNext(msg: Message): Unit = {
    val kv = msg.value.asInstanceOf[String].split("\\+\\+")
    if (kv.length >= 2) {
      key.set(kv(0))
      value.set(kv(1))
      writer.append(key, value)
    }
    msgCount += 1
  }

  override def onStop(): Unit = {
    if (scheduler != null) {
      scheduler.cancel()
    }
    writer.close()
    LOG.info("sequence file bolt stopped")
  }

  private def reportStatus() = {
    val current: Long = System.currentTimeMillis()
    LOG.info(s"Task $taskId Throughput: ${
      (msgCount - snapShotKVCount,
        (current - snapShotTime) / 1000)
    } (KVPairs, second)")
    snapShotKVCount = msgCount
    snapShotTime = current
  }
}

object SeqFileStreamProcessor {
  val OUTPUT_PATH = "outputpath"
} 
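
The throughput figure logged every five seconds is the pair (messages since the last snapshot, seconds elapsed). A self-contained sketch of the same snapshot-based rate computation, with invented names:

// Hypothetical helper mirroring reportStatus: rate = delta count / delta time.
final class RateMeter {
  private var count: Long = 0L
  private var snapshotCount: Long = 0L
  private var snapshotTime: Long = System.currentTimeMillis()

  def record(): Unit = count += 1

  // Messages per second since the previous snapshot.
  def snapshot(): Double = {
    val now = System.currentTimeMillis()
    val rate = (count - snapshotCount) * 1000.0 / math.max(1L, now - snapshotTime)
    snapshotCount = count
    snapshotTime = now
    rate
  }
}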
Example 181
Source File: SOLStreamProcessor.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.streaming.examples.sol

import java.time.Instant
import java.util.concurrent.TimeUnit

import scala.concurrent.duration.FiniteDuration
import akka.actor.Cancellable
import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.task.{Task, TaskContext}

class SOLStreamProcessor(taskContext: TaskContext, conf: UserConfig)
  extends Task(taskContext, conf) {
  import taskContext.output

  val taskConf = taskContext

  private var msgCount: Long = 0
  private var scheduler: Cancellable = null
  private var snapShotWordCount: Long = 0
  private var snapShotTime: Long = 0

  override def onStart(startTime: Instant): Unit = {
    scheduler = taskContext.schedule(new FiniteDuration(5, TimeUnit.SECONDS),
      new FiniteDuration(5, TimeUnit.SECONDS))(reportWordCount())
    snapShotTime = System.currentTimeMillis()
  }

  override def onNext(msg: Message): Unit = {
    output(msg)
    msgCount = msgCount + 1
  }

  override def onStop(): Unit = {
    if (scheduler != null) {
      scheduler.cancel()
    }
  }

  def reportWordCount(): Unit = {
    val current: Long = System.currentTimeMillis()
    LOG.info(s"Task ${taskConf.taskId} " +
      s"Throughput: ${(msgCount - snapShotWordCount, (current - snapShotTime) / 1000)} " +
      s"(words, second)")
    snapShotWordCount = msgCount
    snapShotTime = current
  }
} 
Example 182
Source File: Sum.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.streaming.examples.wordcount

import java.time.Instant
import java.util.concurrent.TimeUnit

import scala.collection.mutable
import scala.concurrent.duration.FiniteDuration
import akka.actor.Cancellable
import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.task.{Task, TaskContext}

class Sum(taskContext: TaskContext, conf: UserConfig) extends Task(taskContext, conf) {
  private[wordcount] val map: mutable.HashMap[String, Long] = new mutable.HashMap[String, Long]()

  private[wordcount] var wordCount: Long = 0
  private var snapShotTime: Long = System.currentTimeMillis()
  private var snapShotWordCount: Long = 0

  private var scheduler: Cancellable = null

  override def onStart(startTime: Instant): Unit = {
    scheduler = taskContext.schedule(new FiniteDuration(5, TimeUnit.SECONDS),
      new FiniteDuration(30, TimeUnit.SECONDS))(reportWordCount)
  }

  override def onNext(msg: Message): Unit = {
    if (null != msg) {
      val current = map.getOrElse(msg.value.asInstanceOf[String], 0L)
      wordCount += 1
      map.put(msg.value.asInstanceOf[String], current + 1)
    }
  }

  override def onStop(): Unit = {
    if (scheduler != null) {
      scheduler.cancel()
    }
  }

  def reportWordCount(): Unit = {
    val current: Long = System.currentTimeMillis()
    LOG.info(s"Task ${taskContext.taskId} Throughput:" +
      s" ${(wordCount - snapShotWordCount, (current - snapShotTime) / 1000)} (words, second)")
    snapShotWordCount = wordCount
    snapShotTime = current
  }
} 
Example 183
Source File: DropWithinTask.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.akkastream.task

import java.time.Instant
import java.util.concurrent.TimeUnit

import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.task.TaskContext

import scala.concurrent.duration.FiniteDuration

case object DropWithinTimeout

class DropWithinTask[T](context: TaskContext, userConf : UserConfig)
  extends GraphTask(context, userConf) {

  val timeout = userConf.getValue[FiniteDuration](DropWithinTask.TIMEOUT).
    getOrElse(FiniteDuration(0, TimeUnit.MINUTES))
  var timeoutActive = true

  override def onStart(startTime: Instant): Unit = {
    context.scheduleOnce(timeout)(
      self ! Message(DropWithinTimeout, Instant.now())
    )
  }

  override def onNext(msg: Message): Unit = {
    msg.value match {
      case DropWithinTimeout =>
        timeoutActive = false
      case _ =>
    }
    if (!timeoutActive) {
      context.output(msg)
    }
  }
}

object DropWithinTask {
  val TIMEOUT = "TIMEOUT"
} 
Example 184
Source File: TakeWithinTask.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.akkastream.task

import java.time.Instant
import java.util.concurrent.TimeUnit

import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.task.TaskContext

import scala.concurrent.duration.FiniteDuration

case object TakeWithinTimeout

class TakeWithinTask[T](context: TaskContext, userConf : UserConfig)
  extends GraphTask(context, userConf) {

  val timeout = userConf.getValue[FiniteDuration](TakeWithinTask.TIMEOUT).
    getOrElse(FiniteDuration(0, TimeUnit.MINUTES))
  var timeoutActive = false

  override def onStart(startTime: Instant): Unit = {
    context.scheduleOnce(timeout)(
      self ! Message(TakeWithinTimeout, Instant.now())
    )
  }

  override def onNext(msg: Message): Unit = {
    msg.value match {
      case TakeWithinTimeout =>
        timeoutActive = true
      case _ =>
    }
    if (!timeoutActive) {
      context.output(msg)
    }
  }
}

object TakeWithinTask {
  val TIMEOUT = "TIMEOUT"
} 
Example 185
Source File: TickSourceTask.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.akkastream.task

import java.time.Instant
import java.util.concurrent.TimeUnit

import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.task.TaskContext

import scala.concurrent.duration.FiniteDuration

class TickSourceTask[T](context: TaskContext, userConf : UserConfig)
  extends GraphTask(context, userConf) {

  val initialDelay = userConf.getValue[FiniteDuration](TickSourceTask.INITIAL_DELAY).
    getOrElse(FiniteDuration(0, TimeUnit.MINUTES))
  val interval = userConf.getValue[FiniteDuration](TickSourceTask.INTERVAL).
    getOrElse(FiniteDuration(0, TimeUnit.MINUTES))
  val tick = userConf.getValue[T](TickSourceTask.TICK).get

  override def onStart(startTime: Instant): Unit = {
    context.schedule(initialDelay, interval)(
      self ! Message(tick, Instant.now())
    )
  }

  override def onNext(msg: Message) : Unit = {
    context.output(msg)
  }
}

object TickSourceTask {
  val INITIAL_DELAY = "INITIAL_DELAY"
  val INTERVAL = "INTERVAL"
  val TICK = "TICK"
} 
Example 186
Source File: DelayInitialTask.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.akkastream.task

import java.time.Instant
import java.util.concurrent.TimeUnit

import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.task.TaskContext

import scala.concurrent.duration.FiniteDuration

case object DelayInitialTime

class DelayInitialTask[T](context: TaskContext, userConf : UserConfig)
  extends GraphTask(context, userConf) {

  val delayInitial = userConf.getValue[FiniteDuration](DelayInitialTask.DELAY_INITIAL).
    getOrElse(FiniteDuration(0, TimeUnit.MINUTES))
  var delayInitialActive = true

  override def onStart(startTime: Instant): Unit = {
    context.scheduleOnce(delayInitial)(
      self ! Message(DelayInitialTime, Instant.now())
    )
  }
  override def onNext(msg: Message): Unit = {
    msg.value match {
      case DelayInitialTime =>
        delayInitialActive = false
      case _ =>
        if (!delayInitialActive) {
          context.output(msg)
        }
    }
  }
}

object DelayInitialTask {
  val DELAY_INITIAL = "DELAY_INITIAL"
} 
Example 187
Source File: ThrottleTask.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.akkastream.task

import java.util.concurrent.TimeUnit

import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.task.TaskContext

import scala.concurrent.duration.FiniteDuration

class ThrottleTask[T](context: TaskContext, userConf : UserConfig)
  extends GraphTask(context, userConf) {

  val cost = userConf.getInt(ThrottleTask.COST).getOrElse(0)
  val costCalc = userConf.getValue[T => Int](ThrottleTask.COST_CALC)
  val maxBurst = userConf.getInt(ThrottleTask.MAX_BURST)
  val timePeriod = userConf.getValue[FiniteDuration](ThrottleTask.TIME_PERIOD).
    getOrElse(FiniteDuration(0, TimeUnit.MINUTES))
  // Nanoseconds per message; guard against the zero default to avoid division by zero.
  val interval = if (cost > 0) timePeriod.toNanos / cost else 0L

  // TODO control rate from TaskActor
  override def onNext(msg: Message): Unit = {
    // data and time are extracted but unused until rate control is wired in (see TODO above).
    val data = msg.value.asInstanceOf[T]
    val time = msg.timestamp
    context.output(msg)
  }
}

object ThrottleTask {
  val COST = "COST"
  val COST_CALC = "COST_CAL"
  val MAX_BURST = "MAX_BURST"
  val TIME_PERIOD = "TIME_PERIOD"
} 
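
For the interval arithmetic above: a time period of one second with a cost of 100 yields one message every 10 ms. A small sketch of the guarded computation, assuming nothing beyond the standard library:

import java.util.concurrent.TimeUnit
import scala.concurrent.duration.FiniteDuration

object ThrottleMath {
  // Nanoseconds between messages; None when cost is unset (the zero default above).
  def intervalNanos(timePeriod: FiniteDuration, cost: Int): Option[Long] =
    if (cost > 0) Some(timePeriod.toNanos / cost) else None
}

// ThrottleMath.intervalNanos(FiniteDuration(1, TimeUnit.SECONDS), 100) == Some(10000000L)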
Example 188
Source File: GroupedWithinTask.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.akkastream.task

import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.task.TaskContext

import scala.collection.immutable.VectorBuilder
import scala.concurrent.duration.FiniteDuration

class GroupedWithinTask[T](context: TaskContext, userConf : UserConfig)
  extends GraphTask(context, userConf) {

  case object GroupedWithinTrigger
  val buf: VectorBuilder[T] = new VectorBuilder
  val timeWindow = userConf.getValue[FiniteDuration](GroupedWithinTask.TIME_WINDOW)
  val batchSize = userConf.getInt(GroupedWithinTask.BATCH_SIZE)
  private var bufferedCount = 0

  // Minimal size-based grouping; flushing on timeWindow (via GroupedWithinTrigger)
  // is left unimplemented in this example.
  override def onNext(msg: Message): Unit = {
    buf += msg.value.asInstanceOf[T]
    bufferedCount += 1
    if (batchSize.exists(bufferedCount >= _)) {
      context.output(Message(buf.result(), msg.timestamp))
      buf.clear()
      bufferedCount = 0
    }
  }
}

object GroupedWithinTask {
  val BATCH_SIZE = "BATCH_SIZE"
  val TIME_WINDOW = "TIME_WINDOW"
} 
Example 189
Source File: MasterConnectionKeeper.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.cluster.appmaster

import java.util.concurrent.TimeUnit
import scala.concurrent.duration.FiniteDuration

import akka.actor._

import org.apache.gearpump.cluster.AppMasterToMaster.RegisterAppMaster
import org.apache.gearpump.cluster.MasterToAppMaster.AppMasterRegistered
import org.apache.gearpump.cluster.appmaster.MasterConnectionKeeper.AppMasterRegisterTimeout
import org.apache.gearpump.cluster.appmaster.MasterConnectionKeeper.MasterConnectionStatus.{MasterConnected, MasterStopped}
import org.apache.gearpump.cluster.master.MasterProxy.{MasterRestarted, WatchMaster}
import org.apache.gearpump.util.LogUtil


private[appmaster]
class MasterConnectionKeeper(
    register: RegisterAppMaster, masterProxy: ActorRef, masterStatusListener: ActorRef)
  extends Actor {

  import context.dispatcher

  private val LOG = LogUtil.getLogger(getClass)

  // Watch the master proxy so this actor is notified when the master restarts or stops.
  masterProxy ! WatchMaster(self)

  def registerAppMaster: Cancellable = {
    masterProxy ! register
    context.system.scheduler.scheduleOnce(FiniteDuration(30, TimeUnit.SECONDS),
      self, AppMasterRegisterTimeout)
  }

  context.become(waitMasterToConfirm(registerAppMaster))

  def waitMasterToConfirm(cancelRegister: Cancellable): Receive = {
    case AppMasterRegistered(appId) =>
      cancelRegister.cancel()
      masterStatusListener ! MasterConnected
      context.become(masterLivenessListener)
    case AppMasterRegisterTimeout =>
      cancelRegister.cancel()
      masterStatusListener ! MasterStopped
      context.stop(self)
  }

  def masterLivenessListener: Receive = {
    case MasterRestarted =>
      LOG.info("Master restarted, re-registering AppMaster....")
      context.become(waitMasterToConfirm(registerAppMaster))
    case MasterStopped =>
      LOG.info("Master is dead, killing this AppMaster....")
      masterStatusListener ! MasterStopped
      context.stop(self)
  }

  // Never invoked: the constructor switches to waitMasterToConfirm via context.become.
  def receive: Receive = null
}

private[appmaster] object MasterConnectionKeeper {

  case object AppMasterRegisterTimeout

  object MasterConnectionStatus {

    case object MasterConnected

    case object MasterStopped

  }

} 
Example 190
Source File: MongoDsl.scala    From gatling-mongodb-protocol   with MIT License 5 votes vote down vote up
package com.ringcentral.gatling.mongo

import com.ringcentral.gatling.mongo.action.MongoActionBuilder
import com.ringcentral.gatling.mongo.check.MongoCheckSupport
import com.ringcentral.gatling.mongo.command.{MongoCommandBuilder, MongoDslBuilder}
import com.ringcentral.gatling.mongo.feeder.MongoFeederSource
import com.ringcentral.gatling.mongo.protocol.{MongoProtocol, MongoProtocolFieldsBuilder, MongoProtocolUriBuilder}
import io.gatling.core.action.builder.ActionBuilder
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.feeder.RecordSeqFeederBuilder
import io.gatling.core.session.Expression
import play.api.libs.json.JsObject

import scala.concurrent.duration.{FiniteDuration, _}

trait MongoDsl extends MongoCheckSupport {

  def mongo(implicit configuration: GatlingConfiguration) = MongoProtocol

  def mongo(requestName: Expression[String])(implicit configuration: GatlingConfiguration) = new MongoDslBuilder(requestName, configuration)

  def mongoFeeder(url: String, collection: String, query: String, limit: Int = 100, batchSize: Int = 0,
                  connectionTimeout: FiniteDuration = 5.seconds, receiveTimeout: FiniteDuration = 30.seconds,
                  postProcessor: JsObject => Map[String, Any] = MongoFeederSource.defaultPostProcessor): RecordSeqFeederBuilder[Any] =
    RecordSeqFeederBuilder(MongoFeederSource(url, collection, query, limit, batchSize, connectionTimeout, receiveTimeout, postProcessor))

  implicit def mongoProtocolUriBuilder2mongoProtocol(builder: MongoProtocolUriBuilder): MongoProtocol = builder.build()

  implicit def mongoProtocolBuilder2mongoProtocol(builder: MongoProtocolFieldsBuilder): MongoProtocol = builder.build()

  implicit def mongoCommandBuilder2ActionBuilder(commandBuilder: MongoCommandBuilder)(implicit configuration: GatlingConfiguration): ActionBuilder = {
    new MongoActionBuilder(commandBuilder.build(), configuration)
  }
} 
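
A hedged usage sketch of the feeder factory above; the connection string, collection, and query are invented values:

import scala.concurrent.duration._

object FeederSketch extends MongoDsl {
  // Pulls up to 50 documents from the hypothetical 'users' collection.
  val users = mongoFeeder(
    url = "mongodb://localhost:27017/gatling",
    collection = "users",
    query = "{}",
    limit = 50,
    connectionTimeout = 2.seconds
  )
}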
Example 191
Source File: MongoUtils.scala    From gatling-mongodb-protocol   with MIT License 5 votes vote down vote up
package com.ringcentral.gatling.mongo

import reactivemongo.api.MongoConnection.{ParsedURI, URIParsingException}
import reactivemongo.api.{DefaultDB, MongoConnection, MongoDriver}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, Future}
import scala.util.{Failure, Success, Try}

// fixme remove global context
import scala.concurrent.ExecutionContext.Implicits.global

object MongoUtils {

  private val defaultPort: Int = 27017
  private lazy val mongoDriver = new MongoDriver()

  private def establishConnection(uri: ParsedURI, dbName: String, connectionTimeout: FiniteDuration): DefaultDB = {
    Await.result(establishConnection(uri, dbName), connectionTimeout)
  }

  private def establishConnection(uri: ParsedURI, dbName: String): Future[DefaultDB] =
    Try(mongoDriver.connection(uri).database(dbName)) match {
      case Success(db) => db
      case Failure(err) =>
        throw new IllegalStateException(s"Can't connect to database ${printHosts(uri.hosts)}: ${err.getMessage}", err)
    }

  private def printHosts(hosts: List[(String, Int)]): String = hosts.map { case (host, port) => s"$host:$port" }.mkString(", ")

  def connectToDB(uri: ParsedURI, connectionTimeout: FiniteDuration): DefaultDB =
    uri.db match {
      case Some(dbName) => establishConnection(uri, dbName, connectionTimeout)
      case None => throw new IllegalStateException(s"No database name specified in URI '$uri'.")
    }

  def connectToDB(uri: String, connectionTimeout: FiniteDuration): DefaultDB = connectToDB(parseUri(uri), connectionTimeout)

  def parseHosts(hosts: Seq[String]): Seq[(String, Int)] = hosts.map { hostAndPort =>
    hostAndPort.split(':').toList match {
      case host :: port :: Nil =>
        host -> Try(port.toInt).filter(p => p > 0 && p < 65536)
          .getOrElse(throw new URIParsingException(s"Could not parse hosts '$hosts' from URI: invalid port '$port'"))
      case host :: Nil =>
        host -> defaultPort
      case _ => throw new URIParsingException(s"Could not parse hosts from URI: invalid definition '$hosts'")
    }
  }

  def parseUri(uri: String): ParsedURI = {
    MongoConnection.parseURI(uri) match {
      case Success(parsedUri) => parsedUri
      case Failure(err) => throw new IllegalStateException(s"Can't parse database uri. $err")
    }
  }
} 
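
A minimal sketch of calling these helpers, assuming a locally running MongoDB; the URI is illustrative:

import scala.concurrent.duration._

object MongoUtilsSketch extends App {
  // Blocks for at most the given timeout while the connection is established.
  val db = MongoUtils.connectToDB("mongodb://localhost:27017/gatling", 5.seconds)
  println(db.name)
}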
Example 192
Source File: Event.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.stream

import java.io.File
import java.time.ZonedDateTime
import scala.concurrent.Future
import akka.NotUsed
import akka.util.ByteString
import akka.stream.IOResult
import akka.stream.scaladsl.{ Source, FileIO, Framing }

import scala.concurrent.duration.FiniteDuration


case class Event(
  host: String,
  service: String,
  state: State,
  time: ZonedDateTime,
  description: String,
  tag: Option[String] = None, 
  metric: Option[Double] = None
)


sealed trait State
case object Critical extends State
case object Error  extends State
case object Ok extends State
case object Warning extends State

object State {
  def norm(str: String): String = str.toLowerCase
  def norm(state: State): String = norm(state.toString)

  val ok = norm(Ok)
  val warning = norm(Warning)
  val error = norm(Error)
  val critical = norm(Critical)

  def unapply(str: String): Option[State] = {
    val normalized = norm(str)
    if(normalized == norm(Ok)) Some(Ok)
    else if(normalized == norm(Warning)) Some(Warning)
    else if(normalized == norm(Error)) Some(Error)
    else if(normalized == norm(Critical)) Some(Critical)
    else None
  }
}

case class LogReceipt(logId: String, written: Long)
case class ParseError(logId: String, msg: String) 
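
The State extractor gives case-insensitive parsing of state names; a short sketch:

// "critical", "CRITICAL" and "Critical" all normalize to the Critical state.
def parseState(raw: String): Option[State] = raw match {
  case State(s) => Some(s)
  case _        => None
}

// parseState("WARNING") == Some(Warning); parseState("bogus") == None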
Example 193
Source File: GlobalAppConfig.scala    From mqtt-mongo   with MIT License 5 votes vote down vote up
package com.izmailoff.mm.config

import java.util.concurrent.TimeUnit

import com.izmailoff.mm.util.HoconMap
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration.{FiniteDuration, _}

object GlobalAppConfig {

  val config = ConfigFactory.load()

  object Application {

    object MqttBroker {
      private lazy val brokerConf = config.getConfig("application.mqttBroker")
      lazy val url = brokerConf.getString("url")
      lazy val userName = brokerConf.getString("userName")
      lazy val password = brokerConf.getString("password")
      lazy val stashTimeToLive: FiniteDuration =
        brokerConf.getDuration("stashTimeToLive", TimeUnit.SECONDS).seconds
      lazy val stashCapacity = brokerConf.getInt("stashCapacity")
      lazy val reconnectDelayMin: FiniteDuration =
        brokerConf.getDuration("reconnectDelayMin", TimeUnit.SECONDS).seconds
      lazy val reconnectDelayMax: FiniteDuration =
        brokerConf.getDuration("reconnectDelayMax", TimeUnit.SECONDS).seconds
    }

    object Mongo {
      private lazy val mongoConf = config.getConfig("application.mongo")
      lazy val host = mongoConf.getString("host")
      lazy val port = mongoConf.getInt("port")
      lazy val dbName = mongoConf.getString("dbName")
    }

    object MqttMongo {
      private lazy val mqttMongoConf = config.getConfig("application.mqttMongo")
      val getElems: String => Set[String] =
        _.split(";").toList.map(_.trim).filter(_.nonEmpty).toSet
      lazy val topicsToCollectionsMappings: Map[String, Set[String]] =
        HoconMap.getMap(identity(_), getElems,
          mqttMongoConf, "topicsToCollectionsMappings").withDefaultValue(Set.empty)
      lazy val serializationFormat = SerializationFormat.withName(mqttMongoConf.getString("serializationFormat"))
    }

  }

} 
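
The getDuration-plus-seconds conversion used above works with any Typesafe Config instance; a self-contained sketch with an inline config string:

import java.util.concurrent.TimeUnit
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._

object DurationConfigSketch extends App {
  val conf = ConfigFactory.parseString("timeout = 10s")
  // getDuration returns a Long in the requested unit; .seconds lifts it to FiniteDuration.
  val timeout: FiniteDuration = conf.getDuration("timeout", TimeUnit.SECONDS).seconds
  println(timeout) // 10 seconds
}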
Example 194
Source File: RpcUtil.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.rpc.util

import java.net.ServerSocket

import akka.actor.ActorSystem
import org.bitcoins.rpc.client.common.BitcoindRpcClient

import scala.annotation.tailrec
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.duration.DurationInt
import scala.util.{Failure, Random, Success, Try}

abstract class RpcUtil extends AsyncUtil {

  def awaitServerShutdown(
      server: BitcoindRpcClient,
      duration: FiniteDuration = 300.milliseconds,
      maxTries: Int = 50)(implicit system: ActorSystem): Future[Unit] = {
    retryUntilSatisfiedF(() => server.isStoppedF, duration, maxTries)
  }

  
  @tailrec
  final def randomPort: Int = {
    val MAX = 65535 // highest TCP port number
    val MIN = 1025 // just above the privileged range (0-1023)
    val port = MIN + 1 + Random.nextInt(MAX - MIN) // in [1026, 65535]
    val attempt = Try {
      val socket = new ServerSocket(port)
      socket.close()
      socket.getLocalPort
    }

    attempt match {
      case Success(value) => value
      case Failure(_)     => randomPort
    }
  }
}

object RpcUtil extends RpcUtil 
Example 195
Source File: TestRpcUtil.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.testkit.rpc

import akka.actor.ActorSystem
import org.bitcoins.testkit.async.TestAsyncUtil

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

abstract class TestRpcUtil extends org.bitcoins.rpc.util.RpcUtil {

  override protected def retryUntilSatisfiedWithCounter(
      conditionF: () => Future[Boolean],
      duration: FiniteDuration,
      counter: Int,
      maxTries: Int,
      stackTrace: Array[StackTraceElement])(implicit
      system: ActorSystem): Future[Unit] = {
    val retryF = super
      .retryUntilSatisfiedWithCounter(conditionF,
                                      duration,
                                      counter,
                                      maxTries,
                                      stackTrace)

    TestAsyncUtil.transformRetryToTestFailure(retryF)(system.dispatcher)
  }
}

object TestRpcUtil extends TestRpcUtil 
Example 196
Source File: TestAsyncUtil.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.testkit.async

import akka.actor.ActorSystem
import org.scalatest.exceptions.{StackDepthException, TestFailedException}

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.FiniteDuration

abstract class TestAsyncUtil
    extends org.bitcoins.rpc.util.AsyncUtil
    with Serializable {

  override protected def retryUntilSatisfiedWithCounter(
      conditionF: () => Future[Boolean],
      duration: FiniteDuration,
      counter: Int,
      maxTries: Int,
      stackTrace: Array[StackTraceElement])(implicit
      system: ActorSystem): Future[Unit] = {
    val retryF = super
      .retryUntilSatisfiedWithCounter(conditionF,
                                      duration,
                                      counter,
                                      maxTries,
                                      stackTrace)

    TestAsyncUtil.transformRetryToTestFailure(retryF)(system.dispatcher)
  }
}

object TestAsyncUtil extends TestAsyncUtil {

  
  def transformRetryToTestFailure[T](fut: Future[T])(implicit
      ec: ExecutionContext): Future[T] = {
    def transformRetry(err: Throwable): Throwable = err match {
      case retryErr: RpcRetryException =>
        val relevantStackTrace = retryErr.caller.tail
          .dropWhile(elem => retryErr.internalFiles.contains(elem.getFileName))
          .takeWhile(!_.getFileName.contains("TestSuite"))
        val stackElement = relevantStackTrace.head
        val file = stackElement.getFileName
        val path = stackElement.getClassName
        val line = stackElement.getLineNumber
        val pos = org.scalactic.source.Position(file, path, line)
        val newErr = new TestFailedException(
          { _: StackDepthException => Some(retryErr.message) },
          None,
          pos)
        newErr.setStackTrace(relevantStackTrace)
        newErr
      case other => other
    }

    fut.transform(identity, transformRetry)
  }
} 
Example 197
Source File: Config.scala    From scala-steward   with Apache License 2.0 5 votes vote down vote up
package org.scalasteward.core.application

import better.files._
import cats.effect.Sync
import org.http4s.Uri
import org.http4s.Uri.UserInfo
import org.scalasteward.core.application.Cli.EnvVar
import org.scalasteward.core.git.Author
import org.scalasteward.core.util
import org.scalasteward.core.vcs.data.AuthenticatedUser
import scala.concurrent.duration.FiniteDuration
import scala.sys.process.Process


final case class Config(
    workspace: File,
    reposFile: File,
    defaultRepoConfigFile: Option[File],
    gitAuthor: Author,
    vcsType: SupportedVCS,
    vcsApiHost: Uri,
    vcsLogin: String,
    gitAskPass: File,
    signCommits: Boolean,
    whitelistedDirectories: List[String],
    readOnlyDirectories: List[String],
    disableSandbox: Boolean,
    doNotFork: Boolean,
    ignoreOptsFiles: Boolean,
    envVars: List[EnvVar],
    processTimeout: FiniteDuration,
    scalafixMigrations: Option[File],
    groupMigrations: Option[File],
    cacheTtl: FiniteDuration,
    cacheMissDelay: FiniteDuration,
    bitbucketServerUseDefaultReviewers: Boolean
) {
  def vcsUser[F[_]](implicit F: Sync[F]): F[AuthenticatedUser] = {
    val urlWithUser = util.uri.withUserInfo.set(UserInfo(vcsLogin, None))(vcsApiHost).renderString
    val prompt = s"Password for '$urlWithUser': "
    F.delay {
      val password = Process(List(gitAskPass.pathAsString, prompt)).!!.trim
      AuthenticatedUser(vcsLogin, password)
    }
  }
}

object Config {
  def create[F[_]](args: Cli.Args)(implicit F: Sync[F]): F[Config] =
    F.delay {
      Config(
        workspace = args.workspace.toFile,
        reposFile = args.reposFile.toFile,
        defaultRepoConfigFile = args.defaultRepoConf.map(_.toFile),
        gitAuthor = Author(args.gitAuthorName, args.gitAuthorEmail),
        vcsType = args.vcsType,
        vcsApiHost = args.vcsApiHost,
        vcsLogin = args.vcsLogin,
        gitAskPass = args.gitAskPass.toFile,
        signCommits = args.signCommits,
        whitelistedDirectories = args.whitelist,
        readOnlyDirectories = args.readOnly,
        disableSandbox = args.disableSandbox,
        doNotFork = args.doNotFork,
        ignoreOptsFiles = args.ignoreOptsFiles,
        envVars = args.envVar,
        processTimeout = args.processTimeout,
        scalafixMigrations = args.scalafixMigrations.map(_.toFile),
        groupMigrations = args.groupMigrations.map(_.toFile),
        cacheTtl = args.cacheTtl,
        cacheMissDelay = args.cacheMissDelay,
        bitbucketServerUseDefaultReviewers = args.bitbucketServerUseDefaultReviewers
      )
    }
} 
Example 198
Source File: UpdateAlg.scala    From scala-steward   with Apache License 2.0 5 votes vote down vote up
package org.scalasteward.core.update

import cats.{Eval, Monad}
import cats.implicits._
import org.scalasteward.core.coursier.VersionsCache
import org.scalasteward.core.data._
import org.scalasteward.core.repoconfig.RepoConfig
import org.scalasteward.core.util.Nel
import scala.concurrent.duration.FiniteDuration

final class UpdateAlg[F[_]](implicit
    filterAlg: FilterAlg[F],
    versionsCache: VersionsCache[F],
    groupMigrations: GroupMigrations,
    F: Monad[F]
) {
  def findUpdate(
      dependency: Scope[Dependency],
      maxAge: Option[FiniteDuration]
  ): F[Option[Update.Single]] =
    for {
      versions <- versionsCache.getVersions(dependency, maxAge)
      current = Version(dependency.value.version)
      maybeNewerVersions = Nel.fromList(versions.filter(_ > current))
      maybeUpdate0 = maybeNewerVersions.map { newerVersions =>
        Update.Single(CrossDependency(dependency.value), newerVersions.map(_.value))
      }
      migratedUpdate = Eval.later(groupMigrations.findUpdateWithNewerGroupId(dependency.value))
      maybeUpdate1 = maybeUpdate0.orElse(migratedUpdate.value)
    } yield maybeUpdate1

  def findUpdates(
      dependencies: List[Scope.Dependency],
      repoConfig: RepoConfig,
      maxAge: Option[FiniteDuration]
  ): F[List[Update.Single]] = {
    val updates = dependencies.traverseFilter(findUpdate(_, maxAge))
    updates.flatMap(filterAlg.localFilterMany(repoConfig, _))
  }
}

object UpdateAlg {
  def isUpdateFor(update: Update, crossDependency: CrossDependency): Boolean =
    crossDependency.dependencies.forall { dependency =>
      update.groupId === dependency.groupId &&
      update.currentVersion === dependency.version &&
      update.artifactIds.contains_(dependency.artifactId)
    }
} 
Example 199
Source File: VersionsCache.scala    From scala-steward   with Apache License 2.0 5 votes vote down vote up
package org.scalasteward.core.coursier

import cats.Parallel
import cats.implicits._
import io.circe.generic.semiauto.deriveCodec
import io.circe.{Codec, KeyEncoder}
import org.scalasteward.core.coursier.VersionsCache.{Key, Value}
import org.scalasteward.core.data.{Dependency, Resolver, Scope, Version}
import org.scalasteward.core.persistence.KeyValueStore
import org.scalasteward.core.util.{DateTimeAlg, MonadThrowable, Timestamp}
import scala.concurrent.duration.FiniteDuration

final class VersionsCache[F[_]](
    cacheTtl: FiniteDuration,
    store: KeyValueStore[F, Key, Value]
)(implicit
    coursierAlg: CoursierAlg[F],
    dateTimeAlg: DateTimeAlg[F],
    parallel: Parallel[F],
    F: MonadThrowable[F]
) {
  def getVersions(dependency: Scope.Dependency, maxAge: Option[FiniteDuration]): F[List[Version]] =
    dependency.resolvers
      .parFlatTraverse(getVersionsImpl(dependency.value, _, maxAge.getOrElse(cacheTtl)))
      .map(_.distinct.sorted)

  private def getVersionsImpl(
      dependency: Dependency,
      resolver: Resolver,
      maxAge: FiniteDuration
  ): F[List[Version]] =
    dateTimeAlg.currentTimestamp.flatMap { now =>
      val key = Key(dependency, resolver)
      store.get(key).flatMap {
        case Some(value) if value.updatedAt.until(now) <= (maxAge * value.maxAgeFactor) =>
          F.pure(value.versions)
        case maybeValue =>
          coursierAlg.getVersions(dependency, resolver).attempt.flatMap {
            case Right(versions) =>
              store.put(key, Value(now, versions, None)).as(versions)
            case Left(throwable) =>
              val versions = maybeValue.map(_.versions).getOrElse(List.empty)
              store.put(key, Value(now, versions, Some(throwable.toString))).as(versions)
          }
      }
    }
}

object VersionsCache {
  final case class Key(dependency: Dependency, resolver: Resolver) {
    override val toString: String =
      resolver.path + "/" +
        dependency.groupId.value.replace('.', '/') + "/" +
        dependency.artifactId.crossName +
        dependency.scalaVersion.fold("")("_" + _.value) +
        dependency.sbtVersion.fold("")("_" + _.value)
  }

  object Key {
    implicit val keyKeyEncoder: KeyEncoder[Key] =
      KeyEncoder.instance(_.toString)
  }

  final case class Value(
      updatedAt: Timestamp,
      versions: List[Version],
      maybeError: Option[String]
  ) {
    def maxAgeFactor: Long =
      if (maybeError.nonEmpty && versions.isEmpty) 4L else 1L
  }

  object Value {
    implicit val valueCodec: Codec[Value] =
      deriveCodec
  }
} 
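
The maxAgeFactor quadruples the effective TTL for entries that have only ever failed, backing off retries for unresolvable artifacts. A short sketch of the two cases, with invented values:

// An entry with an error and no versions is re-resolved only after 4 * maxAge ...
Value(Timestamp(0L), List.empty, Some("not found")).maxAgeFactor // 4
// ... while a healthy entry uses the plain TTL.
Value(Timestamp(0L), List(Version("1.0.0")), None).maxAgeFactor // 1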
Example 200
Source File: Timestamp.scala    From scala-steward   with Apache License 2.0 5 votes vote down vote up
package org.scalasteward.core.util

import cats.Order
import cats.implicits._
import io.circe.Codec
import io.circe.generic.extras.semiauto.deriveUnwrappedCodec
import java.time.{Instant, LocalDateTime, ZoneOffset}
import java.util.concurrent.TimeUnit
import scala.concurrent.duration.FiniteDuration

final case class Timestamp(millis: Long) {
  def +(finiteDuration: FiniteDuration): Timestamp =
    Timestamp(millis + finiteDuration.toMillis)

  def toLocalDateTime: LocalDateTime =
    LocalDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneOffset.UTC)

  def until(that: Timestamp): FiniteDuration =
    FiniteDuration(that.millis - millis, TimeUnit.MILLISECONDS)
}

object Timestamp {
  def fromLocalDateTime(ldt: LocalDateTime): Timestamp =
    Timestamp(ldt.toInstant(ZoneOffset.UTC).toEpochMilli)

  implicit val timestampCodec: Codec[Timestamp] =
    deriveUnwrappedCodec

  implicit val timestampOrder: Order[Timestamp] =
    Order.by(_.millis)
}
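
A short sketch of the Timestamp arithmetic, using only the operations defined above:

import scala.concurrent.duration._

val t0 = Timestamp(1000L)
val t1 = t0 + 30.seconds // Timestamp(31000)
val gap = t0.until(t1)   // FiniteDuration(30000, MILLISECONDS), i.e. 30 seconds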