scala.concurrent.ExecutionContextExecutor Scala Examples

The following examples show how to use scala.concurrent.ExecutionContextExecutor. Each example is taken from an open-source project; the header above each listing names the source file, the project it comes from, and its license.
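Before the project examples, here is a minimal, self-contained sketch of the pattern most of them share: an ExecutionContextExecutor (typically an ActorSystem dispatcher or ExecutionContext.global) is made implicitly available and then used both as a Scala ExecutionContext for Futures and as a plain java.util.concurrent.Executor. The object and value names below are illustrative, not taken from any of the projects.

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future}

object MinimalExecutorExample extends App {
  // ExecutionContext.global is already an ExecutionContextExecutor, so the same value
  // works as a Scala ExecutionContext and as a java.util.concurrent.Executor.
  implicit val executor: ExecutionContextExecutor = ExecutionContext.global

  // Futures pick up the implicit executor for running their bodies and callbacks.
  val result: Future[Int] = Future(21 * 2)
  result.foreach(value => println(s"computed $value"))

  // Because it is also an Executor, plain Runnables can be submitted directly.
  executor.execute(() => println("runnable executed on the same thread pool"))

  Thread.sleep(500) // crude wait so the daemon pool can print before the JVM exits
}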
Example 1
Source File: ModelService.scala    From reactive-machine-learning-systems   with MIT License
package com.reactivemachinelearning

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshalling.ToResponseMarshallable
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.stream.{ActorMaterializer, Materializer}
//import spray.json._
import spray.json.DefaultJsonProtocol

import scala.concurrent.{ExecutionContextExecutor, Future}

case class Prediction(id: Long, timestamp: Long, value: Double)

trait Protocols extends DefaultJsonProtocol {
  implicit val ipInfoFormat = jsonFormat3(Prediction.apply)
}

trait Service extends Protocols {
  implicit val system: ActorSystem

  implicit def executor: ExecutionContextExecutor

  implicit val materializer: Materializer

  val logger: LoggingAdapter

//  private def parseFeatures(features: String): Map[Long, Double] = {
//    features.parseJson.convertTo[Map[Long, Double]]
//  }

  def predict(features: String): Future[Prediction] = {
    Future(Prediction(123, 456, 0.5))
  }

  val routes = {
    logRequestResult("predictive-service") {
      pathPrefix("ip") {
        (get & path(Segment)) { features =>
          complete {
            predict(features).map[ToResponseMarshallable] {
//              case prediction: Prediction => prediction
              case _ => BadRequest
            }
          }
        }
      }
    }
  }
}

object PredictiveService extends App with Service {
  override implicit val system = ActorSystem()
  override implicit val executor = system.dispatcher
  override implicit val materializer = ActorMaterializer()

  override val logger = Logging(system, getClass)

  Http().bindAndHandle(routes, "0.0.0.0", 9000)
} 
Example 2
Source File: GrpcHandler.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.storage.backends.cassandra.services

import com.expedia.www.haystack.commons.metrics.MetricsSupport
import com.expedia.www.haystack.trace.storage.backends.cassandra.services.GrpcHandler._
import com.google.protobuf.GeneratedMessageV3
import io.grpc.Status
import io.grpc.stub.StreamObserver
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.{ExecutionContextExecutor, Future}
import scala.util.{Failure, Success}

object GrpcHandler {
  protected val LOGGER: Logger = LoggerFactory.getLogger(classOf[GrpcHandler])
}



class GrpcHandler(operationName: String)(implicit val executor: ExecutionContextExecutor) extends MetricsSupport {
  private val metricFriendlyOperationName = operationName.replace('/', '.')
  private val timer = metricRegistry.timer(metricFriendlyOperationName)
  private val failureMeter = metricRegistry.meter(s"$metricFriendlyOperationName.failures")

  def handle[Rs](request: GeneratedMessageV3, responseObserver: StreamObserver[Rs])(op: => Future[Rs]): Unit = {
    val time = timer.time()
    op onComplete {
      case Success(response) =>
        responseObserver.onNext(response)
        responseObserver.onCompleted()
        time.stop()
        LOGGER.debug(s"service invocation for operation=$operationName and request=${request.toString} completed successfully")

      case Failure(ex) =>
        responseObserver.onError(Status.fromThrowable(ex).asRuntimeException())
        failureMeter.mark()
        time.stop()
        LOGGER.debug(s"service invocation for operation=$operationName and request=${request.toString} failed with error", ex)
    }
  }
} 
Example 3
Source File: BankAccountReadModelUseCase.scala    From akka-ddd-cqrs-es-example   with MIT License
package com.github.j5ik2o.bank.useCase

import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Flow, Keep, Sink, Source, SourceQueueWithComplete }
import akka.stream.{ ActorMaterializer, OverflowStrategy }
import akka.{ Done, NotUsed }
import com.github.j5ik2o.bank.domain.model._
import com.github.j5ik2o.bank.useCase.BankAccountAggregateUseCase.Protocol.{
  ResolveBankAccountEventsRequest,
  ResolveBankAccountEventsResponse
}
import com.github.j5ik2o.bank.useCase.port.{ BankAccountReadModelFlows, JournalReader }
import pureconfig._

import scala.concurrent.{ ExecutionContext, ExecutionContextExecutor, Future, Promise }

class BankAccountReadModelUseCase(bankAccountReadModelFlows: BankAccountReadModelFlows, journalReader: JournalReader)(
    implicit val system: ActorSystem
) extends UseCaseSupport {

  import UseCaseSupport._

  private val config = loadConfigOrThrow[BankAccountAggregateUseCaseConfig]("bank.use-case.bank-account-use-case")

  private val bufferSize: Int = config.bufferSize

  private implicit val mat: ActorMaterializer       = ActorMaterializer()
  private implicit val ec: ExecutionContextExecutor = system.dispatcher

  def resolveBankAccountEventsById(
      request: ResolveBankAccountEventsRequest
  )(implicit ec: ExecutionContext): Future[ResolveBankAccountEventsResponse] =
    offerToQueue(resolveBankAccountEventQueue)(request, Promise())

  private lazy val resolveBankAccountEventQueue
    : SourceQueueWithComplete[(ResolveBankAccountEventsRequest, Promise[ResolveBankAccountEventsResponse])] =
    Source
      .queue[(ResolveBankAccountEventsRequest, Promise[ResolveBankAccountEventsResponse])](bufferSize,
                                                                                           OverflowStrategy.dropNew)
      .via(bankAccountReadModelFlows.resolveBankAccountEventByIdFlow.zipPromise)
      .toMat(completePromiseSink)(Keep.left)
      .run()

  private val projectionFlow: Flow[(BankAccountEvent, Long), Int, NotUsed] =
    Flow[(BankAccountEvent, Long)].flatMapConcat {
      case (event: BankAccountOpened, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.name.value, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.openBankAccountFlow)
      case (event: BankAccountEventUpdated, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.name.value, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.updateAccountFlow)
      case (event: BankAccountDeposited, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.deposit, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.depositBankAccountFlow)
      case (event: BankAccountWithdrawn, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.withdraw, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.withdrawBankAccountFlow)
      case (event: BankAccountClosed, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.closeBankAccountFlow)
    }

  def execute(): Future[Done] = {
    bankAccountReadModelFlows.resolveLastSeqNrSource
      .flatMapConcat { lastSeqNr =>
        journalReader.eventsByTagSource(classOf[BankAccountEvent].getName, lastSeqNr + 1)
      }
      .map { eventBody =>
        (eventBody.event.asInstanceOf[BankAccountEvent], eventBody.sequenceNr)
      }
      .via(projectionFlow)
      .toMat(Sink.ignore)(Keep.right)
      .run()

  }
} 
Example 4
Source File: AdminController.scala    From recogito2   with Apache License 2.0
package controllers.admin

import com.mohiva.play.silhouette.api.Silhouette
import controllers.{BaseAuthController, Security}
import javax.inject.{Inject, Singleton}
import services.annotation.AnnotationService
import services.contribution.ContributionService
import services.document.{DocumentService, DocumentToJSON}
import services.user.UserService
import services.user.Roles._
import services.visit.VisitService
import org.webjars.play.WebJarsUtil
import play.api.Configuration
import play.api.libs.json.Json
import play.api.mvc.ControllerComponents
import scala.concurrent.ExecutionContextExecutor
import controllers.HasPrettyPrintJSON

@Singleton
class AdminController @Inject() (
  val components: ControllerComponents, 
  val config: Configuration,
  val annotations: AnnotationService,
  val contributions: ContributionService,
  val documents: DocumentService,
  val users: UserService,
  val visits: VisitService,
  val silhouette: Silhouette[Security.Env],
  implicit val ctx: ExecutionContextExecutor,
  implicit val webJarsUtil: WebJarsUtil
) extends BaseAuthController(components, config, documents, users) with HasPrettyPrintJSON {
        
  def index = silhouette.SecuredAction(Security.WithRole(Admin)) { implicit request =>
    Ok(views.html.admin.activity())
  }
  
  def getStats() = silhouette.SecuredAction(Security.WithRole(Admin)).async { implicit request =>
    // DocumentRecord JSON serialization
    import DocumentToJSON._
    
    val fRecentContributions = contributions.getMostRecent(10)
    val fSystemStats = contributions.getSystemStats()
    val fTotalAnnotations = annotations.countTotal()
    val fTotalVisits = visits.countTotal()
    val fTotalUsers = users.countUsers()

    val f = for {
      recentContributions <- fRecentContributions
      recentAffectedDocuments <- documents.getDocumentRecordsById(recentContributions.map(_.affectsItem.documentId))
      stats <- fSystemStats
      annotationCount <- fTotalAnnotations
      visitCount <- fTotalVisits
      userCount <- fTotalUsers
    } yield (recentContributions, recentAffectedDocuments, stats, annotationCount, visitCount, userCount)
    
    f.map { case (recentContributions, recentAffectedDocuments, stats, annotationCount, visitCount, userCount) =>
      val response =
        Json.obj(
          "recent_contributions" -> recentContributions,
          "recent_documents" -> recentAffectedDocuments,
          "contribution_stats" -> stats,
          "total_annotations" -> annotationCount,
          "total_visits" -> visitCount,
          "total_users" -> userCount)
      jsonOk(response)
    }
  }

  def getSignupHistory = silhouette.SecuredAction(Security.WithRole(Admin)).async { implicit request =>
    users.getSignupsOverTime().map { history =>
      val json = history.map(t => Json.obj("timestamp" -> t._1, "count" -> t._2))
      jsonOk(Json.toJson(json))
    }
  }
  
} 
Example 5
Source File: ExecutionContextScheduler.scala    From reactor-scala-extensions   with Apache License 2.0
package reactor.core.scala.scheduler

import java.util.concurrent.Executor

import reactor.core.Disposable
import reactor.core.scheduler.Scheduler.Worker
import reactor.core.scheduler.{Scheduler, Schedulers}

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, ExecutionContextExecutorService}

class ExecutionContextScheduler private(val scheduler: Scheduler) extends Scheduler {
  override def schedule(task: Runnable): Disposable = scheduler.schedule(task)

  override def createWorker(): Worker = scheduler.createWorker()
}


object ExecutionContextScheduler {
  def apply(executionContext: ExecutionContext): ExecutionContextScheduler = {
    executionContext match {
      case eces: ExecutionContextExecutorService => new ExecutionContextScheduler(Schedulers.fromExecutorService(eces))
      case ece: ExecutionContextExecutor => new ExecutionContextScheduler(Schedulers.fromExecutor(ece))
      case _ => new ExecutionContextScheduler(Schedulers.fromExecutor(new Executor {
        override def execute(command: Runnable): Unit = executionContext.execute(command)
      }))
    }
  }
} 
Example 6
Source File: AkkaHttpClient.scala    From sttp   with Apache License 2.0
package sttp.client.akkahttp

import akka.actor.ActorSystem
import akka.event.LoggingAdapter
import akka.http.scaladsl.model.ws.{Message, WebSocketRequest, WebSocketUpgradeResponse}
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.http.scaladsl.server.{ExceptionHandler, RejectionHandler, Route, RoutingLog}
import akka.http.scaladsl.settings.{ClientConnectionSettings, ConnectionPoolSettings, ParserSettings, RoutingSettings}
import akka.http.scaladsl.{Http, HttpsConnectionContext}
import akka.stream.Materializer
import akka.stream.scaladsl.Flow

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future}

trait AkkaHttpClient {
  def singleRequest(
      request: HttpRequest,
      settings: ConnectionPoolSettings
  ): Future[HttpResponse]

  def singleWebsocketRequest[WS_RESULT](
      request: WebSocketRequest,
      clientFlow: Flow[Message, Message, WS_RESULT],
      settings: ClientConnectionSettings
  )(implicit ec: ExecutionContext, mat: Materializer): Future[(WebSocketUpgradeResponse, WS_RESULT)]
}

object AkkaHttpClient {
  def default(
      system: ActorSystem,
      connectionContext: Option[HttpsConnectionContext],
      customLog: Option[LoggingAdapter]
  ): AkkaHttpClient =
    new AkkaHttpClient {
      private val http = Http()(system)

      override def singleRequest(
          request: HttpRequest,
          settings: ConnectionPoolSettings
      ): Future[HttpResponse] = {
        http.singleRequest(
          request,
          connectionContext.getOrElse(http.defaultClientHttpsContext),
          settings,
          customLog.getOrElse(system.log)
        )
      }

      override def singleWebsocketRequest[WS_RESULT](
          request: WebSocketRequest,
          clientFlow: Flow[Message, Message, WS_RESULT],
          settings: ClientConnectionSettings
      )(implicit ec: ExecutionContext, mat: Materializer): Future[(WebSocketUpgradeResponse, WS_RESULT)] = {
        val (wsResponse, wsResult) = http.singleWebSocketRequest(
          request,
          clientFlow,
          connectionContext.getOrElse(http.defaultClientHttpsContext),
          None,
          settings,
          customLog.getOrElse(system.log)
        )
        wsResponse.map((_, wsResult))
      }
    }

  def stubFromAsyncHandler(run: HttpRequest => Future[HttpResponse]): AkkaHttpClient =
    new AkkaHttpClient {
      def singleRequest(request: HttpRequest, settings: ConnectionPoolSettings): Future[HttpResponse] =
        run(request)

      override def singleWebsocketRequest[WS_RESULT](
          request: WebSocketRequest,
          clientFlow: Flow[Message, Message, WS_RESULT],
          settings: ClientConnectionSettings
      )(implicit ec: ExecutionContext, mat: Materializer): Future[(WebSocketUpgradeResponse, WS_RESULT)] =
        Future.failed(new RuntimeException("Websockets are not supported"))
    }

  def stubFromRoute(route: Route)(implicit
      routingSettings: RoutingSettings,
      parserSettings: ParserSettings,
      materializer: Materializer,
      routingLog: RoutingLog,
      executionContext: ExecutionContextExecutor = null,
      rejectionHandler: RejectionHandler = RejectionHandler.default,
      exceptionHandler: ExceptionHandler = null
  ): AkkaHttpClient = stubFromAsyncHandler(Route.asyncHandler(route))
} 
Example 7
Source File: ServerSettingsTemplate.scala    From akka-http-circe-json-template   with Apache License 2.0
package com.vitorsvieira.http.config

import akka.actor.ActorSystem
import akka.event.{ LogSource, Logging }
import akka.stream.ActorMaterializer
import com.typesafe.config.{ Config, ConfigFactory }

import scala.concurrent.ExecutionContextExecutor

trait ServerSettingsTemplate {

  lazy private val config: Config = ConfigFactory.load()
  private val httpConfig: Config = config.getConfig("http")
  val httpInterface: String = httpConfig.getString("interface")
  val httpPort: Int = httpConfig.getInt("port")

  implicit val actorSystem: ActorSystem = ActorSystem("akka-http-circe-json")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val executionContext: ExecutionContextExecutor = actorSystem.dispatcher
  private implicit val logSource: LogSource[ServerSettingsTemplate] = (t: ServerSettingsTemplate) ⇒ t.getClass.getSimpleName
  private def logger(implicit logSource: LogSource[_ <: ServerSettingsTemplate]) = Logging(actorSystem, this.getClass)

  implicit val log = logger
}

object ServerSettingsTemplate extends ServerSettingsTemplate 
Example 8
Source File: PulsarSinkGraphStage.scala    From pulsar4s   with Apache License 2.0
package com.sksamuel.pulsar4s.akka.streams

import akka.Done
import akka.stream.stage.{AsyncCallback, GraphStageLogic, GraphStageWithMaterializedValue, InHandler}
import akka.stream.{Attributes, Inlet, SinkShape}
import com.sksamuel.exts.Logging
import com.sksamuel.pulsar4s.{Producer, ProducerMessage}

import scala.concurrent.{ExecutionContextExecutor, Future, Promise}
import scala.util.{Failure, Success}

class PulsarSinkGraphStage[T](createFn: () => Producer[T])
  extends GraphStageWithMaterializedValue[SinkShape[ProducerMessage[T]], Future[Done]]
    with Logging {

  private val in = Inlet.create[ProducerMessage[T]]("pulsar.in")
  override def shape: SinkShape[ProducerMessage[T]] = SinkShape.of(in)

  override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[Done]) = {

    val promise = Promise[Done]()

    val logic: GraphStageLogic = new GraphStageLogic(shape) with InHandler {
      setHandler(in, this)

      implicit def context: ExecutionContextExecutor = super.materializer.executionContext

      var producer: Producer[T] = _
      var next: AsyncCallback[ProducerMessage[T]] = _
      var error: Throwable = _

      override def preStart(): Unit = {
        producer = createFn()
        next = getAsyncCallback { _ => pull(in) }
        pull(in)
      }

      override def onPush(): Unit = {
        try {
          val t = grab(in)
          logger.debug(s"Sending message $t")
          producer.sendAsync(t).onComplete {
            case Success(_) => next.invoke(t)
            case Failure(e) =>
              logger.error("Failing pulsar sink stage", e)
              failStage(e)
          }
        } catch {
          case e: Throwable =>
            logger.error("Failing pulsar sink stage", e)
            failStage(e)
        }
      }

      override def postStop(): Unit = {
        logger.debug("Graph stage stopping; closing producer")
        producer.flush()
        producer.close()
      }

      override def onUpstreamFailure(ex: Throwable): Unit = {
        promise.tryFailure(ex)
      }

      override def onUpstreamFinish(): Unit = {
        promise.trySuccess(Done)
      }
    }

    (logic, promise.future)
  }
} 
Example 9
Source File: PulsarMultiSinkGraphStage.scala    From pulsar4s   with Apache License 2.0
package com.sksamuel.pulsar4s.akka.streams

import akka.Done
import akka.stream.stage.{AsyncCallback, GraphStageLogic, GraphStageWithMaterializedValue, InHandler}
import akka.stream.{Attributes, Inlet, SinkShape}
import com.sksamuel.exts.Logging
import com.sksamuel.pulsar4s.{Producer, ProducerMessage, Topic}

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContextExecutor, Future, Promise}
import scala.util.{Failure, Success}

class PulsarMultiSinkGraphStage[T](createFn: Topic => Producer[T], initTopics: Set[Topic] = Set.empty)
  extends GraphStageWithMaterializedValue[SinkShape[(Topic, ProducerMessage[T])], Future[Done]]
    with Logging {

  private val in = Inlet.create[(Topic, ProducerMessage[T])]("pulsar.in")

  override def shape: SinkShape[(Topic, ProducerMessage[T])] = SinkShape.of(in)

  override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[Done]) = {

    val promise = Promise[Done]()

    val logic: GraphStageLogic = new GraphStageLogic(shape) with InHandler {
      setHandler(in, this)

      implicit def context: ExecutionContextExecutor = super.materializer.executionContext

      var producers: Map[Topic, Producer[T]] = _
      var next: AsyncCallback[(Topic, ProducerMessage[T])] = _
      var error: Throwable = _

      override def preStart(): Unit = {
        producers = initTopics.map(t => t -> createFn(t)).toMap
        next = getAsyncCallback { _ => pull(in) }
        pull(in)
      }

      private def getProducer(topic: Topic): Producer[T] =
        producers.get(topic) match {
          case Some(p) => p
          case None =>
            logger.debug(s"creating new producer for topic $topic")
            val producer = createFn(topic)
            producers += topic -> producer
            producer
        }

      override def onPush(): Unit = {
        try {
          val (topic, message) = grab(in)
          logger.debug(s"Sending message $message to $topic")
          val producer = getProducer(topic)
          producer.sendAsync(message).onComplete {
            case Success(_) => next.invoke(topic -> message)
            case Failure(e) =>
              logger.error("Failing pulsar sink stage", e)
              failStage(e)
          }
        } catch {
          case e: Throwable =>
            logger.error("Failing pulsar sink stage", e)
            failStage(e)
        }
      }

      override def postStop(): Unit = {
        logger.debug("Graph stage stopping; closing producers")
        val fs = producers.flatMap { case (_, p) =>
          Seq(
            p.flushAsync,
            p.closeAsync
          )
        }
        Await.ready(Future.sequence(fs), 15.seconds)
      }

      override def onUpstreamFailure(ex: Throwable): Unit = {
        promise.tryFailure(ex)
      }

      override def onUpstreamFinish(): Unit = {
        promise.trySuccess(Done)
      }
    }

    (logic, promise.future)
  }

} 
Example 10
Source File: AkkaHttpService.scala    From akka-http-spring-boot   with Apache License 2.0
package com.github.scalaspring.akka.http

import akka.actor.ActorSystem
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.stream.Materializer
import org.springframework.beans.BeanInstantiationException
import org.springframework.context.annotation.Bean
import org.springframework.util.ClassUtils

import scala.concurrent.ExecutionContextExecutor


trait AkkaHttpService {

  protected implicit val system: ActorSystem
  protected implicit def executor: ExecutionContextExecutor
  protected implicit def materializer: Materializer

  def route: Route = reject

  @Bean(name = Array("route"))
  def akkaHttpRoute: Route = {
    if (route == reject) throw new BeanInstantiationException(classOf[Route],
      s"Please supply a route definition by overriding the route function in ${ClassUtils.getUserClass(this).getName}. " +
        s"See the example in the documentation for ${classOf[AkkaHttpService].getName}.")
    else route
  }

} 
Example 11
Source File: CassandraTraceRecordWriter.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.storage.backends.cassandra.store

import java.util.concurrent.atomic.AtomicInteger

import com.expedia.open.tracing.backend.TraceRecord
import com.expedia.www.haystack.commons.metrics.MetricsSupport
import com.expedia.www.haystack.commons.retries.RetryOperation._
import com.expedia.www.haystack.trace.storage.backends.cassandra.client.CassandraSession
import com.expedia.www.haystack.trace.storage.backends.cassandra.config.entities.CassandraConfiguration
import com.expedia.www.haystack.trace.storage.backends.cassandra.metrics.AppMetricNames
import org.slf4j.LoggerFactory

import scala.concurrent.{ExecutionContextExecutor, Future, Promise}
import scala.util.{Failure, Success}

class CassandraTraceRecordWriter(cassandra: CassandraSession,
                                 config: CassandraConfiguration)(implicit val dispatcher: ExecutionContextExecutor)
  extends MetricsSupport {

  private val LOGGER = LoggerFactory.getLogger(classOf[CassandraTraceRecordWriter])
  private lazy val writeTimer = metricRegistry.timer(AppMetricNames.CASSANDRA_WRITE_TIME)
  private lazy val writeFailures = metricRegistry.meter(AppMetricNames.CASSANDRA_WRITE_FAILURE)

  cassandra.ensureKeyspace(config.clientConfig.tracesKeyspace)
  private val spanInsertPreparedStmt = cassandra.createSpanInsertPreparedStatement(config.clientConfig.tracesKeyspace)

  private def execute(record: TraceRecord): Future[Unit] = {

    val promise = Promise[Unit]
    // execute the request async with retry
    withRetryBackoff(retryCallback => {
      val timer = writeTimer.time()

      // prepare the statement
      val statement = cassandra.newTraceInsertBoundStatement(record.getTraceId,
        record.getSpans.toByteArray,
        config.writeConsistencyLevel(retryCallback.lastError()),
        spanInsertPreparedStmt)

      val asyncResult = cassandra.executeAsync(statement)
      asyncResult.addListener(new CassandraTraceRecordWriteResultListener(asyncResult, timer, retryCallback), dispatcher)
    },
      config.retryConfig,
      onSuccess = (_: Any) => promise.success(),
      onFailure = ex => {
        writeFailures.mark()
        LOGGER.error(s"Fail to write to cassandra after ${config.retryConfig.maxRetries} retry attempts for ${record.getTraceId}", ex)
        promise.failure(ex)
      })
    promise.future
  }

  def writeTraceRecords(traceRecords: List[TraceRecord]): Future[Unit] = {
    // complete the returned future once every record write has finished
    val writableRecordsLatch = new AtomicInteger(traceRecords.size)
    val promise = Promise[Unit]

    traceRecords.foreach(record => {
      execute(record).onComplete {
        case Success(_) => if (writableRecordsLatch.decrementAndGet() == 0) {
          promise.success()
        }
        case Failure(ex) =>
          //TODO: We fail the response only if the last cassandra write fails, ideally we should be failing if any of the cassandra writes fail
          if (writableRecordsLatch.decrementAndGet() == 0) {
            promise.failure(ex)
          }
      }
    })
    promise.future

  }
} 
Example 12
Source File: CassandraTraceRecordReader.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.storage.backends.cassandra.store

import com.expedia.open.tracing.backend.TraceRecord
import com.expedia.www.haystack.commons.metrics.MetricsSupport
import com.expedia.www.haystack.trace.storage.backends.cassandra.client.CassandraSession
import com.expedia.www.haystack.trace.storage.backends.cassandra.config.entities.ClientConfiguration
import com.expedia.www.haystack.trace.storage.backends.cassandra.metrics.AppMetricNames
import org.slf4j.LoggerFactory

import scala.concurrent.{ExecutionContextExecutor, Future, Promise}

class CassandraTraceRecordReader(cassandra: CassandraSession, config: ClientConfiguration)
                                (implicit val dispatcher: ExecutionContextExecutor) extends MetricsSupport {
  private val LOGGER = LoggerFactory.getLogger(classOf[CassandraTraceRecordReader])

  private lazy val readTimer = metricRegistry.timer(AppMetricNames.CASSANDRA_READ_TIME)
  private lazy val readFailures = metricRegistry.meter(AppMetricNames.CASSANDRA_READ_FAILURES)

  def readTraceRecords(traceIds: List[String]): Future[Seq[TraceRecord]] = {
    val timer = readTimer.time()
    val promise = Promise[Seq[TraceRecord]]

    try {
      val statement = cassandra.newSelectRawTracesBoundStatement(traceIds)
      val asyncResult = cassandra.executeAsync(statement)
      asyncResult.addListener(new CassandraTraceRecordReadResultListener(asyncResult, timer, readFailures, promise), dispatcher)
      promise.future
    } catch {
      case ex: Exception =>
        readFailures.mark()
        timer.stop()
        LOGGER.error("Failed to read raw traces with exception", ex)
        Future.failed(ex)
    }
  }
} 
Example 13
Source File: Rest.scala    From swagger-akka-http-sample   with Apache License 2.0
package com.example.akka

import akka.actor.{ActorSystem, Props}
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.RouteConcatenation
import ch.megard.akka.http.cors.scaladsl.CorsDirectives.cors
import com.example.akka.add.{AddActor, AddService}
import com.example.akka.addoption.{AddOptionActor, AddOptionService}
import com.example.akka.echoenum.EchoEnumService
import com.example.akka.echoenumeratum.EchoEnumeratumService
import com.example.akka.echolist.EchoListService
import com.example.akka.hello.{HelloActor, HelloService}
import com.example.akka.swagger.SwaggerDocService

import scala.concurrent.ExecutionContextExecutor

object Rest extends App with RouteConcatenation {
  implicit val system: ActorSystem = ActorSystem("akka-http-sample")
  sys.addShutdownHook(system.terminate())

  implicit val executionContext: ExecutionContextExecutor = system.dispatcher

  val add = system.actorOf(Props[AddActor])
  val addOption = system.actorOf(Props[AddOptionActor])
  val hello = system.actorOf(Props[HelloActor])
  val routes =
    cors() (new AddService(add).route ~
      new AddOptionService(addOption).route ~
      new HelloService(hello).route ~
      EchoEnumService.route ~
      EchoEnumeratumService.route ~
      EchoListService.route ~
      SwaggerDocService.routes)
  Http().bindAndHandle(routes, "0.0.0.0", 12345)
} 
Example 14
Source File: SpansPersistenceService.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.storage.backends.cassandra.services

import com.expedia.open.tracing.backend.WriteSpansResponse.ResultCode
import com.expedia.open.tracing.backend._
import com.expedia.www.haystack.trace.storage.backends.cassandra.store.{CassandraTraceRecordReader, CassandraTraceRecordWriter}
import io.grpc.stub.StreamObserver

import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContextExecutor

class SpansPersistenceService(reader: CassandraTraceRecordReader,
                              writer: CassandraTraceRecordWriter)
                             (implicit val executor: ExecutionContextExecutor) extends StorageBackendGrpc.StorageBackendImplBase {

  private val handleReadSpansResponse = new GrpcHandler(StorageBackendGrpc.METHOD_READ_SPANS.getFullMethodName)
  private val handleWriteSpansResponse = new GrpcHandler(StorageBackendGrpc.METHOD_WRITE_SPANS.getFullMethodName)

  override def writeSpans(request: WriteSpansRequest, responseObserver: StreamObserver[WriteSpansResponse]): Unit = {
    handleWriteSpansResponse.handle(request, responseObserver) {
      writer.writeTraceRecords(request.getRecordsList.asScala.toList) map (_ =>
        WriteSpansResponse.newBuilder().setCode(ResultCode.SUCCESS).build())
    }
  }

  
  override def readSpans(request: ReadSpansRequest, responseObserver: StreamObserver[ReadSpansResponse]): Unit = {

    handleReadSpansResponse.handle(request, responseObserver) {
      reader.readTraceRecords(request.getTraceIdsList.iterator().asScala.toList).map {
        records => {
          ReadSpansResponse.newBuilder()
            .addAllRecords(records.asJava)
            .build()
        }
      }
    }
  }
} 
Example 15
Source File: InMemoryTraceRecordStore.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.storage.backends.memory.store

import com.expedia.open.tracing.backend.TraceRecord
import com.expedia.www.haystack.commons.metrics.MetricsSupport
import org.slf4j.LoggerFactory

import scala.concurrent.ExecutionContextExecutor

class InMemoryTraceRecordStore()
                              (implicit val dispatcher: ExecutionContextExecutor) extends MetricsSupport with AutoCloseable {
  private val LOGGER = LoggerFactory.getLogger(classOf[InMemoryTraceRecordStore])


  private var inMemoryTraceRecords = Map[String, List[TraceRecord]]()

  def readTraceRecords(traceIds: List[String]): Seq[TraceRecord] = {

    try {
      traceIds.flatMap(traceId => {
        inMemoryTraceRecords.getOrElse(traceId, List())
      })
    } catch {
      case ex: Exception =>
        LOGGER.error("Failed to read raw traces with exception", ex)
        List()
    }
  }

  
  def writeTraceRecords(traceRecords: List[TraceRecord]): Unit = {


    traceRecords.foreach(record => {

      try {
        val existingRecords: List[TraceRecord] = inMemoryTraceRecords.getOrElse(record.getTraceId, List())
        val records = record :: existingRecords
        inMemoryTraceRecords = inMemoryTraceRecords + (record.getTraceId -> records)
      } catch {
        case ex: Exception =>
          LOGGER.error("Fail to write the spans to memory with exception", ex)

      }
    })
  }

  override def close(): Unit = ()
} 
Example 16
Source File: SpansPersistenceService.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.storage.backends.memory.services

import com.expedia.open.tracing.backend.WriteSpansResponse.ResultCode
import com.expedia.open.tracing.backend._
import com.expedia.www.haystack.trace.storage.backends.memory.store.InMemoryTraceRecordStore
import io.grpc.stub.StreamObserver

import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContextExecutor

class SpansPersistenceService(store: InMemoryTraceRecordStore)
                             (implicit val executor: ExecutionContextExecutor) extends StorageBackendGrpc.StorageBackendImplBase {


  override def writeSpans(request: WriteSpansRequest, responseObserver: StreamObserver[WriteSpansResponse]): Unit = {
    store.writeTraceRecords(request.getRecordsList.asScala.toList)
    val response =  WriteSpansResponse.newBuilder().setCode(
      ResultCode.SUCCESS
    ).build()
    responseObserver.onNext(response)
    responseObserver.onCompleted()
  }

  
  override def readSpans(request: ReadSpansRequest, responseObserver: StreamObserver[ReadSpansResponse]): Unit = {

    val records = store.readTraceRecords(request.getTraceIdsList.iterator().asScala.toList)
    val response = ReadSpansResponse.newBuilder()
      .addAllRecords(records.asJava)
      .build()
    responseObserver.onNext(response)
    responseObserver.onCompleted()
  }
} 
Example 17
Source File: GrpcHandler.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.reader.services

import com.expedia.www.haystack.commons.metrics.MetricsSupport
import com.google.protobuf.GeneratedMessageV3
import io.grpc.Status
import io.grpc.stub.StreamObserver
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.{ExecutionContextExecutor, Future}
import scala.util.{Failure, Success}

object GrpcHandler {
  protected val LOGGER: Logger = LoggerFactory.getLogger(classOf[GrpcHandler])
}



class GrpcHandler(operationName: String)(implicit val executor: ExecutionContextExecutor) extends MetricsSupport {
  private val metricFriendlyOperationName = operationName.replace('/', '.')
  private val timer = metricRegistry.timer(metricFriendlyOperationName)
  private val failureMeter = metricRegistry.meter(s"$metricFriendlyOperationName.failures")

  import GrpcHandler._

  def handle[Rs](request: GeneratedMessageV3, responseObserver: StreamObserver[Rs])(op: => Future[Rs]): Unit = {
    val time = timer.time()
    op onComplete {
      case Success(response) =>
        responseObserver.onNext(response)
        responseObserver.onCompleted()
        time.stop()
        LOGGER.debug(s"service invocation for operation=$operationName and request=${request.toString} completed successfully")

      case Failure(ex) =>
        responseObserver.onError(Status.fromThrowable(ex).asRuntimeException())
        failureMeter.mark()
        time.stop()
        LOGGER.error(s"service invocation for operation=$operationName and request=${request.toString} failed with error", ex)
    }
  }
} 
Example 18
Source File: ElasticSearchReader.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.reader.stores.readers.es

import com.expedia.www.haystack.commons.metrics.MetricsSupport
import com.expedia.www.haystack.trace.commons.clients.es.AWSSigningJestClientFactory
import com.expedia.www.haystack.trace.commons.config.entities.AWSRequestSigningConfiguration
import com.expedia.www.haystack.trace.reader.config.entities.ElasticSearchClientConfiguration
import com.expedia.www.haystack.trace.reader.metrics.AppMetricNames
import com.expedia.www.haystack.trace.reader.stores.readers.es.ESUtils._
import com.google.gson.Gson
import io.searchbox.client.config.HttpClientConfig
import io.searchbox.client.{JestClient, JestClientFactory}
import io.searchbox.core.{Search, SearchResult}
import org.slf4j.LoggerFactory

import scala.concurrent.{ExecutionContextExecutor, Future, Promise}
import scala.util.Try

class ElasticSearchReader(config: ElasticSearchClientConfiguration, awsRequestSigningConfig: AWSRequestSigningConfiguration)(implicit val dispatcher: ExecutionContextExecutor) extends MetricsSupport with AutoCloseable {
  private val LOGGER = LoggerFactory.getLogger(classOf[ElasticSearchReader])
  private val readTimer = metricRegistry.timer(AppMetricNames.ELASTIC_SEARCH_READ_TIME)
  private val readFailures = metricRegistry.meter(AppMetricNames.ELASTIC_SEARCH_READ_FAILURES)

  // initialize the elastic search client
  private val esClient: JestClient = {
    LOGGER.info("Initializing the http elastic search client with endpoint={}", config.endpoint)

    val factory = {
      if (awsRequestSigningConfig.enabled) {
        LOGGER.info("using AWSSigningJestClientFactory for es client")
        new AWSSigningJestClientFactory(awsRequestSigningConfig)
      } else {
        LOGGER.info("using JestClientFactory for es client")
        new JestClientFactory()
      }
    }

    val builder = new HttpClientConfig.Builder(config.endpoint)
      .multiThreaded(true)
      .connTimeout(config.connectionTimeoutMillis)
      .readTimeout(config.readTimeoutMillis)

    if (config.username.isDefined && config.password.isDefined) {
      builder.defaultCredentials(config.username.get, config.password.get)
    }

    factory.setHttpClientConfig(builder.build())
    factory.getObject
  }

  def search(request: Search): Future[SearchResult] = {
    val promise = Promise[SearchResult]()
    val time = readTimer.time()
    try {
      LOGGER.debug(s"elastic search query requested: ${request.toString}', query: '${request.toJson}'")
      esClient.executeAsync(request, new ElasticSearchReadResultListener(request, promise, time, readFailures))
      promise.future
    } catch {
      case ex: Exception =>
        readFailures.mark()
        time.stop()
        LOGGER.error(s"Failed to read from elasticsearch for request=${request.toJson} with exception", ex)
        Future.failed(ex)
    }
  }

  def count(request: Search): Future[SearchResult] = {
    val promise = Promise[SearchResult]()
    val time = readTimer.time()
    try {
      LOGGER.debug(s"elastic count query requested: ${request.toString}', query: '${request.toJson}'")
      esClient.executeAsync(request, new ElasticSearchCountResultListener(request, promise, time, readFailures))
      promise.future
    } catch {
      case ex: Exception =>
        readFailures.mark()
        time.stop()
        LOGGER.error(s"Failed to read from elasticsearch for request=${request.getData(new Gson())} with exception", ex)
        Future.failed(ex)
    }
  }

  override def close(): Unit = Try(esClient.shutdownClient())
} 
Example 19
Source File: GrpcTraceReaders.scala    From haystack-traces   with Apache License 2.0
package com.expedia.www.haystack.trace.reader.stores.readers.grpc

import com.expedia.open.tracing.api.Trace
import com.expedia.open.tracing.backend.{ReadSpansRequest, StorageBackendGrpc}
import com.expedia.www.haystack.commons.metrics.MetricsSupport
import com.expedia.www.haystack.trace.commons.config.entities.TraceStoreBackends
import com.expedia.www.haystack.trace.reader.exceptions.TraceNotFoundException
import com.expedia.www.haystack.trace.reader.metrics.AppMetricNames
import com.expedia.www.haystack.trace.reader.readers.utils.TraceMerger
import io.grpc.{ManagedChannel, ManagedChannelBuilder}
import org.slf4j.LoggerFactory

import scala.collection.JavaConverters._
import scala.concurrent.{ExecutionContextExecutor, Future, Promise}

class GrpcTraceReaders(config: TraceStoreBackends)
                      (implicit val dispatcher: ExecutionContextExecutor) extends MetricsSupport with AutoCloseable {
  private val LOGGER = LoggerFactory.getLogger(classOf[GrpcTraceReaders])

  private val readTimer = metricRegistry.timer(AppMetricNames.BACKEND_READ_TIME)
  private val readFailures = metricRegistry.meter(AppMetricNames.BACKEND_READ_FAILURES)
  private val tracesFailures = metricRegistry.meter(AppMetricNames.BACKEND_TRACES_FAILURE)

  private val clients: Seq[GrpcChannelClient] =  config.backends.map {
    backend => {
      val channel = ManagedChannelBuilder
        .forAddress(backend.host, backend.port)
        .usePlaintext(true)
        .build()

      val client = StorageBackendGrpc.newFutureStub(channel)
      GrpcChannelClient(channel, client)
    }
  }

  def readTraces(traceIds: List[String]): Future[Seq[Trace]] = {
    val allFutures = clients.map {
      client =>
        readTraces(traceIds, client.stub) recoverWith  {
          case _: Exception => Future.successful(Seq.empty[Trace])
        }
    }

    Future.sequence(allFutures)
      .map(traceSeq => traceSeq.flatten)
      .map {
        traces =>
          if (traces.isEmpty) throw new TraceNotFoundException() else TraceMerger.merge(traces)
      }
  }

  private def readTraces(traceIds: List[String], client: StorageBackendGrpc.StorageBackendFutureStub): Future[Seq[Trace]] = {
    val timer = readTimer.time()
    val promise = Promise[Seq[Trace]]

    try {
      val readSpansRequest = ReadSpansRequest.newBuilder().addAllTraceIds(traceIds.asJavaCollection).build()
      val futureResponse = client.readSpans(readSpansRequest)
      futureResponse.addListener(new ReadSpansResponseListener(
        futureResponse,
        promise,
        timer,
        readFailures,
        tracesFailures,
        traceIds.size), dispatcher)

      // return the future with the results for the given client
      promise.future
    } catch {
      case ex: Exception =>
        readFailures.mark()
        timer.stop()
        LOGGER.error("Failed to read raw traces with exception", ex)
        Future.failed(ex)
    }
  }

  override def close(): Unit = {
    clients.foreach(_.channel.shutdown())
  }

  case class GrpcChannelClient(channel: ManagedChannel, stub: StorageBackendGrpc.StorageBackendFutureStub)
} 
Example 20
Source File: IOSuite.scala    From skafka   with MIT License
package com.evolutiongaming.skafka

import cats.Parallel
import cats.effect.{Clock, Concurrent, ContextShift, IO, Timer}
import cats.implicits._
import com.evolutiongaming.catshelper.FromFuture
import com.evolutiongaming.smetrics.MeasureDuration
import org.scalatest.Succeeded

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future}

object IOSuite {
  val Timeout: FiniteDuration = 10.seconds

  implicit val executor: ExecutionContextExecutor = ExecutionContext.global

  implicit val contextShiftIO: ContextShift[IO]     = IO.contextShift(executor)
  implicit val concurrentIO: Concurrent[IO]         = IO.ioConcurrentEffect
  implicit val timerIO: Timer[IO]                   = IO.timer(executor)
  implicit val parallelIO: Parallel[IO]             = IO.ioParallel
  implicit val fromFutureIO: FromFuture[IO]         = FromFuture.lift[IO]
  implicit val measureDuration: MeasureDuration[IO] = MeasureDuration.fromClock[IO](Clock[IO])

  def runIO[A](io: IO[A], timeout: FiniteDuration = Timeout): Future[Succeeded.type] = {
    io.timeout(timeout).as(Succeeded).unsafeToFuture
  }

  implicit class IOOps[A](val self: IO[A]) extends AnyVal {
    def run(timeout: FiniteDuration = Timeout): Future[Succeeded.type] = runIO(self, timeout)
  }
} 
Example 21
Source File: EsUtil.scala    From CM-Well   with Apache License 2.0
package cmwell.analytics.util

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer
import akka.util.ByteString

import scala.concurrent.ExecutionContextExecutor

object EsUtil {

  def countDocumentsInShard(httpAddress: String,
                            shard: Shard,
                            filter: String)
                           (implicit system: ActorSystem,
                            executionContext: ExecutionContextExecutor,
                            actorMaterializer: ActorMaterializer): Long = {

    val request = HttpRequest(
      method = HttpUtil.SAFE_POST,
      uri = s"http://$httpAddress/${shard.indexName}/_count?preference=_shards:${shard.shard}",
      entity = ByteString(s"{$filter}"))

    val json = HttpUtil.jsonResult(request, "count documents in shard")

    json.get("count").asLong
  }
} 
Example 22
Source File: TInsertTime.scala    From lemon-schedule   with GNU General Public License v2.0
package com.gabry.job.db.slicks

import java.util.concurrent.TimeUnit

import com.gabry.job.core.builder.JobBuilder
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import slick.jdbc.MySQLProfile.api._

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, ExecutionContextExecutor, Future}

class TInsertTime extends FunSuite with BeforeAndAfterAll{
  implicit lazy val executionContext: ExecutionContextExecutor = scala.concurrent.ExecutionContext.global
  val db = Database.forConfig("",ConfigFactory.load().getConfig("db.mysql"))
  val jobAccess = new SlickJobAccess(db)
  val duration = FiniteDuration(3,TimeUnit.DAYS)

  override def beforeAll(): Unit = {
    super.beforeAll()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    db.close()
  }
  test("InsertTime"){
    val recordNum = 10000
    val futures = 1 to recordNum map{ i =>

      val job = JobBuilder().withName(i.toString)
        .withClass("com.gabry.job.examples.TestTask")
        .withDataTimeOffset(0)
        .withDataTimeOffsetUnit(TimeUnit.MINUTES)
        .build()

      jobAccess.insert(job)
    }
    val start = System.currentTimeMillis()
    val all = Future.sequence(futures)
    Await.result(all,duration)
    val end = System.currentTimeMillis()
    println(s"插入 $recordNum 条数据,总耗时 ${end-start} 毫秒,平均 ${(end-start)/recordNum} 毫秒/条")
  }
} 
Example 23
Source File: TracedExecutionContext.scala    From scala-concurrent   with Apache License 2.0
package io.opentracing.contrib.concurrent

import io.opentracing.util.GlobalTracer
import io.opentracing.{Span, Tracer}

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}

class TracedExecutionContext(ec: ExecutionContext, tracer: Tracer, createSpans: Boolean) extends ExecutionContextExecutor {
  def this(ec: ExecutionContext) = this(ec, GlobalTracer.get(), false)

  def this(ec: ExecutionContext, tracer: Tracer) = this(ec, tracer, false)

  if (ec == null) throw new IllegalArgumentException("ec")
  if (tracer == null) throw new IllegalArgumentException("tracer")

  override def prepare(): ExecutionContext = {
    if (!createSpans && tracer.scopeManager.activeSpan() == null) ec else new TracedExecutionContextImpl
  }

  override def reportFailure(cause: Throwable): Unit = ec.reportFailure(cause)

  override def execute(command: Runnable): Unit = ec.execute(command)

  class TracedExecutionContextImpl extends ExecutionContextExecutor {

    val activeSpan: Span = if (createSpans) tracer.buildSpan(Constants.EXECUTE_OPERATION_NAME).start()
    else tracer.scopeManager.activeSpan()

    override def reportFailure(cause: Throwable): Unit = ec.reportFailure(cause)

    override def execute(command: Runnable): Unit = {
      ec.execute(new Runnable {
        override def run(): Unit = {
          // Only deactivate the active Span if we created/own it.
          val scope = tracer.scopeManager.activate(activeSpan)
          try {
            command.run()
          } finally {
            scope.close()
            if (createSpans) {
              activeSpan.finish()
            }
          }
        }
      })
    }
  }

} 
Example 24
Source File: TracedAutoFinishExecutionContext.scala    From scala-concurrent   with Apache License 2.0
package io.opentracing.contrib.concurrent

import io.opentracing.Tracer
import io.opentracing.util.GlobalTracer

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}

class TracedAutoFinishExecutionContext(ec: ExecutionContext, tracer: Tracer) extends ExecutionContextExecutor {
  def this(ec: ExecutionContext) = this(ec, GlobalTracer.get())

  if (ec == null) throw new IllegalArgumentException("ec")
  if (tracer == null) throw new IllegalArgumentException("tracer")

  override def prepare(): ExecutionContext = {
    if (tracer.scopeManager.activeSpan() == null) ec else new TracedAutoFinishExecutionContextImpl
  }

  override def reportFailure(cause: Throwable): Unit = ec.reportFailure(cause)

  override def execute(command: Runnable): Unit = ec.execute(command)


  class TracedAutoFinishExecutionContextImpl extends ExecutionContextExecutor {
    val continuation: AutoFinishScope#Continuation = {
      Option(tracer.scopeManager()) match {
        case Some(manager: AutoFinishScopeManager) => manager.tlsScope.get().capture
        case _ => throw new IllegalStateException("Usage of AutoFinishScopeManager required.")
      }
    }

    override def execute(command: Runnable): Unit = {
      ec.execute(new Runnable {
        override def run(): Unit = {
          val scope = continuation.activate()
          try {
            command.run()
          } finally {
            scope.close()
          }
        }
      })
    }

    override def reportFailure(cause: Throwable): Unit = ec.reportFailure(cause)
  }

} 
Example 25
Source File: TestSpec.scala    From reactive-programming   with Apache License 2.0
package com.test

import java.io.IOException
import java.util.UUID

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.exceptions.TestFailedException
import org.scalatest._
import rx.lang.scala._

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContextExecutor, Future }
import scala.util.{ Random ⇒ Rnd, Try }

object Random {
  def apply(): Rnd = new Rnd()
}

trait TestSpec extends FlatSpec with Matchers with ScalaFutures with TryValues with OptionValues with Eventually with BeforeAndAfterAll {
  implicit val system: ActorSystem = ActorSystem("test")
  implicit val ec: ExecutionContextExecutor = system.dispatcher
  val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 50.seconds)
  implicit val timeout = Timeout(50.seconds)

  override protected def afterAll(): Unit = {
    system.terminate()
  }

  
  def cleanup(actors: ActorRef*): Unit = {
    val probe = TestProbe()
    actors.foreach { (actor: ActorRef) ⇒
      actor ! PoisonPill
      probe watch actor
    }
  }

  implicit class PimpedByteArray(self: Array[Byte]) {
    def getString: String = new String(self)
  }

  implicit class PimpedFuture[T](self: Future[T]) {
    def toTry: Try[T] = Try(self.futureValue)
  }

  implicit class PimpedObservable[T](self: Observable[T]) {
    def waitFor: Unit = {
      self.toBlocking.toIterable.last
    }
  }

  implicit class MustBeWord[T](self: T) {
    def mustBe(pf: PartialFunction[T, Unit]): Unit =
      if (!pf.isDefinedAt(self)) throw new TestFailedException("Unexpected: " + self, 0)
  }

  object Socket { def apply() = new Socket }
  class Socket {
    def readFromMemory: Future[Array[Byte]] = Future {
      Thread.sleep(100) // sleep 100 millis
      "fromMemory".getBytes
    }

    def send(payload: Array[Byte], from: String, failed: Boolean): Future[Array[Byte]] =
      if (failed) Future.failed(new IOException(s"Network error: $from"))
      else {
        Future {
          Thread.sleep(250) // sleep 250 millis, not real life time, but hey
          s"${payload.getString}->$from".getBytes
        }
      }

    def sendToEurope(payload: Array[Byte], failed: Boolean = false): Future[Array[Byte]] =
      send(payload, "fromEurope", failed)

    def sendToUsa(payload: Array[Byte], failed: Boolean = false): Future[Array[Byte]] =
      send(payload, "fromUsa", failed)
  }
} 
Example 26
Source File: RestServiceInMemory.scala    From kafka-with-akka-streams-kafka-streams-tutorial   with Apache License 2.0
package com.lightbend.scala.akkastream.queryablestate.inmemory

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives.{complete, get, path}
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer

import scala.concurrent.ExecutionContextExecutor
// import akka.util.timeout  // See usage below.
import com.lightbend.scala.akkastream.modelserver.stage.ModelStateStore
import com.lightbend.scala.modelServer.model.ModelToServeStats
import de.heikoseeberger.akkahttpjackson.JacksonSupport


object RestServiceInMemory {

  // Serve model status: http://localhost:5500/state
  def startRest(service: ModelStateStore)(implicit system: ActorSystem, materializer: ActorMaterializer): Unit = {

    implicit val executionContext: ExecutionContextExecutor = system.dispatcher
    // Use with HTTP methods that accept an implicit timeout argument
    // implicit val timeout = Timeout(10.seconds)
    val host = "127.0.0.1"
    val port = 5500
    val routes = QueriesAkkaHttpResource.storeRoutes(service)

    Http().bindAndHandle(routes, host, port) map
      { binding => println(s"Starting models observer on port ${binding.localAddress}") } recover {
      case ex =>
        println(s"Models observer could not bind to $host:$port - ${ex.getMessage}")
    }
  }
}

object QueriesAkkaHttpResource extends JacksonSupport {

  def storeRoutes(service: ModelStateStore): Route =
    get {
      path("state") {
        val info: ModelToServeStats = service.getCurrentServingInfo
        complete(info)
      }
    }
} 
Example 27
Source File: RestServiceActors.scala    From kafka-with-akka-streams-kafka-streams-tutorial   with Apache License 2.0
package com.lightbend.scala.akkastream.queryablestate.actors

import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives.{complete, get, onSuccess, path}
import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.lightbend.scala.akkastream.modelserver.actors.{GetModels, GetModelsResult, GetState}
import com.lightbend.scala.modelServer.model.ModelToServeStats
import de.heikoseeberger.akkahttpjackson.JacksonSupport

import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration._


object RestServiceActors {

  // See http://localhost:5500/models
  // Then select a model shown and try http://localhost:5500/state/<model>, e.g., http://localhost:5500/state/wine
  def startRest(modelserver: ActorRef)(implicit system: ActorSystem, materializer: ActorMaterializer): Unit = {

    implicit val executionContext: ExecutionContextExecutor = system.dispatcher
    // Use with HTTP methods that accept an implicit timeout argument
    // implicit val timeout = Timeout(10.seconds)
    val host = "127.0.0.1"
    val port = 5500
    val routes: Route = QueriesAkkaHttpResource.storeRoutes(modelserver)

    Http().bindAndHandle(routes, host, port) map
      { binding => println(s"Starting models observer on port ${binding.localAddress}") } recover {
      case ex =>
        println(s"Models observer could not bind to $host:$port - ${ex.getMessage}")
    }
  }
}

object QueriesAkkaHttpResource extends JacksonSupport {

  implicit val askTimeout: Timeout = Timeout(30.seconds)

  def storeRoutes(modelserver: ActorRef): Route =
    get {
      path("state"/Segment) { datatype =>
        onSuccess(modelserver ? GetState(datatype)) {
          case info: ModelToServeStats =>
            complete(info)
        }
      } ~
        path("models") {
          onSuccess(modelserver ? GetModels()) {
            case models: GetModelsResult =>
              complete(models)
          }
        }
    }
} 
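A hedged client-side sketch (actor system name is illustrative): once the service above is running, the state of a model can be queried with Akka HTTP's client API against the same routes.

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer

object StateClient extends App {
  implicit val system: ActorSystem = ActorSystem("state-client")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  import system.dispatcher

  // Query the per-model state endpoint exposed by RestServiceActors above.
  Http()
    .singleRequest(HttpRequest(uri = "http://127.0.0.1:5500/state/wine"))
    .foreach(response => println(s"HTTP ${response.status}"))
}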
Example 28
Source File: ExampleApp.scala    From caliban   with Apache License 2.0 5 votes vote down vote up
package caliban.akkahttp

import scala.concurrent.ExecutionContextExecutor
import scala.io.StdIn
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import caliban.ExampleData.sampleCharacters
import caliban.ExampleService.ExampleService
import caliban.interop.circe.AkkaHttpCirceAdapter
import caliban.{ ExampleApi, ExampleService }
import zio.clock.Clock
import zio.console.Console
import zio.internal.Platform
import zio.Runtime


  val route =
    path("api" / "graphql") {
      adapter.makeHttpService(interpreter)
    } ~ path("ws" / "graphql") {
      adapter.makeWebSocketService(interpreter)
    } ~ path("graphiql") {
      getFromResource("graphiql.html")
    }

  val bindingFuture = Http().bindAndHandle(route, "localhost", 8088)
  println(s"Server online at http://localhost:8088/\nPress RETURN to stop...")
  StdIn.readLine()
  bindingFuture
    .flatMap(_.unbind())
    .onComplete(_ => system.terminate())

} 
Example 29
Source File: AkkaJobClient.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.client.akkaclient

import java.util.concurrent.TimeUnit

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.pattern.{AskTimeoutException, ask}
import akka.util.Timeout
import com.gabry.job.client.AbstractJobClient
import com.gabry.job.core.command.JobClientCommand
import com.gabry.job.core.domain.{Dependency, Job}
import com.gabry.job.core.event.{FailedEvent, JobTrackerEvent}
import com.gabry.job.core.registry.{Registry, RegistryFactory}
import com.typesafe.config.Config

import scala.concurrent.ExecutionContextExecutor
import scala.util.{Failure, Success}

  override def cancelJob(jobId: Long,force:Boolean): Unit = {
    clientActor ? JobClientCommand.CancelJob(jobId,force) onComplete{
      case Success(_) =>
        println("作业取消成功")
      case Failure(reason) =>
        reason.printStackTrace()
        println(s"作业取消失败 ${reason.getMessage}")
    }
  }
} 
Example 30
Source File: JobSchedulerNode.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.scheduler.node

import akka.actor.{ActorRef, Props}
import akka.cluster.Member
import com.gabry.job.core.command.JobSchedulerCommand
import com.gabry.job.core.event.TaskTrackerEvent
import com.gabry.job.core.node.{ClusterNode, ClusterNodeProps}
import com.gabry.job.db.factory.DatabaseFactory
import com.gabry.job.db.proxy.DataAccessProxy
import com.gabry.job.scheduler.actor.{JobSchedulerActor, JobTaskAggregatorActor, JobTaskDispatcherActor}

import scala.concurrent.ExecutionContextExecutor


class JobSchedulerNode extends ClusterNode{
  private var schedulerActor:ActorRef = _
  private var dispatcherActor:ActorRef = _
  private var aggregatorActor:ActorRef = _
  private val dataAccessFactory = DatabaseFactory.getDataAccessFactory(config).get
  private var dataAccessProxy:ActorRef = _
  private implicit lazy val databaseIoExecutionContext: ExecutionContextExecutor = context.system.dispatchers.lookup("akka.actor.database-io-dispatcher")

  override def preStart(): Unit = {
    super.preStart()
    dataAccessFactory.init()
    dataAccessProxy = context.actorOf(DataAccessProxy.props(databaseIoExecutionContext),"dataAccessProxy")
    context.watch(dataAccessProxy)

    schedulerActor = context.actorOf(JobSchedulerActor.props(dataAccessProxy,selfAnchor),"schedulerActor")

    context.watch(schedulerActor)
    aggregatorActor = context.actorOf(JobTaskAggregatorActor.props(dataAccessProxy,selfAnchor),"aggregatorActor")

    context.watch(aggregatorActor)
    dispatcherActor = context.actorOf(JobTaskDispatcherActor.props(dataAccessProxy,selfAnchor,aggregatorActor),"dispatcherActor")

    context.watch(dispatcherActor)

  }

  override def postStop(): Unit = {
    super.postStop()
    dataAccessFactory.destroy()
    context.stop(schedulerActor)
    context.stop(dispatcherActor)
    context.stop(aggregatorActor)
    context.stop(dataAccessProxy)

  }
  override def userDefineEventReceive: Receive = {
    case cmd @ JobSchedulerCommand.ScheduleJob(job,replyTo) =>
      schedulerActor ! cmd
    case cmd @ JobSchedulerCommand.StopScheduleJob(job) =>
      schedulerActor ! cmd
    case evt @ TaskTrackerEvent.TaskTrackerStarted(taskTracker) =>
      log.info(s"TaskTracker启动 $taskTracker")
      dispatcherActor ! evt
    case evt @ TaskTrackerEvent.TaskTrackerStopped(taskTracker) =>
      log.info(s"TaskTracker停止 $taskTracker")
      dispatcherActor ! evt
  }

  override def register(member: Member): Unit = {

  }

  override def unRegister(member: Member): Unit = {

  }
} 
Example 31
Source File: JobTrackerNode.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.manager.node

import akka.actor.{ActorRef, Props, RootActorPath}
import akka.cluster.Member
import akka.routing.{ActorSelectionRoutee, RoundRobinRoutingLogic, Router}
import com.gabry.job.core.command.{JobSchedulerCommand, JobTrackerCommand}
import com.gabry.job.core.constant.Constants
import com.gabry.job.core.domain.{Dependency, UID}
import com.gabry.job.core.event.JobTrackerEvent
import com.gabry.job.core.node.{ClusterNode, ClusterNodeProps}
import com.gabry.job.core.po.DependencyPo
import com.gabry.job.db.proxy.DataAccessProxy
import com.gabry.job.manager.actor.JobTrackerActor

import scala.concurrent.ExecutionContextExecutor

    case originCmd @ JobTrackerCommand.SubmitJob(job,_,_) =>
      log.debug(s"Receive SubmitJob Command $originCmd")
      jobTracker ! originCmd
    case JobTrackerCommand.ScheduleJob(job,replyTo) =>
      if(schedulerRouter.routees.nonEmpty){
        schedulerRouter.route(JobSchedulerCommand.ScheduleJob(job,self),self)
        log.info(s"Send ScheduleJob command to scheduler job.id = ${job.uid}")
        // Send the job, as updated after insertion, back to replyTo
        replyTo ! JobTrackerEvent.JobSubmitted(job)
      }else{
        replyTo ! JobTrackerEvent.JobSubmitFailed("No Scheduler node found")
      }
  }
  override def register(member: Member): Unit = {
    if(member.hasRole(Constants.ROLE_SCHEDULER_NAME)){
      val scheduleNode = context.system.actorSelection(RootActorPath(member.address)/ "user" / Constants.ROLE_SCHEDULER_NAME)
      schedulerRouter = schedulerRouter.addRoutee(scheduleNode)
    }
  }

  override def unRegister(member: Member): Unit = {
    if(member.hasRole(Constants.ROLE_SCHEDULER_NAME)){
      val scheduleNode = context.system.actorSelection(RootActorPath(member.address)/ "user" / Constants.ROLE_SCHEDULER_NAME)
      schedulerRouter = schedulerRouter.removeRoutee(scheduleNode)
    }
  }
} 
Example 32
Source File: Main.scala    From akka-ddd-cqrs-es-example   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.bank.apiServer

import akka.actor.{ ActorRef, ActorSystem }
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import com.github.j5ik2o.bank.adaptor.aggregate.{ BankAccountAggregateFlowsImpl, ShardedBankAccountAggregates }
import com.github.j5ik2o.bank.adaptor.controller.Routes
import com.github.j5ik2o.bank.adaptor.dao.BankAccountReadModelFlowsImpl
import com.github.j5ik2o.bank.adaptor.generator.IdGenerator
import com.github.j5ik2o.bank.adaptor.readJournal.JournalReaderImpl
import com.github.j5ik2o.bank.useCase.{ BankAccountAggregateUseCase, BankAccountReadModelUseCase }
import com.typesafe.config.{ Config, ConfigFactory }
import pureconfig._
import slick.basic.DatabaseConfig
import slick.jdbc.JdbcProfile

import scala.concurrent.ExecutionContextExecutor

object Main extends App {
  val rootConfig: Config                    = ConfigFactory.load()
  val dbConfig: DatabaseConfig[JdbcProfile] = DatabaseConfig.forConfig[JdbcProfile](path = "slick", rootConfig)

  implicit val system: ActorSystem                        = ActorSystem("bank-system", config = rootConfig)
  implicit val materializer: ActorMaterializer            = ActorMaterializer()
  implicit val executionContext: ExecutionContextExecutor = system.dispatcher

  val bankAccountIdGenerator = IdGenerator.ofBankAccountId(dbConfig.profile, dbConfig.db)

  val bankAccountAggregatesRef: ActorRef =
    system.actorOf(ShardedBankAccountAggregates.props, ShardedBankAccountAggregates.name)

  val bankAccountAggregateUseCase: BankAccountAggregateUseCase = new BankAccountAggregateUseCase(
    new BankAccountAggregateFlowsImpl(bankAccountAggregatesRef)
  )

  val bankAccountReadModelUseCase: BankAccountReadModelUseCase =
    new BankAccountReadModelUseCase(new BankAccountReadModelFlowsImpl(dbConfig.profile, dbConfig.db),
                                    new JournalReaderImpl())

  val routes: Routes = Routes(bankAccountIdGenerator, bankAccountAggregateUseCase, bankAccountReadModelUseCase)

  val ApiServerConfig(host, port) =
    loadConfigOrThrow[ApiServerConfig](system.settings.config.getConfig("bank.api-server"))

  val bindingFuture = Http().bindAndHandle(routes.root, host, port)

  sys.addShutdownHook {
    bindingFuture
      .flatMap(_.unbind())
      .onComplete(_ => system.terminate())
  }
} 
Example 33
Source File: TJobAccess.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.db.slicks

import java.util.concurrent.TimeUnit

import com.gabry.job.core.domain.Job
import com.gabry.job.db.slicks
import com.gabry.job.db.slicks.schema.Tables
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import slick.jdbc.MySQLProfile.api._

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, ExecutionContextExecutor}


class TJobAccess extends FunSuite with BeforeAndAfterAll{
  implicit lazy val executionContext: ExecutionContextExecutor = scala.concurrent.ExecutionContext.global
  val db = Database.forConfig("",ConfigFactory.load().getConfig("db.mysql"))
  val jobAccess = new SlickJobAccess(db)
  val scheduleNode = "3958164162305738376-node"
  val job:Tables.JobsRow = slicks.jobPo2Row(Job("0", "3958164162305738376-test","com.gabry.job.examples.TestTask","",0,TimeUnit.MINUTES))
    .copy(schedulerNode = Some(scheduleNode))
  val duration = FiniteDuration(3,TimeUnit.SECONDS)
  override def beforeAll(): Unit = {
    super.beforeAll()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    jobAccess.delete(job)
    db.close()
  }
  test("JobAccess insert"){
    val insert = Await.result(jobAccess.insert(job),duration)
    assert(insert != null )
    assert(insert.name == job.name)
  }
  test("JobAccess select"){
    val select = Await.result(jobAccess.selectOne(job.name),duration)
    assert(select.isDefined)
    assert(select.get.name == job.name)
  }
  test("JobAccess update"){
    val updateClassName = "updateClassName"
    val old = Await.result(jobAccess.selectOne(job.name),duration)
    assert(old.isDefined)
    assert(old.get.className!=updateClassName)
    val update = Await.result(jobAccess.update(job.copy(className = updateClassName)),duration)
    assert(update > 0 )
    val newJob = Await.result(jobAccess.selectOne(job.name),duration)
    assert(newJob.isDefined)
    assert(newJob.get.className==updateClassName)
  }
  test("JobAccess selectJobsByScheduleNode"){
    jobAccess.selectJobsByScheduleNode(scheduleNode){ r =>
      assert(r.schedulerNode.isDefined && r.schedulerNode.get == scheduleNode)
    }
  }
  test("JobAccess insertOnDuplicateUpdate"){
    val insert1 = Await.result(jobAccess.insertOnDuplicateUpdate(job),duration)
    val insert2 = Await.result(jobAccess.insertOnDuplicateUpdate(job),duration)
    assert(insert1>0)
    assert(insert2>0)
  }
  test("JobAccess delete"){
    val delete = Await.result(jobAccess.delete(job),duration)
    assert(delete > 0 )
    val select = Await.result(jobAccess.selectOne(job.name),duration)
    assert(select.isEmpty)
  }
} 
Example 34
Source File: TTaskAccess.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.db.slicks

import java.util.concurrent.TimeUnit

import com.gabry.job.core.domain.UID
import com.gabry.job.db.slicks.schema.Tables
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import slick.jdbc.MySQLProfile.api._

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, ExecutionContextExecutor}

class TTaskAccess extends FunSuite with BeforeAndAfterAll{
  implicit lazy val executionContext: ExecutionContextExecutor = scala.concurrent.ExecutionContext.global
  val db = Database.forConfig("",ConfigFactory.load().getConfig("db.mysql"))
  val taskAccess = new SlickTaskAccess(db)
  var jobIdAndTriggerTime: (UID, Long) = ("999",1523497644627L)
  val taskTrackerNode = "3958164162305738376-node"
  val taskPo:Tables.TasksRow = Tables.TasksRow(-1,jobIdAndTriggerTime._1,jobIdAndTriggerTime._1,"-1",1,taskTrackerNode,"TEST",jobIdAndTriggerTime._2,Some("test"),null)
  val duration = FiniteDuration(3,TimeUnit.SECONDS)

  override def beforeAll(): Unit = {
    super.beforeAll()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    db.close()
  }
  test("TaskAccess insert, select,delete"){
    val insert = Await.result(taskAccess.insert(taskPo) ,duration)
    assert(insert!=null)
    assert(insert.state==taskPo.state)
    val select = Await.result(taskAccess.selectOne(insert.uid),duration)
    assert(select.isDefined)
    assert(select.get.state==insert.state)
    val delete = Await.result(taskAccess.delete(insert),duration)
    assert(delete>0)
    val select1 = Await.result(taskAccess.selectOne(insert.uid),duration)
    assert(select1.isEmpty)
  }
  test("TaskAccess insertOnDuplicateUpdate"){
    val insert = Await.result(taskAccess.insertOnDuplicateUpdate(taskPo) ,duration)
    assert(insert==0)
  }
} 
Example 35
Source File: TScheduleAccess.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.db.slicks

import java.util.concurrent.TimeUnit

import com.gabry.job.core.domain.UID
import com.gabry.job.core.po.SchedulePo
import com.gabry.job.db.slicks.schema.Tables
import com.gabry.job.utils.Utils
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import slick.jdbc.MySQLProfile.api._

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, ExecutionContextExecutor}

class TScheduleAccess extends FunSuite with BeforeAndAfterAll{
  implicit lazy val executionContext: ExecutionContextExecutor = scala.concurrent.ExecutionContext.global
  val db = Database.forConfig("",ConfigFactory.load().getConfig("db.mysql"))
  val scheduleAccess = new SlickScheduleAccess(db)
  val scheduleNode = "3958164162305738376-node"
  var jobIdAndTriggerTime: (UID, Long) = ("999",1523497644627L)

  val schedulePo:Tables.SchedulesRow = SchedulePo("0",jobIdAndTriggerTime._1,2,3,false,
    jobIdAndTriggerTime._2,scheduleNode,123,false,
    Utils.calcPostOffsetTime(jobIdAndTriggerTime._2,0,TimeUnit.MINUTES),null)
  val duration = FiniteDuration(3,TimeUnit.SECONDS)
  override def beforeAll(): Unit = {
    super.beforeAll()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    Await.result(scheduleAccess.delete(schedulePo) ,duration)
    db.close()
  }
  test("ScheduleAccess insert"){
    val insert = Await.result(scheduleAccess.insert(schedulePo.copy(jobUid = schedulePo.jobUid)) ,duration)
    assert(insert != null )
  }
  test("ScheduleAccess insertOnDuplicateUpdate"){
    val insert1 = Await.result(scheduleAccess.insertOnDuplicateUpdate(schedulePo),duration)
    val insert2 = Await.result(scheduleAccess.insertOnDuplicateUpdate(schedulePo),duration)
    assert(insert1 > 0 )
    assert(insert2 > 0 )
  }

  test("ScheduleAccess select setDispatched"){
    val select = Await.result(scheduleAccess.selectOne(jobIdAndTriggerTime),duration)
    assert(select.isDefined)
    assert(select.get.jobUid == jobIdAndTriggerTime._1 && select.get.triggerTime == jobIdAndTriggerTime._2)
    val update = Await.result(scheduleAccess.setDispatched(select.get.uid,true),duration)
    assert(update > 0 )

    val select1 = Await.result(scheduleAccess.selectOne(jobIdAndTriggerTime),duration)
    assert(select1.isDefined)
    assert(select1.get.dispatched)
  }
  test("ScheduleAccess update"){
    val updateScheduleNode = "updateScheduleNode"
    val old = Await.result(scheduleAccess.selectOne(jobIdAndTriggerTime),duration)
    assert(old.isDefined)
    assert(old.get.scheduleNode!=updateScheduleNode)

    val update = Await.result(scheduleAccess.update(schedulePo.copy(scheduleNode = updateScheduleNode)),duration)
    assert(update > 0 )
    val newJob = Await.result(scheduleAccess.selectOne(jobIdAndTriggerTime),duration)
    assert(newJob.isDefined)
    assert(newJob.get.scheduleNode == updateScheduleNode)
  }
  test("ScheduleAccess selectUnDispatchSchedule"){
    scheduleAccess.selectUnDispatchSchedule("1",scheduleNode,jobIdAndTriggerTime._2+30,2){ r=>
      assert(!r.dispatched)
    }
  }
} 
Example 36
Source File: TDatabaseFactory.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.db.factory

import java.util.concurrent.TimeUnit

import com.gabry.job.db.slicks.{SlickDependencyAccess, SlickJobAccess, SlickScheduleAccess, SlickTaskAccess}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FunSuite}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, ExecutionContextExecutor}


class TDatabaseFactory extends FunSuite with BeforeAndAfterAll{
  private implicit lazy val executionContext: ExecutionContextExecutor = scala.concurrent.ExecutionContext.global
  private val config = ConfigFactory.load()
  private val duration = FiniteDuration(3,TimeUnit.SECONDS)
  private val dataAccessFactory = DatabaseFactory.getDataAccessFactory(config).get
  override def beforeAll(): Unit = {
    super.beforeAll()
    dataAccessFactory.init()
  }
  override def afterAll(): Unit = {
    super.afterAll()
    dataAccessFactory.destroy()
  }
  test("TDatabaseFactory default jobAccess type"){
    val access = dataAccessFactory.getJobAccess
    assert(access.isInstanceOf[SlickJobAccess])
  }
  test("TDatabaseFactory jobAccess select"){
    val access = dataAccessFactory.getJobAccess
    assert(access.isInstanceOf[SlickJobAccess])

    val select = Await.result(access.selectOne("test"),duration)
    assert(select.isDefined)
    assert(select.get.name == "test")

  }
  test("TDatabaseFactory dependencyAccess type"){
    val access = dataAccessFactory.getDependencyAccess
    assert(access.isInstanceOf[SlickDependencyAccess])
  }
  test("TDatabaseFactory scheduleAccess type"){
    val access = dataAccessFactory.getScheduleAccess
    assert(access.isInstanceOf[SlickScheduleAccess])
  }
  test("TDatabaseFactory taskAccess type"){
    val access = dataAccessFactory.getTaskAccess
    assert(access.isInstanceOf[SlickTaskAccess])
  }
} 
Example 37
Source File: DataAccessProxy.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.db.proxy

import akka.actor.{ActorRef, Props}
import com.gabry.job.core.actor.SimpleActor
import com.gabry.job.core.domain.UID
import com.gabry.job.core.po.{DependencyPo, JobPo, SchedulePo, TaskPo}
import com.gabry.job.db.DataTables
import com.gabry.job.db.factory.DatabaseFactory
import com.gabry.job.db.proxy.actor.{DependencyAccessProxy, JobAccessProxy, ScheduleAccessProxy, TaskAccessProxy}
import com.gabry.job.db.proxy.command.DatabaseCommand

import scala.concurrent.ExecutionContextExecutor


  override def userDefineEventReceive: Receive = {
    case cmd @ DatabaseCommand.Insert(_:JobPo,_,_) =>
      jobAccessProxy ! cmd
    case cmd @ DatabaseCommand.Insert(_:SchedulePo,_,_) =>
      scheduleAccessProxy ! cmd
    case cmd @ DatabaseCommand.Insert(_:Array[DependencyPo],_,_) =>
      dependencyAccessProxy ! cmd
    case cmd @ DatabaseCommand.Insert(_:TaskPo,_,_) =>
      taskAccessProxy ! cmd
    case cmd @ DatabaseCommand.Select(_:JobPo,_,_) =>
      jobAccessProxy ! cmd
    case cmd @ DatabaseCommand.Update(_:JobPo,_:JobPo,_,_) =>
      jobAccessProxy ! cmd
    case cmd @ DatabaseCommand.UpdateField(DataTables.SCHEDULE,_,_,_,_) =>
      scheduleAccessProxy ! cmd
    case cmd @ DatabaseCommand.Select((DataTables.DEPENDENCY,_,_),_,_) =>
      dependencyAccessProxy ! cmd
    case cmd @ DatabaseCommand.Select((DataTables.JOB,_,_,_),_,_) =>
      jobAccessProxy ! cmd
    case cmd @ DatabaseCommand.Select((DataTables.SCHEDULE,jobUid:UID,nodeAnchor:String,triggerTime:Long,parallel:Int),_,_) =>
      scheduleAccessProxy ! cmd
    case cmd @ DatabaseCommand.Select((DataTables.JOB,nodeAnchor:String),_,_) =>
      jobAccessProxy ! cmd
  }
} 
Example 38
Source File: Contexts.scala    From scala-loci   with Apache License 2.0 5 votes vote down vote up
package loci
package contexts

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}
import scala.scalajs.concurrent.JSExecutionContext
import scala.util.control.NonFatal

object Pooled {
  lazy val global: ExecutionContextExecutor =
    new logging.ReportingExecutionContext(JSExecutionContext.queue)

  object Implicits {
    implicit lazy val global: ExecutionContext = Pooled.global
  }
}

object Immediate {
  lazy val global: ExecutionContextExecutor = new ExecutionContextExecutor {
    def execute(runnable: Runnable) =
      try runnable.run()
      catch { case NonFatal(exception) => reportFailure(exception) }

    def reportFailure(throwable: Throwable) = logging.reportException(throwable)
  }

  object Implicits {
    implicit lazy val global: ExecutionContext = Immediate.global
  }
}

object Queued {
  lazy val global = create()

  def create(): ExecutionContextExecutor =
    new logging.ReportingExecutionContext(JSExecutionContext.queue)

  object Implicits {
    implicit lazy val global: ExecutionContext = Queued.global
  }
} 
Example 39
Source File: TracingExecutionContext.scala    From scala-loci   with Apache License 2.0 5 votes vote down vote up
package loci
package logging

import scribe.{LoggingExecutionContext, Position}

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}
import scala.reflect.macros.whitebox

class TracingExecutionContext(context: ExecutionContext, stack: List[Position]) extends
    LoggingExecutionContext(context, stack) with
    ExecutionContextExecutor {
  override def execute(runnable: Runnable) = super.execute(new Runnable {
    override def run() = tracing run { runnable.run() }
  })
}

object ImplicitTracingExecutionContext {
  def resolve(c: whitebox.Context): c.Tree = {
    import c.universe._

    if (c.hasErrors)
      c.abort(c.enclosingPosition, "Skipping tracing execution context macro due to compilation errors")

    // the current macro expansion always appears twice
    // see: http://stackoverflow.com/a/20466423
    val recursionCount = c.openMacros.count { other =>
      c.enclosingPosition == other.enclosingPosition &&
        c.macroApplication.toString == other.macroApplication.toString
    }
    if (recursionCount > 2)
      c.abort(c.enclosingPosition, "Skipping tracing execution context macro for recursive invocation")

    val tree = c.inferImplicitValue(typeOf[ExecutionContext])
    if (tree.isEmpty)
      c.abort(c.enclosingPosition, "Skipping tracing execution context macro due to unresolved execution context")

    // flag symbol of inferred tree as synthetic if it is private or a local variable
    // to prevent "value is never used" warnings
    val symbol = tree.symbol.asTerm
    if (!symbol.isGetter && !symbol.isParamAccessor || symbol.isGetter && symbol.isPrivate)
      c.internal.setFlag(tree.symbol, Flag.SYNTHETIC)

    ExplicitTracingExecutionContext.instrument(c)(tree)
  }
}

object ExplicitTracingExecutionContext {
  def instrument(c: whitebox.Context)(context: c.Tree): c.Tree = {
    import c.universe._

    val stack = c.typecheck(q"${termNames.ROOTPKG}.scribe.Execution.custom(null)", silent = true) match {
      case q"new $_($_, $stack)" =>
        Right(stack)
      case q"new $_($_, $stack): $_" =>
        Right(stack)
      case tree =>
        Left(tree)
    }

    val tracingContext = tq"${termNames.ROOTPKG}.loci.logging.TracingExecutionContext"

    stack match {
      case Right(stack) =>
        q"new $tracingContext($context, $stack)"

      case Left(tree) =>
        val message = s"scribe logging framework custom execution context macro generated unexpected code: $tree"
        q"""{
          @${termNames.ROOTPKG}.scala.annotation.compileTimeOnly($message) def unexpectedTree() = ()
          unexpectedTree()
          new $tracingContext(null, null)
        }"""
    }
  }
} 
Example 40
Source File: Contexts.scala    From scala-loci   with Apache License 2.0 5 votes vote down vote up
package loci
package contexts

import java.util.concurrent.{Executors, ThreadFactory}

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}
import scala.util.control.NonFatal

object Pooled {
  lazy val global: ExecutionContextExecutor =
    new logging.ReportingExecutionContext(ExecutionContext.global)

  object Implicits {
    implicit lazy val global: ExecutionContext = Pooled.global
  }
}

object Immediate {
  lazy val global: ExecutionContextExecutor = new ExecutionContextExecutor {
    def execute(runnable: Runnable) =
      try runnable.run()
      catch { case NonFatal(exception) => reportFailure(exception) }

    def reportFailure(throwable: Throwable) = logging.reportException(throwable)
  }

  object Implicits {
    implicit lazy val global: ExecutionContext = Immediate.global
  }
}

object Queued {
  lazy val global = create()

  def create(): ExecutionContextExecutor =
    ExecutionContext.fromExecutorService(
      Executors.newSingleThreadExecutor(new ThreadFactory {
        def newThread(runnable: Runnable) = {
          val thread = new Thread(new Runnable {
            def run() =
              try runnable.run()
              catch {
                case NonFatal(exception) =>
                  if (exception.getCause != null)
                    logging.reportException(exception.getCause)
                  else
                    logging.reportException(exception)
              }
          })
          thread.setDaemon(true)
          thread
        }
      }),
      logging.reportException)

  object Implicits {
    implicit lazy val global: ExecutionContext = Queued.global
  }
} 
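A minimal usage sketch, assuming the scala-loci library is on the classpath: bring one of the contexts defined above into implicit scope so that Futures run on it and failures are reported through the library's logging.

import scala.concurrent.Future
import loci.contexts.Pooled.Implicits.global

object ContextsUsage extends App {
  // Runs on the pooled, exception-reporting execution context from the example above.
  Future(println("runs on the pooled, exception-reporting execution context"))
}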
Example 41
Source File: DiscoverEsTopology.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.analytics.util

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer
import com.fasterxml.jackson.databind.JsonNode

import scala.collection.JavaConverters._
import scala.collection.breakOut
import scala.concurrent.ExecutionContextExecutor

case class Shard(indexName: String, shard: Int) {

  // This field is not in the constructor argument list since it is not part of equality.
  private var _downloadAttempt: Int = 0

  def downloadAttempt: Int = _downloadAttempt

  def nextAttempt: Shard = {
    val copy = this.copy()
    copy._downloadAttempt = this._downloadAttempt + 1
    copy
  }
}


case class EsTopology(nodes: Map[String, String], // nodeId -> address
                      shards: Map[Shard, Seq[String]], // shard -> Seq[nodeId]
                      allIndexNames: Set[String])


object DiscoverEsTopology {

  def apply(esContactPoint: String,
            aliases: Seq[String] = Seq.empty)
           (implicit system: ActorSystem,
            executionContext: ExecutionContextExecutor,
            actorMaterializer: ActorMaterializer): EsTopology = {

    // Get a map from node name -> address

    val nodesJson = HttpUtil.jsonResult(HttpRequest(uri = s"http://$esContactPoint/_nodes"), "find es nodes")
    val extractAddress = "inet\\[/(.+)]".r // "inet[/10.204.146.152:9304]"
    val nodes: Map[String, String] = nodesJson.get("nodes").fields.asScala.map { entry =>

      val nodeId = entry.getKey
      val extractAddress(hostPort) = entry.getValue.get("http_address").asText

      nodeId -> hostPort
    }.toMap

    // Find all the shards for all indexes.

    val searchShardsJson = HttpUtil.jsonResult(HttpRequest(uri = s"http://$esContactPoint/_search_shards"), "search shards")

    val shards: Map[Shard, Seq[String]] = searchShardsJson.get("shards").elements.asScala.map { shardLocations: JsonNode =>

      // Sort the shard locations so that the primary is first - we will always try the primary first
      val locations = shardLocations.elements.asScala.toSeq.sortBy(_.findValue("primary").booleanValue).reverse

      assert(locations.nonEmpty)
      assert(locations.head.findValue("primary").booleanValue) // first one is primary node

      val indexName = locations.head.findValue("index").asText
      val shard = locations.head.findValue("shard").asInt
      val nodeIds: Vector[String] = locations.map(_.findValue("node").asText)(breakOut)

      Shard(indexName, shard) -> nodeIds
    }.toMap

    // Get a list of aliases that we want to read from.
    // This is used to filter the list of all shards down to the ones that we want to read from.

    def resolveAlias(alias: String): Set[String] = {
      val aliasesJson = HttpUtil.jsonResult(HttpRequest(uri = s"http://$esContactPoint/$alias/_alias"), s"shards for $alias")
      aliasesJson.fieldNames.asScala.toSet
    }

    val readIndexNames: Set[String] = if (aliases.isEmpty)
      resolveAlias("cm_well_all") // Default if no alias or index name specified.
    else
      (Set.empty[String] /: aliases) (_ ++ resolveAlias(_)) // resolve and combine all the index names

    // allIndexNames is useful for validation of parameters to ensure they are all valid index names.

    val allIndexNames: Set[String] = {
      val aliasesJson = HttpUtil.jsonResult(HttpRequest(uri = s"http://$esContactPoint/_all/_alias"), "Get all index names")
      aliasesJson.fieldNames.asScala.toSet
    }

    EsTopology(
      nodes = nodes,
      // Only read shards for indexes that are included in the given aliases or index names.
      shards = shards.filter { case (shard, _) => readIndexNames.contains(shard.indexName) },
      allIndexNames = allIndexNames)
  }
} 
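A hedged usage sketch (the contact point is illustrative): discovering the topology needs an implicit ActorSystem, ExecutionContextExecutor, and ActorMaterializer in scope, matching the implicit parameter list of apply above.

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import cmwell.analytics.util.{DiscoverEsTopology, EsTopology}
import scala.concurrent.ExecutionContextExecutor

object TopologySketch extends App {
  implicit val system: ActorSystem = ActorSystem("es-topology")
  implicit val ec: ExecutionContextExecutor = system.dispatcher
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  // Resolve nodes, shards, and index names from a local Elasticsearch contact point.
  val topology: EsTopology = DiscoverEsTopology("localhost:9200")
  println(topology.allIndexNames)
}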
Example 42
Source File: Main.scala    From akka-api-gateway-example   with MIT License 5 votes vote down vote up
package jp.co.dzl.example.akka.api

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.{ ActorMaterializer, Materializer }
import jp.co.dzl.example.akka.api.di.{ ServiceModule, HandlerModule, ConfigModule, AkkaModule }
import jp.co.dzl.example.akka.api.handler.RootHandler
import scaldi.{ Injector, Injectable }

import scala.concurrent.ExecutionContextExecutor

trait MainService extends Injectable {
  implicit val module: Injector =
    new AkkaModule :: new ConfigModule :: new HandlerModule :: new ServiceModule

  implicit val system: ActorSystem = inject[ActorSystem]
  implicit val executor: ExecutionContextExecutor = system.dispatcher
  implicit val materializer: Materializer = ActorMaterializer()

  val host = inject[String](identified by "http.listen.host")
  val port = inject[Int](identified by "http.listen.port")
  val handler = inject[RootHandler]
}

object Main extends App with MainService {
  Http().bindAndHandle(handler.routes, host, port)
} 
Example 43
Source File: HttpFeeRateProvider.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.feeprovider

import java.time.{Duration, Instant}

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, Uri}
import akka.util.ByteString
import org.bitcoins.core.api.FeeRateApi
import org.bitcoins.core.util.TimeUtil
import org.bitcoins.core.wallet.fee.FeeUnit

import scala.concurrent.{ExecutionContextExecutor, Future}
import scala.util.Try

object HttpFeeRateProvider {

  def makeApiCall(uri: Uri)(implicit system: ActorSystem): Future[String] = {
    implicit val ec: ExecutionContextExecutor = system.dispatcher
    Http()
      .singleRequest(HttpRequest(uri = uri))
      .flatMap(response =>
        response.entity.dataBytes
          .runFold(ByteString.empty)(_ ++ _)
          .map(payload => payload.decodeString(ByteString.UTF_8)))
  }
}

abstract class HttpFeeRateProvider extends FeeRateApi {
  implicit protected val system: ActorSystem

  protected def uri: Uri

  protected def converter(str: String): Try[FeeUnit]

  def getFeeRate: Future[FeeUnit] = {
    HttpFeeRateProvider
      .makeApiCall(uri)
      .flatMap(ret => Future.fromTry(converter(ret)))(system.dispatcher)
  }
}

abstract class CachedHttpFeeRateProvider extends HttpFeeRateProvider {

  private var cachedFeeRateOpt: Option[(FeeUnit, Instant)] = None

  val cacheDuration: Duration = Duration.ofMinutes(5)

  private def updateFeeRate(): Future[FeeUnit] = {
    implicit val ec: ExecutionContextExecutor = system.dispatcher
    super.getFeeRate.map { feeRate =>
      cachedFeeRateOpt = Some(feeRate, TimeUtil.now)
      feeRate
    }
  }

  override def getFeeRate: Future[FeeUnit] = {
    cachedFeeRateOpt match {
      case None =>
        updateFeeRate()
      case Some((cachedFeeRate, time)) =>
        val now = TimeUtil.now
        if (time.plus(cacheDuration).isAfter(now)) {
          updateFeeRate()
        } else {
          Future.successful(cachedFeeRate)
        }
    }
  }
} 
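A minimal sketch (the endpoint URL is illustrative, not a real fee API): makeApiCall returns the response body as a String, using the system dispatcher as the ExecutionContextExecutor for the streaming fold.

import akka.actor.ActorSystem
import akka.http.scaladsl.model.Uri
import org.bitcoins.feeprovider.HttpFeeRateProvider
import scala.concurrent.Future

object FeeRateCallSketch extends App {
  implicit val system: ActorSystem = ActorSystem("fee-rate-example")

  // Fetch the body of an illustrative endpoint and print it once available.
  val body: Future[String] = HttpFeeRateProvider.makeApiCall(Uri("https://example.com/fees"))
  body.foreach(println)(system.dispatcher)
}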
Example 44
Source File: InterpreterSpec.scala    From hammock   with MIT License 5 votes vote down vote up
package hammock
package fetch

import cats.effect.IO
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AsyncFlatSpec
import scala.concurrent.ExecutionContextExecutor
import Interpreter._

class InterpreterSpec extends AsyncFlatSpec with Matchers {

  behavior of "node.Interpreter.trans"

  implicit override def executionContext: ExecutionContextExecutor =
    scala.scalajs.concurrent.JSExecutionContext.Implicits.queue
  implicit val cs = IO.contextShift(executionContext)

  Seq(
    ("Options", (uri: Uri, headers: Map[String, String]) => Ops.options(uri, headers)),
    ("Get", (uri: Uri, headers: Map[String, String]) => Ops.get(uri, headers)),
    ("Head", (uri: Uri, headers: Map[String, String]) => Ops.head(uri, headers)),
    ("Post", (uri: Uri, headers: Map[String, String]) => Ops.post(uri, headers, None)),
    ("Put", (uri: Uri, headers: Map[String, String]) => Ops.put(uri, headers, None)),
    ("Delete", (uri: Uri, headers: Map[String, String]) => Ops.delete(uri, headers)),
    ("Trace", (uri: Uri, headers: Map[String, String]) => Ops.trace(uri, headers)),
    ("Patch", (uri: Uri, headers: Map[String, String]) => Ops.patch(uri, headers, None))
  ) foreach {
    case (method, operation) =>
      it should s"get response from mocky with $method requests" in {
        operation(uri"http://www.mocky.io/v2/5185415ba171ea3a00704eed", Map("mock" -> "header"))
          .foldMap(Interpreter[IO].trans)
          .unsafeToFuture
          .map { resp =>
            resp.status.code shouldBe 200
          }
      }
  }
} 
Example 45
Source File: TraceFriendlyExecutionContextExecutor.scala    From money   with Apache License 2.0 5 votes vote down vote up
package com.comcast.money.core.concurrent

import com.comcast.money.core.internal.{ MDCSupport, SpanLocal }
import com.comcast.money.core.logging.TraceLogging
import org.slf4j.MDC

import scala.concurrent.{ ExecutionContext, ExecutionContextExecutor }

class TraceFriendlyExecutionContextExecutor(wrapped: ExecutionContext)
  extends ExecutionContextExecutor with TraceLogging {

  lazy val mdcSupport = new MDCSupport()

  override def execute(task: Runnable): Unit = {
    val inherited = SpanLocal.current
    val submittingThreadsContext = MDC.getCopyOfContextMap

    wrapped.execute(
      new Runnable {
        override def run = {
          mdcSupport.propogateMDC(Option(submittingThreadsContext))
          SpanLocal.clear()
          inherited.foreach(SpanLocal.push)
          try {
            task.run
          } catch {
            case t: Throwable =>
              logException(t)
              throw t
          } finally {
            SpanLocal.clear()
            MDC.clear()
          }
        }
      })
  }

  override def reportFailure(t: Throwable): Unit = wrapped.reportFailure(t)
}

object TraceFriendlyExecutionContextExecutor {
  object Implicits {
    implicit lazy val global: TraceFriendlyExecutionContextExecutor = new TraceFriendlyExecutionContextExecutor(scala.concurrent.ExecutionContext.global)
  }

  def apply(ec: ExecutionContext) = new TraceFriendlyExecutionContextExecutor(ec)
} 
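A minimal usage sketch, assuming the money library is available: the trace-friendly global context propagates the submitting thread's span and MDC into each submitted task.

import com.comcast.money.core.concurrent.TraceFriendlyExecutionContextExecutor.Implicits.global
import scala.concurrent.Future

object TraceFriendlyUsage extends App {
  // The task sees the span and MDC captured from the submitting thread.
  Future(println("sees the span and MDC captured from the submitting thread"))
}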
Example 46
Source File: Main.scala    From Pi-Akka-Cluster   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.akka_oled

import akka.NotUsed
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.cluster.sharding.typed.scaladsl.{ClusterSharding, Entity}
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.management.scaladsl.AkkaManagement
import akka.persistence.typed.PersistenceId
import akka.stream.Materializer
import akkapi.cluster.{ClusterStatusTracker, OledClusterVisualizer, OledDriver, Settings}
import spray.json._

import scala.concurrent.ExecutionContextExecutor

object Main extends SprayJsonSupport with DefaultJsonProtocol {

  case class AddPoints(points: Int)

  implicit val transactionFormat = jsonFormat1(AddPoints)

  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { ctx =>
    implicit val system = ctx.system
    implicit val untypedSystem: akka.actor.ActorSystem = ctx.system.toClassic
    implicit val ec: ExecutionContextExecutor = ctx.system.executionContext

    val oledDriver = ctx.spawn(OledDriver(settings), "oled-driver")
    oledDriver ! OledDriver.RegisterView("Cluster State", 0)
    oledDriver ! OledDriver.RegisterView("Sharding State", 1)

    val clusterView = ctx.spawn(OledClusterVisualizer(0, settings, oledDriver), "oled-cluster-view")
    val clusterStatusTracker = ctx.spawn(ClusterStatusTracker(settings, None), "cluster-status-tracker")
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(clusterView)

    val shardVisualizer = ctx.spawn(OledShardingVisualizer(1, oledDriver), "oled-sharding-view")

    val sharding = ClusterSharding(ctx.system)
    sharding.init(Entity(typeKey = ClientEntity.TypeKey) { entityContext =>
      ClientEntity(entityContext.entityId,
        PersistenceId(entityContext.entityTypeKey.name, entityContext.entityId),
        shardVisualizer)
    })
    val tracker = ctx.spawn(ShardStateTracker(shardVisualizer), "oled-sharding-tracker")
    ctx.spawn(ShardStateScheduler(sharding.shardState, tracker), "oled-sharding-scheduler")

    val routes = new Routes(sharding)

    // Materializer for the HTTP server binding
    implicit val mat: Materializer = Materializer.createMaterializer(ctx.system.toClassic)
    Http()(ctx.system.toClassic).bindAndHandle(routes.route,
      settings.config.getString("cluster-node-configuration.external-ip"), 8080)

    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }
}

object DisplayClusterShardingMain {
  def main(args: Array[String]): Unit = {
    val settings = Settings()
    val system = ActorSystem[NotUsed](Main(settings), "akka-oled", settings.config)

    // Start Akka HTTP Management extension
    AkkaManagement(system).start()
  }
} 
Example 47
Source File: Main.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.monitoring.metrics

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import kamon.Kamon

import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, ExecutionContextExecutor, Future}

object Main {
  def main(args: Array[String]): Unit = {
    Kamon.init()
    implicit val system: ActorSystem = ActorSystem("events-actor-system")
    implicit val materializer: ActorMaterializer = ActorMaterializer()
    val binding = OpenWhiskEvents.start(system.settings.config)
    addShutdownHook(binding)
  }

  private def addShutdownHook(binding: Future[Http.ServerBinding])(implicit actorSystem: ActorSystem,
                                                                   materializer: ActorMaterializer): Unit = {
    implicit val ec: ExecutionContextExecutor = actorSystem.dispatcher
    sys.addShutdownHook {
      Await.result(binding.map(_.unbind()), 30.seconds)
      Await.result(actorSystem.whenTerminated, 30.seconds)
    }
  }
} 
Example 48
Source File: AkkaAutowiredImplicits.scala    From akka-spring-boot   with Apache License 2.0 5 votes vote down vote up
package com.github.scalaspring.akka

import akka.actor.ActorSystem
import org.springframework.beans.factory.annotation.Autowired

import scala.concurrent.ExecutionContextExecutor

trait AkkaAutowiredImplicits {

  @Autowired implicit val system: ActorSystem = null
  @Autowired(required = false) private val _executor: ExecutionContextExecutor = null

  // executor property that defaults to the actor system's dispatcher if no executor bean defined in the application context
  implicit def executor: ExecutionContextExecutor = {
    _executor match {
      case null => system.dispatcher
      case _ => _executor
    }
  }
} 
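A hedged sketch (the bean name is illustrative): a Spring-managed component mixes in the trait and, absent an executor bean in the application context, its Futures run on the ActorSystem's dispatcher.

import com.github.scalaspring.akka.AkkaAutowiredImplicits
import org.springframework.stereotype.Component
import scala.concurrent.Future

@Component
class GreetingService extends AkkaAutowiredImplicits {
  // Uses the implicit executor supplied by the trait (system.dispatcher by default).
  def greet(name: String): Future[String] = Future(s"hello, $name")
}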
Example 49
Source File: HttpMetricsRoute.scala    From akka-http-metrics   with Apache License 2.0 5 votes vote down vote up
package fr.davit.akka.http.metrics.core.scaladsl.server

import akka.NotUsed
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.http.scaladsl.server._
import akka.http.scaladsl.settings.{ParserSettings, RoutingSettings}
import akka.stream.Materializer
import akka.stream.scaladsl.Flow
import fr.davit.akka.http.metrics.core.HttpMetricsHandler
import fr.davit.akka.http.metrics.core.scaladsl.model.PathLabelHeader

import scala.concurrent.{ExecutionContextExecutor, Future}

object HttpMetricsRoute {

  implicit def apply(route: Route): HttpMetricsRoute = new HttpMetricsRoute(route)

}


final class HttpMetricsRoute private (route: Route) extends HttpMetricsDirectives {

  private def markUnhandled(inner: Route): Route = {
    Directives.mapResponse(markUnhandled).tapply(_ => inner)
  }

  private def markUnhandled(response: HttpResponse): HttpResponse = {
    response.addHeader(PathLabelHeader.Unhandled)
  }

  def recordMetrics(metricsHandler: HttpMetricsHandler)(
      implicit
      routingSettings: RoutingSettings,
      parserSettings: ParserSettings,
      materializer: Materializer,
      routingLog: RoutingLog,
      executionContext: ExecutionContextExecutor = null,
      rejectionHandler: RejectionHandler = RejectionHandler.default,
      exceptionHandler: ExceptionHandler = null
  ): Flow[HttpRequest, HttpResponse, NotUsed] = {
    val effectiveEC = if (executionContext ne null) executionContext else materializer.executionContext

    {
      // override the execution context passed as parameter
      implicit val executionContext: ExecutionContextExecutor = effectiveEC
      Flow[HttpRequest]
        .mapAsync(1)(recordMetricsAsync(metricsHandler))
        .watchTermination() {
          case (mat, completion) =>
            // every connection materializes a stream.
            metricsHandler.onConnection(completion)
            mat
        }
    }
  }

  def recordMetricsAsync(metricsHandler: HttpMetricsHandler)(
      implicit
      routingSettings: RoutingSettings,
      parserSettings: ParserSettings,
      materializer: Materializer,
      routingLog: RoutingLog,
      executionContext: ExecutionContextExecutor = null,
      rejectionHandler: RejectionHandler = RejectionHandler.default,
      exceptionHandler: ExceptionHandler = null
  ): HttpRequest => Future[HttpResponse] = {
    val effectiveEC               = if (executionContext ne null) executionContext else materializer.executionContext
    val effectiveRejectionHandler = rejectionHandler.mapRejectionResponse(markUnhandled)
    val effectiveExceptionHandler = ExceptionHandler.seal(exceptionHandler).andThen(markUnhandled(_))

    {
      // override the execution context passed as parameter, rejection and error handler
      implicit val executionContext: ExecutionContextExecutor = effectiveEC
      implicit val rejectionHandler: RejectionHandler         = effectiveRejectionHandler
      implicit val exceptionHandler: ExceptionHandler         = effectiveExceptionHandler

      request =>
        val response = Route.asyncHandler(route).apply(request)
        metricsHandler.onRequest(request, response)
        response
    }
  }
} 
Example 50
Source File: HttpApp.scala    From darwin   with Apache License 2.0 5 votes vote down vote up
package it.agilelab.darwin.server.rest

import java.util.concurrent.Executor

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.RouteConcatenation
import akka.stream.ActorMaterializer
import com.typesafe.config.Config
import it.agilelab.darwin.common.Logging

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor}

class HttpApp(config: Config, services: Service*)
             (implicit system: ActorSystem, materializer: ActorMaterializer) extends Logging {
  def run(): Unit = {
    val interface = config.getString("interface")
    val port = config.getInt("port")


    val route = RouteConcatenation.concat(services.map(_.route): _*)

    log.info("Starting http server on {}:{}", interface, port)
    val eventuallyBinding = Http().bindAndHandle(route, interface, port)
    val binding = Await.result(eventuallyBinding, Duration.Inf)
    log.info("Started http server on {}:{}", interface, port)

    val shutdownThread = new Thread(new Runnable {
      override def run(): Unit = {
        implicit val ec: ExecutionContext = newSameThreadExecutor
        log.info("Received shutdown hook")

        val termination = for {
          _ <- binding.unbind()
          terminated <- system.terminate()
        } yield terminated

        Await.ready(termination, Duration.Inf)
        log.info("Shutdown")
      }
    })

    shutdownThread.setName("shutdown")

    Runtime.getRuntime.addShutdownHook(shutdownThread)

    log.info("registered shutdown hook")
  }


  private def newSameThreadExecutor: ExecutionContextExecutor = ExecutionContext.fromExecutor(new Executor {
    override def execute(command: Runnable): Unit = command.run()
  })
}

object HttpApp {
  def apply(config:Config, services: Service*)(implicit system: ActorSystem, materializer: ActorMaterializer): HttpApp =
    new HttpApp(config, services: _*)
} 
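A small standalone sketch of the same idea as newSameThreadExecutor above: an ExecutionContextExecutor that runs each task on the calling thread, which keeps the shutdown-hook continuations off any thread pool that may itself already be shutting down.

import java.util.concurrent.Executor
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}

object SameThreadContext {
  // Every task runs synchronously on the thread that submits it.
  val instance: ExecutionContextExecutor = ExecutionContext.fromExecutor(new Executor {
    override def execute(command: Runnable): Unit = command.run()
  })
}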
Example 51
Source File: CyrusServer.scala    From gatling-imap   with GNU Affero General Public License v3.0 5 votes vote down vote up
package com.linagora.gatling.imap

import org.slf4j.{Logger, LoggerFactory}
import org.testcontainers.containers.GenericContainer

import com.yahoo.imapnio.async.request.CreateFolderCommand

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor}

object CyrusServer extends Server {

  private val imapPort = 143
  private val logger: Logger = LoggerFactory.getLogger(CyrusServer.getClass)

  class RunningCyrusServer(val container: GenericContainer[_]) extends RunningServer with ImapTestUtils {
    protected val logger: Logger = CyrusServer.logger
    lazy val mappedImapPort: Integer = container.getMappedPort(imapPort)

    def addUser(login: String, password: String): Unit = {
      container.execInContainer("bash", "-c", s"echo $password | saslpasswd2 -u test -c $login -p")
      implicit val executionContext: ExecutionContextExecutor = ExecutionContext.global
      Await.result(
        connect(mappedImapPort)
          .flatMap(implicit session =>
            for {
              _ <- Imap.login("cyrus", "cyrus")
              _ <- Imap.rawCommand(new CreateFolderCommand(s"user.$login"))
              _ <- Imap.disconnect()
            } yield ()), 1.minute)

    }
    def stop(): Unit = container.stop()
  }

  def start(): RunningServer = {
    val cyrus = new GenericContainer("linagora/cyrus-imap")
    cyrus.addExposedPort(imapPort)
    cyrus.start()
    new RunningCyrusServer(cyrus)
  }
} 
Example 52
Source File: IOSuite.scala    From kafka-journal   with MIT License 5 votes vote down vote up
package com.evolutiongaming.kafka.journal

import cats.Parallel
import cats.effect.{Clock, Concurrent, ContextShift, IO, Timer}
import cats.implicits._
import com.evolutiongaming.smetrics.MeasureDuration
import org.scalatest.Succeeded

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future}

object IOSuite {
  val Timeout: FiniteDuration = 5.seconds

  implicit val executor: ExecutionContextExecutor = ExecutionContext.global

  implicit val contextShiftIO: ContextShift[IO]     = IO.contextShift(executor)
  implicit val concurrentIO: Concurrent[IO]         = IO.ioConcurrentEffect
  implicit val timerIO: Timer[IO]                   = IO.timer(executor)
  implicit val parallel: Parallel[IO]               = IO.ioParallel
  implicit val measureDuration: MeasureDuration[IO] = MeasureDuration.fromClock(Clock[IO])

  def runIO[A](io: IO[A], timeout: FiniteDuration = Timeout): Future[Succeeded.type] = {
    io.timeout(timeout).as(Succeeded).unsafeToFuture
  }

  implicit class IOOps[A](val self: IO[A]) extends AnyVal {
    def run(timeout: FiniteDuration = Timeout): Future[Succeeded.type] = runIO(self, timeout)
  }
} 
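A hedged sketch (the test class name is illustrative, and the import path assumes ScalaTest 3.1+): with IOSuite's implicits in scope, an IO can be turned into a Future[Assertion] for ScalaTest's async styles via run().

import cats.effect.IO
import com.evolutiongaming.kafka.journal.IOSuite._
import org.scalatest.funsuite.AsyncFunSuite

class ExampleIOSpec extends AsyncFunSuite {
  test("completes within the default 5 second timeout") {
    // run() applies the default timeout and yields a Future[Succeeded.type].
    IO(1 + 1).run()
  }
}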
Example 53
Source File: AkkaUnitTestLike.scala    From reactive-kinesis   with Apache License 2.0 5 votes vote down vote up
package com.weightwatchers.reactive.kinesis.common

import akka.actor.{ActorSystem, Scheduler}
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKitBase
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.ExecutionContextExecutor


trait AkkaUnitTestLike extends TestKitBase with ScalaFutures with BeforeAndAfterAll {
  self: Suite =>

  implicit lazy val config: Config                = ConfigFactory.load("sample.conf")
  implicit lazy val system: ActorSystem           = ActorSystem(suiteName, config)
  implicit lazy val scheduler: Scheduler          = system.scheduler
  implicit lazy val mat: Materializer             = ActorMaterializer()
  implicit lazy val ctx: ExecutionContextExecutor = system.dispatcher

  abstract override def afterAll(): Unit = {
    super.afterAll()
    // intentionally shutdown the actor system last.
    system.terminate().futureValue
  }
} 
Example 54
Source File: ConfigSpec.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness

import java.io.{BufferedWriter, File, FileWriter}
import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.TestProbe
import com.typesafe.config.ConfigFactory
import com.webtrends.harness.app.HarnessActor.ConfigChange
import com.webtrends.harness.config.ConfigWatcherActor
import com.webtrends.harness.health.{ComponentState, HealthComponent}
import com.webtrends.harness.service.messages.CheckHealth
import org.specs2.mutable.SpecificationWithJUnit

import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration.FiniteDuration
import scala.reflect.io.{Directory, Path}

class ConfigSpec extends SpecificationWithJUnit {
  implicit val dur = FiniteDuration(2, TimeUnit.SECONDS)
  new File("services/test/conf").mkdirs()
  implicit val sys = ActorSystem("system", ConfigFactory.parseString( """
    akka.actor.provider = "akka.actor.LocalActorRefProvider"
    services { path = "services" }
    """).withFallback(ConfigFactory.load))

  implicit val ec: ExecutionContextExecutor =  sys.dispatcher

  val probe = TestProbe()
  val parent = sys.actorOf(Props(new Actor {
    val child = context.actorOf(ConfigWatcherActor.props, "child")
    def receive = {
      case x if sender == child => probe.ref forward x
      case x => child forward x
    }
  }))

  sequential

  "config " should {
    "be in good health" in {
      probe.send(parent, CheckHealth)
      val msg = probe.expectMsgClass(classOf[HealthComponent])
      msg.state equals ComponentState.NORMAL
    }

    "detect changes in config" in {
      val file = new File("services/test/conf/test.conf")
      val bw = new BufferedWriter(new FileWriter(file))
      bw.write("test = \"value\"")
      bw.close()
      val msg = probe.expectMsgClass(classOf[ConfigChange])
      msg.isInstanceOf[ConfigChange]
    }
  }

  step {
    sys.terminate().onComplete { _ =>
        Directory(Path(new File("services"))).deleteRecursively()
    }
  }
} 
Example 55
Source File: Execution.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.libs.iteratee

import java.util.ArrayDeque
import scala.annotation.tailrec
import scala.concurrent.{ ExecutionContextExecutor, ExecutionContext }


    @tailrec
    private def executeScheduled(): Unit = {
      local.get match {
        case Empty =>
          // Nothing to run
          ()
        case next: Runnable =>
          // Mark the queue of Runnables after this one as empty
          local.set(Empty)
          // Run the only scheduled Runnable
          next.run()
          // Recurse in case more Runnables were added
          executeScheduled()
        case arrayDeque: ArrayDeque[_] =>
          val runnables = arrayDeque.asInstanceOf[ArrayDeque[Runnable]]
          // Rather than recursing, we can use a more efficient
          // while loop. The value of the ThreadLocal will stay as
          // an ArrayDeque until all the scheduled Runnables have been
          // run.
          while (!runnables.isEmpty) {
            val runnable = runnables.removeFirst()
            runnable.run()
          }
        case illegal =>
          throw new IllegalStateException(s"Unsupported trampoline ThreadLocal value: $illegal")
      }
    }

    def reportFailure(t: Throwable): Unit = t.printStackTrace()
  }

} 
Example 56
Source File: EncryBaseApiRoute.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.api.http.routes

import akka.http.scaladsl.model.{ContentTypes, HttpEntity, StatusCodes}
import akka.http.scaladsl.server.{Directive, Directive1, Route}
import encry.api.http.ApiRoute
import io.circe.Json
import org.encryfoundation.common.crypto.encoding.Base58Check
import org.encryfoundation.common.modifiers.mempool.transaction.EncryAddress.Address
import org.encryfoundation.common.utils.Algos
import org.encryfoundation.common.utils.TaggedTypes.{ADKey, ModifierId}

import scala.concurrent.{ExecutionContextExecutor, Future}
import scala.util.Success

trait EncryBaseApiRoute extends ApiRoute {

  implicit val ec: ExecutionContextExecutor = context.dispatcher

  protected def toJsonResponse(js: Json): Route = {
    val resp = complete(HttpEntity(ContentTypes.`application/json`, js.spaces2))
    withCors(resp)
  }

  protected def toJsonResponse(fn: Future[Json]): Route = onSuccess(fn) { toJsonResponse }

  protected def toJsonOptionalResponse(fn: Future[Option[Json]]): Route = {
    onSuccess(fn) {
      case Some(v) => toJsonResponse(v)
      case None => withCors(complete(StatusCodes.NotFound))
    }
  }

  val paging: Directive[(Int, Int)] = parameters("offset".as[Int] ? 0, "limit".as[Int] ? 50)

  val modifierId: Directive1[ModifierId] = pathPrefix(Segment).flatMap { h =>
    Algos.decode(h) match {
      case Success(header) => provide(ModifierId @@ header)
      case _ => reject
    }
  }

  implicit class OkJsonResp(fn: Future[Json]) {
    def okJson(): Route = toJsonResponse(fn)
  }

  implicit class OkJsonOptResp(fn: Future[Option[Json]]) {
    def okJson(): Route = toJsonOptionalResponse(fn)
  }
} 
Example 57
Source File: BlockingIO.scala    From gbf-raidfinder   with MIT License 5 votes vote down vote up
package walfie.gbf.raidfinder.util

import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, ThreadFactory}
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future, Promise, blocking}
import scala.util.control.NonFatal
import monix.execution.Scheduler

// https://github.com/alexandru/scala-best-practices/blob/master/sections/4-concurrency-parallelism.md
object BlockingIO {
  private val ioThreadPool = Scheduler.io(name = "io-thread")

  def future[T](t: => T): Future[T] = {
    val p = Promise[T]()

    val runnable = new Runnable {
      def run() = try {
        p.success(blocking(t))
      } catch {
        case NonFatal(ex) => p.failure(ex)
      }
    }

    ioThreadPool.execute(runnable)

    p.future
  }
} 
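A hypothetical caller wraps a blocking operation so it runs on the dedicated IO scheduler instead of the default pool; the sleep below merely stands in for real blocking work:

import scala.concurrent.Await
import scala.concurrent.duration._

import walfie.gbf.raidfinder.util.BlockingIO

object BlockingIOUsage extends App {
  // The body runs on the io-thread pool; the returned Future completes with its result.
  val answer = BlockingIO.future {
    Thread.sleep(100) // stands in for a blocking call such as JDBC or file IO
    42
  }

  println(Await.result(answer, 5.seconds)) // prints 42
}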
Example 58
Source File: AkkaTest.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.testing

import java.util
import java.util.concurrent.{Executors, ScheduledExecutorService}

import akka.NotUsed
import akka.actor.{ActorSystem, Scheduler}
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.Materializer
import akka.util.ByteString
import com.daml.grpc.adapter.{ExecutionSequencerFactory, SingleThreadExecutionSequencerPool}
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
import com.typesafe.scalalogging.LazyLogging
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContextExecutor, Future}
import scala.util.control.NonFatal

trait AkkaTest extends BeforeAndAfterAll with LazyLogging { self: Suite =>
  // TestEventListener is needed for log testing
  private val loggers =
    util.Arrays.asList("akka.event.slf4j.Slf4jLogger", "akka.testkit.TestEventListener")
  protected implicit val sysConfig: Config = ConfigFactory
    .load()
    .withValue("akka.loggers", ConfigValueFactory.fromIterable(loggers))
    .withValue("akka.logger-startup-timeout", ConfigValueFactory.fromAnyRef("30s"))
    .withValue("akka.stdout-loglevel", ConfigValueFactory.fromAnyRef("INFO"))
  protected implicit val system: ActorSystem = ActorSystem("test", sysConfig)
  protected implicit val ec: ExecutionContextExecutor =
    system.dispatchers.lookup("test-dispatcher")
  protected implicit val scheduler: Scheduler = system.scheduler
  protected implicit val schedulerService: ScheduledExecutorService =
    Executors.newSingleThreadScheduledExecutor()
  protected implicit val materializer: Materializer = Materializer(system)
  protected implicit val esf: ExecutionSequencerFactory =
    new SingleThreadExecutionSequencerPool("testSequencerPool")
  protected val timeout: FiniteDuration = 2.minutes
  protected val shortTimeout: FiniteDuration = 5.seconds

  protected def await[T](fun: => Future[T]): T = Await.result(fun, timeout)

  protected def awaitShort[T](fun: => Future[T]): T = Await.result(fun, shortTimeout)

  protected def drain(source: Source[ByteString, NotUsed]): ByteString = {
    val futureResult: Future[ByteString] = source.runFold(ByteString.empty) { (a, b) =>
      a.concat(b)
    }
    awaitShort(futureResult)
  }

  protected def drain[A, B](source: Source[A, B]): Seq[A] = {
    val futureResult: Future[Seq[A]] = source.runWith(Sink.seq)
    awaitShort(futureResult)
  }

  override protected def afterAll(): Unit = {
    try {
      val _ = await(system.terminate())
    } catch {
      case NonFatal(_) => ()
    }
    schedulerService.shutdownNow()
    super.afterAll()
  }
} 
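A hypothetical suite mixing in the fixture could look like the sketch below; it assumes the test application.conf defines the test-dispatcher that AkkaTest looks up:

import akka.stream.scaladsl.Source
import org.scalatest.{Matchers, WordSpec}

// Hypothetical spec built on the AkkaTest fixture above.
class DrainSpec extends WordSpec with Matchers with AkkaTest {
  "drain" should {
    "collect every element of a finite source" in {
      drain(Source(1 to 3)) shouldBe Seq(1, 2, 3)
    }
  }
}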
Example 59
Source File: AkkaStreamPerformanceTest.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.perf.util

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.daml.ledger.api.testing.utils.Resource
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}

@SuppressWarnings(Array("org.wartremover.warts.LeakingSealed"))
abstract class AkkaStreamPerformanceTest extends PerformanceTest {

  protected val logger: Logger = LoggerFactory.getLogger(this.getClass)

  type ResourceType

  @volatile protected var system: ActorSystem = _
  @volatile protected var materializer: Materializer = _
  @transient protected implicit val ec: ExecutionContextExecutor = ExecutionContext.global

  protected def resource: Resource[ResourceType]

  protected def setup(): Unit = {
    resource.setup()
    implicit val sys: ActorSystem = ActorSystem(this.getClass.getSimpleName.stripSuffix("$"))
    system = sys
    materializer = Materializer(system)
  }

  protected def teardown(): Unit = {
    await(system.terminate())
    resource.close()
  }

  implicit class FixtureSetup[T](using: Using[T]) extends Serializable {
    def withLifecycleManagement(additionalSetup: T => Unit = _ => ()): Using[T] =
      using
        .setUp { input =>
          try {
            setup()
            additionalSetup(input)
          } catch {
            case t: Throwable =>
              logger.error("Setup failed.", t)
              throw t
          }
        }
        .tearDown { _ =>
          try {
            teardown()
          } catch {
            case t: Throwable =>
              logger.error("Teardown failed.", t)
              throw t
          }
        }
  }
} 
Example 60
Source File: DispatcherTest.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.akkastreams.dispatcher

import java.util.concurrent.atomic.AtomicReference

import akka.NotUsed
import akka.stream.scaladsl.{Keep, Sink}
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import com.daml.platform.akkastreams.dispatcher.SubSource.OneAfterAnother
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.{ExecutionContextExecutor, Future}

//TODO: merge/review the tests we have around the Dispatcher!
class DispatcherTest extends WordSpec with AkkaBeforeAndAfterAll with Matchers with ScalaFutures {

  override implicit def patienceConfig: PatienceConfig =
    PatienceConfig(scaled(Span(10, Seconds)), scaled(Span(250, Milliseconds)))

  "A Dispatcher" should {
    "not race when creating new subscriptions" in {
      // The test setup here is a little different from the above tests,
      // because we wanted to be specific about emitted pairs and use of Thread.sleep.

      implicit val ec: ExecutionContextExecutor = materializer.executionContext

      val elements = new AtomicReference(Map.empty[Int, Int])
      def readElement(i: Int): Future[Int] = Future {
        Thread.sleep(10) // In a previous version of Dispatcher, this sleep caused a race condition.
        elements.get()(i)
      }
      def readSuccessor(i: Int): Int = i + 1

      // compromise between catching flakes and not taking too long
      0 until 25 foreach { _ =>
        val d = Dispatcher("test", 0, 0)

        // Verify that the results are what we expected
        val subscriptions = 1 until 10 map { i =>
          elements.updateAndGet(m => m + (i -> i))
          d.signalNewHead(i)
          d.startingAt(i - 1, OneAfterAnother(readSuccessor, readElement))
            .toMat(Sink.collection)(Keep.right[NotUsed, Future[Seq[(Int, Int)]]])
            .run()
        }

        d.close()

        subscriptions.zip(1 until 10) foreach {
          case (f, i) =>
            whenReady(f) { vals =>
              vals.map(_._1) should contain theSameElementsAs (i to 9)
              vals.map(_._2) should contain theSameElementsAs (i until 10)
            }
        }
      }
    }
  }
} 
Example 61
Source File: TestConcurrentAccess.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.configurator

import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{Executors, TimeUnit}

import oharastream.ohara.client.configurator.NodeApi
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor, Future}

class TestConcurrentAccess extends OharaTest {
  private[this] val configurator = Configurator.builder.fake().build()

  private[this] val nodeApi = NodeApi.access.hostname(configurator.hostname).port(configurator.port)

  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(10, TimeUnit.SECONDS))

  
  // Each thread creates a node, deletes it, then verifies the deleted node no longer appears in list().
  @Test
  def deletedObjectShouldDisappearFromGet(): Unit = {
    val threadCount                                         = 10
    val threadsPool                                         = Executors.newFixedThreadPool(threadCount)
    val unmatchedCount                                      = new AtomicInteger()
    implicit val executionContext: ExecutionContextExecutor = ExecutionContext.fromExecutor(threadsPool)
    (0 until threadCount).foreach { _ =>
      threadsPool.execute { () =>
        val nodeName = CommonUtils.randomString(10)
        val nodes = result(
          nodeApi.request
            .nodeName(nodeName)
            .user(CommonUtils.randomString(10))
            .password(CommonUtils.randomString(10))
            .create()
            .flatMap(node => nodeApi.delete(node.key))
            .flatMap(_ => nodeApi.list())
        )
        if (nodes.exists(_.hostname == nodeName)) unmatchedCount.incrementAndGet()
      }
    }
    threadsPool.shutdown()
    threadsPool.awaitTermination(60, TimeUnit.SECONDS) shouldBe true
    unmatchedCount.get() shouldBe 0
  }

  @After
  def tearDown(): Unit = Releasable.close(configurator)
} 
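The test builds its ExecutionContextExecutor from a thread pool that it shuts down explicitly. As a design note, scala.concurrent also offers fromExecutorService, which keeps a handle on the pool so the context itself can be shut down; a minimal sketch:

import java.util.concurrent.{Executors, TimeUnit}

import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService, Future}

object FixedPoolSketch extends App {
  // fromExecutorService returns an ExecutionContextExecutorService, so the underlying
  // pool can be shut down through the execution context itself.
  implicit val ec: ExecutionContextExecutorService =
    ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(4))

  Future(println(s"running on ${Thread.currentThread.getName}"))

  ec.shutdown()
  ec.awaitTermination(10, TimeUnit.SECONDS)
}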
Example 62
Source File: DumpCompleteDocumentFromEs.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.analytics.main

import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import cmwell.analytics.data.{DataWriterFactory, IndexWithCompleteDocument}
import cmwell.analytics.downloader.PartitionedDownloader
import cmwell.analytics.util.TimestampConversion.timestampConverter
import cmwell.analytics.util.{DiscoverEsTopology, FindContactPoints}
import org.apache.commons.io.FileUtils
import org.apache.log4j.LogManager
import org.rogach.scallop.{ScallopConf, ScallopOption}

import scala.concurrent.ExecutionContextExecutor

object DumpCompleteDocumentFromEs {

  def main(args: Array[String]): Unit = {

    val logger = LogManager.getLogger(DumpCompleteDocumentFromEs.getClass)

    // Since we expect this to be run on a CM-Well node, the default parallelism is to use half the processors
    // so as to avoid starving the CM-Well node from processor resources. A higher level of parallelism might
    // be possible (without interfering with CM-Well) since most of the work will actually be on the ES side.
    val defaultParallelism = 1 max (Runtime.getRuntime.availableProcessors / 2)

    implicit val system: ActorSystem = ActorSystem("dump-complete-document-from-es")
    implicit val executionContext: ExecutionContextExecutor = system.dispatcher
    implicit val actorMaterializer: ActorMaterializer = ActorMaterializer()

    try {

      object Opts extends ScallopConf(args) {

        val readIndex: ScallopOption[String] = opt[String]("read-index", short = 'i', descr = "The name of the index to read from (default: cm_well_all)", required = false)
        val parallelism: ScallopOption[Int] = opt[Int]("parallelism", short = 'p', descr = "The parallelism level", default = Some(defaultParallelism))

        val currentOnly: ScallopOption[Boolean] = opt[Boolean]("current-filter", short = 'c', descr = "Filter on current status", default = None)
        val lastModifiedGteFilter: ScallopOption[java.sql.Timestamp] = opt[java.sql.Timestamp]("lastmodified-gte-filter", descr = "Filter on lastModified >= <value>, where value is an ISO8601 timestamp", default = None)(timestampConverter)
        val pathPrefixFilter: ScallopOption[String] = opt[String]("path-prefix-filter", descr = "Filter on the path prefix matching <value>", default = None)

        val out: ScallopOption[String] = opt[String]("out", short = 'o', descr = "The path to save the output to", required = true)
        val format: ScallopOption[String] = opt[String]("format", short = 'f', descr = "The data format: either 'parquet' or 'csv'", default = Some("parquet"))
        val url: ScallopOption[String] = trailArg[String]("url", descr = "A CM-Well URL", required = true)

        verify()
      }

      val esContactPoint = FindContactPoints.es(Opts.url())
      val indexesOrAliasesToRead = Opts.readIndex.toOption.fold(Seq("cm_well_all"))(Seq(_))
      val esTopology = DiscoverEsTopology(esContactPoint = esContactPoint, aliases = indexesOrAliasesToRead)

      // Calling script should clear output directory as necessary.

      val objectExtractor = IndexWithCompleteDocument
      val dataWriterFactory = DataWriterFactory.file(format = Opts.format(), objectExtractor, outDirectory = Opts.out())

      PartitionedDownloader.runDownload(
        esTopology = esTopology,
        parallelism = Opts.parallelism(),

        currentOnly = Opts.currentOnly(),
        lastModifiedGteFilter = Opts.lastModifiedGteFilter.toOption,
        pathPrefixFilter = Opts.pathPrefixFilter.toOption,

        objectExtractor = objectExtractor,
        dataWriterFactory = dataWriterFactory,
        sourceFilter = false)

      // The Hadoop convention is to touch the (empty) _SUCCESS file to signal successful completion.
      FileUtils.touch(Paths.get(Opts.out(), "_SUCCESS").toFile)
    }
    catch {
      case ex: Throwable =>
        logger.error(ex.getMessage, ex)
        System.exit(1)
    }
    finally {
      system.terminate()
    }
  }
} 
Example 63
Source File: SinkRouteHandler.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.shabondi.sink

import java.time.{Duration => JDuration}
import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, StatusCodes}
import akka.http.scaladsl.server.{ExceptionHandler, Route}
import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.data.Row
import oharastream.ohara.common.util.Releasable
import oharastream.ohara.shabondi.common.{JsonSupport, RouteHandler, ShabondiUtils}
import org.apache.commons.lang3.StringUtils

import scala.collection.mutable.ArrayBuffer
import scala.compat.java8.DurationConverters._
import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration.Duration
import spray.json.DefaultJsonProtocol._
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._

private[shabondi] object SinkRouteHandler {
  def apply(config: SinkConfig)(implicit actorSystem: ActorSystem) =
    new SinkRouteHandler(config)
}

private[shabondi] class SinkRouteHandler(config: SinkConfig)(implicit actorSystem: ActorSystem) extends RouteHandler {
  implicit private val contextExecutor: ExecutionContextExecutor = actorSystem.dispatcher

  private val log              = Logger(classOf[SinkRouteHandler])
  private[sink] val dataGroups = SinkDataGroups(config)

  def scheduleFreeIdleGroups(interval: JDuration, idleTime: JDuration): Unit =
    actorSystem.scheduler.scheduleWithFixedDelay(Duration(1, TimeUnit.SECONDS), interval.toScala) { () =>
      {
        log.trace("scheduled free group, total group: {} ", dataGroups.size)
        dataGroups.freeIdleGroup(idleTime)
      }
    }

  private val exceptionHandler = ExceptionHandler {
    case ex: Throwable =>
      log.error(ex.getMessage, ex)
      complete((StatusCodes.InternalServerError, ex.getMessage))
  }

  private def fullyPollQueue(queue: RowQueue): Seq[Row] = {
    val buffer    = ArrayBuffer.empty[Row]
    var item: Row = queue.poll()
    while (item != null) {
      buffer += item
      item = queue.poll()
    }
    buffer.toSeq
  }

  private def apiUrl = ShabondiUtils.apiUrl

  def route(): Route = handleExceptions(exceptionHandler) {
    path("groups" / Segment) { groupId =>
      get {
        if (StringUtils.isAlphanumeric(groupId)) {
          val group  = dataGroups.createIfAbsent(groupId)
          val result = fullyPollQueue(group.queue).map(row => JsonSupport.toRowData(row))
          complete(result)
        } else {
          val entity =
            HttpEntity(ContentTypes.`text/plain(UTF-8)`, "Illegal group name, only accept alpha and numeric.")
          complete(StatusCodes.NotAcceptable -> entity)
        }
      } ~ {
        complete(StatusCodes.MethodNotAllowed -> s"Unsupported method, please reference: $apiUrl")
      }
    } ~ {
      complete(StatusCodes.NotFound -> s"Please reference: $apiUrl")
    }
  }

  override def close(): Unit = {
    Releasable.close(dataGroups)
  }
} 
Example 64
Source File: HttpUtil.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.analytics.util

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.RequestEntityAcceptance.Tolerated
import akka.http.scaladsl.model.{HttpMethod, HttpRequest, HttpResponse}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import akka.util.ByteString
import com.fasterxml.jackson.databind.{JsonNode, ObjectMapper}
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration.{MILLISECONDS, _}
import scala.concurrent.{Await, ExecutionContextExecutor, Future}

object HttpUtil {

  private val mapper = new ObjectMapper()

  private val config = ConfigFactory.load
  private val ReadTimeout = FiniteDuration(config.getDuration("extract-index-from-es.read-timeout").toMillis, MILLISECONDS)

  // Elasticsearch uses the POST verb in some places where the request is actually idempotent.
  // Requests that use POST, but are known to be idempotent can use this method.
  // The presence of any non-idempotent request in-flight causes Akka to not retry, and that will tend to result in
  // entire downloads failing more often.
  val SAFE_POST = HttpMethod(
    value = "POST",
    isSafe = true,
    isIdempotent = true,
    requestEntityAcceptance = Tolerated)

  def resultAsync(request: HttpRequest,
                  action: String)
                 (implicit system: ActorSystem,
                  executionContext: ExecutionContextExecutor,
                  actorMaterializer: ActorMaterializer): Future[ByteString] =
    Http().singleRequest(request).map {

      case HttpResponse(status, _, entity, _) if status.isSuccess =>
        entity.dataBytes
          .fold(ByteString.empty)(_ ++ _)
          .runWith(Sink.head)

      case HttpResponse(status, _, entity, _) =>
        val message = Await.result(entity.toStrict(10.seconds).map(_.data), 10.seconds).utf8String
        throw new RuntimeException(s"HTTP request for $action failed. Status code: $status, message:$message")
    }
      .flatMap(identity)

  def result(request: HttpRequest,
             action: String,
             timeout: FiniteDuration = ReadTimeout)
            (implicit system: ActorSystem,
             executionContext: ExecutionContextExecutor,
             actorMaterializer: ActorMaterializer): ByteString =
    Await.result(resultAsync(request, action), timeout)

  def jsonResult(request: HttpRequest,
                 action: String,
                 timeout: FiniteDuration = ReadTimeout)
                (implicit system: ActorSystem,
                 executionContext: ExecutionContextExecutor,
                 actorMaterializer: ActorMaterializer): JsonNode =
    mapper.readTree(result(request, action, timeout).utf8String)

  def jsonResultAsync(request: HttpRequest,
                      action: String)
                     (implicit system: ActorSystem,
                      executionContext: ExecutionContextExecutor,
                      actorMaterializer: ActorMaterializer): Future[JsonNode] =
    resultAsync(request, action).map((bytes: ByteString) => mapper.readTree(bytes.utf8String))
} 
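A hypothetical caller of HttpUtil fetches a JSON document and reads one field. The endpoint URL is illustrative, and the extract-index-from-es.read-timeout setting is assumed to be present in the loaded configuration:

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer
import cmwell.analytics.util.HttpUtil

import scala.concurrent.ExecutionContextExecutor

object HttpUtilUsage extends App {
  implicit val system: ActorSystem = ActorSystem("http-util-usage")
  implicit val executionContext: ExecutionContextExecutor = system.dispatcher
  implicit val actorMaterializer: ActorMaterializer = ActorMaterializer()

  try {
    // Blocks until the response body is read or the default ReadTimeout expires.
    val health = HttpUtil.jsonResult(
      HttpRequest(uri = "http://localhost:9200/_cluster/health"), // hypothetical endpoint
      action = "fetch cluster health")
    println(health.get("status"))
  } finally {
    system.terminate()
  }
}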
Example 65
Source File: DataWriterFactory.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.analytics.data

import java.io.File
import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import cmwell.analytics.util.Shard
import org.apache.avro.generic.GenericRecord
import org.apache.commons.io.FileUtils
import org.apache.parquet.hadoop.metadata.CompressionCodecName

import scala.concurrent.ExecutionContextExecutor

trait DataWriterFactory[T <: GenericRecord] {
  def apply(shard: Shard): DataWriter[T]
}

object DataWriterFactory {

  private val compressionCodec = CompressionCodecName.SNAPPY


  // Builds a writer factory that writes each source shard to a local file in the given format.
  def file[T <: GenericRecord with CsvGenerator](format: String,
                                                 objectExtractor: ObjectExtractor[T],
                                                 outDirectory: String): Shard => DataWriter[T] = {

    val extension = s".$format" + (if (format == "parquet") s"${compressionCodec.getExtension}" else "")

    // Generate a meaningful file name for the target file name based on the source shard index name and shard number.
    (sourceShard: Shard) => {
      val outFile: File = Paths.get(outDirectory, s"part-r-${sourceShard.indexName}.${sourceShard.shard}$extension").toFile

      if (outFile.exists)
        FileUtils.forceDelete(outFile)

      new File(outFile.getParent).mkdirs()

      FileDataWriter[T](format, objectExtractor.schema, outFile.toString, compressionCodec)
    }
  }

  
  // Builds a writer factory that writes each source shard to a target ES index chosen via the index map.
  def index[T <: GenericRecord](indexMap: Map[String, String], // source-index -> target-index
                                esEndpoint: String)
                               (implicit system: ActorSystem,
                                executionContext: ExecutionContextExecutor,
                                actorMaterializer: ActorMaterializer
                               ): Shard => DataWriter[T] = {

    (sourceShard: Shard) => {
      val targetIndex = indexMap(sourceShard.indexName)
      new IndexDataWriter[T](indexName = targetIndex, esEndpoint = esEndpoint)
    }
  }
} 
Example 66
Source File: CalculateXORSummary.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.analytics.main

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import cmwell.analytics.data.{IndexWithSourceHash, XORSummary, XORSummaryFactory}
import cmwell.analytics.downloader.PartitionedDownloader
import cmwell.analytics.util.{DiscoverEsTopology, FindContactPoints}
import org.apache.commons.codec.binary.Hex
import org.apache.log4j.LogManager
import org.rogach.scallop.{ScallopConf, ScallopOption}

import scala.concurrent.ExecutionContextExecutor

object CalculateXORSummary {

  def main(args: Array[String]): Unit = {

    val logger = LogManager.getLogger(CalculateXORSummary.getClass)

    // Since we expect this to be run on a CM-Well node, the default parallelism is to use half the processors
    // so as to avoid starving the CM-Well node from processor resources. A higher level of parallelism might
    // be possible (without interfering with CM-Well) since most of the work will actually be on the ES side.
    val defaultParallelism = 1 max (Runtime.getRuntime.availableProcessors / 2)

    implicit val system: ActorSystem = ActorSystem("xor-summary")
    implicit val executionContext: ExecutionContextExecutor = system.dispatcher
    implicit val actorMaterializer: ActorMaterializer = ActorMaterializer()

    try {

      object Opts extends ScallopConf(args) {

        val readIndex: ScallopOption[String] = opt[String]("read-index", short = 'i', descr = "The name of the index to read from", required = false)

        val parallelism: ScallopOption[Int] = opt[Int]("parallelism", short = 'p', descr = "The parallelism level", default = Some(defaultParallelism))
        val url: ScallopOption[String] = trailArg[String]("url", descr = "A CM-Well URL", required = true)

        verify()
      }

      val esContactPoint = FindContactPoints.es(Opts.url())
      val indexesOrAliasesToRead = Opts.readIndex.toOption.fold(Seq("cm_well_all"))(Seq(_))
      val esTopology = DiscoverEsTopology(esContactPoint = esContactPoint, aliases = indexesOrAliasesToRead)

      val dataWriterFactory = new XORSummaryFactory()

      PartitionedDownloader.runDownload(
        esTopology = esTopology,
        parallelism = Opts.parallelism(),
        objectExtractor = IndexWithSourceHash,
        dataWriterFactory = dataWriterFactory.apply,
        sourceFilter = false)

      // Summarize the summaries down to the index level.
      val summaryByIndex: Map[String, XORSummary] = dataWriterFactory.shardSummaries
        .groupBy { case (shard, _) => shard.indexName }
        .map { case (indexName, summaryMap) => indexName -> summaryMap.values.reduce(XORSummary.combine) }

      // Emit the per-index summaries as a JSON array.
      val r = "[" +
        summaryByIndex.map { case (index, summary) =>
          val x = Hex.encodeHexString(summary.summary)
          s""" { "index": "$index", "summary": "$x" } """
        }.mkString(",\n") + "]"

      println(r)
    }
    catch {
      case ex: Throwable =>
        logger.error(ex.getMessage, ex)
        System.exit(1)
    }
    finally {
      system.terminate()
    }
  }
} 
Example 67
Source File: CopyIndexesWithMapping.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.analytics.main

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import cmwell.analytics.data.{DataWriterFactory, IndexWithCompleteDocument}
import cmwell.analytics.downloader.PartitionedDownloader
import cmwell.analytics.util.{DiscoverEsTopology, FindContactPoints}
import com.fasterxml.jackson.databind.ObjectMapper
import org.apache.log4j.LogManager
import org.rogach.scallop.{ScallopConf, ScallopOption}

import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContextExecutor

object CopyIndexesWithMapping {

  def main(args: Array[String]): Unit = {

    val logger = LogManager.getLogger(CopyIndexesWithMapping.getClass)

    // Since we expect this to be run on a CM-Well node, the default parallelism is to use half the processors
    // so as to avoid starving the CM-Well node from processor resources. A higher level of parallelism might
    // be possible (without interfering with CM-Well) since most of the work will actually be on the ES side.
    val defaultParallelism = 1 max (Runtime.getRuntime.availableProcessors / 2)

    implicit val system: ActorSystem = ActorSystem("copy-index-with-mapping")
    implicit val executionContext: ExecutionContextExecutor = system.dispatcher
    implicit val actorMaterializer: ActorMaterializer = ActorMaterializer()

    try {

      object Opts extends ScallopConf(args) {

        val indexMap: ScallopOption[String] = opt[String]("index-map", short = 'i', descr = "A map from source to target index names, in JSON format", required = true)

        val parallelism: ScallopOption[Int] = opt[Int]("parallelism", short = 'p', descr = "The parallelism level", default = Some(defaultParallelism))
        val url: ScallopOption[String] = trailArg[String]("url", descr = "A CM-Well URL", required = true)

        verify()
      }

      val esContactPoint = FindContactPoints.es(Opts.url())

      // Expect a map in the form: { "sourceIndex1": "targetIndex1", "sourceIndex2": "targetIndex2", ... }
      val indexMap: Map[String, String] = new ObjectMapper().readTree(Opts.indexMap()).fields.asScala.map { entry =>
        entry.getKey -> entry.getValue.asText
      }.toMap

      val esTopology = DiscoverEsTopology(esContactPoint = esContactPoint, aliases = indexMap.keys.toSeq)

      // Validate that the index-map parameter specified valid index names, and not aliases.
      for (indexName <- indexMap.keys)
        if (!esTopology.allIndexNames.contains(indexName))
          throw new RuntimeException(s"index-map parameter included $indexName as a source, which is not a valid index name.")

      for (indexName <- indexMap.values)
        if (!esTopology.allIndexNames.contains(indexName))
          throw new RuntimeException(s"index-map parameter included $indexName as a target, which is not a valid index name.")

      val dataWriterFactory = DataWriterFactory.index[IndexWithCompleteDocument](
        indexMap = indexMap,
        esEndpoint = esContactPoint)

      PartitionedDownloader.runDownload(
        esTopology = esTopology,
        parallelism = Opts.parallelism(),
        objectExtractor = IndexWithCompleteDocument,
        dataWriterFactory = dataWriterFactory,
        sourceFilter = false)
    }
    catch {
      case ex: Throwable =>
        logger.error(ex.getMessage, ex)
        System.exit(1)
    }
    finally {
      system.terminate()
    }
  }
} 
Example 68
Source File: DumpSystemFieldsFromEs.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.analytics.main

import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import cmwell.analytics.data.{DataWriterFactory, IndexWithSystemFields}
import cmwell.analytics.downloader.PartitionedDownloader
import cmwell.analytics.util.TimestampConversion.timestampConverter
import cmwell.analytics.util.{DiscoverEsTopology, FindContactPoints}
import org.apache.commons.io.FileUtils
import org.apache.log4j.LogManager
import org.rogach.scallop.{ScallopConf, ScallopOption}

import scala.concurrent.ExecutionContextExecutor

object DumpSystemFieldsFromEs {

  def main(args: Array[String]): Unit = {

    val logger = LogManager.getLogger(DumpSystemFieldsFromEs.getClass)

    implicit val system: ActorSystem = ActorSystem("dump-system-fields-from-es")
    implicit val executionContext: ExecutionContextExecutor = system.dispatcher
    implicit val actorMaterializer: ActorMaterializer = ActorMaterializer()

    try {
      // Since we expect this to be run on a CM-Well node, the default parallelism is to use half the processors
      // so as to avoid starving the CM-Well node from processor resources. A higher level of parallelism might
      // be possible (without interfering with CM-Well) since most of the work will actually be on the ES side.
      val defaultParallelism = 1 max (Runtime.getRuntime.availableProcessors / 2)

      object Opts extends ScallopConf(args) {

        val readIndex: ScallopOption[String] = opt[String]("read-index", short = 'i', descr = "The name of the index to read from (default: cm_well_all)", required = false)
        val parallelism: ScallopOption[Int] = opt[Int]("parallelism", short = 'p', descr = "The parallelism level", default = Some(defaultParallelism))

        val currentOnly: ScallopOption[Boolean] = opt[Boolean]("current-only", short = 'c', descr = "Only download current uuids")
        val lastModifiedGteFilter: ScallopOption[java.sql.Timestamp] = opt[java.sql.Timestamp]("lastmodified-gte-filter", descr = "Filter on lastModified >= <value>, where value is an ISO8601 timestamp", default = None)(timestampConverter)
        val pathPrefixFilter: ScallopOption[String] = opt[String]("path-prefix-filter", descr = "Filter on the path prefix matching <value>", default = None)

        val format: ScallopOption[String] = opt[String]("format", short = 'f', descr = "The data format: either 'parquet' or 'csv'", default = Some("parquet"))
        val out: ScallopOption[String] = opt[String]("out", short = 'o', descr = "The path to save the output to", required = true)
        val url: ScallopOption[String] = trailArg[String]("url", descr = "A CM-Well URL", required = true)

        val sourceFilter: ScallopOption[Boolean] = toggle("source-filter", noshort = true, default=Some(true), prefix = "no-",
          descrNo = "Do not filter _source fields (workaround for bad index)", descrYes = "Use source filtering to reduce network traffic")

        verify()
      }

      val esContactPoint = FindContactPoints.es(Opts.url())
      val indexesOrAliasesToRead = Opts.readIndex.toOption.fold(Seq("cm_well_all"))(Seq(_))
      val esTopology = DiscoverEsTopology(esContactPoint = esContactPoint, aliases = indexesOrAliasesToRead)

      // Calling script should clear output directory as necessary.

      val objectExtractor = IndexWithSystemFields
      val dataWriterFactory = DataWriterFactory.file(format = Opts.format(), objectExtractor, outDirectory = Opts.out())

      PartitionedDownloader.runDownload(
        esTopology = esTopology,
        parallelism = Opts.parallelism(),

        currentOnly = Opts.currentOnly(),
        lastModifiedGteFilter = Opts.lastModifiedGteFilter.toOption,
        pathPrefixFilter = Opts.pathPrefixFilter.toOption,

        objectExtractor = objectExtractor,
        dataWriterFactory = dataWriterFactory,
        sourceFilter = Opts.sourceFilter())

      // The Hadoop convention is to touch the (empty) _SUCCESS file to signal successful completion.
      FileUtils.touch(Paths.get(Opts.out(), "_SUCCESS").toFile)
    }
    catch {
      case ex: Throwable =>
        logger.error(ex.getMessage, ex)
        System.exit(1)
    }
    finally {
      system.terminate()
    }
  }
} 
Example 69
Source File: DumpUuidOnlyFromEs.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.analytics.main

import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import cmwell.analytics.data.{DataWriterFactory, IndexWithUuidOnly}
import cmwell.analytics.downloader.PartitionedDownloader
import cmwell.analytics.util.TimestampConversion.timestampConverter
import cmwell.analytics.util.{DiscoverEsTopology, FindContactPoints}
import org.apache.commons.io.FileUtils
import org.apache.log4j.LogManager
import org.rogach.scallop.{ScallopConf, ScallopOption}

import scala.concurrent.ExecutionContextExecutor

object DumpUuidOnlyFromEs {

  def main(args: Array[String]): Unit = {

    val logger = LogManager.getLogger(DumpUuidOnlyFromEs.getClass)

    // Since we expect this to be run on a CM-Well node, the default parallelism is to use half the processors
    // so as to avoid starving the CM-Well node from processor resources. A higher level of parallelism might
    // be possible (without interfering with CM-Well) since most of the work will actually be on the ES side.
    val defaultParallelism = 1 max (Runtime.getRuntime.availableProcessors / 2)

    implicit val system: ActorSystem = ActorSystem("dump-uuid-only-from-es")
    implicit val executionContext: ExecutionContextExecutor = system.dispatcher
    implicit val actorMaterializer: ActorMaterializer = ActorMaterializer()

    try {

      object Opts extends ScallopConf(args) {

        val readIndex: ScallopOption[String] = opt[String]("read-index", short = 'i', descr = "The name of the index to read from (default: cm_well_all)", required = false)
        val parallelism: ScallopOption[Int] = opt[Int]("parallelism", short = 'p', descr = "The parallelism level", default = Some(defaultParallelism))

        val currentOnly: ScallopOption[Boolean] = opt[Boolean]("current-only", short = 'c', descr = "Only download current uuids")
        val lastModifiedGteFilter: ScallopOption[java.sql.Timestamp] = opt[java.sql.Timestamp]("lastmodified-gte-filter", descr = "Filter on lastModified >= <value>, where value is an ISO8601 timestamp", default = None)(timestampConverter)
        val pathPrefixFilter: ScallopOption[String] = opt[String]("path-prefix-filter", descr = "Filter on the path prefix matching <value>", default = None)

        val out: ScallopOption[String] = opt[String]("out", short = 'o', descr = "The path to save the output to", required = true)
        val format: ScallopOption[String] = opt[String]("format", short = 'f', descr = "The data format: either 'parquet' or 'csv'", default = Some("parquet"))
        val url: ScallopOption[String] = trailArg[String]("url", descr = "A CM-Well URL", required = true)

        val sourceFilter: ScallopOption[Boolean] = toggle("source-filter", noshort = true, default=Some(true), prefix = "no-",
          descrNo = "Do not filter _source fields (workaround for bad index)", descrYes = "Use source filtering to reduce network traffic")

        verify()
      }

      val esContactPoint = FindContactPoints.es(Opts.url())
      val indexesOrAliasesToRead = Opts.readIndex.toOption.fold(Seq("cm_well_all"))(Seq(_))
      val esTopology = DiscoverEsTopology(esContactPoint = esContactPoint, aliases = indexesOrAliasesToRead)

      // Calling script should clear output directory as necessary.

      val objectExtractor = IndexWithUuidOnly
      val dataWriterFactory = DataWriterFactory.file(format = Opts.format(), objectExtractor, outDirectory = Opts.out())

      PartitionedDownloader.runDownload(
        esTopology = esTopology,
        parallelism = Opts.parallelism(),

        currentOnly = Opts.currentOnly(),
        lastModifiedGteFilter = Opts.lastModifiedGteFilter.toOption,
        pathPrefixFilter = Opts.pathPrefixFilter.toOption,

        objectExtractor = objectExtractor,
        dataWriterFactory = dataWriterFactory,
        sourceFilter = Opts.sourceFilter())

      // The Hadoop convention is to touch the (empty) _SUCCESS file to signal successful completion.
      FileUtils.touch(Paths.get(Opts.out(), "_SUCCESS").toFile)
    }
    catch {
      case ex: Throwable =>
        logger.error(ex.getMessage, ex)
        System.exit(1)
    }
    finally {
      system.terminate()
    }
  }
} 
Example 70
Source File: DumpKeyFieldsFromEs.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.analytics.main

import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import cmwell.analytics.data.{DataWriterFactory, IndexWithKeyFields}
import cmwell.analytics.downloader.PartitionedDownloader
import cmwell.analytics.util.TimestampConversion.timestampConverter
import cmwell.analytics.util.{DiscoverEsTopology, FindContactPoints}
import org.apache.commons.io.FileUtils
import org.apache.log4j.LogManager
import org.rogach.scallop.{ScallopConf, ScallopOption}

import scala.concurrent.ExecutionContextExecutor

object DumpKeyFieldsFromEs {

  def main(args: Array[String]): Unit = {

    val logger = LogManager.getLogger(DumpKeyFieldsFromEs.getClass)

    implicit val system: ActorSystem = ActorSystem("dump-key-fields-from-es")
    implicit val executionContext: ExecutionContextExecutor = system.dispatcher
    implicit val actorMaterializer: ActorMaterializer = ActorMaterializer()

    try {
      // Since we expect this to be run on a CM-Well node, the default parallelism is to use half the processors
      // so as to avoid starving the CM-Well node from processor resources. A higher level of parallelism might
      // be possible (without interfering with CM-Well) since most of the work will actually be on the ES side.
      val defaultParallelism = 1 max (Runtime.getRuntime.availableProcessors / 2)

      object Opts extends ScallopConf(args) {

        val readIndex: ScallopOption[String] = opt[String]("read-index", short = 'i', descr = "The name of the index to read from (default: cm_well_all)", required = false)
        val parallelism: ScallopOption[Int] = opt[Int]("parallelism", short = 'p', descr = "The parallelism level", default = Some(defaultParallelism))

        val currentOnly: ScallopOption[Boolean] = opt[Boolean]("current-only", short = 'c', descr = "Only download current uuids")
        val lastModifiedGteFilter: ScallopOption[java.sql.Timestamp] = opt[java.sql.Timestamp]("lastmodified-gte-filter", descr = "Filter on lastModified >= <value>, where value is an ISO8601 timestamp", default = None)(timestampConverter)
        val pathPrefixFilter: ScallopOption[String] = opt[String]("path-prefix-filter", descr = "Filter on the path prefix matching <value>", default = None)

        val format: ScallopOption[String] = opt[String]("format", short = 'f', descr = "The data format: either 'parquet' or 'csv'", default = Some("parquet"))
        val out: ScallopOption[String] = opt[String]("out", short = 'o', descr = "The path to save the output to", required = true)

        val url: ScallopOption[String] = trailArg[String]("url", descr = "A CM-Well URL", required = true)

        val sourceFilter: ScallopOption[Boolean] = toggle("source-filter", noshort = true, default = Some(true), prefix = "no-",
          descrNo = "Do not filter _source fields (workaround for bad index)", descrYes = "Use source filtering to reduce network traffic")

        verify()
      }

      val esContactPoint = FindContactPoints.es(Opts.url())
      val indexesOrAliasesToRead = Opts.readIndex.toOption.fold(Seq("cm_well_all"))(Seq(_))
      val esTopology = DiscoverEsTopology(esContactPoint = esContactPoint, aliases = indexesOrAliasesToRead)

      // Calling script should clear output directory as necessary.

      val objectExtractor = IndexWithKeyFields
      val dataWriterFactory = DataWriterFactory.file(format = Opts.format(), objectExtractor, outDirectory = Opts.out())

      PartitionedDownloader.runDownload(
        esTopology = esTopology,
        parallelism = Opts.parallelism(),

        currentOnly = Opts.currentOnly(),
        lastModifiedGteFilter = Opts.lastModifiedGteFilter.toOption,
        pathPrefixFilter = Opts.pathPrefixFilter.toOption,

        objectExtractor = objectExtractor,
        dataWriterFactory = dataWriterFactory,
        sourceFilter = Opts.sourceFilter())

      // The Hadoop convention is to touch the (empty) _SUCCESS file to signal successful completion.
      FileUtils.touch(Paths.get(Opts.out(), "_SUCCESS").toFile)
    }
    catch {
      case ex: Throwable =>
        logger.error(ex.getMessage, ex)
        System.exit(1)
    }
    finally {
      system.terminate()
    }
  }
} 
Example 71
Source File: CopyIndex.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.analytics.main

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import cmwell.analytics.data.{DataWriterFactory, IndexWithCompleteDocument}
import cmwell.analytics.downloader.PartitionedDownloader
import cmwell.analytics.util.{DiscoverEsTopology, FindContactPoints}
import org.apache.log4j.LogManager
import org.rogach.scallop.{ScallopConf, ScallopOption}

import scala.concurrent.ExecutionContextExecutor

object CopyIndex {

  def main(args: Array[String]): Unit = {

    val logger = LogManager.getLogger(CopyIndex.getClass)

    // Since we expect this to be run on a CM-Well node, the default parallelism is to use half the processors
    // so as to avoid starving the CM-Well node from processor resources. A higher level of parallelism might
    // be possible (without interfering with CM-Well) since most of the work will actually be on the ES side.
    val defaultParallelism = 1 max (Runtime.getRuntime.availableProcessors / 2)

    implicit val system: ActorSystem = ActorSystem("copy-index")
    implicit val executionContext: ExecutionContextExecutor = system.dispatcher
    implicit val actorMaterializer: ActorMaterializer = ActorMaterializer()

    try {

      object Opts extends ScallopConf(args) {

        val readIndex: ScallopOption[String] = opt[String]("read-index", short = 'i', descr = "The name of the index to read from", required = true)
        val writeIndex: ScallopOption[String] = opt[String]("write-index", short = 'w', descr = "The name of the index to write to", required = true)

        val parallelism: ScallopOption[Int] = opt[Int]("parallelism", short = 'p', descr = "The parallelism level", default = Some(defaultParallelism))
        val url: ScallopOption[String] = trailArg[String]("url", descr = "A CM-Well URL", required = true)

        verify()
      }

      val esContactPoint = FindContactPoints.es(Opts.url())
      val indexesOrAliasesToRead = Opts.readIndex.toOption.fold(Seq("cm_well_all"))(Seq(_))
      val esTopology = DiscoverEsTopology(esContactPoint = esContactPoint, aliases = indexesOrAliasesToRead)

      val dataWriterFactory = DataWriterFactory.index[IndexWithCompleteDocument](
        indexName = Opts.writeIndex(),
        esEndpoint = esContactPoint)

      PartitionedDownloader.runDownload(
        esTopology = esTopology,
        parallelism = Opts.parallelism(),
        objectExtractor = IndexWithCompleteDocument,
        dataWriterFactory = dataWriterFactory,
        sourceFilter = false)
    }
    catch {
      case ex: Throwable =>
        logger.error(ex.getMessage, ex)
        System.exit(1)
    }
    finally {
      system.terminate()
    }
  }
} 
Example 72
Source File: FindContactPoints.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.analytics.util

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpMethods.GET
import akka.http.scaladsl.model.{HttpRequest, Uri}
import akka.stream.ActorMaterializer
import com.fasterxml.jackson.databind.JsonNode

import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContextExecutor
import scala.util.Try

// TODO: These s/b `Uri`s?
case class ContactPoints(cassandra: String,
                         es: String)

object FindContactPoints {

  
  // Finds an accessible Elasticsearch endpoint (host:port) for the CM-Well cluster behind the given URL
  // by querying /proc/health for the master nodes and probing them.
  def es(url: String)
        (implicit system: ActorSystem,
         executionContext: ExecutionContextExecutor,
         actorMaterializer: ActorMaterializer): String = {

    val uri = Uri(url)

    val request = HttpRequest(
      method = GET,
      uri = s"http://${uri.authority.host}:${uri.authority.port}/proc/health?format=json")

    val json: JsonNode = HttpUtil.jsonResult(request, "fetch /proc/health")

    val masterIpAddresses: Seq[String] = json.get("fields").findValue("masters").elements.asScala.map(_.textValue).toSeq

    if (masterIpAddresses.isEmpty)
      throw new RuntimeException("No master node addresses found.")

    // For Elasticsearch, the port is 9201 for a single node, and 9200 for clustered.
    val esPort = if (masterIpAddresses.lengthCompare(1) > 0) "9200" else "9201"

    // All the masters should be accessible, but verify that.
    // A better implementation would keep all the endpoints in the list, and we could fall back to the others
    // if the one we are using disappears.
    val firstAccessibleESEndpoint = masterIpAddresses.find { ipAddress =>
      val request = HttpRequest(
        method = GET,
        uri = s"http://$ipAddress:$esPort")

      Try(HttpUtil.result(request, "probe for accessible es endpoint")).isSuccess
    }

    if (firstAccessibleESEndpoint.isEmpty)
      throw new RuntimeException("No accessible ES endpoint was found.")

    s"${firstAccessibleESEndpoint.get}:$esPort"
  }
} 
Example 73
Source File: CassandraHealthCheck.scala    From akka-persistence-cassandra   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.cassandra.healthcheck

import akka.actor.ActorSystem
import akka.event.Logging
import akka.pattern.{ ask, AskTimeoutException }
import akka.persistence.Persistence
import akka.persistence.cassandra.PluginSettings
import akka.persistence.cassandra.journal.CassandraJournal.HealthCheckQuery
import akka.util.Timeout

import scala.concurrent.{ ExecutionContextExecutor, Future }
import scala.util.control.NonFatal

final class CassandraHealthCheck(system: ActorSystem) extends (() => Future[Boolean]) {

  private val log = Logging.getLogger(system, getClass)

  private val settings = new PluginSettings(system, system.settings.config.getConfig("akka.persistence.cassandra"))
  private val healthCheckSettings = settings.healthCheckSettings
  private val journalPluginId = s"${healthCheckSettings.pluginLocation}.journal"
  private val journalRef = Persistence(system).journalFor(journalPluginId)

  private implicit val ec: ExecutionContextExecutor = system.dispatchers.lookup(s"$journalPluginId.plugin-dispatcher")
  private implicit val timeout: Timeout = healthCheckSettings.timeout

  override def apply(): Future[Boolean] = {
    (journalRef ? HealthCheckQuery).map(_ => true).recoverWith {
      case _: AskTimeoutException =>
        log.warning("Failed to execute health check due to ask timeout")
        Future.successful(false)
      case NonFatal(e) =>
        log.warning("Failed to execute health check due to: {}", e)
        Future.successful(false)
    }
  }
} 
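The health check is normally registered with Akka Management through configuration rather than called directly; invoking it by hand looks roughly like the sketch below, assuming the akka.persistence.cassandra plugin and its plugin-dispatcher are configured and a Cassandra journal is reachable:

import akka.actor.ActorSystem
import akka.persistence.cassandra.healthcheck.CassandraHealthCheck

import scala.concurrent.Await
import scala.concurrent.duration._

object HealthCheckUsage extends App {
  val system = ActorSystem("health-check-usage")

  // The check is a () => Future[Boolean]; false means the journal did not answer in time.
  val check = new CassandraHealthCheck(system)
  println(s"cassandra journal healthy: ${Await.result(check(), 10.seconds)}")

  system.terminate()
}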
Example 74
Source File: RemoraKafkaConsumerGroupService.scala    From remora   with MIT License 5 votes vote down vote up
package kafka.admin

import java.util.logging.Logger

import config.KafkaSettings
import kafka.admin.ConsumerGroupCommand.ConsumerGroupCommandOptions
import models.{GroupInfo, Node, PartitionAssignmentState}
import org.apache.kafka.clients.admin

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.{ExecutionContextExecutor, Future}

trait ConsumerGroupService {
  def list(): Future[List[String]]

  def describeCluster(): Future[admin.DescribeClusterResult]
  def describeConsumerGroup(group: String): Future[GroupInfo]
}

class RemoraKafkaConsumerGroupService(kafkaSettings: KafkaSettings)
                                     (implicit executionContext: ExecutionContextExecutor) extends ConsumerGroupService
  with nl.grons.metrics.scala.DefaultInstrumented {

  private val logger = Logger.getLogger(RemoraKafkaConsumerGroupService.this.getClass.getName)

  private val listTimer = metrics.timer("list-timer")
  private val describeTimer = metrics.timer("describe-timer")
  private val describeGroupTimer = metrics.timer("describe-group-timer")

  private def createKafkaConsumerGroupService(groupId: Option[String] = None): ConsumerGroupCommand.ConsumerGroupService = {
    groupId match {
      case Some(g) => createKafkaConsumerGroupService(baseConfig() ++ Array("--group", g))
      case None => createKafkaConsumerGroupService(baseConfig())
    }
  }

  private def baseConfig(): Array[String] = {
    var baseConfig: ArrayBuffer[String] = ArrayBuffer("--bootstrap-server", kafkaSettings.address)

    if (!kafkaSettings.commandConfig.isEmpty) {
      baseConfig ++= Array("--command-config", kafkaSettings.commandConfig)
    }

    baseConfig.toArray
  }

  def createKafkaConsumerGroupService(consumerGroupCommandArgs: Array[String]): ConsumerGroupCommand.ConsumerGroupService = {
    new ConsumerGroupCommand.ConsumerGroupService(new ConsumerGroupCommandOptions(consumerGroupCommandArgs))
  }

  override def describeCluster(): Future[admin.DescribeClusterResult] = Future {
    describeTimer.time {
      kafkaSettings.adminClient.describeCluster()
    }
  }

  override def list(): Future[List[String]] = Future {
    listTimer.time {
      val groupService = createKafkaConsumerGroupService()
      try {
        groupService.listGroups()
      } finally {
        groupService.close()
      }
    }
  }

  override def describeConsumerGroup(group: String): Future[GroupInfo] = Future {
    describeGroupTimer.time {
      val kafkaConsumerGroupService = createKafkaConsumerGroupService(Some(group))
      try {
        val (state, assignments) = kafkaConsumerGroupService.collectGroupOffsets()
        assignments match {
          case Some(partitionAssignmentStates) =>
            val assignments = Some(partitionAssignmentStates.map(a => PartitionAssignmentState(a.group,
              a.coordinator match {
                case Some(c) => Some(Node(Option(c.id), Option(c.idString), Option(c.host), Option(c.port), Option(c.rack)))
                case None => None
              },
              a.topic, a.partition, a.offset,
              a.lag, a.consumerId, a.host, a.clientId, a.logEndOffset)))

            val lagPerTopic = Some(partitionAssignmentStates.filter(state => state.topic.isDefined)
              .groupBy(state => state.topic.get)
              .map { case (topic, partitions) => (topic, partitions.map(_.lag).map {
                case Some(lag) => lag.toLong
                case None => 0L
              }.sum)
              })

            GroupInfo(state, assignments, lagPerTopic)
          case None => GroupInfo(state)
        }
      } finally {
        kafkaConsumerGroupService.close()
      }
    }
  }
} 
Example 75
Source File: AlgoliaSchema.scala    From graphcool-framework   with Apache License 2.0 5 votes vote down vote up
package cool.graph.shared.algolia.schemas

import cool.graph.client.SangriaQueryArguments
import cool.graph.client.schema.SchemaModelObjectTypesBuilder
import cool.graph.shared.algolia.AlgoliaContext
import cool.graph.shared.models.{Model, Project}
import cool.graph.{DataItem, FilteredResolver}
import sangria.schema.{Context, Field, ObjectType, OptionType, Schema}
import scaldi.{Injectable, Injector}

import scala.concurrent.{ExecutionContextExecutor, Future}

class AlgoliaSchema[ManyDataItemType](project: Project, model: Model, modelObjectTypes: SchemaModelObjectTypesBuilder[ManyDataItemType])(
    implicit injector: Injector)
    extends Injectable {

  implicit val dispatcher = inject[ExecutionContextExecutor](identified by "dispatcher")

  def resolve[ManyDataItemType](ctx: Context[AlgoliaContext, Unit]): Future[Option[DataItem]] = {
    FilteredResolver.resolve(modelObjectTypes, model, ctx.ctx.nodeId, ctx, ctx.ctx.dataResolver)
  }

  val algoliaSyncField: Field[AlgoliaContext, Unit] = Field(
    "node",
    description = Some("The model to synchronize with Algolia."),
    arguments = List(SangriaQueryArguments.filterArgument(model = model, project = project)),
    fieldType = OptionType(modelObjectTypes.modelObjectTypes(model.name)),
    resolve = (ctx) => resolve(ctx)
  )

  def build(): Schema[AlgoliaContext, Unit] = {
    val Query = ObjectType(
      "Query",
      List(algoliaSyncField)
    )

    Schema(Query)
  }
} 
Example 76
Source File: AlgoliaFullModelSchema.scala    From graphcool-framework   with Apache License 2.0 5 votes vote down vote up
package cool.graph.shared.algolia.schemas

import cool.graph.Types.DataItemFilterCollection
import cool.graph.client.database.QueryArguments
import cool.graph.client.SangriaQueryArguments
import cool.graph.client.schema.SchemaModelObjectTypesBuilder
import cool.graph.shared.algolia.AlgoliaFullModelContext
import cool.graph.shared.models.{Model, Project}
import sangria.schema.{Field, ListType, ObjectType, Schema}
import scaldi.{Injectable, Injector}

import scala.concurrent.ExecutionContextExecutor

class AlgoliaFullModelSchema[ManyDataItemType](project: Project, model: Model, modelObjectTypes: SchemaModelObjectTypesBuilder[ManyDataItemType])(
    implicit injector: Injector)
    extends Injectable {

  implicit val dispatcher =
    inject[ExecutionContextExecutor](identified by "dispatcher")

  val algoliaSyncField: Field[AlgoliaFullModelContext, Unit] = Field(
    "node",
    description = Some("The table to synchronize with Algolia."),
    arguments = List(SangriaQueryArguments.filterArgument(model = model, project = project)),
    fieldType = ListType(modelObjectTypes.modelObjectTypes(model.name)),
    resolve = (ctx) => {

      val filter: DataItemFilterCollection = modelObjectTypes
        .extractQueryArgumentsFromContext(model = model, ctx = ctx)
        .flatMap(_.filter)
        .getOrElse(List())

      val arguments = Some(QueryArguments(filter = Some(filter), skip = None, after = None, first = None, before = None, last = None, orderBy = None))

      ctx.ctx.dataResolver
        .resolveByModel(model, arguments)
        .map(result => result.items)
    }
  )

  def build(): Schema[AlgoliaFullModelContext, Unit] = {
    val Query = ObjectType(
      "Query",
      List(algoliaSyncField)
    )

    Schema(Query)
  }
} 
Example 77
Source File: Conseil.scala    From Conseil   with Apache License 2.0 5 votes vote down vote up
package tech.cryptonomic.conseil.api

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import com.typesafe.scalalogging.LazyLogging
import de.heikoseeberger.akkahttpcirce.FailFastCirceSupport
import tech.cryptonomic.conseil.api.config.{ConseilAppConfig, ConseilConfiguration}
import tech.cryptonomic.conseil.api.util.Retry.retry
import tech.cryptonomic.conseil.common.config.Platforms.PlatformsConfiguration
import tech.cryptonomic.conseil.common.config._

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor}
import scala.language.postfixOps
import scala.util.Failure

object Conseil extends App with LazyLogging with ConseilAppConfig with FailFastCirceSupport with ConseilMainOutput {

  loadApplicationConfiguration(args) match {
    case Left(errors) =>
      // nothing to do
    case Right(config) =>
      implicit val system: ActorSystem = ActorSystem("conseil-system")
      implicit val materializer: ActorMaterializer = ActorMaterializer()
      implicit val executionContext: ExecutionContextExecutor = system.dispatcher

      val retries = if (config.failFast.on) Some(0) else None

      val serverBinding =
        retry(maxRetry = retries, deadline = Some(config.server.startupDeadline fromNow))(ConseilApi.create(config)).andThen {
          case Failure(error) =>
            logger.error(
              "The server was not started correctly, I failed to create the required Metadata service",
              error
            )
            Await.ready(system.terminate(), 10.seconds)
        }.flatMap(
          runServer(_, config.server, config.platforms, config.verbose)
        )

      sys.addShutdownHook {
        serverBinding
          .flatMap(_.unbind().andThen { case _ => logger.info("Server stopped...") })
          .andThen {
            case _ => system.terminate()
          }
          .onComplete(_ => logger.info("We're done here, nothing else to see"))
      }

  }

  
  def runServer(
      api: ConseilApi,
      server: ConseilConfiguration,
      platforms: PlatformsConfiguration,
      verbose: VerboseOutput
  )(implicit executionContext: ExecutionContext, system: ActorSystem, mat: ActorMaterializer) = {
    val bindingFuture = Http().bindAndHandle(api.route, server.hostname, server.port)
    displayInfo(server)
    if (verbose.on) displayConfiguration(platforms)
    bindingFuture

  }
} 
Example 78
Source File: AttributesComputationSpec.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.storage.attributes

import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Paths}

import akka.actor.ActorSystem
import akka.http.scaladsl.model.ContentTypes.`text/plain(UTF-8)`
import akka.testkit.TestKit
import cats.effect.IO
import ch.epfl.bluebrain.nexus.storage.File.{Digest, FileAttributes}
import ch.epfl.bluebrain.nexus.storage.StorageError.InternalError
import ch.epfl.bluebrain.nexus.storage.utils.IOValues
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

import scala.concurrent.ExecutionContextExecutor

class AttributesComputationSpec
    extends TestKit(ActorSystem("AttributesComputationSpec"))
    with AnyWordSpecLike
    with Matchers
    with IOValues {

  implicit private val ec: ExecutionContextExecutor = system.dispatcher

  private trait Ctx {
    val path           = Files.createTempFile("storage-test", ".txt")
    val (text, digest) = "something" -> "3fc9b689459d738f8c88a3a48aa9e33542016b7a4052e001aaa536fca74813cb"
  }

  "Attributes computation computation" should {
    val computation = AttributesComputation.akkaAttributes[IO]
    val alg         = "SHA-256"

    "succeed" in new Ctx {
      Files.write(path, text.getBytes(StandardCharsets.UTF_8))
      computation(path, alg).ioValue shouldEqual FileAttributes(
        s"file://$path",
        Files.size(path),
        Digest(alg, digest),
        `text/plain(UTF-8)`
      )
      Files.deleteIfExists(path)
    }

    "fail when algorithm is wrong" in new Ctx {
      Files.write(path, text.getBytes(StandardCharsets.UTF_8))
      computation(path, "wrong-alg").failed[InternalError]
    }

    "fail when file does not exists" in new Ctx {
      computation(Paths.get("/tmp/non/existing"), alg).failed[InternalError]
    }
  }
} 
Example 79
Source File: executionContexts.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package concurrent

import scala.collection.mutable
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}
import scala.util.control.NonFatal

object RunNowEC extends ExecutionContextExecutor {
  def get: ExecutionContextExecutor = this

  object Implicits {
    implicit val executionContext: ExecutionContext = RunNowEC
  }

  def execute(runnable: Runnable): Unit =
    runnable.run()

  def reportFailure(cause: Throwable): Unit =
    cause.printStackTrace()
}

object RunInQueueEC extends RunInQueueEC {
  def get: ExecutionContextExecutor = this

  object Implicits {
    implicit val executionContext: ExecutionContext = RunInQueueEC
  }
}

class RunInQueueEC extends ExecutionContextExecutor {
  private val queueTL = new ThreadLocal[mutable.Queue[Runnable]] {
    override def initialValue = new mutable.Queue[Runnable]
  }

  def execute(runnable: Runnable): Unit = {
    val queue = queueTL.get
    val shouldRun = queue.isEmpty
    queue += runnable
    if (shouldRun) {
      while (queue.nonEmpty) {
        val task = queue.head
        try task.run() catch {
          case NonFatal(t) => reportFailure(t)
        }
        queue.dequeue()
      }
    }
  }

  def reportFailure(cause: Throwable): Unit =
    cause.printStackTrace()
}

trait HasExecutionContext {
  protected implicit def executionContext: ExecutionContext
}

trait HasRunNowEC extends HasExecutionContext {
  protected implicit final def executionContext: ExecutionContext = RunNowEC
}

trait HasRunInQueueEC extends HasExecutionContext {
  protected implicit final def executionContext: ExecutionContext = RunInQueueEC
} 
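Both RunNowEC and RunInQueueEC are ordinary ExecutionContextExecutor instances, so they can back any Future-based code. A minimal usage sketch (the object name and computation are illustrative only, not part of scala-commons):

import scala.concurrent.Future

import com.avsystem.commons.concurrent.RunNowEC

object RunNowECUsage extends App {
  // RunNowEC.execute simply calls runnable.run(), so the callback below
  // runs synchronously on the calling thread.
  import RunNowEC.Implicits.executionContext

  Future(21 * 2).foreach(result => println(s"computed $result"))
}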
Example 80
Source File: CompressionSpec.scala    From chronicler   with Apache License 2.0 5 votes vote down vote up
package com.github.fsanaulla.chronicler.akka

import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.github.fsanaulla.chronicler.akka.io.{AkkaDatabaseApi, InfluxIO}
import com.github.fsanaulla.chronicler.akka.management.InfluxMng
import com.github.fsanaulla.chronicler.akka.shared.InfluxConfig
import com.github.fsanaulla.chronicler.testing.it.DockerizedInfluxDB
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatest.{FlatSpecLike, Matchers}

import scala.concurrent.ExecutionContextExecutor

class CompressionSpec
  extends TestKit(ActorSystem())
  with FlatSpecLike
  with Matchers
  with DockerizedInfluxDB
  with ScalaFutures
  with IntegrationPatience {

  override def afterAll(): Unit = {
    mng.close()
    io.close()
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }

  implicit val ec: ExecutionContextExecutor = system.dispatcher

  val testDB = "db"

  lazy val influxConf =
    InfluxConfig(host, port, credentials = Some(creds), compress = true)

  lazy val mng =
    InfluxMng(host, port, credentials = Some(creds))

  lazy val io =
    InfluxIO(influxConf)

  lazy val db: AkkaDatabaseApi = io.database(testDB)

  it should "write data from file" in {
    mng.createDatabase(testDB).futureValue.right.get shouldEqual 200

    db.writeFromFile(Paths.get(getClass.getResource("/large_batch.txt").getPath))
      .futureValue
      .right
      .get shouldEqual 204

    db.readJson("SELECT * FROM test1").futureValue.right.get.length shouldEqual 10000
  }
} 
Example 81
Source File: DistributedCountRDD.scala    From carbondata   with Apache License 2.0 5 votes vote down vote up
package org.apache.carbondata.indexserver

import java.util.concurrent.Executors

import scala.collection.JavaConverters._
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor, Future}
import scala.concurrent.duration.Duration

import org.apache.hadoop.mapred.TaskAttemptID
import org.apache.hadoop.mapreduce.{InputSplit, TaskType}
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.spark.{Partition, SparkEnv, TaskContext}
import org.apache.spark.sql.SparkSession

import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.cache.CacheProvider
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.index.{IndexInputFormat, IndexStoreManager}
import org.apache.carbondata.core.index.dev.expr.IndexInputSplitWrapper
import org.apache.carbondata.core.util.{CarbonProperties, CarbonThreadFactory}
import org.apache.carbondata.spark.rdd.CarbonRDD


class DistributedCountRDD(@transient ss: SparkSession, indexInputFormat: IndexInputFormat)
  extends CarbonRDD[(String, String)](ss, Nil) {

  @transient private val LOGGER = LogServiceFactory.getLogService(classOf[DistributedPruneRDD]
    .getName)

  override protected def getPreferredLocations(split: Partition): Seq[String] = {
    if (split.asInstanceOf[IndexRDDPartition].getLocations != null) {
      split.asInstanceOf[IndexRDDPartition].getLocations.toSeq
    } else {
      Seq()
    }
  }

  override def internalCompute(split: Partition,
      context: TaskContext): Iterator[(String, String)] = {
    val attemptId = new TaskAttemptID(DistributedRDDUtils.generateTrackerId,
      id, TaskType.MAP, split.index, 0)
    val attemptContext = new TaskAttemptContextImpl(FileFactory.getConfiguration, attemptId)
    val inputSplits = split.asInstanceOf[IndexRDDPartition].inputSplit
    val numOfThreads = CarbonProperties.getInstance().getNumOfThreadsForExecutorPruning
    val service = Executors
      .newFixedThreadPool(numOfThreads, new CarbonThreadFactory("IndexPruningPool", true))
    implicit val ec: ExecutionContextExecutor = ExecutionContext
      .fromExecutor(service)
    if (indexInputFormat.ifAsyncCall()) {
      // to clear cache of invalid segments during pre-priming in index server
      IndexStoreManager.getInstance().clearInvalidSegments(indexInputFormat.getCarbonTable,
        indexInputFormat.getInvalidSegments)
    }
    val futures = if (inputSplits.length <= numOfThreads) {
      inputSplits.map {
        split => generateFuture(Seq(split))
      }
    } else {
      DistributedRDDUtils.groupSplits(inputSplits, numOfThreads).map {
        splits => generateFuture(splits)
      }
    }
    // scalastyle:off awaitresult
    val results = Await.result(Future.sequence(futures), Duration.Inf).flatten
    // scalastyle:on awaitresult
    val executorIP = s"${ SparkEnv.get.blockManager.blockManagerId.host }_${
      SparkEnv.get.blockManager.blockManagerId.executorId
    }"
    val cacheSize = if (CacheProvider.getInstance().getCarbonCache != null) {
      CacheProvider.getInstance().getCarbonCache.getCurrentSize
    } else {
      0L
    }
    Iterator((executorIP + "_" + cacheSize.toString, results.map(_._2.toLong).sum.toString))
  }

  override protected def internalGetPartitions: Array[Partition] = {
    new DistributedPruneRDD(ss, indexInputFormat).partitions
  }

  private def generateFuture(split: Seq[InputSplit])
    (implicit executionContext: ExecutionContext) = {
    Future {
      val segments = split.map { inputSplit =>
        val distributable = inputSplit.asInstanceOf[IndexInputSplitWrapper]
        distributable.getDistributable.getSegment
          .setReadCommittedScope(indexInputFormat.getReadCommittedScope)
        distributable.getDistributable.getSegment
      }
      val defaultIndex = IndexStoreManager.getInstance
        .getIndex(indexInputFormat.getCarbonTable, split.head
          .asInstanceOf[IndexInputSplitWrapper].getDistributable.getIndexSchema)
      defaultIndex.getBlockRowCount(defaultIndex, segments.toList.asJava, indexInputFormat
        .getPartitions).asScala
    }
  }

}
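The fromExecutor pattern used in internalCompute, wrapping a fixed JVM thread pool in an ExecutionContextExecutor, also works outside Spark whenever a bounded, dedicated pool is needed. A standalone sketch (pool size, workload, and timeout are illustrative):

import java.util.concurrent.Executors

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor, Future}

object FixedPoolExample extends App {
  val service = Executors.newFixedThreadPool(4)
  // Adapt the Java executor into a Scala ExecutionContextExecutor.
  implicit val ec: ExecutionContextExecutor = ExecutionContext.fromExecutor(service)

  val futures = (1 to 8).map(i => Future(i * i))
  // Blocking here is acceptable only because the program ends right after.
  val results = Await.result(Future.sequence(futures), 10.seconds)
  println(results.sum)

  service.shutdown()
}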