akka.stream.Materializer Scala Examples

The following examples show how to use akka.stream.Materializer. They are extracted from open source projects; the source file, project, and license for each example are noted on the line above it.
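Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what a Materializer is for: it turns a stream blueprint into running stages. It assumes Akka 2.6+, where an implicit Materializer can also be derived from an implicit ActorSystem.

import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}

object MaterializerBasics extends App {
  implicit val system: ActorSystem = ActorSystem("demo")
  // Explicit for clarity; on Akka 2.6+ an implicit Materializer is derived
  // from the implicit ActorSystem, so this line is optional.
  implicit val materializer: Materializer = Materializer(system)

  // Running (materializing) the stream requires the Materializer.
  Source(1 to 10)
    .runWith(Sink.foreach(println))
    .onComplete(_ => system.terminate())(system.dispatcher)
}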
Example 1
Source File: NaptimeModuleTest.scala    From naptime   with Apache License 2.0
package org.coursera.naptime

import java.util.Date
import javax.inject.Inject

import akka.stream.Materializer
import com.google.inject.Guice
import com.google.inject.Stage
import com.linkedin.data.schema.DataSchema
import com.linkedin.data.schema.DataSchemaUtil
import com.linkedin.data.schema.PrimitiveDataSchema
import com.linkedin.data.schema.RecordDataSchema
import org.coursera.naptime.model.KeyFormat
import org.coursera.naptime.resources.TopLevelCollectionResource
import org.coursera.naptime.router2.NaptimeRoutes
import org.junit.Test
import org.mockito.Mockito.mock
import org.scalatest.junit.AssertionsForJUnit
import play.api.libs.json.Json
import play.api.libs.json.OFormat

import scala.concurrent.ExecutionContext

object NaptimeModuleTest {
  case class User(name: String, createdAt: Date)
  object User {
    implicit val oFormat: OFormat[User] = Json.format[User]
  }
  class MyResource(implicit val executionContext: ExecutionContext, val materializer: Materializer)
      extends TopLevelCollectionResource[String, User] {
    override implicit def resourceFormat: OFormat[User] = User.oFormat
    override def keyFormat: KeyFormat[KeyType] = KeyFormat.stringKeyFormat
    override def resourceName: String = "myResource"
    implicit val fields = Fields

    def get(id: String) = Nap.get(ctx => ???)
  }
  object MyFakeModule extends NaptimeModule {
    override def configure(): Unit = {
      bindResource[MyResource]
      bind[MyResource].toInstance(mock(classOf[MyResource]))
      bindSchemaType[Date](DataSchemaUtil.dataSchemaTypeToPrimitiveDataSchema(DataSchema.Type.LONG))
    }
  }

  class OverrideTypesHelper @Inject()(val schemaOverrideTypes: NaptimeModule.SchemaTypeOverrides)
}

class NaptimeModuleTest extends AssertionsForJUnit {
  import NaptimeModuleTest._

  @Test
  def checkInferredOverrides(): Unit = {
    val injector = Guice.createInjector(Stage.DEVELOPMENT, MyFakeModule, NaptimeModule)
    val overrides = injector.getInstance(classOf[OverrideTypesHelper])
    assert(overrides.schemaOverrideTypes.size === 1)
    assert(overrides.schemaOverrideTypes.contains("java.util.Date"))
  }

  @Test
  def checkComputedOverrides(): Unit = {
    val injector = Guice.createInjector(Stage.DEVELOPMENT, MyFakeModule, NaptimeModule)
    val overrides = injector.getInstance(classOf[OverrideTypesHelper])
    val routes = injector.getInstance(classOf[NaptimeRoutes])
    assert(1 === routes.routerBuilders.size)
    val routerBuilder = routes.routerBuilders.head
    val inferredSchemaKeyed =
      routerBuilder.types.find(_.key == "org.coursera.naptime.NaptimeModuleTest.User").get
    assert(inferredSchemaKeyed.value.isInstanceOf[RecordDataSchema])
    val userSchema = inferredSchemaKeyed.value.asInstanceOf[RecordDataSchema]
    assert(2 === userSchema.getFields.size())
    val initialCreatedAtSchema = userSchema.getField("createdAt").getType.getDereferencedDataSchema
    assert(initialCreatedAtSchema.isInstanceOf[RecordDataSchema])
    assert(
      initialCreatedAtSchema
        .asInstanceOf[RecordDataSchema]
        .getDoc
        .contains("Unable to infer schema"))
    SchemaUtils.fixupInferredSchemas(userSchema, overrides.schemaOverrideTypes)
    val fixedCreatedAtSchema = userSchema.getField("createdAt").getType.getDereferencedDataSchema
    assert(fixedCreatedAtSchema.isInstanceOf[PrimitiveDataSchema])
  }
} 
Example 2
Source File: MainWithEphemeralDirectory.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.on.sql

import java.nio.file.Files

import akka.stream.Materializer
import com.daml.ledger.participant.state.kvutils.app.{
  Config,
  LedgerFactory,
  ParticipantConfig,
  ReadWriteService,
  Runner
}
import com.daml.lf.engine.Engine
import com.daml.logging.LoggingContext
import com.daml.resources.{ProgramResource, Resource, ResourceOwner}
import scopt.OptionParser

import scala.concurrent.ExecutionContext

object MainWithEphemeralDirectory {
  private val DirectoryPattern = "%DIR"

  def main(args: Array[String]): Unit = {
    new ProgramResource(new Runner("SQL Ledger", TestLedgerFactory).owner(args)).run()
  }

  object TestLedgerFactory extends LedgerFactory[ReadWriteService, ExtraConfig] {
    override val defaultExtraConfig: ExtraConfig = SqlLedgerFactory.defaultExtraConfig

    override def extraConfigParser(parser: OptionParser[Config[ExtraConfig]]): Unit =
      SqlLedgerFactory.extraConfigParser(parser)

    override def manipulateConfig(config: Config[ExtraConfig]): Config[ExtraConfig] =
      SqlLedgerFactory.manipulateConfig(config)

    override def readWriteServiceOwner(
        config: Config[ExtraConfig],
        participantConfig: ParticipantConfig,
        engine: Engine,
    )(
        implicit materializer: Materializer,
        logCtx: LoggingContext
    ): ResourceOwner[ReadWriteService] =
      new Owner(config, participantConfig, engine)

    class Owner(
        config: Config[ExtraConfig],
        participantConfig: ParticipantConfig,
        engine: Engine,
    )(implicit materializer: Materializer, logCtx: LoggingContext)
        extends ResourceOwner[ReadWriteService] {
      override def acquire()(
          implicit executionContext: ExecutionContext
      ): Resource[ReadWriteService] = {
        val directory = Files.createTempDirectory("ledger-on-sql-ephemeral-")
        val jdbcUrl = config.extra.jdbcUrl.map(_.replace(DirectoryPattern, directory.toString))
        SqlLedgerFactory
          .readWriteServiceOwner(
            config.copy(extra = config.extra.copy(jdbcUrl = jdbcUrl)),
            participantConfig,
            engine,
          )
          .acquire()
      }
    }

  }
} 
Example 3
Source File: Components.scala    From gbf-raidfinder   with MIT License
package walfie.gbf.raidfinder.server

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.trueaccord.scalapb.json.JsonFormat
import monix.execution.Scheduler
import play.api.BuiltInComponents
import play.api.http.{ContentTypes, DefaultHttpErrorHandler}
import play.api.libs.json.Json
import play.api.Mode.Mode
import play.api.mvc._
import play.api.routing.Router
import play.api.routing.sird._
import play.core.server._
import play.filters.cors.{CORSConfig, CORSFilter}
import play.filters.gzip.GzipFilterComponents
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.Future
import walfie.gbf.raidfinder.protocol.{RaidBossesResponse, BinaryProtobuf}
import walfie.gbf.raidfinder.RaidFinder
import walfie.gbf.raidfinder.server.controller._
import walfie.gbf.raidfinder.server.syntax.ProtocolConverters.RaidBossDomainOps

class Components(
  raidFinder:                 RaidFinder[BinaryProtobuf],
  translator:                 BossNameTranslator,
  port:                       Int,
  mode:                       Mode,
  websocketKeepAliveInterval: FiniteDuration,
  metricsCollector:           MetricsCollector
) extends NettyServerComponents
  with BuiltInComponents with GzipFilterComponents with Controller {

  override lazy val serverConfig = ServerConfig(port = Some(port), mode = mode)

  private val corsFilter = new CORSFilter(corsConfig = CORSConfig().withAnyOriginAllowed)
  override lazy val httpFilters = List(gzipFilter, corsFilter)

  lazy val websocketController = new WebsocketController(
    raidFinder, translator, websocketKeepAliveInterval, metricsCollector
  )(actorSystem, materializer, Scheduler.Implicits.global)

  // The charset isn't necessary, but without it, Chrome displays Japanese incorrectly
  // if you try to view the JSON directly.
  // https://bugs.chromium.org/p/chromium/issues/detail?id=438464
  private val ContentTypeJsonWithUtf8 = "application/json; charset=utf-8"

  lazy val router = Router.from {
    case GET(p"/") =>
      controllers.Assets.at(path = "/public", "index.html")

    case GET(p"/api/bosses.json" ? q_s"name=$names") =>
      val bosses = if (names.nonEmpty) {
        val knownBossesMap = raidFinder.getKnownBosses
        names.collect(knownBossesMap)
      } else raidFinder.getKnownBosses.values

      val responseProtobuf = RaidBossesResponse(
        raidBosses = bosses.map(_.toProtocol(translator)).toSeq
      )
      val responseJson = JsonFormat.toJsonString(responseProtobuf)
      Action(Ok(responseJson).as(ContentTypeJsonWithUtf8))

    case GET(p"/api/metrics.json") =>
      val activeUsers = metricsCollector.getActiveWebSocketCount()
      val json = Json.obj("activeUsers" -> activeUsers)
      Action(Ok(json))

    case GET(p"/ws/raids" ? q_o"keepAlive=${ bool(keepAlive) }") =>
      websocketController.raids(keepAlive = keepAlive.getOrElse(false))

    case GET(p"/$file*") =>
      controllers.Assets.at(path = "/public", file = file)
  }

  override lazy val httpErrorHandler = new ErrorHandler

  override def serverStopHook = () => Future.successful {
    actorSystem.terminate()
  }
} 
Example 4
Source File: ModelService.scala    From reactive-machine-learning-systems   with MIT License
package com.reactivemachinelearning

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshalling.ToResponseMarshallable
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.stream.{ActorMaterializer, Materializer}
//import spray.json._
import spray.json.DefaultJsonProtocol

import scala.concurrent.{ExecutionContextExecutor, Future}

case class Prediction(id: Long, timestamp: Long, value: Double)

trait Protocols extends DefaultJsonProtocol {
  implicit val ipInfoFormat = jsonFormat3(Prediction.apply)
}

trait Service extends Protocols {
  implicit val system: ActorSystem

  implicit def executor: ExecutionContextExecutor

  implicit val materializer: Materializer

  val logger: LoggingAdapter

//  private def parseFeatures(features: String): Map[Long, Double] = {
//    features.parseJson.convertTo[Map[Long, Double]]
//  }

  def predict(features: String): Future[Prediction] = {
    Future(Prediction(123, 456, 0.5))
  }

  val routes = {
    logRequestResult("predictive-service") {
      pathPrefix("ip") {
        (get & path(Segment)) { features =>
          complete {
            predict(features).map[ToResponseMarshallable] {
//              case prediction: Prediction => prediction
              case _ => BadRequest
            }
          }
        }
      }
    }
  }
}

object PredictiveService extends App with Service {
  override implicit val system = ActorSystem()
  override implicit val executor = system.dispatcher
  override implicit val materializer = ActorMaterializer()

  override val logger = Logging(system, getClass)

  Http().bindAndHandle(routes, "0.0.0.0", 9000)
} 
Example 5
Source File: LocalErrorHandler.scala    From pertax-frontend   with Apache License 2.0
package error

import akka.stream.Materializer
import config.ConfigDecorator
import controllers.auth.AuthJourney
import com.google.inject.{Inject, Singleton}
import play.api.i18n.{I18nSupport, MessagesApi}
import play.api.mvc._
import play.twirl.api.Html
import uk.gov.hmrc.play.bootstrap.http.FrontendErrorHandler
import uk.gov.hmrc.renderer.TemplateRenderer
import javax.inject.Provider
import util.LocalPartialRetriever

@Singleton
class LocalErrorHandler @Inject()(
  val messagesApi: MessagesApi,
  val materializer: Materializer,
  authJourney: Provider[AuthJourney]
)(
  implicit val partialRetriever: LocalPartialRetriever,
  val configDecorator: ConfigDecorator,
  val templateRenderer: TemplateRenderer)
    extends FrontendErrorHandler with I18nSupport with RendersErrors {

  override def standardErrorTemplate(
    pageTitle: String,
    heading: String,
    message: String
  )(implicit request: Request[_]): Html =
    views.html.unauthenticatedError(pageTitle, Some(heading), Some(message))

} 
Example 6
Source File: SessionIdFilter.scala    From pertax-frontend   with Apache License 2.0
package filters

import java.util.UUID

import akka.stream.Materializer
import com.google.inject.Inject
import play.api.mvc._
import play.api.mvc.request.{Cell, RequestAttrKey}
import uk.gov.hmrc.http.{SessionKeys, HeaderNames => HMRCHeaderNames}

import scala.concurrent.{ExecutionContext, Future}

class SessionIdFilter(
  override val mat: Materializer,
  uuid: => UUID,
  implicit val ec: ExecutionContext
) extends Filter {

  @Inject
  def this(mat: Materializer, ec: ExecutionContext) {
    this(mat, UUID.randomUUID(), ec)
  }

  override def apply(f: RequestHeader => Future[Result])(rh: RequestHeader): Future[Result] = {

    lazy val sessionId: String = s"session-$uuid"

    if (rh.session.get(SessionKeys.sessionId).isEmpty) {

      val headers = rh.headers.add(
        HMRCHeaderNames.xSessionId -> sessionId
      )

      val session = rh.session + (SessionKeys.sessionId -> sessionId)

      f(rh.withHeaders(headers).addAttr(RequestAttrKey.Session, Cell(session))).map { result =>
        val updatedSession = if (result.session(rh).get(SessionKeys.sessionId).isDefined) {
          result.session(rh)
        } else {
          result.session(rh) + (SessionKeys.sessionId -> sessionId)
        }

        result.withSession(updatedSession)
      }
    } else {
      f(rh)
    }
  }
} 
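Because uuid is a by-name parameter captured in a lazy val, it is evaluated at most once per request, so the injected UUID.randomUUID() produces a fresh session id only for requests that lack one. A hedged test sketch that pins the UUID so the generated id is deterministic (the object and system names are illustrative):

object SessionIdFilterSketch {
  import java.util.UUID
  import akka.actor.ActorSystem
  import akka.stream.Materializer

  val system = ActorSystem("session-id-filter-test")
  val fixedUuid: UUID = UUID.fromString("00000000-0000-0000-0000-000000000000")

  // Every request without a session id now gets
  // "session-00000000-0000-0000-0000-000000000000".
  val filter = new SessionIdFilter(Materializer(system), fixedUuid, system.dispatcher)
}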
Example 7
Source File: ClientUtil.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.daml.quickstart.iou

import java.util.UUID

import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.{Done, NotUsed}
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.refinements.ApiTypes.{ApplicationId, WorkflowId}
import com.daml.ledger.api.v1.command_submission_service.SubmitRequest
import com.daml.ledger.api.v1.commands.{Command, Commands}
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset
import com.daml.ledger.api.v1.transaction.Transaction
import com.daml.ledger.api.v1.transaction_filter.{Filters, TransactionFilter}
import com.daml.ledger.client.LedgerClient
import com.daml.quickstart.iou.FutureUtil.toFuture
import com.google.protobuf.empty.Empty

import scala.concurrent.{ExecutionContext, Future}

class ClientUtil(
    client: LedgerClient,
    applicationId: ApplicationId,
) {

  import ClientUtil._

  private val ledgerId = client.ledgerId
  private val packageClient = client.packageClient
  private val commandClient = client.commandClient
  private val transactionClient = client.transactionClient

  def listPackages(implicit ec: ExecutionContext): Future[Set[String]] =
    packageClient.listPackages().map(_.packageIds.toSet)

  def ledgerEnd(implicit ec: ExecutionContext): Future[LedgerOffset] =
    transactionClient.getLedgerEnd().flatMap(response => toFuture(response.offset))

  def submitCommand(party: String, workflowId: WorkflowId, cmd: Command.Command): Future[Empty] = {
    val commands = Commands(
      ledgerId = LedgerId.unwrap(ledgerId),
      workflowId = WorkflowId.unwrap(workflowId),
      applicationId = ApplicationId.unwrap(applicationId),
      commandId = uniqueId,
      party = party,
      commands = Seq(Command(cmd)),
    )

    commandClient.submitSingleCommand(SubmitRequest(Some(commands), None))
  }

  def nextTransaction(party: String, offset: LedgerOffset)(
      implicit mat: Materializer): Future[Transaction] =
    transactionClient
      .getTransactions(offset, None, transactionFilter(party))
      .take(1L)
      .runWith(Sink.head)

  def subscribe(party: String, offset: LedgerOffset, max: Option[Long])(f: Transaction => Unit)(
      implicit mat: Materializer): Future[Done] = {
    val source: Source[Transaction, NotUsed] =
      transactionClient.getTransactions(offset, None, transactionFilter(party))
    max.fold(source)(n => source.take(n)) runForeach f
  }

  override lazy val toString: String = s"ClientUtil{ledgerId=$ledgerId}"
}

object ClientUtil {
  def transactionFilter(parties: String*): TransactionFilter =
    TransactionFilter(parties.map((_, Filters.defaultInstance)).toMap)

  def uniqueId: String = UUID.randomUUID.toString

  def workflowIdFromParty(p: String): WorkflowId =
    WorkflowId(s"$p Workflow")
} 
Example 8
Source File: ClientUtil.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.daml.quickstart.iou

import java.util.UUID

import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.{Done, NotUsed}
import com.daml.ledger.api.refinements.ApiTypes.{ApplicationId, WorkflowId}
import com.daml.ledger.api.v1.command_submission_service.SubmitRequest
import com.daml.ledger.api.v1.commands.Commands
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset
import com.daml.ledger.api.v1.transaction.Transaction
import com.daml.ledger.api.v1.transaction_filter.{Filters, TransactionFilter}
import com.daml.ledger.client.LedgerClient
import com.daml.ledger.client.binding.{Primitive => P}
import com.daml.quickstart.iou.FutureUtil.toFuture
import com.google.protobuf.empty.Empty

import scalaz.syntax.tag._

import scala.concurrent.{ExecutionContext, Future}

class ClientUtil(
    client: LedgerClient,
    applicationId: ApplicationId,
) {

  import ClientUtil._

  private val ledgerId = client.ledgerId
  private val commandClient = client.commandClient
  private val transactionClient = client.transactionClient

  def ledgerEnd(implicit ec: ExecutionContext): Future[LedgerOffset] =
    transactionClient.getLedgerEnd().flatMap(response => toFuture(response.offset))

  def submitCommand[T](
      sender: P.Party,
      workflowId: WorkflowId,
      command: P.Update[P.ContractId[T]]): Future[Empty] = {
    commandClient.submitSingleCommand(submitRequest(sender, workflowId, command))
  }

  def submitRequest[T](
      party: P.Party,
      workflowId: WorkflowId,
      seq: P.Update[P.ContractId[T]]*): SubmitRequest = {
    val commands = Commands(
      ledgerId = ledgerId.unwrap,
      workflowId = WorkflowId.unwrap(workflowId),
      applicationId = ApplicationId.unwrap(applicationId),
      commandId = uniqueId,
      party = P.Party.unwrap(party),
      commands = seq.map(_.command)
    )
    SubmitRequest(Some(commands), None)
  }

  def nextTransaction(party: P.Party, offset: LedgerOffset)(
      implicit mat: Materializer): Future[Transaction] =
    transactionClient
      .getTransactions(offset, None, transactionFilter(party))
      .take(1L)
      .runWith(Sink.head)

  def subscribe(party: P.Party, offset: LedgerOffset, max: Option[Long])(f: Transaction => Unit)(
      implicit mat: Materializer): Future[Done] = {
    val source: Source[Transaction, NotUsed] =
      transactionClient.getTransactions(offset, None, transactionFilter(party))
    max.fold(source)(n => source.take(n)) runForeach f
  }

  override lazy val toString: String = s"ClientUtil{ledgerId=$ledgerId}"
}

object ClientUtil {
  def transactionFilter(ps: P.Party*): TransactionFilter =
    TransactionFilter(P.Party.unsubst(ps).map((_, Filters.defaultInstance)).toMap)

  def uniqueId: String = UUID.randomUUID.toString

  def workflowIdFromParty(p: P.Party): WorkflowId =
    WorkflowId(s"${P.Party.unwrap(p): String} Workflow")
} 
Example 9
Source File: ClientUtil.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.daml.quickstart.iou

import java.util.UUID

import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.{Done, NotUsed}
import com.daml.ledger.api.refinements.ApiTypes.{ApplicationId, WorkflowId}
import com.daml.ledger.api.v1.command_submission_service.SubmitRequest
import com.daml.ledger.api.v1.commands.Commands
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset
import com.daml.ledger.api.v1.transaction.Transaction
import com.daml.ledger.api.v1.transaction_filter.{Filters, TransactionFilter}
import com.daml.ledger.client.LedgerClient
import com.daml.ledger.client.binding.{Primitive => P}
import com.daml.quickstart.iou.FutureUtil.toFuture
import com.google.protobuf.empty.Empty

import scalaz.syntax.tag._

import scala.concurrent.{ExecutionContext, Future}

class ClientUtil(
    client: LedgerClient,
    applicationId: ApplicationId,
) {

  import ClientUtil._

  private val ledgerId = client.ledgerId
  private val commandClient = client.commandClient
  private val transactionClient = client.transactionClient

  def ledgerEnd(implicit ec: ExecutionContext): Future[LedgerOffset] =
    transactionClient.getLedgerEnd().flatMap(response => toFuture(response.offset))

  def submitCommand[T](
      sender: P.Party,
      workflowId: WorkflowId,
      command: P.Update[P.ContractId[T]]): Future[Empty] = {
    commandClient.submitSingleCommand(submitRequest(sender, workflowId, command))
  }

  def submitRequest[T](
      party: P.Party,
      workflowId: WorkflowId,
      seq: P.Update[P.ContractId[T]]*): SubmitRequest = {
    val commands = Commands(
      ledgerId = ledgerId.unwrap,
      workflowId = WorkflowId.unwrap(workflowId),
      applicationId = ApplicationId.unwrap(applicationId),
      commandId = uniqueId,
      party = P.Party.unwrap(party),
      commands = seq.map(_.command)
    )
    SubmitRequest(Some(commands), None)
  }

  def nextTransaction(party: P.Party, offset: LedgerOffset)(
      implicit mat: Materializer): Future[Transaction] =
    transactionClient
      .getTransactions(offset, None, transactionFilter(party))
      .take(1L)
      .runWith(Sink.head)

  def subscribe(party: P.Party, offset: LedgerOffset, max: Option[Long])(f: Transaction => Unit)(
      implicit mat: Materializer): Future[Done] = {
    val source: Source[Transaction, NotUsed] =
      transactionClient.getTransactions(offset, None, transactionFilter(party))
    max.fold(source)(n => source.take(n)) runForeach f
  }

  override lazy val toString: String = s"ClientUtil{ledgerId=$ledgerId}"
}

object ClientUtil {
  def transactionFilter(ps: P.Party*): TransactionFilter =
    TransactionFilter(P.Party.unsubst(ps).map((_, Filters.defaultInstance)).toMap)

  def uniqueId: String = UUID.randomUUID.toString

  def workflowIdFromParty(p: P.Party): WorkflowId =
    WorkflowId(s"${P.Party.unwrap(p): String} Workflow")
} 
Example 10
Source File: SqlLedgerFactory.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.on.sql

import java.time.Duration

import akka.stream.Materializer
import com.daml.caching
import com.daml.ledger.participant.state.kvutils.api.KeyValueParticipantState
import com.daml.ledger.participant.state.kvutils.app.{
  Config,
  LedgerFactory,
  ParticipantConfig,
  ReadWriteService
}
import com.daml.ledger.participant.state.kvutils.caching._
import com.daml.ledger.participant.state.v1.SeedService
import com.daml.lf.engine.Engine
import com.daml.logging.LoggingContext
import com.daml.platform.configuration.LedgerConfiguration
import com.daml.resources.{Resource, ResourceOwner}
import scopt.OptionParser

import scala.concurrent.ExecutionContext

object SqlLedgerFactory extends LedgerFactory[ReadWriteService, ExtraConfig] {
  override val defaultExtraConfig: ExtraConfig = ExtraConfig(
    jdbcUrl = None,
  )

  override def ledgerConfig(config: Config[ExtraConfig]): LedgerConfiguration =
    super.ledgerConfig(config).copy(initialConfigurationSubmitDelay = Duration.ZERO)

  override def extraConfigParser(parser: OptionParser[Config[ExtraConfig]]): Unit = {
    parser
      .opt[String]("jdbc-url")
      .required()
      .text("The URL used to connect to the database.")
      .action((jdbcUrl, config) => config.copy(extra = config.extra.copy(jdbcUrl = Some(jdbcUrl))))
    ()
  }

  override def manipulateConfig(config: Config[ExtraConfig]): Config[ExtraConfig] =
    config.copy(participants = config.participants.map(_.copy(allowExistingSchemaForIndex = true)))

  override def readWriteServiceOwner(
      config: Config[ExtraConfig],
      participantConfig: ParticipantConfig,
      engine: Engine,
  )(implicit materializer: Materializer, logCtx: LoggingContext): ResourceOwner[ReadWriteService] =
    new Owner(config, participantConfig, engine)

  class Owner(
      config: Config[ExtraConfig],
      participantConfig: ParticipantConfig,
      engine: Engine,
  )(implicit materializer: Materializer, logCtx: LoggingContext)
      extends ResourceOwner[KeyValueParticipantState] {
    override def acquire()(
        implicit executionContext: ExecutionContext
    ): Resource[KeyValueParticipantState] = {
      val jdbcUrl = config.extra.jdbcUrl.getOrElse {
        throw new IllegalStateException("No JDBC URL provided.")
      }
      val metrics = createMetrics(participantConfig, config)
      new SqlLedgerReaderWriter.Owner(
        config.ledgerId,
        participantConfig.participantId,
        metrics = metrics,
        engine,
        jdbcUrl,
        stateValueCache = caching.WeightedCache.from(
          configuration = config.stateValueCache,
          metrics = metrics.daml.kvutils.submission.validator.stateValueCache,
        ),
        seedService = SeedService(config.seeding),
        resetOnStartup = false
      ).acquire()
        .map(readerWriter => new KeyValueParticipantState(readerWriter, readerWriter, metrics))
    }
  }
} 
Example 11
Source File: AkkaState.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.perf

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.daml.grpc.adapter.{AkkaExecutionSequencerPool, ExecutionSequencerFactory}
import org.openjdk.jmh.annotations._

@State(Scope.Benchmark)
class AkkaState {

  private var _sys: ActorSystem = null
  private var _mat: Materializer = null
  private var _esf: ExecutionSequencerFactory = null

  @Setup(Level.Trial)
  def setup(): Unit = {
    println("Starting Client Akka Infrastructure")
    _sys = ActorSystem()
    _mat = Materializer(_sys)
    _esf = new AkkaExecutionSequencerPool("clientPool")(sys)
  }

  @TearDown(Level.Trial)
  def close(): Unit = {
    println("Stopping Client Akka Infrastructure")
    _esf.close()
    _esf = null
    _mat.shutdown()
    _mat = null
    _sys.terminate() // does not wait
    _sys = null
  }

  def sys: ActorSystem = _sys

  def mat: Materializer = _mat

  def esf: ExecutionSequencerFactory = _esf
} 
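The benchmark state above makes the materializer lifecycle explicit: Materializer(_sys) creates a materializer attached to the system, mat.shutdown() aborts any streams still running on it, and the ActorSystem is terminated separately (terminate() does not block). A condensed sketch of the same setup/teardown contract, assuming Akka 2.6+ (names illustrative):

import akka.actor.ActorSystem
import akka.stream.Materializer

object LifecycleSketch extends App {
  val system = ActorSystem("bench")
  val mat: Materializer = Materializer(system)
  try {
    // ... materialize and run streams with `mat` ...
  } finally {
    mat.shutdown()     // aborts streams still running on this materializer
    system.terminate() // asynchronous; await system.whenTerminated if needed
  }
}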
Example 12
Source File: PerfBenchState.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.perf

import java.io.File

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.daml.bazeltools.BazelRunfiles.rlocation
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.ledger.api.testing.utils.Resource
import org.openjdk.jmh.annotations._

import scala.concurrent.Await
import scala.concurrent.duration._

@State(Scope.Benchmark)
abstract class PerfBenchState extends InfiniteRetries {

  def darFile: File = new File(rlocation("ledger/test-common/model-tests.dar"))

  private var akkaState: AkkaState = _
  private var server: Resource[LedgerContext] = _

  // Unfortunately this must be a constant literal
  // Valid values are LedgerContext.mem and LedgerContext.sql
  @Param(Array("InMemory", "Postgres"))
  var store: String = _

  @Setup(Level.Trial)
  def setup(): Unit = {
    akkaState = new AkkaState()
    akkaState.setup()
    server = LedgerFactories.createSandboxResource(store, List(darFile))(mat.executionContext)
    server.setup()
  }

  @TearDown(Level.Trial)
  def close(): Unit = {
    server.close()
    server = null
    akkaState.close()
    akkaState = null
  }

  @TearDown(Level.Invocation)
  def reset(): Unit = {
    val _ = Await.result(server.value.reset()(system), 5.seconds)
  }

  def ledger: LedgerContext = server.value

  def mat: Materializer = akkaState.mat

  def system: ActorSystem = akkaState.sys

  def esf: ExecutionSequencerFactory = akkaState.esf

} 
Example 13
Source File: StaticTime.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.testing.time

import java.time.Instant
import java.util.concurrent.atomic.AtomicReference

import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, RunnableGraph, Sink}
import akka.stream.{ClosedShape, KillSwitches, Materializer, UniqueKillSwitch}
import com.daml.api.util.{TimeProvider, TimestampConversion}
import com.daml.api.util.TimestampConversion._
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.v1.testing.time_service.{GetTimeRequest, SetTimeRequest}
import com.daml.ledger.api.v1.testing.time_service.TimeServiceGrpc.{TimeService, TimeServiceStub}
import com.daml.ledger.client.LedgerClient

import scala.concurrent.{ExecutionContext, Future}

class StaticTime(
    timeService: TimeService,
    clock: AtomicReference[Instant],
    killSwitch: UniqueKillSwitch,
    ledgerId: String)
    extends TimeProvider
    with AutoCloseable {

  def getCurrentTime: Instant = clock.get

  def timeRequest(instant: Instant) =
    SetTimeRequest(
      ledgerId,
      Some(TimestampConversion.fromInstant(getCurrentTime)),
      Some(TimestampConversion.fromInstant(instant)))

  def setTime(instant: Instant)(implicit ec: ExecutionContext): Future[Unit] = {
    timeService.setTime(timeRequest(instant)).map { _ =>
      val _ = StaticTime.advanceClock(clock, instant)
    }
  }

  override def close(): Unit = killSwitch.shutdown()
}

object StaticTime {
  def advanceClock(clock: AtomicReference[Instant], instant: Instant): Instant = {
    clock.updateAndGet {
      case current if instant isAfter current => instant
      case current => current
    }
  }

  def updatedVia(timeService: TimeServiceStub, ledgerId: String, token: Option[String] = None)(
      implicit m: Materializer,
      esf: ExecutionSequencerFactory): Future[StaticTime] = {
    val clockRef = new AtomicReference[Instant](Instant.EPOCH)
    val killSwitchExternal = KillSwitches.single[Instant]
    val sinkExternal = Sink.head[Instant]

    RunnableGraph
      .fromGraph {
        GraphDSL.create(killSwitchExternal, sinkExternal) {
          case (killSwitch, futureOfFirstElem) =>
            // We serve this in a future which completes when the first element has passed through.
            // Thus we make sure that the object we serve already received time data from the ledger.
            futureOfFirstElem.map(_ => new StaticTime(timeService, clockRef, killSwitch, ledgerId))(
              DirectExecutionContext)
        } { implicit b => (killSwitch, sinkHead) =>
          import GraphDSL.Implicits._
          val instantSource = b.add(
            ClientAdapter
              .serverStreaming(
                GetTimeRequest(ledgerId),
                LedgerClient.stub(timeService, token).getTime)
              .map(r => toInstant(r.getCurrentTime)))

          val updateClock = b.add(Flow[Instant].map { i =>
            advanceClock(clockRef, i)
            i
          })

          val broadcastTimes = b.add(Broadcast[Instant](2))

          val ignore = b.add(Sink.ignore)

          // format: OFF
          instantSource ~> killSwitch ~> updateClock ~> broadcastTimes.in
                                                        broadcastTimes.out(0) ~> sinkHead
                                                        broadcastTimes.out(1) ~> ignore
          // format: ON

          ClosedShape
        }
      }
      .run()
  }

} 
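The GraphDSL wiring above is easier to see without the gRPC and clock machinery. Below is a stripped-down, hedged sketch of the same shape: a Broadcast fans out to a Sink.head, whose materialized future signals that the first element has passed through, and to a Sink.ignore that keeps the rest of the stream draining. It assumes Akka 2.6+, where the materializer for run() is derived from the implicit ActorSystem.

import akka.actor.ActorSystem
import akka.stream.ClosedShape
import akka.stream.scaladsl.{Broadcast, GraphDSL, RunnableGraph, Sink, Source}

object BroadcastSketch extends App {
  implicit val system: ActorSystem = ActorSystem("sketch")

  val firstElem = RunnableGraph
    .fromGraph(GraphDSL.create(Sink.head[Int]) { implicit b => sinkHead =>
      import GraphDSL.Implicits._
      val source    = b.add(Source(1 to 100))
      val broadcast = b.add(Broadcast[Int](2))
      source ~> broadcast.in
      broadcast.out(0) ~> sinkHead
      broadcast.out(1) ~> Sink.ignore
      ClosedShape
    })
    .run() // Future[Int], completes with 1

  firstElem.foreach { n => println(s"first: $n"); system.terminate() }(system.dispatcher)
}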
Example 14
Source File: JdbcIndex.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.index

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.participant.state.index.v2
import com.daml.ledger.participant.state.index.v2.IndexService
import com.daml.ledger.participant.state.v1.{Configuration, ParticipantId}
import com.daml.logging.LoggingContext
import com.daml.metrics.Metrics
import com.daml.platform.configuration.ServerRole
import com.daml.platform.store.dao.events.LfValueTranslation
import com.daml.resources.ResourceOwner

object JdbcIndex {
  def owner(
      serverRole: ServerRole,
      initialConfig: Configuration,
      ledgerId: LedgerId,
      participantId: ParticipantId,
      jdbcUrl: String,
      eventsPageSize: Int,
      metrics: Metrics,
      lfValueTranslationCache: LfValueTranslation.Cache,
  )(implicit mat: Materializer, logCtx: LoggingContext): ResourceOwner[IndexService] =
    new ReadOnlySqlLedger.Owner(
      serverRole,
      jdbcUrl,
      ledgerId,
      eventsPageSize,
      metrics,
      lfValueTranslationCache,
    ).map { ledger =>
      new LedgerBackedIndexService(MeteredReadOnlyLedger(ledger, metrics), participantId) {
        override def getLedgerConfiguration(): Source[v2.LedgerConfiguration, NotUsed] =
          // FIXME(JM): The indexer should on start set the default configuration.
          Source.single(v2.LedgerConfiguration(initialConfig.maxDeduplicationTime))
      }
    }
} 
Example 15
Source File: LedgerApiServer.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import com.daml.metrics.Metrics
import com.daml.ports.Port
import com.daml.resources.{Resource, ResourceOwner}
import io.grpc.ServerInterceptor
import io.netty.handler.ssl.SslContext

import scala.concurrent.{ExecutionContext, Future, Promise}

final class LedgerApiServer(
    apiServicesOwner: ResourceOwner[ApiServices],
    desiredPort: Port,
    maxInboundMessageSize: Int,
    address: Option[String],
    sslContext: Option[SslContext] = None,
    interceptors: List[ServerInterceptor] = List.empty,
    metrics: Metrics,
)(implicit actorSystem: ActorSystem, materializer: Materializer, logCtx: LoggingContext)
    extends ResourceOwner[ApiServer] {

  private val logger = ContextualizedLogger.get(this.getClass)

  override def acquire()(implicit executionContext: ExecutionContext): Resource[ApiServer] = {
    val servicesClosedPromise = Promise[Unit]()

    for {
      eventLoopGroups <- new ServerEventLoopGroups.Owner(
        actorSystem.name,
        workerParallelism = sys.runtime.availableProcessors(),
        bossParallelism = 1,
      ).acquire()
      apiServicesResource = apiServicesOwner.acquire()
      apiServices <- apiServicesResource
      server <- new GrpcServerOwner(
        address,
        desiredPort,
        maxInboundMessageSize,
        sslContext,
        interceptors,
        metrics,
        eventLoopGroups,
        apiServices.services,
      ).acquire()
      // Notify the caller that the services have been closed, so a reset request can complete
      // without blocking on the server terminating.
      _ <- Resource(Future.successful(()))(_ =>
        apiServicesResource.release().map(_ => servicesClosedPromise.success(())))
    } yield {
      val host = address.getOrElse("localhost")
      val actualPort = server.getPort
      val transportMedium = if (sslContext.isDefined) "TLS" else "plain text"
      logger.info(s"Listening on $host:$actualPort over $transportMedium.")
      new ApiServer {
        override val port: Port =
          Port(server.getPort)

        override def servicesClosed(): Future[Unit] =
          servicesClosedPromise.future
      }
    }
  }
} 
Example 16
Source File: ApiActiveContractsService.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.ledger.participant.state.index.v2.{IndexActiveContractsService => ACSBackend}
import com.daml.dec.DirectExecutionContext
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.v1.active_contracts_service.ActiveContractsServiceGrpc.ActiveContractsService
import com.daml.ledger.api.v1.active_contracts_service._
import com.daml.ledger.api.validation.TransactionFilterValidator
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import com.daml.platform.api.grpc.GrpcApiService
import com.daml.platform.server.api.validation.ActiveContractsServiceValidation
import io.grpc.{BindableService, ServerServiceDefinition}

import scala.concurrent.ExecutionContext

final class ApiActiveContractsService private (
    backend: ACSBackend,
)(
    implicit executionContext: ExecutionContext,
    protected val mat: Materializer,
    protected val esf: ExecutionSequencerFactory,
    logCtx: LoggingContext,
) extends ActiveContractsServiceAkkaGrpc
    with GrpcApiService {

  private val logger = ContextualizedLogger.get(this.getClass)

  override protected def getActiveContractsSource(
      request: GetActiveContractsRequest): Source[GetActiveContractsResponse, NotUsed] = {
    logger.trace("Serving an Active Contracts request...")

    TransactionFilterValidator
      .validate(request.getFilter, "filter")
      .fold(Source.failed, backend.getActiveContracts(_, request.verbose))
      .via(logger.logErrorsOnStream)
  }

  override def bindService(): ServerServiceDefinition =
    ActiveContractsServiceGrpc.bindService(this, DirectExecutionContext)
}

object ApiActiveContractsService {

  def create(ledgerId: LedgerId, backend: ACSBackend)(
      implicit ec: ExecutionContext,
      mat: Materializer,
      esf: ExecutionSequencerFactory,
      logCtx: LoggingContext): ActiveContractsService with GrpcApiService =
    new ActiveContractsServiceValidation(new ApiActiveContractsService(backend), ledgerId)
    with BindableService {
      override def bindService(): ServerServiceDefinition =
        ActiveContractsServiceGrpc.bindService(this, DirectExecutionContext)
    }
} 
Example 17
Source File: ApiCommandCompletionService.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services

import java.util.concurrent.atomic.AtomicLong

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.ledger.participant.state.index.v2.IndexCompletionsService
import com.daml.dec.DirectExecutionContext
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.ledger.api.domain
import com.daml.ledger.api.domain.{LedgerId, LedgerOffset}
import com.daml.ledger.api.messages.command.completion.CompletionStreamRequest
import com.daml.ledger.api.v1.command_completion_service._
import com.daml.ledger.api.validation.PartyNameChecker
import com.daml.logging.LoggingContext.withEnrichedLoggingContext
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import com.daml.platform.api.grpc.GrpcApiService
import com.daml.platform.server.api.services.domain.CommandCompletionService
import com.daml.platform.server.api.services.grpc.GrpcCommandCompletionService
import io.grpc.ServerServiceDefinition

import scala.concurrent.{ExecutionContext, Future}

final class ApiCommandCompletionService private (completionsService: IndexCompletionsService)(
    implicit ec: ExecutionContext,
    protected val mat: Materializer,
    protected val esf: ExecutionSequencerFactory,
    logCtx: LoggingContext)
    extends CommandCompletionService {

  private val logger = ContextualizedLogger.get(this.getClass)

  private val subscriptionIdCounter = new AtomicLong()

  override def completionStreamSource(
      request: CompletionStreamRequest): Source[CompletionStreamResponse, NotUsed] =
    withEnrichedLoggingContext(logging.parties(request.parties), logging.offset(request.offset)) {
      implicit logCtx =>
        val subscriptionId = subscriptionIdCounter.getAndIncrement().toString
        logger.debug(s"Received request for completion subscription $subscriptionId: $request")

        val offset = request.offset.getOrElse(LedgerOffset.LedgerEnd)

        completionsService
          .getCompletions(offset, request.applicationId, request.parties)
          .via(logger.logErrorsOnStream)
    }

  override def getLedgerEnd(ledgerId: domain.LedgerId): Future[LedgerOffset.Absolute] =
    completionsService.currentLedgerEnd().andThen(logger.logErrorsOnCall[LedgerOffset.Absolute])

}

object ApiCommandCompletionService {

  def create(ledgerId: LedgerId, completionsService: IndexCompletionsService)(
      implicit ec: ExecutionContext,
      mat: Materializer,
      esf: ExecutionSequencerFactory,
      logCtx: LoggingContext): GrpcCommandCompletionService with GrpcApiService = {
    val impl: CommandCompletionService =
      new ApiCommandCompletionService(completionsService)

    new GrpcCommandCompletionService(ledgerId, impl, PartyNameChecker.AllowAllParties)
    with GrpcApiService {
      override def bindService(): ServerServiceDefinition =
        CommandCompletionServiceGrpc.bindService(this, DirectExecutionContext)
    }
  }
} 
Example 18
Source File: ApiLedgerConfigurationService.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.ledger.participant.state.index.v2.IndexConfigurationService
import com.daml.api.util.DurationConversion._
import com.daml.dec.DirectExecutionContext
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.v1.ledger_configuration_service._
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import com.daml.platform.api.grpc.GrpcApiService
import com.daml.platform.server.api.validation.LedgerConfigurationServiceValidation
import io.grpc.{BindableService, ServerServiceDefinition}

import scala.concurrent.ExecutionContext

final class ApiLedgerConfigurationService private (configurationService: IndexConfigurationService)(
    implicit protected val esf: ExecutionSequencerFactory,
    protected val mat: Materializer,
    logCtx: LoggingContext)
    extends LedgerConfigurationServiceAkkaGrpc
    with GrpcApiService {

  private val logger = ContextualizedLogger.get(this.getClass)

  override protected def getLedgerConfigurationSource(
      request: GetLedgerConfigurationRequest): Source[GetLedgerConfigurationResponse, NotUsed] =
    configurationService
      .getLedgerConfiguration()
      .map(
        configuration =>
          GetLedgerConfigurationResponse(
            Some(LedgerConfiguration(
              Some(toProto(configuration.maxDeduplicationTime)),
            ))))
      .via(logger.logErrorsOnStream)

  override def bindService(): ServerServiceDefinition =
    LedgerConfigurationServiceGrpc.bindService(this, DirectExecutionContext)
}

object ApiLedgerConfigurationService {
  def create(ledgerId: LedgerId, configurationService: IndexConfigurationService)(
      implicit ec: ExecutionContext,
      esf: ExecutionSequencerFactory,
      mat: Materializer,
      logCtx: LoggingContext)
    : LedgerConfigurationServiceGrpc.LedgerConfigurationService with GrpcApiService =
    new LedgerConfigurationServiceValidation(
      new ApiLedgerConfigurationService(configurationService),
      ledgerId) with BindableService {
      override def bindService(): ServerServiceDefinition =
        LedgerConfigurationServiceGrpc.bindService(this, DirectExecutionContext)
    }
} 
Example 19
Source File: StandaloneIndexerServer.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.indexer

import akka.stream.Materializer
import com.daml.ledger.participant.state.v1.ReadService
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import com.daml.metrics.Metrics
import com.daml.platform.configuration.ServerRole
import com.daml.platform.store.dao.events.LfValueTranslation
import com.daml.resources.{Resource, ResourceOwner}

import scala.concurrent.ExecutionContext

final class StandaloneIndexerServer(
    readService: ReadService,
    config: IndexerConfig,
    metrics: Metrics,
    lfValueTranslationCache: LfValueTranslation.Cache,
)(implicit materializer: Materializer, logCtx: LoggingContext)
    extends ResourceOwner[Unit] {

  private val logger = ContextualizedLogger.get(this.getClass)

  override def acquire()(implicit executionContext: ExecutionContext): Resource[Unit] = {
    val indexerFactory = new JdbcIndexerFactory(
      ServerRole.Indexer,
      config,
      readService,
      metrics,
      lfValueTranslationCache,
    )
    val indexer = new RecoveringIndexer(materializer.system.scheduler, config.restartDelay)
    config.startupMode match {
      case IndexerStartupMode.MigrateOnly =>
        Resource.successful(())
      case IndexerStartupMode.MigrateAndStart =>
        Resource
          .fromFuture(indexerFactory.migrateSchema(config.allowExistingSchema))
          .flatMap(startIndexer(indexer, _))
          .map { _ =>
            logger.debug("Waiting for the indexer to initialize the database.")
          }
      case IndexerStartupMode.ResetAndStart =>
        Resource
          .fromFuture(indexerFactory.resetSchema())
          .flatMap(startIndexer(indexer, _))
          .map { _ =>
            logger.debug("Waiting for the indexer to initialize the database.")
          }
      case IndexerStartupMode.ValidateAndStart =>
        Resource
          .fromFuture(indexerFactory.validateSchema())
          .flatMap(startIndexer(indexer, _))
          .map { _ =>
            logger.debug("Waiting for the indexer to initialize the database.")
          }
    }
  }

  private def startIndexer(
      indexer: RecoveringIndexer,
      initializedIndexerFactory: ResourceOwner[JdbcIndexer],
  )(implicit executionContext: ExecutionContext): Resource[Unit] =
    indexer
      .start(() => initializedIndexerFactory.flatMap(_.subscription(readService)).acquire())
      .map(_ => ())
} 
Example 20
Source File: LedgerResource.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox

import akka.stream.Materializer
import com.codahale.metrics.MetricRegistry
import com.daml.api.util.TimeProvider
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.testing.utils.{OwnedResource, Resource}
import com.daml.ledger.participant.state.v1.ParticipantId
import com.daml.lf.data.ImmArray
import com.daml.lf.transaction.StandardTransactionCommitter
import com.daml.logging.LoggingContext
import com.daml.metrics.Metrics
import com.daml.platform.common.LedgerIdMode
import com.daml.platform.configuration.ServerRole
import com.daml.platform.packages.InMemoryPackageStore
import com.daml.platform.sandbox.stores.InMemoryActiveLedgerState
import com.daml.platform.sandbox.stores.ledger.Ledger
import com.daml.platform.sandbox.stores.ledger.ScenarioLoader.LedgerEntryOrBump
import com.daml.platform.sandbox.stores.ledger.inmemory.InMemoryLedger
import com.daml.platform.sandbox.stores.ledger.sql.{SqlLedger, SqlStartMode}
import com.daml.platform.store.dao.events.LfValueTranslation
import com.daml.resources.ResourceOwner
import com.daml.testing.postgresql.PostgresResource

import scala.concurrent.ExecutionContext

object LedgerResource {

  def inMemory(
      ledgerId: LedgerId,
      participantId: ParticipantId,
      timeProvider: TimeProvider,
      acs: InMemoryActiveLedgerState = InMemoryActiveLedgerState.empty,
      packages: InMemoryPackageStore = InMemoryPackageStore.empty,
      entries: ImmArray[LedgerEntryOrBump] = ImmArray.empty,
  )(implicit executionContext: ExecutionContext): Resource[Ledger] =
    new OwnedResource(
      ResourceOwner.forValue(() =>
        new InMemoryLedger(
          ledgerId = ledgerId,
          participantId = participantId,
          timeProvider = timeProvider,
          acs0 = acs,
          transactionCommitter = StandardTransactionCommitter,
          packageStoreInit = packages,
          ledgerEntries = entries,
      )))

  def postgres(
      testClass: Class[_],
      ledgerId: LedgerId,
      participantId: ParticipantId,
      timeProvider: TimeProvider,
      metrics: MetricRegistry,
      packages: InMemoryPackageStore = InMemoryPackageStore.empty,
  )(
      implicit executionContext: ExecutionContext,
      materializer: Materializer,
      logCtx: LoggingContext,
  ): Resource[Ledger] =
    new OwnedResource(
      for {
        database <- PostgresResource.owner()
        ledger <- new SqlLedger.Owner(
          serverRole = ServerRole.Testing(testClass),
          jdbcUrl = database.url,
          initialLedgerId = LedgerIdMode.Static(ledgerId),
          participantId = participantId,
          timeProvider = timeProvider,
          acs = InMemoryActiveLedgerState.empty,
          packages = packages,
          initialLedgerEntries = ImmArray.empty,
          queueDepth = 128,
          transactionCommitter = StandardTransactionCommitter,
          startMode = SqlStartMode.AlwaysReset,
          eventsPageSize = 100,
          metrics = new Metrics(metrics),
          lfValueTranslationCache = LfValueTranslation.Cache.none,
        )
      } yield ledger
    )
} 
Example 21
Source File: AbstractSandboxFixture.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox

import java.io.File
import java.net.InetAddress

import akka.stream.Materializer
import com.daml.api.util.TimeProvider
import com.daml.bazeltools.BazelRunfiles._
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.ledger.api.auth.client.LedgerCallCredentials
import com.daml.ledger.api.domain
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import com.daml.ledger.api.v1.ledger_identity_service.{
  GetLedgerIdentityRequest,
  LedgerIdentityServiceGrpc
}
import com.daml.ledger.api.v1.testing.time_service.TimeServiceGrpc
import com.daml.ledger.client.services.testing.time.StaticTime
import com.daml.ledger.participant.state.v1.SeedService.Seeding
import com.daml.platform.common.LedgerIdMode
import com.daml.platform.sandbox.config.SandboxConfig
import com.daml.platform.sandbox.services.DbInfo
import com.daml.platform.services.time.TimeProviderType
import com.daml.ports.Port
import com.daml.resources.ResourceOwner
import io.grpc.Channel
import org.scalatest.Suite
import scalaz.syntax.tag._

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try

trait AbstractSandboxFixture extends AkkaBeforeAndAfterAll {
  self: Suite =>

  protected def darFile = new File(rlocation("ledger/test-common/model-tests.dar"))

  protected def ledgerId(token: Option[String] = None): domain.LedgerId =
    domain.LedgerId(
      LedgerIdentityServiceGrpc
        .blockingStub(channel)
        .withCallCredentials(token.map(new LedgerCallCredentials(_)).orNull)
        .getLedgerIdentity(GetLedgerIdentityRequest())
        .ledgerId)

  protected def getTimeProviderForClient(
      implicit mat: Materializer,
      esf: ExecutionSequencerFactory
  ): TimeProvider = {
    Try(TimeServiceGrpc.stub(channel))
      .map(StaticTime.updatedVia(_, ledgerId().unwrap)(mat, esf))
      .fold[TimeProvider](_ => TimeProvider.UTC, Await.result(_, 30.seconds))
  }

  protected def config: SandboxConfig =
    SandboxConfig.default.copy(
      port = Port.Dynamic,
      damlPackages = packageFiles,
      timeProviderType = Some(TimeProviderType.Static),
      scenario = scenario,
      ledgerIdMode = LedgerIdMode.Static(LedgerId("sandbox-server")),
      seeding = Some(Seeding.Weak),
    )

  protected def packageFiles: List[File] = List(darFile)

  protected def scenario: Option[String] = None

  protected def database: Option[ResourceOwner[DbInfo]] = None

  protected def serverHost: String = InetAddress.getLoopbackAddress.getHostName

  protected def serverPort: Port

  protected def channel: Channel
} 
Example 22
Source File: GrpcCommandCompletionService.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.server.api.services.grpc

import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.dec.DirectExecutionContext
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.ledger.api.domain
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.messages.command.completion.{
  CompletionStreamRequest => ValidatedCompletionStreamRequest
}
import com.daml.ledger.api.v1.command_completion_service._
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset
import com.daml.ledger.api.validation.{CompletionServiceRequestValidator, PartyNameChecker}
import com.daml.platform.server.api.services.domain.CommandCompletionService

import scala.concurrent.Future

object GrpcCommandCompletionService {

  private[this] val completionStreamDefaultOffset = Some(domain.LedgerOffset.LedgerEnd)

  private def fillInWithDefaults(
      request: ValidatedCompletionStreamRequest): ValidatedCompletionStreamRequest =
    if (request.offset.isDefined) {
      request
    } else {
      request.copy(offset = completionStreamDefaultOffset)
    }

}

class GrpcCommandCompletionService(
    ledgerId: LedgerId,
    service: CommandCompletionService,
    partyNameChecker: PartyNameChecker
)(implicit protected val esf: ExecutionSequencerFactory, protected val mat: Materializer)
    extends CommandCompletionServiceAkkaGrpc {

  private val validator = new CompletionServiceRequestValidator(ledgerId, partyNameChecker)

  override def completionStreamSource(
      request: CompletionStreamRequest): Source[CompletionStreamResponse, akka.NotUsed] = {
    validator
      .validateCompletionStreamRequest(request)
      .fold(
        Source.failed[CompletionStreamResponse],
        GrpcCommandCompletionService.fillInWithDefaults _ andThen service.completionStreamSource
      )
  }

  override def completionEnd(request: CompletionEndRequest): Future[CompletionEndResponse] =
    validator
      .validateCompletionEndRequest(request)
      .fold(
        Future.failed[CompletionEndResponse],
        req =>
          service
            .getLedgerEnd(req.ledgerId)
            .map(abs =>
              CompletionEndResponse(Some(LedgerOffset(LedgerOffset.Value.Absolute(abs.value)))))(
              DirectExecutionContext)
      )

} 
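
A minimal wiring sketch for the adapter above, assuming a domain-level CommandCompletionService implementation (`domainCompletionService` below is hypothetical) plus the usual Akka/gRPC plumbing:

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.daml.grpc.adapter.{AkkaExecutionSequencerPool, ExecutionSequencerFactory}
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.validation.PartyNameChecker

implicit val system: ActorSystem = ActorSystem("completion-example")
implicit val mat: Materializer = Materializer(system)
implicit val esf: ExecutionSequencerFactory =
  new AkkaExecutionSequencerPool("completion-example")(system)

// `domainCompletionService` is a hypothetical implementation of the domain
// CommandCompletionService that backs the gRPC adapter.
val grpcCompletionService = new GrpcCommandCompletionService(
  LedgerId("example-ledger"),
  domainCompletionService,
  PartyNameChecker.AllowAllParties)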
Example 23
Source File: GrpcHealthService.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.server.api.services.grpc

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.health.HealthChecks
import com.daml.platform.api.grpc.GrpcApiService
import com.daml.platform.server.api.DropRepeated
import com.daml.platform.server.api.services.grpc.GrpcHealthService._
import io.grpc.health.v1.health.{
  HealthAkkaGrpc,
  HealthCheckRequest,
  HealthCheckResponse,
  HealthGrpc
}
import io.grpc.{ServerServiceDefinition, Status, StatusException}

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

class GrpcHealthService(
    healthChecks: HealthChecks,
    maximumWatchFrequency: FiniteDuration = 1.second,
)(
    implicit protected val esf: ExecutionSequencerFactory,
    protected val mat: Materializer,
    executionContext: ExecutionContext,
) extends HealthAkkaGrpc
    with GrpcApiService {
  override def bindService(): ServerServiceDefinition =
    HealthGrpc.bindService(this, DirectExecutionContext)

  override def check(request: HealthCheckRequest): Future[HealthCheckResponse] =
    Future.fromTry(matchResponse(serviceFrom(request)))

  override def watchSource(request: HealthCheckRequest): Source[HealthCheckResponse, NotUsed] =
    Source
      .fromIterator(() => Iterator.continually(matchResponse(serviceFrom(request)).get))
      .throttle(1, per = maximumWatchFrequency)
      .via(DropRepeated())

  private def matchResponse(componentName: Option[String]): Try[HealthCheckResponse] =
    if (!componentName.forall(healthChecks.hasComponent))
      Failure(new StatusException(Status.NOT_FOUND))
    else if (healthChecks.isHealthy(componentName))
      Success(servingResponse)
    else
      Success(notServingResponse)
}

object GrpcHealthService {
  private[grpc] val servingResponse =
    HealthCheckResponse(HealthCheckResponse.ServingStatus.SERVING)

  private[grpc] val notServingResponse =
    HealthCheckResponse(HealthCheckResponse.ServingStatus.NOT_SERVING)

  private def serviceFrom(request: HealthCheckRequest): Option[String] = {
    Option(request.service).filter(_.nonEmpty)
  }
} 
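
Since GrpcHealthService also implements GrpcApiService, it can be added straight to a server builder. A sketch, assuming the implicit ExecutionSequencerFactory, Materializer, and ExecutionContext it requires are already in scope, and that HealthChecks accepts an empty component list:

import io.grpc.ServerBuilder
import com.daml.ledger.api.health.HealthChecks

// Health checks with no named components: only the empty (whole-server)
// service name is answered.
val healthService = new GrpcHealthService(new HealthChecks())

val server = ServerBuilder
  .forPort(6865)
  .addService(healthService.bindService())
  .build()
  .start()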
Example 24
Source File: DropRepeatedSpec.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.server.api

import akka.actor.ActorSystem
import akka.pattern.pipe
import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.testkit.{TestKit, TestProbe}
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.collection.immutable
import scala.concurrent.ExecutionContext

final class DropRepeatedSpec
    extends TestKit(ActorSystem(classOf[DropRepeatedSpec].getSimpleName))
    with WordSpecLike
    with Matchers
    with BeforeAndAfterAll {

  private[this] implicit val materializer: Materializer = Materializer(system)
  private[this] implicit val executionContext: ExecutionContext = materializer.executionContext

  override def afterAll: Unit = {
    TestKit.shutdownActorSystem(system)
  }

  "DropRepeated" should {
    "drop repeated elements" in {
      val probe = TestProbe()
      val input = immutable.Seq(1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5)

      val _ = Source(input)
        .via(DropRepeated())
        .runWith(Sink.seq)
        .pipeTo(probe.ref)
        .failed
        .foreach(fail(_))

      probe.expectMsg(Vector(1, 2, 3, 4, 5))
    }

    "does not drop duplicate elements that are not repeated" in {
      val probe = TestProbe()
      val input = immutable.Seq(1, 1, 2, 2, 1, 1, 2, 2)

      val _ = Source(input)
        .via(DropRepeated())
        .runWith(Sink.seq)
        .pipeTo(probe.ref)
        .failed
        .foreach(fail(_))

      probe.expectMsg(Vector(1, 2, 1, 2))
    }
  }
} 
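
Outside of a test, the stage drops consecutive duplicates from any stream; a minimal sketch:

import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import com.daml.platform.server.api.DropRepeated

implicit val system: ActorSystem = ActorSystem("drop-repeated-example")
implicit val mat: Materializer = Materializer(system)

// Yields 1, 2, 3, 2: runs of equal consecutive elements collapse to one,
// but a value may reappear after a different element.
Source(List(1, 1, 2, 2, 2, 3, 2))
  .via(DropRepeated())
  .runWith(Sink.seq)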
Example 25
Source File: BatchedValidatingCommitter.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.validator

import java.time.Instant

import akka.stream.Materializer
import com.daml.caching.Cache
import com.daml.ledger.participant.state.kvutils.Bytes
import com.daml.ledger.participant.state.kvutils.DamlKvutils.{DamlStateKey, DamlStateValue}
import com.daml.ledger.participant.state.v1.{ParticipantId, SubmissionResult}
import com.daml.ledger.validator.batch.{
  BatchedSubmissionValidator,
  BatchedSubmissionValidatorFactory
}
import com.daml.ledger.validator.caching.{CacheUpdatePolicy, ImmutablesOnlyCacheUpdatePolicy}

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}


class BatchedValidatingCommitter[LogResult](
    now: () => Instant,
    keySerializationStrategy: StateKeySerializationStrategy,
    validator: BatchedSubmissionValidator[LogResult],
    stateValueCache: Cache[DamlStateKey, DamlStateValue],
    cacheUpdatePolicy: CacheUpdatePolicy
)(implicit materializer: Materializer) {
  def commit(
      correlationId: String,
      envelope: Bytes,
      submittingParticipantId: ParticipantId,
      ledgerStateOperations: LedgerStateOperations[LogResult]
  )(implicit executionContext: ExecutionContext): Future[SubmissionResult] = {
    val (ledgerStateReader, commitStrategy) = readerAndCommitStrategyFrom(ledgerStateOperations)
    validator
      .validateAndCommit(
        envelope,
        correlationId,
        now(),
        submittingParticipantId,
        ledgerStateReader,
        commitStrategy
      )
      .transformWith {
        case Success(_) =>
          Future.successful(SubmissionResult.Acknowledged)
        case Failure(exception) =>
          Future.successful(SubmissionResult.InternalError(exception.getLocalizedMessage))
      }
  }

  private def readerAndCommitStrategyFrom(ledgerStateOperations: LedgerStateOperations[LogResult])(
      implicit executionContext: ExecutionContext)
    : (DamlLedgerStateReader, CommitStrategy[LogResult]) =
    if (stateValueCache == Cache.none) {
      BatchedSubmissionValidatorFactory
        .readerAndCommitStrategyFrom(ledgerStateOperations, keySerializationStrategy)
    } else {
      BatchedSubmissionValidatorFactory
        .cachingReaderAndCommitStrategyFrom(
          ledgerStateOperations,
          stateValueCache,
          cacheUpdatePolicy,
          keySerializationStrategy)
    }
}

object BatchedValidatingCommitter {
  def apply[LogResult](now: () => Instant, validator: BatchedSubmissionValidator[LogResult])(
      implicit materializer: Materializer): BatchedValidatingCommitter[LogResult] =
    new BatchedValidatingCommitter[LogResult](
      now,
      DefaultStateKeySerializationStrategy,
      validator,
      Cache.none,
      ImmutablesOnlyCacheUpdatePolicy)

  def apply[LogResult](
      now: () => Instant,
      validator: BatchedSubmissionValidator[LogResult],
      stateValueCache: Cache[DamlStateKey, DamlStateValue])(
      implicit materializer: Materializer): BatchedValidatingCommitter[LogResult] =
    new BatchedValidatingCommitter[LogResult](
      now,
      DefaultStateKeySerializationStrategy,
      validator,
      stateValueCache,
      ImmutablesOnlyCacheUpdatePolicy)
} 
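
A usage sketch for the companion's simplest factory; `validator`, `envelope`, `participantId`, and `stateOperations` below are hypothetical, and an implicit Materializer and ExecutionContext are assumed in scope:

import java.time.Instant

// Validate and commit one enveloped submission; failures surface as
// SubmissionResult.InternalError rather than a failed future.
val committer = BatchedValidatingCommitter[Unit](() => Instant.now(), validator)
val result = committer.commit("correlation-1", envelope, participantId, stateOperations)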
Example 26
Source File: KeyValueParticipantStateReader.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.participant.state.kvutils.api

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.ledger.api.health.HealthStatus
import com.daml.ledger.participant.state.kvutils.DamlKvutils.DamlLogEntryId
import com.daml.ledger.participant.state.kvutils.{Envelope, KVOffset, KeyValueConsumption}
import com.daml.ledger.participant.state.v1._
import com.daml.lf.data.Time
import com.daml.metrics.{Metrics, Timed}

class KeyValueParticipantStateReader(reader: LedgerReader, metrics: Metrics)(
    implicit materializer: Materializer)
    extends ReadService {
  override def getLedgerInitialConditions(): Source[LedgerInitialConditions, NotUsed] =
    Source.single(createLedgerInitialConditions())

  override def stateUpdates(beginAfter: Option[Offset]): Source[(Offset, Update), NotUsed] = {
    Source
      .single(beginAfter.map(KVOffset.onlyKeepHighestIndex))
      .flatMapConcat(reader.events)
      .flatMapConcat {
        case LedgerRecord(offset, entryId, envelope) =>
          Timed
            .value(metrics.daml.kvutils.reader.openEnvelope, Envelope.open(envelope))
            .flatMap {
              case Envelope.LogEntryMessage(logEntry) =>
                Timed.value(
                  metrics.daml.kvutils.reader.parseUpdates, {
                    val logEntryId = DamlLogEntryId.parseFrom(entryId)
                    val updates = KeyValueConsumption.logEntryToUpdate(logEntryId, logEntry)
                    val updateOffset: (Offset, Int) => Offset =
                      if (updates.size > 1) KVOffset.setMiddleIndex else (offset, _) => offset
                    val updatesWithOffsets = Source(updates).zipWithIndex.map {
                      case (update, index) =>
                        updateOffset(offset, index.toInt) -> update
                    }
                    Right(updatesWithOffsets)
                  }
                )
              case _ =>
                Left("Envelope does not contain a log entry")
            }
            .getOrElse(throw new IllegalArgumentException(
              s"Invalid log entry received at offset $offset"))
      }
  }

  override def currentHealth(): HealthStatus =
    reader.currentHealth()

  private def createLedgerInitialConditions(): LedgerInitialConditions =
    LedgerInitialConditions(
      reader.ledgerId(),
      LedgerReader.DefaultConfiguration,
      Time.Timestamp.Epoch)
} 
Example 27
Source File: KeyValueParticipantState.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.participant.state.kvutils.api

import java.util.concurrent.CompletionStage

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.daml_lf_dev.DamlLf
import com.daml.ledger.api.health.HealthStatus
import com.daml.ledger.participant.state.v1._
import com.daml.lf.data.Time
import com.daml.metrics.Metrics


class KeyValueParticipantState(
    reader: LedgerReader,
    writer: LedgerWriter,
    metrics: Metrics,
)(implicit materializer: Materializer)
    extends ReadService
    with WriteService {
  private val readerAdapter =
    new KeyValueParticipantStateReader(reader, metrics)
  private val writerAdapter =
    new KeyValueParticipantStateWriter(new TimedLedgerWriter(writer, metrics), metrics)

  override def getLedgerInitialConditions(): Source[LedgerInitialConditions, NotUsed] =
    readerAdapter.getLedgerInitialConditions()

  override def stateUpdates(beginAfter: Option[Offset]): Source[(Offset, Update), NotUsed] =
    readerAdapter.stateUpdates(beginAfter)

  override def submitTransaction(
      submitterInfo: SubmitterInfo,
      transactionMeta: TransactionMeta,
      transaction: SubmittedTransaction,
      estimatedInterpretationCost: Long,
  ): CompletionStage[SubmissionResult] =
    writerAdapter.submitTransaction(
      submitterInfo,
      transactionMeta,
      transaction,
      estimatedInterpretationCost,
    )

  override def submitConfiguration(
      maxRecordTime: Time.Timestamp,
      submissionId: SubmissionId,
      config: Configuration): CompletionStage[SubmissionResult] =
    writerAdapter.submitConfiguration(maxRecordTime, submissionId, config)

  override def uploadPackages(
      submissionId: SubmissionId,
      archives: List[DamlLf.Archive],
      sourceDescription: Option[String]): CompletionStage[SubmissionResult] =
    writerAdapter.uploadPackages(submissionId, archives, sourceDescription)

  override def allocateParty(
      hint: Option[Party],
      displayName: Option[String],
      submissionId: SubmissionId): CompletionStage[SubmissionResult] =
    writerAdapter.allocateParty(hint, displayName, submissionId)

  override def currentHealth(): HealthStatus =
    reader.currentHealth() and writer.currentHealth()
} 
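
Wiring sketch: given a LedgerReader and LedgerWriter for some key-value backend (`ledgerReader`, `ledgerWriter`, and `metrics` are hypothetical, with an implicit Materializer assumed), the adapter exposes both participant-state services:

val participantState: ReadService with WriteService =
  new KeyValueParticipantState(ledgerReader, ledgerWriter, metrics)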
Example 28
Source File: BatchingQueue.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.participant.state.kvutils.api

import java.io.Closeable
import java.util.concurrent.atomic.AtomicBoolean

import akka.stream.scaladsl.{Sink, Source, SourceQueueWithComplete}
import akka.stream.{Materializer, OverflowStrategy, QueueOfferResult}
import com.daml.ledger.participant.state.kvutils.DamlKvutils.DamlSubmissionBatch
import com.daml.ledger.participant.state.v1.SubmissionResult

import scala.concurrent.Future
import scala.concurrent.duration._

object BatchingQueue {
  type CommitBatchFunction =
    Seq[DamlSubmissionBatch.CorrelatedSubmission] => Future[Unit]
}

// The `BatchingQueue` trait and the `RunningBatchingQueueHandle` it returns
// were elided from this listing; minimal definitions reconstructed from their
// use in `DefaultBatchingQueue` below (the otherwise-unused `Closeable` import
// above suggests the handle is closeable).
trait BatchingQueue {
  def run(commitBatch: BatchingQueue.CommitBatchFunction)(
      implicit materializer: Materializer): RunningBatchingQueueHandle
}

trait RunningBatchingQueueHandle extends Closeable {
  def alive: Boolean
  def offer(submission: DamlSubmissionBatch.CorrelatedSubmission): Future[SubmissionResult]
}

case class DefaultBatchingQueue(
    maxQueueSize: Int,
    maxBatchSizeBytes: Long,
    maxWaitDuration: FiniteDuration,
    maxConcurrentCommits: Int
) extends BatchingQueue {
  private val queue: Source[
    Seq[DamlSubmissionBatch.CorrelatedSubmission],
    SourceQueueWithComplete[DamlSubmissionBatch.CorrelatedSubmission]] =
    Source
      .queue(maxQueueSize, OverflowStrategy.dropNew)
      .groupedWeightedWithin(maxBatchSizeBytes, maxWaitDuration)(
        (cs: DamlSubmissionBatch.CorrelatedSubmission) => cs.getSubmission.size.toLong)

  def run(commitBatch: Seq[DamlSubmissionBatch.CorrelatedSubmission] => Future[Unit])(
      implicit materializer: Materializer): RunningBatchingQueueHandle = {
    val materializedQueue = queue
      .mapAsync(maxConcurrentCommits)(commitBatch)
      .to(Sink.ignore)
      .run()

    val queueAlive = new AtomicBoolean(true)
    materializedQueue.watchCompletion.foreach { _ =>
      queueAlive.set(false)
    }(materializer.executionContext)

    new RunningBatchingQueueHandle {
      override def alive: Boolean = queueAlive.get()

      override def offer(
          submission: DamlSubmissionBatch.CorrelatedSubmission): Future[SubmissionResult] = {
        materializedQueue
          .offer(submission)
          .map {
            case QueueOfferResult.Enqueued => SubmissionResult.Acknowledged
            case QueueOfferResult.Dropped => SubmissionResult.Overloaded
            case f: QueueOfferResult.Failure => SubmissionResult.InternalError(f.toString)
            case QueueOfferResult.QueueClosed =>
              SubmissionResult.InternalError("DefaultBatchingQueue.queue is closed")
          }(materializer.executionContext)
      }

      override def close(): Unit = {
        materializedQueue.complete()
      }
    }
  }
} 
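
A configuration sketch: a batch closes when it reaches the byte limit or the wait duration, whichever comes first. `commitBatch` below is a hypothetical commit function, and an implicit Materializer is assumed for run:

import scala.concurrent.Future
import scala.concurrent.duration._
import com.daml.ledger.participant.state.kvutils.DamlKvutils.DamlSubmissionBatch

// commitBatch: Seq[DamlSubmissionBatch.CorrelatedSubmission] => Future[Unit]
val queue = DefaultBatchingQueue(
  maxQueueSize = 128,
  maxBatchSizeBytes = 4L * 1024 * 1024, // 4 MB per batch
  maxWaitDuration = 100.millis,
  maxConcurrentCommits = 2)

val handle = queue.run(commitBatch)
// handle.offer(...) to enqueue; handle.close() to complete the queue.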
Example 29
Source File: BatchedValidatingCommitterSpec.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.validator

import java.time.Instant

import akka.stream.Materializer
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import com.daml.ledger.participant.state.v1.{ParticipantId, SubmissionResult}
import com.daml.ledger.validator.TestHelper.aParticipantId
import com.daml.ledger.validator.batch.BatchedSubmissionValidator
import com.google.protobuf.ByteString
import org.mockito.ArgumentMatchers.{any, anyString}
import org.mockito.Mockito.when
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{AsyncWordSpec, Matchers}

import scala.concurrent.{ExecutionContext, Future}

class BatchedValidatingCommitterSpec
    extends AsyncWordSpec
    with AkkaBeforeAndAfterAll
    with Matchers
    with MockitoSugar {
  "commit" should {
    "return Acknowledged in case of success" in {
      val mockValidator = mock[BatchedSubmissionValidator[Unit]]
      when(
        mockValidator.validateAndCommit(
          any[ByteString](),
          anyString(),
          any[Instant](),
          any[ParticipantId](),
          any[DamlLedgerStateReader](),
          any[CommitStrategy[Unit]]())(any[Materializer](), any[ExecutionContext]()))
        .thenReturn(Future.unit)
      val instance =
        BatchedValidatingCommitter[Unit](() => Instant.now(), mockValidator)

      instance
        .commit("", ByteString.EMPTY, aParticipantId, mock[LedgerStateOperations[Unit]])
        .map { actual =>
          actual shouldBe SubmissionResult.Acknowledged
        }
    }

    "return InternalError in case of an exception" in {
      val mockValidator = mock[BatchedSubmissionValidator[Unit]]
      when(
        mockValidator.validateAndCommit(
          any[ByteString](),
          anyString(),
          any[Instant](),
          any[ParticipantId](),
          any[DamlLedgerStateReader](),
          any[CommitStrategy[Unit]]())(any[Materializer](), any[ExecutionContext]()))
        .thenReturn(Future.failed(new IllegalArgumentException("Validation failure")))
      val instance = BatchedValidatingCommitter[Unit](() => Instant.now(), mockValidator)

      instance
        .commit("", ByteString.EMPTY, aParticipantId, mock[LedgerStateOperations[Unit]])
        .map { actual =>
          actual shouldBe SubmissionResult.InternalError("Validation failure")
        }
    }
  }
} 
Example 30
Source File: ProtobufByteStrings.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.http.util

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.{Source, StreamConverters}
import com.google.protobuf

import scala.collection.JavaConverters._

object ProtobufByteStrings {

  def readFrom(source: Source[akka.util.ByteString, NotUsed])(
      implicit mat: Materializer): protobuf.ByteString = {
    val inputStream = source.runWith(StreamConverters.asInputStream())
    protobuf.ByteString.readFrom(inputStream)
  }

  def toSource(a: protobuf.ByteString): Source[akka.util.ByteString, NotUsed] = {
    Source.fromIterator(() =>
      a.asReadOnlyByteBufferList().iterator.asScala.map(x => akka.util.ByteString(x)))
  }
} 
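
A round-trip sketch; readFrom needs an implicit Materializer in scope, since it materializes the source into an InputStream before handing it to protobuf:

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.google.protobuf

val protoBytes: protobuf.ByteString =
  ProtobufByteStrings.readFrom(Source.single(akka.util.ByteString("hello")))

val back: Source[akka.util.ByteString, NotUsed] =
  ProtobufByteStrings.toSource(protoBytes)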
Example 31
Source File: StaticContentEndpoints.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.directives.ContentTypeResolver.Default
import akka.http.scaladsl.server.{Directives, RoutingLog}
import akka.http.scaladsl.settings.{ParserSettings, RoutingSettings}
import akka.stream.Materializer
import com.typesafe.scalalogging.StrictLogging
import scalaz.syntax.show._

import scala.concurrent.Future

object StaticContentEndpoints {
  def all(config: StaticContentConfig)(
      implicit
      routingSettings: RoutingSettings,
      parserSettings: ParserSettings,
      materializer: Materializer,
      routingLog: RoutingLog): HttpRequest PartialFunction Future[HttpResponse] =
    new StaticContentRouter(config)
}

private class StaticContentRouter(config: StaticContentConfig)(
    implicit
    routingSettings: RoutingSettings,
    parserSettings: ParserSettings,
    materializer: Materializer,
    routingLog: RoutingLog)
    extends PartialFunction[HttpRequest, Future[HttpResponse]]
    with StrictLogging {

  private val pathPrefix: Uri.Path = Uri.Path("/" + config.prefix)

  logger.warn(s"StaticContentRouter configured: ${config.shows}")
  logger.warn("DO NOT USE StaticContentRouter IN PRODUCTION, CONSIDER SETTING UP REVERSE PROXY!!!")

  private val fn =
    akka.http.scaladsl.server.Route.asyncHandler(
      Directives.rawPathPrefix(Slash ~ config.prefix)(
        Directives.getFromDirectory(config.directory.getAbsolutePath)
      ))

  override def isDefinedAt(x: HttpRequest): Boolean =
    x.uri.path.startsWith(pathPrefix)

  override def apply(x: HttpRequest): Future[HttpResponse] =
    fn(x)
} 
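
Because the router is a PartialFunction[HttpRequest, Future[HttpResponse]], it composes with other handlers via orElse. A sketch, with `staticConfig` and `apiHandler` hypothetical and the implicit routing/parser settings, Materializer, and RoutingLog assumed in scope:

import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import scala.concurrent.Future

val handler: PartialFunction[HttpRequest, Future[HttpResponse]] =
  StaticContentEndpoints.all(staticConfig) orElse apiHandler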
Example 32
Source File: PackageManagementService.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.http

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.util.ByteString
import com.daml.http.util.ProtobufByteStrings
import com.daml.jwt.domain.Jwt

import scala.concurrent.{ExecutionContext, Future}

class PackageManagementService(
    listKnownPackagesFn: LedgerClientJwt.ListPackages,
    getPackageFn: LedgerClientJwt.GetPackage,
    uploadDarFileFn: LedgerClientJwt.UploadDarFile,
)(implicit ec: ExecutionContext, mat: Materializer) {

  def listPackages(jwt: Jwt): Future[Seq[String]] =
    listKnownPackagesFn(jwt).map(_.packageIds)

  def getPackage(jwt: Jwt, packageId: String): Future[admin.GetPackageResponse] =
    getPackageFn(jwt, packageId).map(admin.GetPackageResponse.fromLedgerApi)

  def uploadDarFile(jwt: Jwt, source: Source[ByteString, NotUsed]): Future[Unit] =
    uploadDarFileFn(jwt, ProtobufByteStrings.readFrom(source))
} 
Example 33
Source File: AuthorizationTest.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.http

import java.nio.file.Files

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.daml.auth.TokenHolder
import com.daml.bazeltools.BazelRunfiles.rlocation
import com.daml.grpc.adapter.{AkkaExecutionSequencerPool, ExecutionSequencerFactory}
import com.daml.http.util.TestUtil.requiredFile
import com.daml.ledger.api.auth.{AuthServiceStatic, Claim, ClaimPublic, Claims}
import com.daml.ledger.client.LedgerClient
import org.scalatest.{AsyncFlatSpec, BeforeAndAfterAll, Matchers}
import org.slf4j.LoggerFactory

import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal

final class AuthorizationTest extends AsyncFlatSpec with BeforeAndAfterAll with Matchers {

  private val dar = requiredFile(rlocation("docs/quickstart-model.dar"))
    .fold(e => throw new IllegalStateException(e), identity)

  private val testId: String = this.getClass.getSimpleName

  implicit val asys: ActorSystem = ActorSystem(testId)
  implicit val mat: Materializer = Materializer(asys)
  implicit val aesf: ExecutionSequencerFactory = new AkkaExecutionSequencerPool(testId)(asys)
  implicit val ec: ExecutionContext = asys.dispatcher

  private val publicToken = "public"
  private val emptyToken = "empty"
  private val mockedAuthService = Option(AuthServiceStatic {
    case `publicToken` => Claims(Seq[Claim](ClaimPublic))
    case `emptyToken` => Claims(Nil)
  })

  private val accessTokenFile = Files.createTempFile("Extractor", "AuthSpec")
  private val tokenHolder = Option(new TokenHolder(accessTokenFile))

  private def setToken(string: String): Unit = {
    val _ = Files.write(accessTokenFile, string.getBytes())
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    try {
      Files.delete(accessTokenFile)
    } catch {
      case NonFatal(e) =>
        LoggerFactory
          .getLogger(classOf[AuthorizationTest])
          .warn("Unable to delete temporary token file", e)
    }
  }

  protected def withLedger[A] =
    HttpServiceTestFixture
      .withLedger[A](List(dar), testId, Option(publicToken), mockedAuthService) _

  private def packageService(client: LedgerClient): PackageService =
    new PackageService(HttpService.loadPackageStoreUpdates(client.packageClient, tokenHolder))

  behavior of "PackageService against an authenticated sandbox"

  it should "fail immediately if the authorization is insufficient" in withLedger { client =>
    setToken(emptyToken)
    packageService(client).reload.failed.map(_ => succeed)
  }

  it should "succeed if the authorization is sufficient" in withLedger { client =>
    setToken(publicToken)
    packageService(client).reload.map(_ => succeed)
  }

} 
Example 34
Source File: FlowUtilTest.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.http.util

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import org.scalacheck.{Gen, Arbitrary}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import org.scalatest.{FlatSpec, Matchers}
import scalaz.{-\/, \/, \/-}

import scala.concurrent.Future

class FlowUtilTest
    extends FlatSpec
    with ScalaFutures
    with Matchers
    with GeneratorDrivenPropertyChecks {
  import FlowUtil._

  implicit val asys: ActorSystem = ActorSystem(this.getClass.getSimpleName)
  implicit val materializer: Materializer = Materializer(asys)

  "allowOnlyFirstInput" should "pass 1st message through and replace all others with errors" in forAll(
    nonEmptyVectorOfInts) { xs: Vector[Int] =>
    val error = "Error"
    val errorNum = Math.max(xs.size - 1, 0)
    val expected: Vector[String \/ Int] =
      xs.take(1).map(\/-(_)) ++ Vector.fill(errorNum)(-\/(error))
    val input: Source[String \/ Int, NotUsed] =
      Source.fromIterator(() => xs.toIterator).map(\/-(_))

    val actualF: Future[Vector[String \/ Int]] =
      input
        .via(allowOnlyFirstInput[String, Int](error))
        .runFold(Vector.empty[String \/ Int])(_ :+ _)

    whenReady(actualF) { actual =>
      actual shouldBe expected
    }
  }

  private val nonEmptyVectorOfInts: Gen[Vector[Int]] =
    Gen.nonEmptyBuildableOf[Vector[Int], Int](Arbitrary.arbitrary[Int])
} 
Example 35
Source File: MigrationStep.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml

import java.nio.file.Path

import akka.actor.ActorSystem
import com.daml.grpc.adapter.{AkkaExecutionSequencerPool, ExecutionSequencerFactory}
import com.daml.lf.archive.DarReader
import scalaz.syntax.traverse._

import scala.concurrent.{ExecutionContext, Future}
import akka.stream.Materializer

import scala.util.control.NonFatal

object MigrationStep {

  trait Test {
    def execute(packageId: String, config: Config.Test)(
        implicit ec: ExecutionContext,
        esf: ExecutionSequencerFactory,
        mat: Materializer,
    ): Future[Unit]
  }

  private def readPackageId(path: Path): String =
    DarReader().readArchiveFromFile(path.toFile).get.map(_._1.toString).main

  def main(args: Array[String]): Unit = {
    val config = Config.parser.parse(args, Config.default).getOrElse(sys.exit(1))
    val packageId = readPackageId(config.dar)

    implicit val system: ActorSystem = ActorSystem(packageId)
    implicit val sequencer: ExecutionSequencerFactory =
      new AkkaExecutionSequencerPool(packageId)(system)
    implicit val ec: ExecutionContext = system.dispatcher

    val result = config.test.execute(packageId, config)

    result.failed.foreach { case NonFatal(e) => e.printStackTrace(System.err) }
    result.onComplete(_ => system.terminate())
  }
} 
Example 36
Source File: AkkaStreamGrpcServerResource.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import java.net.SocketAddress
import java.util.concurrent.TimeUnit

import akka.stream.Materializer
import io.grpc.BindableService

class AkkaStreamGrpcServerResource(
    constructServices: Materializer => Iterable[BindableService],
    actorMaterializerResource: Resource[Materializer],
    address: Option[SocketAddress])
    extends DerivedResource[Materializer, ServerWithChannelProvider](actorMaterializerResource) {

  @volatile private var runningServices: Iterable[BindableService] = Nil

  def getRunningServices: Iterable[BindableService] = runningServices

  override protected def construct(source: Materializer): ServerWithChannelProvider = {
    runningServices = constructServices(actorMaterializerResource.value)
    ServerWithChannelProvider.fromServices(runningServices, address, "server")
  }

  override protected def destruct(resource: ServerWithChannelProvider): Unit = {
    val server = derivedValue.server

    server.shutdownNow()

    runningServices.foreach {
      case closeable: AutoCloseable => closeable.close()
      case _ => ()
    }
    runningServices = Nil

    server.awaitTermination(10, TimeUnit.SECONDS)
    ()
  }
}

object AkkaStreamGrpcServerResource {
  def apply(
      constructServices: Materializer => Iterable[BindableService],
      actorSystemName: String = "",
      address: Option[SocketAddress]) =
    new AkkaStreamGrpcServerResource(
      constructServices,
      new ActorMaterializerResource(actorSystemName),
      address)
} 
Example 37
Source File: ActorMaterializerResource.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import akka.actor.ActorSystem
import akka.stream.Materializer

import scala.concurrent.Await
import scala.concurrent.duration._

final class ActorMaterializerResource(actorSystemName: String = "")
    extends ManagedResource[Materializer] {
  override protected def construct(): Materializer = {
    implicit val system: ActorSystem =
      if (actorSystemName.isEmpty) ActorSystem() else ActorSystem(actorSystemName)
    Materializer(system)
  }

  override protected def destruct(resource: Materializer): Unit = {
    resource.shutdown()
    Await.result(resource.system.terminate(), 30.seconds)
    ()
  }
}

object ActorMaterializerResource {
  def apply(actorSystemName: String = ""): ActorMaterializerResource =
    new ActorMaterializerResource(actorSystemName)
} 
Example 38
Source File: AkkaBeforeAndAfterAll.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import java.util.concurrent.Executors

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.daml.grpc.adapter.{AkkaExecutionSequencerPool, ExecutionSequencerFactory}
import com.google.common.util.concurrent.ThreadFactoryBuilder
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.slf4j.LoggerFactory

import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, ExecutionContext}

trait AkkaBeforeAndAfterAll extends BeforeAndAfterAll {
  self: Suite =>
  private val logger = LoggerFactory.getLogger(getClass)

  protected def actorSystemName: String = this.getClass.getSimpleName

  private implicit lazy val executionContext: ExecutionContext =
    ExecutionContext.fromExecutorService(
      Executors.newSingleThreadExecutor(
        new ThreadFactoryBuilder()
          .setDaemon(true)
          .setNameFormat(s"$actorSystemName-thread-pool-worker-%d")
          .setUncaughtExceptionHandler((thread, _) =>
            logger.error(s"got an uncaught exception on thread: ${thread.getName}"))
          .build()))

  protected implicit lazy val system: ActorSystem =
    ActorSystem(actorSystemName, defaultExecutionContext = Some(executionContext))

  protected implicit lazy val materializer: Materializer = Materializer(system)

  protected implicit lazy val executionSequencerFactory: ExecutionSequencerFactory =
    new AkkaExecutionSequencerPool(poolName = actorSystemName, actorCount = 1)

  override protected def afterAll(): Unit = {
    executionSequencerFactory.close()
    materializer.shutdown()
    Await.result(system.terminate(), 30.seconds)
    super.afterAll()
  }
} 
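
A sketch of a spec that relies on the trait for its actor system, materializer, and sequencer factory (MyStreamSpec is illustrative):

import akka.stream.scaladsl.{Sink, Source}
import org.scalatest.{AsyncWordSpec, Matchers}

class MyStreamSpec extends AsyncWordSpec with Matchers with AkkaBeforeAndAfterAll {
  "a stream" should {
    "run with the materializer provided by the trait" in {
      Source(1 to 3).runWith(Sink.seq).map(_ shouldBe Seq(1, 2, 3))
    }
  }
}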
Example 39
Source File: AkkaStreamPerformanceTest.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.perf.util

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.daml.ledger.api.testing.utils.Resource
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}

@SuppressWarnings(Array("org.wartremover.warts.LeakingSealed"))
abstract class AkkaStreamPerformanceTest extends PerformanceTest {

  protected val logger: Logger = LoggerFactory.getLogger(this.getClass)

  type ResourceType

  @volatile protected var system: ActorSystem = _
  @volatile protected var materializer: Materializer = _
  @transient protected implicit val ec: ExecutionContextExecutor = ExecutionContext.global

  protected def resource: Resource[ResourceType]

  protected def setup(): Unit = {
    resource.setup()
    implicit val sys: ActorSystem = ActorSystem(this.getClass.getSimpleName.stripSuffix("$"))
    system = sys
    materializer = Materializer(system)
  }

  protected def teardown(): Unit = {
    await(system.terminate())
    resource.close()
  }

  implicit class FixtureSetup[T](using: Using[T]) extends Serializable {
    def withLifecycleManagement(additionalSetup: T => Unit = _ => ()): Using[T] =
      using
        .setUp { input =>
          try {
            setup()
            additionalSetup(input)
          } catch {
            case t: Throwable =>
              logger.error("Setup failed.", t)
              throw t
          }
        }
        .tearDown { _ =>
          try {
            teardown()
          } catch {
            case t: Throwable =>
              logger.error("Teardown failed.", t)
              throw t
          }
        }
  }
} 
Example 40
Source File: AkkaClientCompatibilityCheck.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter.operation

import akka.actor.ActorSystem
import akka.stream.scaladsl.Sink
import akka.stream.{Materializer, ThrottleMode}
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.client.ResultAssertions
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.platform.hello.HelloRequest
import com.daml.platform.hello.HelloServiceGrpc.HelloServiceStub
import io.grpc.{ClientCall, MethodDescriptor}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

trait AkkaClientCompatibilityCheck {
  self: WordSpec with Matchers with ScalaFutures with ResultAssertions =>

  implicit protected def system: ActorSystem

  implicit protected def materializer: Materializer

  implicit protected def esf: ExecutionSequencerFactory

  def akkaClientCompatible(helloStub: => HelloServiceStub): Unit = {

    def getCall[Req, Resp](call: MethodDescriptor[Req, Resp]): ClientCall[Req, Resp] =
      helloStub.getChannel.newCall(call, helloStub.getCallOptions)

    "respond with the correct number of elements and correct content in 1-* setup" in {
      val elemsF = ClientAdapter
        .serverStreaming(HelloRequest(elemCount), helloStub.serverStreaming)
        .runWith(Sink.seq)

      whenReady(elemsF)(assertElementsAreInOrder(elemCount.toLong))
    }

    "tolerate rematerialization of the same response source in 1-* setup" in {
      val source = ClientAdapter
        .serverStreaming(HelloRequest(elemCount), helloStub.serverStreaming)
      val elemsF1 = source.runWith(Sink.seq)
      val elemsF2 = source.runWith(Sink.seq)

      whenReady(for {
        elems1 <- elemsF1
        elems2 <- elemsF2
      } yield elems1 -> elems2)({
        case (elems1, elems2) =>
          val check = assertElementsAreInOrder(elemCount.toLong) _
          check(elems1)
          check(elems2)
      })
    }

    "respond with the correct number of elements and correct content in 1-* setup when back-pressured" in {
      val elemsF = ClientAdapter
        .serverStreaming(HelloRequest(elemCount), helloStub.serverStreaming)
        .throttle(100, 1.second, 16, ThrottleMode.shaping)
        .runWith(Sink.seq)

      whenReady(elemsF)(assertElementsAreInOrder(elemCount.toLong))
    }

    "handle cancellation in 1-* setup" in {
      val elemsF = ClientAdapter
        .serverStreaming(HelloRequest(elemCount), helloStub.serverStreaming)
        .take(halfCount.toLong)
        .runWith(Sink.seq)

      whenReady(elemsF)(assertElementsAreInOrder(halfCount.toLong))
    }

  }
} 
Example 41
Source File: AkkaImplementation.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter.utils.implementations

import java.util.concurrent.atomic.AtomicInteger

import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Source}
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.server.akka.ServerAdapter
import com.daml.grpc.sampleservice.Responding
import com.daml.platform.hello.HelloServiceGrpc.HelloService
import com.daml.platform.hello.{HelloRequest, HelloResponse, HelloServiceGrpc}
import io.grpc.stub.StreamObserver
import io.grpc.{BindableService, ServerServiceDefinition}

import scala.concurrent.ExecutionContext.Implicits.global

class AkkaImplementation(
    implicit executionSequencerFactory: ExecutionSequencerFactory,
    materializer: Materializer,
) extends HelloService
    with Responding
    with BindableService {

  private val serverStreamingCalls = new AtomicInteger()

  def getServerStreamingCalls: Int = serverStreamingCalls.get()

  override def bindService(): ServerServiceDefinition =
    HelloServiceGrpc.bindService(this, global)

  override def serverStreaming(
      request: HelloRequest,
      responseObserver: StreamObserver[HelloResponse],
  ): Unit =
    Source
      .single(request)
      .via(Flow[HelloRequest].mapConcat(responses))
      .runWith(ServerAdapter.toSink(responseObserver))
      .onComplete(_ => serverStreamingCalls.incrementAndGet())

} 
Example 42
Source File: AkkaTest.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.testing

import java.util
import java.util.concurrent.{Executors, ScheduledExecutorService}

import akka.NotUsed
import akka.actor.{ActorSystem, Scheduler}
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.Materializer
import akka.util.ByteString
import com.daml.grpc.adapter.{ExecutionSequencerFactory, SingleThreadExecutionSequencerPool}
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
import com.typesafe.scalalogging.LazyLogging
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContextExecutor, Future}
import scala.util.control.NonFatal

trait AkkaTest extends BeforeAndAfterAll with LazyLogging { self: Suite =>
  // TestEventListener is needed for log testing
  private val loggers =
    util.Arrays.asList("akka.event.slf4j.Slf4jLogger", "akka.testkit.TestEventListener")
  protected implicit val sysConfig: Config = ConfigFactory
    .load()
    .withValue("akka.loggers", ConfigValueFactory.fromIterable(loggers))
    .withValue("akka.logger-startup-timeout", ConfigValueFactory.fromAnyRef("30s"))
    .withValue("akka.stdout-loglevel", ConfigValueFactory.fromAnyRef("INFO"))
  protected implicit val system: ActorSystem = ActorSystem("test", sysConfig)
  protected implicit val ec: ExecutionContextExecutor =
    system.dispatchers.lookup("test-dispatcher")
  protected implicit val scheduler: Scheduler = system.scheduler
  protected implicit val schedulerService: ScheduledExecutorService =
    Executors.newSingleThreadScheduledExecutor()
  protected implicit val materializer: Materializer = Materializer(system)
  protected implicit val esf: ExecutionSequencerFactory =
    new SingleThreadExecutionSequencerPool("testSequencerPool")
  protected val timeout: FiniteDuration = 2.minutes
  protected val shortTimeout: FiniteDuration = 5.seconds

  protected def await[T](fun: => Future[T]): T = Await.result(fun, timeout)

  protected def awaitShort[T](fun: => Future[T]): T = Await.result(fun, shortTimeout)

  protected def drain(source: Source[ByteString, NotUsed]): ByteString = {
    val futureResult: Future[ByteString] = source.runFold(ByteString.empty) { (a, b) =>
      a.concat(b)
    }
    awaitShort(futureResult)
  }

  protected def drain[A, B](source: Source[A, B]): Seq[A] = {
    val futureResult: Future[Seq[A]] = source.runWith(Sink.seq)
    awaitShort(futureResult)
  }

  override protected def afterAll(): Unit = {
    try {
      val _ = await(system.terminate())
    } catch {
      case NonFatal(_) => ()
    }
    schedulerService.shutdownNow()
    super.afterAll()
  }
} 
Example 43
Source File: ClientUtil.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.daml.quickstart.iou

import java.util.UUID

import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.{Done, NotUsed}
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.refinements.ApiTypes.{ApplicationId, WorkflowId}
import com.daml.ledger.api.v1.command_submission_service.SubmitRequest
import com.daml.ledger.api.v1.commands.{Command, Commands}
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset
import com.daml.ledger.api.v1.transaction.Transaction
import com.daml.ledger.api.v1.transaction_filter.{Filters, TransactionFilter}
import com.daml.ledger.client.LedgerClient
import com.daml.quickstart.iou.FutureUtil.toFuture
import com.google.protobuf.empty.Empty

import scala.concurrent.{ExecutionContext, Future}

class ClientUtil(
    client: LedgerClient,
    applicationId: ApplicationId,
) {

  import ClientUtil._

  private val ledgerId = client.ledgerId
  private val packageClient = client.packageClient
  private val commandClient = client.commandClient
  private val transactionClient = client.transactionClient

  def listPackages(implicit ec: ExecutionContext): Future[Set[String]] =
    packageClient.listPackages().map(_.packageIds.toSet)

  def ledgerEnd(implicit ec: ExecutionContext): Future[LedgerOffset] =
    transactionClient.getLedgerEnd().flatMap(response => toFuture(response.offset))

  def submitCommand(party: String, workflowId: WorkflowId, cmd: Command.Command): Future[Empty] = {
    val commands = Commands(
      ledgerId = LedgerId.unwrap(ledgerId),
      workflowId = WorkflowId.unwrap(workflowId),
      applicationId = ApplicationId.unwrap(applicationId),
      commandId = uniqueId,
      party = party,
      commands = Seq(Command(cmd)),
    )

    commandClient.submitSingleCommand(SubmitRequest(Some(commands), None))
  }

  def nextTransaction(party: String, offset: LedgerOffset)(
      implicit mat: Materializer): Future[Transaction] =
    transactionClient
      .getTransactions(offset, None, transactionFilter(party))
      .take(1L)
      .runWith(Sink.head)

  def subscribe(party: String, offset: LedgerOffset, max: Option[Long])(f: Transaction => Unit)(
      implicit mat: Materializer): Future[Done] = {
    val source: Source[Transaction, NotUsed] =
      transactionClient.getTransactions(offset, None, transactionFilter(party))
    max.fold(source)(n => source.take(n)) runForeach f
  }

  override lazy val toString: String = s"ClientUtil{ledgerId=$ledgerId}"
}

object ClientUtil {
  def transactionFilter(parties: String*): TransactionFilter =
    TransactionFilter(parties.map((_, Filters.defaultInstance)).toMap)

  def uniqueId: String = UUID.randomUUID.toString

  def workflowIdFromParty(p: String): WorkflowId =
    WorkflowId(s"$p Workflow")
} 
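
A usage sketch: print the first ten transactions visible to Alice from the current ledger end. `client` is a hypothetical connected LedgerClient, and an implicit ExecutionContext and Materializer are assumed:

val util = new ClientUtil(client, ApplicationId("example-app"))

for {
  offset <- util.ledgerEnd
  _ <- util.subscribe("Alice", offset, max = Some(10L))(tx => println(tx.transactionId))
} yield ()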
Example 44
Source File: ClientUtil.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.daml.quickstart.iou

import java.util.UUID

import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.{Done, NotUsed}
import com.daml.ledger.api.refinements.ApiTypes.{ApplicationId, WorkflowId}
import com.daml.ledger.api.v1.command_submission_service.SubmitRequest
import com.daml.ledger.api.v1.commands.Commands
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset
import com.daml.ledger.api.v1.transaction.Transaction
import com.daml.ledger.api.v1.transaction_filter.{Filters, TransactionFilter}
import com.daml.ledger.client.LedgerClient
import com.daml.ledger.client.binding.{Primitive => P}
import com.daml.quickstart.iou.FutureUtil.toFuture
import com.google.protobuf.empty.Empty

import scalaz.syntax.tag._

import scala.concurrent.{ExecutionContext, Future}

class ClientUtil(
    client: LedgerClient,
    applicationId: ApplicationId,
) {

  import ClientUtil._

  private val ledgerId = client.ledgerId
  private val commandClient = client.commandClient
  private val transactionClient = client.transactionClient

  def ledgerEnd(implicit ec: ExecutionContext): Future[LedgerOffset] =
    transactionClient.getLedgerEnd().flatMap(response => toFuture(response.offset))

  def submitCommand[T](
      sender: P.Party,
      workflowId: WorkflowId,
      command: P.Update[P.ContractId[T]]): Future[Empty] = {
    commandClient.submitSingleCommand(submitRequest(sender, workflowId, command))
  }

  def submitRequest[T](
      party: P.Party,
      workflowId: WorkflowId,
      seq: P.Update[P.ContractId[T]]*): SubmitRequest = {
    val commands = Commands(
      ledgerId = ledgerId.unwrap,
      workflowId = WorkflowId.unwrap(workflowId),
      applicationId = ApplicationId.unwrap(applicationId),
      commandId = uniqueId,
      party = P.Party.unwrap(party),
      commands = seq.map(_.command)
    )
    SubmitRequest(Some(commands), None)
  }

  def nextTransaction(party: P.Party, offset: LedgerOffset)(
      implicit mat: Materializer): Future[Transaction] =
    transactionClient
      .getTransactions(offset, None, transactionFilter(party))
      .take(1L)
      .runWith(Sink.head)

  def subscribe(party: P.Party, offset: LedgerOffset, max: Option[Long])(f: Transaction => Unit)(
      implicit mat: Materializer): Future[Done] = {
    val source: Source[Transaction, NotUsed] =
      transactionClient.getTransactions(offset, None, transactionFilter(party))
    max.fold(source)(n => source.take(n)) runForeach f
  }

  override lazy val toString: String = s"ClientUtil{ledgerId=$ledgerId}"
}

object ClientUtil {
  def transactionFilter(ps: P.Party*): TransactionFilter =
    TransactionFilter(P.Party.unsubst(ps).map((_, Filters.defaultInstance)).toMap)

  def uniqueId: String = UUID.randomUUID.toString

  def workflowIdFromParty(p: P.Party): WorkflowId =
    WorkflowId(s"${P.Party.unwrap(p): String} Workflow")
} 
Example 45
Source File: TriggerRunner.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.engine.trigger

import akka.actor.typed.{Behavior, PostStop}
import akka.actor.typed.scaladsl.AbstractBehavior
import akka.actor.typed.SupervisorStrategy._
import akka.actor.typed.Signal
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.scaladsl.ActorContext
import akka.stream.Materializer
import com.typesafe.scalalogging.StrictLogging
import com.daml.grpc.adapter.ExecutionSequencerFactory

class InitializationHalted(s: String) extends Exception(s) {}
class InitializationException(s: String) extends Exception(s) {}

object TriggerRunner {
  type Config = TriggerRunnerImpl.Config

  trait Message
  final case object Stop extends Message

  def apply(config: Config, name: String)(
      implicit esf: ExecutionSequencerFactory,
      mat: Materializer): Behavior[TriggerRunner.Message] =
    Behaviors.setup(ctx => new TriggerRunner(ctx, config, name))
}

class TriggerRunner(
    ctx: ActorContext[TriggerRunner.Message],
    config: TriggerRunner.Config,
    name: String)(implicit esf: ExecutionSequencerFactory, mat: Materializer)
    extends AbstractBehavior[TriggerRunner.Message](ctx)
    with StrictLogging {

  import TriggerRunner.{Message, Stop}

  // Spawn a trigger runner impl. Supervise it. Stop immediately on
  // initialization halted exceptions, retry any initialization or
  // execution failure exceptions.
  private val child =
    ctx.spawn(
      Behaviors
        .supervise(
          Behaviors
            .supervise(TriggerRunnerImpl(config))
            .onFailure[InitializationHalted](stop)
        )
        .onFailure(
          restartWithBackoff(
            config.restartConfig.minRestartInterval,
            config.restartConfig.maxRestartInterval,
            config.restartConfig.restartIntervalRandomFactor)),
      name
    )

  override def onMessage(msg: Message): Behavior[Message] =
    Behaviors.receiveMessagePartial[Message] {
      case Stop =>
        Behaviors.stopped // Automatically stops the child actor if running.
    }

  override def onSignal: PartialFunction[Signal, Behavior[Message]] = {
    case PostStop =>
      logger.info(s"Trigger $name stopped")
      this
  }

} 
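
A spawn sketch using a typed actor system as guardian; `runnerConfig` is a hypothetical TriggerRunner.Config, with the implicit ExecutionSequencerFactory and Materializer assumed in scope:

import akka.actor.typed.ActorSystem

val system: ActorSystem[TriggerRunner.Message] =
  ActorSystem(TriggerRunner(runnerConfig, "my-trigger"), "trigger-system")
// ... later, stop the trigger (and its supervised child):
system ! TriggerRunner.Stop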
Example 46
Source File: AkkaResourceOwnerSpec.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.resources.akka

import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.Materializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.{Done, NotUsed}
import com.daml.resources.ResourceOwner
import org.scalatest.{AsyncWordSpec, Matchers}

import scala.concurrent.{Future, Promise}

class AkkaResourceOwnerSpec extends AsyncWordSpec with Matchers {
  "a function returning an ActorSystem" should {
    "convert to a ResourceOwner" in {
      val testPromise = Promise[Int]()
      class TestActor extends Actor {
        @SuppressWarnings(Array("org.wartremover.warts.Any"))
        override def receive: Receive = {
          case value: Int => testPromise.success(value)
          case value => testPromise.failure(new IllegalArgumentException(s"$value"))
        }
      }

      val resource = for {
        actorSystem <- AkkaResourceOwner
          .forActorSystem(() => ActorSystem("TestActorSystem"))
          .acquire()
        actor <- ResourceOwner
          .successful(actorSystem.actorOf(Props(new TestActor)))
          .acquire()
      } yield (actorSystem, actor)

      for {
        resourceFuture <- resource.asFuture
        (actorSystem, actor) = resourceFuture
        _ = actor ! 7
        result <- testPromise.future
        _ <- resource.release()
      } yield {
        result should be(7)
        an[IllegalStateException] should be thrownBy actorSystem.actorOf(Props(new TestActor))
      }
    }
  }

  "a function returning a Materializer" should {
    "convert to a ResourceOwner" in {
      val resource = for {
        actorSystem <- AkkaResourceOwner
          .forActorSystem(() => ActorSystem("TestActorSystem"))
          .acquire()
        materializer <- AkkaResourceOwner.forMaterializer(() => Materializer(actorSystem)).acquire()
      } yield materializer

      for {
        materializer <- resource.asFuture
        numbers <- Source(1 to 10)
          .toMat(Sink.seq)(Keep.right[NotUsed, Future[Seq[Int]]])
          .run()(materializer)
        _ <- resource.release()
      } yield {
        numbers should be(1 to 10)
        an[IllegalStateException] should be thrownBy Source
          .single(0)
          .toMat(Sink.ignore)(Keep.right[NotUsed, Future[Done]])
          .run()(materializer)
      }
    }
  }
} 
Example 47
Source File: WebSockets.scala    From Scala-Programming-Projects   with MIT License 5 votes vote down vote up
package controllers

import actors.{BrowserManagerActor, BrowserActor}
import akka.actor.{ActorRef, ActorSystem}
import akka.stream.Materializer
import javax.inject._
import play.api.Logger
import play.api.libs.streams.ActorFlow
import play.api.mvc.{AbstractController, ControllerComponents, WebSocket}

@Singleton
class WebSockets @Inject()(
                            implicit actorSystem: ActorSystem,
                            materializer: Materializer,
                            cc: ControllerComponents) extends AbstractController(cc) {

  val managerActor: ActorRef = actorSystem.actorOf(BrowserManagerActor.props(), "manager-actor")

  def cartEventWS: WebSocket = WebSocket.accept[String, String] {
    implicit request =>
      ActorFlow.actorRef { out =>
        Logger.info(s"Got a new websocket connection from ${request.host}")
        managerActor ! BrowserManagerActor.AddBrowser(out)
        BrowserActor.props(managerActor)
      }
  }
} 

Example 48
Source File: WebsocketController.scala    From gbf-raidfinder   with MIT License 5 votes vote down vote up
package walfie.gbf.raidfinder.server.controller

import akka.actor._
import akka.stream.scaladsl.Flow
import akka.stream.{Materializer, OverflowStrategy}
import monix.execution.Scheduler
import play.api.http.websocket.Message
import play.api.libs.streams._
import play.api.mvc._
import play.api.mvc.WebSocket.MessageFlowTransformer
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.Future
import walfie.gbf.raidfinder.domain._
import walfie.gbf.raidfinder.protocol._
import walfie.gbf.raidfinder.RaidFinder
import walfie.gbf.raidfinder.server.actor.WebsocketRaidsHandler
import walfie.gbf.raidfinder.server.util.MessageFlowTransformerUtil
import walfie.gbf.raidfinder.server.{BossNameTranslator, MetricsCollector}

class WebsocketController(
  raidFinder:        RaidFinder[BinaryProtobuf],
  translator:        BossNameTranslator,
  keepAliveInterval: FiniteDuration,
  metricsCollector:  MetricsCollector
)(implicit system: ActorSystem, materializer: Materializer, scheduler: Scheduler) extends Controller {
  private val jsonTransformer = MessageFlowTransformerUtil.protobufJsonMessageFlowTransformer
  private val binaryTransformer = MessageFlowTransformerUtil.protobufBinaryMessageFlowTransformer
  private val defaultTransformer = jsonTransformer

  // NOTE: the opening of this WebSocket handler was elided from this listing.
  // What remains is the tail of a match on the client's requested subprotocols:
  // a supported transformer wraps the actor-backed flow (Right), while any
  // other request is answered with a 400 Bad Request (Left).
  // The opening of this action is missing from the scraped listing; everything above
  // `val flow = ...` is a hedged reconstruction, not the project's exact code.
  def raids: WebSocket = WebSocket { request =>
    val requestedProtocols =
      request.headers.getAll("Sec-WebSocket-Protocol").flatMap(_.split(',')).map(_.trim)
    val props = (out: ActorRef) =>
      WebsocketRaidsHandler.props(out, raidFinder, translator, keepAliveInterval, metricsCollector)
    val result = requestedProtocols.collectFirst {
      case "binary" => Some(binaryTransformer)
      case "json"   => Some(jsonTransformer)
      case _        => None
    }.getOrElse(Some(defaultTransformer)) match {
      case Some(transformer) => Right {
        val flow = ActorFlow.actorRef(props = props)
        transformer.transform(flow)
      }
      case None => Left {
        val unsupportedProtocols = requestedProtocols.mkString("[", ", ", "]")
        Results.BadRequest("Unsupported websocket subprotocols " + unsupportedProtocols)
      }
    }
    Future.successful(result)
  }
}
Example 49
Source File: Registry.scala    From kanadi   with MIT License 5 votes vote down vote up
package org.zalando.kanadi.api

import java.net.URI

import defaults._
import akka.http.scaladsl.HttpExt
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.model.{ContentTypes, HttpMethods, HttpRequest, Uri}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.Materializer
import com.typesafe.scalalogging.{Logger, LoggerTakingImplicit}
import de.heikoseeberger.akkahttpcirce.ErrorAccumulatingCirceSupport._
import org.mdedetrich.webmodels.{FlowId, OAuth2TokenProvider}
import org.mdedetrich.webmodels.RequestHeaders.`X-Flow-ID`
import org.zalando.kanadi.models._

import scala.concurrent.{ExecutionContext, Future}

case class Registry(baseUri: URI, oAuth2TokenProvider: Option[OAuth2TokenProvider] = None)(implicit
                                                                                           kanadiHttpConfig: HttpConfig,
                                                                                           http: HttpExt,
                                                                                           materializer: Materializer)
    extends RegistryInterface {
  protected val logger: LoggerTakingImplicit[FlowId] = Logger.takingImplicit[FlowId](classOf[Registry])
  private val baseUri_                               = Uri(baseUri.toString)

  
  def partitionStrategies(implicit flowId: FlowId = randomFlowId(),
                          executionContext: ExecutionContext): Future[List[PartitionStrategy]] = {
    val uri =
      baseUri_.withPath(baseUri_.path / "registry" / "partition-strategies")

    val baseHeaders = List(RawHeader(`X-Flow-ID`, flowId.value))

    for {
      headers <- oAuth2TokenProvider match {
                  case None => Future.successful(baseHeaders)
                  case Some(futureProvider) =>
                    futureProvider.value().map { oAuth2Token =>
                      toHeader(oAuth2Token) +: baseHeaders
                    }
                }
      request  = HttpRequest(HttpMethods.GET, uri, headers)
      _        = logger.debug(request.toString)
      response <- http.singleRequest(request)
      result <- {
        if (response.status.isSuccess()) {
          Unmarshal(response.entity.httpEntity.withContentType(ContentTypes.`application/json`))
            .to[List[PartitionStrategy]]
        } else
          processNotSuccessful(request, response)
      }
    } yield result
  }

} 
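The header assembly in partitionStrategies is a reusable pattern: fold an optional token provider into a Future of headers so the rest of the request construction stays linear. A condensed sketch (names are illustrative, not kanadi's API):

import scala.concurrent.{ExecutionContext, Future}

object AuthHeaderSketch {
  def withAuth[H](baseHeaders: List[H], tokenProvider: Option[() => Future[H]])
                 (implicit ec: ExecutionContext): Future[List[H]] =
    tokenProvider match {
      case None           => Future.successful(baseHeaders)
      case Some(provider) => provider().map(_ +: baseHeaders)
    }
}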
Example 50
Source File: TestSpec.scala    From akka-serialization-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.serialization.SerializationExtension
import akka.stream.{ ActorMaterializer, Materializer }
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.prop.PropertyChecks
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, GivenWhenThen, Matchers }

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try

trait TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with ScalaFutures
    with BeforeAndAfterAll
    with Eventually
    with PropertyChecks
    with AkkaPersistenceQueries
    with AkkaStreamUtils
    with InMemoryCleanup {

  implicit val timeout: Timeout = Timeout(10.seconds)
  implicit val system: ActorSystem = ActorSystem()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val mat: Materializer = ActorMaterializer()
  implicit val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 50.seconds)
  val serialization = SerializationExtension(system)

  implicit class FutureToTry[T](f: Future[T]) {
    def toTry: Try[T] = Try(f.futureValue)
  }

  def killActors(actors: ActorRef*): Unit = {
    val probe = TestProbe()
    actors.foreach { actor ⇒
      probe watch actor
      actor ! PoisonPill
      probe expectTerminated actor
    }
  }

  override protected def afterAll(): Unit = {
    system.terminate()
    system.whenTerminated.toTry should be a 'success
  }
} 
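ActorMaterializer() here is the classic pre-2.6 API; Akka 2.6 deprecates it. On 2.6+ the equivalent setup is either the system materializer, derived implicitly from the ActorSystem, or an explicit binding, sketched below:

import akka.actor.ActorSystem
import akka.stream.Materializer

object Akka26Setup {
  implicit val system: ActorSystem = ActorSystem()
  // On 2.6 an implicit Materializer is also derivable from the implicit
  // ActorSystem alone; binding it explicitly keeps the test-suite shape above.
  implicit val mat: Materializer = Materializer(system)
}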
Example 51
Source File: ExampleFilter.scala    From crm-seed   with Apache License 2.0 5 votes vote down vote up
package com.dataengi.crm.contacts.filters

import javax.inject._

import akka.stream.Materializer
import play.api.mvc._

import scala.concurrent.{ExecutionContext, Future}

@Singleton
class ExampleFilter @Inject()(
    implicit override val mat: Materializer,
    exec: ExecutionContext) extends Filter {

  override def apply(nextFilter: RequestHeader => Future[Result])
           (requestHeader: RequestHeader): Future[Result] = {
    nextFilter(requestHeader).map { result =>
      result.withHeaders("X-ExampleFilter" -> "foo")
    }
  }

} 
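A filter like this only runs once it is registered. The usual route on Play 2.6+ is a DefaultHttpFilters subclass that Play discovers by convention, sketched here under that assumption:

import javax.inject.Inject
import play.api.http.DefaultHttpFilters
import com.dataengi.crm.contacts.filters.ExampleFilter

class Filters @Inject()(exampleFilter: ExampleFilter)
    extends DefaultHttpFilters(exampleFilter)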
Example 52
Source File: DexWsConnector.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.tool.connectors

import akka.actor.ActorSystem
import akka.stream.Materializer
import cats.syntax.either._
import com.wavesplatform.dex.api.ws.connection.WsConnection
import com.wavesplatform.dex.api.ws.connection.WsConnectionOps._
import com.wavesplatform.dex.api.ws.protocol._
import com.wavesplatform.dex.cli.{ErrorOr, lift}
import com.wavesplatform.dex.domain.asset.AssetPair
import com.wavesplatform.dex.tool.connectors.AuthServiceRestConnector.AuthCredentials
import com.wavesplatform.dex.tool.connectors.Connector.RepeatRequestOptions

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.reflect.ClassTag
import scala.util.Try

case class DexWsConnector private (target: String, wsc: WsConnection)(implicit system: ActorSystem, materializer: Materializer) extends Connector {

  import DexWsConnector._

  override implicit val repeatRequestOptions: RepeatRequestOptions = RepeatRequestOptions(30, 100.millis)

  private def repeat[A](f: => A)(test: A => Boolean): ErrorOr[A] = repeatRequest { lift(f) } { _.exists(test) }

  def receiveAtLeastN[T <: WsServerMessage: ClassTag](count: Int): ErrorOr[List[T]] =
    for {
      result <- repeat(wsc.collectMessages[T])(_.length >= count)
      _      <- lift { Thread.sleep(awaitingAdditionalMessagesPeriodMs) }
    } yield result

  def receiveInitialMessage: ErrorOr[WsInitial] =
    for {
      wsInitial <- receiveAtLeastN[WsInitial](1).map(_.head)
      _         <- clearMessages()
    } yield wsInitial

  def subscribeForOrderBookUpdates(assetPair: AssetPair): ErrorOr[WsOrderBookChanges] =
    for {
      _        <- lift { wsc.send(WsOrderBookSubscribe(assetPair, defaultDepth)) }
      snapshot <- receiveAtLeastN[WsOrderBookChanges](1).bimap(ex => s"Cannot get order book snapshot! $ex", _.head)
    } yield snapshot

  def subscribeForAccountUpdates(credentials: AuthCredentials): ErrorOr[WsAddressChanges] =
    for {
      _        <- lift { wsc.send(WsAddressSubscribe(credentials.keyPair, WsAddressSubscribe.defaultAuthType, credentials.token)) }
      snapshot <- receiveAtLeastN[WsAddressChanges](1).bimap(ex => s"Cannot get account snapshot! $ex", _.head)
    } yield snapshot

  def clearMessages(): ErrorOr[Unit] = lift { wsc.clearMessages() }

  override def close(): Unit = {
    wsc.close()
    Await.result(wsc.closed.zip(system.terminate()), 3.seconds)
    materializer.shutdown()
  }
}

object DexWsConnector {

  private val defaultDepth                       = 10
  private val awaitingAdditionalMessagesPeriodMs = 200

  def create(target: String): ErrorOr[DexWsConnector] = {

    implicit val system: ActorSystem        = ActorSystem()
    implicit val materializer: Materializer = Materializer.matFromSystem(system)

    Try { new WsConnection(target, true) }.toEither
      .bimap(ex => s"Web Socket connection cannot be established! $ex", wsc => DexWsConnector(target, wsc))
  }
} 
Example 53
Source File: WsConnection.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.api.ws.connection

import java.util.concurrent.ConcurrentLinkedQueue

import akka.Done
import akka.actor.{ActorRef, ActorSystem, Status}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage, WebSocketRequest}
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy}
import com.wavesplatform.dex.api.ws.protocol.{WsClientMessage, WsMessage, WsPingOrPong, WsServerMessage}
import com.wavesplatform.dex.domain.utils.ScorexLogging
import play.api.libs.json.Json

import scala.collection.JavaConverters._
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

class WsConnection(uri: String, keepAlive: Boolean = true)(implicit system: ActorSystem, materializer: Materializer) extends ScorexLogging {

  log.info(s"""Connecting to Matcher WS API:
            |         URI = $uri
            |  Keep alive = $keepAlive""".stripMargin)

  import materializer.executionContext

  private val wsHandlerRef = system.actorOf(TestWsHandlerActor props keepAlive)

  protected def stringifyClientMessage(cm: WsClientMessage): TextMessage.Strict =
    WsMessage.toStrictTextMessage(cm)(WsClientMessage.wsClientMessageWrites)

  // From test to server
  private val source: Source[TextMessage.Strict, ActorRef] = {
    val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case akka.actor.Status.Success(_) => CompletionStrategy.draining }
    val failureMatcher: PartialFunction[Any, Throwable]             = { case Status.Failure(cause)        => cause }

    Source
      .actorRef[WsClientMessage](completionMatcher, failureMatcher, 10, OverflowStrategy.fail)
      .map(stringifyClientMessage)
      .mapMaterializedValue { source =>
        wsHandlerRef.tell(TestWsHandlerActor.AssignSourceRef, source)
        source
      }
  }

  private val messagesBuffer: ConcurrentLinkedQueue[WsServerMessage] = new ConcurrentLinkedQueue[WsServerMessage]()

  // From server to test
  private val sink: Sink[Message, Future[Done]] = Sink.foreach {
    case tm: TextMessage =>
      for {
        strictText <- tm.toStrict(1.second).map(_.getStrictText)
        clientMessage <- {
          log.trace(s"Got $strictText")
          Try { Json.parse(strictText).as[WsServerMessage] } match {
            case Failure(exception) => Future.failed(exception)
            case Success(x) => {
              messagesBuffer.add(x)
              if (keepAlive) x match {
                case value: WsPingOrPong => wsHandlerRef ! value
                case _                   =>
              }
              Future.successful(x)
            }
          }
        }
      } yield clientMessage

    case bm: BinaryMessage =>
      bm.dataStream.runWith(Sink.ignore)
      Future.failed { new IllegalArgumentException("Binary messages are not supported") }
  }

  private val flow: Flow[Message, TextMessage.Strict, Future[Done]] = Flow.fromSinkAndSourceCoupled(sink, source).watchTermination() {
    case (_, f) =>
      f.onComplete {
        case Success(_) => log.info(s"WebSocket connection to $uri successfully closed")
        case Failure(e) => log.error(s"WebSocket connection to $uri closed with an error", e)
      }(materializer.executionContext)
      f
  }

  val (connectionResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(uri), flow)

  val connectionOpenedTs: Long                   = System.currentTimeMillis
  val connectionClosedTs: Future[Long]           = closed.map(_ => System.currentTimeMillis)
  val connectionLifetime: Future[FiniteDuration] = connectionClosedTs.map(cc => FiniteDuration(cc - connectionOpenedTs, MILLISECONDS))

  def messages: List[WsServerMessage] = messagesBuffer.iterator().asScala.toList
  def clearMessages(): Unit           = messagesBuffer.clear()

  def send(message: WsClientMessage): Unit = wsHandlerRef ! TestWsHandlerActor.SendToServer(message)

  def close(): Unit     = if (!isClosed) wsHandlerRef ! TestWsHandlerActor.CloseConnection
  def isClosed: Boolean = closed.isCompleted
} 
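The core shape of this client, independent of the matcher protocol: an inbound Sink and an actor-backed outbound Source are coupled into one Flow, so completion or failure on either side tears down the whole connection. Stripped to its essentials (Akka 2.6+; the endpoint URL is a placeholder):

import akka.Done
import akka.actor.{ActorRef, ActorSystem, Status}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.{Message, TextMessage, WebSocketRequest}
import akka.stream.{CompletionStrategy, OverflowStrategy}
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}

import scala.concurrent.Future

object CoupledWsSketch extends App {
  implicit val system: ActorSystem = ActorSystem("ws-sketch")

  // Inbound: print every server message.
  val in: Sink[Message, Future[Done]] = Sink.foreach(m => println(s"got: $m"))

  // Outbound: an actor-backed source; send TextMessage.Strict to its ActorRef.
  val out: Source[TextMessage.Strict, ActorRef] = Source.actorRef[TextMessage.Strict](
    completionMatcher = { case Status.Success(_) => CompletionStrategy.draining },
    failureMatcher = { case Status.Failure(cause) => cause },
    bufferSize = 16,
    overflowStrategy = OverflowStrategy.fail)

  // Coupled: completion or failure on either side tears down both.
  val flow: Flow[Message, TextMessage.Strict, (Future[Done], ActorRef)] =
    Flow.fromSinkAndSourceCoupledMat(in, out)(Keep.both)

  val (upgrade, (done, outRef)) =
    Http().singleWebSocketRequest(WebSocketRequest("ws://echo.example/ws"), flow)

  outRef ! TextMessage.Strict("hello")
}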
Example 54
Source File: WsConnection.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.load.ws

import akka.Done
import akka.actor.{ActorRef, ActorSystem, Status}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage, WebSocketRequest}
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy}
import com.wavesplatform.dex.api.ws.connection.TestWsHandlerActor
import com.wavesplatform.dex.api.ws.protocol.{WsClientMessage, WsMessage, WsServerMessage}
import com.wavesplatform.dex.domain.utils.ScorexLogging
import play.api.libs.json.Json

import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import scala.util.{Failure, Success, Try}

class WsConnection(uri: String, receive: WsServerMessage => Option[WsClientMessage])(implicit system: ActorSystem) extends ScorexLogging {

  import system.dispatcher
  private implicit val materializer = Materializer(system)
  private val wsHandlerRef          = system.actorOf(TestWsHandlerActor.props(keepAlive = true))

  log.info(s"Connecting to Matcher WS API: $uri")

  protected def stringifyClientMessage(cm: WsClientMessage): TextMessage.Strict =
    WsMessage.toStrictTextMessage(cm)(WsClientMessage.wsClientMessageWrites)

  // To server
  private val source: Source[TextMessage.Strict, ActorRef] = {
    val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case akka.actor.Status.Success(_) => CompletionStrategy.draining }
    val failureMatcher: PartialFunction[Any, Throwable]             = { case Status.Failure(cause)        => cause }

    Source
      .actorRef[WsClientMessage](completionMatcher, failureMatcher, 10, OverflowStrategy.fail)
      .map(stringifyClientMessage)
      .mapMaterializedValue { source =>
        wsHandlerRef.tell(TestWsHandlerActor.AssignSourceRef, source)
        source
      }
  }

  // To client
  private val sink: Sink[Message, Future[Done]] = Sink.foreach {
    case tm: TextMessage => // TODO move to tests
      for {
        strictText <- tm.toStrict(1.second).map(_.getStrictText)
        clientMessage <- {
          log.trace(s"Got $strictText")
          Try { Json.parse(strictText).as[WsServerMessage] } match {
            case Failure(exception) => Future.failed(exception)
            case Success(x)         => Future.successful { receive(x).foreach(wsHandlerRef ! _) }
          }
        }
      } yield clientMessage

    case bm: BinaryMessage =>
      bm.dataStream.runWith(Sink.ignore)
      Future.failed { new IllegalArgumentException("Binary messages are not supported") }
  }

  private val flow: Flow[Message, TextMessage.Strict, Future[Done]] = Flow.fromSinkAndSourceCoupled(sink, source).watchTermination() {
    case (_, f) =>
      f.onComplete {
        case Success(_) => log.info(s"WebSocket connection to $uri successfully closed")
        case Failure(e) => log.error(s"WebSocket connection to $uri closed with an error", e)
      }(materializer.executionContext)
      f
  }

  val (connectionResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(uri), flow)

  def send(message: WsClientMessage): Unit = wsHandlerRef ! TestWsHandlerActor.SendToServer(message)

  def isClosed: Boolean = closed.isCompleted
  def close(): Future[Done] = {
    if (!isClosed) wsHandlerRef ! TestWsHandlerActor.CloseConnection
    closed
  }
} 
Example 55
Source File: HasWebSockets.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.it.api.websockets

import java.lang
import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.wavesplatform.dex.api.ws.connection.{WsConnection, WsConnectionOps}
import com.wavesplatform.dex.api.ws.entities.{WsBalances, WsOrder}
import com.wavesplatform.dex.api.ws.protocol.{WsAddressSubscribe, WsInitial, WsOrderBookSubscribe}
import com.wavesplatform.dex.domain.account.KeyPair
import com.wavesplatform.dex.domain.asset.{Asset, AssetPair}
import com.wavesplatform.dex.error.ErrorFormatterContext
import com.wavesplatform.dex.it.config.PredefinedAssets
import com.wavesplatform.dex.it.docker.DexContainer
import com.wavesplatform.dex.test.matchers.DiffMatcherWithImplicits
import mouse.any._
import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.duration._

trait HasWebSockets extends BeforeAndAfterAll with HasJwt with WsConnectionOps with WsMessageOps {
  _: Suite with Eventually with Matchers with DiffMatcherWithImplicits with PredefinedAssets =>

  implicit protected val system: ActorSystem        = ActorSystem()
  implicit protected val materializer: Materializer = Materializer.matFromSystem(system)
  implicit protected val efc: ErrorFormatterContext = assetDecimalsMap.apply

  protected def getWsStreamUri(dex: DexContainer): String = s"ws://127.0.0.1:${dex.restApiAddress.getPort}/ws/v0"

  protected val knownWsConnections: ConcurrentHashMap.KeySetView[WsConnection, lang.Boolean] =
    ConcurrentHashMap.newKeySet[WsConnection]()

  protected def addConnection(connection: WsConnection): Unit = knownWsConnections.add(connection)

  protected def mkWsAddressConnection(client: KeyPair,
                                      dex: DexContainer,
                                      keepAlive: Boolean = true,
                                      subscriptionLifetime: FiniteDuration = 1.hour): WsConnection = {
    val jwt        = mkJwt(client, lifetime = subscriptionLifetime)
    val connection = mkDexWsConnection(dex, keepAlive)
    connection.send(WsAddressSubscribe(client.toAddress, WsAddressSubscribe.defaultAuthType, jwt))
    connection
  }

  protected def mkWsOrderBookConnection(assetPair: AssetPair, dex: DexContainer, depth: Int = 1): WsConnection = {
    val connection = mkDexWsConnection(dex)
    connection.send(WsOrderBookSubscribe(assetPair, depth))
    connection
  }

  protected def mkWsInternalConnection(dex: DexContainer, keepAlive: Boolean = true): WsConnection =
    mkWsConnection(s"${getWsStreamUri(dex)}/internal", keepAlive)

  protected def mkDexWsConnection(dex: DexContainer, keepAlive: Boolean = true): WsConnection =
    mkWsConnection(getWsStreamUri(dex), keepAlive)

  protected def mkWsConnection(uri: String, keepAlive: Boolean = true): WsConnection = {
    new WsConnection(uri, keepAlive) unsafeTap { wsc =>
      addConnection(wsc)
      eventually { wsc.collectMessages[WsInitial] should have size 1 }
      wsc.clearMessages()
    }
  }

  protected def assertChanges(c: WsConnection, squash: Boolean = true)(expBs: Map[Asset, WsBalances]*)(expOs: WsOrder*): Unit = {
    eventually {
      if (squash) {
        c.balanceChanges.size should be <= expBs.size
        c.balanceChanges.squashed should matchTo { expBs.toList.squashed }
        c.orderChanges.size should be <= expOs.size
        c.orderChanges.squashed should matchTo { expOs.toList.squashed }
      } else {
        c.balanceChanges should matchTo(expBs)
        c.orderChanges should matchTo(expOs)
      }
    }

    c.clearMessages()
  }

  protected def cleanupWebSockets(): Unit = {
    if (!knownWsConnections.isEmpty) {
      knownWsConnections.forEach { _.close() }
      materializer.shutdown()
    }
  }

  override def afterAll(): Unit = {
    super.afterAll()
    cleanupWebSockets()
  }
} 
Example 56
Source File: TestSpec.scala    From intro-to-akka-streams   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.streams

import akka.NotUsed
import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.stream.testkit.TestSubscriber
import akka.stream.testkit.scaladsl.TestSink
import akka.testkit.TestProbe
import akka.util.Timeout
import com.github.dnvriend.streams.util.ClasspathResources
import org.scalatest._
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatestplus.play.guice.GuiceOneServerPerSuite
import play.api.inject.BindingKey
import play.api.libs.json.{ Format, Json }
import play.api.test.WsTestClient

import scala.collection.immutable._
import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
import scala.util.Try

object Person {
  implicit val format: Format[Person] = Json.format[Person]
}

final case class Person(firstName: String, age: Int)

class TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with OptionValues
    with TryValues
    with ScalaFutures
    with WsTestClient
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with ClasspathResources
    with GuiceOneServerPerSuite {

  def getComponent[A: ClassTag] = app.injector.instanceOf[A]
  def getNamedComponent[A](name: String)(implicit ct: ClassTag[A]): A =
    app.injector.instanceOf[A](BindingKey(ct.runtimeClass.asInstanceOf[Class[A]]).qualifiedWith(name))

  // set the port number of the HTTP server
  override lazy val port: Int = 8081
  implicit val timeout: Timeout = 1.second
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 30.seconds, interval = 300.millis)
  implicit val system: ActorSystem = getComponent[ActorSystem]
  implicit val ec: ExecutionContext = getComponent[ExecutionContext]
  implicit val mat: Materializer = getComponent[Materializer]
  val log: LoggingAdapter = Logging(system, this.getClass)

  // ================================== Supporting Operations ====================================
  def id: String = java.util.UUID.randomUUID().toString

  implicit class FutureToTry[T](f: Future[T]) {
    def toTry: Try[T] = Try(f.futureValue)
  }

  implicit class SourceOps[A](src: Source[A, NotUsed]) {
    def testProbe(f: TestSubscriber.Probe[A] ⇒ Unit): Unit =
      f(src.runWith(TestSink.probe(system)))
  }

  def withIterator[T](start: Int = 0)(f: Source[Int, NotUsed] ⇒ T): T =
    f(Source.fromIterator(() ⇒ Iterator from start))

  def fromCollection[A](xs: Iterable[A])(f: TestSubscriber.Probe[A] ⇒ Unit): Unit =
    f(Source(xs).runWith(TestSink.probe(system)))

  def killActors(refs: ActorRef*): Unit = {
    val tp = TestProbe()
    refs.foreach { ref ⇒
      tp watch ref
      tp.send(ref, PoisonPill)
      tp.expectTerminated(ref)
    }
  }
} 
Example 57
Source File: Akka.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s.indefinite

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}

trait Akka {

  this: Logger =>

  implicit lazy val system: ActorSystem = ActorSystem()
  implicit lazy val materializer: Materializer = ActorMaterializer()
  implicit def executionContext: ExecutionContext = system.dispatcher

  def stopAkka(): Unit = {
    logger.info("Stopping Akka...")
    Await.ready(system.terminate(), 1.second)
  }
} 
Example 58
Source File: WriteAndReadFilteredAkkaApp.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s.akka

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.{ActorMaterializer, Materializer}
import com.github.mjakubowski84.parquet4s.{Col, ParquetReader, ParquetStreams}
import com.google.common.io.Files

import scala.concurrent.Future
import scala.util.Random

object WriteAndReadFilteredAkkaApp extends App {

  object Dict {
    val A = "A"
    val B = "B"
    val C = "C"
    val D = "D"

    val values: List[String] = List(A, B, C, D)
    def random: String = values(Random.nextInt(values.length))
  }

  case class Data(id: Int, dict: String)

  val count = 100
  val data = (1 to count).map { i => Data(id = i, dict = Dict.random) }
  val path = Files.createTempDir().getAbsolutePath

  implicit val system: ActorSystem = ActorSystem()
  implicit val materializer: Materializer = ActorMaterializer()
  import system.dispatcher

  val options = ParquetReader.Options()
  val printingSink = Sink.foreach(println)

  for {
    // write
    _ <- Source(data).runWith(ParquetStreams.toParquetSingleFile(s"$path/data.parquet"))
    // read filtered
    _ <- Future(println("""dict == "A""""))
    _ <- ParquetStreams.fromParquet[Data](path, options = options, filter = Col("dict") === Dict.A).runWith(printingSink)
    _ <- Future(println("""id >= 20 && id < 40"""))
    _ <- ParquetStreams.fromParquet[Data](path, options = options, filter = Col("id") >= 20 && Col("id") < 40).runWith(printingSink)
    // finish
    _ <- system.terminate()
  } yield ()

} 
Example 59
Source File: WriteAndReadCustomTypeAkkaApp.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s.akka

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.{ActorMaterializer, Materializer}
import com.github.mjakubowski84.parquet4s.CustomType._
import com.github.mjakubowski84.parquet4s.ParquetStreams
import com.google.common.io.Files

object WriteAndReadCustomTypeAkkaApp extends App {

  object Data {
    def generate(count: Int): Iterator[Data] = Iterator.range(1, count).map { i => Data(id = i, dict = Dict.random) }
  }
  case class Data(id: Long, dict: Dict.Type)

  val data = () => Data.generate(count = 100)
  val path = Files.createTempDir().getAbsolutePath

  implicit val system: ActorSystem = ActorSystem()
  implicit val materializer: Materializer = ActorMaterializer()
  import system.dispatcher

  for {
    // write
    _ <- Source.fromIterator(data).runWith(ParquetStreams.toParquetSingleFile(s"$path/data.parquet"))
    // read
    // hint: you can filter by dict using string value, for example: filter = Col("dict") === "A"
    _ <- ParquetStreams.fromParquet[Data](path).runWith(Sink.foreach(println))
    // finish
    _ <- system.terminate()
  } yield ()

} 
Example 60
Source File: WriteAndReadAkkaApp.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s.akka

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.{ActorMaterializer, Materializer}
import com.github.mjakubowski84.parquet4s.ParquetStreams
import com.google.common.io.Files

import scala.util.Random

object WriteAndReadAkkaApp extends App {

  case class Data(id: Int, text: String)

  val count = 100
  val data = (1 to count).map { i => Data(id = i, text = Random.nextString(4)) }
  val path = Files.createTempDir().getAbsolutePath

  implicit val system: ActorSystem = ActorSystem()
  implicit val materializer: Materializer = ActorMaterializer()
  import system.dispatcher

  for {
    // write
    _ <- Source(data).runWith(ParquetStreams.toParquetSingleFile(s"$path/data.parquet"))
    // read
    _ <- ParquetStreams.fromParquet[Data](path).runWith(Sink.foreach(println))
    // finish
    _ <- system.terminate()
  } yield ()

} 
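In all three parquet4s apps the for-comprehension is plain Future sequencing: each stage starts only after the previous stream completes, which is why system.terminate() can safely come last. Desugared as a sketch:

import scala.concurrent.{ExecutionContext, Future}

object FutureSequencingSketch {
  // Equivalent to `for { _ <- write(); _ <- read(); _ <- stop() } yield ()`.
  def pipeline(write: () => Future[Any], read: () => Future[Any], stop: () => Future[Any])
              (implicit ec: ExecutionContext): Future[Unit] =
    write().flatMap(_ => read()).flatMap(_ => stop()).map(_ => ())
}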
Example 61
Source File: TestAbstractModule.scala    From daf   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package controllers.modules

import akka.stream.Materializer
import daf.util.TestCache
import org.pac4j.play.store.{ PlayCacheSessionStore, PlaySessionStore }
import play.api.{ Configuration, Environment }
import play.api.inject.Module
import play.api.libs.ws.WSClient
import play.api.libs.ws.ahc.AhcWSClient
import play.api.mvc.BodyParsers
import play.cache.{ CacheApi, DefaultCacheApi }
import play.api.cache.{ CacheApi => ApiCacheApi }
import play.api.inject.guice.GuiceInjectorBuilder

abstract class TestAbstractModule extends Module {

  private lazy val injector = new GuiceInjectorBuilder().bindings(this).injector()

  private lazy val sessionStoreInstance: PlaySessionStore = injector.instanceOf { classOf[PlaySessionStore] }
  private lazy val wsClientInstance: WSClient = injector.instanceOf { classOf[WSClient] }
  private lazy val bodyParsersInstance: BodyParsers = BodyParsers

  final def sessionStore: PlaySessionStore = sessionStoreInstance
  final def ws: WSClient = wsClientInstance
  final def bodyParsers: BodyParsers = bodyParsersInstance

  protected implicit def materializer: Materializer

  override def bindings(environment: Environment, configuration: Configuration) = Seq(
    bind(classOf[ApiCacheApi]).to(classOf[TestCache]),
    bind(classOf[CacheApi]).to(classOf[DefaultCacheApi]),
    bind(classOf[PlaySessionStore]).to(classOf[PlayCacheSessionStore]),
    bind(classOf[BodyParsers]).to(BodyParsers),
    bind(classOf[WSClient]).toInstance(AhcWSClient())
  )
} 
Example 62
Source File: SecurityFilter.scala    From daf   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package it.gov.daf.common.filters.authentication

import javax.inject.{Inject, Singleton}

import akka.stream.Materializer
import org.pac4j.core.config.Config
import org.pac4j.play.store.PlaySessionStore
import play.api.Configuration
import play.api.mvc._
import play.libs.concurrent.HttpExecutionContext

import scala.concurrent.Future

@SuppressWarnings(
  Array(
    "org.wartremover.warts.Overloading"
  )
)
@Singleton
class SecurityFilter @Inject()(mat: Materializer, configuration: Configuration, playSessionStore: PlaySessionStore, config: Config, ec: HttpExecutionContext) extends org.pac4j.play.filters.SecurityFilter(mat, configuration, playSessionStore, config, ec) {

  override def apply(nextFilter: (RequestHeader) => Future[play.api.mvc.Result])
                    (request: RequestHeader): Future[play.api.mvc.Result] = {
    super.apply(nextFilter)(request)
  }
} 
Example 63
Source File: LogCollector.scala    From pulse   with Apache License 2.0 5 votes vote down vote up
package io.phdata.pulse.logcollector

import java.io.FileInputStream
import java.util.Properties
import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.{ ActorMaterializer, Materializer }
import com.typesafe.scalalogging.LazyLogging
import io.phdata.pulse.common.SolrService
import io.phdata.pulse.solr.SolrProvider
import org.apache.kudu.client.KuduClient.KuduClientBuilder

import scala.concurrent.duration.Duration
import scala.concurrent.{ Await, Future }
import scala.util.{ Failure, Success }


object LogCollector extends LazyLogging {

  def main(args: Array[String]): Unit = {
    System.getProperty("java.security.auth.login.config") match {
      case null =>
        logger.info(
          "java.security.auth.login.config is not set, continuing without kerberos authentication")
      case _ =>
        KerberosContext.scheduleKerberosLogin(0, 9, TimeUnit.HOURS)
    }

    start(args)
  }

  private def start(args: Array[String]): Unit = {
    val cliParser = new LogCollectorCliParser(args)

    val solrService = SolrProvider.create(cliParser.zkHosts().split(",").toList)
    val solrStream  = new SolrCloudStream(solrService)

    val kuduClient =
      cliParser.kuduMasters.toOption.map(masters =>
        KerberosContext.runPrivileged(new KuduClientBuilder(masters).build()))

    val kuduService =
      kuduClient.map(client => KerberosContext.runPrivileged(new KuduService(client)))

    val routes = new LogCollectorRoutes(solrStream, kuduService)

    cliParser.mode() match {
      case "kafka" => {
        kafka(solrService, cliParser.kafkaProps(), cliParser.topic())
      }
      case _ => {
        http(cliParser.port(), routes)
      }
    }
  }

  // Starts Http Service
  def http(port: Int, routes: LogCollectorRoutes): Future[Unit] = {
    implicit val actorSystem: ActorSystem   = ActorSystem()
    implicit val ec                         = actorSystem.dispatchers.lookup("akka.actor.http-dispatcher")
    implicit val materializer: Materializer = ActorMaterializer.create(actorSystem)

    val httpServerFuture = Http().bindAndHandle(routes.routes, "0.0.0.0", port)(materializer) map {
      binding =>
        logger.info(s"Log Collector interface bound to: ${binding.localAddress}")
    }

    httpServerFuture.onComplete {
      case Success(v) => ()
      case Failure(ex) => {
        logger.error("HTTP server failed, exiting. ", ex)
        System.exit(1)
      }
    }

    Await.ready(
      httpServerFuture,
      Duration.Inf
    )
  }

  // Starts Kafka Consumer
  def kafka(solrService: SolrService, kafkaProps: String, topic: String): Unit = {

    val solrCloudStream = new SolrCloudStream(solrService)

    val kafkaConsumer      = new PulseKafkaConsumer(solrCloudStream)
    val kafkaConsumerProps = new Properties()

    kafkaConsumerProps.load(new FileInputStream(kafkaProps))

    kafkaConsumer.read(kafkaConsumerProps, topic)
  }
} 
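Http().bindAndHandle with an explicitly passed materializer, as above, is the pre-10.2 Akka HTTP API. On Akka HTTP 10.2+ the same server reduces to the builder API, with the materializer resolved from the implicit system (a sketch, not part of pulse):

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._

object MinimalHttpServer extends App {
  implicit val system: ActorSystem = ActorSystem("http-sketch")
  // No explicit Materializer: it is derived from the implicit ActorSystem.
  Http().newServerAt("0.0.0.0", 8080).bind(complete("ok"))
}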
Example 64
Source File: ClientSpec.scala    From seals   with Apache License 2.0 5 votes vote down vote up
package com.example.lib

import scala.concurrent.Await
import scala.concurrent.duration._

import cats.effect.IO

import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpec

import fs2.Stream

import akka.actor.ActorSystem
import akka.stream.{ Materializer, ActorMaterializer }

import Protocol.v1.{ Response, RandInt, Seeded }

class ClientSpec extends AnyFlatSpec with Matchers with com.example.lib.TcpTest {

  implicit lazy val sys: ActorSystem = ActorSystem("InteropSpec")
  implicit lazy val mat: Materializer = ActorMaterializer()

  protected override def ec = sys.dispatcher

  override def afterAll(): Unit = {
    super.afterAll()
    sys.terminate()
  }

  "Client" should "receive the correct response" in {
    val sem = cats.effect.concurrent.Semaphore[IO](0).unsafeRunSync()
    Stream(Server.serve(1237, sockGroup).drain, Stream.eval(sem.acquire))
      .parJoin(Int.MaxValue)
      .take(1)
      .compile
      .drain
      .unsafeRunAsync(_ => ())
    try {
      val resp = Await.result(Client.client(1237), 2.seconds)
      // constant, because we always seed with the same value:
      resp should === (Vector[Response](Seeded, RandInt(42)))
    } finally {
      sem.release.unsafeRunSync()
    }
  }
} 
Example 65
Source File: schema.scala    From sangria-subscriptions-example   with Apache License 2.0 5 votes vote down vote up
import akka.actor.ActorRef
import akka.util.Timeout
import generic.{Event, Versioned, View}
import generic.View.Get
import sangria.execution.UserFacingError
import sangria.schema._
import sangria.macros.derive._
import akka.pattern.ask
import akka.stream.Materializer
import sangria.execution.deferred.{Fetcher, HasId}

import scala.concurrent.ExecutionContext
import sangria.streaming.akkaStreams._

object schema {
  case class MutationError(message: String) extends Exception(message) with UserFacingError

  val authors = Fetcher.caching((c: Ctx, ids: Seq[String]) ⇒ c.loadAuthors(ids))(HasId(_.id))

  def createSchema(implicit timeout: Timeout, ec: ExecutionContext, mat: Materializer) = {
    val VersionedType = InterfaceType("Versioned", fields[Ctx, Versioned](
      Field("id", StringType, resolve = _.value.id),
      Field("version", LongType, resolve = _.value.version)))

    implicit val AuthorType = deriveObjectType[Unit, Author](Interfaces(VersionedType))

    val EventType = InterfaceType("Event", fields[Ctx, Event](
      Field("id", StringType, resolve = _.value.id),
      Field("version", LongType, resolve = _.value.version)))

    val AuthorCreatedType = deriveObjectType[Unit, AuthorCreated](Interfaces(EventType))
    val AuthorNameChangedType = deriveObjectType[Unit, AuthorNameChanged](Interfaces(EventType))
    val AuthorDeletedType = deriveObjectType[Unit, AuthorDeleted](Interfaces(EventType))

    val ArticleCreatedType = deriveObjectType[Unit, ArticleCreated](
      Interfaces(EventType),
      ReplaceField("authorId", Field("author", OptionType(AuthorType),
        resolve = c ⇒ authors.deferOpt(c.value.authorId))))

    val ArticleTextChangedType = deriveObjectType[Unit, ArticleTextChanged](Interfaces(EventType))
    val ArticleDeletedType = deriveObjectType[Unit, ArticleDeleted](Interfaces(EventType))

    implicit val ArticleType = deriveObjectType[Ctx, Article](
      Interfaces(VersionedType),
      ReplaceField("authorId", Field("author", OptionType(AuthorType),
        resolve = c ⇒ authors.deferOpt(c.value.authorId))))

    val IdArg = Argument("id", StringType)
    val OffsetArg = Argument("offset", OptionInputType(IntType), 0)
    val LimitArg = Argument("limit", OptionInputType(IntType), 100)

    def entityFields[T](name: String, tpe: ObjectType[Ctx, T], actor: Ctx ⇒ ActorRef) = fields[Ctx, Unit](
      Field(name, OptionType(tpe),
        arguments = IdArg :: Nil,
        resolve = c ⇒ (actor(c.ctx) ? Get(c.arg(IdArg))).mapTo[Option[T]]),
      Field(name + "s", ListType(tpe),
        arguments = OffsetArg :: LimitArg :: Nil,
        resolve = c ⇒ (actor(c.ctx) ? View.List(c.arg(OffsetArg), c.arg(LimitArg))).mapTo[Seq[T]]))

    val QueryType = ObjectType("Query",
      entityFields[Author]("author", AuthorType, _.authors) ++
      entityFields[Article]("article", ArticleType, _.articles))

    val MutationType = deriveContextObjectType[Ctx, Mutation, Unit](identity)

    
    def subscriptionField[T <: Event](tpe: ObjectType[Ctx, T]) = {
      val fieldName = tpe.name.head.toLower + tpe.name.tail

      Field.subs(fieldName, tpe,
        resolve = (c: Context[Ctx, Unit]) ⇒
          c.ctx.eventStream
            .filter(event ⇒ tpe.valClass.isAssignableFrom(event.getClass))
            .map(event ⇒ Action(event.asInstanceOf[T])))
    }

    val SubscriptionType = ObjectType("Subscription", fields[Ctx, Unit](
      subscriptionField(AuthorCreatedType),
      subscriptionField(AuthorNameChangedType),
      subscriptionField(AuthorDeletedType),
      subscriptionField(ArticleCreatedType),
      subscriptionField(ArticleTextChangedType),
      subscriptionField(ArticleDeletedType),
      Field.subs("allEvents", EventType, resolve = _.ctx.eventStream.map(Action(_)))
    ))

    Schema(QueryType, Some(MutationType), Some(SubscriptionType))
  }
} 
Example 66
Source File: ReliableHttpProxyFactory.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.akkahttp.proxy

import akka.NotUsed
import akka.actor._
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpEntity, HttpRequest, HttpResponse}
import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Sink, Source}
import org.slf4j.LoggerFactory
import rhttpc.client.protocol.{Correlated, Request}
import rhttpc.client.proxy._

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

object ReliableHttpProxyFactory {

  private lazy val logger = LoggerFactory.getLogger(getClass)

  def send(successRecognizer: SuccessHttpResponseRecognizer, batchSize: Int, parallelConsumers: Int)
          (request: Request[HttpRequest])
          (implicit actorSystem: ActorSystem, materialize: Materializer): Future[HttpResponse] = {
    import actorSystem.dispatcher
    send(prepareHttpFlow(batchSize * parallelConsumers), successRecognizer)(request.correlated)
  }

  private def prepareHttpFlow(parallelism: Int)
                             (implicit actorSystem: ActorSystem, materialize: Materializer):
    Flow[(HttpRequest, String), HttpResponse, NotUsed] = {

    import actorSystem.dispatcher
    Http().superPool[String]().mapAsync(parallelism) {
      case (tryResponse, id) =>
        tryResponse match {
          case Success(response) =>
            response.toStrict(1 minute)
          case Failure(ex) =>
            Future.failed(ex)
        }
    }
  }

  private def send(httpFlow: Flow[(HttpRequest, String), HttpResponse, Any], successRecognizer: SuccessHttpResponseRecognizer)
                  (corr: Correlated[HttpRequest])
                  (implicit ec: ExecutionContext, materialize: Materializer): Future[HttpResponse] = {
    import collection.JavaConverters._
    logger.debug(
      s"""Sending request for ${corr.correlationId} to ${corr.msg.getUri()}. Headers:
         |${corr.msg.getHeaders().asScala.toSeq.map(h => "  " + h.name() + ": " + h.value()).mkString("\n")}
         |Body:
         |${corr.msg.entity.asInstanceOf[HttpEntity.Strict].data.utf8String}""".stripMargin
    )
    val logResp = logResponse(corr) _
    val responseFuture = Source.single((corr.msg, corr.correlationId)).via(httpFlow).runWith(Sink.head)
    responseFuture.onComplete {
      case Failure(ex) =>
        logger.error(s"Got failure for ${corr.correlationId} to ${corr.msg.getUri()}", ex)
      case Success(_) =>
    }
    for {
      response <- responseFuture
      transformedToFailureIfNeed <- {
        if (successRecognizer.isSuccess(response)) {
          logResp(response, "success response")
          Future.successful(response)
        } else {
          logResp(response, "response recognized as non-success")
          Future.failed(NonSuccessResponse)
        }
      }
    } yield transformedToFailureIfNeed
  }

  private def logResponse(corr: Correlated[HttpRequest])
                         (response: HttpResponse, additionalInfo: String): Unit = {
    import collection.JavaConverters._
    logger.debug(
      s"""Got $additionalInfo for ${corr.correlationId} to ${corr.msg.getUri()}. Status: ${response.status.value}. Headers:
         |${response.getHeaders().asScala.toSeq.map(h => "  " + h.name() + ": " + h.value()).mkString("\n")}
         |Body:
         |${response.entity.asInstanceOf[HttpEntity.Strict].data.utf8String}""".stripMargin
    )
  }

} 
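superPool in isolation: requests are tagged (here with a correlation id) and responses come back with the same tag, possibly out of order, which is what lets ReliableHttpProxyFactory match responses to requests. A minimal sketch (assumes Akka 2.6's system materializer):

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.Future
import scala.util.Try

object SuperPoolSketch extends App {
  implicit val system: ActorSystem = ActorSystem("superpool-sketch")

  // The tag travels with the request through the pool and back.
  val response: Future[(Try[HttpResponse], String)] =
    Source.single(HttpRequest(uri = "http://example.com/") -> "req-1")
      .via(Http().superPool[String]())
      .runWith(Sink.head)
}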
Example 67
Source File: JacksonSupport.scala    From Sidechains-SDK   with MIT License 5 votes vote down vote up
package com.horizen.api.http

import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.unmarshalling._
import akka.stream.Materializer
import com.fasterxml.jackson.databind._
import com.fasterxml.jackson.module.scala.DefaultScalaModule

import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.ClassTag
import scala.language.postfixOps

object JacksonSupport {

  private val mapper = new ObjectMapper().registerModule(DefaultScalaModule)

  implicit def JacksonRequestUnmarshaller[T <: AnyRef](implicit c: ClassTag[T]): FromRequestUnmarshaller[T] = {
    new FromRequestUnmarshaller[T] {
      override def apply(request: HttpRequest)(implicit ec: ExecutionContext, materializer: Materializer): Future[T] = {
        Unmarshal(request.entity).to[String].map(str => {
          if (str.isEmpty) mapper.readValue("{}", c.runtimeClass).asInstanceOf[T]
          else mapper.readValue(str, c.runtimeClass).asInstanceOf[T]
        })
      }
    }
  }
} 
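Using the unmarshaller in a route: entity(as[T]) resolves the FromRequestUnmarshaller defined above, and the Materializer it needs is supplied by the routing infrastructure. A sketch with an illustrative payload type (CreateOrder is not part of the SDK):

import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import com.horizen.api.http.JacksonSupport._

case class CreateOrder(amount: Int)

object JacksonRouteSketch {
  // Jackson deserializes the request body into CreateOrder.
  val route: Route = post {
    entity(as[CreateOrder]) { order =>
      complete(s"got ${order.amount}")
    }
  }
}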
Example 68
Source File: HttpRequestFactory.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.http

import akka.http.scaladsl.model.{HttpMethods, HttpRequest}
import akka.http.scaladsl.server.directives.CodingDirectives
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.Materializer
import hydra.core.ingest.RequestParams._
import hydra.core.ingest._
import hydra.core.transport.{AckStrategy, ValidationStrategy}

import scala.concurrent.Future
import scala.util.Success


class HttpRequestFactory
    extends RequestFactory[HttpRequest]
    with CodingDirectives {

  override def createRequest(correlationId: String, request: HttpRequest)(
      implicit mat: Materializer
  ): Future[HydraRequest] = {
    implicit val ec = mat.executionContext

    lazy val vs = request.headers
      .find(_.lowercaseName() == HYDRA_VALIDATION_STRATEGY)
      .map(h => ValidationStrategy(h.value()))
      .getOrElse(ValidationStrategy.Strict)

    lazy val as = request.headers
      .find(_.lowercaseName() == HYDRA_ACK_STRATEGY)
      .map(h => AckStrategy(h.value()))
      .getOrElse(Success(AckStrategy.NoAck))

    lazy val clientId = request.headers
      .find(_.lowercaseName() == HydraClientId)
      .map(_.value().toLowerCase)

    Unmarshal(request.entity).to[String].flatMap { payload =>
      val dPayload = if (request.method == HttpMethods.DELETE) null else payload
      val metadata: Map[String, String] =
        request.headers.map(h => h.name.toLowerCase -> h.value).toMap
      Future
        .fromTry(as)
        .map(ack =>
          HydraRequest(correlationId, dPayload, clientId, metadata, vs, ack)
        )
    }
  }
} 
Example 69
Source File: RequestFactories.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.bootstrap

import akka.http.scaladsl.model.HttpRequest
import akka.stream.Materializer
import hydra.core.ingest.{HydraRequest, RequestFactory}
import hydra.ingest.http.HttpRequestFactory

import scala.concurrent.Future


object RequestFactories {

  implicit object RequestFactoryLikeHttp extends RequestFactory[HttpRequest] {

    override def createRequest(correlationId: String, source: HttpRequest)(
        implicit mat: Materializer
    ): Future[HydraRequest] = {
      implicit val ec = mat.executionContext
      new HttpRequestFactory().createRequest(correlationId, source)
    }
  }

  def createRequest[D](
      correlationId: String,
      source: D
  )(implicit ev: RequestFactory[D], mat: Materializer): Future[HydraRequest] = {
    ev.createRequest(correlationId, source)
  }
} 
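Call-site sketch: the type class instance is selected by the source type, so an HttpRequest plus an implicit Materializer is all a caller needs (the correlation id is illustrative):

import akka.http.scaladsl.model.HttpRequest
import akka.stream.Materializer
import hydra.core.ingest.HydraRequest
import hydra.ingest.bootstrap.RequestFactories
import hydra.ingest.bootstrap.RequestFactories.RequestFactoryLikeHttp

import scala.concurrent.Future

object RequestFactorySketch {
  // RequestFactoryLikeHttp is picked because D = HttpRequest.
  def toHydra(req: HttpRequest)(implicit mat: Materializer): Future[HydraRequest] =
    RequestFactories.createRequest("correlation-1", req)
}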
Example 70
Source File: BootstrapEndpointActors.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.kafka.endpoints

import akka.actor.{ActorRef, ActorSystem}
import akka.stream.{ActorMaterializer, Materializer}
import hydra.avro.registry.ConfluentSchemaRegistry
import hydra.common.config.ConfigSupport
import hydra.core.akka.SchemaRegistryActor
import hydra.kafka.services.{StreamsManagerActor, TopicBootstrapActor}
import hydra.kafka.util.KafkaUtils

import scala.concurrent.ExecutionContext

trait BootstrapEndpointActors extends ConfigSupport {

  implicit val system: ActorSystem

  private[kafka] val kafkaIngestor = system.actorSelection(path =
    applicationConfig.getString("kafka-ingestor-path")
  )

  private[kafka] val schemaRegistryActor =
    system.actorOf(SchemaRegistryActor.props(applicationConfig))

  private[kafka] val bootstrapKafkaConfig =
    applicationConfig.getConfig("bootstrap-config")

  private[kafka] val streamsManagerProps = StreamsManagerActor.props(
    bootstrapKafkaConfig,
    KafkaUtils.BootstrapServers,
    ConfluentSchemaRegistry.forConfig(applicationConfig).registryClient
  )

  val bootstrapActor: ActorRef = system.actorOf(
    TopicBootstrapActor.props(
      schemaRegistryActor,
      kafkaIngestor,
      streamsManagerProps,
      Some(bootstrapKafkaConfig)
    )
  )

} 
Example 71
Source File: DurableEventLogs.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.example.stream

//# durable-event-logs
import akka.actor.{ ActorRef, ActorSystem }
import akka.stream.{ ActorMaterializer, Materializer }
import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog

//#
trait DurableEventLogs {
  //# durable-event-logs
  implicit val system: ActorSystem = ActorSystem("example")
  implicit val materializer: Materializer = ActorMaterializer()

  val logAId = "A"
  val logBId = "B"
  val logCId = "C"

  val logA: ActorRef = createLog(logAId)
  val logB: ActorRef = createLog(logBId)
  val logC: ActorRef = createLog(logCId)

  def createLog(id: String): ActorRef =
    system.actorOf(LeveldbEventLog.props(id))
  //#
} 
Example 72
Source File: AkkaUnitTestLike.scala    From reactive-kinesis   with Apache License 2.0 5 votes vote down vote up
package com.weightwatchers.reactive.kinesis.common

import akka.actor.{ActorSystem, Scheduler}
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKitBase
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.ExecutionContextExecutor


trait AkkaUnitTestLike extends TestKitBase with ScalaFutures with BeforeAndAfterAll {
  self: Suite =>

  implicit lazy val config: Config                = ConfigFactory.load("sample.conf")
  implicit lazy val system: ActorSystem           = ActorSystem(suiteName, config)
  implicit lazy val scheduler: Scheduler          = system.scheduler
  implicit lazy val mat: Materializer             = ActorMaterializer()
  implicit lazy val ctx: ExecutionContextExecutor = system.dispatcher

  abstract override def afterAll(): Unit = {
    super.afterAll()
    // intentionally shutdown the actor system last.
    system.terminate().futureValue
  }
} 
Example 73
Source File: Main.scala    From akka-api-gateway-example   with MIT License 5 votes vote down vote up
package jp.co.dzl.example.akka.api

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.{ ActorMaterializer, Materializer }
import jp.co.dzl.example.akka.api.di.{ ServiceModule, HandlerModule, ConfigModule, AkkaModule }
import jp.co.dzl.example.akka.api.handler.RootHandler
import scaldi.{ Injector, Injectable }

import scala.concurrent.ExecutionContextExecutor

trait MainService extends Injectable {
  implicit val module: Injector =
    new AkkaModule :: new ConfigModule :: new HandlerModule :: new ServiceModule

  implicit val system: ActorSystem = inject[ActorSystem]
  implicit val executor: ExecutionContextExecutor = system.dispatcher
  implicit val materializer: Materializer = ActorMaterializer()

  val host = inject[String](identified by "http.listen.host")
  val port = inject[Int](identified by "http.listen.port")
  val handler = inject[RootHandler]
}

object Main extends App with MainService {
  Http().bindAndHandle(handler.routes, host, port)
} 
Example 74
Source File: ClickhouseHostHealth.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse.balancing.discovery.health

import akka.NotUsed
import akka.actor.{ActorSystem, Cancellable}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.http.scaladsl.unmarshalling.Unmarshaller
import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Source}
import com.crobox.clickhouse.internal.ClickhouseResponseParser

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

object ClickhouseHostHealth extends ClickhouseResponseParser {

  sealed trait ClickhouseHostStatus {
    val host: Uri
    val code: String
  }

  case class Alive(host: Uri) extends ClickhouseHostStatus { override val code: String = "ok" }

  case class Dead(host: Uri, reason: Throwable) extends ClickhouseHostStatus { override val code: String = "nok" }

  
  def healthFlow(host: Uri)(
      implicit system: ActorSystem,
      materializer: Materializer,
      executionContext: ExecutionContext
  ): Source[ClickhouseHostStatus, Cancellable] = {
    val healthCheckInterval: FiniteDuration =
      system.settings.config
        .getDuration("connection.health-check.interval")
        .getSeconds.seconds
    val healthCheckTimeout: FiniteDuration =
      system.settings.config
        .getDuration("connection.health-check.timeout")
        .getSeconds.seconds

    val healthCachedPool = Http(system).cachedHostConnectionPool[Int](
      host.authority.host.address(),
      host.effectivePort,
      settings = ConnectionPoolSettings(system)
        .withMaxConnections(1)
        .withMinConnections(1)
        .withMaxOpenRequests(2)
        .withMaxRetries(3)
        .withUpdatedConnectionSettings(
          _.withIdleTimeout(healthCheckTimeout + healthCheckInterval).withConnectingTimeout(healthCheckTimeout)
        )
    )
    Source
      .tick(0.milliseconds, healthCheckInterval, 0)
      .map(tick => {
        (HttpRequest(method = HttpMethods.GET, uri = host), tick)
      })
      .via(healthCachedPool)
      .via(parsingFlow(host))
  }

  private[health] def parsingFlow[T](
      host: Uri
  )(implicit ec: ExecutionContext, mat: Materializer): Flow[(Try[HttpResponse], T), ClickhouseHostStatus, NotUsed] =
    Flow[(Try[HttpResponse], T)].mapAsync(1) {
      case (Success(response @ akka.http.scaladsl.model.HttpResponse(StatusCodes.OK, _, _, _)), _) =>
        Unmarshaller.stringUnmarshaller(decodeResponse(response).entity)
          .map(splitResponse)
          .map(
            stringResponse =>
              if (stringResponse.equals(Seq("Ok."))) {
                Alive(host)
              } else {
                Dead(host, new IllegalArgumentException(s"Got wrong result $stringResponse"))
            }
          )
      case (Success(response), _) =>
        Future.successful(Dead(host, new IllegalArgumentException(s"Got response with status code ${response.status}")))
      case (Failure(ex), _) =>
        Future.successful(Dead(host, ex))
    }

} 
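The probe pattern in healthFlow, in isolation: a tick source feeding a single cached pooled connection, reusable for any periodic health check. A sketch with placeholder host and interval:

import akka.actor.{ActorSystem, Cancellable}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.stream.scaladsl.Source

import scala.concurrent.duration._
import scala.util.Try

object HealthProbeSketch {
  implicit val system: ActorSystem = ActorSystem("probe-sketch")

  // One pooled connection, probed every 5 seconds; the Int is just a tick tag.
  val pool = Http().cachedHostConnectionPool[Int]("localhost", 8123)

  val probes: Source[(Try[HttpResponse], Int), Cancellable] =
    Source.tick(0.seconds, 5.seconds, 0)
      .map(tick => (HttpRequest(uri = "/"), tick))
      .via(pool)
}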
Example 75
Source File: ClusterConnectionFlow.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse.balancing.discovery.cluster

import akka.actor.{ActorSystem, Cancellable}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor.Connections
import com.crobox.clickhouse.internal.QuerySettings.ReadQueries
import com.crobox.clickhouse.internal.{ClickhouseHostBuilder, ClickhouseQueryBuilder, ClickhouseResponseParser, QuerySettings}
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

private[clickhouse] object ClusterConnectionFlow
    extends ClickhouseQueryBuilder
    with ClickhouseResponseParser
    with LazyLogging {

  def clusterConnectionsFlow(
      targetHost: => Future[Uri],
      scanningInterval: FiniteDuration,
      cluster: String
  )(implicit system: ActorSystem,
    materializer: Materializer,
    ec: ExecutionContext): Source[Connections, Cancellable] = {
    val http = Http(system)
    val settings = ConnectionPoolSettings(system)
      .withMaxConnections(1)
      .withMinConnections(1)
      .withMaxOpenRequests(2)
      .withMaxRetries(3)
      .withUpdatedConnectionSettings(
        _.withIdleTimeout(scanningInterval.plus(1.second))
      )
    Source
      .tick(0.millis, scanningInterval, {})
      .mapAsync(1)(_ => targetHost)
      .mapAsync(1)(host => {
        val query = s"SELECT host_address FROM system.clusters WHERE cluster='$cluster'"
        val request =
          toRequest(host, query, None, QuerySettings(readOnly = ReadQueries, idempotent = Some(true)), None)(
            system.settings.config
          )
        processClickhouseResponse(http.singleRequest(request, settings = settings), query, host, None)
          .map(splitResponse)
          .map(_.toSet.filter(_.nonEmpty))
          .map(result => {
            if (result.isEmpty) {
              throw new IllegalArgumentException(
                s"Could not determine clickhouse cluster hosts for cluster $cluster and host $host. " +
                s"This could indicate that you are trying to use the cluster balancer to connect to a non cluster based clickhouse server. " +
                s"Please use the `SingleHostQueryBalancer` in that case."
              )
            }
            Connections(result.map(ClickhouseHostBuilder.toHost(_, Some(8123))))
          })
      })
  }
} 
Example 76
Source File: HostBalancer.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse.balancing

import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.stream.Materializer
import com.crobox.clickhouse.balancing.Connection.{BalancingHosts, ClusterAware, ConnectionType, SingleHost}
import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor
import com.crobox.clickhouse.balancing.discovery.health.ClickhouseHostHealth
import com.crobox.clickhouse.internal.ClickhouseHostBuilder
import com.typesafe.config.Config
import com.typesafe.scalalogging.LazyLogging

import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

trait HostBalancer extends LazyLogging {

  def nextHost: Future[Uri]

}

object HostBalancer extends ClickhouseHostBuilder {

  def apply(
      optionalConfig: Option[Config] = None
  )(implicit system: ActorSystem, materializer: Materializer, ec: ExecutionContext): HostBalancer = {
    val config = optionalConfig.getOrElse(system.settings.config)
    val connectionConfig = config.getConfig("connection")
    val connectionType           = ConnectionType(connectionConfig.getString("type"))
    val connectionHostFromConfig = extractHost(connectionConfig)
    connectionType match {
      case SingleHost => SingleHostBalancer(connectionHostFromConfig)
      case BalancingHosts =>
        val manager = system.actorOf(
          ConnectionManagerActor
            .props(ClickhouseHostHealth.healthFlow(_))
        )
        MultiHostBalancer(connectionConfig
                            .getConfigList("hosts")
                            .asScala
                            .toSet
                            .map((config: Config) => extractHost(config)),
                          manager)
      case ClusterAware =>
        val manager = system.actorOf(
          ConnectionManagerActor.props(ClickhouseHostHealth.healthFlow(_))
        )
        ClusterAwareHostBalancer(
          connectionHostFromConfig,
          connectionConfig.getString("cluster"),
          manager,
          connectionConfig.getDuration("scanning-interval").getSeconds.seconds
        )(system,
          config.getDuration("host-retrieval-timeout").getSeconds.seconds,
          ec,
          materializer)
    }
  }

  def extractHost(connectionConfig: Config): Uri =
    toHost(connectionConfig.getString("host"),
           if (connectionConfig.hasPath("port")) Option(connectionConfig.getInt("port")) else None)
} 
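
Besides the two managed variants matched above, the simplest connection section resolves to SingleHostBalancer. A sketch, assuming an implicit ActorSystem, Materializer and ExecutionContext in scope (the "single-host" type string is an assumption by analogy with the "balancing-hosts" and "cluster-aware" strings exercised in the test below):

import com.typesafe.config.ConfigFactory

val balancer = HostBalancer(Some(ConfigFactory.parseString(
  """
    |connection: {
    |    type: "single-host"  # assumed type string, see note above
    |    host: "localhost"
    |    port: 8123
    |}
  """.stripMargin)))
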
Example 77
Source File: ClusterAwareHostBalancer.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse.balancing

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.model.Uri
import akka.pattern.ask
import akka.stream.scaladsl.Sink
import akka.stream.{ActorAttributes, Materializer, Supervision}
import akka.util.Timeout
import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor.{GetConnection, LogDeadConnections}
import com.crobox.clickhouse.balancing.discovery.cluster.ClusterConnectionFlow

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}


case class ClusterAwareHostBalancer(host: Uri,
                                    cluster: String = "cluster",
                                    manager: ActorRef,
                                    scanningInterval: FiniteDuration)(
    implicit system: ActorSystem,
    connectionRetrievalTimeout: Timeout,
    ec: ExecutionContext,
    materializer: Materializer
) extends HostBalancer {

  ClusterConnectionFlow
    .clusterConnectionsFlow(Future.successful(host), scanningInterval, cluster)
    .withAttributes(
      ActorAttributes.supervisionStrategy({
        case ex: IllegalArgumentException =>
          logger.error("Failed resolving hosts for cluster, stopping the flow.", ex)
          Supervision.stop
        case ex =>
          logger.error("Failed resolving hosts for cluster, resuming.", ex)
          Supervision.Resume
      })
    )
    .runWith(Sink.actorRef(manager, LogDeadConnections))

  override def nextHost: Future[Uri] =
    (manager ? GetConnection()).mapTo[Uri]
} 
Example 78
Source File: ClickhouseResponseParser.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse.internal

import akka.http.scaladsl.coding.{Deflate, Gzip, NoCoding}
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.{HttpEncoding, HttpEncodings}
import akka.http.scaladsl.unmarshalling.Unmarshaller
import akka.stream.Materializer
import akka.stream.scaladsl.SourceQueue
import com.crobox.clickhouse.internal.progress.QueryProgress.{QueryProgress, _}
import com.crobox.clickhouse.{ClickhouseChunkedException, ClickhouseException}

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

private[clickhouse] trait ClickhouseResponseParser {

  protected def processClickhouseResponse(responseFuture: Future[HttpResponse],
                                          query: String,
                                          host: Uri,
                                          progressQueue: Option[SourceQueue[QueryProgress]])(
      implicit materializer: Materializer,
      executionContext: ExecutionContext
  ): Future[String] =
    responseFuture.flatMap { response =>
      decodeResponse(response) match {
        case HttpResponse(StatusCodes.OK, _, entity, _) =>
          Unmarshaller.stringUnmarshaller(entity).map(content => {
            if (content.contains("DB::Exception")) { //FIXME this is quite a fragile way to detect failures, hopefully nobody will have a valid exception string in the result. Check https://github.com/yandex/ClickHouse/issues/2999
              throw ClickhouseException("Found exception in the query return body",
                                        query,
                                        ClickhouseChunkedException(content),
                                        StatusCodes.OK)
            }
            content
          })
          .andThen {
            case Success(_) =>
              progressQueue.foreach(queue => {
                queue.offer(QueryFinished)
              })
            case Failure(exception) =>
              progressQueue.foreach(queue => {
                queue.offer(QueryFailed(exception))
              })
          }
        case HttpResponse(code, _, entity, _) =>
          progressQueue.foreach(_.offer(QueryRejected))
          Unmarshaller.stringUnmarshaller(entity).flatMap(
            response =>
              Future.failed(
                  ClickhouseException(s"Server [$host] returned code $code; $response", query, statusCode = code)
              )
          )
      }
    }

  protected def decodeResponse(response: HttpResponse): HttpResponse = {
    val decoder = response.encoding match {
      case HttpEncodings.gzip => Gzip
      case HttpEncodings.deflate => Deflate
      case HttpEncodings.identity => NoCoding
      case HttpEncoding(enc) => throw new IllegalArgumentException(s"Unsupported response encoding: $enc")
    }
    decoder.decodeMessage(response)
  }

  protected def splitResponse(response: String): Seq[String] =
    response.split("\n").toSeq
} 
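
Note that splitResponse relies on String.split dropping trailing empty strings, so the newline that terminates a ClickHouse response body does not produce an empty entry:

"host1\nhost2\n".split("\n").toSeq // Seq("host1", "host2"), no trailing empty string
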
Example 79
Source File: HostBalancerTest.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse.balancing

import akka.stream.{ActorMaterializer, Materializer}
import com.crobox.clickhouse.ClickhouseClientSpec
import com.crobox.clickhouse.internal.ClickhouseHostBuilder
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration._

class HostBalancerTest extends ClickhouseClientSpec {

  it should "resolve to single host balancer" in {
    HostBalancer() match {
      case SingleHostBalancer(host) =>
        host shouldEqual ClickhouseHostBuilder.toHost("localhost", Some(8123))
    }
  }

  it should "resolve to multi host balancer" in {
    HostBalancer(Some(ConfigFactory.parseString("""
        |    connection: {
        |        type: "balancing-hosts"
        |        hosts: [
        |          {
        |            host: "localhost",
        |            port: 8123
        |          }
        |        ]
        |        health-check {
        |         timeout = 1 second
        |         interval = 1 second
        |        }
        |    }
        |
      """.stripMargin).withFallback(config.getConfig("crobox.clickhouse.client")))) match {
      case MultiHostBalancer(hosts, _) =>
        hosts.toSeq should contain theSameElementsInOrderAs Seq(ClickhouseHostBuilder.toHost("localhost", Some(8123)))
    }
  }

  it should "resolve to cluster aware host balancer" in {
    HostBalancer(Some(ConfigFactory.parseString("""
        |    connection: {
        |        type: "cluster-aware"
        |        host: "localhost"
        |        port: 8123
        |        cluster: "cluster"
        |        scanning-interval = 1 second
        |        health-check {
        |         timeout = 1 second
        |         interval = 1 second
        |        }
        |    }
        |
      """.stripMargin).withFallback(config.getConfig("crobox.clickhouse.client")))) match {
      case ClusterAwareHostBalancer(host, cluster, _, builtTimeout) =>
        host shouldEqual ClickhouseHostBuilder.toHost("localhost", Some(8123))
        cluster shouldBe "cluster"
        builtTimeout shouldBe (1 second)
    }
  }

} 
Example 80
Source File: ClickhouseClientSpec.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse

import java.util.UUID

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKit
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}
import scala.util.Random

abstract class ClickhouseClientSpec(val config: Config = ConfigFactory.load())
    extends TestKit(ActorSystem("clickhouseClientTestSystem", config.getConfig("crobox.clickhouse.client")))
    with AnyFlatSpecLike
    with Matchers
    with BeforeAndAfterAll
    with ScalaFutures {

  implicit val materializer: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher

  override implicit def patienceConfig: PatienceConfig = PatienceConfig(1.seconds, 50.millis)

  override protected def afterAll(): Unit = {
    try super.afterAll()
    finally Await.result(system.terminate(), 10.seconds)
  }

  def randomUUID: UUID =
    UUID.randomUUID

  def randomString: String =
    Random.alphanumeric.take(10).mkString

  def randomInt: Int =
    Random.nextInt(100000)
} 
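
The ActorMaterializer() above is the Akka 2.5-era API. On Akka 2.6+, where it is deprecated, the equivalent binding is a system-wide materializer (Examples 85 and 87 below use the matFromSystem variant of the same idea):

implicit val materializer: Materializer = Materializer(system)
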
Example 81
Source File: ClickhouseClientAsyncSpec.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.model.Uri
import akka.pattern.ask
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKit
import akka.util.Timeout
import akka.util.Timeout.durationToTimeout
import com.crobox.clickhouse.balancing.HostBalancer
import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor.GetConnection
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest._

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import org.scalatest.flatspec.AsyncFlatSpecLike
import org.scalatest.matchers.should.Matchers

abstract class ClickhouseClientAsyncSpec(val config: Config = ConfigFactory.load())
    extends TestKit(ActorSystem("clickhouseClientAsyncTestSystem", config.getConfig("crobox.clickhouse.client")))
    with AsyncFlatSpecLike
    with Matchers
    with BeforeAndAfterAll
    with BeforeAndAfterEach {

  implicit val timeout: Timeout = 5.second
  implicit val materializer: Materializer = ActorMaterializer()

  override protected def afterAll(): Unit = {
    try super.afterAll()
    finally Await.result(system.terminate(), 10.seconds)
  }

  def requestParallelHosts(balancer: HostBalancer, connections: Int = 10): Future[Seq[Uri]] =
    Future.sequence(
      (1 to connections)
        .map(_ => {
          balancer.nextHost
        })
    )

  def getConnections(manager: ActorRef, connections: Int = 10): Future[Seq[Uri]] =
    Future.sequence(
      (1 to connections)
        .map(_ => {
          (manager ? GetConnection()).mapTo[Uri]
        })
    )

  // TODO change these methods to custom matchers
  def returnsConnectionsInRoundRobinFashion(manager: ActorRef, expectedConnections: Set[Uri]): Future[Assertion] = {
    val RequestConnectionsPerHost = 100
    getConnections(manager, RequestConnectionsPerHost * expectedConnections.size)
      .map(connections => {
        expectedConnections.foreach(
          uri =>
            connections
              .count(_ == uri) shouldBe (RequestConnectionsPerHost +- RequestConnectionsPerHost / 10) //10% delta for warm-up phase
        )
        succeed
      })
  }

} 
Example 82
Source File: AuthenticationProviderSTS.scala    From rokku   with Apache License 2.0 5 votes vote down vote up
package com.ing.wbaa.rokku.proxy.provider

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.model.{ HttpRequest, StatusCodes, Uri }
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.Materializer
import com.ing.wbaa.rokku.proxy.config.StsSettings
import com.ing.wbaa.rokku.proxy.data.{ AwsRequestCredential, JsonProtocols, RequestId, User, UserRawJson }
import com.ing.wbaa.rokku.proxy.handler.LoggerHandlerWithId
import com.ing.wbaa.rokku.proxy.util.JwtToken

import scala.concurrent.{ ExecutionContext, Future }

trait AuthenticationProviderSTS extends JsonProtocols with JwtToken {

  private val logger = new LoggerHandlerWithId

  import AuthenticationProviderSTS.STSException
  import spray.json._

  protected[this] implicit def system: ActorSystem
  protected[this] implicit def executionContext: ExecutionContext
  protected[this] implicit def materializer: Materializer

  protected[this] def stsSettings: StsSettings

  protected[this] def areCredentialsActive(awsRequestCredential: AwsRequestCredential)(implicit id: RequestId): Future[Option[User]] = {
    val QueryParameters =
      Map("accessKey" -> awsRequestCredential.accessKey.value) ++
        awsRequestCredential.sessionToken.map(s => "sessionToken" -> s.value)

    val uri = stsSettings.stsBaseUri
      .withPath(Uri.Path("/isCredentialActive"))
      .withQuery(Uri.Query(QueryParameters))

    Http()
      .singleRequest(
        HttpRequest(uri = uri)
          .addHeader(RawHeader("Authorization", createInternalToken))
          .addHeader(RawHeader("x-rokku-request-id", id.value))
      )
      .flatMap { response =>
        response.status match {

          case StatusCodes.OK =>
            Unmarshal(response.entity).to[String].map { jsonString =>
              Some(User(jsonString.parseJson.convertTo[UserRawJson]))
            }

          case StatusCodes.Forbidden =>
            logger.error(s"User not authenticated " +
              s"with accessKey (${awsRequestCredential.accessKey.value}) " +
              s"and sessionToken (${awsRequestCredential.sessionToken})")
            Future.successful(None)

          case c =>
            val msg = s"Received unexpected StatusCode ($c) for " +
              s"accessKey (${awsRequestCredential.accessKey.value}) " +
              s"and sessionToken (${awsRequestCredential.sessionToken})"
            logger.error(msg)
            Future.failed(STSException(msg))
        }
      }
  }
}

object AuthenticationProviderSTS {
  final case class STSException(private val message: String, private val cause: Throwable = None.orNull)
    extends Exception(message, cause)
} 
Example 83
Source File: FilterRecursiveListBucketHandlerSpec.scala    From rokku   with Apache License 2.0 5 votes vote down vote up
package com.ing.wbaa.rokku.proxy.handler

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.model.{ HttpMethods, MediaTypes, RemoteAddress, Uri }
import akka.stream.scaladsl.{ Sink, Source }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.util.ByteString
import com.ing.wbaa.rokku.proxy.data._
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AsyncWordSpec

import scala.concurrent.ExecutionContext

class FilterRecursiveListBucketHandlerSpec extends AsyncWordSpec with Diagrams with FilterRecursiveListBucketHandler {

  implicit val system: ActorSystem = ActorSystem.create("test-system")
  override implicit val executionContext: ExecutionContext = system.dispatcher
  implicit val requestId: RequestId = RequestId("test")

  implicit def materializer: Materializer = ActorMaterializer()(system)

  def isUserAuthorizedForRequest(request: S3Request, user: User)(implicit id: RequestId): Boolean = {
    user match {
      case User(userName, _, _, _, _) if userName.value == "admin" => true
      case User(userName, _, _, _, _) if userName.value == "user1" =>
        request match {
          case S3Request(_, s3BucketPath, _, _, _, _, _) =>
            if (s3BucketPath.get.startsWith("/demobucket/user/user2")) false else true
        }
      case _ => true
    }
  }

  val listBucketXmlResponse: String = scala.io.Source.fromResource("listBucket.xml").mkString.stripMargin.trim

  val adminUser = User(UserRawJson("admin", Some(Set.empty[String]), "a", "s", None))
  val user1 = User(UserRawJson("user1", Some(Set.empty[String]), "a", "s", None))
  val s3Request = S3Request(AwsRequestCredential(AwsAccessKey(""), None), Uri.Path("/demobucket/user"), HttpMethods.GET, RemoteAddress.Unknown, HeaderIPs(), MediaTypes.`text/plain`)
  val data: Source[ByteString, NotUsed] = Source.single(ByteString.fromString(listBucketXmlResponse))

  "List bucket object response" should {
    "returns all objects to admin" in {
      data.via(filterRecursiveListObjects(adminUser, s3Request)).map(_.utf8String).runWith(Sink.seq).map(x => {
        assert(x.mkString.stripMargin.equals(listBucketXmlResponse))
      })
    }

    val filteredXml: String = scala.io.Source.fromResource("filteredListBucket.xml").mkString.stripMargin.trim
    "returns filtered object for user 1" in {
      data.via(filterRecursiveListObjects(user1, s3Request)).map(_.utf8String).runWith(Sink.seq).map(x => {
        assert(x.mkString.stripMargin.replaceAll("[\n\r\\s]", "")
          .equals(filteredXml.replaceAll("[\n\r\\s]", "")))
      })
    }
  }
} 
Example 84
Source File: StandaloneWSClientSupport.scala    From play-ws   with Apache License 2.0 5 votes vote down vote up
package play.libs.ws.ahc

import akka.stream.Materializer
import org.specs2.execute.Result
import play.api.libs.ws.ahc.AhcConfigBuilder
import play.api.libs.ws.ahc.AhcWSClientConfig
import play.api.libs.ws.ahc.{ AhcWSClientConfigFactory => ScalaAhcWSClientConfigFactory }
import play.shaded.ahc.org.asynchttpclient.DefaultAsyncHttpClient

trait StandaloneWSClientSupport {

  def materializer: Materializer

  def withClient(
      config: AhcWSClientConfig = ScalaAhcWSClientConfigFactory.forConfig()
  )(block: StandaloneAhcWSClient => Result): Result = {
    val asyncHttpClient = new DefaultAsyncHttpClient(new AhcConfigBuilder(config).build())
    val client          = new StandaloneAhcWSClient(asyncHttpClient, materializer)
    try {
      block(client)
    } finally {
      client.close()
    }
  }
} 
Example 85
Source File: JsonRequestSpec.scala    From play-ws   with Apache License 2.0 5 votes vote down vote up
package play.api.libs.ws.ahc

import java.nio.charset.StandardCharsets

import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.util.ByteString
import org.mockito.Mockito.times
import org.mockito.Mockito.verify
import org.mockito.Mockito.when
import org.specs2.mock.Mockito

import org.specs2.mutable.Specification
import org.specs2.specification.AfterAll
import play.api.libs.json.JsString
import play.api.libs.json.JsValue
import play.api.libs.json.Json
import play.api.libs.ws.JsonBodyReadables
import play.api.libs.ws.JsonBodyWritables
import play.libs.ws.DefaultObjectMapper
import play.shaded.ahc.org.asynchttpclient.Response

import scala.io.Codec


class JsonRequestSpec extends Specification with Mockito with AfterAll with JsonBodyWritables {
  sequential

  implicit val system       = ActorSystem()
  implicit val materializer = Materializer.matFromSystem

  override def afterAll: Unit = {
    system.terminate()
  }

  "set a json node" in {
    val jsValue = Json.obj("k1" -> JsString("v1"))
    val client  = mock[StandaloneAhcWSClient]
    val req = new StandaloneAhcWSRequest(client, "http://playframework.com/", null)
      .withBody(jsValue)
      .asInstanceOf[StandaloneAhcWSRequest]
      .buildRequest()

    req.getHeaders.get("Content-Type") must be_==("application/json")
    ByteString.fromArray(req.getByteData).utf8String must be_==("""{"k1":"v1"}""")
  }

  "set a json node using the default object mapper" in {
    val objectMapper = DefaultObjectMapper.instance

    implicit val jsonReadable = body(objectMapper)
    val jsonNode              = objectMapper.readTree("""{"k1":"v1"}""")
    val client                = mock[StandaloneAhcWSClient]
    val req = new StandaloneAhcWSRequest(client, "http://playframework.com/", null)
      .withBody(jsonNode)
      .asInstanceOf[StandaloneAhcWSRequest]
      .buildRequest()

    req.getHeaders.get("Content-Type") must be_==("application/json")
    ByteString.fromArray(req.getByteData).utf8String must be_==("""{"k1":"v1"}""")
  }

  "read an encoding of UTF-8" in {
    val json = io.Source.fromResource("test.json")(Codec.ISO8859).getLines.mkString

    val ahcResponse = mock[Response]
    val response    = new StandaloneAhcWSResponse(ahcResponse)

    when(ahcResponse.getResponseBody(StandardCharsets.UTF_8)).thenReturn(json)
    when(ahcResponse.getContentType).thenReturn("application/json")

    val value: JsValue = JsonBodyReadables.readableAsJson.transform(response)
    verify(ahcResponse, times(1)).getResponseBody(StandardCharsets.UTF_8)
    verify(ahcResponse, times(1)).getContentType
    value.toString must beEqualTo(json)
  }

  "read an encoding of ISO-8859-1" in {
    val json = io.Source.fromResource("test.json")(Codec.ISO8859).getLines.mkString

    val ahcResponse = mock[Response]
    val response    = new StandaloneAhcWSResponse(ahcResponse)

    when(ahcResponse.getResponseBody(StandardCharsets.ISO_8859_1)).thenReturn(json)
    when(ahcResponse.getContentType).thenReturn("application/json;charset=iso-8859-1")

    val value: JsValue = JsonBodyReadables.readableAsJson.transform(response)
    verify(ahcResponse, times(1)).getResponseBody(StandardCharsets.ISO_8859_1)
    verify(ahcResponse, times(1)).getContentType
    value.toString must beEqualTo(json)
  }
} 
Example 86
Source File: StandaloneWSClientSupport.scala    From play-ws   with Apache License 2.0 5 votes vote down vote up
package play.api.libs.ws.ahc

import akka.stream.Materializer
import org.specs2.execute.Result

trait StandaloneWSClientSupport {

  def materializer: Materializer

  def withClient(
      config: AhcWSClientConfig = AhcWSClientConfigFactory.forConfig()
  )(block: StandaloneAhcWSClient => Result): Result = {
    val client = StandaloneAhcWSClient(config)(materializer)
    try {
      block(client)
    } finally {
      client.close()
    }
  }
} 
Example 87
Source File: AkkaServerProvider.scala    From play-ws   with Apache License 2.0 5 votes vote down vote up
package play

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Route
import org.specs2.concurrent.ExecutionEnv
import org.specs2.specification.BeforeAfterAll

import scala.concurrent.duration._
import scala.concurrent.Await
import scala.concurrent.Future
import akka.stream.Materializer

trait AkkaServerProvider extends BeforeAfterAll {

  
  def executionEnv: ExecutionEnv

  var testServerPort: Int            = _
  val defaultTimeout: FiniteDuration = 5.seconds

  // Create Akka system for thread and streaming management
  implicit val system       = ActorSystem()
  implicit val materializer = Materializer.matFromSystem

  lazy val futureServer: Future[Http.ServerBinding] = {
    // Using 0 (zero) means that a random free port will be used.
    // So our tests can run in parallel and won't mess with each other.
    Http().bindAndHandle(routes, "localhost", 0)
  }

  override def beforeAll(): Unit = {
    val portFuture = futureServer.map(_.localAddress.getPort)(executionEnv.executionContext)
    portFuture.foreach(port => testServerPort = port)(executionEnv.executionContext)
    Await.ready(portFuture, defaultTimeout)
  }

  override def afterAll(): Unit = {
    futureServer.foreach(_.unbind())(executionEnv.executionContext)
    val terminate = system.terminate()
    Await.ready(terminate, defaultTimeout)
  }
} 
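
Http().bindAndHandle was deprecated in Akka HTTP 10.2; on newer versions the same random-port binding reads:

lazy val futureServer: Future[Http.ServerBinding] =
  Http().newServerAt("localhost", 0).bind(routes)
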
Example 88
Source File: StandaloneAhcWSRequestBenchMapsBench.scala    From play-ws   with Apache License 2.0 5 votes vote down vote up
package play.api.libs.ws.ahc

import java.util.concurrent.TimeUnit

import akka.stream.Materializer
import org.openjdk.jmh.annotations._
import org.openjdk.jmh.infra.Blackhole
import play.api.libs.ws.StandaloneWSRequest


@OutputTimeUnit(TimeUnit.NANOSECONDS)
@BenchmarkMode(Array(Mode.AverageTime))
@Fork(jvmArgsAppend = Array("-Xmx350m", "-XX:+HeapDumpOnOutOfMemoryError"), value = 1)
@State(Scope.Benchmark)
class StandaloneAhcWSRequestBenchMapsBench {

  private implicit val materializer: Materializer = null // we're not actually going to execute anything.
  private var exampleRequest: StandaloneWSRequest = _

  @Param(Array("1", "10", "100", "1000", "10000"))
  private var size: Int = _

  @Setup def setup(): Unit = {
    val params = (1 to size)
      .map(_.toString)
      .map(s => s -> s)

    exampleRequest = StandaloneAhcWSRequest(new StandaloneAhcWSClient(null), "https://www.example.com")
      .addQueryStringParameters(params: _*)
      .addHttpHeaders(params: _*)
  }

  @Benchmark
  def addQueryParams(bh: Blackhole): Unit = {
    bh.consume(exampleRequest.addQueryStringParameters("nthParam" -> "nthParam"))
  }

  @Benchmark
  def addHeaders(bh: Blackhole): Unit = {
    bh.consume(exampleRequest.addHttpHeaders("nthHeader" -> "nthHeader"))
  }
} 
Example 89
Source File: WebSocket.scala    From trucking-iot   with Apache License 2.0 5 votes vote down vote up
package controllers

import javax.inject.{Inject, Singleton}

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.scaladsl.Sink
import akka.stream.{Materializer, ThrottleMode}
import com.typesafe.config.ConfigFactory
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer}
import play.api.libs.streams.ActorFlow
import play.api.mvc.{Controller, WebSocket}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._


//@Singleton
class KafkaWebSocket @Inject() (implicit system: ActorSystem, materializer: Materializer) extends Controller {

  def kafkaWS = WebSocket.accept[String, String] { request =>
    ActorFlow.actorRef(out => KafkaWSActor.props(out))
  }

  object KafkaWSActor {
    def props(outRef: ActorRef) = Props(new KafkaWSActor(outRef))
  }

  class KafkaWSActor(outRef: ActorRef) extends Actor {

    val config = ConfigFactory.load()
    val combinedConfig = ConfigFactory.defaultOverrides()
      .withFallback(config)
      .withFallback(ConfigFactory.defaultApplication())
      .getConfig("trucking-web-application.backend")

    val consumerSettings = ConsumerSettings(system, new ByteArrayDeserializer, new StringDeserializer)
      //.withBootstrapServers("sandbox-hdf.hortonworks.com:6667")
      .withBootstrapServers(combinedConfig.getString("kafka.bootstrap-servers"))
      .withGroupId("group1")
      .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

    Consumer.committableSource(consumerSettings, Subscriptions.topics("trucking_data_joined"))
      .mapAsync(1) { msg => Future(outRef ! msg.record.value).map(_ => msg) }
      //.mapAsync(1) { msg => msg.committableOffset.commitScaladsl() } // TODO: Disabling commits for debug
      .throttle(1, 250.milliseconds, 1, ThrottleMode.Shaping)
      .runWith(Sink.ignore)

    def receive = {
      case msg: String => outRef ! s"Ack: $msg"
    }
  }

} 
Example 90
Source File: AuthenticationFilter.scala    From maha   with Apache License 2.0 5 votes vote down vote up
// Copyright 2018, Oath Inc.
// Licensed under the terms of the Apache License 2.0. Please see LICENSE file in project root for terms.
package filter
import akka.stream.Materializer
import com.google.common.base.Charsets
import com.google.common.hash.Hashing
import com.yahoo.maha.core.auth.{AuthValidator, ValidationResult}
import play.api.Logger
import play.api.mvc._

import scala.concurrent.Future
import scala.util.{Failure, Success, Try}

class AuthenticationFilter(authValidator: AuthValidator)(implicit val mat: Materializer) extends Filter {

  private val routesWhichRequireAuth : Set[String] = Set("/segments", "/overlord/workers", "/lookups", "/kill/segments")

  def apply(nextFilter: RequestHeader => Future[Result])
           (requestHeader: RequestHeader): Future[Result] = {

    if(routesWhichRequireAuth.contains(requestHeader.path)) {
      Try {
        val result: ValidationResult = authValidator.validate(requestHeader)
        result
      } match {
        case Success(result) =>
          val requestHeaderWithId = requestHeader.copy(tags = requestHeader.tags + ("X-Request-Id" -> generateRequestId(requestHeader))
            + ("userId" -> result.user.getOrElse("Authorized User")))
          nextFilter(requestHeaderWithId)
        case Failure(e) =>
          Logger.error(s"Exception while authenticating user", e)
          val result: Result = authValidator.handleAuthFailure(requestHeader)
          Future.successful(result)
      }
    } else {
      Logger.debug(s"no auth required for path : ${requestHeader.path}")
      nextFilter(requestHeader)
    }
  }

  private def generateRequestId(requestHeader: RequestHeader): String = {
    return s" ${Hashing.goodFastHash(128).newHasher.putString(requestHeader.path + requestHeader.queryString, Charsets.UTF_8).hash.asLong}-${System.nanoTime}"
  }

} 
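
The requestHeader.copy(tags = ...) call is the Play 2.5 API; later Play versions replaced request tags with typed attributes. A sketch of the same idea on Play 2.6+ (the TypedKey names are hypothetical, not part of this project):

import play.api.libs.typedmap.TypedKey

val RequestIdAttr = TypedKey[String]("X-Request-Id") // hypothetical key
val UserIdAttr    = TypedKey[String]("userId")       // hypothetical key

val requestHeaderWithId = requestHeader
  .addAttr(RequestIdAttr, generateRequestId(requestHeader))
  .addAttr(UserIdAttr, result.user.getOrElse("Authorized User"))
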
Example 91
Source File: Error.scala    From akka-http-oauth2-client   with Apache License 2.0 5 votes vote down vote up
package com.github.dakatsuka.akka.http.oauth2.client

import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.Materializer
import com.github.dakatsuka.akka.http.oauth2.client.utils.JsonUnmarshaller
import io.circe.Decoder

import scala.concurrent.{ ExecutionContext, Future }

object Error {
  sealed abstract class Code(val value: String)
  case object InvalidRequest       extends Code("invalid_request")
  case object InvalidClient        extends Code("invalid_client")
  case object InvalidToken         extends Code("invalid_token")
  case object InvalidGrant         extends Code("invalid_grant")
  case object InvalidScope         extends Code("invalid_scope")
  case object UnsupportedGrantType extends Code("unsupported_grant_type")
  case object Unknown              extends Code("unknown")

  object Code {
    def fromString(code: String): Code = code match {
      case "invalid_request"        => InvalidRequest
      case "invalid_client"         => InvalidClient
      case "invalid_token"          => InvalidToken
      case "invalid_grant"          => InvalidGrant
      case "invalid_scope"          => InvalidScope
      case "unsupported_grant_type" => UnsupportedGrantType
      case _                        => Unknown
    }
  }

  class UnauthorizedException(val code: Code, val description: String, val response: HttpResponse)
      extends RuntimeException(s"$code: $description")

  object UnauthorizedException extends JsonUnmarshaller {
    case class UnauthorizedResponse(error: String, errorDescription: String)

    implicit def decoder: Decoder[UnauthorizedResponse] = Decoder.instance { c =>
      for {
        error       <- c.downField("error").as[String].right
        description <- c.downField("error_description").as[String].right
      } yield UnauthorizedResponse(error, description)
    }

    def fromHttpResponse(response: HttpResponse)(implicit ec: ExecutionContext, mat: Materializer): Future[UnauthorizedException] = {
      Unmarshal(response).to[UnauthorizedResponse].map { r =>
        new UnauthorizedException(Code.fromString(r.error), r.errorDescription, response)
      }
    }
  }
} 
Example 92
Source File: AccessToken.scala    From akka-http-oauth2-client   with Apache License 2.0 5 votes vote down vote up
package com.github.dakatsuka.akka.http.oauth2.client

import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.Materializer
import com.github.dakatsuka.akka.http.oauth2.client.utils.JsonUnmarshaller
import io.circe.Decoder

import scala.concurrent.Future

case class AccessToken(
    accessToken: String,
    tokenType: String,
    expiresIn: Int,
    refreshToken: Option[String]
)

object AccessToken extends JsonUnmarshaller {
  implicit def decoder: Decoder[AccessToken] = Decoder.instance { c =>
    for {
      accessToken  <- c.downField("access_token").as[String].right
      tokenType    <- c.downField("token_type").as[String].right
      expiresIn    <- c.downField("expires_in").as[Int].right
      refreshToken <- c.downField("refresh_token").as[Option[String]].right
    } yield AccessToken(accessToken, tokenType, expiresIn, refreshToken)
  }

  def apply(response: HttpResponse)(implicit mat: Materializer): Future[AccessToken] = {
    Unmarshal(response).to[AccessToken]
  }
} 
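
A usage sketch: exchanging a token-endpoint response for an AccessToken, assuming an implicit ActorSystem, Materializer and ExecutionContext in scope (tokenRequest is a hypothetical HttpRequest aimed at the provider's token endpoint):

import akka.http.scaladsl.Http
import scala.concurrent.Future

val token: Future[AccessToken] =
  Http().singleRequest(tokenRequest).flatMap(AccessToken(_))
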
Example 93
Source File: Client.scala    From akka-http-oauth2-client   with Apache License 2.0 5 votes vote down vote up
package com.github.dakatsuka.akka.http.oauth2.client

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse, Uri }
import akka.stream.Materializer
import akka.stream.scaladsl.{ Flow, Sink }
import com.github.dakatsuka.akka.http.oauth2.client.Error.UnauthorizedException
import com.github.dakatsuka.akka.http.oauth2.client.strategy.Strategy

import scala.concurrent.{ ExecutionContext, Future }

class Client(config: ConfigLike, connection: Option[Flow[HttpRequest, HttpResponse, _]] = None)(implicit system: ActorSystem)
    extends ClientLike {
  def getAuthorizeUrl[A <: GrantType](grant: A, params: Map[String, String] = Map.empty)(implicit s: Strategy[A]): Option[Uri] =
    s.getAuthorizeUrl(config, params)

  def getAccessToken[A <: GrantType](
      grant: A,
      params: Map[String, String] = Map.empty
  )(implicit s: Strategy[A], ec: ExecutionContext, mat: Materializer): Future[Either[Throwable, AccessToken]] = {
    val source = s.getAccessTokenSource(config, params)

    source
      .via(connection.getOrElse(defaultConnection))
      .mapAsync(1)(handleError)
      .mapAsync(1)(AccessToken.apply)
      .runWith(Sink.head)
      .map(Right.apply)
      .recover {
        case ex => Left(ex)
      }
  }

  def getConnectionWithAccessToken(accessToken: AccessToken): Flow[HttpRequest, HttpResponse, _] =
    Flow[HttpRequest]
      .map(_.addCredentials(OAuth2BearerToken(accessToken.accessToken)))
      .via(connection.getOrElse(defaultConnection))

  private def defaultConnection: Flow[HttpRequest, HttpResponse, _] =
    config.site.getScheme match {
      case "http"  => Http().outgoingConnection(config.getHost, config.getPort)
      case "https" => Http().outgoingConnectionHttps(config.getHost, config.getPort)
    }

  private def handleError(response: HttpResponse)(implicit ec: ExecutionContext, mat: Materializer): Future[HttpResponse] = {
    if (response.status.isFailure()) UnauthorizedException.fromHttpResponse(response).flatMap(Future.failed(_))
    else Future.successful(response)
  }
}

object Client {
  def apply(config: ConfigLike)(implicit system: ActorSystem): Client =
    new Client(config)

  def apply(config: ConfigLike, connection: Flow[HttpRequest, HttpResponse, _])(implicit system: ActorSystem): Client =
    new Client(config, Some(connection))
} 
Example 94
Source File: AccessTokenSpec.scala    From akka-http-oauth2-client   with Apache License 2.0 5 votes vote down vote up
package com.github.dakatsuka.akka.http.oauth2.client

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{ HttpEntity, HttpResponse, StatusCodes }
import akka.http.scaladsl.model.ContentTypes.`application/json`
import akka.stream.{ ActorMaterializer, Materializer }
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ BeforeAndAfterAll, DiagrammedAssertions, FlatSpec }

import scala.concurrent.{ Await, ExecutionContext }
import scala.concurrent.duration.Duration

class AccessTokenSpec extends FlatSpec with DiagrammedAssertions with ScalaFutures with BeforeAndAfterAll {
  implicit val system: ActorSystem        = ActorSystem()
  implicit val ec: ExecutionContext       = system.dispatcher
  implicit val materializer: Materializer = ActorMaterializer()
  implicit val defaultPatience: PatienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(700, Millis))

  override def afterAll(): Unit = {
    Await.ready(system.terminate(), Duration.Inf)
  }

  behavior of "AccessToken"

  it should "apply from HttpResponse" in {
    val accessToken  = "xxx"
    val tokenType    = "bearer"
    val expiresIn    = 86400
    val refreshToken = "yyy"

    val httpResponse = HttpResponse(
      status = StatusCodes.OK,
      headers = Nil,
      entity = HttpEntity(
        `application/json`,
        s"""
           |{
           |  "access_token": "$accessToken",
           |  "token_type": "$tokenType",
           |  "expires_in": $expiresIn,
           |  "refresh_token": "$refreshToken"
           |}
         """.stripMargin
      )
    )

    val result = AccessToken(httpResponse)

    whenReady(result) { token =>
      assert(token.accessToken == accessToken)
      assert(token.tokenType == tokenType)
      assert(token.expiresIn == expiresIn)
      assert(token.refreshToken.contains(refreshToken))
    }
  }
} 
Example 95
Source File: Unmarshallers.scala    From JustinDB   with Apache License 2.0 5 votes vote down vote up
package justin.httpapi

import java.util.UUID

import akka.http.scaladsl.unmarshalling._
import akka.stream.Materializer
import spray.json.{JsString, JsValue, JsonFormat, _}

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

object Unmarshallers {

  implicit val UuidFormat = new JsonFormat[UUID] {
    override def read(json: JsValue): UUID = {
      json match {
        case JsString(uuid) => Try(UUID.fromString(uuid)) match {
          case Success(parsedUuid) => parsedUuid
          case Failure(_)          => deserializationError("UUID could not be created from given string")
        }
        case _ => deserializationError("UUID could not be converted to UUID object.")
      }
    }
    override def write(obj: UUID): JsValue = JsString(obj.toString)
  }

  object UUIDUnmarshaller extends FromStringUnmarshaller[UUID] {
    override def apply(value: String)(implicit ec: ExecutionContext, materializer: Materializer): Future[UUID] = {
      Future.apply(UUID.fromString(value))
    }
  }
} 
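
A sketch of plugging UUIDUnmarshaller into an akka-http route through the explicit-unmarshaller form of the parameter directive (the route shape is illustrative):

import akka.http.scaladsl.server.Directives._
import justin.httpapi.Unmarshallers

val route =
  parameter("id".as(Unmarshallers.UUIDUnmarshaller)) { uuid =>
    complete(uuid.toString)
  }
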
Example 96
Source File: JustinDB.scala    From JustinDB   with Apache License 2.0 5 votes vote down vote up
package justin.db

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.cluster.http.management.ClusterHttpManagement
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.stream.{ActorMaterializer, Materializer}
import buildinfo.BuildInfo
import com.typesafe.scalalogging.StrictLogging
import justin.db.actors.{StorageNodeActor, StorageNodeActorRef}
import justin.db.client.ActorRefStorageNodeClient
import justin.db.cluster.datacenter.Datacenter
import justin.db.consistenthashing.{NodeId, Ring}
import justin.db.replica.N
import justin.db.storage.PluggableStorageProtocol
import justin.db.storage.provider.StorageProvider
import justin.httpapi.{BuildInfoRouter, HealthCheckRouter, HttpRouter}

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Promise}
import scala.language.reflectiveCalls

// $COVERAGE-OFF$
final class JustinDB

object JustinDB extends StrictLogging {

  private[this] def validConfiguration(justinDBConfig: JustinDBConfig): Unit = {
    require(justinDBConfig.replication.N > 0, "replication N factor can't be smaller or equal 0")
    require(justinDBConfig.ring.`members-count` > 0, "members-counter can't be smaller or equal 0")
    require(justinDBConfig.ring.partitions > 0, "ring partitions can't be smaller or equal 0")
    require(justinDBConfig.ring.partitions >= justinDBConfig.ring.`members-count`, "number of ring partitions can't be smaller than number of members-count")
    require(justinDBConfig.replication.N <= justinDBConfig.ring.`members-count`, "replication N factor can't be bigger than defined members-count number")
  }

  private[this] def initStorage(justinConfig: JustinDBConfig) = {
    val provider = StorageProvider.apply(justinConfig.storage.provider)
    logger.info("Storage provider: " + provider.name)
    provider.init
  }

  def init(justinConfig: JustinDBConfig)(implicit actorSystem: ActorSystem): JustinDB = {
    validConfiguration(justinConfig)

    val processOrchestrator = Promise[JustinDB]

    implicit val executor: ExecutionContext = actorSystem.dispatcher
    implicit val materializer: Materializer = ActorMaterializer()

    val storage: PluggableStorageProtocol = initStorage(justinConfig)

    val cluster = Cluster(actorSystem)

    cluster.registerOnMemberUp {
      // STORAGE ACTOR
      val storageNodeActorRef = StorageNodeActorRef {
        val nodeId     = NodeId(justinConfig.`kubernetes-hostname`.split("-").last.toInt)
        val ring       = Ring(justinConfig.ring.`members-count`, justinConfig.ring.partitions)
        val n          = N(justinConfig.replication.N)
        val datacenter = Datacenter(justinConfig.dc.`self-data-center`)

        actorSystem.actorOf(
          props = StorageNodeActor.props(nodeId, datacenter, storage, ring, n),
          name  = StorageNodeActor.name(nodeId, datacenter)
        )
      }

      // AKKA-MANAGEMENT
      ClusterHttpManagement(cluster).start().map { _ =>
        logger.info("Cluster HTTP-Management is ready!")
      }.recover { case ex => processOrchestrator.failure(ex) }

      // HTTP API
      val routes = logRequestResult(actorSystem.name) {
        new HttpRouter(new ActorRefStorageNodeClient(storageNodeActorRef)).routes ~
          new HealthCheckRouter().routes ~
          new BuildInfoRouter().routes(BuildInfo.toJson)
      }
      Http()
        .bindAndHandle(routes, justinConfig.http.interface, justinConfig.http.port)
        .map { binding => logger.info(s"HTTP server started at ${binding.localAddress}"); processOrchestrator.trySuccess(new JustinDB) }
        .recover { case ex => logger.error("Could not start HTTP server", ex); processOrchestrator.failure(ex) }
    }

    Await.result(processOrchestrator.future, 2.minutes)
  }
}
// $COVERAGE-ON$ 
Example 97
Source File: HttpMetricsRoute.scala    From akka-http-metrics   with Apache License 2.0 5 votes vote down vote up
package fr.davit.akka.http.metrics.core.scaladsl.server

import akka.NotUsed
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.http.scaladsl.server._
import akka.http.scaladsl.settings.{ParserSettings, RoutingSettings}
import akka.stream.Materializer
import akka.stream.scaladsl.Flow
import fr.davit.akka.http.metrics.core.HttpMetricsHandler
import fr.davit.akka.http.metrics.core.scaladsl.model.PathLabelHeader

import scala.concurrent.{ExecutionContextExecutor, Future}

object HttpMetricsRoute {

  implicit def apply(route: Route): HttpMetricsRoute = new HttpMetricsRoute(route)

}


final class HttpMetricsRoute private (route: Route) extends HttpMetricsDirectives {

  private def markUnhandled(inner: Route): Route = {
    Directives.mapResponse(markUnhandled).tapply(_ => inner)
  }

  private def markUnhandled(response: HttpResponse): HttpResponse = {
    response.addHeader(PathLabelHeader.Unhandled)
  }

  def recordMetrics(metricsHandler: HttpMetricsHandler)(
      implicit
      routingSettings: RoutingSettings,
      parserSettings: ParserSettings,
      materializer: Materializer,
      routingLog: RoutingLog,
      executionContext: ExecutionContextExecutor = null,
      rejectionHandler: RejectionHandler = RejectionHandler.default,
      exceptionHandler: ExceptionHandler = null
  ): Flow[HttpRequest, HttpResponse, NotUsed] = {
    val effectiveEC = if (executionContext ne null) executionContext else materializer.executionContext

    {
      // override the execution context passed as parameter
      implicit val executionContext: ExecutionContextExecutor = effectiveEC
      Flow[HttpRequest]
        .mapAsync(1)(recordMetricsAsync(metricsHandler))
        .watchTermination() {
          case (mat, completion) =>
            // every connection materializes a stream.
            metricsHandler.onConnection(completion)
            mat
        }
    }
  }

  def recordMetricsAsync(metricsHandler: HttpMetricsHandler)(
      implicit
      routingSettings: RoutingSettings,
      parserSettings: ParserSettings,
      materializer: Materializer,
      routingLog: RoutingLog,
      executionContext: ExecutionContextExecutor = null,
      rejectionHandler: RejectionHandler = RejectionHandler.default,
      exceptionHandler: ExceptionHandler = null
  ): HttpRequest => Future[HttpResponse] = {
    val effectiveEC               = if (executionContext ne null) executionContext else materializer.executionContext
    val effectiveRejectionHandler = rejectionHandler.mapRejectionResponse(markUnhandled)
    val effectiveExceptionHandler = ExceptionHandler.seal(exceptionHandler).andThen(markUnhandled(_))

    {
      // override the execution context passed as parameter, rejection and error handler
      implicit val executionContext: ExecutionContextExecutor = effectiveEC
      implicit val rejectionHandler: RejectionHandler         = effectiveRejectionHandler
      implicit val exceptionHandler: ExceptionHandler         = effectiveExceptionHandler

      request =>
        val response = Route.asyncHandler(route).apply(request)
        metricsHandler.onRequest(request, response)
        response
    }
  }
} 
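
Thanks to the implicit conversion in the companion object, a plain Route can be enriched and bound directly. A binding sketch, with the route and the handler construction elided as project-specific:

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer
import fr.davit.akka.http.metrics.core.HttpMetricsHandler
import fr.davit.akka.http.metrics.core.scaladsl.server.HttpMetricsRoute._

implicit val system: ActorSystem = ActorSystem()
implicit val materializer: ActorMaterializer = ActorMaterializer()

val route: Route = ???                       // your application route
val metricsHandler: HttpMetricsHandler = ??? // e.g. a registry-backed handler

Http().bindAndHandle(route.recordMetrics(metricsHandler), "localhost", 8080)
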
Example 98
Source File: EmptyResponseFilter.scala    From vat-api   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.vatapi.filters

import akka.stream.Materializer
import javax.inject.Inject
import play.api.http.HttpEntity
import play.api.mvc._

import scala.concurrent.{ExecutionContext, Future}

class EmptyResponseFilter @Inject()(implicit val mat: Materializer, ec: ExecutionContext) extends Filter {

  val emptyHeader = "Gov-Empty-Response"

  def apply(f: (RequestHeader) => Future[Result])(rh: RequestHeader): Future[Result] = {
    f(rh) map { res =>
      if ((res.header.status == 201 || res.header.status == 409) && res.body.isKnownEmpty) {
        val headers = res.header.headers
          .updated("Content-Type", "application/json")
          .updated(emptyHeader, "true")
        res.copy(res.header.copy(headers = headers), HttpEntity.NoEntity)
      } else res
    }
  }

} 
Example 99
Source File: HeaderValidatorFilter.scala    From vat-api   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.vatapi.filters

import akka.stream.Materializer
import javax.inject.Inject
import play.api.libs.json.Json
import play.api.mvc._
import play.api.routing.Router.Attrs
import uk.gov.hmrc.api.controllers.{ErrorAcceptHeaderInvalid, HeaderValidator}
import uk.gov.hmrc.vatapi.config.ControllerConfiguration

import scala.concurrent.Future

class HeaderValidatorFilter @Inject()(implicit val mat: Materializer, controllerConfiguration: ControllerConfiguration,
                                      cc: MessagesControllerComponents
                                     ) extends Filter with HeaderValidator {

  protected def executionContext: scala.concurrent.ExecutionContext = cc.executionContext
  def parser: play.api.mvc.BodyParser[play.api.mvc.AnyContent] = cc.parsers.defaultBodyParser

  def apply(f: (RequestHeader) => Future[Result])(rh: RequestHeader): Future[Result] = {
    val handlerDef = rh.attrs.get(Attrs.HandlerDef)
    val needsHeaderValidation =
      handlerDef.forall(
        hd =>
          controllerConfiguration
            .controllerParamsConfig(hd.controller)
            .needsHeaderValidation)

    if (!needsHeaderValidation || acceptHeaderValidationRules(
      rh.headers.get("Accept"))) {
      f(rh)
    }
    else {
      Future.successful(
        Status(ErrorAcceptHeaderInvalid.httpStatusCode)(
          Json.toJson(ErrorAcceptHeaderInvalid)))
    }
  }

} 
Example 100
Source File: Filters.scala    From dr-cla   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package utils

import akka.stream.Materializer
import javax.inject.Inject
import play.api.http.{HeaderNames, HttpFilters}
import play.api.mvc._
import play.filters.gzip.GzipFilter

import scala.concurrent.{ExecutionContext, Future}

class OnlyHttpsFilter @Inject() (implicit val mat: Materializer, ec: ExecutionContext) extends Filter {
  def apply(nextFilter: (RequestHeader) => Future[Result])(requestHeader: RequestHeader): Future[Result] = {
    nextFilter(requestHeader).map { result =>
      val isWellKnown = requestHeader.path.startsWith(controllers.routes.Application.wellKnown("").url)
      val isForwardedAndInsecure = requestHeader.headers.get(HeaderNames.X_FORWARDED_PROTO).exists(_ != "https")

      if (isWellKnown || !isForwardedAndInsecure) {
        result
      }
      else {
        Results.MovedPermanently("https://" + requestHeader.host + requestHeader.uri)
      }
    }
  }
}

class Filters @Inject() (gzip: GzipFilter, onlyHttpsFilter: OnlyHttpsFilter) extends HttpFilters {
  val filters = Seq(gzip, onlyHttpsFilter)
} 
Example 101
Source File: AckFlowOps.scala    From reactive-activemq   with Apache License 2.0 5 votes vote down vote up
package akka.stream.integration
package activemq

import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.{ Done, NotUsed }

import scala.concurrent.{ ExecutionContext, Future }

object AckFlowOps {

  implicit class SourceOps[A, B](src: Source[AckTup[A, B], NotUsed]) {

    def fmapAck(f: B => A): Source[AckTup[A, A], NotUsed] = src.map {
      case (p, b) =>
        try {
          val a = f(b)
          val out = p -> a
          if (!p.isCompleted) p.success(a)
          out
        } catch {
          case cause: Throwable =>
            if (!p.isCompleted) p.failure(cause)
            throw cause
        }
    }

    def fmap[C](f: B => C): Source[AckTup[A, C], NotUsed] = src.map {
      case (p, a) => p -> f(a)
    }

    def fmapAsync(qos: Int)(f: B => Future[A])(implicit ec: ExecutionContext): Source[AckTup[A, A], NotUsed] = src.mapAsync(qos) {
      case (p, b) => f(b).map { a =>
        if (!p.isCompleted) p.success(a)
        p -> a
      }.recover {
        case t: Throwable =>
          if (!p.isCompleted) p.failure(t)
          throw t
      }
    }
  }

  implicit class SourceUnitOps[A](src: Source[AckTup[Unit, A], NotUsed]) {
    def runForeachAck(f: A => Unit)(implicit mat: Materializer): Future[Done] = src.runWith(AckSink.foreach(f))
  }
} 
Example 102
Source File: OrderConsumer.scala    From kafka-k8s-monitoring   with MIT License 5 votes vote down vote up
package com.xebia.orders

import akka.Done
import akka.actor.ActorSystem
import akka.kafka.scaladsl._
import akka.kafka._
import akka.stream.{ActorMaterializer, Materializer}
import akka.stream.scaladsl.Sink
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization._

object OrderConsumer extends App {

  private implicit val actorSystem = ActorSystem("orders")
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec = actorSystem.dispatcher

  val settings = ConsumerSettings(actorSystem, new StringDeserializer, new ByteArrayDeserializer)
    .withBootstrapServers("kafka:9092")
    .withGroupId("my-group")
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

  Consumer.plainSource(settings, Subscriptions.topics("orders"))
    .map(_.value())
    .map(new String(_))
    .map(println)
    .runWith(Sink.ignore).onComplete {
      _ =>
        println("Stream is dead!")
        sys.exit(1)
    }
} 
Example 103
Source File: AttachmentSupportTests.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.database.test

import akka.http.scaladsl.model.Uri
import akka.stream.scaladsl.Source
import akka.stream.{ActorMaterializer, Materializer}
import akka.util.CompactByteString
import common.WskActorSystem
import org.junit.runner.RunWith
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.junit.JUnitRunner
import org.scalatest.{FlatSpec, Matchers}
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.database.{AttachmentSupport, InliningConfig}
import org.apache.openwhisk.core.entity.WhiskEntity
import org.apache.openwhisk.core.entity.size._

@RunWith(classOf[JUnitRunner])
class AttachmentSupportTests extends FlatSpec with Matchers with ScalaFutures with WskActorSystem {

  behavior of "Attachment inlining"

  implicit val materializer: Materializer = ActorMaterializer()

  it should "not inline if maxInlineSize set to zero" in {
    val inliner = new AttachmentSupportTestMock(InliningConfig(maxInlineSize = 0.KB))
    val bs = CompactByteString("hello world")

    val bytesOrSource = inliner.inlineOrAttach(Source.single(bs)).futureValue
    val uri = inliner.uriOf(bytesOrSource, "foo")

    uri shouldBe Uri("test:foo")
  }

  class AttachmentSupportTestMock(val inliningConfig: InliningConfig) extends AttachmentSupport[WhiskEntity] {
    override protected[core] implicit val materializer: Materializer = ActorMaterializer()
    override protected def attachmentScheme: String = "test"
    override protected def executionContext = actorSystem.dispatcher
    override protected[database] def put(d: WhiskEntity)(implicit transid: TransactionId) = ???
  }
} 
Example 104
Source File: RunServer.scala    From mleap   with Apache License 2.0 5 votes vote down vote up
package ml.combust.mleap.grpc.server

import java.util.concurrent.{Executors, TimeUnit}

import akka.Done
import akka.actor.{ActorSystem, CoordinatedShutdown}
import akka.stream.{ActorMaterializer, Materializer}
import com.typesafe.config.Config
import com.typesafe.scalalogging.Logger
import io.grpc.ServerBuilder
import ml.combust.mleap.executor.MleapExecutor
import ml.combust.mleap.pb.MleapGrpc

import scala.concurrent.{ExecutionContext, Future}
import scala.language.existentials
import scala.util.{Failure, Success, Try}

class RunServer(config: Config)
               (implicit system: ActorSystem) {
  private val logger = Logger(classOf[RunServer])

  private var coordinator: Option[CoordinatedShutdown] = None

  def run(): Unit = {
    Try {
      logger.info("Starting MLeap gRPC Server")

      val coordinator = CoordinatedShutdown(system)
      this.coordinator = Some(coordinator)

      implicit val materializer: Materializer = ActorMaterializer()

      val grpcServerConfig = new GrpcServerConfig(config.getConfig("default"))
      val mleapExecutor = MleapExecutor(system)
      val port: Int = config.getInt("port")
      val threads: Option[Int] = if (config.hasPath("threads")) Some(config.getInt("threads")) else None
      val threadCount = threads.getOrElse {
        Math.min(Math.max(Runtime.getRuntime.availableProcessors() * 4, 32), 64)
      }

      logger.info(s"Creating thread pool for server with size $threadCount")
      val grpcThreadPool = Executors.newFixedThreadPool(threadCount)
      implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(grpcThreadPool)

      coordinator.addTask(CoordinatedShutdown.PhaseServiceRequestsDone, "threadPoolShutdownNow") {
        () =>
          Future {
            logger.info("Shutting down gRPC thread pool")
            grpcThreadPool.shutdown()
            grpcThreadPool.awaitTermination(5, TimeUnit.SECONDS)

            Done
          }
      }

      logger.info(s"Creating executor service")
      val grpcService: GrpcServer = new GrpcServer(mleapExecutor, grpcServerConfig)
      val builder = ServerBuilder.forPort(port)
      builder.intercept(new ErrorInterceptor)
      builder.addService(MleapGrpc.bindService(grpcService, ec))
      val grpcServer = builder.build()

      logger.info(s"Starting server on port $port")
      grpcServer.start()

      coordinator.addTask(CoordinatedShutdown.PhaseServiceUnbind, "grpcServiceShutdown") {
        () =>
          Future {
            logger.info("Shutting down gRPC")
            grpcServer.shutdown()
            grpcServer.awaitTermination(10, TimeUnit.SECONDS)
            Done
          }(ExecutionContext.global)
      }

      coordinator.addTask(CoordinatedShutdown.PhaseServiceStop, "grpcServiceShutdownNow") {
        () =>
          Future {
            if (!grpcServer.isShutdown) {
              logger.info("Shutting down gRPC NOW!")

              grpcServer.shutdownNow()
              grpcServer.awaitTermination(5, TimeUnit.SECONDS)
            }

            Done
          }(ExecutionContext.global)
      }
    } match {
      case Success(_) =>
      case Failure(err) =>
        logger.error("Error encountered starting server", err)
        for (c <- this.coordinator) {
          c.run(CoordinatedShutdown.UnknownReason)
        }
        throw err
    }
  }
} 
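A minimal launcher sketch for the class above. It assumes only the config keys RunServer reads ("default", "port", and the optional "threads"); the system name and port value are illustrative.

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

object RunServerLauncher extends App {
  // The actor system satisfies RunServer's implicit parameter.
  implicit val system: ActorSystem = ActorSystem("mleap-grpc-server")

  // "default" feeds GrpcServerConfig; "port" is required, "threads" is optional.
  private val config = ConfigFactory.parseString(
    """
      |default {}
      |port = 65327
      |threads = 8
    """.stripMargin)

  new RunServer(config).run()
}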
Example 105
Source File: GrpcSpec.scala    From mleap   with Apache License 2.0 5 votes vote down vote up
package ml.combust.mleap.grpc.server

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKit
import io.grpc.{ManagedChannel, Server}
import ml.combust.mleap.executor.service.TransformService
import ml.combust.mleap.executor.testkit.TransformServiceSpec
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatest.concurrent.ScalaFutures

import scala.concurrent.duration._
import ml.combust.mleap.grpc.server.TestUtil._

class GrpcSpec extends TestKit(ActorSystem("grpc-server-test"))
  with TransformServiceSpec
  with BeforeAndAfterEach
  with BeforeAndAfterAll
  with ScalaFutures {

  private lazy val server = createServer(system)
  private lazy val channel = inProcessChannel
  private lazy val client = createClient(channel)

  override lazy val transformService: TransformService = {
    server
    client
  }

  // A lazy val avoids allocating a new materializer on every implicit lookup.
  override implicit lazy val materializer: Materializer = ActorMaterializer()(system)

  override protected def afterAll(): Unit = {
    server.shutdown()
    channel.shutdown()
    TestKit.shutdownActorSystem(system, 5.seconds, verifySystemShutdown = true)
  }
} 
Example 106
Source File: LocalTransformServiceActor.scala    From mleap   with Apache License 2.0 5 votes vote down vote up
package ml.combust.mleap.executor.service

import akka.actor.{Actor, ActorRef, Props, Status, Terminated}
import akka.stream.{ActorMaterializer, Materializer}
import ml.combust.mleap.executor.repository.RepositoryBundleLoader
import ml.combust.mleap.executor._
import ml.combust.mleap.executor.error.NotFoundException

import scala.util.{Failure, Success, Try}

object LocalTransformServiceActor {
  def props(loader: RepositoryBundleLoader,
            config: ExecutorConfig): Props = {
    Props(new LocalTransformServiceActor(loader, config))
  }

  object Messages {
    case object Close
  }
}

class LocalTransformServiceActor(loader: RepositoryBundleLoader,
                                 config: ExecutorConfig) extends Actor {
  import LocalTransformServiceActor.Messages

  private implicit val materializer: Materializer = ActorMaterializer()(context.system)

  private var lookup: Map[String, ActorRef] = Map()
  private var modelNameLookup: Map[ActorRef, String] = Map()

  override def postStop(): Unit = {
    for (child <- context.children) {
      context.unwatch(child)
      context.stop(child)
    }
  }

  override def receive: Receive = {
    case request: TransformFrameRequest => handleModelRequest(request)
    case request: GetBundleMetaRequest => handleModelRequest(request)
    case request: GetModelRequest => handleModelRequest(request)
    case request: CreateFrameStreamRequest => handleModelRequest(request)
    case request: CreateRowStreamRequest => handleModelRequest(request)
    case request: GetRowStreamRequest => handleModelRequest(request)
    case request: CreateFrameFlowRequest => handleModelRequest(request)
    case request: GetFrameStreamRequest => handleModelRequest(request)
    case request: CreateRowFlowRequest => handleModelRequest(request)
    case request: UnloadModelRequest => handleModelRequest(request)
    case request: LoadModelRequest => loadModel(request)
    case Messages.Close => context.stop(self)

    case Terminated(actor) => terminated(actor)
  }

  def handleModelRequest(request: ModelRequest): Unit = {
    lookup.get(request.modelName) match {
      case Some(actor) => actor.tell(request, sender)
      case None => sender ! Status.Failure(new NotFoundException(s"no model with name ${request.modelName}"))
    }
  }

  def loadModel(request: LoadModelRequest): Unit = {
    Try(context.actorOf(BundleActor.props(request, loader, config), request.modelName)) match {
      case Success(actor) =>
        lookup += (request.modelName -> actor)
        modelNameLookup += (actor -> request.modelName)
        context.watch(actor)
        actor.tell(request, sender)
      case Failure(err) => sender ! Status.Failure(err)
    }
  }

  private def terminated(ref: ActorRef): Unit = {
    val uri = modelNameLookup(ref)
    modelNameLookup -= ref
    lookup -= uri
  }
} 
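A wiring sketch for the actor above; the loader and executor config are left as ??? placeholders because their construction is not shown in this example.

import akka.actor.{ActorRef, ActorSystem}

val system: ActorSystem = ActorSystem("executor")
val loader: RepositoryBundleLoader = ???   // construction not shown here
val executorConfig: ExecutorConfig = ???   // construction not shown here

val service: ActorRef =
  system.actorOf(LocalTransformServiceActor.props(loader, executorConfig), "transform-service")

// Requests are routed to per-model child actors by modelName; unknown names
// are answered with Status.Failure(NotFoundException).
service ! LocalTransformServiceActor.Messages.Close // stops the actor and its children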
Example 107
Source File: MleapExecutorSpec.scala    From mleap   with Apache License 2.0 5 votes vote down vote up
package ml.combust.mleap.executor

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKit
import ml.combust.mleap.executor.testkit.{TestUtil, TransformServiceSpec}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._

class MleapExecutorSpec extends TestKit(ActorSystem("MleapExecutorSpec"))
  with TransformServiceSpec
  with BeforeAndAfterAll
  with ScalaFutures {

  override lazy val transformService: MleapExecutor = MleapExecutor(system)
  private val frame = TestUtil.frame
  override implicit val materializer: Materializer = ActorMaterializer()(system)

  override protected def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system, 5.seconds, verifySystemShutdown = true)
  }
} 
Example 108
Source File: CSRFFilter.scala    From Cortex   with GNU Affero General Public License v3.0 5 votes vote down vote up
package org.thp.cortex.services

import javax.inject.{Inject, Provider, Singleton}

import play.api.Logger
import play.api.http.SessionConfiguration
import play.api.libs.crypto.CSRFTokenSigner
import play.filters.csrf.{CSRFFilter ⇒ PCSRFFilter}
import play.api.mvc.RequestHeader
import play.filters.csrf.CSRF.{ErrorHandler ⇒ CSRFErrorHandler, TokenProvider}
import play.filters.csrf.CSRFConfig

import akka.stream.Materializer

object CSRFFilter {
  private[CSRFFilter] lazy val logger = Logger(getClass)

  def shouldProtect(request: RequestHeader): Boolean = {
    val isLogin     = request.uri.startsWith("/api/login")
    val isApi       = request.uri.startsWith("/api")
    val isInSession = request.session.data.nonEmpty
    val check       = !isLogin && isApi && isInSession
    logger.debug(s"[csrf] uri ${request.uri} (isLogin=$isLogin, isApi=$isApi, isInSession=$isInSession): ${if (check) "" else "don't"} check")
    check
  }

}

@Singleton
class CSRFFilter @Inject()(
    config: Provider[CSRFConfig],
    tokenSignerProvider: Provider[CSRFTokenSigner],
    sessionConfiguration: SessionConfiguration,
    tokenProvider: TokenProvider,
    errorHandler: CSRFErrorHandler
)(mat: Materializer)
    extends PCSRFFilter(
      config.get.copy(shouldProtect = CSRFFilter.shouldProtect),
      tokenSignerProvider.get,
      sessionConfiguration,
      tokenProvider,
      errorHandler
    )(mat) 
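The shouldProtect rule above reduces to: run the CSRF check only for API calls that are not the login endpoint and that arrive with a non-empty session. For illustration (request URIs are made up):

POST /api/login    with session -> not checked (login is exempt)
POST /api/analyzer with session -> checked
POST /api/analyzer no session   -> not checked (no session cookie to forge)
GET  /index.html   with session -> not checked (not an API call)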
Example 109
Source File: AnalyzerConfigSrv.scala    From Cortex   with GNU Affero General Public License v3.0 5 votes vote down vote up
package org.thp.cortex.services

import scala.concurrent.{ExecutionContext, Future}

import play.api.Configuration

import akka.stream.Materializer
import javax.inject.{Inject, Singleton}
import org.thp.cortex.models.{BaseConfig, WorkerConfigModel, WorkerType}

import org.elastic4play.services.{CreateSrv, FindSrv, UpdateSrv}

@Singleton
class AnalyzerConfigSrv @Inject()(
    val configuration: Configuration,
    val workerConfigModel: WorkerConfigModel,
    val userSrv: UserSrv,
    val organizationSrv: OrganizationSrv,
    val workerSrv: WorkerSrv,
    val createSrv: CreateSrv,
    val updateSrv: UpdateSrv,
    val findSrv: FindSrv,
    implicit val ec: ExecutionContext,
    implicit val mat: Materializer
) extends WorkerConfigSrv {

  override val workerType: WorkerType.Type = WorkerType.analyzer

  def definitions: Future[Map[String, BaseConfig]] =
    buildDefinitionMap(workerSrv.listAnalyzerDefinitions._1)
} 
Example 110
Source File: LocalAuthSrv.scala    From Cortex   with GNU Affero General Public License v3.0 5 votes vote down vote up
package org.thp.cortex.services

import javax.inject.{Inject, Singleton}

import scala.concurrent.{ExecutionContext, Future}
import scala.util.Random

import play.api.mvc.RequestHeader

import akka.stream.Materializer
import org.thp.cortex.models.User

import org.elastic4play.controllers.Fields
import org.elastic4play.services.{AuthCapability, AuthContext, AuthSrv}
import org.elastic4play.utils.Hasher
import org.elastic4play.{AuthenticationError, AuthorizationError}

@Singleton
class LocalAuthSrv @Inject()(userSrv: UserSrv, implicit val ec: ExecutionContext, implicit val mat: Materializer) extends AuthSrv {

  val name                  = "local"
  override val capabilities = Set(AuthCapability.changePassword, AuthCapability.setPassword)

  private[services] def doAuthenticate(user: User, password: String): Boolean =
    user.password().map(_.split(",", 2)).fold(false) {
      case Array(seed, pwd) ⇒
        val hash = Hasher("SHA-256").fromString(seed + password).head.toString
        hash == pwd
      case _ ⇒ false
    }

  override def authenticate(username: String, password: String)(implicit request: RequestHeader): Future[AuthContext] =
    userSrv.get(username).flatMap { user ⇒
      if (doAuthenticate(user, password)) userSrv.getFromUser(request, user, name)
      else Future.failed(AuthenticationError("Authentication failure"))
    }

  override def changePassword(username: String, oldPassword: String, newPassword: String)(implicit authContext: AuthContext): Future[Unit] =
    userSrv.get(username).flatMap { user ⇒
      if (doAuthenticate(user, oldPassword)) setPassword(username, newPassword)
      else Future.failed(AuthorizationError("Authentication failure"))
    }

  override def setPassword(username: String, newPassword: String)(implicit authContext: AuthContext): Future[Unit] = {
    val seed    = Random.nextString(10).replace(',', '!')
    val newHash = seed + "," + Hasher("SHA-256").fromString(seed + newPassword).head.toString
    userSrv.update(username, Fields.empty.set("password", newHash)).map(_ ⇒ ())
  }
} 
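A round-trip sketch of the password storage format used above, "<seed>,<sha256(seed + password)>"; the seed and password values are made up.

import org.elastic4play.utils.Hasher

val seed     = "q8Zr5fW0xa"
val password = "s3cret"

// What setPassword stores:
val stored = seed + "," + Hasher("SHA-256").fromString(seed + password).head.toString

// What doAuthenticate checks:
val Array(s, expectedHash) = stored.split(",", 2)
val authenticated = Hasher("SHA-256").fromString(s + password).head.toString == expectedHash // true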
Example 111
Source File: KeyAuthSrv.scala    From Cortex   with GNU Affero General Public License v3.0 5 votes vote down vote up
package org.thp.cortex.services

import java.util.Base64
import javax.inject.{Inject, Singleton}

import scala.concurrent.{ExecutionContext, Future}
import scala.util.Random

import play.api.libs.json.JsArray
import play.api.mvc.RequestHeader

import akka.stream.Materializer
import akka.stream.scaladsl.Sink

import org.elastic4play.controllers.Fields
import org.elastic4play.services.{AuthCapability, AuthContext, AuthSrv}
import org.elastic4play.{AuthenticationError, BadRequestError}

@Singleton
class KeyAuthSrv @Inject()(userSrv: UserSrv, implicit val ec: ExecutionContext, implicit val mat: Materializer) extends AuthSrv {
  override val name = "key"

  final protected def generateKey(): String = {
    val bytes = Array.ofDim[Byte](24)
    Random.nextBytes(bytes)
    Base64.getEncoder.encodeToString(bytes)
  }

  override val capabilities = Set(AuthCapability.authByKey)

  override def authenticate(key: String)(implicit request: RequestHeader): Future[AuthContext] = {
    import org.elastic4play.services.QueryDSL._
    // key attribute is sensitive so it is not possible to search on that field
    userSrv
      .find("status" ~= "Ok", Some("all"), Nil)
      ._1
      .filter(_.key().contains(key))
      .runWith(Sink.headOption)
      .flatMap {
        case Some(user) ⇒ userSrv.getFromUser(request, user, name)
        case None       ⇒ Future.failed(AuthenticationError("Authentication failure"))
      }
  }

  override def renewKey(username: String)(implicit authContext: AuthContext): Future[String] = {
    val newKey = generateKey()
    userSrv.update(username, Fields.empty.set("key", newKey)).map(_ ⇒ newKey)
  }

  override def getKey(username: String)(implicit authContext: AuthContext): Future[String] =
    userSrv.get(username).map(_.key().getOrElse(throw BadRequestError(s"User $username has no key")))

  override def removeKey(username: String)(implicit authContext: AuthContext): Future[Unit] =
    userSrv.update(username, Fields.empty.set("key", JsArray())).map(_ ⇒ ())
} 
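Note on generateKey above: 24 random bytes Base64-encode to exactly 32 characters with no padding (24 is divisible by 3), so API keys are fixed-width strings.

val bytes = Array.ofDim[Byte](24)
scala.util.Random.nextBytes(bytes)
val key = java.util.Base64.getEncoder.encodeToString(bytes)
require(key.length == 32)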
Example 112
Source File: ResponderConfigSrv.scala    From Cortex   with GNU Affero General Public License v3.0 5 votes vote down vote up
package org.thp.cortex.services

import scala.concurrent.{ExecutionContext, Future}

import play.api.Configuration

import akka.stream.Materializer
import javax.inject.{Inject, Singleton}
import org.thp.cortex.models.{BaseConfig, WorkerConfigModel, WorkerType}

import org.elastic4play.services.{CreateSrv, FindSrv, UpdateSrv}

@Singleton
class ResponderConfigSrv @Inject()(
    val configuration: Configuration,
    val workerConfigModel: WorkerConfigModel,
    val userSrv: UserSrv,
    val organizationSrv: OrganizationSrv,
    val workerSrv: WorkerSrv,
    val createSrv: CreateSrv,
    val updateSrv: UpdateSrv,
    val findSrv: FindSrv,
    implicit val ec: ExecutionContext,
    implicit val mat: Materializer
) extends WorkerConfigSrv {

  override val workerType: WorkerType.Type         = WorkerType.responder
  def definitions: Future[Map[String, BaseConfig]] = buildDefinitionMap(workerSrv.listResponderDefinitions._1)
} 
Example 113
Source File: AnalyzerCtrl.scala    From Cortex   with GNU Affero General Public License v3.0 5 votes vote down vote up
package org.thp.cortex.controllers

import scala.concurrent.{ExecutionContext, Future}

import play.api.libs.json.{JsObject, JsString, Json}
import play.api.mvc.{AbstractController, Action, AnyContent, ControllerComponents}

import akka.stream.Materializer
import javax.inject.{Inject, Singleton}
import org.thp.cortex.models.{Roles, Worker}
import org.thp.cortex.services.{UserSrv, WorkerSrv}

import org.elastic4play.controllers.{Authenticated, Fields, FieldsBodyParser, Renderer}
import org.elastic4play.services.JsonFormat.queryReads
import org.elastic4play.services.{QueryDSL, QueryDef}

@Singleton
class AnalyzerCtrl @Inject()(
    workerSrv: WorkerSrv,
    userSrv: UserSrv,
    authenticated: Authenticated,
    fieldsBodyParser: FieldsBodyParser,
    renderer: Renderer,
    components: ControllerComponents,
    implicit val ec: ExecutionContext,
    implicit val mat: Materializer
) extends AbstractController(components) {

  def find: Action[Fields] = authenticated(Roles.read).async(fieldsBodyParser) { request ⇒
    val query                      = request.body.getValue("query").fold[QueryDef](QueryDSL.any)(_.as[QueryDef])
    val range                      = request.body.getString("range")
    val sort                       = request.body.getStrings("sort").getOrElse(Nil)
    val isAdmin                    = request.roles.contains(Roles.orgAdmin)
    val (analyzers, analyzerTotal) = workerSrv.findAnalyzersForUser(request.userId, query, range, sort)
    renderer.toOutput(OK, analyzers.map(analyzerJson(isAdmin)), analyzerTotal)
  }

  def get(analyzerId: String): Action[AnyContent] = authenticated(Roles.read).async { request ⇒
    val isAdmin = request.roles.contains(Roles.orgAdmin)
    workerSrv
      .getForUser(request.userId, analyzerId)
      .map(a ⇒ renderer.toOutput(OK, analyzerJson(isAdmin)(a)))
  }

  private def analyzerJson(isAdmin: Boolean)(analyzer: Worker): JsObject =
    if (isAdmin)
      analyzer.toJson + ("configuration" → Json.parse(analyzer.configuration())) + ("analyzerDefinitionId" → JsString(analyzer.workerDefinitionId()))
    else
      analyzer.toJson + ("analyzerDefinitionId" → JsString(analyzer.workerDefinitionId()))

  def listForType(dataType: String): Action[AnyContent] = authenticated(Roles.read).async { request ⇒
    import org.elastic4play.services.QueryDSL._
    val (responderList, responderCount) = workerSrv.findAnalyzersForUser(request.userId, "dataTypeList" ~= dataType, Some("all"), Nil)
    renderer.toOutput(OK, responderList.map(analyzerJson(isAdmin = false)), responderCount)
  }

  def create(analyzerDefinitionId: String): Action[Fields] = authenticated(Roles.orgAdmin).async(fieldsBodyParser) { implicit request ⇒
    for {
      organizationId   ← userSrv.getOrganizationId(request.userId)
      workerDefinition ← Future.fromTry(workerSrv.getDefinition(analyzerDefinitionId))
      analyzer         ← workerSrv.create(organizationId, workerDefinition, request.body)
    } yield renderer.toOutput(CREATED, analyzerJson(isAdmin = false)(analyzer))
  }

  def listDefinitions: Action[AnyContent] = authenticated(Roles.orgAdmin, Roles.superAdmin).async { _ ⇒
    val (analyzers, analyzerTotal) = workerSrv.listAnalyzerDefinitions
    renderer.toOutput(OK, analyzers, analyzerTotal)
  }

  def scan: Action[AnyContent] = authenticated(Roles.orgAdmin, Roles.superAdmin) { _ ⇒
    workerSrv.rescan()
    NoContent
  }

  def delete(analyzerId: String): Action[AnyContent] = authenticated(Roles.orgAdmin, Roles.superAdmin).async { implicit request ⇒
    for {
      analyzer ← workerSrv.getForUser(request.userId, analyzerId)
      _        ← workerSrv.delete(analyzer)
    } yield NoContent
  }

  def update(analyzerId: String): Action[Fields] = authenticated(Roles.orgAdmin).async(fieldsBodyParser) { implicit request ⇒
    for {
      analyzer        ← workerSrv.getForUser(request.userId, analyzerId)
      updatedAnalyzer ← workerSrv.update(analyzer, request.body)
    } yield renderer.toOutput(OK, analyzerJson(isAdmin = true)(updatedAnalyzer))
  }
} 
Example 114
Source File: InstructorsResource.scala    From naptime   with Apache License 2.0 5 votes vote down vote up
package resources

import javax.inject.Inject
import javax.inject.Singleton

import akka.stream.Materializer
import org.coursera.example.Instructor
import org.coursera.naptime.Fields
import org.coursera.naptime.FinderGraphQLRelation
import org.coursera.naptime.GetGraphQLRelation
import org.coursera.naptime.Ok
import org.coursera.naptime.ResourceName
import org.coursera.naptime.model.Keyed
import org.coursera.naptime.resources.CourierCollectionResource
import stores.InstructorStore

import scala.concurrent.ExecutionContext

@Singleton
class InstructorsResource @Inject() (
    instructorStore: InstructorStore)(implicit ec: ExecutionContext, mat: Materializer)
  extends CourierCollectionResource[Int, Instructor] {

  override def resourceName = "instructors"
  override def resourceVersion = 1
  override implicit lazy val Fields: Fields[Instructor] = BaseFields
    .withGraphQLRelations(
      "courses" -> FinderGraphQLRelation(
        resourceName = ResourceName("courses", 1),
        finderName = "byInstructor",
        arguments = Map("instructorId" -> "$id")),
      "partner" -> GetGraphQLRelation(
        resourceName = ResourceName("partners", 1),
        id = "$partnerId"))

  def get(id: Int) = Nap.get { context =>
    OkIfPresent(id, instructorStore.get(id))
  }

  def multiGet(ids: Set[Int]) = Nap.multiGet { context =>
    Ok(instructorStore.all()
      .filter(instructor => ids.contains(instructor._1))
      .map { case (id, instructor) => Keyed(id, instructor) }.toList)
  }

  def getAll() = Nap.getAll { context =>
    Ok(instructorStore.all().map { case (id, instructor) => Keyed(id, instructor) }.toList)
  }

} 
Example 115
Source File: UserResource.scala    From naptime   with Apache License 2.0 5 votes vote down vote up
package resources

import java.util.concurrent.atomic.AtomicInteger
import javax.inject.Inject
import javax.inject.Singleton

import akka.stream.Materializer
import org.coursera.naptime.model.KeyFormat
import org.coursera.naptime.model.Keyed
import org.coursera.naptime.Ok
import org.coursera.example.User
import org.coursera.naptime.courier.CourierFormats
import org.coursera.naptime.resources.TopLevelCollectionResource
import play.api.libs.json.OFormat

import scala.concurrent.ExecutionContext


@Singleton
class UsersResource @Inject() (
    userStore: UserStore,
    banManager: UserBanManager)
    (implicit override val executionContext: ExecutionContext,
    override val materializer: Materializer)
  extends TopLevelCollectionResource[Int, User] {

  override def resourceName = "users"
  override def resourceVersion = 1  // optional; defaults to 1
  implicit val fields = Fields.withDefaultFields(  // default field projection
    "id", "name", "email")

  override def keyFormat: KeyFormat[KeyType] = KeyFormat.intKeyFormat
  override implicit def resourceFormat: OFormat[User] = CourierFormats.recordTemplateFormats[User]

  def get(id: Int) = Nap.get { context =>
    OkIfPresent(id, userStore.get(id))
  }

  def multiGet(ids: Set[Int]) = Nap.multiGet { context =>
    Ok(userStore.all()
      .filter(user => ids.contains(user._1))
      .map { case (id, user) => Keyed(id, user) }.toList)
  }

  def getAll() = Nap.getAll { context =>
    Ok(userStore.all().map { case (id, user) => Keyed(id, user) }.toList)
  }

  def create() = Nap
    .jsonBody[User]
    .create { context =>
      val user = context.body
      val id = userStore.create(user)

      // Could return Ok(Keyed(id, None)) if we want to return 201 Created,
      // with an empty body. Prefer returning the updated body, however.
      Ok(Keyed(id, Some(user)))
    }

  def email(email: String) = Nap.finder { context =>
    Ok(userStore.all()
      .filter(_._2.email == email)
      .map { case (id, user) => Keyed(id, user) }.toList)
  }

}


trait UserStore {
  def get(id: Int): Option[User]
  def create(user: User): Int
  def all(): Map[Int, User]
}

@Singleton
class UserStoreImpl extends UserStore {
  @volatile
  var userStore = Map.empty[Int, User]
  val nextId = new AtomicInteger(0)

  def get(id: Int) = userStore.get(id)

  def create(user: User): Int = {
    val id = nextId.incrementAndGet()
    userStore = userStore + (id -> user)
    id
  }

  def all() = userStore

}

class UserBanManager {
  @volatile
  var bannedUsers = Set.empty[Int]
} 
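Note that UserStoreImpl is only safe as a demo store: ids come from an AtomicInteger, but the read-modify-write of the @volatile map in create is not atomic, so two concurrent create calls could drop an entry.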
Example 116
Source File: PartnersResource.scala    From naptime   with Apache License 2.0 5 votes vote down vote up
package resources

import javax.inject.Inject
import javax.inject.Singleton

import akka.stream.Materializer
import org.coursera.example.Partner
import org.coursera.naptime.Fields
import org.coursera.naptime.MultiGetGraphQLRelation
import org.coursera.naptime.Ok
import org.coursera.naptime.ResourceName
import org.coursera.naptime.model.Keyed
import org.coursera.naptime.resources.CourierCollectionResource
import stores.PartnerStore

import scala.concurrent.ExecutionContext

@Singleton
class PartnersResource @Inject() (
    partnerStore: PartnerStore)(implicit ec: ExecutionContext, mat: Materializer)
  extends CourierCollectionResource[String, Partner] {

  override def resourceName = "partners"
  override def resourceVersion = 1
  override implicit lazy val Fields: Fields[Partner] = BaseFields
    .withGraphQLRelations(
      "instructors" -> MultiGetGraphQLRelation(
        resourceName = ResourceName("instructors", 1),
        ids = "$instructorIds"),
      "courses" -> MultiGetGraphQLRelation(
        resourceName = ResourceName("courses", 1),
        ids = "$courseIds"))

  def get(id: String) = Nap.get { context =>
    OkIfPresent(id, partnerStore.get(id))
  }

  def multiGet(ids: Set[String]) = Nap.multiGet { context =>
    Ok(partnerStore.all()
      .filter(partner => ids.contains(partner._1))
      .map { case (id, partner) => Keyed(id, partner) }.toList)
  }

  def getAll() = Nap.getAll { context =>
    Ok(partnerStore.all().map { case (id, partner) => Keyed(id, partner) }.toList)
  }

} 
Example 117
Source File: CoursesResource.scala    From naptime   with Apache License 2.0 5 votes vote down vote up
package resources

import javax.inject.Inject
import javax.inject.Singleton

import akka.stream.Materializer
import org.coursera.example.Course
import org.coursera.naptime.Fields
import org.coursera.naptime.GetGraphQLRelation
import org.coursera.naptime.MultiGetGraphQLRelation
import org.coursera.naptime.Ok
import org.coursera.naptime.ResourceName
import org.coursera.naptime.model.Keyed
import org.coursera.naptime.resources.CourierCollectionResource
import stores.CourseStore

import scala.concurrent.ExecutionContext

@Singleton
class CoursesResource @Inject() (
    courseStore: CourseStore)(implicit ec: ExecutionContext, mat: Materializer)
  extends CourierCollectionResource[String, Course] {

  override def resourceName = "courses"
  override def resourceVersion = 1
  override implicit lazy val Fields: Fields[Course] = BaseFields
    .withGraphQLRelations(
      "instructors" -> MultiGetGraphQLRelation(
        resourceName = ResourceName("instructors", 1),
        ids = "$instructorIds"),
      "partner" -> GetGraphQLRelation(
        resourceName = ResourceName("partners", 1),
        id = "$partnerId",
        description = "Partner who produces this course."),
      "courseMetadata/org.coursera.example.CertificateCourseMetadata/certificateInstructors" ->
        MultiGetGraphQLRelation(
          resourceName = ResourceName("instructors", 1),
          ids = "${courseMetadata/certificate/certificateInstructorIds}",
          description = "Instructor whose name and signature appears on the course certificate."),
      "courseMetadata/org.coursera.example.DegreeCourseMetadata/degreeInstructors" ->
        MultiGetGraphQLRelation(
          resourceName = ResourceName("instructors", 1),
          ids = "${courseMetadata/degree/degreeInstructorIds}",
          description = "Instructor whose name and signature appears on the degree certificate."))

  def get(id: String = "v1-123") = Nap.get { context =>
    OkIfPresent(id, courseStore.get(id))
  }

  def multiGet(ids: Set[String], types: Set[String] = Set("course", "specialization")) = Nap.multiGet { context =>
    Ok(courseStore.all()
      .filter(course => ids.contains(course._1))
      .map { case (id, course) => Keyed(id, course) }.toList)
  }

  def getAll() = Nap.getAll { context =>

    val courses = courseStore.all().toList.map { case (id, course) => Keyed(id, course) }
    val coursesAfterNext = context.paging.start
      .map(s => courses.dropWhile(_.key != s))
      .getOrElse(courses)

    val coursesSubset = coursesAfterNext.take(context.paging.limit)

    val next = coursesAfterNext.drop(context.paging.limit).headOption.map(_.key)

    Ok(coursesSubset)
      .withPagination(next, Some(courses.size.toLong))
  }

  def byInstructor(instructorId: String) = Nap.finder { context =>
    val courses = courseStore.all()
      .filter(course => course._2.instructorIds.map(_.toString).contains(instructorId))
    Ok(courses.toList.map { case (id, course) => Keyed(id, course) })
      .withPagination(next = "testNext")
  }

} 
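The getAll action above implements keyset-style paging: drop everything before the start key, take one page, and expose the key just after the page as next. A worked run on plain lists (data is illustrative):

val keys  = List("a", "b", "c", "d")
val start = Option("b")
val limit = 2

val afterStart = start.map(s => keys.dropWhile(_ != s)).getOrElse(keys) // List(b, c, d)
val page       = afterStart.take(limit)                                 // List(b, c)
val next       = afterStart.drop(limit).headOption                      // Some(d)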
Example 118
Source File: RestActionTester.scala    From naptime   with Apache License 2.0 5 votes vote down vote up
package org.coursera.naptime.actions

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.Materializer
import org.coursera.naptime.NaptimeActionException
import org.coursera.naptime.QueryFields
import org.coursera.naptime.QueryIncludes
import org.coursera.naptime.RequestEvidence
import org.coursera.naptime.RequestPagination
import org.coursera.naptime.RestContext
import org.coursera.naptime.RestError
import org.coursera.naptime.RestResponse
import org.junit.After
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.exceptions.TestFailedException
import play.api.test.FakeRequest

import scala.concurrent.ExecutionContext
import scala.util.Try


  protected[this] implicit class RestActionTestOps[AuthType, BodyType, ResponseType](
      action: RestAction[_, AuthType, BodyType, _, _, ResponseType]) {

    def testAction(ctx: RestContext[AuthType, BodyType]): RestResponse[ResponseType] = {
      val updatedAuthEither = action.restAuthGenerator.apply(ctx.body).check(ctx.auth)

      updatedAuthEither match {
        case Left(error) => RestError(error)
        case Right(updatedAuth) =>
          val responseFuture = action.safeApply(ctx.copyWithAuth(updatedAuth)).recover {
            case e: NaptimeActionException => RestError(e)
          }

          Try(responseFuture.futureValue).recover {
            case e: TestFailedException => e.cause.map(throw _).getOrElse(throw e)
          }.get
      }
    }

    def testActionPassAuth(ctx: RestContext[AuthType, BodyType]): RestResponse[ResponseType] = {
      val responseFuture = action.safeApply(ctx).recover {
        case e: NaptimeActionException => RestError(e)
      }

      Try(responseFuture.futureValue).recover {
        case e: TestFailedException => e.cause.map(throw _).getOrElse(throw e)
      }.get
    }
  }
} 
Example 119
Source File: AuthMacroTest.scala    From naptime   with Apache License 2.0 5 votes vote down vote up
package org.coursera.naptime

import akka.stream.Materializer
import org.coursera.naptime.access.HeaderAccessControl
import org.coursera.naptime.model.KeyFormat
import org.coursera.naptime.resources.TopLevelCollectionResource
import org.coursera.naptime.router2._
import org.junit.Test
import org.scalatest.junit.AssertionsForJUnit
import org.scalatest.mockito.MockitoSugar
import play.api.libs.json.OFormat
import play.api.mvc.RequestHeader

import scala.concurrent.ExecutionContext

case class CustomAuth()

object CustomAuthorizer extends HeaderAccessControl[CustomAuth] {
  override def run(requestHeader: RequestHeader)(implicit executionContext: ExecutionContext) = ???
  override private[naptime] def check(authInfo: CustomAuth) = ???
}

class AuthorizedResource(
    implicit val executionContext: ExecutionContext,
    val materializer: Materializer)
    extends TopLevelCollectionResource[String, Item] {

  override def keyFormat: KeyFormat[String] = KeyFormat.stringKeyFormat

  override implicit def resourceFormat: OFormat[Item] = Item.jsonFormat

  override def resourceName: String = "items"

  implicit val fields = Fields.withDefaultFields("name")

  def get(id: String) =
    Nap
      .auth(CustomAuthorizer)
      .get { ctx =>
        ???
      }

}

object AuthorizedResource {
  val routerBuilder = Router.build[AuthorizedResource]
}

class AuthMacroTest extends AssertionsForJUnit with MockitoSugar with ResourceTestImplicits {

  val schema = AuthorizedResource.routerBuilder.schema

  @Test
  def get(): Unit = {
    val handler = schema.handlers.find(_.name === "get").get
    assert(handler.authType === Some("org.coursera.naptime.CustomAuth"))
  }

} 
Example 120
Source File: RestActionBodyBuilder.scala    From naptime   with Apache License 2.0 5 votes vote down vote up
package org.coursera.naptime.actions

import akka.stream.Materializer
import org.coursera.common.concurrent.Futures
import org.coursera.naptime.model.KeyFormat
import org.coursera.naptime.ResourceFields
import org.coursera.naptime.PaginationConfiguration
import org.coursera.naptime.RestContext
import org.coursera.naptime.RestError
import org.coursera.naptime.RestResponse
import play.api.libs.json.OFormat
import play.api.mvc.BodyParser

import scala.concurrent.ExecutionContext
import scala.concurrent.Future


class RestActionBodyBuilder[
    RACType,
    AuthType,
    BodyType,
    ResourceKeyType,
    ResourceType,
    ResponseType](
    authGeneratorOrAuth: AuthGenerator[BodyType, AuthType],
    bodyParser: BodyParser[BodyType],
    errorHandler: PartialFunction[Throwable, RestError])(
    implicit keyFormat: KeyFormat[ResourceKeyType],
    resourceFormat: OFormat[ResourceType],
    ec: ExecutionContext,
    mat: Materializer) { self =>

  type CategoryEngine =
    RestActionCategoryEngine[RACType, ResourceKeyType, ResourceType, ResponseType]
  type BuiltAction =
    RestAction[RACType, AuthType, BodyType, ResourceKeyType, ResourceType, ResponseType]

  def apply(fn: RestContext[AuthType, BodyType] => RestResponse[ResponseType])(
      implicit category: CategoryEngine,
      fields: ResourceFields[ResourceType],
      paginationConfiguration: PaginationConfiguration): BuiltAction = {

    async(ctx => Future.successful(fn(ctx)))
  }

  def apply(fn: => RestResponse[ResponseType])(
      implicit category: CategoryEngine,
      fields: ResourceFields[ResourceType],
      paginationConfiguration: PaginationConfiguration): BuiltAction = {

    async(_ => Futures.immediate(fn))
  }

  def async(fn: => Future[RestResponse[ResponseType]])(
      implicit category: CategoryEngine,
      fields: ResourceFields[ResourceType],
      paginationConfiguration: PaginationConfiguration): BuiltAction = {

    async(_ => fn)
  }

  def async(fn: RestContext[AuthType, BodyType] => Future[RestResponse[ResponseType]])(
      implicit category: CategoryEngine,
      fields: ResourceFields[ResourceType],
      _paginationConfiguration: PaginationConfiguration): BuiltAction = {

    new RestAction[RACType, AuthType, BodyType, ResourceKeyType, ResourceType, ResponseType] {
      override def restAuthGenerator = authGeneratorOrAuth
      override def restBodyParser = bodyParser
      override def restEngine = category
      override def fieldsEngine = fields
      override def paginationConfiguration = _paginationConfiguration
      override def errorHandler: PartialFunction[Throwable, RestError] = self.errorHandler
      override val keyFormat = self.keyFormat
      override val resourceFormat = self.resourceFormat
      override val executionContext = ec
      override val materializer = mat

      override def apply(
          context: RestContext[AuthType, BodyType]): Future[RestResponse[ResponseType]] =
        Futures.safelyCall(fn(context))
    }
  }
} 
Example 121
Source File: DefinedBodyTypeRestActionBuilder.scala    From naptime   with Apache License 2.0 5 votes vote down vote up
package org.coursera.naptime.actions

import akka.stream.Materializer
import org.coursera.naptime.RestError
import org.coursera.naptime.access.HeaderAccessControl
import org.coursera.naptime.model.KeyFormat
import play.api.libs.json.OFormat
import play.api.mvc.BodyParser

import scala.concurrent.ExecutionContext


  def returning[NewResponseType](): DefinedBodyTypeRestActionBuilder[
    RACType,
    AuthType,
    BodyType,
    ResourceKeyType,
    ResourceType,
    NewResponseType] =
    new DefinedBodyTypeRestActionBuilder(authGeneratorOrAuth, bodyParser, errorHandler)

  override protected def bodyBuilder[Category, Response](): BodyBuilder[Category, Response] = {
    new RestActionBodyBuilder[
      Category,
      AuthType,
      BodyType,
      ResourceKeyType,
      ResourceType,
      Response](authGeneratorOrAuth, bodyParser, errorHandler)
  }

} 
Example 122
Source File: NestingTests.scala    From naptime   with Apache License 2.0 5 votes vote down vote up
package org.coursera.naptime.resources

import akka.stream.Materializer
import org.coursera.common.jsonformat.JsonFormats.Implicits.dateTimeFormat
import org.coursera.naptime.ResourceTestImplicits
import org.coursera.naptime.model.KeyFormat
import org.coursera.naptime.path.ParseFailure
import org.coursera.naptime.path.ParseSuccess
import org.coursera.naptime.path.RootParsedPathKey
import org.coursera.naptime.resources.NestingTests.FriendInfoResource
import org.coursera.naptime.resources.NestingTests.PeopleResource
import org.joda.time.DateTime
import org.junit.Test
import org.scalatest.junit.AssertionsForJUnit
import play.api.libs.json.Json
import play.api.libs.json.OFormat

import scala.concurrent.ExecutionContext

object NestingTests {
  case class Person(name: String)
  object Person {
    implicit val jsonFormat: OFormat[Person] = Json.format[Person]
  }

  class PeopleResource(
      implicit val executionContext: ExecutionContext,
      val materializer: Materializer)
      extends TopLevelCollectionResource[String, Person] {

    override def keyFormat = KeyFormat.stringKeyFormat
    override implicit def resourceFormat = implicitly
    override def resourceName: String = "people"
  }

  case class FriendInfo(since: DateTime, important: Boolean)
  object FriendInfo {
    implicit val jsonFormat: OFormat[FriendInfo] = Json.format[FriendInfo]
  }

  class FriendInfoResource(peopleResource: PeopleResource)(
      implicit val executionContext: ExecutionContext,
      val materializer: Materializer)
      extends CollectionResource[PeopleResource, String, FriendInfo] {

    override def keyFormat = KeyFormat.stringKeyFormat
    override val parentResource = peopleResource
    override implicit def resourceFormat = implicitly
    override def resourceName: String = "friendInfo"
  }
}

class NestingTests extends AssertionsForJUnit with ResourceTestImplicits {

  @Test
  def topLevelRouting(): Unit = {
    val peopleResource = new PeopleResource
    assert(
      ParseSuccess(None, "asdf" ::: RootParsedPathKey) ===
        peopleResource.pathParser.parse("/people.v1/asdf"))
    assert(
      ParseSuccess(Some("/friendInfo.v1/fdsa"), "asdf" ::: RootParsedPathKey) ===
        peopleResource.pathParser.parse("/people.v1/asdf/friendInfo.v1/fdsa"))
    assert(ParseFailure === peopleResource.pathParser.parse("/friendInfo.v1/asdf"))
  }

  @Test
  def nestedRouting(): Unit = {
    val peopleResource = new PeopleResource
    val friendInfoResource = new FriendInfoResource(peopleResource)
    assert(
      ParseSuccess(None, "fdsa" ::: "asdf" ::: RootParsedPathKey) ===
        friendInfoResource.pathParser.parse("/people.v1/asdf/friendInfo.v1/fdsa"))
    assert(ParseFailure === friendInfoResource.pathParser.parse("/friendInfo.v1/fdsa"))
    assert(ParseFailure === friendInfoResource.pathParser.parse("/people.v1/asdf"))
  }
} 
Example 123
Source File: ResourceTestImplicits.scala    From naptime   with Apache License 2.0 5 votes vote down vote up
package org.coursera.naptime

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.Materializer
import org.junit.After

import scala.concurrent.ExecutionContext

trait ResourceTestImplicits {
  private[this] val internalActorSystem: ActorSystem = ActorSystem("test")
  private[this] val internalExecutionContext: ExecutionContext = actorSystem.dispatcher
  private[this] val internalMaterializer: Materializer = ActorMaterializer()

  implicit protected def actorSystem: ActorSystem = internalActorSystem
  implicit protected def executionContext: ExecutionContext = internalExecutionContext
  implicit protected def materializer: Materializer = internalMaterializer

  @After
  def shutDownActorSystem(): Unit = {
    actorSystem.terminate()
  }
} 
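Usage, as seen in AuthMacroTest and NestingTests above: mixing the trait into a suite makes an actor system, execution context and materializer implicitly available to the resources under test.

import org.scalatest.junit.AssertionsForJUnit

class PeopleResourceTest extends AssertionsForJUnit with ResourceTestImplicits {
  // resources like PeopleResource above can now be constructed, with their
  // implicit ExecutionContext and Materializer resolved from this trait
}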
Example 124
Source File: Xmlrpc.scala    From xmlrpc   with MIT License 5 votes vote down vote up
package xmlrpc

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.client.RequestBuilding._
import akka.http.scaladsl.marshallers.xml.ScalaXmlSupport._
import akka.http.scaladsl.model._
import akka.http.scaladsl.unmarshalling.{FromResponseUnmarshaller, Unmarshal}
import akka.stream.Materializer
import akka.util.Timeout
import xmlrpc.protocol._

import scala.concurrent.{ExecutionContext, Future}
import scala.xml.NodeSeq


object Xmlrpc {

  import XmlrpcProtocol._

  case class XmlrpcServer(fullAddress: String) {
    def uri: Uri = Uri(fullAddress)
  }

  def invokeMethod[P: Datatype, R: Datatype](name: String, parameter: P = Void)
                                            (implicit xmlrpcServer: XmlrpcServer,
                                             as: ActorSystem,
                                             ma: Materializer,
                                             ec: ExecutionContext,
                                             fc: Timeout): XmlrpcResponse[R] = {

    import XmlrpcResponse.AkkaHttpToXmlrpcResponse

    def unmarshall[A](f: Future[HttpResponse])(implicit um: FromResponseUnmarshaller[A]): Future[A] =
      f.flatMap(Unmarshal(_).to[A])


    val request: NodeSeq = writeXmlRequest(name, parameter)
    val requestWithHeader: String = """<?xml version="1.0"?>""" + request.toString


    try {
      (Http().singleRequest(Post(xmlrpcServer.uri, request)) ~> unmarshall[NodeSeq]).asXmlrpcResponse[R]
    } catch {
      case t: Throwable => XmlrpcResponse(ConnectionError("An exception has been thrown by the HTTP layer", Some(t)).failures)
    }
  }
} 
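A call sketch for invokeMethod above. The endpoint, method name and Int parameter/result types are illustrative, and Datatype instances for them are assumed to come from XmlrpcProtocol.

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.util.Timeout
import xmlrpc.Xmlrpc

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

implicit val server: Xmlrpc.XmlrpcServer = Xmlrpc.XmlrpcServer("http://localhost:8080/RPC2")
implicit val system: ActorSystem = ActorSystem()
implicit val mat: ActorMaterializer = ActorMaterializer()
implicit val ec: ExecutionContext = system.dispatcher
implicit val timeout: Timeout = Timeout(5.seconds)

val response = Xmlrpc.invokeMethod[Int, Int]("examples.addOne", 41)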
Example 125
Source File: BakerServiceSpec.scala    From Learn-Scala-Programming   with MIT License 5 votes vote down vote up
package ch15

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.stream.testkit.scaladsl.TestSink
import ch15.model.{RawCookies, ReadyCookies}
import com.lightbend.lagom.scaladsl.server.LocalServiceLocator
import com.lightbend.lagom.scaladsl.testkit.ServiceTest
import org.scalatest.{AsyncWordSpec, Matchers}

class BakerServiceSpec extends AsyncWordSpec with Matchers {

  "The BakerService" should {
    "bake cookies" in ServiceTest.withServer(ServiceTest.defaultSetup) { ctx =>
      new BakerApplication(ctx) with LocalServiceLocator
    } { server =>
      implicit val mat: Materializer = server.materializer
      val input: Source[RawCookies, NotUsed] =
        Source(List(RawCookies(10), RawCookies(10), RawCookies(10)))
          .concat(Source.maybe)

      val client = server.serviceClient.implement[BakerService]

      client.bake.invoke(input).map { output =>
        val probe = output.runWith(TestSink.probe(server.actorSystem))
        probe.request(10)
        probe.expectNext(ReadyCookies(12))
        probe.expectNext(ReadyCookies(12))
        // the remaining 6 raw cookies are not enough to fill the oven, so no third batch arrives
        probe.cancel()
        succeed
      }
    }
  }
} 
Example 126
Source File: Filters.scala    From akka-http-test   with Apache License 2.0 5 votes vote down vote up
import javax.inject._

import akka.stream.Materializer
import play.api._
import play.api.http.HttpFilters
import play.api.mvc._

import scala.concurrent.{ ExecutionContext, Future }


@Singleton
class ExampleFilter @Inject() (implicit override val mat: Materializer, exec: ExecutionContext) extends Filter {
  override def apply(nextFilter: RequestHeader => Future[Result])(requestHeader: RequestHeader): Future[Result] = {
    // Run the next filter in the chain. This will call other filters
    // and eventually call the action. Take the result and modify it
    // by adding a new header.
    nextFilter(requestHeader).map { result =>
      result.withHeaders("X-ExampleFilter" -> "foo")
    }
  }
} 
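The file imports play.api.http.HttpFilters but the registration class is not shown; a minimal sketch of how such a filter is typically wired into Play (the class name is an assumption):

import javax.inject.Inject
import play.api.http.HttpFilters
import play.api.mvc.EssentialFilter

class Filters @Inject() (exampleFilter: ExampleFilter) extends HttpFilters {
  override def filters: Seq[EssentialFilter] = Seq(exampleFilter)
}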
Example 127
Source File: LowLevelServer.scala    From akka-http-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.component.lowlevelserver

import akka.NotUsed
import akka.actor.{ ActorSystem, Props }
import akka.event.{ Logging, LoggingAdapter }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.pattern.ask
import akka.stream.scaladsl.{ Flow, Sink, Source }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.util.Timeout
import com.github.dnvriend.component.lowlevelserver.dto.{ Person, PersonWithId }
import com.github.dnvriend.component.lowlevelserver.marshaller.Marshaller
import com.github.dnvriend.component.lowlevelserver.repository.PersonRepository
import spray.json.{ DefaultJsonProtocol, _ }

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }

class LowLevelServer(implicit val system: ActorSystem, mat: Materializer, ec: ExecutionContext, log: LoggingAdapter, timeout: Timeout) extends DefaultJsonProtocol with Marshaller {
  val personDb = system.actorOf(Props[PersonRepository])

  def debug(t: Any)(implicit log: LoggingAdapter = null): Unit =
    if (Option(log).isEmpty) println(t) else log.debug(t.toString)

  def http200Okay(req: HttpRequest): HttpResponse =
    HttpResponse(StatusCodes.OK)

  def http200AsyncOkay(req: HttpRequest): Future[HttpResponse] =
    Future(http200Okay(req))

  val http200OkayFlow: Flow[HttpRequest, HttpResponse, NotUsed] = Flow[HttpRequest].map { req =>
    HttpResponse(StatusCodes.OK)
  }

  val serverSource: Source[Http.IncomingConnection, Future[Http.ServerBinding]] =
    Http().bind(interface = "localhost", port = 8080)

  val binding: Future[Http.ServerBinding] = serverSource.to(Sink.foreach { conn =>
    //    conn.handleWith(http200OkayFlow)
    //    conn.handleWithSyncHandler(http200Okay)
    //    conn.handleWithAsyncHandler(http200AsyncOkay, 8)
    conn.handleWithAsyncHandler(personRequestHandler)
  }).run()

  def personRequestHandler(req: HttpRequest): Future[HttpResponse] = req match {
    case HttpRequest(HttpMethods.GET, Uri.Path("/api/person"), _, _, _) => for {
      xs <- (personDb ? "findAll").mapTo[List[PersonWithId]]
      entity = HttpEntity(ContentTypes.`application/json`, xs.toJson.compactPrint)
    } yield HttpResponse(StatusCodes.OK, entity = entity)
    case HttpRequest(HttpMethods.POST, Uri.Path("/api/person"), _, ent, _) => for {
      strictEntity <- ent.toStrict(1.second)
      person <- (personDb ? strictEntity.data.utf8String.parseJson.convertTo[Person]).mapTo[PersonWithId]
    } yield HttpResponse(StatusCodes.OK, entity = person.toJson.compactPrint)
    case req =>
      req.discardEntityBytes()
      Future.successful(HttpResponse(StatusCodes.NotFound))
  }
}

object LowLevelServerLauncher extends App with DefaultJsonProtocol {
  // setting up some machinery
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val timeout: Timeout = Timeout(10.seconds)

  new LowLevelServer()
} 
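With LowLevelServerLauncher running, GET http://localhost:8080/api/person returns the repository contents as JSON, a POST to the same path creates a person and echoes it with an id (the JSON field names come from the Marshaller, which is not shown here), and any other path answers 404.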
Example 128
Source File: PostcodeClient.scala    From akka-http-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.component.webservices.postcode

import akka.NotUsed
import akka.actor.ActorSystem
import akka.event.LoggingAdapter
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
import akka.stream.Materializer
import akka.stream.scaladsl.Flow
import com.github.dnvriend.component.webservices.generic.HttpClient
import spray.json.DefaultJsonProtocol

import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try
import scala.util.matching.Regex

case class Address(
  street: String,
  houseNumber: Int,
  houseNumberAddition: String,
  postcode: String,
  city: String,
  municipality: String,
  province: String,
  rdX: Option[Int],
  rdY: Option[Int],
  latitude: Double,
  longitude: Double,
  bagNumberDesignationId: String,
  bagAddressableObjectId: String,
  addressType: String,
  purposes: Option[List[String]],
  surfaceArea: Int,
  houseNumberAdditions: List[String]
)

trait Marshallers extends DefaultJsonProtocol {
  implicit val addressJsonFormat = jsonFormat17(Address)
}

case class GetAddressRequest(zip: String, houseNumber: String)

trait PostcodeClient {
  def address(postcode: String, houseNumber: Int): Future[Option[Address]]

  def address[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Flow[(GetAddressRequest, T), (Option[Address], T), NotUsed]
}

object PostcodeClient {
  import spray.json._
  val ZipcodeWithoutSpacePattern: Regex = """([1-9][0-9]{3})([A-Za-z]{2})""".r
  val ZipcodeWithSpacePattern: Regex = """([1-9][0-9]{3})[\s]([A-Za-z]{2})""".r

  def mapToAddress(json: String)(implicit reader: JsonReader[Address]): Option[Address] =
    Try(json.parseJson.convertTo[Address]).toOption

  def responseToString(resp: HttpResponse)(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Future[String] =
    HttpClient.responseToString(resp)

  def getAddressRequestFlow[T]: Flow[(GetAddressRequest, T), (HttpRequest, T), NotUsed] =
    Flow[(GetAddressRequest, T)].map { case (request, id) => (HttpClient.mkGetRequest(s"/rest/addresses/${request.zip}/${request.houseNumber}/"), id) }

  def mapResponseToAddressFlow[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext, reader: JsonReader[Address]): Flow[(Try[HttpResponse], T), (Option[Address], T), NotUsed] =
    HttpClient.responseToString[T].map { case (json, id) => (mapToAddress(json), id) }
  
  def normalizeZipcode(zipcode: String): Option[String] = zipcode.toUpperCase match {
    case ZipcodeWithoutSpacePattern(numbers, letters) => Option(s"$numbers$letters")
    case ZipcodeWithSpacePattern(numbers, letters)    => Option(s"$numbers$letters")
    case _                                            => None
  }

  def apply()(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext, log: LoggingAdapter) = new PostcodeClientImpl
}

class PostcodeClientImpl()(implicit val system: ActorSystem, val mat: Materializer, val ec: ExecutionContext, val log: LoggingAdapter) extends PostcodeClient with Marshallers {
  import PostcodeClient._
  private val client = HttpClient("postcode")

  override def address(postcode: String, houseNumber: Int): Future[Option[Address]] =
    normalizeZipcode(postcode) match {
      case Some(zip) => client.get(s"/rest/addresses/$zip/$houseNumber/")
        .flatMap(responseToString).map(mapToAddress)
      case None => Future.successful(None)
    }

  override def address[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Flow[(GetAddressRequest, T), (Option[Address], T), NotUsed] =
    getAddressRequestFlow[T]
      .via(client.cachedHostConnectionFlow[T])
      .via(mapResponseToAddressFlow[T])
} 
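The flow variant of address above threads an arbitrary correlation value T through the pipeline, so responses can be matched back to concurrent callers. A usage sketch (values illustrative; the implicit system, materializer, execution context and logging adapter are assumed to be in scope):

import akka.NotUsed
import akka.stream.scaladsl.Source

val client = PostcodeClient()

val lookups: Source[(Option[Address], Long), NotUsed] =
  Source(List(GetAddressRequest("1234AB", "10") -> 1L))
    .via(client.address[Long])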
Example 129
Source File: WeatherClient.scala    From akka-http-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.component.webservices.weather

import akka.NotUsed
import akka.actor.ActorSystem
import akka.event.LoggingAdapter
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
import akka.stream.Materializer
import akka.stream.scaladsl.Flow
import com.github.dnvriend.component.webservices.generic.HttpClient
import spray.json.DefaultJsonProtocol

import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try

case class Wind(speed: Double, deg: Double)
case class Main(temp: Double, temp_min: Double, temp_max: Double, pressure: Double, sea_level: Option[Double], grnd_level: Option[Double], humidity: Int)
case class Cloud(all: Int)
case class Weather(id: Int, main: String, description: String, icon: String)
case class Sys(message: Double, country: String, sunrise: Long, sunset: Long)
case class Coord(lon: Double, lat: Double)
case class WeatherResult(coord: Coord, sys: Sys, weather: List[Weather], base: String, main: Main, wind: Wind, clouds: Cloud, dt: Long, id: Int, name: String, cod: Int)

trait Marshallers extends DefaultJsonProtocol {
  implicit val windJsonFormat = jsonFormat2(Wind)
  implicit val mainJsonFormat = jsonFormat7(Main)
  implicit val cloudJsonFormat = jsonFormat1(Cloud)
  implicit val weatherJsonFormat = jsonFormat4(Weather)
  implicit val sysJsonFormat = jsonFormat4(Sys)
  implicit val coordJsonFormat = jsonFormat2(Coord)
  implicit val weatherResultJsonFormat = jsonFormat11(WeatherResult)
}

case class GetWeatherRequest(zip: String, country: String)

trait OpenWeatherApi {
  def getWeather(zip: String, country: String): Future[Option[WeatherResult]]

  def getWeather[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Flow[(GetWeatherRequest, T), (Option[WeatherResult], T), NotUsed]
}

object OpenWeatherApi {
  import spray.json._
  def apply()(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext, log: LoggingAdapter) = new OpenWeatherApiImpl

  def mapResponseToWeatherResult(json: String)(implicit reader: JsonReader[WeatherResult]): Option[WeatherResult] =
    Try(json.parseJson.convertTo[WeatherResult]).toOption

  def responseToString(resp: HttpResponse)(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Future[String] =
    HttpClient.responseToString(resp)

  def getWeatherRequestFlow[T]: Flow[(GetWeatherRequest, T), (HttpRequest, T), NotUsed] =
    Flow[(GetWeatherRequest, T)].map { case (request, id) => (HttpClient.mkGetRequest(s"/data/2.5/weather?zip=${request.zip},${request.country}"), id) }

  def mapResponseToWeatherResultFlow[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext, reader: JsonReader[WeatherResult]): Flow[(Try[HttpResponse], T), (Option[WeatherResult], T), NotUsed] =
    HttpClient.responseToString[T].map { case (json, id) => (mapResponseToWeatherResult(json), id) }
}

class OpenWeatherApiImpl()(implicit val system: ActorSystem, val ec: ExecutionContext, val mat: Materializer, val log: LoggingAdapter) extends OpenWeatherApi with Marshallers {
  import OpenWeatherApi._

  private val client = HttpClient("weather")

  override def getWeather(zip: String, country: String): Future[Option[WeatherResult]] =
    client.get(s"/data/2.5/weather?zip=$zip,$country").
      flatMap(responseToString)
      .map(mapResponseToWeatherResult)

  override def getWeather[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Flow[(GetWeatherRequest, T), (Option[WeatherResult], T), NotUsed] =
    getWeatherRequestFlow[T]
      .via(client.cachedHostConnectionFlow[T])
      .via(mapResponseToWeatherResultFlow[T])
} 
Example 130
Source File: SimpleServer.scala    From akka-http-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.component.simpleserver

import javax.inject.Inject

import akka.actor.ActorSystem
import akka.event.{ Logging, LoggingAdapter }
import akka.http.scaladsl._
import akka.pattern.CircuitBreaker
import akka.stream.{ ActorMaterializer, Materializer }
import com.github.dnvriend.component.repository.PersonRepository
import com.github.dnvriend.component.simpleserver.route._
import com.google.inject.Singleton
import play.api.Configuration

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

@Singleton
class SimpleServer @Inject() (personDao: PersonRepository, cb: CircuitBreaker, interface: String, port: Int)(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext) {
  Http().bindAndHandle(SimpleServerRestRoutes.routes(personDao, cb), interface, port)
}

object SimpleServerLauncher extends App {
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val log: LoggingAdapter = Logging(system, this.getClass)
  val maxFailures: Int = 3
  val callTimeout: FiniteDuration = 1.seconds
  val resetTimeout: FiniteDuration = 10.seconds
  val cb = new CircuitBreaker(system.scheduler, maxFailures, callTimeout, resetTimeout)
  val config: play.api.Configuration = Configuration(system.settings.config)

  sys.addShutdownHook {
    system.terminate()
  }

  new SimpleServer(new PersonRepository, cb, config.getString("http.interface").getOrElse("0.0.0.0"), config.getInt("http.port").getOrElse(8080))
} 
Example 131
Source File: SimpleServerRestRoutes.scala    From akka-http-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.component.simpleserver.route

import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.{ Directives, Route }
import akka.pattern.CircuitBreaker
import akka.stream.Materializer
import com.github.dnvriend.component.repository.PersonRepository
import com.github.dnvriend.component.simpleserver.dto.http.{ Person, Ping }
import com.github.dnvriend.component.simpleserver.marshaller.Marshallers
import com.github.dnvriend.util.TimeUtil

import scala.concurrent.ExecutionContext

object SimpleServerRestRoutes extends Directives with Marshallers {
  def routes(dao: PersonRepository, cb: CircuitBreaker)(implicit mat: Materializer, ec: ExecutionContext): Route =
    logRequestResult("akka-http-test") {
      pathPrefix("person") {
        path("sync") {
          get {
            complete(dao.personSync)
          }
        } ~
          path("async") {
            get {
              complete(cb.withCircuitBreaker(dao.personAsync))
            }
          } ~
          path("failed") {
            get {
              complete(cb.withCircuitBreaker(dao.personAsyncFailed))
            }
          } ~
          pathEnd {
            get {
              complete(cb.withSyncCircuitBreaker(dao.personSync))
            }
          } ~
          (post & entity(as[Person])) { person =>
            complete(StatusCodes.Created)
          }
      } ~ pathPrefix("persons") {
        pathPrefix("strict" / IntNumber) { numberOfPersons =>
          pathEnd {
            get {
              complete(cb.withSyncCircuitBreaker(dao.listOfPersons(numberOfPersons)))
            }
          }
        } ~ JsonStreamingRoute.route(dao) ~ CsvStreamingRoute.route(dao)
      } ~ (get & pathPrefix("ping")) {
        complete(Ping(TimeUtil.timestamp))
      } ~ SimpleDisjunctionRoute.route ~ TryRoute.route
    }
} 
Example 132
Source File: JsonStreamingRoute.scala    From akka-http-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.component.simpleserver.route

import akka.event.LoggingAdapter
import akka.http.scaladsl.common.{ EntityStreamingSupport, JsonEntityStreamingSupport }
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.server.{ Directives, Route }
import akka.stream.Materializer
import akka.stream.scaladsl.Flow
import akka.util.ByteString
import com.github.dnvriend.component.repository.PersonRepository
import com.github.dnvriend.component.simpleserver.dto.http.Person
import com.github.dnvriend.component.simpleserver.marshaller.Marshallers

import scala.concurrent.ExecutionContext

object JsonStreamingRoute extends Directives with SprayJsonSupport with Marshallers {
  val start = ByteString.empty
  val sep = ByteString("\n")
  val end = ByteString.empty

  implicit val jsonStreamingSupport: JsonEntityStreamingSupport = EntityStreamingSupport.json()
    .withFramingRenderer(Flow[ByteString].intersperse(start, sep, end))
    .withParallelMarshalling(parallelism = 8, unordered = true)

  def route(dao: PersonRepository)(implicit mat: Materializer, ec: ExecutionContext): Route =
    path("stream" / IntNumber) { numberOfPersons =>
      (get & pathEnd) {
        complete(dao.people(numberOfPersons))
      }
    } ~
      (post & path("stream") & entity(asSourceOf[Person])) { people =>
        val total = people.log("people").runFold(0) { case (c, _) => c + 1 }
        complete(total.map(n => s"Received $n persons"))
      }
} 
Example 133
Source File: Module.scala    From akka-http-test   with Apache License 2.0 5 votes vote down vote up
import javax.inject.Inject

import akka.actor.ActorSystem
import akka.pattern.CircuitBreaker
import akka.stream.Materializer
import com.github.dnvriend.component.repository.PersonRepository
import com.github.dnvriend.component.simpleserver.SimpleServer
import com.google.inject.{ AbstractModule, Provider, Provides }
import play.api.Configuration
import play.api.libs.concurrent.AkkaGuiceSupport

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

class Module extends AbstractModule with AkkaGuiceSupport {
  override def configure(): Unit = {
    bind(classOf[SimpleServer])
      .toProvider(classOf[SimpleServerProvider])
      .asEagerSingleton()
  }

  @Provides
  def circuitBreakerProvider(system: ActorSystem)(implicit ec: ExecutionContext): CircuitBreaker = {
    val maxFailures: Int = 3
    val callTimeout: FiniteDuration = 1.seconds
    val resetTimeout: FiniteDuration = 10.seconds
    new CircuitBreaker(system.scheduler, maxFailures, callTimeout, resetTimeout)
  }
}

// alternative way to provide services
class SimpleServerProvider @Inject() (personRepository: PersonRepository, cb: CircuitBreaker, config: Configuration)(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext) extends Provider[SimpleServer] {
  override def get(): SimpleServer =
    new SimpleServer(personRepository, cb, config.getString("http.interface").getOrElse("0.0.0.0"), config.getInt("http.port").getOrElse(8080))
} 
Example 134
Source File: TestSpec.scala    From akka-http-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.stream.testkit.TestSubscriber
import akka.stream.testkit.scaladsl.TestSink
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest._
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatestplus.play.guice.GuiceOneServerPerSuite
import play.api.inject.BindingKey
import play.api.test.WsTestClient

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
import scala.util.Try

class TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with OptionValues
    with TryValues
    with ScalaFutures
    with WsTestClient
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with GuiceOneServerPerSuite {

  def getComponent[A: ClassTag] = app.injector.instanceOf[A]

  def getAnnotatedComponent[A](name: String)(implicit ct: ClassTag[A]): A =
    app.injector.instanceOf[A](BindingKey(ct.runtimeClass.asInstanceOf[Class[A]]).qualifiedWith(name))

  // set the port number of the HTTP server
  override lazy val port: Int = 8080
  implicit val timeout: Timeout = 10.seconds
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 30.seconds, interval = 300.millis)
  implicit val system: ActorSystem = getComponent[ActorSystem]
  implicit val ec: ExecutionContext = getComponent[ExecutionContext]
  implicit val mat: Materializer = getComponent[Materializer]

  // ================================== Supporting Operations ====================================
  implicit class PimpedByteArray(self: Array[Byte]) {
    def getString: String = new String(self)
  }

  implicit class PimpedFuture[T](self: Future[T]) {
    def toTry: Try[T] = Try(self.futureValue)
  }

  implicit class SourceOps[A](src: Source[A, _]) {
    def testProbe(f: TestSubscriber.Probe[A] => Unit): Unit =
      f(src.runWith(TestSink.probe(system)))
  }
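  // Usage sketch (assuming the standard akka-stream-testkit probe API):
  //   Source(1 to 3).testProbe(_.request(3).expectNext(1, 2, 3).expectComplete())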

  def killActors(actors: ActorRef*): Unit = {
    val tp = TestProbe()
    actors.foreach { (actor: ActorRef) =>
      tp watch actor
      actor ! PoisonPill
      tp.expectTerminated(actor)
    }
  }

  override protected def beforeEach(): Unit = {
  }
} 
Example 135
Source File: Main.scala    From Pi-Akka-Cluster   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.akka_oled

import akka.NotUsed
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.cluster.ddata.LWWMapKey
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.management.scaladsl.AkkaManagement
import akka.stream.Materializer
import akkapi.cluster.{ClusterStatusTracker, OledClusterVisualizer, OledDriver, Settings}
import spray.json.DefaultJsonProtocol

object Main extends SprayJsonSupport with DefaultJsonProtocol {

  case class NodeStatus(status: String)

  implicit val transactionFormat = jsonFormat1(NodeStatus)

  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { ctx =>

    val oledDriver = ctx.spawn(OledDriver(settings), "oled-driver")
    oledDriver ! OledDriver.RegisterView("Cluster State", 0)
    oledDriver ! OledDriver.RegisterView("Distributed Data State", 1)

    val clusterView = ctx.spawn(OledClusterVisualizer(0, settings, oledDriver), "oled-cluster-view")
    val clusterStatusTracker = ctx.spawn(ClusterStatusTracker(settings, None), "cluster-status-tracker")
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(clusterView)

    val ddataTracker = ctx.spawn(
      DistributedDataTracker(1, LWWMapKey[String, String]("cache"), oledDriver),
      "oled-ddata-view")

    val routes = new Routes(ddataTracker)(ctx.system)

    implicit val untypedSystem: akka.actor.ActorSystem = ctx.system.toClassic
    implicit val mat: Materializer = Materializer.createMaterializer(ctx.system.toClassic)
    Http()(ctx.system.toClassic).bindAndHandle(routes.route,
      settings.config.getString("cluster-node-configuration.external-ip"), 8080)

    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }
}

object DisplayDistributedDataMain {
  def main(args: Array[String]): Unit = {
    val settings = Settings()
    val system = ActorSystem[NotUsed](Main(settings), "akka-oled", settings.config)

    // Start Akka HTTP Management extension
    AkkaManagement(system).start()
  }
} 
Example 136
Source File: Main.scala    From Pi-Akka-Cluster   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.akka_oled

import akka.NotUsed
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.cluster.sharding.typed.scaladsl.{ClusterSharding, Entity}
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.management.scaladsl.AkkaManagement
import akka.persistence.typed.PersistenceId
import akka.stream.Materializer
import akkapi.cluster.{ClusterStatusTracker, OledClusterVisualizer, OledDriver, Settings}
import spray.json._

import scala.concurrent.ExecutionContextExecutor

object Main extends SprayJsonSupport with DefaultJsonProtocol {

  case class AddPoints(points: Int)

  implicit val transactionFormat = jsonFormat1(AddPoints)

  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { ctx =>
    implicit val system = ctx.system
    implicit val untypedSystem: akka.actor.ActorSystem = ctx.system.toClassic
    implicit val ec: ExecutionContextExecutor = ctx.system.executionContext

    val oledDriver = ctx.spawn(OledDriver(settings), "oled-driver")
    oledDriver ! OledDriver.RegisterView("Cluster State", 0)
    oledDriver ! OledDriver.RegisterView("Sharding State", 1)

    val clusterView = ctx.spawn(OledClusterVisualizer(0, settings, oledDriver), "oled-cluster-view")
    val clusterStatusTracker = ctx.spawn(ClusterStatusTracker(settings, None), "cluster-status-tracker")
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(clusterView)

    val shardVisualizer = ctx.spawn(OledShardingVisualizer(1, oledDriver), "oled-sharding-view")

    val sharding = ClusterSharding(ctx.system)
    sharding.init(Entity(typeKey = ClientEntity.TypeKey) { entityContext =>
      ClientEntity(entityContext.entityId,
        PersistenceId(entityContext.entityTypeKey.name, entityContext.entityId),
        shardVisualizer)
    })
    val tracker = ctx.spawn(ShardStateTracker(shardVisualizer), "oled-sharding-tracker")
    ctx.spawn(ShardStateScheduler(sharding.shardState, tracker), "oled-sharding-scheduler")

    val routes = new Routes(sharding)

    implicit val mat: Materializer = Materializer.createMaterializer(ctx.system.toClassic)
    Http()(ctx.system.toClassic).bindAndHandle(routes.route,
      settings.config.getString("cluster-node-configuration.external-ip"), 8080)

    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }
}

object DisplayClusterShardingMain {
  def main(args: Array[String]): Unit = {
    val settings = Settings()
    val system = ActorSystem[NotUsed](Main(settings), "akka-oled", settings.config)

    // Start Akka HTTP Management extension
    AkkaManagement(system).start()
  }
} 
Example 137
Source File: EndpointsSettings.scala    From endpoints4s   with MIT License 5 votes vote down vote up
package endpoints4s.akkahttp.client

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpEntity, HttpRequest, HttpResponse, Uri}
import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.Try

final case class EndpointsSettings(
    requestExecutor: AkkaHttpRequestExecutor,
    baseUri: Uri = Uri("/"),
    toStrictTimeout: FiniteDuration = 2.seconds,
    stringContentExtractor: HttpEntity.Strict => String = _.data.utf8String
)

trait AkkaHttpRequestExecutor {
  def apply(request: HttpRequest): Future[HttpResponse]
}

object AkkaHttpRequestExecutor {
  def cachedHostConnectionPool(host: String, port: Int)(implicit
      system: ActorSystem,
      materializer: Materializer
  ): AkkaHttpRequestExecutor =
    default(Http().cachedHostConnectionPool[Int](host, port))

  def default(
      poolClientFlow: Flow[
        (HttpRequest, Int),
        (Try[HttpResponse], Int),
        Http.HostConnectionPool
      ]
  )(implicit materializer: Materializer): AkkaHttpRequestExecutor =
    new AkkaHttpRequestExecutor {
      override def apply(request: HttpRequest): Future[HttpResponse] =
        Source
          .single(request -> 1)
          .via(poolClientFlow)
          .map(_._1.get)
          .runWith(Sink.head)
    }
} 
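A hedged wiring sketch for the executor and settings above; the system name and port are made up, and Materializer.createMaterializer is assumed available (as in the examples above):

  implicit val system: ActorSystem = ActorSystem("client")
  implicit val mat: Materializer = Materializer.createMaterializer(system)

  // One pooled executor per host/port pair, bundled into the settings case class.
  val executor = AkkaHttpRequestExecutor.cachedHostConnectionPool("localhost", 8080)
  val settings = EndpointsSettings(executor, baseUri = Uri("/"))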
Example 138
Source File: AkkaHttpClientEndpointsTest.scala    From endpoints4s   with MIT License 5 votes vote down vote up
package endpoints4s.akkahttp.client

import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.testkit.TestKit
import endpoints4s.algebra
import endpoints4s.algebra.ChunkedJsonEntitiesTestApi

import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

class TestClient(settings: EndpointsSettings)(implicit
    EC: ExecutionContext,
    M: Materializer
) extends Endpoints(settings)
    with BasicAuthentication
    with algebra.EndpointsTestApi
    with algebra.BasicAuthenticationTestApi
    with algebra.TextEntitiesTestApi
    with algebra.JsonFromCodecTestApi
    with algebra.SumTypedEntitiesTestApi
    with algebra.circe.JsonFromCirceCodecTestApi
    with JsonEntitiesFromCodecs
    with algebra.circe.JsonEntitiesFromCodecs
    with ChunkedJsonEntities
    with ChunkedJsonEntitiesTestApi
    with algebra.circe.ChunkedJsonEntitiesTestApi

class AkkaHttpClientEndpointsTest
    extends algebra.client.EndpointsTestSuite[TestClient]
    with algebra.client.BasicAuthTestSuite[TestClient]
    with algebra.client.JsonFromCodecTestSuite[TestClient]
    with algebra.client.TextEntitiesTestSuite[TestClient]
    with algebra.client.SumTypedEntitiesTestSuite[TestClient]
    with algebra.client.ChunkedJsonEntitiesTestSuite[TestClient] {

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  val client: TestClient = new TestClient(
    EndpointsSettings(
      AkkaHttpRequestExecutor
        .cachedHostConnectionPool("localhost", wiremockPort)
    )
  )

  val streamingClient: TestClient = new TestClient(
    EndpointsSettings(
      AkkaHttpRequestExecutor
        .cachedHostConnectionPool("localhost", streamingPort)
    )
  )

  def call[Req, Resp](
      endpoint: client.Endpoint[Req, Resp],
      args: Req
  ): Future[Resp] = endpoint(args)

  def encodeUrl[A](url: client.Url[A])(a: A): String = url.encode(a)

  def callStreamedEndpoint[A, B](
      endpoint: streamingClient.Endpoint[A, streamingClient.Chunks[B]],
      req: A
  ): Future[Seq[Either[String, B]]] =
    Source
      .futureSource(endpoint(req))
      .map(Right(_))
      .recover { case NonFatal(t) => Left(t.toString) }
      .runWith(Sink.seq)

  def callStreamedEndpoint[A, B](
      endpoint: streamingClient.Endpoint[streamingClient.Chunks[A], B],
      req: Source[A, _]
  ): Future[B] =
    endpoint(req)

  clientTestSuite()
  basicAuthSuite()
  jsonFromCodecTestSuite()
  textEntitiesTestSuite()
  sumTypedRequestsTestSuite()

  override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }

} 
Example 139
Source File: AkkaHttpClientEndpointsJsonSchemaTest.scala    From endpoints4s   with MIT License 5 votes vote down vote up
package endpoints4s.akkahttp.client

import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.testkit.TestKit
import endpoints4s.algebra.client.{BasicAuthTestSuite, JsonTestSuite}
import endpoints4s.algebra.{Address, BasicAuthenticationTestApi, JsonTestApi, User}
import endpoints4s.generic

import scala.concurrent.{ExecutionContext, Future}

class TestJsonSchemaClient(settings: EndpointsSettings)(implicit
    EC: ExecutionContext,
    M: Materializer
) extends Endpoints(settings)
    with BasicAuthentication
    with BasicAuthenticationTestApi
    with generic.JsonSchemas
    with JsonTestApi
    with JsonEntitiesFromSchemas {
  implicit def userCodec: JsonSchema[User] = genericJsonSchema[User]
  implicit def addresCodec: JsonSchema[Address] = genericJsonSchema[Address]
}

class AkkaHttpClientEndpointsJsonSchemaTest
    extends JsonTestSuite[TestJsonSchemaClient]
    with BasicAuthTestSuite[TestJsonSchemaClient] {

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  val client: TestJsonSchemaClient = new TestJsonSchemaClient(
    EndpointsSettings(
      AkkaHttpRequestExecutor
        .cachedHostConnectionPool("localhost", wiremockPort)
    )
  )

  def call[Req, Resp](
      endpoint: client.Endpoint[Req, Resp],
      args: Req
  ): Future[Resp] = endpoint(args)

  def encodeUrl[A](url: client.Url[A])(a: A): String = url.encode(a)

  clientTestSuite()
  basicAuthSuite()

  override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }

} 
Example 140
Source File: FileDirective.scala    From akka-http-file-server   with Apache License 2.0 5 votes vote down vote up
package akkahttp

import java.io.File

import akka.http.scaladsl.model.{HttpEntity, MediaTypes, Multipart}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.stream.Materializer
import akka.stream.io.{SynchronousFileSink, SynchronousFileSource}

import scala.concurrent.{ExecutionContext, Future}


object FileDirective {

  //form field name
  type Name = String

  case class FileInfo(fileName: String, targetFile: String, length: Long)

  private def uploadFileImpl(implicit mat: Materializer, ec: ExecutionContext): Directive1[Future[Map[Name, FileInfo]]] = {
    Directive[Tuple1[Future[Map[Name, FileInfo]]]] { inner =>
      entity(as[Multipart.FormData]) { (formdata: Multipart.FormData) =>
        val fileNameMap = formdata.parts.mapAsync(1) { p =>
          if (p.filename.isDefined) {
            val targetPath = File.createTempFile(s"userfile_${p.name}_${p.filename.getOrElse("")}", "")
            val written = p.entity.dataBytes.runWith(SynchronousFileSink(targetPath))
            written.map(length =>
              Map(p.name -> FileInfo(p.filename.get, targetPath.getAbsolutePath, length)))
          } else {
            Future(Map.empty[Name, FileInfo])
          }
        }.runFold(Map.empty[Name, FileInfo])((set, value) => set ++ value)
        inner(Tuple1(fileNameMap))
      }
    }
  }

  def uploadFile: Directive1[Map[Name, FileInfo]] = {
    Directive[Tuple1[Map[Name, FileInfo]]] { inner =>
      extractMaterializer {implicit mat =>
        extractExecutionContext {implicit ec =>
          uploadFileImpl(mat, ec) { filesFuture =>
            ctx => {
              filesFuture.map(map => inner(Tuple1(map))).flatMap(route => route(ctx))
            }
          }
        }
      }
    }
  }

  def downloadFile(file: String): Route = {
    val f = new File(file)
    val responseEntity = HttpEntity(
      MediaTypes.`application/octet-stream`,
      f.length,
      SynchronousFileSource(f, chunkSize = 262144))
    complete(responseEntity)
  }
} 
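A hedged route sketch for the directives above — the paths and file location are invented; uploadFile and downloadFile come straight from FileDirective:

  import FileDirective._

  val route: Route =
    path("upload") {
      post {
        uploadFile { files =>
          // files: Map[Name, FileInfo], one entry per uploaded form field
          complete(files.toString)
        }
      }
    } ~
    path("download") {
      get {
        downloadFile("/tmp/example.bin")
      }
    }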
Example 141
Source File: StaticService.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.services

import akka.actor.ActorSystem
import akka.http.scaladsl.marshalling.ToResponseMarshallable
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.marshalling.ToResponseMarshallable._
import akka.stream.Materializer
import org.apache.gearpump.util.Util
// NOTE: This cannot be removed!!!
import org.apache.gearpump.services.util.UpickleUtil._


class StaticService(override val system: ActorSystem, supervisorPath: String)
  extends BasicService {

  private val version = Util.version

  protected override def prefix = Neutral

  override def cache: Boolean = true

  protected override def doRoute(implicit mat: Materializer) = {
    path("version") {
      get { ctx =>
        ctx.complete(version)
      }
    } ~
    // For YARN usage, we need to make sure supervisor-path
    // can be accessed without authentication.
    path("supervisor-actor-path") {
      get {
        complete(supervisorPath)
      }
    } ~
    pathEndOrSingleSlash {
      getFromResource("index.html")
    } ~
    path("favicon.ico") {
      complete(ToResponseMarshallable(StatusCodes.NotFound))
    } ~
    pathPrefix("webjars") {
      get {
        getFromResourceDirectory("META-INF/resources/webjars")
      }
    } ~
    path(Remaining) { path =>
      getFromResource("%s" format path)
    }
  }
} 
Example 142
Source File: SupervisorService.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.services

import scala.concurrent.Future
import scala.util.{Failure, Success}

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.Materializer

import org.apache.gearpump.cluster.AppMasterToMaster.{GetWorkerData, WorkerData}
import org.apache.gearpump.cluster.ClientToMaster._
import org.apache.gearpump.cluster.worker.WorkerId
import org.apache.gearpump.services.SupervisorService.{Path, Status}
import org.apache.gearpump.util.ActorUtil._
// NOTE: This cannot be removed!!!
import org.apache.gearpump.services.util.UpickleUtil._


class SupervisorService(val master: ActorRef, val supervisor: ActorRef, override val system: ActorSystem)
  extends BasicService {

  import upickle.default.write

  private def authorize(internal: Route): Route = {
    if (supervisor == null) {
      failWith(new Exception("API not enabled, cannot find a valid supervisor! " +
        "Please make sure Gearpump is running on top of YARN or other resource managers"))
    } else {
      internal
    }
  }

  protected override def doRoute(implicit mat: Materializer) = pathPrefix("supervisor") {
    pathEnd {
      get {
        val path = if (supervisor == null) {
          null
        } else {
          supervisor.path.toString
        }
        complete(write(Path(path)))
      }
    } ~
    path("status") {
      post {
        if (supervisor == null) {
          complete(write(Status(enabled = false)))
        } else {
          complete(write(Status(enabled = true)))
        }
      }
    } ~
    path("addworker" / IntNumber) { workerCount =>
      post {
        authorize {
          onComplete(askActor[CommandResult](supervisor, AddWorker(workerCount))) {
            case Success(value) =>
              complete(write(value))
            case Failure(ex) =>
              failWith(ex)
          }
        }
      }
    } ~
    path("removeworker" / Segment) { workerIdString =>
      post {
        authorize {
          val workerId = WorkerId.parse(workerIdString)
          def future(): Future[CommandResult] = {
            askWorker[WorkerData](master, workerId, GetWorkerData(workerId)).flatMap{workerData =>
              val containerId = workerData.workerDescription.resourceManagerContainerId
              askActor[CommandResult](supervisor, RemoveWorker(containerId))
            }
          }

          onComplete[CommandResult](future()) {
            case Success(value) =>
              complete(write(value))
            case Failure(ex) =>
              failWith(ex)
          }
        }
      }
    }
  }
}

object SupervisorService {
  case class Status(enabled: Boolean)

  case class Path(path: String)
} 
Example 143
Source File: BasicService.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.services

import scala.concurrent.ExecutionContext

import akka.actor.ActorSystem
import akka.http.scaladsl.model.headers.CacheDirectives.{`max-age`, `no-cache`}
import akka.http.scaladsl.model.headers.`Cache-Control`
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.Materializer

import org.apache.gearpump.util.{Constants, LogUtil}
// NOTE: This cannot be removed!!!
import org.apache.gearpump.services.util.UpickleUtil._

trait RouteService {
  def route: Route
}


trait BasicService extends RouteService {

  implicit def system: ActorSystem

  implicit def timeout: akka.util.Timeout = Constants.FUTURE_TIMEOUT

  implicit def ec: ExecutionContext = system.dispatcher

  protected def doRoute(implicit mat: Materializer): Route

  protected def prefix = Slash ~ "api" / s"$REST_VERSION"

  protected val LOG = LogUtil.getLogger(getClass)

  protected def cache = false
  private val noCacheHeader = `Cache-Control`(`no-cache`, `max-age`(0L))

  def route: Route = encodeResponse {
    extractMaterializer { implicit mat =>
      rawPathPrefix(prefix) {
        if (cache) {
          doRoute(mat)
        } else {
          respondWithHeader(noCacheHeader) {
            doRoute(mat)
          }
        }
      }
    }
  }
} 
Example 144
Source File: AdminService.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.services

import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.stream.Materializer

// NOTE: This cannot be removed!!!
import org.apache.gearpump.services.util.UpickleUtil._



// TODO: Add YARN resource manager capacities to add/remove machines.
class AdminService(override val system: ActorSystem)
  extends BasicService {

  protected override def prefix = Neutral

  protected override def doRoute(implicit mat: Materializer) = {
    path("terminate") {
      post {
        system.terminate()
        complete(StatusCodes.NotFound)
      }
    }
  }
} 
Example 145
Source File: WorkerService.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.services

import scala.util.{Failure, Success}

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.server.Directives._
import akka.stream.Materializer

import org.apache.gearpump.cluster.AppMasterToMaster.{GetWorkerData, WorkerData}
import org.apache.gearpump.cluster.ClientToMaster.{QueryHistoryMetrics, QueryWorkerConfig, ReadOption}
import org.apache.gearpump.cluster.ClusterConfig
import org.apache.gearpump.cluster.MasterToClient.{HistoryMetrics, WorkerConfig}
import org.apache.gearpump.cluster.worker.WorkerId
import org.apache.gearpump.util.ActorUtil._
import org.apache.gearpump.util.Constants
// NOTE: This cannot be removed!!!
import org.apache.gearpump.services.util.UpickleUtil._


class WorkerService(val master: ActorRef, override val system: ActorSystem)
  extends BasicService {

  import upickle.default.write
  private val systemConfig = system.settings.config
  private val concise = systemConfig.getBoolean(Constants.GEARPUMP_SERVICE_RENDER_CONFIG_CONCISE)

  protected override def doRoute(implicit mat: Materializer) = pathPrefix("worker" / Segment) {
    workerIdString => {
      pathEnd {
        val workerId = WorkerId.parse(workerIdString)
        onComplete(askWorker[WorkerData](master, workerId, GetWorkerData(workerId))) {
          case Success(value: WorkerData) =>
            complete(write(value.workerDescription))
          case Failure(ex) => failWith(ex)
        }
      }
    } ~
    path("config") {
      val workerId = WorkerId.parse(workerIdString)
      onComplete(askWorker[WorkerConfig](master, workerId, QueryWorkerConfig(workerId))) {
        case Success(value: WorkerConfig) =>
          val config = Option(value.config).map(ClusterConfig.render(_, concise)).getOrElse("{}")
          complete(config)
        case Failure(ex) =>
          failWith(ex)
      }
    } ~
    path("metrics" / RemainingPath ) { path =>
      val workerId = WorkerId.parse(workerIdString)
      parameter(ReadOption.Key ? ReadOption.ReadLatest) { readOption =>
        val query = QueryHistoryMetrics(path.head.toString, readOption)
        onComplete(askWorker[HistoryMetrics](master, workerId, query)) {
          case Success(value) =>
            complete(write(value))
          case Failure(ex) =>
            failWith(ex)
        }
      }
    }
  }
} 
Example 146
Source File: EventUnmarshaller.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.stream

import scala.concurrent.{ ExecutionContext, Future }
import akka.NotUsed
import akka.stream.scaladsl.Framing
import akka.stream.scaladsl.JsonFraming
import akka.stream.Materializer
import akka.stream.scaladsl.Source

import akka.http.scaladsl.model.HttpCharsets._
import akka.http.scaladsl.model.MediaTypes._
import akka.http.scaladsl.model.headers.Accept
import akka.http.scaladsl.marshalling._
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model._

import akka.util.ByteString
import spray.json._

import akka.http.scaladsl.unmarshalling.Unmarshaller
import akka.http.scaladsl.unmarshalling.Unmarshaller._

object EventUnmarshaller extends EventMarshalling {
  val supported = Set[ContentTypeRange](
    ContentTypes.`text/plain(UTF-8)`, 
    ContentTypes.`application/json`
  )

  def create(maxLine: Int, maxJsonObject: Int) = {
    new Unmarshaller[HttpEntity, Source[Event, _]] {
      def apply(entity: HttpEntity)(implicit ec: ExecutionContext, 
        materializer: Materializer): Future[Source[Event, _]] = {

        val future = entity.contentType match {
          case ContentTypes.`text/plain(UTF-8)` => 
            Future.successful(LogJson.textInFlow(maxLine))
          case ContentTypes.`application/json` =>
            Future.successful(LogJson.jsonInFlow(maxJsonObject))
          case other => 
            Future.failed(
              new UnsupportedContentTypeException(supported)
            )
        }
        future.map(flow => entity.dataBytes.via(flow))(ec)
      } 
    }.forContentTypes(supported.toList: _*)
  }
} 
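A hedged sketch of putting the unmarshaller in scope for a route; the size limits are arbitrary, and the entity(as[...]) usage assumes akka-http's standard unmarshalling directives:

  implicit val eventUnmarshaller = EventUnmarshaller.create(maxLine = 10240, maxJsonObject = 102400)

  // With the implicit in scope, a route can turn the request entity into a stream of events:
  //   (post & entity(as[Source[Event, _]])) { events => ... }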
Example 147
Source File: GrpcAkkaStreamsServerCalls.scala    From grpcakkastream   with MIT License 5 votes vote down vote up
package grpc.akkastreams

import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Sink, Source}
import io.grpc.ServerCallHandler
import io.grpc.stub.{CallStreamObserver, ServerCalls, StreamObserver}

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success}

object GrpcAkkaStreamsServerCalls {

  def unaryCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] = ServerCalls.asyncUnaryCall(
    new ServerCalls.UnaryMethod[I, O] {
      override def invoke(request: I, responseObserver: StreamObserver[O]) =
        Source
          .single(request)
          .via(service)
          .runForeach(responseObserver.onNext)
          .onComplete {
            case Success(_) => responseObserver.onCompleted()
            case Failure(t) => responseObserver.onError(t)
          }(mat.executionContext)
    }
  )

  def serverStreamingCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] =
    ServerCalls.asyncServerStreamingCall(
      new ServerCalls.ServerStreamingMethod[I, O] {
        override def invoke(request: I, responseObserver: StreamObserver[O]) =
          Source
            .single(request)
            .via(service)
            .runWith(Sink.fromGraph(new GrpcSinkStage[O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            )))
      }
    )

  def clientStreamingCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] = ServerCalls.asyncClientStreamingCall(
    new ServerCalls.ClientStreamingMethod[I, O] {
      override def invoke(responseObserver: StreamObserver[O]): StreamObserver[I] =
      // blocks until the GraphStage is fully initialized
        Await.result(
          Source
            .fromGraph(new GrpcSourceStage[I, O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))
            .via(service)
            .to(Sink.fromGraph(new GrpcSinkStage[O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))).run(),
          Duration.Inf
        )
    }
  )

  def bidiStreamingCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] = ServerCalls.asyncBidiStreamingCall(
    new ServerCalls.BidiStreamingMethod[I, O] {
      override def invoke(responseObserver: StreamObserver[O]): StreamObserver[I] =
      // blocks until the GraphStage is fully initialized
        Await.result(
          Source
            .fromGraph(new GrpcSourceStage[I, O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))
            .via(service)
            .to(Sink.fromGraph(new GrpcSinkStage[O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))).run(),
          Duration.Inf
        )
    }
  )
} 
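Each handler above wraps a plain akka-stream Flow. A hedged sketch of a service flow handed to unaryCall — the String request/response types are placeholders, and an implicit Materializer named mat is assumed in scope:

  // An echo-style unary service: one request in, one response out.
  val service: Flow[String, String, akka.NotUsed] = Flow[String].map(req => s"echo: $req")
  val handler: ServerCallHandler[String, String] = GrpcAkkaStreamsServerCalls.unaryCall(service)

unaryCall materializes a single-element Source through the flow for every incoming call, then completes or fails the gRPC StreamObserver accordingly.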
Example 148
Source File: TestKit.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.basic.routing

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpMethods._
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.server.Directives._
import akka.stream.{ActorMaterializer, Materializer}
import org.scalatest.{ Matchers, WordSpec }
import akka.http.scaladsl.testkit.ScalatestRouteTest



object TestKit extends WordSpec with Matchers with ScalatestRouteTest {

  def main(args: Array[String]) {

    val route =
      path("welcome"){
        get{
          complete {
            "welcome to rest service"
          }
        }
      } ~
        path("demo"){
          get{
            complete {
              "welcome to demonstration"
            }
          }
        }


    val getRequest = HttpRequest(GET, "/welcome")

    getRequest ~> route ~> check {
      status.intValue shouldEqual 200
      entityAs[String] shouldEqual "welcome to rest service"
    }

    system.terminate()
  }

} 
Example 149
Source File: RoutingDSL.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.basic.routing

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.stream.{ActorMaterializer, Materializer}


object RoutingDSL {

  def main(args: Array[String]) {

    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat:Materializer = ActorMaterializer()

    val route =
      path("welcome"){
        get{
          complete {
            "welcome to rest service"
          }
        }
      } ~
      path("demo"){
        get{
          complete {
            "welcome to demonstration"
          }
        }
      }

    Http().bindAndHandle(route, "localhost", 8090)

  }

} 
Example 150
Source File: Failure.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.basic.routing

import akka.actor.ActorSystem
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.ExceptionHandler
import akka.stream.{ActorMaterializer, Materializer}


object Failure {

  def main(args: Array[String]) {

    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat:Materializer = ActorMaterializer()

    implicit def myExceptionHandler: ExceptionHandler = ExceptionHandler {
      case _: ArithmeticException =>
        complete(HttpResponse(StatusCodes.BadRequest, entity = "Bad numbers, bad result!!!"))
      case e: Throwable => {
        println(e.getMessage)
        println(e.getStackTraceString)
        complete(HttpResponse(StatusCodes.BadRequest, entity = e.getMessage))
      }
    }

    val route =
      path("welcome"){
        get{
          complete {
            "welcome to rest service"
          }
        }
      } ~
      path("demo"){
        get {
          complete {
            100/0
            "welcome to demonstration"
          }
        }
      }

    Http().bindAndHandle(route, "localhost", 8090)
  }

} 
Example 151
Source File: UnMarshalling.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.basic.routing

import akka.actor.ActorSystem
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.{HttpMethods, HttpRequest, HttpResponse, MessageEntity}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.{ActorMaterializer, Materializer}
import akka.util.ByteString

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import spray.json._


object UnMarshalling {

  def main(args: Array[String]) {

    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat:Materializer = ActorMaterializer()

    //type FromStringUnmarshaller[T] = Unmarshaller[String, T]
    val intFuture = Unmarshal("42").to[Int]
    val int = Await.result(intFuture, 1.second)
    println("int unmarshalling "+int)

    //type FromStringUnmarshaller[T] = Unmarshaller[String, T]
    val boolFuture = Unmarshal("off").to[Boolean]
    val bool = Await.result(boolFuture, 1.second)
    println("off unmarshalling "+bool)

    //type ToEntityMarshaller[T] = Marshaller[T, MessageEntity]
    val string = "Yeah"
    val entityFuture = Marshal(string).to[MessageEntity]
    val entity = Await.result(entityFuture, 1.second) // don't block in non-test code!
    println(entity)

    //type ToResponseMarshaller[T] = Marshaller[T, HttpResponse]
    val errorMsg = "Not found, pal!"
    val responseFuture = Marshal(404 -> errorMsg).to[HttpResponse]
    val response = Await.result(responseFuture, 1.second)
    println(response)


    //type FromEntityUnmarshaller[T] = Unmarshaller[HttpEntity, T]
    val jsonByteString = ByteString("""{"name":"Hello"}""")
    val httpRequest = HttpRequest(HttpMethods.POST, entity = jsonByteString)
    val jsonDataUnmarshalledFuture = Unmarshal(httpRequest).to[String]
    val jsonDataUnmarshalled = Await.result(jsonDataUnmarshalledFuture, 1.second)
    println(jsonDataUnmarshalled)

    sys.terminate()

  }

} 
Example 152
Source File: Rejection.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.basic.routing

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.stream.{ActorMaterializer, Materializer}


object Rejection {

  def main(args: Array[String]) {

    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat:Materializer = ActorMaterializer()

    implicit def myRejectionHandler: RejectionHandler = RejectionHandler.newBuilder().handle {
      case MissingCookieRejection(cookieName) =>
        complete(HttpResponse(StatusCodes.BadRequest, entity = "No cookies, no service!!!"))
    }.handleNotFound {
      complete((StatusCodes.NotFound, "Not here!"))
    }.result()

    val route =
      path("welcome"){
        get{
          complete {
            "welcome to rest service"
          }
        }
      } ~
      path("demo"){
        get{
          complete {
            "welcome to demonstration"
          }
        }
      } ~
      path("wrong"){
        reject{
          ValidationRejection("Invalid path", None)
        }
      }

    Http().bindAndHandle(route, "localhost", 8090)

  }

} 
Example 153
Source File: ReverseProxy.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.basic.serving

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.{Host, `Access-Control-Allow-Origin`}
import akka.stream.scaladsl.Flow
import akka.stream.{ActorMaterializer, Materializer}


object ReverseProxy {

  def main(args: Array[String]) {
    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat:Materializer = ActorMaterializer()

    val redirectHost = "localhost"
    val redirectPort = 8090

    val requestFlow = Flow.fromFunction[HttpRequest, HttpRequest]( request => {
      request
        .withUri(request.uri.withAuthority(redirectHost, redirectPort))
        .mapHeaders(headers => headers.filterNot(_.lowercaseName() == Host.lowercaseName))
        .addHeader(Host(redirectHost, redirectPort))
    })

    val outgoingConnection = Http().outgoingConnection(redirectHost, redirectPort)

    val responseFlow = Flow.fromFunction[HttpResponse, HttpResponse]( response => {
      response.withHeaders(`Access-Control-Allow-Origin`.*)
    })

    Http().bindAndHandle(requestFlow via outgoingConnection via responseFlow, "localhost", 8080)
  }

} 
Example 154
Source File: PersistenceComponents.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.scaladsl.persistence

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.lightbend.lagom.internal.persistence.ReadSideConfig
import com.lightbend.lagom.internal.scaladsl.persistence.ReadSideImpl
import com.lightbend.lagom.scaladsl.cluster.ClusterComponents
import com.lightbend.lagom.scaladsl.projection.ProjectionComponents
import play.api.Configuration

import scala.concurrent.ExecutionContext


trait ReadSidePersistenceComponents extends WriteSidePersistenceComponents with ProjectionComponents {
  def actorSystem: ActorSystem
  def executionContext: ExecutionContext
  def materializer: Materializer

  def configuration: Configuration

  lazy val readSideConfig: ReadSideConfig = ReadSideConfig(
    configuration.underlying.getConfig("lagom.persistence.read-side")
  )
  lazy val readSide: ReadSide =
    new ReadSideImpl(actorSystem, readSideConfig, persistentEntityRegistry, projectionRegistry, None)(
      executionContext,
      materializer
    )
} 
Example 155
Source File: ReadSideImpl.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.scaladsl.persistence

import java.net.URLEncoder

import akka.actor.ActorSystem
import akka.actor.Props
import akka.stream.Materializer
import com.lightbend.lagom.internal.persistence.ReadSideConfig
import com.lightbend.lagom.internal.persistence.cluster.ClusterStartupTask
import com.lightbend.lagom.internal.projection.ProjectionRegistry
import com.lightbend.lagom.internal.projection.ProjectionRegistryActor.WorkerCoordinates
import com.lightbend.lagom.scaladsl.persistence._

import scala.concurrent.ExecutionContext

private[lagom] class ReadSideImpl(
    system: ActorSystem,
    config: ReadSideConfig,
    persistentEntityRegistry: PersistentEntityRegistry,
    projectionRegistry: ProjectionRegistry,
    name: Option[String]
)(implicit ec: ExecutionContext, mat: Materializer)
    extends ReadSide {
  override def register[Event <: AggregateEvent[Event]](processorFactory: => ReadSideProcessor[Event]): Unit =
    registerFactory(() => processorFactory)

  private[lagom] def registerFactory[Event <: AggregateEvent[Event]](
      processorFactory: () => ReadSideProcessor[Event]
  ) = {
    val readSideProcessor = processorFactory()
    val readSideName      = name.fold("")(_ + "-") + readSideProcessor.readSideName
    val tags              = readSideProcessor.aggregateTags
    val entityIds         = tags.map(_.tag)
    // try to create one instance to fail fast
    val eventClass = tags.headOption match {
      case Some(tag) => tag.eventType
      case None =>
        throw new IllegalArgumentException(s"ReadSideProcessor ${readSideProcessor.getClass.getName} returned 0 tags")
    }

    val encodedReadSideName = URLEncoder.encode(readSideName, "utf-8")
    val globalPrepareTask: ClusterStartupTask =
      ClusterStartupTask(
        system,
        s"readSideGlobalPrepare-$encodedReadSideName",
        () => processorFactory().buildHandler().globalPrepare(),
        config.globalPrepareTimeout,
        config.role,
        config.minBackoff,
        config.maxBackoff,
        config.randomBackoffFactor
      )

    val projectionName = readSideName

    val readSidePropsFactory: WorkerCoordinates => Props = (coordinates) =>
      ReadSideActor.props(
        coordinates,
        config,
        eventClass,
        globalPrepareTask,
        persistentEntityRegistry.eventStream[Event],
        processorFactory
      )

    projectionRegistry.registerProjection(
      projectionName,
      entityIds,
      readSidePropsFactory,
      config.role
    )
  }
} 
Example 156
Source File: ServiceSupport.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.it

import java.util.Collections
import java.util.function.{ Function => JFunction }

import akka.stream.Materializer
import akka.stream.scaladsl.Source
import org.scalatest.Inside
import play.api.Application
import play.api.Configuration
import play.api.Environment
import play.inject.guice.GuiceApplicationBuilder

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.reflect.ClassTag
import akka.japi.function.Procedure
import com.google.inject.Binder
import com.google.inject.Module
import com.google.inject.TypeLiteral
import com.lightbend.lagom.javadsl.testkit.ServiceTest
import com.lightbend.lagom.javadsl.testkit.ServiceTest.TestServer
import play.api.routing.Router
import java.util

import com.lightbend.lagom.internal.testkit.EmptyAdditionalRoutersModule
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

sealed trait HttpBackend {
  final val provider: String = s"play.core.server.${codeName}ServerProvider"
  val codeName: String
}

case object AkkaHttp extends HttpBackend {
  val codeName = "AkkaHttp"
}

case object Netty extends HttpBackend {
  val codeName = "Netty"
}

trait ServiceSupport extends AnyWordSpecLike with Matchers with Inside {
  def withServer(
      configureBuilder: GuiceApplicationBuilder => GuiceApplicationBuilder
  )(block: Application => Unit)(implicit httpBackend: HttpBackend): Unit = {
    val jConfigureBuilder = new JFunction[GuiceApplicationBuilder, GuiceApplicationBuilder] {
      override def apply(b: GuiceApplicationBuilder): GuiceApplicationBuilder = {
        configureBuilder(b)
          .overrides(EmptyAdditionalRoutersModule)
          .configure("play.server.provider", httpBackend.provider)
      }
    }
    val jBlock = new Procedure[TestServer] {
      override def apply(server: TestServer): Unit = {
        block(server.app.asScala())
      }
    }
    val setup = ServiceTest.defaultSetup.configureBuilder(jConfigureBuilder).withCluster(false)
    ServiceTest.withServer(setup, jBlock)
  }

  def withClient[T: ClassTag](
      configureBuilder: GuiceApplicationBuilder => GuiceApplicationBuilder
  )(block: Application => T => Unit)(implicit httpBackend: HttpBackend): Unit = {
    withServer(configureBuilder) { application =>
      val client = application.injector.instanceOf[T]
      block(application)(client)
    }
  }

  implicit def materializer(implicit app: Application): Materializer = app.materializer

  def consume[T](source: Source[T, _])(implicit mat: Materializer): List[T] = {
    Await.result(source.runFold(List.empty[T])((list, t) => t :: list), 10.seconds).reverse
  }
} 
Example 157
Source File: JavadslKafkaTopic.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.javadsl.broker.kafka

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.lightbend.lagom.internal.broker.kafka.KafkaConfig
import com.lightbend.lagom.javadsl.api.Descriptor.TopicCall
import com.lightbend.lagom.javadsl.api.ServiceInfo
import com.lightbend.lagom.javadsl.api.ServiceLocator
import com.lightbend.lagom.javadsl.api.broker.Topic.TopicId
import com.lightbend.lagom.javadsl.api.broker.Subscriber
import com.lightbend.lagom.javadsl.api.broker.Topic

import scala.concurrent.ExecutionContext


private[lagom] class JavadslKafkaTopic[Payload](
    kafkaConfig: KafkaConfig,
    topicCall: TopicCall[Payload],
    info: ServiceInfo,
    system: ActorSystem,
    serviceLocator: ServiceLocator
)(implicit mat: Materializer, ec: ExecutionContext)
    extends Topic[Payload] {
  override def topicId: TopicId = topicCall.topicId

  override def subscribe(): Subscriber[Payload] =
    new JavadslKafkaSubscriber[Payload, Payload](
      kafkaConfig,
      topicCall,
      JavadslKafkaSubscriber.GroupId.default(info),
      info,
      system,
      serviceLocator,
      _.value()
    )
} 
Example 158
Source File: MockFilters.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.scaladsl.server.mocks

import java.util.concurrent.atomic.AtomicInteger

import akka.stream.Materializer
import com.lightbend.lagom.scaladsl.api.transport.Forbidden
import com.lightbend.lagom.scaladsl.api.transport.HeaderFilter
import com.lightbend.lagom.scaladsl.api.transport.RequestHeader
import com.lightbend.lagom.scaladsl.api.transport.ResponseHeader
import play.api.mvc.Filter
import play.api.mvc.Result
import play.api.mvc.{ RequestHeader => PlayRequestHeader }
import play.api.mvc.{ ResponseHeader => PlayResponseHeader }

import scala.concurrent.ExecutionContext
import scala.concurrent.Future

// ------------------------------------------------------------------------------------------------------------
// This is a play filter that adds a header on the request and the adds a header on the response. Headers may only
// be added once so invoking this Filter twice breaks the test.
class VerboseHeaderPlayFilter(atomicInt: AtomicInteger, mt: Materializer)(implicit ctx: ExecutionContext)
    extends Filter {
  import VerboseHeaderPlayFilter._

  implicit override def mat: Materializer = mt

  override def apply(f: (PlayRequestHeader) => Future[Result])(rh: PlayRequestHeader): Future[Result] = {
    ensureMissing(rh.headers.toSimpleMap, addedOnRequest)
    val richerHeaders = rh.headers.add(addedOnRequest -> atomicInt.incrementAndGet().toString)
    val richerRequest = rh.withHeaders(richerHeaders)
    f(richerRequest).map {
      case result =>
        ensureMissing(result.header.headers, addedOnResponse)
        result.withHeaders(addedOnResponse -> atomicInt.incrementAndGet().toString)
    }
  }

  private def ensureMissing(headers: Map[String, String], key: String) =
    if (headers.get(key).isDefined) throw Forbidden(s"Header $key already exists.")
}

object VerboseHeaderPlayFilter {
  val addedOnRequest  = "addedOnRequest-play"
  val addedOnResponse = "addedOnResponse-play"
}

// ------------------------------------------------------------------------------------------------------------
// This is a Lagom HeaderFilter that adds a header on the request and the adds a header on the response.
class VerboseHeaderLagomFilter(atomicInteger: AtomicInteger) extends HeaderFilter {
  override def transformServerRequest(request: RequestHeader): RequestHeader =
    request.addHeader(VerboseHeaderLagomFilter.addedOnRequest, atomicInteger.incrementAndGet().toString)

  override def transformServerResponse(response: ResponseHeader, request: RequestHeader): ResponseHeader =
    response.addHeader(VerboseHeaderLagomFilter.addedOnResponse, atomicInteger.incrementAndGet().toString)

  override def transformClientResponse(response: ResponseHeader, request: RequestHeader): ResponseHeader = ???
  override def transformClientRequest(request: RequestHeader): RequestHeader                             = ???
}

object VerboseHeaderLagomFilter {
  val addedOnRequest  = "addedOnRequest-Lagom"
  val addedOnResponse = "addedOnResponse-Lagom"
} 
Example 159
Source File: LagomKafkaClientComponents.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.scaladsl.broker.kafka

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.lightbend.lagom.internal.scaladsl.api.broker.TopicFactory
import com.lightbend.lagom.internal.scaladsl.api.broker.TopicFactoryProvider
import com.lightbend.lagom.internal.scaladsl.broker.kafka.KafkaTopicFactory
import com.lightbend.lagom.scaladsl.api.ServiceInfo
import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.typesafe.config.Config

import scala.concurrent.ExecutionContext

trait LagomKafkaClientComponents extends TopicFactoryProvider {
  def serviceInfo: ServiceInfo
  def actorSystem: ActorSystem
  def materializer: Materializer
  def executionContext: ExecutionContext
  def serviceLocator: ServiceLocator
  def config: Config

  lazy val topicFactory: TopicFactory =
    new KafkaTopicFactory(serviceInfo, actorSystem, serviceLocator, config)(materializer, executionContext)
  override def optionalTopicFactory: Option[TopicFactory] = Some(topicFactory)
} 
Example 160
Source File: ScaladslKafkaTopic.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.scaladsl.broker.kafka

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.lightbend.lagom.internal.broker.kafka.KafkaConfig
import com.lightbend.lagom.scaladsl.api.Descriptor.TopicCall
import com.lightbend.lagom.scaladsl.api.ServiceInfo
import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.lightbend.lagom.scaladsl.api.broker.Subscriber
import com.lightbend.lagom.scaladsl.api.broker.Topic
import com.lightbend.lagom.scaladsl.api.broker.Topic.TopicId

import scala.concurrent.ExecutionContext

private[lagom] class ScaladslKafkaTopic[Message](
    kafkaConfig: KafkaConfig,
    topicCall: TopicCall[Message],
    info: ServiceInfo,
    system: ActorSystem,
    serviceLocator: ServiceLocator
)(implicit mat: Materializer, ec: ExecutionContext)
    extends Topic[Message] {
  override def topicId: TopicId = topicCall.topicId

  override def subscribe: Subscriber[Message] =
    new ScaladslKafkaSubscriber[Message, Message](
      kafkaConfig,
      topicCall,
      ScaladslKafkaSubscriber.GroupId.default(info),
      info,
      system,
      serviceLocator,
      _.value
    )
} 
Example 161
Source File: KafkaTopicFactory.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.scaladsl.broker.kafka

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.lightbend.lagom.internal.broker.kafka.KafkaConfig
import com.lightbend.lagom.internal.scaladsl.api.broker.TopicFactory
import com.lightbend.lagom.scaladsl.api.Descriptor.TopicCall
import com.lightbend.lagom.scaladsl.api.ServiceInfo
import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.lightbend.lagom.scaladsl.api.broker.Topic
import com.typesafe.config.Config

import scala.concurrent.ExecutionContext


private[lagom] class KafkaTopicFactory(
    serviceInfo: ServiceInfo,
    system: ActorSystem,
    serviceLocator: ServiceLocator,
    config: Config
)(implicit materializer: Materializer, executionContext: ExecutionContext)
    extends TopicFactory {
  @deprecated("Use constructor that accepts a Config", "2.0.0")
  def this(serviceInfo: ServiceInfo, system: ActorSystem, serviceLocator: ServiceLocator)(
      implicit materializer: Materializer,
      executionContext: ExecutionContext
  ) = {
    this(serviceInfo, system, serviceLocator, system.settings.config)
  }

  private val kafkaConfig = KafkaConfig(config)

  def create[Message](topicCall: TopicCall[Message]): Topic[Message] = {
    new ScaladslKafkaTopic(kafkaConfig, topicCall, serviceInfo, system, serviceLocator)
  }
} 
Example 162
Source File: Producer.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.broker.kafka

import java.net.URI

import akka.actor.ActorSystem
import akka.persistence.query.{ Offset => AkkaOffset }
import akka.stream.Materializer
import akka.stream.scaladsl._
import com.lightbend.lagom.internal.projection.ProjectionRegistry
import com.lightbend.lagom.internal.projection.ProjectionRegistryActor.WorkerCoordinates
import com.lightbend.lagom.spi.persistence.OffsetStore
import org.apache.kafka.common.serialization.Serializer

import scala.collection.immutable
import scala.concurrent.ExecutionContext
import scala.concurrent.Future


private[lagom] object Producer {
  def startTaggedOffsetProducer[Message](
      system: ActorSystem,
      tags: immutable.Seq[String],
      kafkaConfig: KafkaConfig,
      locateService: String => Future[Seq[URI]],
      topicId: String,
      eventStreamFactory: (String, AkkaOffset) => Source[(Message, AkkaOffset), _],
      partitionKeyStrategy: Option[Message => String],
      serializer: Serializer[Message],
      offsetStore: OffsetStore,
      projectionRegistry: ProjectionRegistry
  )(implicit mat: Materializer, ec: ExecutionContext): Unit = {
    val projectionName = s"kafkaProducer-$topicId"

    val producerConfig = ProducerConfig(system.settings.config)
    val topicProducerProps = (coordinates: WorkerCoordinates) =>
      TopicProducerActor.props(
        coordinates,
        kafkaConfig,
        producerConfig,
        locateService,
        topicId,
        eventStreamFactory,
        partitionKeyStrategy,
        serializer,
        offsetStore
      )

    val entityIds = tags.toSet

    projectionRegistry.registerProjection(
      projectionName,
      entityIds,
      topicProducerProps,
      producerConfig.role
    )
  }
} 
Example 163
Source File: JdbcClusteredPersistentEntitySpec.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.scaladsl.persistence.jdbc

import akka.actor.ActorSystem
import akka.actor.CoordinatedShutdown
import akka.stream.Materializer
import akka.stream.SystemMaterializer
import com.lightbend.lagom.scaladsl.persistence.TestEntity.Evt
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntityConfig
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntitySpec
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor
import com.lightbend.lagom.scaladsl.persistence.TestEntitySerializerRegistry
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntitySpec.Ports
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.h2.tools.Server
import play.api.Configuration
import play.api.Environment
import play.api.db.HikariCPComponents
import play.api.inject.ApplicationLifecycle
import play.api.inject.DefaultApplicationLifecycle

import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Future

object JdbcClusteredPersistentEntityConfig extends AbstractClusteredPersistentEntityConfig {

  override def specPorts: Ports.SpecPorts = Ports.jdbcSpecPorts

  override def additionalCommonConfig: Config = ConfigFactory.parseString(
    s"""
      db.default.driver=org.h2.Driver
      db.default.url="jdbc:h2:tcp://localhost:${specPorts.database}/mem:JdbcClusteredPersistentEntitySpec"
    """
  )
}

class JdbcClusteredPersistentEntitySpecMultiJvmNode1 extends JdbcClusteredPersistentEntitySpec
class JdbcClusteredPersistentEntitySpecMultiJvmNode2 extends JdbcClusteredPersistentEntitySpec
class JdbcClusteredPersistentEntitySpecMultiJvmNode3 extends JdbcClusteredPersistentEntitySpec

class JdbcClusteredPersistentEntitySpec
    extends AbstractClusteredPersistentEntitySpec(JdbcClusteredPersistentEntityConfig) {
  import JdbcClusteredPersistentEntityConfig._

  var h2: Server = _

  protected override def atStartup(): Unit = {
    runOn(node1) {
      h2 = Server.createTcpServer("-tcpPort", specPorts.database.toString, "-ifNotExists").start()
    }

    enterBarrier("h2-started")
    super.atStartup()
  }

  protected override def afterTermination(): Unit = {
    super.afterTermination()
    Await.ready(defaultApplicationLifecycle.stop(), shutdownTimeout)
    Option(h2).foreach(_.stop())
  }

  lazy val defaultApplicationLifecycle = new DefaultApplicationLifecycle

  override lazy val components: JdbcPersistenceComponents =
    new JdbcPersistenceComponents with HikariCPComponents {
      override def actorSystem: ActorSystem                 = JdbcClusteredPersistentEntitySpec.this.system
      override def executionContext: ExecutionContext       = system.dispatcher
      override def coordinatedShutdown: CoordinatedShutdown = CoordinatedShutdown(actorSystem)

      override lazy val materializer: Materializer                 = SystemMaterializer(actorSystem).materializer
      override lazy val configuration: Configuration               = Configuration(system.settings.config)
      override def environment: Environment                        = JdbcClusteredPersistentEntityConfig.environment
      override lazy val applicationLifecycle: ApplicationLifecycle = defaultApplicationLifecycle
      override def jsonSerializerRegistry: JsonSerializerRegistry  = TestEntitySerializerRegistry
    }

  lazy val jdbcTestEntityReadSide: JdbcTestEntityReadSide =
    new JdbcTestEntityReadSide(components.jdbcSession)

  protected override def getAppendCount(id: String): Future[Long] =
    jdbcTestEntityReadSide.getAppendCount(id)

  protected override def readSideProcessor: () => ReadSideProcessor[Evt] = { () =>
    new JdbcTestEntityReadSide.TestEntityReadSideProcessor(components.jdbcReadSide)
  }
} 
Example 164
Source File: SlickClusteredPersistentEntitySpec.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.scaladsl.persistence.slick

import akka.actor.ActorSystem
import akka.actor.CoordinatedShutdown
import akka.stream.Materializer
import akka.stream.SystemMaterializer
import com.lightbend.lagom.scaladsl.persistence.TestEntity.Evt
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntityConfig
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntitySpec
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor
import com.lightbend.lagom.scaladsl.persistence.TestEntitySerializerRegistry
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntitySpec.Ports
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.h2.tools.Server
import play.api.Configuration
import play.api.Environment
import play.api.db.HikariCPComponents
import play.api.inject.ApplicationLifecycle
import play.api.inject.DefaultApplicationLifecycle

import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Future

object SlickClusteredPersistentEntityConfig extends AbstractClusteredPersistentEntityConfig {

  override def specPorts: Ports.SpecPorts = Ports.slickSpecPorts

  override def additionalCommonConfig: Config = ConfigFactory.parseString(
    s"""
      db.default.driver=org.h2.Driver
      db.default.url="jdbc:h2:tcp://localhost:${specPorts.database}/mem:JdbcClusteredPersistentEntitySpec"
    """
  )
}

class SlickClusteredPersistentEntitySpecMultiJvmNode1 extends SlickClusteredPersistentEntitySpec
class SlickClusteredPersistentEntitySpecMultiJvmNode2 extends SlickClusteredPersistentEntitySpec
class SlickClusteredPersistentEntitySpecMultiJvmNode3 extends SlickClusteredPersistentEntitySpec

class SlickClusteredPersistentEntitySpec
    extends AbstractClusteredPersistentEntitySpec(SlickClusteredPersistentEntityConfig) {
  import SlickClusteredPersistentEntityConfig._

  var h2: Server = _

  protected override def atStartup(): Unit = {
    runOn(node1) {
      h2 = Server.createTcpServer("-tcpPort", specPorts.database.toString, "-ifNotExists").start()
    }
    enterBarrier("h2-started")
    super.atStartup()
  }

  protected override def afterTermination(): Unit = {
    super.afterTermination()
    Await.ready(defaultApplicationLifecycle.stop(), shutdownTimeout)
    Option(h2).foreach(_.stop())
  }

  lazy val defaultApplicationLifecycle = new DefaultApplicationLifecycle

  override lazy val components: SlickPersistenceComponents =
    new SlickPersistenceComponents with HikariCPComponents {
      override def actorSystem: ActorSystem                 = SlickClusteredPersistentEntitySpec.this.system
      override def executionContext: ExecutionContext       = system.dispatcher
      override def coordinatedShutdown: CoordinatedShutdown = CoordinatedShutdown(actorSystem)

      override lazy val materializer: Materializer                 = SystemMaterializer(actorSystem).materializer
      override lazy val configuration: Configuration               = Configuration(system.settings.config)
      override def environment: Environment                        = SlickClusteredPersistentEntityConfig.environment
      override lazy val applicationLifecycle: ApplicationLifecycle = defaultApplicationLifecycle
      override def jsonSerializerRegistry: JsonSerializerRegistry  = TestEntitySerializerRegistry
    }

  lazy val jdbcTestEntityReadSide: SlickTestEntityReadSide =
    new SlickTestEntityReadSide(
      components.db,
      components.profile
    )(components.executionContext)

  protected override def getAppendCount(id: String): Future[Long] =
    jdbcTestEntityReadSide.getAppendCount(id)

  protected override def readSideProcessor: () => ReadSideProcessor[Evt] = { () =>
    new SlickTestEntityReadSide.TestEntityReadSideProcessor(
      components.slickReadSide,
      components.db,
      components.profile
    )(components.executionContext)
  }
} 
Example 165
Source File: TopicStub.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.javadsl.testkit

import java.util.concurrent.CompletionStage

import akka.Done
import akka.actor.ActorRef
import akka.stream.Materializer
import akka.stream.javadsl.Flow
import akka.stream.javadsl.Source
import akka.stream.scaladsl.{ Flow => ScalaFlow }
import com.lightbend.lagom.internal.testkit.InternalSubscriberStub
import com.lightbend.lagom.javadsl.api.broker.Message
import com.lightbend.lagom.javadsl.api.broker.Subscriber
import com.lightbend.lagom.javadsl.api.broker.Topic

import scala.compat.java8.FutureConverters.toJava

private[lagom] class TopicStub[T](val topicId: Topic.TopicId, topicBuffer: ActorRef)(
    implicit materializer: Materializer
) extends Topic[T] {
  // TODO: use ServiceInfo's name as a default value.
  def subscribe = new SubscriberStub("default", topicBuffer, _.getPayload)

  class SubscriberStub[SubscriberPayload](
      groupId: String,
      topicBuffer: ActorRef,
      transform: Message[T] => SubscriberPayload
  )(implicit materializer: Materializer)
      extends InternalSubscriberStub[T, Message](groupId, topicBuffer)(materializer)
      with Subscriber[SubscriberPayload] {
    override def withGroupId(groupId: String): Subscriber[SubscriberPayload] =
      new SubscriberStub(groupId, topicBuffer, transform)

    override def withMetadata(): Subscriber[Message[SubscriberPayload]] =
      new SubscriberStub[Message[SubscriberPayload]](groupId, topicBuffer, msg => msg.withPayload(transform(msg)))

    override def atMostOnceSource(): Source[SubscriberPayload, _] =
      super.mostOnceSource.map(transform).asJava

    override def atLeastOnce(flow: Flow[SubscriberPayload, Done, _]): CompletionStage[Done] =
      toJava(super.leastOnce(ScalaFlow[Message[T]].map(transform).via(flow.asScala)))
  }
} 
Example 166
Source File: TestTopicFactory.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.testkit

import java.util.concurrent.CompletionStage
import javax.inject.Inject

import akka.Done
import akka.stream.Materializer
import akka.stream.javadsl.Flow
import akka.stream.javadsl.Sink
import akka.stream.javadsl.Source
import com.lightbend.lagom.internal.broker.TaggedOffsetTopicProducer
import com.lightbend.lagom.internal.javadsl.api.MethodTopicHolder
import com.lightbend.lagom.internal.javadsl.api.broker.TopicFactory
import com.lightbend.lagom.internal.javadsl.server.ResolvedServices
import com.lightbend.lagom.javadsl.api.Descriptor.TopicCall
import com.lightbend.lagom.javadsl.api.broker.Topic.TopicId
import com.lightbend.lagom.javadsl.api.broker.Message
import com.lightbend.lagom.javadsl.api.broker.Subscriber
import com.lightbend.lagom.javadsl.api.broker.Topic
import com.lightbend.lagom.javadsl.persistence.AggregateEvent
import com.lightbend.lagom.javadsl.persistence.Offset

import scala.collection.JavaConverters._


class TestTopicFactory @Inject() (resolvedServices: ResolvedServices, materializer: Materializer) extends TopicFactory {
  private val topics: Map[TopicId, Any] = resolvedServices.services.flatMap { service =>
    service.descriptor.topicCalls().asScala.map { topicCall =>
      topicCall.topicId -> service.service
    }
  }.toMap

  override def create[Message](topicCall: TopicCall[Message]): Topic[Message] = {
    topics.get(topicCall.topicId()) match {
      case Some(service) =>
        topicCall.topicHolder() match {
          case method: MethodTopicHolder =>
            method.create(service) match {
              case topicProducer: TaggedOffsetTopicProducer[Message, _] =>
                new TestTopic(topicCall, topicProducer)
              case other =>
                throw new IllegalArgumentException(s"Testkit does not know how to handle topic $other")
            }
        }
      case None => throw new IllegalArgumentException(s"$topicCall hasn't been resolved.")
    }
  }

  private class TestTopic[Payload, Event <: AggregateEvent[Event]](
      topicCall: TopicCall[Payload],
      topicProducer: TaggedOffsetTopicProducer[Payload, Event]
  ) extends Topic[Payload] {
    override def topicId = topicCall.topicId

    override def subscribe(): Subscriber[Payload] = new TestSubscriber[Payload](identity)

    private class TestSubscriber[SubscriberPayload](transform: Payload => SubscriberPayload)
        extends Subscriber[SubscriberPayload] {
      override def withGroupId(groupId: String): Subscriber[SubscriberPayload] = this

      override def withMetadata(): Subscriber[Message[SubscriberPayload]] =
        new TestSubscriber(msg => Message.create(transform(msg)))

      override def atMostOnceSource(): Source[SubscriberPayload, _] = {
        val serializer   = topicCall.messageSerializer().serializerForRequest()
        val deserializer = topicCall.messageSerializer().deserializer(serializer.protocol())

        // Create a source for all the tags, and merge them all together.
        // Then, send the flow through a serializer and deserializer, to simulate sending it over the wire.
        Source
          .from(topicProducer.tags)
          .asScala
          .flatMapMerge(topicProducer.tags.size(), { tag =>
            topicProducer.readSideStream.apply(tag, Offset.NONE).asScala.map(_.first)
          })
          .map { message =>
            serializer.serialize(message)
          }
          .map { bytes =>
            deserializer.deserialize(bytes)
          }
          .map(transform)
          .asJava
      }

      override def atLeastOnce(flow: Flow[SubscriberPayload, Done, _]): CompletionStage[Done] = {
        atMostOnceSource().via(flow).runWith(Sink.ignore[Done], materializer)
      }
    }
  }
} 
Example 167
Source File: ReadSideTestDriver.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.scaladsl.testkit

import akka.Done
import akka.persistence.query.Offset
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor.ReadSideHandler
import com.lightbend.lagom.scaladsl.persistence.AggregateEvent
import com.lightbend.lagom.scaladsl.persistence.EventStreamElement
import com.lightbend.lagom.scaladsl.persistence.ReadSide
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor

import scala.concurrent.ExecutionContext
import scala.concurrent.Future

class ReadSideTestDriver(implicit val materializer: Materializer, ec: ExecutionContext) extends ReadSide {
  private var processors = Map.empty[Class[_], Seq[Future[(ReadSideHandler[_], Offset)]]]

  override def register[Event <: AggregateEvent[Event]](processorFactory: => ReadSideProcessor[Event]): Unit = {
    val processor = processorFactory
    val eventTags = processor.aggregateTags
    val handler   = processor.buildHandler()
    val future = for {
      _      <- handler.globalPrepare()
      offset <- handler.prepare(eventTags.head)
    } yield {
      handler -> offset
    }
    synchronized {
      val handlers = processors.getOrElse(eventTags.head.eventType, Nil)
      processors += (eventTags.head.eventType -> (handlers :+ future))
    }
  }

  def feed[Event <: AggregateEvent[Event]](entityId: String, event: Event, offset: Offset): Future[Done] = {
    processors.get(event.aggregateTag.eventType) match {
      case None => sys.error(s"No processor registered for Event ${event.aggregateTag.eventType.getCanonicalName}")
      case Some(handlerFutures) =>
        for {
          handlers <- Future.sequence(handlerFutures)
          _ <- Future.sequence(handlers.map {
            case (handler: ReadSideHandler[Event], _) =>
              Source
                .single(new EventStreamElement(entityId, event, offset))
                .via(handler.handle())
                .runWith(Sink.ignore)
          })
        } yield {
          Done
        }
    }
  }
} 
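In a test the driver stands in for the real ReadSide, so events can be pushed through registered processors deterministically with feed. A minimal sketch, assuming implicit Materializer and ExecutionContext values plus a hypothetical MyEvent (an AggregateEvent) and its MyEventProcessor:

import akka.persistence.query.Sequence
import scala.concurrent.Await
import scala.concurrent.duration._

val driver = new ReadSideTestDriver()
driver.register(new MyEventProcessor)
// push one event at offset 1 and wait for the handler to process it
Await.result(driver.feed("entity-1", MyEvent("hello"), Sequence(1L)), 5.seconds)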
Example 168
Source File: ProducerStubFactory.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.scaladsl.testkit

import java.util.concurrent.ConcurrentHashMap
import java.util.function.{ Function => JFunction }

import akka.Done
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Props
import akka.stream.Materializer
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Source
import com.lightbend.lagom.internal.testkit.InternalSubscriberStub
import com.lightbend.lagom.internal.testkit.TopicBufferActor
import com.lightbend.lagom.scaladsl.api.broker.Topic.TopicId
import com.lightbend.lagom.scaladsl.api.broker.Message
import com.lightbend.lagom.scaladsl.api.broker.Subscriber
import com.lightbend.lagom.scaladsl.api.broker.Topic

import scala.concurrent.Future


final class ProducerStubFactory(actorSystem: ActorSystem, materializer: Materializer) {
  private val topics = new ConcurrentHashMap[String, ProducerStub[_]]

  def producer[T](topicId: String): ProducerStub[T] = {
    val builder = new JFunction[String, ProducerStub[_]] {
      override def apply(t: String) = new ProducerStub[T](t, actorSystem)(materializer)
    }
    topics.computeIfAbsent(topicId, builder).asInstanceOf[ProducerStub[T]]
  }
}

final class ProducerStub[T] private[lagom] (topicName: String, actorSystem: ActorSystem)(
    implicit materializer: Materializer
) {
  // buffers sent messages and replays them to subscribers of the stubbed topic
  private[lagom] val bufferActor: ActorRef = actorSystem.actorOf(Props(classOf[TopicBufferActor]))

  // the Topic to hand to the service under test
  lazy val topic: Topic[T] = new TopicStub[T](TopicId(topicName), bufferActor)

  def send(message: T): Unit = send(Message(message))

  def send(message: Message[T]): Unit = bufferActor.tell(message, ActorRef.noSender)
}

private[lagom] class TopicStub[T](val topicId: Topic.TopicId, topicBuffer: ActorRef)(
    implicit materializer: Materializer
) extends Topic[T] {
  def subscribe = new SubscriberStub[T, T]("default", topicBuffer, _.payload)

  class SubscriberStub[Payload, SubscriberPayload](
      groupId: String,
      topicBuffer: ActorRef,
      transform: Message[Payload] => SubscriberPayload
  )(implicit materializer: Materializer)
      extends InternalSubscriberStub[Payload, Message](groupId, topicBuffer)
      with Subscriber[SubscriberPayload] {
    override def withMetadata: Subscriber[Message[SubscriberPayload]] =
      new SubscriberStub[Payload, Message[SubscriberPayload]](
        groupId,
        topicBuffer,
        msg => msg.withPayload(transform(msg))
      )

    override def withGroupId(groupId: String): Subscriber[SubscriberPayload] =
      new SubscriberStub[Payload, SubscriberPayload](groupId, topicBuffer, transform)

    override def atMostOnceSource: Source[SubscriberPayload, _] = super.mostOnceSource.map(transform)

    override def atLeastOnce(flow: Flow[SubscriberPayload, Done, _]): Future[Done] =
      super.leastOnce(Flow[Message[Payload]].map(transform).via(flow))
  }
} 
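A test obtains a stub through ProducerStubFactory and injects messages into the topic that the service under test subscribes to; a sketch along the lines of the Lagom testkit documentation (the topic name and payload are placeholders):

// actorSystem and materializer assumed in scope, e.g. from a test server
val stubFactory   = new ProducerStubFactory(actorSystem, materializer)
val helloProducer = stubFactory.producer[String]("greetings")
helloProducer.send("Hi there!")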
Example 169
Source File: InternalSubscriberStub.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.testkit

import akka.Done
import akka.actor.ActorRef
import akka.stream.Materializer
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Keep
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source

import scala.concurrent.Future
import scala.language.higherKinds

private[lagom] class InternalSubscriberStub[Payload, Message[_]](
    groupId: String,
    topicBuffer: ActorRef
)(implicit materializer: Materializer) {
  def mostOnceSource: Source[Message[Payload], _] = {
    Source
      .actorRef[Message[Payload]](1024, OverflowStrategy.fail)
      .prependMat(Source.empty)(subscribeToBuffer)
  }

  def leastOnce(flow: Flow[Message[Payload], Done, _]): Future[Done] = {
    mostOnceSource
      .via(flow)
      .toMat(Sink.ignore)(Keep.right[Any, Future[Done]])
      .run()
  }

  private def subscribeToBuffer[R](ref: ActorRef, t: R) = {
    topicBuffer.tell(TopicBufferActor.SubscribeToBuffer(groupId, ref), ActorRef.noSender)
    t
  }
} 
Example 170
Source File: CsrfDirectives.scala    From akka-http-session   with Apache License 2.0 5 votes vote down vote up
package com.softwaremill.session

import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.{Directive0, Directive1}
import akka.stream.Materializer

trait CsrfDirectives {

  
  def randomTokenCsrfProtection[T](checkMode: CsrfCheckMode[T]): Directive0 = {
    csrfTokenFromCookie(checkMode).flatMap {
      case Some(cookie) =>
        // if a cookie is already set, we let through all get requests (without setting a new token), or validate
        // that the token matches.
        get.recover { _ =>
          submittedCsrfToken(checkMode).flatMap { submitted =>
            if (submitted == cookie && !cookie.isEmpty) {
              pass
            } else {
              reject(checkMode.csrfManager.tokenInvalidRejection).toDirective[Unit]
            }
          }
        }
      case None =>
        // if a cookie is not set, generate a new one for GET requests and reject all others
        (get & setNewCsrfToken(checkMode)).recover(_ => reject(checkMode.csrfManager.tokenInvalidRejection))
    }
  }

  def submittedCsrfToken[T](checkMode: CsrfCheckMode[T]): Directive1[String] = {
    headerValueByName(checkMode.manager.config.csrfSubmittedName).recover { rejections =>
      checkMode match {
        case c: CheckHeaderAndForm[T] =>
          import c.materializer
          formField(checkMode.manager.config.csrfSubmittedName)
        case _ => reject(rejections: _*)
      }
    }
  }

  def csrfTokenFromCookie[T](checkMode: CsrfCheckMode[T]): Directive1[Option[String]] =
    optionalCookie(checkMode.manager.config.csrfCookieConfig.name).map(_.map(_.value))

  def setNewCsrfToken[T](checkMode: CsrfCheckMode[T]): Directive0 =
    setCookie(checkMode.csrfManager.createCookie())
}

object CsrfDirectives extends CsrfDirectives

sealed trait CsrfCheckMode[T] {
  def manager: SessionManager[T]
  def csrfManager = manager.csrfManager
}
class CheckHeader[T] private[session] (implicit val manager: SessionManager[T]) extends CsrfCheckMode[T]
class CheckHeaderAndForm[T] private[session] (implicit
                                              val manager: SessionManager[T],
                                              val materializer: Materializer)
    extends CsrfCheckMode[T]

object CsrfOptions {
  def checkHeader[T](implicit manager: SessionManager[T]): CheckHeader[T] = new CheckHeader[T]()
  def checkHeaderAndForm[T](implicit manager: SessionManager[T], materializer: Materializer): CheckHeaderAndForm[T] =
    new CheckHeaderAndForm[T]()
} 
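Wiring the protection into a route tree follows the library's documented pattern; a minimal sketch, assuming an implicit SessionManager[MySession] is already in scope:

import akka.http.scaladsl.server.Directives._
import com.softwaremill.session.CsrfDirectives._
import com.softwaremill.session.CsrfOptions._

// GET requests receive a fresh token cookie; mutating requests must echo it in the header
val routes =
  randomTokenCsrfProtection(checkHeader) {
    path("transfer") {
      post {
        complete("ok")
      }
    }
  }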
Example 171
Source File: YetAnotherAkkaClient.scala    From telegram   with Apache License 2.0 5 votes vote down vote up
package com.bot4s.telegram.clients

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.Uri.Path
import akka.http.scaladsl.model._
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import cats.instances.future._
import com.bot4s.telegram.api.RequestHandler
import com.bot4s.telegram.methods.{Request, Response}
import io.circe.{Decoder, Encoder}
import slogging.StrictLogging
import com.bot4s.telegram.marshalling.responseDecoder

import scala.concurrent.{ExecutionContext, Future}

class YetAnotherAkkaClient(token: String, telegramHost: String = "api.telegram.org")
                          (implicit system: ActorSystem, materializer: Materializer, ec: ExecutionContext)
  extends RequestHandler[Future] with StrictLogging {

  private val flow = Http().outgoingConnectionHttps(telegramHost)

  import com.bot4s.telegram.marshalling.AkkaHttpMarshalling._

  override def sendRequest[R, T <: Request[_]](request: T)(implicit encT: Encoder[T], decR: Decoder[R]): Future[R] = {
    Source.fromFuture(
      Marshal(request).to[RequestEntity]
        .map {
          re =>
            HttpRequest(HttpMethods.POST, Uri(path = Path(s"/bot$token/" + request.methodName)), entity = re)
        })
      .via(flow)
      .mapAsync(1)(r => Unmarshal(r.entity).to[Response[R]])
      .runWith(Sink.head)
      .map(processApiResponse[R])
  }
} 
Example 172
Source File: AkkaHttpClient.scala    From telegram   with Apache License 2.0 5 votes vote down vote up
package com.bot4s.telegram.clients

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshalling._
import akka.http.scaladsl.model._
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.Materializer
import cats.instances.future._
import com.bot4s.telegram.api.RequestHandler
import com.bot4s.telegram.marshalling.AkkaHttpMarshalling
import com.bot4s.telegram.marshalling._
import com.bot4s.telegram.methods.{Request, Response}
import io.circe.{Decoder, Encoder}
import slogging.StrictLogging

import scala.concurrent.{ExecutionContext, Future}


class AkkaHttpClient(token: String, telegramHost: String = "api.telegram.org")
  (implicit system: ActorSystem, materializer: Materializer, ec: ExecutionContext)
  extends RequestHandler[Future] with StrictLogging {

  import AkkaHttpMarshalling._
  private val apiBaseUrl = s"https://$telegramHost/bot$token/"
  private val http = Http()

  override def sendRequest[R, T <: Request[_]](request: T)(implicit encT: Encoder[T], decR: Decoder[R]): Future[R] = {
    Marshal(request).to[RequestEntity]
      .map {
        re =>
          HttpRequest(HttpMethods.POST, Uri(apiBaseUrl + request.methodName), entity = re)
      }
      .flatMap(http.singleRequest(_))
      .flatMap(r => Unmarshal(r.entity).to[Response[R]])
      .map(t => processApiResponse[R](t))
  }
} 
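Constructing the client only requires the implicit Akka machinery and a bot token; a minimal sketch (the token is a placeholder):

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import scala.concurrent.ExecutionContext

implicit val system: ActorSystem = ActorSystem("bot")
implicit val materializer: ActorMaterializer = ActorMaterializer()
implicit val ec: ExecutionContext = system.dispatcher

// hand this to a TelegramBot as its RequestHandler
val client = new AkkaHttpClient("123456:bot-token")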
Example 173
Source File: AppContext.scala    From ws_to_kafka   with MIT License 5 votes vote down vote up
package com.pkinsky

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import com.softwaremill.react.kafka.ReactiveKafka
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext
import scala.language.postfixOps


trait AppContext {
  //implicit context: actor system, execution context, materializer
  implicit val system: ActorSystem = ActorSystem()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val mat: Materializer = ActorMaterializer()

  //kafka setup
  val kafkaConf = KafkaServiceConf("192.168.99.100:9092")
  val kafkaClient: ReactiveKafka = new ReactiveKafka()
  val kafka = new KafkaService(kafkaClient, kafkaConf)

  //constants
  val eventTopic = "event_topic_newer" //kafka topic
  val port = 9000 //server port

  def awaitTermination() = {
    System.console().readLine() //wait for enter

    println(s"shutting down because enter was pressed")
    system.shutdown()

    system.awaitTermination(30 seconds)
    System.exit(0)
  }
} 
Example 174
Source File: AkkaBodyUnmarshaller.scala    From chronicler   with Apache License 2.0 5 votes vote down vote up
package com.github.fsanaulla.chronicler.akka.shared.handlers

import akka.http.scaladsl.coding.Gzip
import akka.http.scaladsl.model.{HttpCharsets, HttpEntity}
import akka.http.scaladsl.unmarshalling.Unmarshaller
import akka.stream.Materializer
import akka.util.ByteString
import com.github.fsanaulla.chronicler.core.alias.ErrorOr
import com.github.fsanaulla.chronicler.core.jawn.RichJParser
import org.typelevel.jawn.ast.{JParser, JValue}

import scala.concurrent.{ExecutionContext, Future}

final class AkkaBodyUnmarshaller(compressed: Boolean)
  extends Unmarshaller[HttpEntity, ErrorOr[JValue]] {

  override def apply(
      value: HttpEntity
    )(implicit ec: ExecutionContext,
      mat: Materializer
    ): Future[ErrorOr[JValue]] = {

    // get encoding from response content type, otherwise use UTF-8 as default
    val encoding = value.contentType.charsetOption
      .getOrElse(HttpCharsets.`UTF-8`)
      .nioCharset()

    val srcBody = if (compressed) value.dataBytes.via(Gzip.decoderFlow) else value.dataBytes

    srcBody
      .runFold(ByteString.empty)(_ ++ _)
      .map(_.decodeString(encoding))
      .map(JParser.parseFromStringEither)
  }
} 
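Since this is an ordinary Unmarshaller[HttpEntity, ErrorOr[JValue]], it plugs directly into Akka HTTP's Unmarshal machinery. A sketch, assuming a response: HttpResponse plus implicit ExecutionContext and Materializer values:

import akka.http.scaladsl.unmarshalling.Unmarshal
import scala.concurrent.Future

implicit val unmarshaller: AkkaBodyUnmarshaller = new AkkaBodyUnmarshaller(compressed = false)

val parsed: Future[ErrorOr[JValue]] = Unmarshal(response.entity).to[ErrorOr[JValue]]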
Example 175
Source File: ExampleFilter.scala    From play-webpack-typescript-react   with MIT License 5 votes vote down vote up
package filters

import akka.stream.Materializer
import javax.inject._
import play.api.mvc._
import scala.concurrent.{ExecutionContext, Future}


@Singleton
class ExampleFilter @Inject()(implicit override val mat: Materializer, exec: ExecutionContext) extends Filter {

  override def apply(nextFilter: RequestHeader => Future[Result])(requestHeader: RequestHeader): Future[Result] = {
    // Run the next filter in the chain. This will call other filters
    // and eventually call the action. Take the result and modify it
    // by adding a new header.
    nextFilter(requestHeader).map { result =>
      result.withHeaders("X-ExampleFilter" -> "foo")
    }
  }

} 
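For Play to apply the filter it must be registered in the filter chain; the standard wiring (not part of this file) is a DefaultHttpFilters subclass picked up by dependency injection:

import javax.inject.Inject
import play.api.http.DefaultHttpFilters

class Filters @Inject() (exampleFilter: ExampleFilter) extends DefaultHttpFilters(exampleFilter)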
Example 176
Source File: CouchbaseClusteredPersistentEntitySpec.scala    From akka-persistence-couchbase   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.scaladsl.persistence.couchbase

import java.io.File

import akka.actor.{ActorSystem, CoordinatedShutdown}
import akka.persistence.couchbase.CouchbaseClusterConnection
import akka.stream.{ActorMaterializer, Materializer}
import com.lightbend.lagom.internal.persistence.couchbase.TestConfig
import com.lightbend.lagom.internal.persistence.testkit.AwaitPersistenceInit.awaitPersistenceInit
import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.lightbend.lagom.scaladsl.api.ServiceLocator.NoServiceLocator
import com.lightbend.lagom.scaladsl.persistence.multinode.{
  AbstractClusteredPersistentEntityConfig,
  AbstractClusteredPersistentEntitySpec
}
import com.lightbend.lagom.scaladsl.persistence.{ReadSideProcessor, TestEntity}
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.typesafe.config.Config
import play.api.{Configuration, Environment, Mode}
import play.api.inject.DefaultApplicationLifecycle

import scala.concurrent.{ExecutionContext, Future}

object CouchbaseClusteredPersistentEntityConfig extends AbstractClusteredPersistentEntityConfig {
  override def additionalCommonConfig(databasePort: Int): Config =
    TestConfig.persistenceConfig
}

class CouchbaseClusteredPersistentEntitySpecMultiJvmNode1 extends CouchbaseClusteredPersistentEntitySpec
class CouchbaseClusteredPersistentEntitySpecMultiJvmNode2 extends CouchbaseClusteredPersistentEntitySpec
class CouchbaseClusteredPersistentEntitySpecMultiJvmNode3 extends CouchbaseClusteredPersistentEntitySpec

class CouchbaseClusteredPersistentEntitySpec
    extends AbstractClusteredPersistentEntitySpec(CouchbaseClusteredPersistentEntityConfig) {
  import com.lightbend.lagom.scaladsl.persistence.couchbase.CouchbaseClusteredPersistentEntityConfig._

  override protected def atStartup(): Unit = {
    runOn(node1) {
      CouchbaseClusterConnection.connect().cleanUp().close()
      awaitPersistenceInit(system)
    }
    enterBarrier("couchbase-started")

    super.atStartup()
  }

  lazy val defaultApplicationLifecycle = new DefaultApplicationLifecycle

  override lazy val components: CouchbasePersistenceComponents =
    new CouchbasePersistenceComponents {
      override def actorSystem: ActorSystem = system
      override def executionContext: ExecutionContext = system.dispatcher
      override def materializer: Materializer = ActorMaterializer()(system)
      override def configuration: Configuration = Configuration(system.settings.config)
      override def serviceLocator: ServiceLocator = NoServiceLocator
      override def environment: Environment = Environment(new File("."), getClass.getClassLoader, Mode.Test)
      override def jsonSerializerRegistry: JsonSerializerRegistry = ???
      override def coordinatedShutdown: CoordinatedShutdown = CoordinatedShutdown(system)
    }

  def testEntityReadSide = new TestEntityReadSide(components.actorSystem, components.couchbase)

  override protected def readSideProcessor: () => ReadSideProcessor[TestEntity.Evt] =
    () => new TestEntityReadSide.TestEntityReadSideProcessor(system, components.couchbaseReadSide)

  override protected def getAppendCount(id: String): Future[Long] = testEntityReadSide.getAppendCount(id)
} 
Example 177
Source File: AbstractCouchbaseSpec.scala    From akka-persistence-couchbase   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.couchbase.scaladsl

import akka.actor.{ActorRef, ActorSystem}
import akka.persistence.couchbase.{CouchbaseBucketSetup, TestActor}
import akka.persistence.query.PersistenceQuery
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.{TestKit, TestProbe, WithLogCapturing}
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.duration._

abstract class AbstractCouchbaseSpec(testName: String, config: Config)
    extends TestKit(
      ActorSystem(testName, config.withFallback(ConfigFactory.load()))
    )
    with WordSpecLike
    with BeforeAndAfterAll
    with Matchers
    with ScalaFutures
    with CouchbaseBucketSetup
    with WithLogCapturing {
  def this(testName: String) =
    this(
      testName,
      ConfigFactory.parseString("""
            couchbase-journal.read {
              page-size = 10
            }
            akka.loggers = ["akka.testkit.SilenceAllTestEventListener"]
            akka.loglevel=debug
          """)
    )

  var idCounter = 0
  def nextPersistenceId(): String = {
    idCounter += 1
    Integer.toString(idCounter, 24)
  }

  // provides a unique persistence-id per test case and some initial persisted events
  protected trait Setup {
    lazy val probe = TestProbe()
    implicit def sender: ActorRef = probe.ref
    // note must be a def or lazy val or else it doesn't work (init order)
    def initialPersistedEvents: Int = 0
    def startPersistentActor(initialEvents: Int): (String, ActorRef) = {
      val pid = nextPersistenceId()
      system.log.debug("Starting actor with pid {}, and writing {} initial events", pid, initialEvents)
      val persistentActor = system.actorOf(TestActor.props(pid))
      if (initialEvents > 0) {
        for (i <- 1 to initialEvents) {
          persistentActor ! s"$pid-$i"
          probe.expectMsg(s"$pid-$i-done")
        }
      }
      (pid, persistentActor)
    }
    val (pid, persistentActor) = startPersistentActor(initialPersistedEvents)

    // no guarantee we can immediately read our own writes
    def readingOurOwnWrites[A](f: => A): A =
      awaitAssert(f, readOurOwnWritesTimeout, interval = 250.millis) // no need to bombard the db with retries
  }

  protected val noMsgTimeout = 100.millis
  protected val readOurOwnWritesTimeout = 10.seconds
  override implicit val patienceConfig: PatienceConfig = PatienceConfig(readOurOwnWritesTimeout)
  implicit val materializer: Materializer = ActorMaterializer()

  // #read-journal-access
  lazy val queries: CouchbaseReadJournal =
    PersistenceQuery(system).readJournalFor[CouchbaseReadJournal](CouchbaseReadJournal.Identifier)
  // #read-journal-access

  protected override def afterAll(): Unit = {
    super.afterAll()
    shutdown(system)
  }
} 
Example 178
Source File: ExampleFilter.scala    From sbt-header   with Apache License 2.0 5 votes vote down vote up
package filters

import akka.stream.Materializer
import javax.inject._
import play.api.mvc._
import scala.concurrent.{ExecutionContext, Future}


@Singleton
class ExampleFilter @Inject()(
    implicit override val mat: Materializer,
    exec: ExecutionContext) extends Filter {

  override def apply(nextFilter: RequestHeader => Future[Result])
           (requestHeader: RequestHeader): Future[Result] = {
    // Run the next filter in the chain. This will call other filters
    // and eventually call the action. Take the result and modify it
    // by adding a new header.
    nextFilter(requestHeader).map { result =>
      result.withHeaders("X-ExampleFilter" -> "foo")
    }
  }

} 
Example 179
Source File: WebService.scala    From heimdallr   with Apache License 2.0 5 votes vote down vote up
package chat

import scala.concurrent.ExecutionContext.Implicits._
import scala.util.{Failure,Success}
import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.{ ServerBinding }
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse, Uri }
import akka.stream.scaladsl.{ Flow, Sink, Source }
import org.slf4j.LoggerFactory

trait WebService {
  val log = LoggerFactory.getLogger("total")
  private var binding: scala.concurrent.Future[ServerBinding] = null

  def serviceBind(serviceName: String, bindRoute: Flow[HttpRequest, HttpResponse, Any], bindPort: Int)
                 (implicit actorSystem: ActorSystem, materializer: Materializer): Unit = {
    binding = Http().bindAndHandle(bindRoute,"0.0.0.0", bindPort)

    // the rest of the sample code will go here
    binding.onComplete {
      //binding success check
      case Success(binding) =>
        val localAddress = binding.localAddress
        log.info(s"${serviceName} is listening on ${localAddress.getAddress}:${localAddress.getPort}")

      case Failure(e) =>
        log.error(s"${serviceName} Binding failed with ${e.getMessage}")
    }
  }

  def serviceUnbind(serviceName: String) = {
    if( binding != null )
    {
      binding
        .flatMap(_.unbind())
        .onComplete(_ =>
          log.info(s"${serviceName} listening port unbinding ... ")
        )
    }
    else
      log.info( s"${serviceName} Unbinding Failed !" )
  }
} 
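A small sketch of using the trait, assuming Akka HTTP's Route.handlerFlow to adapt a Route into the Flow[HttpRequest, HttpResponse, Any] that serviceBind expects:

import akka.actor.ActorSystem
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer

object PingServer extends App with WebService {
  implicit val system: ActorSystem = ActorSystem("ping")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  serviceBind("ping-service", Route.handlerFlow(path("ping") { get { complete("pong") } }), 8080)
}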
Example 180
Source File: EventSource.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.client

import java.util.UUID

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.persistence.query.{NoOffset, Offset, Sequence, TimeBasedUUID}
import akka.stream.Materializer
import akka.stream.alpakka.sse.scaladsl.{EventSource => SSESource}
import akka.stream.scaladsl.Source
import ch.epfl.bluebrain.nexus.iam.auth.AccessToken
import ch.epfl.bluebrain.nexus.rdf.implicits._
import ch.epfl.bluebrain.nexus.rdf.Iri.AbsoluteIri
import com.typesafe.scalalogging.Logger
import io.circe.Decoder
import io.circe.parser.decode

import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try

trait EventSource[A] {

  def apply(iri: AbsoluteIri, offset: Option[String])(implicit
      cred: Option[AccessToken]
  ): Source[(Offset, A), NotUsed]
}

object EventSource {

  def apply[A: Decoder](
      config: KgClientConfig
  )(implicit as: ActorSystem, mt: Materializer, ec: ExecutionContext): EventSource[A] =
    new EventSource[A] {
      private val logger = Logger[this.type]
      private val http   = Http()

      private def addCredentials(request: HttpRequest)(implicit cred: Option[AccessToken]): HttpRequest =
        cred.map(token => request.addCredentials(OAuth2BearerToken(token.value))).getOrElse(request)

      private def send(request: HttpRequest)(implicit cred: Option[AccessToken]): Future[HttpResponse] =
        http.singleRequest(addCredentials(request)).map { resp =>
          if (!resp.status.isSuccess())
            logger.warn(s"HTTP response when performing SSE request: status = '${resp.status}'")
          resp
        }

      private def toOffset(id: String): Offset =
        Try(TimeBasedUUID(UUID.fromString(id))).orElse(Try(Sequence(id.toLong))).getOrElse(NoOffset)

      override def apply(iri: AbsoluteIri, offset: Option[String])(implicit
          cred: Option[AccessToken]
      ): Source[(Offset, A), NotUsed] =
        SSESource(iri.asAkka, send, offset, config.sseRetryDelay).flatMapConcat { sse =>
          val offset = sse.id.map(toOffset).getOrElse(NoOffset)
          decode[A](sse.data) match {
            case Right(ev) => Source.single(offset -> ev)
            case Left(err) =>
              logger.error(s"Failed to decode admin event '$sse'", err)
              Source.empty
          }
        }
    }
} 
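With the companion factory above in place, obtaining a stream of decoded events is a one-liner; a sketch assuming implicit ActorSystem/Materializer/ExecutionContext values, a config: KgClientConfig, and io.circe.Json as the event type:

import io.circe.Json

implicit val cred: Option[AccessToken] = None

val source: EventSource[Json] = EventSource[Json](config)
// events: Source[(Offset, Json), NotUsed]
val events = source(url"http://localhost:8080/v1/resources/org/project/events", offset = None)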
Example 181
Source File: TestHelper.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg

import java.time.Clock
import java.util.UUID

import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.util.ByteString
import ch.epfl.bluebrain.nexus.commons.test.{EitherValues, Randomness}
import ch.epfl.bluebrain.nexus.iam.acls.AccessControlList
import ch.epfl.bluebrain.nexus.iam.types.Identity.Anonymous
import ch.epfl.bluebrain.nexus.iam.types.{Identity, Permission, ResourceF => IamResourceF}
import ch.epfl.bluebrain.nexus.kg.config.Schemas.unconstrainedSchemaUri
import ch.epfl.bluebrain.nexus.kg.resources.ResourceF.Value
import ch.epfl.bluebrain.nexus.kg.resources.{Ref, ResId, ResourceF}
import ch.epfl.bluebrain.nexus.kg.storage.AkkaSource
import ch.epfl.bluebrain.nexus.rdf.Iri.AbsoluteIri
import ch.epfl.bluebrain.nexus.rdf.implicits._
import io.circe.Json

trait TestHelper extends EitherValues with Randomness {

  private val clock     = Clock.systemUTC()
  val read: Permission  = Permission.unsafe("resources/read")
  val write: Permission = Permission.unsafe("files/write")

  def consume(source: AkkaSource)(implicit mt: Materializer): String = {
    import org.scalatest.concurrent.ScalaFutures._
    source.runFold("")(_ ++ _.utf8String).futureValue
  }

  def produce(string: String, chunkSize: Int = 100): AkkaSource =
    Source(string.grouped(chunkSize).map(ByteString(_)).toList)

  def resourceAcls(acl: AccessControlList): IamResourceF[AccessControlList] =
    IamResourceF(
      url"http://example.com/id",
      1L,
      Set.empty,
      clock.instant(),
      Anonymous,
      clock.instant(),
      Anonymous,
      acl
    )

  def simpleV(
      id: ResId,
      value: Json,
      rev: Long = 1L,
      types: Set[AbsoluteIri] = Set.empty,
      deprecated: Boolean = false,
      schema: Ref = Ref(unconstrainedSchemaUri),
      created: Identity = Anonymous,
      updated: Identity = Anonymous
  )(implicit clock: Clock): ResourceF[Value] =
    ResourceF(
      id,
      rev,
      types,
      deprecated,
      Map.empty,
      None,
      clock.instant(),
      clock.instant(),
      created,
      updated,
      schema,
      Value(value, value.contextValue, value.toGraph(id.value).rightValue)
    )

  def simpleV(res: ResourceF[Json])(implicit clock: Clock) =
    ResourceF(
      res.id,
      res.rev,
      res.types,
      res.deprecated,
      Map.empty,
      None,
      clock.instant(),
      clock.instant(),
      res.createdBy,
      res.updatedBy,
      res.schema,
      Value(res.value, res.value.contextValue, res.value.toGraph(res.id.value).rightValue)
    )

  def genUUID: UUID = UUID.randomUUID()

  def genIri: AbsoluteIri = url"http://example.com/" + genUUID.toString

  private def sourceInChunks(input: String): AkkaSource =
    Source.fromIterator(() => input.grouped(10000).map(ByteString(_)))

  def genSource: AkkaSource = sourceInChunks(genString())

} 
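produce and consume are inverses, which makes round-trip assertions in tests straightforward; a small sketch assuming an implicit Materializer:

val src: AkkaSource = produce("hello world", chunkSize = 4)
assert(consume(src) == "hello world")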
Example 182
Source File: AttributesComputation.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.storage.attributes

import java.nio.file.{Files, Path}
import java.security.MessageDigest

import akka.http.scaladsl.model.HttpCharsets.`UTF-8`
import akka.http.scaladsl.model.MediaTypes.{`application/octet-stream`, `application/x-tar`}
import akka.http.scaladsl.model.{ContentType, MediaType, MediaTypes}
import akka.stream.Materializer
import akka.stream.scaladsl.{Keep, Sink}
import akka.util.ByteString
import cats.effect.Effect
import cats.implicits._
import ch.epfl.bluebrain.nexus.storage.File.{Digest, FileAttributes}
import ch.epfl.bluebrain.nexus.storage.StorageError.InternalError
import ch.epfl.bluebrain.nexus.storage._
import org.apache.commons.io.FilenameUtils

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

trait AttributesComputation[F[_], Source] {

  def apply(path: Path, algorithm: String): F[FileAttributes]
}

object AttributesComputation {

  // minimal reconstructions of the companion helpers used below; the
  // original definitions were dropped from this listing
  private def sinkSize: Sink[ByteString, Future[Long]] =
    Sink.fold(0L)(_ + _.size)

  private def sinkDigest(msgDigest: MessageDigest)(implicit ec: ExecutionContext): Sink[ByteString, Future[Digest]] =
    Sink
      .fold(msgDigest) { (digest, bytes: ByteString) =>
        digest.update(bytes.asByteBuffer)
        digest
      }
      .mapMaterializedValue(_.map(dig => Digest(dig.getAlgorithm, dig.digest().map("%02x".format(_)).mkString)))

  private def detectMediaType(path: Path, isDir: Boolean): ContentType =
    if (isDir) `application/x-tar`
    else {
      val mediaType: MediaType = MediaTypes
        .forExtensionOption(FilenameUtils.getExtension(path.getFileName.toString))
        .getOrElse(`application/octet-stream`)
      ContentType(mediaType, () => `UTF-8`)
    }

  implicit def akkaAttributes[F[_]](implicit
      ec: ExecutionContext,
      mt: Materializer,
      F: Effect[F]
  ): AttributesComputation[F, AkkaSource] =
    (path: Path, algorithm: String) => {
      if (!Files.exists(path)) F.raiseError(InternalError(s"Path not found '$path'"))
      else
        Try(MessageDigest.getInstance(algorithm)) match {
          case Success(msgDigest) =>
            val isDir  = Files.isDirectory(path)
            val source = if (isDir) folderSource(path) else fileSource(path)
            source
              .alsoToMat(sinkSize)(Keep.right)
              .toMat(sinkDigest(msgDigest)) { (bytesF, digestF) =>
                (bytesF, digestF).mapN {
                  case (bytes, digest) => FileAttributes(path.toAkkaUri, bytes, digest, detectMediaType(path, isDir))
                }
              }
              .run()
              .to[F]
          case Failure(_)         => F.raiseError(InternalError(s"Invalid algorithm '$algorithm'."))
        }

    }
} 
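Given an Effect instance, the implicit can be materialized for a concrete effect type; a sketch assuming cats.effect.IO and the usual implicit ExecutionContext and Materializer values:

import java.nio.file.Paths
import cats.effect.IO

val computation = AttributesComputation.akkaAttributes[IO]
// computes size, digest and media type in one pass over the file
val attributes: IO[FileAttributes] = computation(Paths.get("/tmp/data.bin"), "SHA-256")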
Example 183
Source File: Watcher.scala    From cloudstate   with Apache License 2.0 5 votes vote down vote up
package io.cloudstate.operator

import akka.{Done, NotUsed}
import akka.stream.{KillSwitch, KillSwitches, Materializer}
import akka.stream.scaladsl.{Flow, Keep, RestartSource, Sink, Source}
import play.api.libs.json.Format
import skuber.{ListResource, ObjectResource, ResourceDefinition}
import skuber.api.client.{EventType, KubernetesClient, WatchEvent}

import scala.concurrent.duration._
import skuber.json.format._

import scala.concurrent.ExecutionContext

object Watcher {

  private implicit def listResourceFormat[Resource <: ObjectResource: Format]: Format[ListResource[Resource]] =
    ListResourceFormat(implicitly[Format[Resource]])

  def watch[Resource <: ObjectResource: Format: ResourceDefinition](
      client: KubernetesClient,
      handler: Flow[WatchEvent[Resource], _, _]
  )(implicit ec: ExecutionContext, mat: Materializer): KillSwitch =
    // Summary of what we want our event loop to look like:
    // * We start by listing all the resources, and process them.
    // * Then we start watching from the resourceVersion that we got in our list, so we get all updates.
    // * But we also want to periodically recheck all resources, since sometimes there are race conditions
    //   between operators handling dependent resources (eg, if you deploy a journal and a service that uses
    //   it at the same time), so we only run the watch for a maximum of that time (eg, 5 minutes), before
    //   restarting.
    // * Also, if errors are encountered, we don't want to continually restart in a hot loop, so we use the
    //   RestartSource to restart with backoff.
    RestartSource
      .onFailuresWithBackoff(2.seconds, 20.seconds, 0.2) { () =>
        val source = Source
          .repeat(NotUsed)
          .flatMapConcat { _ =>
            Source
              .fromFutureSource(
                client
                  .list[ListResource[Resource]]()
                  .map { resources =>
                    val watch = client
                      .watchAllContinuously[Resource](sinceResourceVersion = Some(resources.resourceVersion))

                    Source(resources)
                      .map(WatchEvent(EventType.MODIFIED, _))
                      .concat(watch)
                  }
              )
              .takeWithin(5.minutes)
          }

        source.via(handler)
      }
      .viaMat(KillSwitches.single)(Keep.right)
      .to(Sink.ignore)
      .run()

  def watchSingle[Resource <: ObjectResource: Format: ResourceDefinition](
      client: KubernetesClient,
      resourceName: String,
      handler: Flow[WatchEvent[Resource], _, _]
  )(implicit ec: ExecutionContext, mat: Materializer): KillSwitch =
    RestartSource
      .onFailuresWithBackoff(2.seconds, 20.seconds, 0.2) { () =>
        val source = Source
          .repeat(NotUsed)
          .flatMapConcat { _ =>
            Source
              .fromFutureSource(
                client.getOption[Resource](resourceName).map {
                  case Some(resource) =>
                    val watch =
                      client.watchContinuously[Resource](resourceName,
                                                         sinceResourceVersion = Some(resource.resourceVersion))
                    Source
                      .single(resource)
                      .map(WatchEvent(EventType.MODIFIED, _))
                      .concat(watch)
                  case None =>
                    throw new RuntimeException(
                      s"Resource $resourceName not found in namespace ${client.namespaceName}!"
                    )
                }
              )
              .takeWithin(5.minutes)
          }

        source.via(handler)
      }
      .viaMat(KillSwitches.single)(Keep.right)
      .to(Sink.ignore)
      .run()
} 
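A caller supplies a Flow that reacts to each WatchEvent and keeps the returned KillSwitch to stop the loop. A sketch using skuber's core Pod resource (the k8s client value is an assumption, e.g. obtained from skuber.k8sInit):

import akka.stream.scaladsl.Flow
import skuber.Pod
import skuber.api.client.WatchEvent

val killSwitch = Watcher.watch[Pod](
  k8s,
  Flow[WatchEvent[Pod]].map { event =>
    println(s"${event._type}: ${event._object.name}")
    event
  }
)
// later, to stop watching:
killSwitch.shutdown()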
Example 184
Source File: AkkaHttpPrometheusExporter.scala    From cloudstate   with Apache License 2.0 5 votes vote down vote up
package io.cloudstate.proxy

import java.io.OutputStreamWriter
import java.util

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import io.prometheus.client.CollectorRegistry
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.stream.Materializer
import akka.util.ByteString
import io.prometheus.client.exporter.common.TextFormat

import scala.concurrent.Future


class AkkaHttpPrometheusExporter(metricsPort: Int, registry: CollectorRegistry = CollectorRegistry.defaultRegistry)(
    implicit system: ActorSystem,
    mat: Materializer
) {

  private[this] final val PrometheusContentType = ContentType.parse(TextFormat.CONTENT_TYPE_004).right.get

  private def routes = get {
    (path("metrics") | pathSingleSlash) {
      encodeResponse {
        parameter(Symbol("name[]").*) { names =>
          complete {
            val namesSet = new util.HashSet[String]()
            names.foreach(namesSet.add)
            val builder = ByteString.newBuilder
            val writer = new OutputStreamWriter(builder.asOutputStream)
            TextFormat.write004(writer, registry.filteredMetricFamilySamples(namesSet))
            // Very important to flush the writer before we build the byte string!
            writer.flush()
            HttpEntity(PrometheusContentType, builder.result())
          }
        }
      }
    }
  }

  def start(): Future[Http.ServerBinding] =
    Http().bindAndHandle(routes, "0.0.0.0", metricsPort)
} 
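Starting the exporter is a single call once the implicit system and materializer exist; a minimal sketch:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer

implicit val system: ActorSystem = ActorSystem("metrics")
implicit val mat: ActorMaterializer = ActorMaterializer()

// serves the default CollectorRegistry on http://0.0.0.0:9090/metrics
new AkkaHttpPrometheusExporter(metricsPort = 9090).start()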
Example 185
Source File: EventSourcedSupportFactory.scala    From cloudstate   with Apache License 2.0 5 votes vote down vote up
package io.cloudstate.proxy.eventsourced

import akka.NotUsed
import akka.actor.{ActorRef, ActorSystem}
import akka.cluster.sharding.ShardRegion.HashCodeMessageExtractor
import akka.cluster.sharding.{ClusterSharding, ClusterShardingSettings}
import akka.event.Logging
import akka.grpc.GrpcClientSettings
import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import com.google.protobuf.Descriptors.ServiceDescriptor
import io.cloudstate.protocol.entity.{Entity, Metadata}
import io.cloudstate.protocol.event_sourced.EventSourcedClient
import io.cloudstate.proxy._
import io.cloudstate.proxy.entity.{EntityCommand, UserFunctionReply}

import scala.concurrent.{ExecutionContext, Future}
import scala.collection.JavaConverters._

class EventSourcedSupportFactory(system: ActorSystem,
                                 config: EntityDiscoveryManager.Configuration,
                                 grpcClientSettings: GrpcClientSettings,
                                 concurrencyEnforcer: ActorRef,
                                 statsCollector: ActorRef)(implicit ec: ExecutionContext, mat: Materializer)
    extends EntityTypeSupportFactory {

  private final val log = Logging.getLogger(system, this.getClass)

  private val eventSourcedClient = EventSourcedClient(grpcClientSettings)(system)

  override def buildEntityTypeSupport(entity: Entity,
                                      serviceDescriptor: ServiceDescriptor,
                                      methodDescriptors: Map[String, EntityMethodDescriptor]): EntityTypeSupport = {
    validate(serviceDescriptor, methodDescriptors)

    val stateManagerConfig = EventSourcedEntity.Configuration(entity.serviceName,
                                                              entity.persistenceId,
                                                              config.passivationTimeout,
                                                              config.relayOutputBufferSize)

    log.debug("Starting EventSourcedEntity for {}", entity.persistenceId)
    val clusterSharding = ClusterSharding(system)
    val clusterShardingSettings = ClusterShardingSettings(system)
    val eventSourcedEntity = clusterSharding.start(
      typeName = entity.persistenceId,
      entityProps =
        EventSourcedEntitySupervisor.props(eventSourcedClient, stateManagerConfig, concurrencyEnforcer, statsCollector),
      settings = clusterShardingSettings,
      messageExtractor = new EntityIdExtractor(config.numberOfShards),
      allocationStrategy = new DynamicLeastShardAllocationStrategy(1, 10, 2, 0.0),
      handOffStopMessage = EventSourcedEntity.Stop
    )

    new EventSourcedSupport(eventSourcedEntity, config.proxyParallelism, config.relayTimeout)
  }

  private def validate(serviceDescriptor: ServiceDescriptor,
                       methodDescriptors: Map[String, EntityMethodDescriptor]): Unit = {
    val streamedMethods =
      methodDescriptors.values.filter(m => m.method.toProto.getClientStreaming || m.method.toProto.getServerStreaming)
    if (streamedMethods.nonEmpty) {
      val offendingMethods = streamedMethods.map(_.method.getName).mkString(",")
      throw EntityDiscoveryException(
        s"Event sourced entities do not support streamed methods, but ${serviceDescriptor.getFullName} has the following streamed methods: ${offendingMethods}"
      )
    }
    val methodsWithoutKeys = methodDescriptors.values.filter(_.keyFieldsCount < 1)
    if (methodsWithoutKeys.nonEmpty) {
      val offendingMethods = methodsWithoutKeys.map(_.method.getName).mkString(",")
      throw new EntityDiscoveryException(
        s"Event sourced entities do not support methods whose parameters do not have at least one field marked as entity_key, " +
        "but ${serviceDescriptor.getFullName} has the following methods without keys: ${offendingMethods}"
      )
    }
  }
}

private class EventSourcedSupport(eventSourcedEntity: ActorRef,
                                  parallelism: Int,
                                  private implicit val relayTimeout: Timeout)
    extends EntityTypeSupport {
  import akka.pattern.ask

  override def handler(method: EntityMethodDescriptor,
                       metadata: Metadata): Flow[EntityCommand, UserFunctionReply, NotUsed] =
    Flow[EntityCommand].mapAsync(parallelism)(
      command =>
        (eventSourcedEntity ? EntityTypeSupport.mergeStreamLevelMetadata(metadata, command))
          .mapTo[UserFunctionReply]
    )

  override def handleUnary(command: EntityCommand): Future[UserFunctionReply] =
    (eventSourcedEntity ? command).mapTo[UserFunctionReply]
}

private final class EntityIdExtractor(shards: Int) extends HashCodeMessageExtractor(shards) {
  override final def entityId(message: Any): String = message match {
    case command: EntityCommand => command.entityId
  }
} 
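The handler above combines mapAsync with the actor ask pattern to keep a bounded number of commands in flight per stream. Below is a minimal, self-contained sketch of that pattern; the names (AskFlowSketch, EchoActor, Ping, Pong) are illustrative assumptions, not part of the project above.

import akka.actor.{ Actor, ActorSystem, Props }
import akka.pattern.ask
import akka.stream.Materializer
import akka.stream.scaladsl.{ Flow, Sink, Source }
import akka.util.Timeout

import scala.concurrent.duration._

object AskFlowSketch extends App {
  final case class Ping(id: Int)
  final case class Pong(id: Int)

  class EchoActor extends Actor {
    def receive: Receive = { case Ping(id) => sender() ! Pong(id) }
  }

  implicit val system: ActorSystem = ActorSystem("ask-flow-sketch")
  implicit val mat: Materializer = Materializer(system)
  implicit val timeout: Timeout = 3.seconds

  val echo = system.actorOf(Props(new EchoActor), "echo")

  // Each element is sent to the actor with `ask`; mapAsync keeps up to four
  // requests in flight but still emits the replies downstream in order.
  val askFlow = Flow[Ping].mapAsync(parallelism = 4)(p => (echo ? p).mapTo[Pong])

  Source(1 to 10).map(Ping(_)).via(askFlow).runWith(Sink.foreach(println))
}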
Example 186
Source File: SeedElasticSearch.scala    From scaladex   with BSD 3-Clause "New" or "Revised" License 5 votes
package ch.epfl.scala.index.data.elastic

import akka.actor.ActorSystem
import akka.stream.Materializer

import build.info.BuildInfo

import ch.epfl.scala.index.data.DataPaths
import ch.epfl.scala.index.data.github.GithubDownload
import ch.epfl.scala.index.data.maven.PomsReader
import ch.epfl.scala.index.data.ProgressBar
import ch.epfl.scala.index.data.project._
import ch.epfl.scala.index.search.DataRepository

import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}
import scala.util.Success
import ch.epfl.scala.index.model.Project
import ch.epfl.scala.index.model.Release
import ch.epfl.scala.index.model.release.ScalaDependency

class SeedElasticSearch(
    paths: DataPaths,
    githubDownload: GithubDownload,
    dataRepository: DataRepository
)(
    implicit val ec: ExecutionContext
) extends LazyLogging {

  def run(): Unit = {

    val resetIndex = for {
      _ <- dataRepository.deleteAll()
      _ = logger.info("creating index")
      _ <- dataRepository.create()
    } yield ()

    Await.result(resetIndex, Duration.Inf)

    logger.info("loading update data")
    val projectConverter = new ProjectConvert(paths, githubDownload)
    val allData = projectConverter.convertAll(PomsReader.loadAll(paths), Map())

    var count = 0
    allData.foreach {
      case (project, releases, dependencies) =>
        logger.info(s"indexing ${project.reference}")
        val indexProjectF = dataRepository.insertProject(project)
        val indexReleasesF = dataRepository.insertReleases(releases)
        val indexDependenciesF = dataRepository.insertDependencies(dependencies)

        val indexAll = for {
          _ <- indexProjectF
          releasesResult <- indexReleasesF
          dependenciesResult <- indexDependenciesF
        } yield {
          if (releasesResult.hasFailures || dependenciesResult.hasFailures) {
            logger.error(s"indexing projects ${project.reference} failed")
            releasesResult.failures.foreach(p => logger.error(p.failureMessage))
            dependenciesResult.failures.foreach(
              p => logger.error(p.failureMessage)
            )
          }
        }
        Await.result(indexAll, Duration.Inf)
        count += 1
    }
    logger.info(s"$count projects indexed")
  }

}

object SeedElasticSearch {
  def run(dataPaths: DataPaths)(implicit sys: ActorSystem,
                                mat: Materializer): Unit = {
    import sys.dispatcher
    for (dataRepository <- DataRepository.open(BuildInfo.baseDirectory)) {
      val githubDownload = new GithubDownload(dataPaths)
      val seed =
        new SeedElasticSearch(dataPaths, githubDownload, dataRepository)
      seed.run()
    }
  }
} 
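Note how run() starts indexProjectF, indexReleasesF and indexDependenciesF before the for-comprehension: futures created eagerly run concurrently, while futures created inside a for run one after another. A small sketch of the difference (ConcurrencySketch and task are illustrative names):

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object ConcurrencySketch {
  def task(name: String): Future[String] =
    Future { println(s"running $name"); name }

  // Concurrent: all three futures are already running before the `for` begins;
  // the comprehension only awaits their results.
  val fa = task("a")
  val fb = task("b")
  val fc = task("c")
  val concurrent: Future[String] = for { a <- fa; b <- fb; c <- fc } yield a + b + c

  // Sequential: each task starts only after the previous one has completed.
  val sequential: Future[String] =
    for { a <- task("a"); b <- task("b"); c <- task("c") } yield a + b + c
}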
Example 187
Source File: RecordProcessorFactoryImpl.scala    From kinesis-stream   with MIT License 5 votes
package px.kinesis.stream.consumer

import akka.NotUsed
import akka.event.LoggingAdapter
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{KillSwitch, Materializer, OverflowStrategy}
import px.kinesis.stream.consumer.checkpoint.CheckpointTracker
import software.amazon.kinesis.processor.{ShardRecordProcessor, ShardRecordProcessorFactory}

import scala.collection.immutable.Seq
import scala.concurrent.ExecutionContext

class RecordProcessorFactoryImpl(
  sink: Sink[Record, NotUsed],
  workerId: String,
  checkpointTracker: CheckpointTracker,
  killSwitch: KillSwitch
)(implicit am: Materializer, ec: ExecutionContext, logging: LoggingAdapter) extends ShardRecordProcessorFactory {
  override def shardRecordProcessor(): ShardRecordProcessor = {
    val queue = Source
      .queue[Seq[Record]](0, OverflowStrategy.backpressure)
      .mapConcat(identity)
      .toMat(sink)(Keep.left)
      .run()

    new RecordProcessorImpl(queue, checkpointTracker, killSwitch, workerId)
  }
} 
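The factory above materializes one Source.queue-fed stream per shard; with a buffer size of 0 and OverflowStrategy.backpressure, each offer completes only once the sink has accepted the batch. A self-contained sketch of that pattern (QueueSketch and the Int payload are illustrative assumptions):

import akka.actor.ActorSystem
import akka.stream.{ Materializer, OverflowStrategy }
import akka.stream.scaladsl.{ Keep, Sink, Source }

import scala.collection.immutable.Seq
import scala.concurrent.ExecutionContext

object QueueSketch extends App {
  implicit val system: ActorSystem = ActorSystem("queue-sketch")
  implicit val mat: Materializer = Materializer(system)
  implicit val ec: ExecutionContext = system.dispatcher

  // Materializing yields a queue handle; `offer` returns a Future that
  // completes once the batch is accepted, backpressuring the producer.
  val queue = Source
    .queue[Seq[Int]](bufferSize = 0, OverflowStrategy.backpressure)
    .mapConcat(identity)
    .toMat(Sink.foreach(n => println(s"got $n")))(Keep.left)
    .run()

  queue.offer(Seq(1, 2, 3)).foreach(result => println(s"offer result: $result"))
}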
Example 188
Source File: WolframServiceImpl.scala    From lagom-on-kube   with Apache License 2.0 5 votes
package me.alexray.wolfram.impl

import java.net.URLEncoder

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, Uri}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.Materializer
import akka.util.ByteString
import com.lightbend.lagom.scaladsl.api.ServiceCall
import me.alexray.wolfram.api.WolframService
import play.api.Configuration

import scala.concurrent.{ExecutionContext, Future}


class WolframServiceImpl(config: Configuration)
                        (implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext)
  extends WolframService
{

  val appID = config.underlying.getString("wolfram.appid")
  val apiUrl = s"http://api.wolframalpha.com/v2/"


  override def query(q: String): ServiceCall[NotUsed, String] = ServiceCall { _ =>

    val url = apiUrl + s"query?appid=$appID&input=" + URLEncoder.encode(q, "UTF-8")

    for {
      response <- Http().singleRequest(HttpRequest(uri = Uri(url)))
      if response.status.isSuccess()
      data <- Unmarshal(response).to[String]
    } yield data

  }

  override def simple(q: String): ServiceCall[NotUsed, Array[Byte]] = ServiceCall { _ =>

    println(s"quetions = '$q'")

    val url = apiUrl + s"simple?appid=$appID&input=" +  URLEncoder.encode(q, "UTF-8").replace("+", "%20")

    println(s"url = '$url'")

    for {
      response <- Http().singleRequest(HttpRequest(uri = Uri(url)))
      if response.status.isSuccess()
      bytes <- Unmarshal(response).to[ByteString]
    } yield {
      println(s"received image ${bytes.size} bytes long")
      bytes.toArray
    }

  }
} 
Example 189
Source File: OnlyHttpsFilter.scala    From get-you-a-license   with BSD 3-Clause "New" or "Revised" License 5 votes
package filters

import akka.stream.Materializer
import play.api.{Environment, Mode}
import play.api.http.HeaderNames
import play.api.mvc.{Filter, RequestHeader, Result, Results}

import scala.concurrent.{ExecutionContext, Future}

class OnlyHttpsFilter(environment: Environment)(implicit val mat: Materializer, ec: ExecutionContext) extends Filter {
  def apply(nextFilter: (RequestHeader) => Future[Result])(requestHeader: RequestHeader): Future[Result] = {
    nextFilter(requestHeader).map { result =>
      if (requestHeader.secure || environment.mode == Mode.Dev) {
        result
      }
      else {
        Results.MovedPermanently("https://" + requestHeader.host + requestHeader.uri)
      }
    }
  }
} 
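To take effect, such a filter has to be exposed to Play through an HttpFilters aggregate (or listed in play.filters.enabled). A minimal wiring sketch, assuming OnlyHttpsFilter is given an @Inject() constructor so Guice can build it:

import javax.inject.Inject
import play.api.http.DefaultHttpFilters

// Play picks this class up by the conventional name `Filters` in the root package.
class Filters @Inject() (onlyHttps: filters.OnlyHttpsFilter)
    extends DefaultHttpFilters(onlyHttps)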
Example 190
Source File: RestApiServer.scala    From akka-blog-example   with Apache License 2.0 5 votes
package com.spr.akka

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.stream.Materializer
import com.typesafe.config.ConfigFactory

import scala.concurrent.Future


class RestApiServer(api: RestApi)(implicit system: ActorSystem, materializer: Materializer) {

  def bind(): Future[ServerBinding] = {
    val config = ConfigFactory.load()
    val host = config.getString("http.host")
    val port = config.getInt("http.port")
    Http().bindAndHandle(api.route, host, port)
  }

} 
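A hypothetical entry point for the server above; Main and the system name are assumptions, and RestApi is only required to expose the route used by bind():

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.spr.akka.{ RestApi, RestApiServer }

object Main extends App {
  implicit val system: ActorSystem = ActorSystem("rest-api")
  implicit val mat: Materializer = Materializer(system)
  import system.dispatcher

  val api: RestApi = ??? // construct the application's RestApi here
  new RestApiServer(api).bind().foreach { binding =>
    println(s"Server bound to ${binding.localAddress}")
  }
}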
Example 191
Source File: TLSFilter.scala    From HAT2.0   with GNU Affero General Public License v3.0 5 votes
package org.hatdex.hat.utils

import akka.stream.Materializer
import javax.inject.Inject
import play.api.Environment
import play.api.mvc._

import scala.concurrent.{ ExecutionContext, Future }

class TLSFilter @Inject() (
    implicit
    val mat: Materializer, ec: ExecutionContext, env: Environment) extends Filter {
  def apply(nextFilter: RequestHeader => Future[Result])(requestHeader: RequestHeader): Future[Result] = {
    if (requestHeader.headers.get("X-Forwarded-Proto").getOrElse("http") != "https" && env.mode == play.api.Mode.Prod) {
      if (requestHeader.method == "GET") {
        Future.successful(Results.MovedPermanently("https://" + requestHeader.host + requestHeader.uri))
      }
      else {
        Future.successful(Results.BadRequest("This service requires strict transport security"))
      }
    }
    else {
      nextFilter(requestHeader).map(_.withHeaders("Strict-Transport-Security" -> "max-age=31536000"))
    }
  }
} 
Example 192
Source File: LoggingFilter.scala    From HAT2.0   with GNU Affero General Public License v3.0 5 votes
package org.hatdex.hat.utils

import javax.inject.{ Inject, Singleton }
import akka.stream.Materializer
import com.nimbusds.jose.JWSObject
import com.nimbusds.jwt.JWTClaimsSet
import play.api.http.HttpErrorHandler
import play.api.mvc.{ Filter, RequestHeader, Result }
import play.api.{ Configuration, Logger }

import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try

@Singleton
class ActiveHatCounter() {
  // Careful! Mutable state
  private var count: Long = 0

  def get(): Long = count
  def increase(): Unit = this.synchronized(count += 1)
  def decrease(): Unit = this.synchronized(count -= 1)
}

class LoggingFilter @Inject() (
    errorHandler: HttpErrorHandler,
    configuration: Configuration,
    hatCounter: ActiveHatCounter)(
    implicit
    ec: ExecutionContext,
    val mat: Materializer) extends Filter {
  private val logger = Logger("api")

  def apply(nextFilter: RequestHeader => Future[Result])(requestHeader: RequestHeader): Future[Result] = {

    val startTime = System.currentTimeMillis

    nextFilter(requestHeader)
      .recoverWith({
        case e ⇒ errorHandler.onServerError(requestHeader, e)
      })
      .map { result =>
        val active = hatCounter.get()
        val requestTime = System.currentTimeMillis - startTime
        logger.info(s"[${requestHeader.remoteAddress}] [${requestHeader.method}:${requestHeader.host}:${requestHeader.uri}] " +
          s"[${result.header.status}] [$requestTime:ms] [hats:$active] ${tokenInfo(requestHeader)}")

        result.withHeaders("Request-Time" -> requestTime.toString)
      }
  }

  private val authTokenFieldName: String = configuration.get[String]("silhouette.authenticator.fieldName")
  private def tokenInfo(requestHeader: RequestHeader): String = {
    requestHeader.queryString.get(authTokenFieldName).flatMap(_.headOption)
      .orElse(requestHeader.headers.get(authTokenFieldName))
      .flatMap(t ⇒ if (t.isEmpty) { None } else { Some(t) })
      .flatMap(t ⇒ Try(JWSObject.parse(t)).toOption)
      .map(o ⇒ JWTClaimsSet.parse(o.getPayload.toJSONObject))
      .map { claimSet =>
        s"[${Option(claimSet.getStringClaim("application")).getOrElse("api")}@" +
          s"${Option(claimSet.getStringClaim("applicationVersion")).getOrElse("_")}]"
      }
      .getOrElse("[unauthenticated@_]")
  }
} 
Example 193
Source File: HatDataStatsProcessorSpec.scala    From HAT2.0   with GNU Affero General Public License v3.0 5 votes
package org.hatdex.hat.api.service.monitoring

import java.util.UUID

import akka.stream.Materializer
import com.google.inject.AbstractModule
import net.codingwell.scalaguice.ScalaModule
import org.hatdex.hat.api.models.{ EndpointData, Owner }
import org.hatdex.hat.api.service.applications.{ TestApplicationProvider, TrustedApplicationProvider }
import org.hatdex.hat.api.service.monitoring.HatDataEventBus.DataCreatedEvent
import org.hatdex.hat.authentication.models.HatUser
import org.hatdex.hat.dal.ModelTranslation
import org.hatdex.hat.resourceManagement.FakeHatConfiguration
import org.joda.time.DateTime
import org.specs2.mock.Mockito
import org.specs2.specification.Scope
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.json.{ JsValue, Json }
import play.api.test.PlaySpecification
import play.api.{ Application, Logger }

class HatDataStatsProcessorSpec extends PlaySpecification with Mockito with HatDataStatsProcessorContext {

  val logger = Logger(this.getClass)

  sequential

  "The `computeInboundStats` method" should {
    "Correctly count numbers of values for simple objects" in {
      val service = application.injector.instanceOf[HatDataStatsProcessor]
      val stats = service.computeInboundStats(simpleDataCreatedEvent)

      import org.hatdex.hat.api.json.DataStatsFormat._
      logger.debug(s"Got back stats: ${Json.prettyPrint(Json.toJson(stats))}")

      stats.logEntry must be equalTo "test item"
      stats.statsType must be equalTo "inbound"
      stats.stats.length must be equalTo 1
      val endpointStats = stats.stats.head
      endpointStats.endpoint must be equalTo "testendpoint"

      endpointStats.propertyStats("field") must equalTo(1)
      endpointStats.propertyStats("date") must equalTo(1)
      endpointStats.propertyStats("date_iso") must equalTo(1)
      endpointStats.propertyStats("anotherField") must equalTo(1)
      endpointStats.propertyStats("object.objectField") must equalTo(1)
      endpointStats.propertyStats("object.objectFieldArray[]") must equalTo(3)
      endpointStats.propertyStats("object.objectFieldObjectArray[].subObjectName") must equalTo(2)
      endpointStats.propertyStats("object.objectFieldObjectArray[].subObjectName2") must equalTo(2)
    }
  }

}

trait HatDataStatsProcessorContext extends Scope {
  import scala.concurrent.ExecutionContext.Implicits.global
  // Setup default users for testing
  val owner = HatUser(UUID.randomUUID(), "hatuser", Some("pa55w0rd"), "hatuser", Seq(Owner()), enabled = true)

  class ExtrasModule extends AbstractModule with ScalaModule {
    override def configure(): Unit = {
      bind[TrustedApplicationProvider].toInstance(new TestApplicationProvider(Seq()))
    }
  }

  lazy val application: Application = new GuiceApplicationBuilder()
    .configure(FakeHatConfiguration.config)
    .overrides(new ExtrasModule)
    .build()

  implicit lazy val materializer: Materializer = application.materializer

  val simpleJson: JsValue = Json.parse(
    """
      | {
      |   "field": "value",
      |   "date": 1492699047,
      |   "date_iso": "2017-04-20T14:37:27+00:00",
      |   "anotherField": "anotherFieldValue",
      |   "object": {
      |     "objectField": "objectFieldValue",
      |     "objectFieldArray": ["objectFieldArray1", "objectFieldArray2", "objectFieldArray3"],
      |     "objectFieldObjectArray": [
      |       {"subObjectName": "subObject1", "subObjectName2": "subObject1-2"},
      |       {"subObjectName": "subObject2", "subObjectName2": "subObject2-2"}
      |     ]
      |   }
      | }
    """.stripMargin)

  val simpleDataCreatedEvent = DataCreatedEvent(
    "testhat.hubofallthings.net",
    ModelTranslation.fromInternalModel(owner).clean,
    DateTime.now(), "test item",
    Seq(
      EndpointData("testendpoint", Option(UUID.randomUUID()), None, None, simpleJson, None)))
} 
Example 194
Source File: TestSpec.scala    From study-category-theory   with Apache License 2.0 5 votes
package com.github.dnvriend

import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.util.Timeout
import org.scalatest._
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatestplus.play.guice.GuiceOneServerPerSuite
import play.api.inject.BindingKey
import play.api.libs.json.{ JsValue, Json, Writes }
import play.api.test.WsTestClient

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
import scala.util.Try

object Person {
  implicit val format = Json.format[Person]
  implicit class ValueObjectOps(val self: Person) {
    def toJson: JsValue = Json.toJson(self)
  }
  implicit class IterableOps(val self: Iterable[Person]) {
    def toJson: JsValue = Json.toJson(self)
  }
}
final case class Person(firstName: String, age: Int)

class TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with OptionValues
    with TryValues
    with ScalaFutures
    with WsTestClient
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with GuiceOneServerPerSuite {

  def getComponent[A: ClassTag] = app.injector.instanceOf[A]
  def getNamedComponent[A](name: String)(implicit ct: ClassTag[A]): A =
    app.injector.instanceOf[A](BindingKey(ct.runtimeClass.asInstanceOf[Class[A]]).qualifiedWith(name))

  // set the port number of the HTTP server
  override lazy val port: Int = getNamedComponent[Int]("test.port")
  implicit val timeout: Timeout = getComponent[Timeout]
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 30.seconds, interval = 300.millis)
  implicit val system: ActorSystem = getComponent[ActorSystem]
  implicit val ec: ExecutionContext = getComponent[ExecutionContext]
  implicit val mat: Materializer = getComponent[Materializer]

  // ================================== Supporting Operations ====================================
  def id: String = java.util.UUID.randomUUID().toString

  implicit class PimpedFuture[T](self: Future[T]) {
    def toTry: Try[T] = Try(self.futureValue)
  }

  
  final val FirstName: String = "John"
  final val LastName: String = "Doe"

  override protected def beforeEach(): Unit = {
  }
} 
Example 195
Source File: RequestRunner.scala    From aws-spi-akka-http   with Apache License 2.0 5 votes
package com.github.matsluni.akkahttpspi

import java.util.concurrent.CompletableFuture

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.stream.Materializer
import akka.stream.scaladsl.{Keep, Sink}
import org.slf4j.LoggerFactory
import software.amazon.awssdk.http.SdkHttpFullResponse
import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler

import scala.compat.java8.FutureConverters
import scala.concurrent.ExecutionContext
import scala.collection.JavaConverters._

class RequestRunner(connectionPoolSettings: ConnectionPoolSettings)(implicit sys: ActorSystem,
                                                          ec: ExecutionContext,
                                                          mat: Materializer) {
  val logger = LoggerFactory.getLogger(this.getClass)

  def run(httpRequest: HttpRequest,
          handler: SdkAsyncHttpResponseHandler): CompletableFuture[Void] = {
    val result = Http()
      .singleRequest(httpRequest, settings = connectionPoolSettings)
      .flatMap { response =>
        val sdkResponse = SdkHttpFullResponse.builder()
          .headers(response.headers.groupBy(_.name()).map{ case (k, v) => k -> v.map(_.value()).asJava }.asJava)
          .statusCode(response.status.intValue())
          .statusText(response.status.reason)
          .build

        handler.onHeaders(sdkResponse)

        val (complete, publisher) = response
          .entity
          .dataBytes
          .map(_.asByteBuffer)
          .alsoToMat(Sink.ignore)(Keep.right)
          .toMat(Sink.asPublisher(fanout = false))(Keep.both)
          .run()

        handler.onStream(publisher)

        complete
      }

    result.failed.foreach(handler.onError)
    FutureConverters.toJava(result.map(_ => null: Void)).toCompletableFuture
  }
} 
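The interesting stream shape above is alsoToMat(Sink.ignore)(Keep.right) combined with Sink.asPublisher: one materialization yields both a completion Future and a Reactive Streams Publisher for the same elements. A self-contained sketch (PublisherSketch is an illustrative name):

import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.stream.scaladsl.{ Keep, Sink, Source }

object PublisherSketch extends App {
  implicit val system: ActorSystem = ActorSystem("publisher-sketch")
  implicit val mat: Materializer = Materializer(system)

  // done: Future[Done] completes when the stream finishes;
  // publisher: org.reactivestreams.Publisher[Int] hands the same elements
  // to a single downstream subscriber (fanout = false).
  val (done, publisher) =
    Source(1 to 5)
      .alsoToMat(Sink.ignore)(Keep.right)
      .toMat(Sink.asPublisher[Int](fanout = false))(Keep.both)
      .run()
}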
Example 196
Source File: TagViewSequenceNumberScanner.scala    From akka-persistence-cassandra   with Apache License 2.0 5 votes
package akka.persistence.cassandra.query

import java.lang.{ Long => JLong }
import java.util.UUID

import akka.NotUsed
import akka.annotation.InternalApi
import akka.event.Logging
import akka.persistence.cassandra.journal.CassandraJournal._
import akka.persistence.cassandra.journal.TimeBucket
import akka.persistence.cassandra.formatOffset
import akka.persistence.cassandra.query.TagViewSequenceNumberScanner.Session
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.datastax.oss.driver.api.core.cql.{ PreparedStatement, Row }

import scala.concurrent.duration.{ Deadline, FiniteDuration }
import scala.concurrent.{ ExecutionContext, Future }
import akka.persistence.cassandra.BucketSize
import akka.stream.alpakka.cassandra.scaladsl.CassandraSession


// The enclosing class declaration was elided in this listing; a minimal
// reconstruction (signature assumed) is given so that scan() has its scope.
@InternalApi
private[akka] class TagViewSequenceNumberScanner(session: Session)(
    implicit materializer: Materializer,
    ec: ExecutionContext) {

  private val log = Logging(materializer.system, getClass)

  private[akka] def scan(
      tag: String,
      fromOffset: UUID,
      toOffset: UUID,
      bucketSize: BucketSize,
      scanningPeriod: FiniteDuration,
      whichToKeep: (TagPidSequenceNr, TagPidSequenceNr) => TagPidSequenceNr)
      : Future[Map[PersistenceId, (TagPidSequenceNr, UUID)]] = {
    val deadline: Deadline = Deadline.now + scanningPeriod

    def doIt(): Future[Map[PersistenceId, (TagPidSequenceNr, UUID)]] = {

      // How many buckets is this spread across?
      val startBucket = TimeBucket(fromOffset, bucketSize)
      val endBucket = TimeBucket(toOffset, bucketSize)

      require(startBucket <= endBucket)

      if (log.isDebugEnabled) {
        log.debug(
          s"Scanning tag: $tag from: {}, to: {}. Bucket {} to {}",
          formatOffset(fromOffset),
          formatOffset(toOffset),
          startBucket,
          endBucket)
      }

      Source
        .unfold(startBucket)(current => {
          if (current <= endBucket) {
            Some((current.next(), current))
          } else {
            None
          }
        })
        .flatMapConcat(bucket => {
          log.debug("Scanning bucket {}", bucket)
          session.selectTagSequenceNrs(tag, bucket, fromOffset, toOffset)
        })
        .map(row => (row.getString("persistence_id"), row.getLong("tag_pid_sequence_nr"), row.getUuid("timestamp")))
        .runFold(Map.empty[Tag, (TagPidSequenceNr, UUID)]) {
          case (acc, (pid, tagPidSequenceNr, timestamp)) =>
            val (newTagPidSequenceNr, newTimestamp) = acc.get(pid) match {
              case None =>
                (tagPidSequenceNr, timestamp)
              case Some((currentTagPidSequenceNr, currentTimestamp)) =>
                if (whichToKeep(tagPidSequenceNr, currentTagPidSequenceNr) == tagPidSequenceNr)
                  (tagPidSequenceNr, timestamp)
                else
                  (currentTagPidSequenceNr, currentTimestamp)
            }
            acc + (pid -> ((newTagPidSequenceNr, newTimestamp)))
        }
        .flatMap { result =>
          if (deadline.hasTimeLeft()) {
            doIt()
          } else {
            Future.successful(result)
          }
        }
    }
    doIt()
  }
} 
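scan() walks the time buckets with Source.unfold, which emits the current state and computes the next one until the function returns None. A minimal sketch of that operator (UnfoldSketch and the integer buckets are illustrative assumptions):

import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.stream.scaladsl.{ Sink, Source }

object UnfoldSketch extends App {
  implicit val system: ActorSystem = ActorSystem("unfold-sketch")
  implicit val mat: Materializer = Materializer(system)

  val (startBucket, endBucket) = (0, 3)

  // Emits 0, 1, 2, 3 and then completes: Some((nextState, elementToEmit)) / None.
  Source
    .unfold(startBucket) { current =>
      if (current <= endBucket) Some((current + 1, current)) else None
    }
    .runWith(Sink.foreach(println))
}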
Example 197
Source File: WSClient.scala    From play-auditing   with Apache License 2.0 5 votes
package uk.gov.hmrc.audit

import akka.stream.Materializer
import org.slf4j.{Logger, LoggerFactory}
import play.api.libs.ws.WSClientConfig
import play.api.libs.ws.ahc.{AhcConfigBuilder, AhcWSClientConfig, AhcWSClient}

import scala.concurrent.Future
import scala.concurrent.duration.Duration


package object handler {
  type WSClient = play.api.libs.ws.WSClient

  object WSClient {
    private val logger: Logger = LoggerFactory.getLogger(getClass)

    def apply(
      connectTimeout: Duration,
      requestTimeout: Duration,
      userAgent     : String
    )(implicit
      materializer: Materializer
    ): WSClient =
      new AhcWSClient(
        new AhcConfigBuilder(
          ahcConfig = AhcWSClientConfig()
                        .copy(wsClientConfig = WSClientConfig()
                          .copy(
                            connectionTimeout = connectTimeout,
                            requestTimeout    = requestTimeout,
                            userAgent         = Some(userAgent)
                           )
                        )
        ).build()
      )
  }
} 
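A minimal usage sketch for the factory above; the Demo object and system name are assumptions, and the client should be closed (and the system terminated) on shutdown:

import akka.actor.ActorSystem
import akka.stream.Materializer
import uk.gov.hmrc.audit.handler.WSClient

import scala.concurrent.duration._

object Demo extends App {
  implicit val system: ActorSystem = ActorSystem("ws-client-demo")
  implicit val mat: Materializer = Materializer(system)

  val client: WSClient =
    WSClient(connectTimeout = 5.seconds, requestTimeout = 30.seconds, userAgent = "demo-agent")

  // issue requests with client.url(...), then release resources:
  client.close()
  system.terminate()
}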
Example 198
Source File: WSClient.scala    From play-auditing   with Apache License 2.0 5 votes
package uk.gov.hmrc.audit

import akka.stream.Materializer
import play.api.libs.ws.ahc.{StandaloneAhcWSClient, AhcWSClientConfigFactory}
import play.api.libs.ws.WSClientConfig

import scala.concurrent.duration.Duration

package object handler {
  type WSClient = play.api.libs.ws.StandaloneWSClient

  object WSClient {
    def apply(
      connectTimeout: Duration,
      requestTimeout: Duration,
      userAgent     : String
    )(implicit
      materializer: Materializer
    ): WSClient =
      StandaloneAhcWSClient(
        config = AhcWSClientConfigFactory.forConfig()
                   .copy(wsClientConfig = WSClientConfig()
                     .copy(
                       connectionTimeout = connectTimeout,
                       requestTimeout    = requestTimeout,
                       userAgent         = Some(userAgent)
                     )
                   )
      )
  }
} 
Example 199
Source File: DatastreamHandler.scala    From play-auditing   with Apache License 2.0 5 votes
package uk.gov.hmrc.audit.handler

import java.net.URL

import akka.stream.Materializer
import org.slf4j.{Logger, LoggerFactory}
import play.api.inject.ApplicationLifecycle
import play.api.libs.json.JsValue
import uk.gov.hmrc.audit.HandlerResult
import uk.gov.hmrc.audit.HandlerResult.{Failure, Rejected, Success}

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.Duration

class DatastreamHandler(
  scheme        : String,
  host          : String,
  port          : Integer,
  path          : String,
  connectTimeout: Duration,
  requestTimeout: Duration,
  userAgent     : String,
  materializer  : Materializer,
  lifecycle     : ApplicationLifecycle
) extends HttpHandler(
  endpointUrl    = new URL(s"$scheme://$host:$port$path"),
  userAgent      = userAgent,
  connectTimeout = connectTimeout,
  requestTimeout = requestTimeout,
  materializer   = materializer,
  lifecycle      = lifecycle
) with AuditHandler {

  private val logger: Logger = LoggerFactory.getLogger(getClass)

  override def sendEvent(event: JsValue)(implicit ec: ExecutionContext): Future[HandlerResult] =
    sendEvent(event, retryIfMalformed = true)

  private def sendEvent(event: JsValue, retryIfMalformed: Boolean)(implicit ec: ExecutionContext): Future[HandlerResult] =
    sendHttpRequest(event).flatMap {
      case HttpResult.Response(status) =>
        Future.successful(status match {
          case 204 => Success
          case 400 => logger.warn("Malformed request rejected by Datastream")
                      Rejected
          case 413 => logger.warn("Too large request rejected by Datastream")
                      Rejected
          case _   => logger.error(s"Unknown return value $status")
                      Failure
        })
      case HttpResult.Malformed =>
        if (retryIfMalformed) {
          logger.warn("Malformed response on first request, retrying")
          sendEvent(event, retryIfMalformed = false)
        } else {
          logger.warn("Malformed response on second request, failing")
          Future.successful(Failure)
        }
      case HttpResult.Failure(msg, exceptionOption) =>
        exceptionOption match {
          case None     => logger.error(msg)
          case Some(ex) => logger.error(msg, ex)
        }
        Future.successful(Failure)
    }
} 
Example 200
Source File: HttpHandler.scala    From play-auditing   with Apache License 2.0 5 votes
package uk.gov.hmrc.audit.handler

import java.io.IOException
import java.net.URL
import java.util.concurrent.TimeoutException

import akka.stream.Materializer
import org.slf4j.{Logger, LoggerFactory}
import play.api.inject.ApplicationLifecycle
import play.api.libs.json.JsValue

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.Duration


sealed trait HttpResult
object HttpResult {
  case class Response(statusCode: Int) extends HttpResult
  case object Malformed extends HttpResult
  case class Failure(msg: String, nested: Option[Throwable] = None) extends Exception(msg, nested.orNull) with HttpResult
}

abstract class HttpHandler(
  endpointUrl      : URL,
  userAgent        : String,
  connectTimeout   : Duration,
  requestTimeout   : Duration,
  materializer     : Materializer,
  lifecycle        : ApplicationLifecycle
) {
  private val logger: Logger = LoggerFactory.getLogger(getClass)

  val HTTP_STATUS_CONTINUE = 100

  val wsClient: WSClient = {
    implicit val m = materializer
    val wsClient = WSClient(connectTimeout, requestTimeout, userAgent)
    lifecycle.addStopHook { () =>
      logger.info("Closing play-auditing http connections...")
      wsClient.close()
      Future.successful(())
    }
    wsClient
  }

  def sendHttpRequest(event: JsValue)(implicit ec: ExecutionContext): Future[HttpResult] =
    try {
      logger.debug(s"Sending audit request to URL ${endpointUrl.toString}")

      wsClient.url(endpointUrl.toString)
        .post(event)
        .map { response =>
          val httpStatusCode = response.status
          logger.debug(s"Got status code : $httpStatusCode")
          response.body // value intentionally discarded; the body itself is not used
          logger.debug("Response processed and closed")

          if (httpStatusCode >= HTTP_STATUS_CONTINUE) {
            logger.info(s"Got status code $httpStatusCode from HTTP server.")
            HttpResult.Response(httpStatusCode)
          } else {
            logger.warn(s"Malformed response (status $httpStatusCode) returned from server")
            HttpResult.Malformed
          }
        }.recover {
          case e: TimeoutException =>
            HttpResult.Failure("Error opening connection, or request timed out", Some(e))
          case e: IOException =>
            HttpResult.Failure("Error opening connection, or request timed out", Some(e))
        }
    } catch {
      case t: Throwable =>
        Future.successful(HttpResult.Failure("Error sending HTTP request", Some(t)))
    }
}