com.typesafe.config.ConfigFactory Scala Examples

The following examples show how to use com.typesafe.config.ConfigFactory. They are extracted from open source projects; the project name, source file, and license are listed above each example.
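Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the ConfigFactory calls that recur throughout them: load(), parseString(), and withFallback(). The object name and the my-app paths are illustrative only.

import com.typesafe.config.{Config, ConfigFactory}

object ConfigFactoryBasics extends App {
  // application.conf (if present on the classpath) merged over reference.conf
  val loaded: Config = ConfigFactory.load()

  // Inline HOCON, handy for tests and overrides
  val overrides: Config = ConfigFactory.parseString(
    """my-app {
      |  host = "localhost"
      |  port = 8080
      |}""".stripMargin)

  // Values in `overrides` win; `loaded` fills in everything else
  val combined: Config = overrides.withFallback(loaded).resolve()

  println(combined.getString("my-app.host")) // localhost
  println(combined.getInt("my-app.port"))    // 8080
}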
Example 1
Source File: HasDex.scala    From matcher   with MIT License
package com.wavesplatform.dex.it.dex

import java.util.Properties
import java.util.concurrent.ThreadLocalRandom

import cats.Functor
import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.it.api.BaseContainersKit
import com.wavesplatform.dex.it.docker.DexContainer
import com.wavesplatform.dex.it.fp.CanExtract
import mouse.any._
import org.apache.kafka.clients.admin.{AdminClient, NewTopic}

import scala.collection.JavaConverters._

trait HasDex { self: BaseContainersKit =>
  private val defaultTag = Option(System.getenv("DEX_TAG")).getOrElse("latest")

  protected implicit def toDexExplicitGetOps[F[_]: CanExtract: Functor](self: DexApi[F]): DexApiOps.ExplicitGetDexApiOps[F] = {
    new DexApiOps.ExplicitGetDexApiOps[F](self)
  }

  protected def dexInitialSuiteConfig: Config = ConfigFactory.empty()

  protected lazy val dexRunConfig: Config = dexQueueConfig(ThreadLocalRandom.current.nextInt(0, Int.MaxValue))

  protected def kafkaServer: Option[String] = Option { System.getenv("KAFKA_SERVER") }

  protected def dexQueueConfig(queueId: Int): Config = {
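    // When KAFKA_SERVER is set, build a Kafka events-queue config from an inline HOCON string; otherwise return an empty config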
    kafkaServer.fold { ConfigFactory.empty() } { kafkaServer =>
      ConfigFactory.parseString(s"""waves.dex.events-queue {
                                   |  type = kafka
                                   |  kafka {
                                   |    servers = "$kafkaServer"
                                   |    topic = "dex-$queueId"
                                   |  }
                                   |}""".stripMargin)
    }
  }

  protected def createDex(name: String,
                          runConfig: Config = dexRunConfig,
                          suiteInitialConfig: Config = dexInitialSuiteConfig,
                          tag: String = defaultTag): DexContainer =
    DexContainer(name, networkName, network, getIp(name), runConfig, suiteInitialConfig, localLogsDir, tag) unsafeTap addKnownContainer

  lazy val dex1: DexContainer = createDex("dex-1")

  protected def createKafkaTopic(name: String): Unit = kafkaServer.foreach { server =>
    val properties = new Properties()
    properties.putAll(
      Map(
        "bootstrap.servers"  -> server,
        "group.id"           -> s"create-$name",
        "key.deserializer"   -> "org.apache.kafka.common.serialization.StringDeserializer",
        "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer"
      ).asJava
    )

    val adminClient = AdminClient.create(properties)

    try {
      val newTopic = new NewTopic(name, 1, 1.toShort)
      adminClient.createTopics(java.util.Collections.singletonList(newTopic))
    } finally {
      adminClient.close()
    }
  }
} 
Example 2
Source File: KafkaTopicInfo.scala    From matcher   with MIT License
package tools

import java.io.File

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory
import com.wavesplatform.dex.queue.KafkaMatcherQueue.eventDeserializer
import com.wavesplatform.dex.queue.{QueueEvent, QueueEventWithMeta}
import com.wavesplatform.dex.settings.toConfigOps
import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer

import scala.collection.JavaConverters._
import scala.concurrent.duration.DurationInt

object KafkaTopicInfo extends App {
  implicit val system: ActorSystem = ActorSystem()

  val configFile = new File(args(0))
  val topic      = args(1)
  val from       = args(2).toLong
  val max        = args(3).toInt

  println(s"""configFile: ${configFile.getAbsolutePath}
             |topic: $topic
             |from: $from
             |max: $max""".stripMargin)

  val requestTimeout = java.time.Duration.ofNanos(5.seconds.toNanos)

  val config = ConfigFactory
    .parseString("""waves.dex.events-queue.kafka.consumer.client {
                   |  client.id = "kafka-topics-info"
                   |  enable.auto.commit = false
                   |  auto.offset.reset = earliest
                   |}
                   |
                   |""".stripMargin)
    .withFallback {
      ConfigFactory
        .parseFile(configFile)
        .withFallback(ConfigFactory.defaultApplication())
        .withFallback(ConfigFactory.defaultReference())
        .resolve()
        .getConfig("waves.dex.events-queue.kafka")
    }

  val consumer = new KafkaConsumer[String, QueueEvent](
    config.getConfig("waves.dex.events-queue.kafka.consumer.client").toProperties,
    new StringDeserializer,
    eventDeserializer
  )

  try {
    val topicPartition  = new TopicPartition(topic, 0)
    val topicPartitions = java.util.Collections.singletonList(topicPartition)
    consumer.assign(topicPartitions)

    {
      val r = consumer.partitionsFor(topic, requestTimeout)
      println(s"Partitions:\n${r.asScala.mkString("\n")}")
    }

    {
      val r = consumer.endOffsets(topicPartitions, requestTimeout)
      println(s"End offsets for $topicPartition: ${r.asScala.mkString(", ")}")
    }

    consumer.seek(topicPartition, from)

    val pollDuration  = java.time.Duration.ofNanos(1.seconds.toNanos)
    val lastOffset    = from + max
    var continue      = true
    while (continue) {
      println(s"Reading from Kafka")

      val xs = consumer.poll(pollDuration).asScala.toVector
      xs.foreach { msg =>
        println(QueueEventWithMeta(msg.offset(), msg.timestamp(), msg.value()))
      }

      xs.lastOption.foreach { x =>
        if (x.offset() == lastOffset) continue = false
      }
    }
  } finally {
    consumer.close()
  }
} 
Example 3
Source File: UserSampleConfig.scala    From akka-http-slick-sample   with MIT License
package net.softler.config

import com.typesafe.config.ConfigFactory

trait UserSampleConfig {
  private lazy val config = ConfigFactory.load()
  private lazy val httpConfig = config.getConfig("http")
  private lazy val slickConfig = config.getConfig("database")

  lazy val applicationName: String = config.getString("application.name")
  lazy val httpHost: String = httpConfig.getString("interface")
  lazy val httpPort: Int = httpConfig.getInt("port")

  lazy val jdbcUrl: String = slickConfig.getString("url")
  lazy val dbUser: String = slickConfig.getString("user")
  lazy val dbPassword: String = slickConfig.getString("password")
  lazy val dbDriver: String = slickConfig.getString("driver")
} 
Example 4
Source File: AkkaTest.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.testing

import java.util
import java.util.concurrent.{Executors, ScheduledExecutorService}

import akka.NotUsed
import akka.actor.{ActorSystem, Scheduler}
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.Materializer
import akka.util.ByteString
import com.daml.grpc.adapter.{ExecutionSequencerFactory, SingleThreadExecutionSequencerPool}
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
import com.typesafe.scalalogging.LazyLogging
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContextExecutor, Future}
import scala.util.control.NonFatal

trait AkkaTest extends BeforeAndAfterAll with LazyLogging { self: Suite =>
  // TestEventListener is needed for log testing
  private val loggers =
    util.Arrays.asList("akka.event.slf4j.Slf4jLogger", "akka.testkit.TestEventListener")
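  // Layer test-specific Akka settings on top of the loaded configuration via withValue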
  protected implicit val sysConfig: Config = ConfigFactory
    .load()
    .withValue("akka.loggers", ConfigValueFactory.fromIterable(loggers))
    .withValue("akka.logger-startup-timeout", ConfigValueFactory.fromAnyRef("30s"))
    .withValue("akka.stdout-loglevel", ConfigValueFactory.fromAnyRef("INFO"))
  protected implicit val system: ActorSystem = ActorSystem("test", sysConfig)
  protected implicit val ec: ExecutionContextExecutor =
    system.dispatchers.lookup("test-dispatcher")
  protected implicit val scheduler: Scheduler = system.scheduler
  protected implicit val schedulerService: ScheduledExecutorService =
    Executors.newSingleThreadScheduledExecutor()
  protected implicit val materializer: Materializer = Materializer(system)
  protected implicit val esf: ExecutionSequencerFactory =
    new SingleThreadExecutionSequencerPool("testSequencerPool")
  protected val timeout: FiniteDuration = 2.minutes
  protected val shortTimeout: FiniteDuration = 5.seconds

  protected def await[T](fun: => Future[T]): T = Await.result(fun, timeout)

  protected def awaitShort[T](fun: => Future[T]): T = Await.result(fun, shortTimeout)

  protected def drain(source: Source[ByteString, NotUsed]): ByteString = {
    val futureResult: Future[ByteString] = source.runFold(ByteString.empty) { (a, b) =>
      a.concat(b)
    }
    awaitShort(futureResult)
  }

  protected def drain[A, B](source: Source[A, B]): Seq[A] = {
    val futureResult: Future[Seq[A]] = source.runWith(Sink.seq)
    awaitShort(futureResult)
  }

  override protected def afterAll(): Unit = {
    try {
      val _ = await(system.terminate())
    } catch {
      case NonFatal(_) => ()
    }
    schedulerService.shutdownNow()
    super.afterAll()
  }
} 
Example 5
Source File: LedgerClientConfig.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.binding.config

import java.io.File
import java.time.Duration

import com.daml.ledger.client.binding.LedgerClientConfigurationError.MalformedTypesafeConfig
import com.daml.ledger.client.binding.config.LedgerClientConfig.ClientSslConfig
import com.daml.ledger.client.configuration.{
  CommandClientConfiguration,
  LedgerClientConfiguration,
  LedgerIdRequirement
}
import com.typesafe.config.{Config, ConfigFactory}
import io.grpc.netty.GrpcSslContexts
import io.netty.handler.ssl.SslContext
import pureconfig._

import scala.util.Try

case class LedgerClientConfig(
    ledgerId: Option[String],
    commandClient: CommandClientConfiguration,
    maxRetryTime: Duration,
    ssl: Option[ClientSslConfig]
) {
  def toBindingConfig(applicationId: String) =
    LedgerClientConfiguration(
      applicationId,
      ledgerIdRequirement,
      commandClient,
      ssl.map(_.sslContext)
    )

  private val ledgerIdRequirement = LedgerIdRequirement(ledgerId)
}

object LedgerClientConfig {

  case class ClientSslConfig(
      clientKeyCertChainFile: File,
      clientKeyFile: File,
      trustedCertsFile: File) {

    def sslContext: SslContext =
      GrpcSslContexts
        .forClient()
        .keyManager(clientKeyCertChainFile, clientKeyFile)
        .trustManager(trustedCertsFile)
        .build()

  }

  def create(config: Config = ConfigFactory.load()): Try[LedgerClientConfig] = {
    wrapError(loadConfig[LedgerClientConfig](config, "ledger-client"))
  }

  private def wrapError[T](
      failuresOrConfig: Either[pureconfig.error.ConfigReaderFailures, T]): Try[T] = {
    failuresOrConfig.left.map(MalformedTypesafeConfig).toTry
  }
} 
Example 6
Source File: LedgerClientConfigTest.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.binding.config

import java.io.File

import com.daml.ledger.client.binding.LedgerClientConfigurationError.MalformedTypesafeConfig
import com.typesafe.config.ConfigFactory
import org.scalatest.{Matchers, WordSpec}

import scala.util.Success

class LedgerClientConfigTest extends WordSpec with Matchers {

  "TypeSafePlatformConfig" should {

    "parse the reference conf without errors" in {
      LedgerClientConfig.create() should be(a[Success[_]])
    }

    "parse the expected values out of the reference conf" in {
      val config = LedgerClientConfig.create().get

      config.ledgerId shouldEqual None
      config.commandClient.maxCommandsInFlight shouldEqual 256
      config.commandClient.maxParallelSubmissions shouldEqual 32
      config.commandClient.defaultDeduplicationTime.getSeconds shouldEqual 30
      config.maxRetryTime.getSeconds shouldEqual 60
      config.ssl shouldBe None
    }

    "parse the expected values out of the mock config" in {
      val configStr = """
                        |ledger-client {
                        |  ledger-id = "ledgerId_mock"
                        |  command-client {
                        |    max-commands-in-flight = 260
                        |    max-parallel-submissions = 40
                        |    default-deduplication-time = PT40S
                        |  }
                        |  max-retry-time = PT45S
                        |  ssl {
                        |    client-key-cert-chain-file = "file1"
                        |    client-key-file = "file2"
                        |    trusted-certs-file = "file3"
                        |  }
                        |}""".stripMargin

      val clientConfig = LedgerClientConfig.create(ConfigFactory.parseString(configStr)).get

      clientConfig.ledgerId shouldEqual Some("ledgerId_mock")
      clientConfig.commandClient.maxCommandsInFlight shouldEqual 260
      clientConfig.commandClient.maxParallelSubmissions shouldEqual 40
      clientConfig.commandClient.defaultDeduplicationTime.getSeconds shouldEqual 40
      clientConfig.maxRetryTime.getSeconds shouldEqual 45
      clientConfig.ssl.get.clientKeyCertChainFile shouldBe new File("file1")
      clientConfig.ssl.get.clientKeyFile shouldBe new File("file2")
      clientConfig.ssl.get.trustedCertsFile shouldBe new File("file3")
    }

    "return the expected type of Throwable on parse errors" in {
      LedgerClientConfig.create(ConfigFactory.empty()).failed.get should be(
        a[MalformedTypesafeConfig])
    }
  }
} 
Example 7
Source File: ChaosCassandraSetup.scala    From eventuate-chaos   with Apache License 2.0
package com.rbmhtechnology.eventuate.chaos

import akka.actor.ActorSystem
import com.rbmhtechnology.eventuate.ReplicationEndpoint
import com.rbmhtechnology.eventuate.log.cassandra.CassandraEventLog
import com.typesafe.config.ConfigFactory

trait ChaosCassandraSetup extends ChaosSetup {

  def config(hostname: String, seeds: Seq[String]) = baseConfig(hostname)
    .withFallback(ConfigFactory.parseString(
    s"""
       |eventuate.log.cassandra.contact-points = [${seeds.map(quote).mkString(",")}]
       |eventuate.log.cassandra.replication-factor = ${seeds.size}
       |eventuate.log.cassandra.keyspace = "ev_$name"
       |eventuate.log.cassandra.index-update-limit = 16
       |eventuate.log.cassandra.read-consistency = "QUORUM"
       |eventuate.log.cassandra.write-consistency = "QUORUM"
       |eventuate.log.cassandra.init-retry-max = 50
       |eventuate.log.cassandra.connect-retry-max = 50
     """.stripMargin))

  def cassandras = sys.env.get("CASSANDRA_NODES").map(_.split(","))
    .getOrElse(Array("c1.cassandra.docker", "c2.cassandra.docker"))

  def getSystem = ActorSystem.create("location", config(hostname, cassandras))

  // create and activate eventuate replication endpoint
  def getEndpoint(implicit system: ActorSystem) = {
    val ep = new ReplicationEndpoint(name,
      Set(ReplicationEndpoint.DefaultLogName),
      CassandraEventLog.props(_),
      connections)
    ep.activate()
    ep
  }
} 
Example 8
Source File: ChaosSetup.scala    From eventuate-chaos   with Apache License 2.0
package com.rbmhtechnology.eventuate.chaos

import akka.actor.ActorSystem
import akka.actor.Props
import akka.pattern.BackoffSupervisor
import com.rbmhtechnology.eventuate.ReplicationConnection
import com.rbmhtechnology.eventuate.ReplicationEndpoint
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration.DurationInt

trait ChaosSetup extends App {

  def getSystem: ActorSystem

  def getEndpoint(implicit system: ActorSystem): ReplicationEndpoint

  protected def baseConfig(hostname: String) = ConfigFactory.parseString(
    s"""
       |akka.actor.provider = "akka.remote.RemoteActorRefProvider"
       |akka.remote.enabled-transports = ["akka.remote.netty.tcp"]
       |akka.remote.netty.tcp.hostname = "$hostname"
       |akka.remote.netty.tcp.port = 2552
       |akka.test.single-expect-default = 10s
       |akka.loglevel = "INFO"
       |eventuate.log.write-batch-size = 16
       |eventuate.log.read-timeout = 3s
       |eventuate.log.retry-delay = 3s
       |akka.remote.netty.tcp.maximum-frame-size = 1024000b
     """.stripMargin)

  protected def quote(str: String) = "\"" + str + "\""

  
  protected def supervised(props: Props, name: String): Props =
    BackoffSupervisor.props(props, name, 1.second, 30.seconds, 0.1)

  def name = {
    if (args == null || args.length < 1) {
      Console.err.println("no <nodename> specified")
      sys.exit(1)
    } else {
      args(0)
    }
  }

  def hostname = sys.env.getOrElse("HOSTNAME", s"$name.eventuate-chaos.docker")

  // replication connection to other node(s)
  def connections = args.drop(1).map { conn =>
    conn.split(":") match {
      case Array(host, port) =>
        ReplicationConnection(host, port.toInt)
      case Array(host) =>
        ReplicationConnection(host, 2552)
    }
  }.toSet
} 
Example 9
Source File: ChaosLeveldbSetup.scala    From eventuate-chaos   with Apache License 2.0
package com.rbmhtechnology.eventuate.chaos

import akka.actor.ActorSystem
import com.rbmhtechnology.eventuate.ReplicationEndpoint
import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog
import com.typesafe.config.ConfigFactory

trait ChaosLeveldbSetup extends ChaosSetup {

  def config(hostname: String) = baseConfig(hostname)
    .withFallback(ConfigFactory.parseString(
      s"""
         |eventuate.snapshot.filesystem.dir = /tmp/test-snapshot
         |eventuate.log.leveldb.dir = /tmp/test-log
     """.stripMargin))

  def getSystem = ActorSystem.create("location", config(hostname))

  // create and activate eventuate replication endpoint
  def getEndpoint(implicit system: ActorSystem) = {
    val ep = new ReplicationEndpoint(name,
      Set(ReplicationEndpoint.DefaultLogName),
      LeveldbEventLog.props(_), connections)
    ep.activate()
    ep
  }
} 
Example 10
Source File: MetricsConfig.scala    From prometheus-akka   with Apache License 2.0
package com.workday.prometheus.akka

import com.typesafe.config.{Config, ConfigFactory, ConfigParseOptions, ConfigResolveOptions}
import com.workday.prometheus.akka.impl.{EntityFilter, GlobPathFilter, RegexPathFilter}

object MetricsConfig {
  val Dispatcher = "akka-dispatcher"
  val Router = "akka-router"
  val Actor = "akka-actor"
  val ActorGroups = "akka-actor-groups"

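  // setAllowUnresolved(true) lets the config load even when some substitutions cannot be resolved yet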
  private val defaultConfig = ConfigFactory.load(this.getClass.getClassLoader, ConfigParseOptions.defaults(), ConfigResolveOptions.defaults().setAllowUnresolved(true))
  private val metricFiltersConfig = defaultConfig.getConfig("prometheus.akka.metric.filters")

  lazy val matchEvents: Boolean = defaultConfig.getBoolean("prometheus.akka.match.events")

  implicit class Syntax(val config: Config) extends AnyVal {
    def firstLevelKeys: Set[String] = {
      import scala.collection.JavaConverters._

      config.entrySet().asScala.map {
        case entry ⇒ entry.getKey.takeWhile(_ != '.')
      } toSet
    }
  }

  private val filters = createFilters(metricFiltersConfig, metricFiltersConfig.firstLevelKeys.filterNot(_ == ActorGroups))
  private val groupFilters = {
    if(metricFiltersConfig.hasPath(ActorGroups)) {
      val cfg = metricFiltersConfig.getConfig(ActorGroups)
      createFilters(cfg, cfg.firstLevelKeys)
    } else {
      Map.empty
    }
  }

  private def createFilters(cfg: Config, categories: Set[String]): Map[String, EntityFilter] = {
    import scala.collection.JavaConverters._
    categories map { category: String ⇒
      val asRegex = if (cfg.hasPath(s"$category.asRegex")) cfg.getBoolean(s"$category.asRegex") else false
      val includes = cfg.getStringList(s"$category.includes").asScala.map(inc ⇒
        if (asRegex) RegexPathFilter(inc) else new GlobPathFilter(inc)).toList
      val excludes = cfg.getStringList(s"$category.excludes").asScala.map(exc ⇒
        if (asRegex) RegexPathFilter(exc) else new GlobPathFilter(exc)).toList

      (category, EntityFilter(includes, excludes))
    } toMap
  }

  def shouldTrack(category: String, entityName: String): Boolean = {
    filters.get(category) match {
      case Some(filter) => filter.accept(entityName)
      case None => false
    }
  }

  def actorShouldBeTrackedUnderGroups(entityName: String): List[String] = {
    val iterable = for((groupName, filter) <- groupFilters if filter.accept(entityName)) yield groupName
    iterable.toList
  }

  def groupNames: Set[String] = groupFilters.keys.toSet
} 
Example 11
Source File: OAuthFailedSpec.scala    From kanadi   with MIT License
package org.zalando.kanadi

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import com.typesafe.config.ConfigFactory
import io.circe.Json
import org.mdedetrich.webmodels.{FlowId, OAuth2Token, OAuth2TokenProvider}
import org.specs2.Specification
import org.specs2.concurrent.ExecutionEnv
import org.specs2.execute.Skipped
import org.specs2.matcher.FutureMatchers
import org.specs2.specification.core.SpecStructure
import org.zalando.kanadi.api.{Events, Subscriptions}
import org.zalando.kanadi.models._

import scala.concurrent.Future
import scala.concurrent.duration._

class OAuthFailedSpec(implicit ec: ExecutionEnv) extends Specification with FutureMatchers with Config {

  val config = ConfigFactory.load()

  implicit val system       = ActorSystem()
  implicit val http         = Http()
  implicit val materializer = ActorMaterializer()
  val failingOauth2TokenProvider = Some(
    OAuth2TokenProvider(() => Future.successful(OAuth2Token("Failing token")))
  )

  val subscriptionsClient =
    Subscriptions(nakadiUri, failingOauth2TokenProvider)
  val eventsClient = Events(nakadiUri, failingOauth2TokenProvider)

  override def is: SpecStructure = s2"""
    Call to subscriptions list should fail with invalid token   $oAuthCallSubscriptions
    Call to publishEvents should fail with invalid token        $oAuthPublishEvents
  """

  def oAuthCallSubscriptions = Skipped("No way for current Nakadi docker image to detect \"wrong\" tokens")

  def oAuthPublishEvents = Skipped("No way for current Nakadi docker image to detect \"wrong\" tokens")
} 
Example 12
Source File: SubscriptionsSpec.scala    From kanadi   with MIT License
package org.zalando.kanadi

import java.util.UUID

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import com.typesafe.config.ConfigFactory
import org.mdedetrich.webmodels.FlowId
import org.specs2.Specification
import org.specs2.concurrent.ExecutionEnv
import org.specs2.specification.core.SpecStructure
import org.specs2.specification.{AfterAll, BeforeAll}
import org.zalando.kanadi.api.{Category, EventType, EventTypes, Events, Subscription, Subscriptions}
import org.zalando.kanadi.models.{EventTypeName, SubscriptionId}

import scala.collection.parallel.mutable
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

class SubscriptionsSpec(implicit ec: ExecutionEnv) extends Specification with Config with BeforeAll with AfterAll {
  override def is: SpecStructure = sequential ^ s2"""
      Create enough subscriptions to ensure that pagination is used $createEnoughSubscriptionsToUsePagination
    """

  val config = ConfigFactory.load()

  implicit val system       = ActorSystem()
  implicit val http         = Http()
  implicit val materializer = ActorMaterializer()

  val eventTypeName         = EventTypeName(s"Kanadi-Test-Event-${UUID.randomUUID().toString}")
  val OwningApplication     = "KANADI"
  val consumerGroup: String = UUID.randomUUID().toString
  val subscriptionsClient =
    Subscriptions(nakadiUri, None)
  val eventsClient = Events(nakadiUri, None)
  val eventsTypesClient =
    EventTypes(nakadiUri, None)
  val subscriptionIds: mutable.ParSet[SubscriptionId] = mutable.ParSet.empty

  eventTypeName.pp
  s"Consumer Group: $consumerGroup".pp

  def createEventType = eventsTypesClient.create(EventType(eventTypeName, OwningApplication, Category.Business))

  override def beforeAll =
    Await.result(createEventType, 10 seconds)

  override def afterAll = {
    Await.result(
      for {
        res1 <- Future.sequence(subscriptionIds.toList.map(s => subscriptionsClient.delete(s)))
        res2 <- eventsTypesClient.delete(eventTypeName)
      } yield (res1, res2),
      10 seconds
    )
    ()
  }

  def createEnoughSubscriptionsToUsePagination = (name: String) => {
    implicit val flowId: FlowId = Utils.randomFlowId()
    flowId.pp(name)

    val createdSubscriptions = Future.sequence(for {
      _ <- 1 to 22
      subscription = subscriptionsClient.create(
        Subscription(None, s"$OwningApplication-${UUID.randomUUID().toString}", Some(List(eventTypeName))))
    } yield {
      subscription.foreach { s =>
        subscriptionIds += s.id.get
      }
      subscription
    })

    val retrievedSubscriptions = (for {
      subscriptions <- createdSubscriptions
      retrievedSubscription = Future.sequence(subscriptions.map { subscription =>
        subscriptionsClient.createIfDoesntExist(subscription)
      })
    } yield retrievedSubscription).flatMap(a => a)

    Await.result(createdSubscriptions, 10 seconds) mustEqual Await.result(retrievedSubscriptions, 10 seconds)
  }

} 
Example 13
Source File: S2StreamQueryWriter.scala    From incubator-s2graph   with Apache License 2.0
package org.apache.s2graph.spark.sql.streaming

import com.typesafe.config.ConfigFactory
import org.apache.s2graph.core.{GraphElement, JSONParser}
import org.apache.s2graph.s2jobs.S2GraphHelper
import org.apache.s2graph.spark.sql.streaming.S2SinkConfigs._
import org.apache.spark.TaskContext
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.{ExpressionEncoder, RowEncoder}
import org.apache.spark.sql.types.StructType
import play.api.libs.json.{JsObject, Json}

import scala.collection.mutable.ListBuffer
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.util.Try

private [sql] class S2StreamQueryWriter(
                                         serializedConf:String,
                                         schema: StructType ,
                                         commitProtocol: S2CommitProtocol
                                       ) extends Serializable with Logger {
  private val config = ConfigFactory.parseString(serializedConf)
  private val s2Graph = S2GraphHelper.getS2Graph(config)
  private val encoder: ExpressionEncoder[Row] = RowEncoder(schema).resolveAndBind()
  private val RESERVED_COLUMN = Set("timestamp", "from", "to", "label", "operation", "elem", "direction")


  def run(taskContext: TaskContext, iters: Iterator[InternalRow]): TaskCommit = {
    val taskId = s"stage-${taskContext.stageId()}, partition-${taskContext.partitionId()}, attempt-${taskContext.taskAttemptId()}"
    val partitionId= taskContext.partitionId()

    val groupedSize = getConfigString(config, S2_SINK_GROUPED_SIZE, DEFAULT_GROUPED_SIZE).toInt
    val waitTime = getConfigString(config, S2_SINK_WAIT_TIME, DEFAULT_WAIT_TIME_SECONDS).toInt

    commitProtocol.initTask()
    try {
      var list = new ListBuffer[(String, Int)]()
      val rst = iters.flatMap(rowToEdge).grouped(groupedSize).flatMap{ elements =>
        logger.debug(s"[$taskId][elements] ${elements.size} (${elements.map(e => e.toLogString).mkString(",\n")})")
        elements.groupBy(_.serviceName).foreach{ case (service, elems) =>
          list += ((service, elems.size))
        }

        val mutateF = s2Graph.mutateElements(elements, true)
        Await.result(mutateF, Duration(waitTime, "seconds"))
      }

      val (success, fail) = rst.toSeq.partition(r => r.isSuccess)
      val counter = list.groupBy(_._1).map{ case (service, t) =>
        val sum = t.toList.map(_._2).sum
        (service, sum)
      }
      logger.info(s"[$taskId] success : ${success.size}, fail : ${fail.size} ($counter)")


      commitProtocol.commitTask(TaskState(partitionId, success.size, fail.size, counter))

    } catch {
      case t: Throwable =>
        commitProtocol.abortTask(TaskState(partitionId))
        throw t
    }
  }

  private def rowToEdge(internalRow:InternalRow): Option[GraphElement] =
    S2GraphHelper.sparkSqlRowToGraphElement(s2Graph, encoder.fromRow(internalRow), schema, RESERVED_COLUMN)
} 
Example 14
Source File: S2SinkProvider.scala    From incubator-s2graph   with Apache License 2.0
package org.apache.s2graph.spark.sql.streaming

import com.typesafe.config.{Config, ConfigFactory, ConfigRenderOptions}
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.execution.streaming.Sink
import org.apache.spark.sql.sources.{DataSourceRegister, StreamSinkProvider}
import org.apache.spark.sql.streaming.OutputMode

import scala.collection.JavaConversions._

class S2SinkProvider extends StreamSinkProvider with DataSourceRegister with Logger {
  override def createSink(
                  sqlContext: SQLContext,
                  parameters: Map[String, String],
                  partitionColumns: Seq[String],
                  outputMode: OutputMode): Sink = {

    logger.info(s"S2SinkProvider options : ${parameters}")
    val jobConf:Config = ConfigFactory.parseMap(parameters).withFallback(ConfigFactory.load())
    logger.info(s"S2SinkProvider Configuration : ${jobConf.root().render(ConfigRenderOptions.concise())}")

    new S2SparkSqlStreamingSink(sqlContext.sparkSession, jobConf)
  }

  override def shortName(): String = "s2graph"
} 
Example 15
Source File: BenchmarkCommon.scala    From incubator-s2graph   with Apache License 2.0
package org.apache.s2graph.core.benchmark

import com.typesafe.config.{ConfigFactory, Config}
import org.specs2.mutable.Specification
import scalikejdbc.AutoSession

import scala.concurrent.ExecutionContext

trait BenchmarkCommon extends Specification {
  val wrapStr = s"\n=================================================="

  def duration[T](prefix: String = "")(block: => T) = {
    val startTs = System.currentTimeMillis()
    val ret = block
    val endTs = System.currentTimeMillis()
    println(s"$wrapStr\n$prefix: took ${endTs - startTs} ms$wrapStr")
    ret
  }

  def durationWithReturn[T](prefix: String = "")(block: => T): (T, Long) = {
    val startTs = System.currentTimeMillis()
    val ret = block
    val endTs = System.currentTimeMillis()
    val duration = endTs - startTs
//    println(s"$wrapStr\n$prefix: took $duration ms$wrapStr")
    (ret, duration)
  }
} 
Example 16
Source File: EdgeFetcherTest.scala    From incubator-s2graph   with Apache License 2.0
package org.apache.s2graph.core.fetcher

import com.typesafe.config.ConfigFactory
import org.apache.s2graph.core.Integrate.IntegrateCommon
import org.apache.s2graph.core.Management.JsonModel.{Index, Prop}
import org.apache.s2graph.core.schema.Label
import org.apache.s2graph.core.{Query, QueryParam, ResourceManager}

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext}

class EdgeFetcherTest extends IntegrateCommon {

  import scala.collection.JavaConverters._

  test("MemoryModelFetcher") {
    // 1. create label.
    // 2. importLabel.
    // 3. fetch.
    val service = management.createService("s2graph", "localhost", "s2graph_htable", -1, None).get
    val serviceColumn =
      management.createServiceColumn("s2graph", "user", "string", Seq(Prop("age", "0", "int", true)))
    val labelName = "fetcher_test"
    val options =
      s"""{
         | "importer": {
         |   "${ResourceManager.ClassNameKey}": "org.apache.s2graph.core.utils.IdentityImporter"
         | },
         | "fetcher": {
         |   "${ResourceManager.ClassNameKey}": "org.apache.s2graph.core.fetcher.MemoryModelEdgeFetcher"
         | }
         |}""".stripMargin

    Label.findByName(labelName, useCache = false).foreach { label => Label.delete(label.id.get) }

    val label = management.createLabel(
      labelName,
      service.serviceName,
      serviceColumn.columnName,
      serviceColumn.columnType,
      service.serviceName,
      serviceColumn.columnName,
      serviceColumn.columnType,
      service.serviceName,
      Seq.empty[Index],
      Seq.empty[Prop],
      isDirected = true,
      consistencyLevel =  "strong",
      hTableName = None,
      hTableTTL = None,
      schemaVersion = "v3",
      compressionAlgorithm =  "gz",
      options = Option(options)
    ).get

    graph.management.updateEdgeFetcher(label, Option(options))


    val vertex = graph.elementBuilder.toVertex(service.serviceName, serviceColumn.columnName, "daewon")
    val queryParam = QueryParam(labelName = labelName)

    val query = Query.toQuery(srcVertices = Seq(vertex), queryParams = Seq(queryParam))
    val stepResult = Await.result(graph.getEdges(query), Duration("60 seconds"))

    stepResult.edgeWithScores.foreach { es =>
      println(es.edge)
    }
  }
} 
Example 17
Source File: BaseFetcherTest.scala    From incubator-s2graph   with Apache License 2.0
package org.apache.s2graph.core.fetcher

import com.typesafe.config.{Config, ConfigFactory}
import org.apache.s2graph.core.Management.JsonModel.{Index, Prop}
import org.apache.s2graph.core.rest.RequestParser
import org.apache.s2graph.core._
import org.apache.s2graph.core.schema.{Label, LabelMeta, Service, ServiceColumn}
import org.scalatest._

import scala.concurrent.{Await, ExecutionContext}
import scala.concurrent.duration.Duration

trait BaseFetcherTest extends FunSuite with Matchers with BeforeAndAfterAll {
  var graph: S2Graph = _
  var parser: RequestParser = _
  var management: Management = _
  var config: Config = _

  override def beforeAll = {
    config = ConfigFactory.load()
    graph = new S2Graph(config)(ExecutionContext.Implicits.global)
    management = new Management(graph)
    parser = new RequestParser(graph)
  }

  override def afterAll(): Unit = {
    graph.shutdown()
  }

  def queryEdgeFetcher(service: Service,
                       serviceColumn: ServiceColumn,
                       label: Label,
                       srcVertices: Seq[String]): StepResult = {

    val vertices = srcVertices.map(graph.elementBuilder.toVertex(service.serviceName, serviceColumn.columnName, _))

    val queryParam = QueryParam(labelName = label.label, limit = 10)

    val query = Query.toQuery(srcVertices = vertices, queryParams = Seq(queryParam))
    Await.result(graph.getEdges(query), Duration("60 seconds"))
  }

  def initEdgeFetcher(serviceName: String,
                      columnName: String,
                      labelName: String,
                      options: Option[String]): (Service, ServiceColumn, Label) = {
    val service = management.createService(serviceName, "localhost", "s2graph_htable", -1, None).get
    val serviceColumn =
      management.createServiceColumn(serviceName, columnName, "string", Nil)

    Label.findByName(labelName, useCache = false).foreach { label =>
      label.labelMetaSet.foreach { lm =>
        LabelMeta.delete(lm.id.get)
      }

      Label.delete(label.id.get)
    }

    val label = management.createLabel(
      labelName,
      service.serviceName,
      serviceColumn.columnName,
      serviceColumn.columnType,
      service.serviceName,
      serviceColumn.columnName,
      serviceColumn.columnType,
      service.serviceName,
      Seq.empty[Index],
      Seq(Prop(name = "score", defaultValue = "0.0", dataType = "double")),
      isDirected = true,
      consistencyLevel = "strong",
      hTableName = None,
      hTableTTL = None,
      schemaVersion = "v3",
      compressionAlgorithm = "gz",
      options = options,
      initFetcherWithOptions = true
    ).get

    management.updateEdgeFetcher(label, options)

    (service, serviceColumn, Label.findById(label.id.get, useCache = false))
  }
} 
Example 18
Source File: CounterEtlFunctionsSpec.scala    From incubator-s2graph   with Apache License 2.0
package org.apache.s2graph.counter.loader.core

import com.typesafe.config.ConfigFactory
import org.apache.s2graph.core.schema.{Label, Service}
import org.apache.s2graph.core.types.HBaseType
import org.apache.s2graph.core.{S2Graph, Management}
import org.apache.s2graph.counter.models.DBModel
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import scala.concurrent.ExecutionContext.Implicits.global

class CounterEtlFunctionsSpec extends FlatSpec with BeforeAndAfterAll with Matchers {
  val config = ConfigFactory.load()
  val cluster = config.getString("hbase.zookeeper.quorum")
  DBModel.initialize(config)

  val graph = new S2Graph(config)(global)
  val management = new Management(graph)

  override def beforeAll: Unit = {
    management.createService("test", cluster, "test", 1, None, "gz")
    management.createLabel("test_case", "test", "src", "string", "test", "tgt", "string", true, "test", Nil, Nil, "weak", None, None, HBaseType.DEFAULT_VERSION, false, "gz")
  }

  override def afterAll: Unit = {
    Label.delete(Label.findByName("test_case", false).get.id.get)
    Service.delete(Service.findByName("test", false).get.id.get)
  }

  "CounterEtlFunctions" should "parsing log" in {
    val data =
      """
        |1435107139287	insert	e	aaPHfITGUU0B_150212123559509	abcd	test_case	{"cateid":"100110102","shopid":"1","brandid":""}
        |1435106916136	insert	e	Tgc00-wtjp2B_140918153515441	efgh	test_case	{"cateid":"101104107","shopid":"2","brandid":""}
      """.stripMargin.trim.split('\n')
    val items = {
      for {
        line <- data
        item <- CounterEtlFunctions.parseEdgeFormat(line)
      } yield {
        item.action should equal("test_case")
        item
      }
    }

    items should have size 2
  }
} 
Example 19
Source File: CounterSchemaSpec.scala    From incubator-s2graph   with Apache License 2.0
package org.apache.s2graph.counter.models

import com.typesafe.config.ConfigFactory
import org.specs2.mutable.Specification

class CounterSchemaSpec extends Specification {
  val config = ConfigFactory.load()

  DBModel.initialize(config)

  "CounterModel" should {
    val model = new CounterModel(config)
    "findById" in {
      model.findById(0, useCache = false) must beNone
    }

    "findByServiceAction using cache" in {
      val service = "test"
      val action = "test_action"
      val counter = Counter(useFlag = true, 2, service, action, Counter.ItemType.STRING,
        autoComb = true, "", useProfile = true, None, useRank = true, 0, None, None, None, None, None, None)
      model.createServiceAction(counter)
      model.findByServiceAction(service, action, useCache = false) must beSome
      val opt = model.findByServiceAction(service, action, useCache = true)
      opt must beSome
      model.findById(opt.get.id) must beSome
      model.deleteServiceAction(opt.get)
      model.findById(opt.get.id) must beSome
      model.findById(opt.get.id, useCache = false) must beNone
    }

    "create and delete policy" in {
      val (service, action) = ("test", "test_case")
      for {
        policy <- model.findByServiceAction(service, action, useCache = false)
      } {
        model.deleteServiceAction(policy)
      }
      model.createServiceAction(Counter(useFlag = true, 2, service, action, Counter.ItemType.STRING,
        autoComb = true, "", useProfile = true, None, useRank = true, 0, None, None, None, None, None, None))
      model.findByServiceAction(service, action, useCache = false).map { policy =>
        policy.service mustEqual service
        policy.action mustEqual action
        model.deleteServiceAction(policy)
        policy
      } must beSome
      model.findByServiceAction(service, action, useCache = false) must beNone
    }
  }
} 
Example 20
Source File: S2ConfigFactory.scala    From incubator-s2graph   with Apache License 2.0
package org.apache.s2graph.spark.config

import com.typesafe.config.{Config, ConfigFactory}

object S2ConfigFactory {
  lazy val config: Config = _load

  @deprecated("do not call explicitly. use config", "0.0.6")
  def load(): Config = {
    _load
  }

  def _load: Config = {
    // default configuration file name : application.conf
    val sysConfig = ConfigFactory.parseProperties(System.getProperties)

    lazy val phase = if (!sysConfig.hasPath("phase")) "alpha" else sysConfig.getString("phase")
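    // Precedence: system properties, then <phase>.conf, then application.conf / reference.conf via ConfigFactory.load()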
    sysConfig.withFallback(ConfigFactory.parseResourcesAnySyntax(s"$phase.conf")).withFallback(ConfigFactory.load())
  }
} 
Example 21
Source File: Server.scala    From incubator-s2graph   with Apache License 2.0
package org.apache.s2graph.http

import java.time.Instant

import scala.language.postfixOps
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success}
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, HttpResponse, StatusCodes}
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import com.typesafe.config.ConfigFactory
import org.apache.s2graph.core.S2Graph
import org.slf4j.LoggerFactory

object Server extends App
  with S2GraphTraversalRoute
  with S2GraphAdminRoute
  with S2GraphMutateRoute
  with S2GraphQLRoute {

  implicit val system: ActorSystem = ActorSystem("S2GraphHttpServer")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val executionContext: ExecutionContext = system.dispatcher

  val config = ConfigFactory.load()

  override val s2graph = new S2Graph(config)
  override val logger = LoggerFactory.getLogger(this.getClass)

  val port = sys.props.get("http.port").fold(8000)(_.toInt)
  val interface = sys.props.get("http.interface").fold("0.0.0.0")(identity)

  val startAt = System.currentTimeMillis()

  def uptime = System.currentTimeMillis() - startAt

  def serverHealth = s"""{ "port": ${port}, "interface": "${interface}", "started_at": "${Instant.ofEpochMilli(startAt)}", "uptime": "${uptime} millis" }"""

  def health = HttpResponse(status = StatusCodes.OK, entity = HttpEntity(ContentTypes.`application/json`, serverHealth))

  // Allows you to determine routes to expose according to external settings.
  lazy val routes: Route = concat(
    pathPrefix("graphs")(traversalRoute),
    pathPrefix("mutate")(mutateRoute),
    pathPrefix("admin")(adminRoute),
    pathPrefix("graphql")(graphqlRoute),
    get(complete(health))
  )

  val binding: Future[Http.ServerBinding] = Http().bindAndHandle(routes, interface, port)
  binding.onComplete {
    case Success(bound) => logger.info(s"Server online at http://${bound.localAddress.getHostString}:${bound.localAddress.getPort}/")
    case Failure(e) => logger.error(s"Server could not start!", e)
  }

  scala.sys.addShutdownHook { () =>
    s2graph.shutdown()
    system.terminate()
    logger.info("System terminated")
  }

  Await.result(system.whenTerminated, Duration.Inf)
} 
Example 22
Source File: MutateRouteSpec.scala    From incubator-s2graph   with Apache License 2.0
package org.apache.s2graph.http

import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model._
import akka.http.scaladsl.testkit.ScalatestRouteTest
import com.typesafe.config.ConfigFactory
import org.apache.s2graph.core.Management.JsonModel.Prop
import org.apache.s2graph.core.S2Graph
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpec}
import org.slf4j.LoggerFactory
import play.api.libs.json.{JsValue, Json}

class MutateRouteSpec extends WordSpec with Matchers with PlayJsonSupport with ScalaFutures with ScalatestRouteTest with S2GraphMutateRoute with BeforeAndAfterAll {

  import scala.collection.JavaConverters._

  val dbUrl = "jdbc:h2:file:./var/metastore_mutate_route;MODE=MYSQL;AUTO_SERVER=true"
  val config =
    ConfigFactory.parseMap(Map("db.default.url" -> dbUrl).asJava)
  lazy val s2graph = new S2Graph(config.withFallback(ConfigFactory.load()))
  override val logger = LoggerFactory.getLogger(this.getClass)

  override def afterAll(): Unit = {
    s2graph.shutdown(true)
  }

  lazy val routes = mutateRoute

  val serviceName = "kakaoFavorites"
  val columnName = "userName"

  "MutateRoute" should {

    "be able to insert vertex (POST /mutate/vertex/insert)" in {
      s2graph.management.createService(serviceName, "localhost", s"${serviceName}-dev", 1, None)
      s2graph.management.createServiceColumn(serviceName, columnName, "string", Seq(Prop("age", "0", "integer")))

      // {"timestamp": 10, "serviceName": "s2graph", "columnName": "user", "id": 1, "props": {}}
      val param = Json.obj(
        "timestamp" -> 10,
        "serviceName" -> serviceName,
        "columnName" -> columnName,
        "id" -> "user_a",
        "props" -> Json.obj(
          "age" -> 20
        )
      )

      val entity = Marshal(param).to[MessageEntity].futureValue
      val request = Post("/vertex/insert").withEntity(entity)

      request ~> routes ~> check {
        status should ===(StatusCodes.OK)
        contentType should ===(ContentTypes.`application/json`)

        val response = entityAs[JsValue]
        response should ===(Json.toJson(Seq(true)))
      }
    }
  }
} 
Example 23
Source File: AdminRouteSpec.scala    From incubator-s2graph   with Apache License 2.0
package org.apache.s2graph.http

import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model._
import akka.http.scaladsl.testkit.ScalatestRouteTest
import com.typesafe.config.ConfigFactory
import org.apache.s2graph.core.Management.JsonModel.Prop
import org.apache.s2graph.core.S2Graph
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpec}
import org.slf4j.LoggerFactory
import play.api.libs.json.{JsString, JsValue, Json}

class AdminRoutesSpec extends WordSpec with Matchers with ScalaFutures with ScalatestRouteTest with S2GraphAdminRoute with BeforeAndAfterAll {
  import scala.collection.JavaConverters._

  val dbUrl = "jdbc:h2:file:./var/metastore_admin_route;MODE=MYSQL;AUTO_SERVER=true"
  val config =
    ConfigFactory.parseMap(Map("db.default.url" -> dbUrl).asJava)
  lazy val s2graph = new S2Graph(config.withFallback(ConfigFactory.load()))
  override val logger = LoggerFactory.getLogger(this.getClass)

  override def afterAll(): Unit = {
    s2graph.shutdown(true)
  }

  lazy val routes = adminRoute

  val serviceName = "kakaoFavorites"
  val columnName = "userName"

  "AdminRoute" should {
    "be able to create service (POST /createService)" in {
      val serviceParam = Json.obj(
        "serviceName" -> serviceName,
        "compressionAlgorithm" -> "gz"
      )

      val serviceEntity = Marshal(serviceParam).to[MessageEntity].futureValue
      val request = Post("/createService").withEntity(serviceEntity)

      request ~> routes ~> check {
        status should ===(StatusCodes.Created)
        contentType should ===(ContentTypes.`application/json`)

        val response = entityAs[JsValue]

        (response \\ "name").head should ===(JsString("kakaoFavorites"))
        (response \\ "status").head should ===(JsString("ok"))
      }
    }

    "return service if present (GET /getService/{serviceName})" in {
      val request = HttpRequest(uri = s"/getService/$serviceName")

      request ~> routes ~> check {
        status should ===(StatusCodes.OK)
        contentType should ===(ContentTypes.`application/json`)

        val response = entityAs[JsValue]

        (response \\ "name").head should ===(JsString("kakaoFavorites"))
      }
    }

    "be able to create serviceColumn (POST /createServiceColumn)" in {
      val serviceColumnParam = Json.obj(
        "serviceName" -> serviceName,
        "columnName" -> columnName,
        "columnType" -> "string",
        "props" -> Json.toJson(
          Seq(
            Json.obj("name" -> "age", "defaultValue" -> "-1", "dataType" -> "integer")
          )
        )
      )

      val serviceColumnEntity = Marshal(serviceColumnParam).to[MessageEntity].futureValue
      val request = Post("/createServiceColumn").withEntity(serviceColumnEntity)

      request ~> routes ~> check {
        status should ===(StatusCodes.Created)
        contentType should ===(ContentTypes.`application/json`)

        val response = entityAs[JsValue]

        (response \\ "serviceName").head should ===(JsString("kakaoFavorites"))
        (response \\ "columnName").head should ===(JsString("userName"))
        (response \\ "status").head should ===(JsString("ok"))
      }
    }
  }
} 
Example 24
Source File: DirectiveTest.scala    From incubator-s2graph   with Apache License 2.0
package org.apache.s2graph.graphql

import com.typesafe.config.ConfigFactory
import org.apache.s2graph.graphql.types.S2Directive
import org.scalatest._

class DirectiveTest extends FunSuite with Matchers with BeforeAndAfterAll {
  var testGraph: TestGraph = _

  override def beforeAll = {
    val config = ConfigFactory.load()
    testGraph = new EmptyGraph(config)
    testGraph.open()
  }

  override def afterAll(): Unit = {
    testGraph.cleanup()
  }

  test("transform") {
    val input = "20170601_A0"
    val code =
      """ (s: String) => {
          val date = s.split("_").head
          s"http://abc.xy.com/IMG_${date}.png"
      }

      """.stripMargin
    val actual = S2Directive.resolveTransform(code, input)
    val expected = "http://abc.xy.com/IMG_20170601.png"

    actual shouldBe expected
  }
} 
Example 25
Source File: SystemInformationReporter.scala    From matcher   with MIT License
package com.wavesplatform.dex.logs

import com.typesafe.config.{Config, ConfigFactory, ConfigRenderOptions}
import com.wavesplatform.dex.domain.utils.ScorexLogging

object SystemInformationReporter extends ScorexLogging {

  def report(config: Config): Unit = {
    val resolved = config.resolve()
    val configForLogs = {
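      // Keep only the listed top-level paths, then drop sensitive paths before rendering the config for logs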
      val orig = Seq(
        "waves",
        "metrics"
      ).foldLeft(ConfigFactory.empty()) { case (r, path) => r.withFallback(resolved.withOnlyPath(path)) }

      Seq(
        "waves.custom.genesis",
        "waves.wallet",
        "waves.rest-api.api-key-hash",
        "metrics.influx-db",
      ).foldLeft(orig)(_.withoutPath(_))
    }

    val renderOptions = ConfigRenderOptions
      .defaults()
      .setOriginComments(false)
      .setComments(false)
      .setFormatted(false)

    val logInfo: Seq[(String, Any)] = Seq(
      "Available processors" -> Runtime.getRuntime.availableProcessors,
      "Max memory available" -> Runtime.getRuntime.maxMemory,
    ) ++ Seq(
      "os.name",
      "os.version",
      "os.arch",
      "java.version",
      "java.vendor",
      "java.home",
      "java.class.path",
      "user.dir",
      "sun.net.inetaddr.ttl",
      "sun.net.inetaddr.negative.ttl",
      "networkaddress.cache.ttl",
      "networkaddress.cache.negative.ttl"
    ).map { x =>
      x -> System.getProperty(x)
    } ++ Seq(
      "Configuration" -> configForLogs.root.render(renderOptions)
    )

    log.debug(logInfo.map { case (n, v) => s"$n: $v" }.mkString("\n"))
  }
} 
Example 26
Source File: PostgresConnection.scala    From matcher   with MIT License
package com.wavesplatform.dex.settings

import cats.syntax.apply._
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
import com.wavesplatform.dex.settings.utils.ConfigSettingsValidator
import com.wavesplatform.dex.settings.utils.ConfigSettingsValidator._
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.ValueReader

case class PostgresConnection(host: String, portNumber: Int, database: String, user: String, password: String, dataSourceClassName: String) {

  def getConfig: Config = {
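    // Assemble the data source settings programmatically, one withValue call per field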
    ConfigFactory
      .empty()
      .withValue("dataSource.serverName", ConfigValueFactory.fromAnyRef(host))
      .withValue("dataSource.portNumber", ConfigValueFactory.fromAnyRef(portNumber))
      .withValue("dataSource.databaseName", ConfigValueFactory.fromAnyRef(database))
      .withValue("dataSource.user", ConfigValueFactory.fromAnyRef(user))
      .withValue("dataSource.password", ConfigValueFactory.fromAnyRef(password))
      .withValue("dataSourceClassName", ConfigValueFactory.fromAnyRef(dataSourceClassName))
  }
}

object PostgresConnection {

  implicit val postgresConnectionReader: ValueReader[PostgresConnection] = { (cfg, path) =>
    val cfgValidator = ConfigSettingsValidator(cfg)

    (
      cfgValidator.validate[String](s"$path.server-name"),
      cfgValidator.validate[Int](s"$path.port-number"),
      cfgValidator.validate[String](s"$path.database"),
      cfgValidator.validate[String](s"$path.user"),
      cfgValidator.validate[String](s"$path.password"),
      cfgValidator.validate[String](s"$path.data-source-class-name")
    ) mapN PostgresConnection.apply getValueOrThrowErrors
  }
} 
Example 27
Source File: MatcherSpec.scala    From matcher   with MIT License
package com.wavesplatform.dex.actors

import akka.actor.ActorSystem
import akka.testkit.TestKitBase
import com.typesafe.config.ConfigFactory
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.settings.loadConfig
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}

abstract class MatcherSpec(_actorSystemName: String) extends AnyWordSpecLike with MatcherSpecLike {
  protected def actorSystemName: String = _actorSystemName
}

trait MatcherSpecLike extends TestKitBase with Matchers with BeforeAndAfterAll with BeforeAndAfterEach with ScorexLogging {
  this: Suite =>

  protected def actorSystemName: String

  implicit override lazy val system: ActorSystem = ActorSystem(
    actorSystemName,
    loadConfig(ConfigFactory.empty())
  )

  override protected def afterAll(): Unit = {
    super.afterAll()
    shutdown(system)
  }
} 
Example 28
Source File: MultipleMatchersOrderCancelTestSuite.scala    From matcher   with MIT License
package com.wavesplatform.it.sync.networking

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.api.http.entities.HttpOrderStatus.Status
import com.wavesplatform.dex.domain.asset.Asset.Waves
import com.wavesplatform.dex.domain.order.OrderType
import com.wavesplatform.dex.it.docker.DexContainer
import com.wavesplatform.it.MatcherSuiteBase
import com.wavesplatform.it.tags.DexItExternalKafkaRequired

@DexItExternalKafkaRequired
class MultipleMatchersOrderCancelTestSuite extends MatcherSuiteBase {

  override protected def dexInitialSuiteConfig: Config = ConfigFactory.parseString(s"""waves.dex.price-assets = [ "$UsdId", "WAVES" ]""".stripMargin)

  protected lazy val dex2: DexContainer = createDex("dex-2")

  override protected def beforeAll(): Unit = {
    wavesNode1.start()
    broadcastAndAwait(IssueUsdTx, IssueEthTx)
    dex1.start()
    dex2.start()
  }

  
  "Tricky case when DEX-1 is slower than DEX-2 and it leads to order cancelling on DEX-1" in {

    val acc1 = mkAccountWithBalance(15.015.waves -> Waves)
    val acc2 = mkAccountWithBalance(0.015.waves  -> Waves, 15.usd -> usd)
    val acc3 = mkAccountWithBalance(1.waves      -> Waves, 10.eth -> eth) // Account for fake orders

    val ts = System.currentTimeMillis()
    val sellOrders = (1 to 5).map { amt =>
      mkOrderDP(acc1, wavesUsdPair, OrderType.SELL, amt.waves, amt, ts = ts + amt) // To cancel latest first
    }

    sellOrders.foreach { placeAndAwaitAtDex(_) }

    // If DEX-1 works with the local queue, it won't receive the buy order placements and
    // will cancel the remaining orders due to balance changes
    // (which were caused by exchange transactions from DEX-2)

    dex1.api.saveSnapshots
    dex1.restartWithNewSuiteConfig(ConfigFactory.parseString(s"waves.dex.events-queue.type = local").withFallback(dexInitialSuiteConfig))
    // HACK: Because we switched the queue, we need to place 5 orders to move the queue's offset.
    // If we don't do this, internal cancels will be ignored by the order books.
    (1 to 5).foreach { _ =>
      dex1.api.place(mkOrderDP(acc3, ethWavesPair, OrderType.SELL, 1.eth, 1))
    }

    val submittedOrders = (1 to 3).map { amt =>
      mkOrderDP(acc2, wavesUsdPair, OrderType.BUY, amt.waves, amt)
    }
    submittedOrders.foreach(placeAndAwaitAtDex(_, Status.Filled, dex2))
    submittedOrders.foreach(waitForOrderAtNode(_, dex2.api))

    (0 to 2).foreach { i =>
      dex1.api.waitForOrderStatus(sellOrders(i), Status.Accepted)
    }

    // TODO problem solution should prevent sell orders from cancelling!
    (3 to 4).foreach { i =>
      dex1.api.waitForOrderStatus(sellOrders(i), Status.Cancelled)
    }
  }
} 
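restartWithNewSuiteConfig above layers a one-line override on top of the suite config via withFallback. The precedence rule can be seen in isolation in this minimal sketch (the keys mirror the example but the values are illustrative):

import com.typesafe.config.ConfigFactory

object FallbackPrecedenceSketch extends App {
  val overrides = ConfigFactory.parseString("waves.dex.events-queue.type = local")
  val base = ConfigFactory.parseString(
    """waves.dex {
      |  events-queue.type = kafka
      |  price-assets = [ "WAVES" ]
      |}""".stripMargin)

  val merged = overrides.withFallback(base)
  println(merged.getString("waves.dex.events-queue.type")) // local - the left side wins
  println(merged.getStringList("waves.dex.price-assets"))  // [WAVES] - filled in from the fallback
}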
Example 29
Source File: OrderV3TestSuite.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.api.http.entities.HttpOrderStatus.Status
import com.wavesplatform.dex.domain.order.OrderType
import com.wavesplatform.it.MatcherSuiteBase

class OrderV3TestSuite extends MatcherSuiteBase {

  override protected val dexInitialSuiteConfig: Config = allowedOrderVersion(1, 2)

  override protected def beforeAll(): Unit = {
    wavesNode1.start()
    broadcastAndAwait(IssueUsdTx)
    dex1.start()
  }

  "settings of allowing orderV3" - {
    val price = 100000000L

    "try to place not allowed orderV3" in {
      val orderV3 = mkOrder(alice, wavesUsdPair, OrderType.BUY, 3, price, version = 3)
      dex1.api.tryPlace(orderV3) should failWith(9439746, "The orders of version 3 are denied by matcher") // OrderVersionDenied
    }

    "matching orderV1 and orderV3" in {
      val orderV1 = mkOrder(alice, wavesUsdPair, OrderType.BUY, 3, price, version = 1)
      placeAndAwaitAtDex(orderV1)

      dex1.restartWithNewSuiteConfig(allowedOrderVersion(1, 2, 3))

      val orderV3 = mkOrder(bob, wavesUsdPair, OrderType.SELL, 2, price, version = 3)
      dex1.api.place(orderV3)

      dex1.api.waitForOrderStatus(orderV1, Status.PartiallyFilled)
      dex1.api.waitForOrderStatus(orderV3, Status.Filled)
    }
  }

  private def allowedOrderVersion(versions: Int*): Config =
    ConfigFactory.parseString(s"""waves.dex {
         |  price-assets = [ "$UsdId", "WAVES" ]
         |  allowed-order-versions = [${versions.mkString(", ")}]
         |}""".stripMargin)
} 
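allowedOrderVersion above interpolates a Scala varargs list into a HOCON array. A small sketch of reading such a list back with getIntList; the reading side is an assumption added here for illustration:

import com.typesafe.config.ConfigFactory
import scala.collection.JavaConverters._

object AllowedVersionsSketch extends App {
  def allowedOrderVersion(versions: Int*) =
    ConfigFactory.parseString(
      s"""waves.dex {
         |  allowed-order-versions = [${versions.mkString(", ")}]
         |}""".stripMargin)

  // HOCON lists come back as java.util.List; asScala converts them.
  val cfg = allowedOrderVersion(1, 2, 3)
  println(cfg.getIntList("waves.dex.allowed-order-versions").asScala) // Buffer(1, 2, 3)
}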
Example 30
Source File: OrderBookSnapshotsTestSuite.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.api.http.entities.HttpOrderStatus
import com.wavesplatform.dex.domain.order.Order
import com.wavesplatform.it.{MatcherSuiteBase, orderGen}
import org.scalacheck.Gen

class OrderBookSnapshotsTestSuite extends MatcherSuiteBase {
  private val interval = 50L

  override protected val dexInitialSuiteConfig: Config = ConfigFactory.parseString(
    s"""waves.dex {
      |  price-assets = ["$UsdId", "WAVES"]
      |  snapshots-interval = $interval
      |}""".stripMargin
  )

  private val assetPair1 = ethUsdPair
  private val assetPair2 = ethWavesPair

  private val ordersPack1Size = 11
  private val ordersPack1 = Gen
    .containerOfN[Vector, Order](ordersPack1Size - 1, orderGen(matcher, alice, List(assetPair1)))
    .sample
    .get :+ orderGen(matcher, alice, List(assetPair2)).sample.get

  private val ordersPack2Size = interval.toInt
  private val ordersPack2 = Gen
    .containerOfN[Vector, Order](ordersPack2Size, orderGen(matcher, alice, List(assetPair2)))
    .sample
    .get

  override protected def beforeAll(): Unit = {
    wavesNode1.start()
    broadcastAndAwait(IssueEthTx, IssueUsdTx)
    dex1.start()
  }

  "Order books are created with right offsets" in {

    ordersPack1.foreach(dex1.api.place)
    dex1.api.waitForCurrentOffset(_ == ordersPack1Size - 1)

    val allSnapshotOffsets1 = dex1.api.allSnapshotOffsets

    withClue("We doesn't show pairs, those have snapshot's offset equal to -1") {
      if (allSnapshotOffsets1.contains(assetPair1)) allSnapshotOffsets1(assetPair1) should be < interval
      if (allSnapshotOffsets1.contains(assetPair2)) allSnapshotOffsets1(assetPair2) should be < interval
    }

    ordersPack2.foreach(dex1.api.place)
    dex1.api.waitForCurrentOffset(_ == ordersPack1Size + ordersPack2Size - 1)

    val allSnapshotOffsets2 = dex1.api.allSnapshotOffsets

    withClue("Asset pairs has right offsets") {
      allSnapshotOffsets2.foreach {
        case (pair, offset) =>
          withClue(pair) {
            offset should be < (interval * 2)
          }
      }
    }
  }

  "All events are processed after restart" in {
    dex1.restart()
    dex1.api.waitForCurrentOffset(_ == ordersPack1Size + ordersPack2Size - 1)
    ordersPack1.foreach { order =>
      dex1.api.orderStatus(order) should not be HttpOrderStatus.Status.NotFound.name
    }
  }
} 
Example 31
Source File: WsPingPongInternalTestSuite.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.api.ws

import com.typesafe.config.{Config, ConfigFactory}

class WsPingPongInternalTestSuite extends WsPingPongBaseSuite {

  override protected lazy val wsStreamUri = s"${getWsStreamUri(dex1)}/internal"

  override protected val dexInitialSuiteConfig: Config = ConfigFactory
    .parseString(
      s"""waves.dex.web-sockets.internal-client-handler.health-check = {
        |  ping-interval = $pingInterval
        |  pong-timeout = $pongTimeout
        |}""".stripMargin
    )
    .withFallback(jwtPublicKeyConfig)
} 
Example 32
Source File: WsConnectionTestSuite.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.api.ws

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.api.ws.protocol._
import com.wavesplatform.dex.domain.order.OrderType.SELL
import com.wavesplatform.it.WsSuiteBase

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

class WsConnectionTestSuite extends WsSuiteBase {

  override protected val dexInitialSuiteConfig: Config = ConfigFactory
    .parseString(s"""waves.dex.price-assets = [ "$BtcId", "WAVES" ]""")
    .withFallback(jwtPublicKeyConfig)

  override protected def beforeAll(): Unit = {
    wavesNode1.start()
    broadcastAndAwait(IssueBtcTx)
    dex1.start()
  }

  "Updates both from address and order book" in {
    val wsc = mkDexWsConnection(dex1)

    markup("Subscribe to an order book updates")
    wsc.send(WsOrderBookSubscribe(wavesBtcPair, 1))
    wsc.receiveAtLeastN[WsOrderBookChanges](1)
    wsc.clearMessages()

    markup("Subscribe to an address updates")
    wsc.send(WsAddressSubscribe(alice, WsAddressSubscribe.defaultAuthType, mkJwt(alice)))
    wsc.receiveAtLeastN[WsAddressChanges](1)
    wsc.clearMessages()

    markup("Place an order")
    val order = mkOrderDP(alice, wavesBtcPair, SELL, 1.waves, 0.00005)
    placeAndAwaitAtDex(order)
    wsc.receiveAtLeastN[WsOrderBookChanges](1)
    wsc.receiveAtLeastN[WsAddressChanges](1)
    wsc.clearMessages()

    markup("Unsubscribe from an address updates")
    wsc.send(WsUnsubscribe(alice))

    markup("Cancel an order")
    cancelAndAwait(alice, order)
    wsc.receiveAtLeastN[WsOrderBookChanges](1)
    wsc.receiveNoMessagesOf[WsAddressChanges]()

    wsc.close()
  }

  "Matcher should handle many connections simultaneously" in {
    Await.result(Future.traverse((1 to 200).toList)(_ => Future(mkDexWsConnection(dex1))), 25.seconds).foreach { wsc =>
      wsc.isClosed shouldBe false
      wsc.close()
    }
  }
} 
Example 33
Source File: WsPingPongExternalTestSuite.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.api.ws

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.api.ws.protocol.WsError

import scala.concurrent.Await
import scala.concurrent.duration.DurationInt

class WsPingPongExternalTestSuite extends WsPingPongBaseSuite {

  protected val maxConnectionLifetime = 6.seconds

  override protected lazy val wsStreamUri: String = getWsStreamUri(dex1)

  override protected val dexInitialSuiteConfig: Config = ConfigFactory
    .parseString(
      s"""waves.dex.web-sockets.external-client-handler {
        |    max-connection-lifetime = $maxConnectionLifetime
        |    health-check = {
        |      ping-interval = $pingInterval
        |      pong-timeout = $pongTimeout
        |    }
        | }
        |""".stripMargin
    )
    .withFallback(jwtPublicKeyConfig)

  "Web socket connection should be closed " - {
    s"by max-connection-lifetime = $maxConnectionLifetime" in {
      val wsac               = mkWsAddressConnection(alice, dex1)
      val connectionLifetime = Await.result(wsac.connectionLifetime, maxConnectionLifetime + delta)

      connectionLifetime should (be >= maxConnectionLifetime and be <= maxConnectionLifetime + delta)
      wsac.pings.size should be >= 5
      wsac.isClosed shouldBe true

      wsac.collectMessages[WsError].head should matchTo(
        WsError(
          timestamp = 0L, // ignored
          code = 109077767, // WsConnectionMaxLifetimeExceeded
          message = "WebSocket has reached max allowed lifetime"
        )
      )
    }
  }
} 
Example 34
Source File: GetOrderBookTestSuite.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.api

import com.softwaremill.diffx.{Derived, Diff}
import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.api.http.entities.HttpV0OrderBook
import com.wavesplatform.dex.domain.asset.Asset.Waves
import com.wavesplatform.dex.domain.asset.AssetPair
import com.wavesplatform.dex.domain.order.OrderType.{BUY, SELL}
import com.wavesplatform.it.MatcherSuiteBase

class GetOrderBookTestSuite extends MatcherSuiteBase {

  private val ordersCount = 150

  override protected val dexInitialSuiteConfig: Config =
    ConfigFactory.parseString(
      s"""waves.dex {
         |  price-assets = [ "$UsdId", "WAVES", "$EthId" ]
         |  order-book-http {
         |    depth-ranges = [10, 20, 40, 41, 43, 100, 1000]
         |    default-depth = 100
         |  }
         |}""".stripMargin
    )

  // DEX-642
  private implicit val orderBookResponseDiff: Diff[HttpV0OrderBook] = Derived[Diff[HttpV0OrderBook]].ignore(_.timestamp)

  override protected def beforeAll(): Unit = {
    wavesNode1.start()
    broadcastAndAwait(IssueUsdTx, IssueEthTx)
    dex1.start()
  }

  def checkDepth(forTheseDepths: Array[Int] = Array(), thisDepthWillBePicked: Int): Unit = {
    val orderBook: HttpV0OrderBook = dex1.api.orderBook(wavesUsdPair, thisDepthWillBePicked)

    if (thisDepthWillBePicked < ordersCount) {
      orderBook.asks.size shouldBe thisDepthWillBePicked
      orderBook.bids.size shouldBe thisDepthWillBePicked
    }

    forTheseDepths.foreach(depth => dex1.api.orderBook(wavesUsdPair, depth) should matchTo(orderBook))
  }

  "response order book should contain right count of bids and asks" in {

    for (i <- 1 to ordersCount) {
      dex1.api.place(mkOrder(alice, wavesUsdPair, BUY, 1.waves, i))
      dex1.api.place(mkOrder(alice, wavesUsdPair, SELL, 1.waves, i + ordersCount + 1))
    }

    checkDepth(forTheseDepths = Array(0, 1, 8, 9), thisDepthWillBePicked = 10)
    checkDepth(forTheseDepths = Array(11, 12, 19), thisDepthWillBePicked = 20)
    checkDepth(forTheseDepths = Array(31, 32, 39), thisDepthWillBePicked = 40)
    checkDepth(forTheseDepths = Array(42), thisDepthWillBePicked = 43)
    checkDepth(forTheseDepths = Array(102, 103, 999, 9999), thisDepthWillBePicked = 1000)

    withClue("check default depth value") {
      val defaultOrderBook = dex1.api.orderBook(wavesUsdPair)
      defaultOrderBook should matchTo(dex1.api.orderBook(wavesUsdPair, 100))
      Array(44, 45, 60, 98, 99).foreach(depth => dex1.api.orderBook(wavesUsdPair, depth) should matchTo(defaultOrderBook))
    }
  }

  "query parameters should not be lost during redirect to well-ordered pair" in {

    val depth               = 10
    val ethWavesOrdersCount = 20

    (1 to ethWavesOrdersCount) foreach { i =>
      dex1.api.place(mkOrder(alice, ethWavesPair, BUY, 1.waves, i * 100))
      dex1.api.place(mkOrder(alice, ethWavesPair, SELL, 1.waves, (i + ethWavesOrdersCount + 1) * 100))
    }

    val orderBook = dex1.api.orderBook(AssetPair(Waves, eth), depth)
    orderBook.asks should have size depth
    orderBook.bids should have size depth
  }
} 
Example 35
Source File: GetSettingsTestSuite.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.api

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.domain.asset.Asset.Waves
import com.wavesplatform.it.MatcherSuiteBase

class GetSettingsTestSuite extends MatcherSuiteBase {

  override protected def dexInitialSuiteConfig: Config = ConfigFactory.parseString(s"""waves.dex.price-assets = [ "$UsdId", "$BtcId", "WAVES" ]""")

  override protected def beforeAll(): Unit = {
    wavesNode1.start()
    broadcastAndAwait(IssueUsdTx, IssueBtcTx)
    dex1.start()
  }

  "GET /matcher/settings should " - {
    "return correct byte of the node's network" in {
      dex1.api.settings.networkByte shouldBe 'Y'.toByte
    }

    "return matcher's public key" in {
      dex1.api.settings.matcherPublicKey should be(matcher.publicKey)
    }

    "return correct list of price assets" in {
      Seq(usd, btc, Waves).foreach { asset =>
        dex1.api.settings.priceAssets should contain(asset)
      }
    }
  }
} 
Example 36
Source File: BackwardCompatSuiteBase.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync.compat

import cats.Id
import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.api.http.entities.HttpOrderStatus.Status
import com.wavesplatform.dex.domain.asset.Asset.Waves
import com.wavesplatform.dex.domain.order.Order
import com.wavesplatform.dex.it.api.MultipleVersions
import com.wavesplatform.dex.it.dex.DexApi
import com.wavesplatform.it.MatcherSuiteBase
import com.wavesplatform.it.api.MatcherState


trait BackwardCompatSuiteBase extends MatcherSuiteBase with MultipleVersions {

  override protected def dexInitialSuiteConfig: Config = ConfigFactory.parseString(s"""waves.dex.price-assets = [ "$UsdId", "WAVES" ]""")

  protected val carol    = mkKeyPair("carol")
  protected val accounts = List(alice, bob)

  override protected def beforeAll(): Unit = {
    wavesNode1.start()
    wavesNode2.start()
    wavesNode2.api.connect(wavesNode1.networkAddress)
    wavesNode2.api.waitForConnectedPeer(wavesNode1.networkAddress)
    broadcastAndAwait(
      IssueUsdTx,
      IssueEthTx,
      mkTransfer(alice, carol, 1.003.waves, Waves)
    )
    wavesNode1.api.waitForHeightArise()
    wavesNode2.api.waitForHeight(wavesNode1.api.currentHeight)
    broadcastAndAwait(
      mkTransfer(alice, bob, IssueUsdTx.getQuantity / 2, usd),
      mkTransfer(alice, bob, IssueEthTx.getQuantity / 2, eth)
    )
  }

  protected def waitOnBoth(order: Order, status: Status): Unit = {
    dex1.api.waitForOrderStatus(order, status)
    dex2.api.waitForOrderStatus(order, status)
  }

  protected def cancelAll(): Unit = {
    accounts.foreach(dex2.api.cancelAll(_))
    accounts.foreach(dex2.api.waitForOrderHistory(_, activeOnly = Some(true))(_.isEmpty))
  }

  protected def state(dexApi: DexApi[Id], orders: IndexedSeq[Order]): MatcherState = clean(matcherState(List(wavesUsdPair), orders, accounts, dexApi))

  private def clean(state: MatcherState): MatcherState = state.copy(
    offset = 0L, // doesn't matter in this test
    // we can't guarantee that the SaveSnapshot message will arrive at the same place in an order book's queue on both matchers
    snapshots = state.snapshots.map { case (k, _) => k -> 0L }
  )

} 
Example 37
Source File: RoundingIssuesTestSuite.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.api.http.entities.HttpOrderStatus.Status
import com.wavesplatform.dex.api.http.entities.{HttpOrderStatus, HttpV0LevelAgg}
import com.wavesplatform.dex.domain.asset.Asset.Waves
import com.wavesplatform.dex.domain.order.OrderType
import com.wavesplatform.it.MatcherSuiteBase
import com.wavesplatform.wavesj.transactions.ExchangeTransaction

class RoundingIssuesTestSuite extends MatcherSuiteBase {

  override protected def dexInitialSuiteConfig: Config = ConfigFactory.parseString(s"""waves.dex.price-assets = [ "$UsdId", "$BtcId", "WAVES" ]""")

  override protected def beforeAll(): Unit = {
    wavesNode1.start()
    broadcastAndAwait(IssueUsdTx, IssueEthTx, IssueBtcTx)
    dex1.start()
  }

  "should correctly fill an order with small amount" in {
    val aliceBalanceBefore = wavesNode1.api.balance(alice, Waves)
    val bobBalanceBefore   = wavesNode1.api.balance(bob, Waves)

    val counter = mkOrder(alice, wavesUsdPair, OrderType.BUY, 3100000000L, 238)
    dex1.api.place(counter)

    val submitted = mkOrder(bob, wavesUsdPair, OrderType.SELL, 425532L, 235)
    dex1.api.place(submitted)

    val filledAmount = 420169L
    dex1.api.waitForOrder(submitted)(_ == HttpOrderStatus(Status.Filled, Some(filledAmount), Some(296219L)))
    dex1.api.waitForOrder(counter)(_ == HttpOrderStatus(Status.PartiallyFilled, Some(filledAmount), Some(40L)))

    val tx = waitForOrderAtNode(counter)
    dex1.api.cancel(alice, counter)

    val exchangeTx =
      wavesNode1.api.transactionInfo(tx.head.getId).getOrElse(throw new RuntimeException(s"Can't find tx with id = '${tx.head.getId}'")) match {
        case r: ExchangeTransaction => r
        case x                      => throw new RuntimeException(s"Expected ExchangeTransaction, but got $x")
      }

    exchangeTx.getPrice shouldBe counter.price
    exchangeTx.getAmount shouldBe filledAmount
    exchangeTx.getBuyMatcherFee shouldBe 40L
    exchangeTx.getSellMatcherFee shouldBe 296219L

    val aliceBalanceAfter = wavesNode1.api.balance(alice, Waves)
    val bobBalanceAfter   = wavesNode1.api.balance(bob, Waves)

    (aliceBalanceAfter - aliceBalanceBefore) shouldBe (-40L + 420169L)
    (bobBalanceAfter - bobBalanceBefore) shouldBe (-296219L - 420169L)
  }

  "reserved balance should not be negative" in {
    val counter = mkOrder(bob, ethBtcPair, OrderType.BUY, 923431000L, 31887L)
    dex1.api.place(counter)

    val submitted = mkOrder(alice, ethBtcPair, OrderType.SELL, 223345000L, 31887L)
    dex1.api.place(submitted)

    val filledAmount = 223344937L
    dex1.api.waitForOrder(submitted)(_ == HttpOrderStatus(Status.Filled, filledAmount = Some(filledAmount), filledFee = Some(299999L)))
    dex1.api.waitForOrder(counter)(_ == HttpOrderStatus(Status.PartiallyFilled, filledAmount = Some(filledAmount), filledFee = Some(72559L)))

    withClue("Alice's reserved balance before cancel")(dex1.api.reservedBalance(alice) shouldBe empty)

    waitForOrderAtNode(counter)
    dex1.api.cancel(bob, counter)

    withClue("Bob's reserved balance after cancel")(dex1.api.reservedBalance(bob) shouldBe empty)
  }

  "should correctly fill 2 counter orders" in {
    val counter1 = mkOrder(bob, wavesUsdPair, OrderType.SELL, 98333333L, 60L)
    dex1.api.place(counter1)

    val counter2 = mkOrder(bob, wavesUsdPair, OrderType.SELL, 100000000L, 70L)
    dex1.api.place(counter2)

    val submitted = mkOrder(alice, wavesUsdPair, OrderType.BUY, 100000000L, 1000L)
    dex1.api.place(submitted)

    dex1.api.waitForOrder(submitted)(_ == HttpOrderStatus(Status.Filled, filledAmount = Some(99523810L), filledFee = Some(298571L)))
    dex1.api.waitForOrder(counter2)(_ == HttpOrderStatus(Status.PartiallyFilled, filledAmount = Some(2857143L), filledFee = Some(8571L)))

    withClue("orderBook check") {
      val ob = dex1.api.orderBook(wavesUsdPair)
      ob.bids shouldBe empty
      ob.asks shouldBe List(HttpV0LevelAgg(97142857L, 70L)) // = 100000000 - 2857143
    }
  }
} 
Example 38
Source File: DisableProducerTestSuite.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.domain.order.{Order, OrderType}
import com.wavesplatform.it.MatcherSuiteBase

class DisableProducerTestSuite extends MatcherSuiteBase {

  override protected val dexInitialSuiteConfig: Config = ConfigFactory.parseString(
    """waves.dex.events-queue {
      |  local.enable-storing  = no
      |  kafka.producer.enable = no
      |}""".stripMargin
  )

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    broadcastAndAwait(IssueEthTx)
  }

  "Check no commands are written to queue" - {
    "check assets's balances" in {
      wavesNode1.api.balance(alice, eth) shouldBe IssueEthTx.getQuantity
      wavesNode1.api.balance(matcher, eth) shouldBe 0L
    }

    "place an order and wait some time" in {
      def test(order: Order): Unit = dex1.api.tryPlace(order) should failWith(528) // FeatureDisabled

      List(
        mkOrder(alice, ethWavesPair, OrderType.SELL, 500, 2.waves * Order.PriceConstant),
        mkOrder(alice, ethWavesPair, OrderType.BUY, 500, 2.waves * Order.PriceConstant)
      ).foreach(test)

      Thread.sleep(5000)

      dex1.api.currentOffset should be(-1)
      dex1.api.lastOffset should be(-1)
    }

    "Commands aren't written to queue after restart" in {
      dex1.restart()

      dex1.api.currentOffset should be(-1)
      dex1.api.lastOffset should be(-1)
    }
  }
} 
Example 39
Source File: MatcherRecoveryTestSuite.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.domain.order.Order
import com.wavesplatform.it._
import com.wavesplatform.it.api.{MatcherCommand, MatcherState}
import org.scalacheck.Gen

import scala.util.Random

class MatcherRecoveryTestSuite extends MatcherSuiteBase {

  override protected def dexInitialSuiteConfig: Config = {
    ConfigFactory.parseString(
      s"""waves.dex {
       |  snapshots-interval = 51
       |  price-assets = [ "$UsdId", "WAVES" ]
       |}""".stripMargin
    )
  }

  private val placesNumber  = 200
  private val cancelsNumber = placesNumber / 10

  private val assetPairs = List(ethUsdPair, wavesUsdPair, ethWavesPair)
  private val orders     = Gen.containerOfN[Vector, Order](placesNumber, orderGen(matcher, alice, assetPairs)).sample.get

  private var successfulCommandsNumber = 0

  "Place, fill and cancel a lot of orders" in {
    val cancels = (1 to cancelsNumber).map(_ => choose(orders))

    val placeCommands  = Random.shuffle(orders.map(MatcherCommand.Place(dex1.asyncApi, _)))
    val cancelCommands = cancels.map(MatcherCommand.Cancel(dex1.asyncApi, alice, _))

    successfulCommandsNumber += executeCommands(placeCommands)
    successfulCommandsNumber += executeCommands(cancelCommands)
  }

  "Wait until all requests are processed - 1" in {
    dex1.api.waitForCurrentOffset(_ == successfulCommandsNumber - 1) // Index starts from 0
  }

  private var stateBefore: MatcherState = _

  "Store the current state" in {
    stateBefore = state
    withClue("common offset") { stateBefore.offset should be > 0L }
    stateBefore.snapshots.foreach {
      case (assetPair, snapshotOffset) => withClue(assetPair) { snapshotOffset should be > 0L }
    }
  }

  "Restart the matcher" in dex1.restart()

  "Wait until all requests are processed - 2" in dex1.api.waitForCurrentOffset(_ == successfulCommandsNumber - 1)

  "Verify the state" in {
    val stateAfter = state
    stateBefore should matchTo(stateAfter)
  }

  private def state = cleanState(matcherState(assetPairs, orders, Seq(alice)))

  protected def cleanState(state: MatcherState): MatcherState = state

  override protected def beforeAll(): Unit = {
    wavesNode1.start()
    broadcastAndAwait(IssueEthTx, IssueUsdTx)
    dex1.start()
  }
} 
Example 40
Source File: BroadcastUntilConfirmedTestSuite.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.sync

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.api.http.entities.HttpOrderStatus.Status
import com.wavesplatform.dex.domain.order.OrderType
import com.wavesplatform.dex.it.docker.WavesNodeContainer
import com.wavesplatform.it.MatcherSuiteBase

class BroadcastUntilConfirmedTestSuite extends MatcherSuiteBase {

  override protected def dexInitialSuiteConfig: Config =
    ConfigFactory
      .parseString(s"""waves.dex.exchange-transaction-broadcast {
                      |  broadcast-until-confirmed = yes
                      |  interval = 10s
                      |}""".stripMargin)

  // Validator node
  protected lazy val wavesNode2: WavesNodeContainer = {
    createWavesNode("waves-2", suiteInitialConfig = ConfigFactory.parseString("waves.miner.enable = no") withFallback wavesNodeInitialSuiteConfig)
  }

  private val aliceOrder = mkOrder(alice, ethWavesPair, OrderType.SELL, 100000L, 80000L)
  private val bobOrder   = mkOrder(bob, ethWavesPair, OrderType.BUY, 200000L, 100000L)

  "BroadcastUntilConfirmed" in {
    markup("Disconnect a miner node from the network")
    wavesNode1.disconnectFromNetwork()

    markup("Place orders, those should match")
    eventually { dex1.api.tryPlace(aliceOrder) shouldBe 'right }

    dex1.api.place(bobOrder)
    dex1.api.waitForOrderStatus(aliceOrder, Status.Filled)

    markup("Wait for a transaction")
    val exchangeTxId = dex1.api.waitForTransactionsByOrder(aliceOrder, 1).head.getId

    markup("Connect the miner node to the network")
    wavesNode1.connectToNetwork()

    markup("Wait until it receives the transaction")
    wavesNode2.api.waitForTransaction(exchangeTxId)
  }

  override protected def beforeAll(): Unit = {
    wavesNode1.start()
    wavesNode2.start()

    wavesNode2.api.connect(wavesNode1.networkAddress)
    wavesNode2.api.waitForConnectedPeer(wavesNode1.networkAddress)

    dex1.start()

    broadcastAndAwait(IssueEthTx)
    wavesNode2.api.waitForTransaction(IssueEthTx)
  }
} 
Example 41
Source File: HasKafka.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.it.api

import java.util.concurrent.ThreadLocalRandom

import com.dimafeng.testcontainers.KafkaContainer
import com.github.dockerjava.api.model.{ContainerNetwork, NetworkSettings}
import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.it.test.InformativeTestStart

import scala.collection.JavaConverters._

trait HasKafka { self: BaseContainersKit with InformativeTestStart =>

  protected val kafkaContainerName = s"$networkName-kafka"

  protected val kafkaIp = getIp(12)

  protected def dexKafkaConfig(topic: String = ThreadLocalRandom.current.nextInt(0, Int.MaxValue).toString): Config = ConfigFactory.parseString(
    s"""waves.dex.events-queue {
       |  type = kafka
       |  kafka {
       |    servers = "$kafkaIp:9092"
       |    topic = "$topic"
       |  }
       |}""".stripMargin
  )

  protected val kafka: KafkaContainer =
    KafkaContainer().configure { k =>
      k.withNetwork(network)
      k.withNetworkAliases(kafkaContainerName)
      k.withCreateContainerCmdModifier { cmd =>
        cmd withName kafkaContainerName
        cmd withIpv4Address kafkaIp
      }
    }

  protected def disconnectKafkaFromNetwork(): Unit = {
    writeGlobalLog("--- Disconnecting Kafka from the network ---")

    kafka.dockerClient
      .disconnectFromNetworkCmd()
      .withContainerId(kafka.containerId)
      .withNetworkId(network.getId)
      .exec()

    waitForNetworkSettings(!_.getNetworks.containsKey(network.getId))
  }

  protected def connectKafkaToNetwork(): Unit = {
    writeGlobalLog("--- Connecting Kafka to the network ---")

    kafka.dockerClient
      .connectToNetworkCmd()
      .withContainerId(kafka.containerId)
      .withNetworkId(network.getId)
      .withContainerNetwork(
        new ContainerNetwork()
          .withIpamConfig(new ContainerNetwork.Ipam().withIpv4Address(kafkaIp))
          .withAliases(kafka.networkAliases.asJava))
      .exec()

    waitForNetworkSettings(_.getNetworks.containsKey(network.getId))
  }

  private def waitForNetworkSettings(pred: NetworkSettings => Boolean): Unit =
    Iterator
      .continually {
        Thread.sleep(1000)
        kafka.dockerClient.inspectContainerCmd(kafka.containerId).exec().getNetworkSettings
      }
      .zipWithIndex
      .find { case (ns, attempt) => pred(ns) || attempt == 10 }
      .fold(log.warn(s"Can't wait on ${kafka.containerId}"))(_ => ())
} 
Example 42
Source File: MultipleVersions.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.it.api

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.it.api.node.HasWavesNode
import com.wavesplatform.dex.it.dex.HasDex
import com.wavesplatform.dex.it.docker.{DexContainer, WavesNodeContainer}

trait MultipleVersions extends HasDex with HasWavesNode { self: BaseContainersKit =>
  private val dex2Tag  = Option(System.getenv("DEX_MULTIPLE_VERSIONS_PREVIOUS_TAG")).getOrElse("latest")
  private val node2Tag = Option(System.getenv("NODE_MULTIPLE_VERSIONS_PREVIOUS_TAG")).getOrElse("latest")

  protected lazy val wavesNode2: WavesNodeContainer = createWavesNode("waves-2", tag = node2Tag, netAlias = None)

  protected def dex2SuiteConfig: Config = dexInitialSuiteConfig.withFallback {
    ConfigFactory.parseString(
      s"""waves.dex {
         |  waves-blockchain-client.grpc.target = "${wavesNode2.networkAddress.getHostName}:6887"
         |}""".stripMargin
    )
  }

  protected lazy val dex2: DexContainer = createDex("dex-2", suiteInitialConfig = dex2SuiteConfig, tag = dex2Tag)
} 
Example 43
Source File: HasJwt.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.it.api.websockets

import java.security
import java.security.KeyPairGenerator
import java.util.Base64

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.api.ws.protocol.WsAddressSubscribe.JwtPayload
import com.wavesplatform.dex.auth.JwtUtils
import com.wavesplatform.dex.domain.account.KeyPair
import play.api.libs.json.Json

import scala.concurrent.duration.{FiniteDuration, _}

trait HasJwt extends JwtUtils {

  protected val authServiceKeyPair: security.KeyPair = {
    val kpg = KeyPairGenerator.getInstance("RSA")
    kpg.initialize(2048)
    kpg.generateKeyPair()
  }

  protected def jwtPublicKeyConfig: Config = ConfigFactory.parseString(
    s"""waves.dex.web-sockets.external-client-handler.jwt-public-key = \"\"\"-----BEGIN PUBLIC KEY-----
       |${Base64.getEncoder.encodeToString(authServiceKeyPair.getPublic.getEncoded).grouped(64).mkString("\n")}
       |-----END PUBLIC KEY-----\"\"\"
       |""".stripMargin
  )

  protected def mkJwt(payload: JwtPayload): String = mkJwt(authServiceKeyPair, Json.toJsObject(payload))

  protected def mkJwt(clientKeyPair: KeyPair, lifetime: FiniteDuration = 1.hour): String = {
    mkJwt(mkJwtSignedPayload(clientKeyPair, lifetime = lifetime))
  }
} 
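jwtPublicKeyConfig above relies on HOCON triple-quoted strings to inline a multi-line PEM block as a single config value. A minimal sketch of that behaviour; the key body is a placeholder, not a real key:

import com.typesafe.config.ConfigFactory

object TripleQuotedSketch extends App {
  // Triple-quoted HOCON strings preserve embedded newlines verbatim.
  val cfg = ConfigFactory.parseString(
    "jwt-public-key = \"\"\"-----BEGIN PUBLIC KEY-----\nMIIB...placeholder...\n-----END PUBLIC KEY-----\"\"\"")

  println(cfg.getString("jwt-public-key").split("\n").length) // 3
}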
Example 44
Source File: HasWavesNode.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.it.api.node

import cats.Functor
import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.it.api.BaseContainersKit
import com.wavesplatform.dex.it.config.GenesisConfig
import com.wavesplatform.dex.it.docker.WavesNodeContainer
import com.wavesplatform.dex.it.fp.CanExtract
import mouse.any._

trait HasWavesNode { self: BaseContainersKit =>
  private val defaultTag = Option(System.getenv("NODE_TAG")).getOrElse("latest")

  protected implicit def toNodeExplicitGetOps[F[_]: Functor: CanExtract](self: NodeApi[F]): NodeApiOps.ExplicitGetNodeApiOps[F] = {
    new NodeApiOps.ExplicitGetNodeApiOps[F](self)
  }

  protected def wavesNodeInitialSuiteConfig: Config = ConfigFactory.empty()

  protected lazy val wavesNodeRunConfig: Config = GenesisConfig.config

  protected def createWavesNode(name: String,
                                runConfig: Config = wavesNodeRunConfig,
                                suiteInitialConfig: Config = wavesNodeInitialSuiteConfig,
                                tag: String = defaultTag,
                                netAlias: Option[String] = Some(WavesNodeContainer.wavesNodeNetAlias)): WavesNodeContainer =
    WavesNodeContainer(name, networkName, network, getIp(name), runConfig, suiteInitialConfig, localLogsDir, tag, netAlias) unsafeTap addKnownContainer

  lazy val wavesNode1: WavesNodeContainer = createWavesNode("waves-1")
} 
Example 45
Source File: GenesisConfig.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.it.config

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.domain.account.AddressScheme

object GenesisConfig {

  val generatorConfig: Config = ConfigFactory.parseResources("genesis.conf")
  val config: Config          = GenesisConfigGenerator.generate(generatorConfig)

  private val requiredChainId = config.getString("waves.blockchain.custom.address-scheme-character").head.toByte

  def setupAddressScheme(): Unit = {
    if (AddressScheme.current.chainId != requiredChainId)
      AddressScheme.current = new AddressScheme {
        override val chainId: Byte = requiredChainId
      }
  }

  setupAddressScheme()
} 
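parseResources above reads genesis.conf straight from the classpath. Unlike ConfigFactory.load, it does not merge reference.conf or resolve substitutions, so resolve() has to be called explicitly if the file uses ${...} references. A hedged sketch, assuming the same genesis.conf resource is available:

import com.typesafe.config.ConfigFactory

object ParseResourcesSketch extends App {
  // parseResources only parses the named resource; no reference.conf merge, no resolution.
  val generatorConfig = ConfigFactory.parseResources("genesis.conf").resolve()
  println(generatorConfig.root().render())
}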
Example 46
Source File: ResponseParsers.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.it.sttp

import com.google.common.primitives.Longs
import com.softwaremill.sttp.{DeserializationError, ResponseAs, MonadError => _, _}
import com.typesafe.config.{Config, ConfigFactory}
import play.api.libs.json.JsError

import scala.util.{Failure, Success, Try}

object ResponseParsers {

  val asUtf8String: ResponseAs[String, Nothing] = asString("UTF-8")

  def asLong: ResponseAs[Either[DeserializationError[JsError], Long], Nothing] =
    asUtf8String.map { string =>
      val r = Longs.tryParse(string)
      if (r == null) Left(DeserializationError[JsError](string, JsError("Can't parse Long"), "Can't parse Long"))
      else Right(r)
    }

  def asConfig: ResponseAs[Either[DeserializationError[JsError], Config], Nothing] =
    asUtf8String.map { string =>
      Try(ConfigFactory.parseString(string)) match {
        case Success(r) => Right(r)
        case Failure(e) => Left(DeserializationError[JsError](string, JsError("Can't parse Config"), s"Can't parse Config: ${e.getMessage}"))
      }
    }
} 
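asConfig above guards parseString with Try because malformed HOCON makes it throw. The same guard without the sttp types, as a rough sketch:

import com.typesafe.config.{Config, ConfigException, ConfigFactory}
import scala.util.{Failure, Success, Try}

object SafeParseSketch extends App {
  def parse(raw: String): Either[String, Config] =
    Try(ConfigFactory.parseString(raw)) match {
      case Success(cfg)                => Right(cfg)
      case Failure(e: ConfigException) => Left(s"Can't parse Config: ${e.getMessage}")
      case Failure(e)                  => Left(e.getMessage)
    }

  println(parse("a = 1").map(_.getInt("a"))) // Right(1)
  println(parse("a = [ unbalanced"))         // Left(Can't parse Config: ...)
}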
Example 47
Source File: MyJournalSpec.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.persistence.jdbcjournal

import akka.persistence.CapabilityFlag
import akka.persistence.journal.JournalSpec
import akka.persistence.snapshot.SnapshotStoreSpec
import com.typesafe.config.ConfigFactory
import org.scalatest.BeforeAndAfter
import org.slf4j.LoggerFactory

class MyJournalSpec extends JournalSpec (
  config = ConfigFactory.parseString(
    s"""
       |akka.persistence.query.jdbc-read-journal.configName = MyJournalSpec
       |jdbc-journal.configName = MyJournalSpec
       |jdbc-snapshot-store.configName = MyJournalSpec
     """.stripMargin).withFallback(ConfigFactory.load("application-test.conf"))) {

  val log = LoggerFactory.getLogger(getClass)

  val errorHandler = new JdbcJournalErrorHandler {
    override def onError(e: Exception): Unit = log.error("JdbcJournalErrorHandler.onError", e)
  }

  JdbcJournalConfig.setConfig("MyJournalSpec", JdbcJournalConfig(DataSourceUtil.createDataSource("MyJournalSpec"), Some(errorHandler), StorageRepoConfig(), new PersistenceIdParserImpl('-')))

  override protected def supportsRejectingNonSerializableObjects: CapabilityFlag = false
}

class MySnapshotStoreSpec extends SnapshotStoreSpec (
  config = ConfigFactory.parseString(
    s"""
       |akka.persistence.query.jdbc-read-journal.configName = MySnapshotStoreSpec
       |jdbc-journal.configName = MySnapshotStoreSpec
       |jdbc-snapshot-store.configName = MySnapshotStoreSpec
     """.stripMargin).withFallback(ConfigFactory.load("application-test.conf"))) with BeforeAndAfter {

  val log = LoggerFactory.getLogger(getClass)

  val errorHandler = new JdbcJournalErrorHandler {
    override def onError(e: Exception): Unit = log.error("JdbcJournalErrorHandler.onError", e)
  }

  JdbcJournalConfig.setConfig("MySnapshotStoreSpec", JdbcJournalConfig(DataSourceUtil.createDataSource("MySnapshotStoreSpec"), None, StorageRepoConfig(), new PersistenceIdParserImpl('-')))

} 
Example 48
Source File: JacksonJsonSerializerTest.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.serializing

import akka.actor.ActorSystem
import akka.serialization.SerializationExtension
import com.fasterxml.jackson.annotation.JsonTypeInfo
import com.fasterxml.jackson.databind.{SerializationFeature, ObjectMapper}
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.typesafe.config.{ConfigFactory, Config}
import org.scalatest.{Matchers, FunSuite}

class JacksonJsonSerializerTest extends FunSuite with Matchers {

  val objectMapper = new ObjectMapper()
  objectMapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false)
  objectMapper.registerModule(new DefaultScalaModule)

  test("serializer") {
    JacksonJsonSerializer.setObjectMapper(objectMapper)
    val serializer = new JacksonJsonSerializer()
    val a = Animal("our cat", 12, Cat("black", true))
    val bytes = serializer.toBinary(a)
    val ar = serializer.fromBinary(bytes, classOf[Animal]).asInstanceOf[Animal]
    assert( a == ar)
  }

  test("Registering the serializer works") {
    JacksonJsonSerializer.setObjectMapper(objectMapper)
    val system = ActorSystem("JacksonJsonSerializerTest", ConfigFactory.load("akka-tools-json-serializing.conf"))

    val serialization = SerializationExtension.get(system)
    assert( classOf[JacksonJsonSerializer] ==  serialization.serializerFor(classOf[Animal]).getClass)

    system.terminate()
  }

  test("DepricatedTypeWithMigrationInfo") {
    JacksonJsonSerializer.setObjectMapper(objectMapper)
    val serializer = new JacksonJsonSerializer()
    val bytes = serializer.toBinary(OldType("12"))
    assert(NewType(12) == serializer.fromBinary(bytes, classOf[OldType]))
  }

  test("verifySerialization - no error") {
    JacksonJsonSerializer.setObjectMapper(objectMapper)
    JacksonJsonSerializer.setVerifySerialization(true)
    val serializer = new JacksonJsonSerializer()
    val a = Animal("our cat", 12, Cat("black", true))
    val ow = ObjectWrapperWithTypeInfo(a)
    serializer.toBinary(ow)
  }

  test("verifySerialization - with error") {
    JacksonJsonSerializer.setObjectMapper(objectMapper)
    JacksonJsonSerializer.setVerifySerialization(true)
    val serializer = new JacksonJsonSerializer()
    val a = Animal("our cat", 12, Cat("black", true))
    val ow = ObjectWrapperWithoutTypeInfo(a)
    intercept[JacksonJsonSerializerVerificationFailed] {
      serializer.toBinary(ow)
    }
  }

  test("verifySerialization - disabled") {
    JacksonJsonSerializer.setObjectMapper(objectMapper)
    JacksonJsonSerializer.setVerifySerialization(true)
    val serializer = new JacksonJsonSerializer()
    val a = Animal("our cat", 12, Cat("black", true))
    val ow = ObjectWrapperWithoutTypeInfoOverrided(a)
    serializer.toBinary(ow)
  }



}

case class Animal(name:String, age:Int, t:Cat) extends JacksonJsonSerializable

case class Cat(color:String, tail:Boolean)

case class OldType(s:String) extends DepricatedTypeWithMigrationInfo {
  override def convertToMigratedType(): AnyRef = NewType(s.toInt)
}
case class NewType(i:Int)


case class ObjectWrapperWithTypeInfo(@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "@any_class") any:AnyRef)

case class ObjectWrapperWithoutTypeInfo(any:AnyRef)

case class ObjectWrapperWithoutTypeInfoOverrided(any:AnyRef) extends JacksonJsonSerializableButNotDeserializable 
Example 49
Source File: ClusterSingletonHelperTest.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.cluster

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.testkit.{TestKit, TestProbe}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FunSuiteLike, Matchers}
import org.slf4j.LoggerFactory

import scala.util.Random

object ClusterSingletonHelperTest {
  val port = 20000 + Random.nextInt(20000)
}

class ClusterSingletonHelperTest (_system:ActorSystem) extends TestKit(_system) with FunSuiteLike with Matchers with BeforeAndAfterAll with BeforeAndAfter {

  def this() = this(ActorSystem("test-actor-system", ConfigFactory.parseString(
      s"""akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
          |akka.remote.enabled-transports = ["akka.remote.netty.tcp"]
          |akka.remote.netty.tcp.hostname="localhost"
          |akka.remote.netty.tcp.port=${ClusterSingletonHelperTest.port}
          |akka.cluster.seed-nodes = ["akka.tcp://test-actor-system@localhost:${ClusterSingletonHelperTest.port}"]
    """.stripMargin
    ).withFallback(ConfigFactory.load("application-test.conf"))))

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }

  val log = LoggerFactory.getLogger(getClass)


  test("start and communicate with cluster-singleton") {


    val started = TestProbe()
    val proxy = ClusterSingletonHelper.startClusterSingleton(system, Props(new OurClusterSingleton(started.ref)), "ocl")
    started.expectMsg("started")
    val sender = TestProbe()
    sender.send(proxy, "ping")
    sender.expectMsg("pong")

  }
}

class OurClusterSingleton(started:ActorRef) extends Actor {

  started ! "started"
  def receive = {
    case "ping" => sender ! "pong"
  }
} 
Example 50
Source File: GeneralAggregateWithShardingTest.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.aggregate

import java.util.{Arrays, UUID}

import akka.actor.ActorSystem
import akka.actor.Status.Failure
import akka.testkit.{TestKit, TestProbe}
import com.typesafe.config.ConfigFactory
import no.nextgentel.oss.akkatools.aggregate.testAggregate.StateName._
import no.nextgentel.oss.akkatools.aggregate.testAggregate.{StateName, _}
import no.nextgentel.oss.akkatools.testing.AggregateTesting
import org.scalatest._
import org.slf4j.LoggerFactory

import scala.util.Random

object GeneralAggregateWithShardingTest {
  val port = 20000 + Random.nextInt(20000)
}


class GeneralAggregateWithShardingTest(_system:ActorSystem) extends TestKit(_system) with FunSuiteLike with Matchers with BeforeAndAfterAll with BeforeAndAfter {

  def this() = this(ActorSystem("test-actor-system", ConfigFactory.parseString(
    s"""akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
        |akka.remote.enabled-transports = ["akka.remote.netty.tcp"]
        |akka.remote.netty.tcp.hostname="localhost"
        |akka.remote.netty.tcp.port=${GeneralAggregateWithShardingTest.port}
        |akka.cluster.seed-nodes = ["akka.tcp://test-actor-system@localhost:${GeneralAggregateWithShardingTest.port}"]
    """.stripMargin
  ).withFallback(ConfigFactory.load("application-test.conf"))))

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }

  val log = LoggerFactory.getLogger(getClass)
  private def generateId() = UUID.randomUUID().toString

  val seatIds = List("s1","id-used-in-Failed-in-onAfterValidationSuccess", "s2", "s3-This-id-is-going-to-be-discarded", "s4")

  trait TestEnv extends AggregateTesting[BookingState] {
    val id = generateId()
    val printShop = TestProbe()
    val cinema = TestProbe()
    val onSuccessDmForwardReceiver = TestProbe()

    val starter = new AggregateStarterSimple("booking", system).withAggregatePropsCreator {
      dmSelf =>
        BookingAggregate.props(dmSelf, dmForwardAndConfirm(printShop.ref).path, dmForwardAndConfirm(cinema.ref).path, seatIds, dmForwardAndConfirm(onSuccessDmForwardReceiver.ref).path)
    }

    val main = starter.dispatcher
    starter.start()

    def assertState(correctState:BookingState): Unit = {
      assert(getState(id) == correctState)
    }

  }




  test("normal flow") {

    new TestEnv {

      // Make sure we start with empty state
      assertState(BookingState.empty())

      val maxSeats = 2
      val sender = TestProbe()
      // Open the booking
      println("1")
      sendDMBlocking(main, OpenBookingCmd(id, maxSeats), sender.ref)
      println("2")
      assertState(BookingState(OPEN, maxSeats, Set()))

    }
  }
} 
Example 51
package no.nextgentel.oss.akkatools.aggregate.aggregateTest_usingAggregateStateBase

import java.util.UUID

import akka.actor.{ActorPath, ActorSystem, Props}
import akka.persistence.{DeleteMessagesFailure, DeleteMessagesSuccess, SaveSnapshotFailure, SaveSnapshotSuccess, SnapshotMetadata, SnapshotOffer}
import akka.testkit.{TestKit, TestProbe}
import com.typesafe.config.ConfigFactory
import no.nextgentel.oss.akkatools.aggregate._
import no.nextgentel.oss.akkatools.testing.AggregateTesting
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FunSuiteLike, Matchers}
import org.slf4j.LoggerFactory



  override def onSnapshotOffer(offer: SnapshotOffer): Unit = {
    state = offer.snapshot.asInstanceOf[StringState]
  }

  override def acceptSnapshotRequest(req: SaveSnapshotOfCurrentState): Boolean = {
    if (state == StringState("WAT")) {
      state = StringState("SAVED")
      true
    }
    else {
      state = StringState("WAT") //So it works second time
      false
    }
  }

  override def onSnapshotSuccess(success: SaveSnapshotSuccess): Unit = {
    state = StringState("SUCCESS_SNAP")
  }

  override def onSnapshotFailure(failure: SaveSnapshotFailure): Unit = {
    state = StringState("FAIL_SNAP")
  }

  override def onDeleteMessagesSuccess(success: DeleteMessagesSuccess): Unit = {
    state = StringState("SUCCESS_MSG")
  }

  override def onDeleteMessagesFailure(failure: DeleteMessagesFailure): Unit = {
    state = StringState("FAIL_MSG")
  }

  // Used as the prefix/base when constructing the persistenceId - the unique ID is extracted at runtime from the actorPath, which is constructed by the Sharding coordinator
  override def persistenceIdBase(): String = "/x/"
}

case class StringEv(data: String)

case class StringState(data:String) extends AggregateStateBase[StringEv, StringState] {
  override def transitionState(event: StringEv): StateTransition[StringEv, StringState] =
    StateTransition(StringState(event.data))
} 
Example 52
Source File: ActorWithDMSupportTest.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.persistence

import java.util.concurrent.TimeUnit

import akka.actor.{Props, ActorSystem}
import akka.testkit.{TestProbe, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Matchers, FunSuiteLike}

import scala.concurrent.duration.FiniteDuration

class ActorWithDMSupportTest(_system:ActorSystem) extends TestKit(_system) with FunSuiteLike with Matchers with BeforeAndAfterAll with BeforeAndAfter {
  def this() = this(ActorSystem("ActorWithDMSupportTest", ConfigFactory.load("application-test.conf")))

  test("success with dm") {
    val a = system.actorOf(Props(new TestActorWithDMSupport()))
    val s = TestProbe()

    // send raw
    s.send(a, "sendok")
    s.expectMsg("ok")

    // send via dm and withNewPayload
    val dm = DurableMessage(1L, "sendok", s.ref.path)
    s.send(a, dm)
    s.expectMsg(dm.withNewPayload("ok"))

    // send raw - do nothing
    s.send(a, "silent")


    // send silent - wait for confirm
    s.send(a, DurableMessage(1L, "silent", s.ref.path))
    s.expectMsg( DurableMessageReceived(1,None) )


    // send noconfirm - with dm
    s.send(a, DurableMessage(1L, "no-confirm", s.ref.path))
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

    // send no-confirm-custom - with dm
    s.send(a, DurableMessage(1L, "no-confirm-custom", s.ref.path))
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

    // send noconfirm - without dm
    s.send(a, "no-confirm")
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

    // send no-confirm-custom - without dm
    s.send(a, "no-confirm-custom")
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

  }


}

class TestActorWithDMSupport extends ActorWithDMSupport {
  // All raw messages or payloads in DMs are passed to this function.
  override def receivePayload = {
    case "sendok" =>
      send(sender.path, "ok")
    case "silent" =>
      ()
    case "no-confirm" =>
      throw new LogWarningAndSkipDMConfirmException("something went wrong")
    case "no-confirm-custom" =>
      throw new CustomLogWarningAndSkipDMConfirm()
  }
}

class CustomLogWarningAndSkipDMConfirm extends Exception("") with LogWarningAndSkipDMConfirm 
Example 53
Source File: DynamoDBClientV1Config.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.config.client.v1

import com.amazonaws.handlers.RequestHandler2
import com.amazonaws.metrics.RequestMetricCollector
import com.amazonaws.monitoring.MonitoringListener
import com.github.j5ik2o.akka.persistence.dynamodb.client.v1._
import com.github.j5ik2o.akka.persistence.dynamodb.utils.{ ClassCheckUtils, LoggingSupport }
import com.typesafe.config.{ Config, ConfigFactory }
import net.ceedubs.ficus.Ficus._

import scala.collection.immutable._

object DynamoDBClientV1Config extends LoggingSupport {

  val dispatcherNameKey                          = "dispatcher-name"
  val clientConfigurationKey                     = "client-configuration"
  val requestMetricCollectorProviderClassNameKey = "request-metric-collector-provider-class-name"
  val requestMetricCollectorClassNameKey         = "request-metric-collector-class-name"
  val monitoringListenerProviderClassNameKey     = "monitoring-listener-provider-class-name"
  val monitoringListenerClassNameKey             = "monitoring-listener-class-name"
  val requestHandlersProviderClassNameKey        = "request-handlers-provider-class-name"
  val requestHandlerClassNamesKey                = "request-handler-class-names"

  val DefaultRequestMetricCollectorProviderClassName: String = classOf[RequestMetricCollectorProvider.Default].getName
  val DefaultMonitoringListenerProviderClassName: String     = classOf[MonitoringListenerProvider.Default].getName

  def fromConfig(config: Config): DynamoDBClientV1Config = {
    logger.debug("config = {}", config)
    val result = DynamoDBClientV1Config(
      sourceConfig = config,
      dispatcherName = config.getAs[String](dispatcherNameKey),
      clientConfiguration =
        ClientConfiguration.fromConfig(config.getOrElse[Config](clientConfigurationKey, ConfigFactory.empty())),
      requestMetricCollectorProviderClassName = {
        val className =
          config.getOrElse(requestMetricCollectorProviderClassNameKey, DefaultRequestMetricCollectorProviderClassName)
        ClassCheckUtils.requireClass(classOf[RequestMetricCollectorProvider], className)
      },
      requestMetricCollectorClassName = {
        val className = config.getAs[String](requestMetricCollectorClassNameKey)
        ClassCheckUtils.requireClass(classOf[RequestMetricCollector], className)
      },
      monitoringListenerProviderClassName = {
        val className = config
          .getOrElse(monitoringListenerProviderClassNameKey, DefaultMonitoringListenerProviderClassName)
        ClassCheckUtils.requireClass(classOf[MonitoringListenerProvider], className)
      },
      monitoringListenerClassName = {
        val className = config.getAs[String](monitoringListenerClassNameKey)
        ClassCheckUtils.requireClass(classOf[MonitoringListener], className)
      },
      requestHandlersProviderClassName = {
        val className = config
          .getOrElse[String](requestHandlersProviderClassNameKey, classOf[RequestHandlersProvider.Default].getName)
        ClassCheckUtils.requireClass(classOf[RequestHandlersProvider], className)
      },
      requestHandlerClassNames = {
        val classNames = config.getOrElse[Seq[String]](requestHandlerClassNamesKey, Seq.empty)
        classNames.map { className => ClassCheckUtils.requireClass(classOf[RequestHandler2], className) }
      }
    )
    logger.debug("result = {}", result)
    result
  }
}

case class DynamoDBClientV1Config(
    sourceConfig: Config,
    dispatcherName: Option[String],
    clientConfiguration: ClientConfiguration,
    requestMetricCollectorProviderClassName: String,
    requestMetricCollectorClassName: Option[String],
    monitoringListenerProviderClassName: String,
    monitoringListenerClassName: Option[String],
    requestHandlersProviderClassName: String,
    requestHandlerClassNames: Seq[String]
) 
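fromConfig above uses Ficus' getOrElse with ConfigFactory.empty() as the default for optional sub-sections. A hedged sketch of the same idea in plain typesafe-config; the helper and keys are illustrative:

import com.typesafe.config.{Config, ConfigFactory}

object OptionalSectionSketch extends App {
  // Fall back to an empty Config when an optional sub-section is absent.
  def sectionOrEmpty(config: Config, path: String): Config =
    if (config.hasPath(path)) config.getConfig(path) else ConfigFactory.empty()

  val root = ConfigFactory.parseString("client-configuration { max-connections = 10 }")
  println(sectionOrEmpty(root, "client-configuration").getInt("max-connections")) // 10
  println(sectionOrEmpty(root, "v1-dax").isEmpty)                                 // true
}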
Example 54
Source File: DynamoDBClientConfig.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.config.client

import com.github.j5ik2o.akka.persistence.dynamodb.config.client.v1.DynamoDBClientV1Config
import com.github.j5ik2o.akka.persistence.dynamodb.config.client.v1dax.DynamoDBClientV1DaxConfig
import com.github.j5ik2o.akka.persistence.dynamodb.config.client.v2.DynamoDBClientV2Config
import com.github.j5ik2o.akka.persistence.dynamodb.utils.LoggingSupport
import com.typesafe.config.{ Config, ConfigFactory }
import net.ceedubs.ficus.Ficus._

object DynamoDBClientConfig extends LoggingSupport {

  val accessKeyIdKeyKey      = "access-key-id"
  val secretAccessKeyKey     = "secret-access-key"
  val endpointKey            = "endpoint"
  val regionKey              = "region"
  val clientVersionKey       = "client-version"
  val clientTypeKey          = "client-type"
  val v1Key                  = "v1"
  val v1DaxKey               = "v1-dax"
  val v2Key                  = "v2"
  val batchGetItemLimitKey   = "batch-get-item-limit"
  val batchWriteItemLimitKey = "batch-write-item-limit"

  val DefaultClientVersion: ClientVersion.Value = ClientVersion.V2
  val DefaultClientType: ClientType.Value       = ClientType.Async
  val DefaultBatchGetItemLimit                  = 100
  val DefaultBatchWriteItemLimit                = 25

  def fromConfig(config: Config, legacy: Boolean): DynamoDBClientConfig = {
    logger.debug("config = {}", config)
    val result = DynamoDBClientConfig(
      sourceConfig = config,
      accessKeyId = config.getAs[String](accessKeyIdKeyKey),
      secretAccessKey = config.getAs[String](secretAccessKeyKey),
      endpoint = config.getAs[String](endpointKey),
      region = config.getAs[String](regionKey),
      clientVersion =
        config.getAs[String](clientVersionKey).map(s => ClientVersion.withName(s)).getOrElse(DefaultClientVersion),
      clientType = config.getAs[String](clientTypeKey).map(s => ClientType.withName(s)).getOrElse(DefaultClientType),
      DynamoDBClientV1Config.fromConfig(config.getOrElse[Config](v1Key, ConfigFactory.empty())),
      DynamoDBClientV1DaxConfig.fromConfig(config.getOrElse[Config](v1DaxKey, ConfigFactory.empty())), {
        if (legacy) {
          logger.warn(
            "<<<!!!CAUTION: PLEASE MIGRATE TO NEW CONFIG FORMAT!!!>>>\n" +
            "\tThe configuration items of AWS-SDK V2 client remain with the old key names: (j5ik2o.dynamo-db-journal.dynamo-db-client).\n" +
            "\tPlease change current key name to the new key name: (j5ik2o.dynamo-db-journal.dynamo-db-client.v2). \n\t" +
            DynamoDBClientV2Config.existsKeyNames(config).filter(_._2).keys.mkString("child-keys = [ ", ", ", " ]")
          )
          DynamoDBClientV2Config.fromConfig(config, legacy)
        } else
          DynamoDBClientV2Config.fromConfig(config.getOrElse[Config](v2Key, ConfigFactory.empty()), legacy)
      },
      batchGetItemLimit = config.getOrElse[Int](batchGetItemLimitKey, DefaultBatchGetItemLimit),
      batchWriteItemLimit = config.getOrElse[Int](batchWriteItemLimitKey, DefaultBatchWriteItemLimit)
    )
    logger.debug("result = {}", result)
    result
  }

}

case class DynamoDBClientConfig(
    sourceConfig: Config,
    accessKeyId: Option[String],
    secretAccessKey: Option[String],
    endpoint: Option[String],
    region: Option[String],
    clientVersion: ClientVersion.Value,
    clientType: ClientType.Value,
    v1ClientConfig: DynamoDBClientV1Config,
    v1DaxClientConfig: DynamoDBClientV1DaxConfig,
    v2ClientConfig: DynamoDBClientV2Config,
    batchGetItemLimit: Int, // Currently unused
    batchWriteItemLimit: Int
) {
  require(batchGetItemLimit >= 1 && batchGetItemLimit <= 100)
  require(batchWriteItemLimit >= 1 && batchWriteItemLimit <= 25)
} 
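
A minimal usage sketch (not part of the original file; the credentials, endpoint, and port below are placeholder values): the companion's fromConfig can be driven directly from a HOCON snippet parsed with ConfigFactory, and any keys left out fall back to the defaults shown above.

object DynamoDBClientConfigUsageSketch extends App {
  import com.typesafe.config.ConfigFactory
  import com.github.j5ik2o.akka.persistence.dynamodb.config.client.DynamoDBClientConfig

  // Only the keys we care about are set; client-version, client-type and the
  // batch limits fall back to the defaults defined in the companion object.
  val clientConfig = DynamoDBClientConfig.fromConfig(
    ConfigFactory.parseString(
      """
        |access-key-id = "dummy"
        |secret-access-key = "dummy"
        |endpoint = "http://127.0.0.1:8000/"
        |region = "ap-northeast-1"
        |""".stripMargin
    ),
    legacy = false
  )

  println(clientConfig.endpoint) // Some(http://127.0.0.1:8000/)
}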
Example 55
Source File: DynamoDBClientV2Config.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.config.client.v2

import com.github.j5ik2o.akka.persistence.dynamodb.client.v1.ExecutionInterceptorsProvider
import com.github.j5ik2o.akka.persistence.dynamodb.client.v2.RetryPolicyProvider
import com.github.j5ik2o.akka.persistence.dynamodb.utils.ConfigOps._
import com.github.j5ik2o.akka.persistence.dynamodb.utils.{ ClassCheckUtils, LoggingSupport }
import com.typesafe.config.{ Config, ConfigFactory }
import net.ceedubs.ficus.Ficus._
import software.amazon.awssdk.core.interceptor.ExecutionInterceptor
import software.amazon.awssdk.core.retry.RetryMode

import scala.collection.immutable._
import scala.concurrent.duration.FiniteDuration

object DynamoDBClientV2Config extends LoggingSupport {

  val dispatcherNameKey                        = "dispatcher-name"
  val asyncKey                                 = "async"
  val syncKey                                  = "sync"
  val headersKey                               = "headers"
  val retryModeKey                             = "retry-mode"
  val retryPolicyProviderClassNameKey          = "retry-policy-provider-class-name"
  val executionInterceptorClassNamesKey        = "execution-interceptor-class-names"
  val executionInterceptorProviderClassNameKey = "execution-interceptor-provider-class-name"
  val apiCallTimeoutKey                        = "api-call-timeout"
  val apiCallAttemptTimeoutKey                 = "api-call-attempt-timeout"

  val keyNames: Seq[String] =
    Seq(dispatcherNameKey, asyncKey, syncKey, retryModeKey, apiCallTimeoutKey, apiCallAttemptTimeoutKey)

  def existsKeyNames(config: Config): Map[String, Boolean] = {
    keyNames.map(v => (v, config.exists(v))).toMap
  }

  def fromConfig(config: Config, legacy: Boolean): DynamoDBClientV2Config = {
    logger.debug("config = {}", config)
    val result = DynamoDBClientV2Config(
      sourceConfig = config,
      dispatcherName = config.getAs[String](dispatcherNameKey),
      asyncClientConfig = {
        if (legacy) {
          logger.warn(
            "<<<!!!CAUTION: PLEASE MIGRATE TO NEW CONFIG FORMAT!!!>>>\n" +
            "\tThe configuration items of AWS-SDK V2 client remain with the old key names: (j5ik2o.dynamo-db-journal.dynamo-db-client).\n" +
            "\tPlease change current key name to the new key name: (j5ik2o.dynamo-db-journal.dynamo-db-client.v2.async). \n\t" +
            AsyncClientConfig.existsKeyNames(config).filter(_._2).keys.mkString("child-keys = [ ", ", ", " ]")
          )
          AsyncClientConfig.fromConfig(config)
        } else
          AsyncClientConfig.fromConfig(config.getOrElse[Config](asyncKey, ConfigFactory.empty()))
      },
      syncClientConfig = SyncClientConfig.fromConfig(config.getOrElse[Config](syncKey, ConfigFactory.empty())),
      headers = config.getOrElse[Map[String, Seq[String]]](headersKey, Map.empty),
      retryMode = config.getAs[String](retryModeKey).map(s => RetryMode.valueOf(s)),
      retryPolicyProviderClassName = {
        val className = config
          .getAs[String](retryPolicyProviderClassNameKey).orElse(Some(classOf[RetryPolicyProvider.Default].getName))
        ClassCheckUtils.requireClass(classOf[RetryPolicyProvider], className)
      },
      executionInterceptorClassNames = {
        val classNames = config.getOrElse[Seq[String]](executionInterceptorClassNamesKey, Seq.empty)
        classNames.map(s => ClassCheckUtils.requireClass(classOf[ExecutionInterceptor], s))
      },
      executionInterceptorsProviderClassName = {
        val className = config.getOrElse[String](
          executionInterceptorProviderClassNameKey,
          classOf[ExecutionInterceptorsProvider.Default].getName
        )
        ClassCheckUtils.requireClass(classOf[ExecutionInterceptorsProvider], className)
      },
      apiCallTimeout = config.getAs[FiniteDuration](apiCallTimeoutKey),
      apiCallAttemptTimeout = config.getAs[FiniteDuration](apiCallAttemptTimeoutKey)
    )
    logger.debug("result = {}", result)
    result
  }
}

case class DynamoDBClientV2Config(
    sourceConfig: Config,
    dispatcherName: Option[String],
    asyncClientConfig: AsyncClientConfig,
    syncClientConfig: SyncClientConfig,
    headers: Map[String, Seq[String]],
    retryMode: Option[RetryMode],
    retryPolicyProviderClassName: Option[String],
    executionInterceptorClassNames: Seq[String],
    executionInterceptorsProviderClassName: String,
    apiCallTimeout: Option[FiniteDuration],
    apiCallAttemptTimeout: Option[FiniteDuration]
) 
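
For illustration only (the dispatcher name is made up and the async/sync subtrees are left empty): with legacy = false the async and sync client settings are read from nested async { } and sync { } blocks, whereas the legacy mode reads them from the flat root, as the migration warning above describes.

object DynamoDBClientV2ConfigLayoutSketch extends App {
  import com.typesafe.config.ConfigFactory
  import com.github.j5ik2o.akka.persistence.dynamodb.config.client.v2.DynamoDBClientV2Config

  // New layout: async/sync settings live in their own subtrees.
  val newLayout = ConfigFactory.parseString(
    """
      |dispatcher-name = "my-blocking-dispatcher"
      |async { }
      |sync { }
      |""".stripMargin
  )

  val v2Config = DynamoDBClientV2Config.fromConfig(newLayout, legacy = false)
  println(v2Config.dispatcherName) // Some(my-blocking-dispatcher)
}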
Example 56
Source File: CurrentPersistenceIds1Test.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.query.query

import java.net.URI

import akka.pattern.ask
import akka.persistence.query.{ EventEnvelope, Sequence }
import com.github.j5ik2o.akka.persistence.dynamodb.query.QueryJournalSpec
import com.github.j5ik2o.akka.persistence.dynamodb.utils.{ DynamoDBSpecSupport, RandomPortUtil }
import com.github.j5ik2o.reactive.aws.dynamodb.DynamoDbAsyncClient
import com.typesafe.config.{ Config, ConfigFactory }
import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
import software.amazon.awssdk.services.dynamodb.{ DynamoDbAsyncClient => JavaDynamoDbAsyncClient }

import scala.concurrent.Future
import scala.concurrent.duration._

abstract class CurrentPersistenceIds1Test(config: Config) extends QueryJournalSpec(config) {

  it should "not find any events for unknown pid" in
  withCurrentEventsByPersistenceId()("unknown-pid", 0L, Long.MaxValue) { tp =>
    tp.request(Int.MaxValue)
    tp.expectComplete()
  }

  it should "find events from an offset" in {
    withTestActors() { (actor1, actor2, actor3) =>
      Future.sequence(Range.inclusive(1, 4).map(_ => actor1 ? "a")).toTry should be a Symbol("success")

      withCurrentEventsByPersistenceId()("my-1", 2, 3) { tp =>
        tp.request(Int.MaxValue)
        tp.expectNext(new EventEnvelope(Sequence(2), "my-1", 2, "a-2", 0L))
        tp.expectNext(new EventEnvelope(Sequence(3), "my-1", 3, "a-3", 0L))
        tp.expectComplete()
      }
    }
  }
}

object DynamoDBCurrentPersistenceIds1Test {
  val dynamoDBPort = RandomPortUtil.temporaryServerPort()
}

class DynamoDBCurrentPersistenceIds1Test
    extends CurrentPersistenceIds1Test(
      ConfigFactory
        .parseString(
          s"""
           |j5ik2o.dynamo-db-journal {
           |  query-batch-size = 1
           |  dynamo-db-client {
           |    endpoint = "http://127.0.0.1:${DynamoDBCurrentPersistenceIds1Test.dynamoDBPort}/"
           |  }
           |}
           |
           |j5ik2o.dynamo-db-snapshot.dynamo-db-client {
           |  endpoint = "http://127.0.0.1:${DynamoDBCurrentPersistenceIds1Test.dynamoDBPort}/"
           |}
           |
           |j5ik2o.dynamo-db-read-journal {
           |  query-batch-size = 1
           |  dynamo-db-client {
           |    endpoint = "http://127.0.0.1:${DynamoDBCurrentPersistenceIds1Test.dynamoDBPort}/"
           |  }
           |}
           """.stripMargin
        ).withFallback(ConfigFactory.load("query-reference"))
    )
    with DynamoDBSpecSupport {

  override implicit val pc: PatienceConfig = PatienceConfig(30 seconds, 1 seconds)

  override protected lazy val dynamoDBPort: Int = DynamoDBCurrentPersistenceIds1Test.dynamoDBPort

  val underlying: JavaDynamoDbAsyncClient = JavaDynamoDbAsyncClient
    .builder()
    .credentialsProvider(
      StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKeyId, secretAccessKey))
    )
    .endpointOverride(URI.create(dynamoDBEndpoint))
    .build()

  override def dynamoDbAsyncClient: DynamoDbAsyncClient = DynamoDbAsyncClient(underlying)

  override def afterAll(): Unit = {
    underlying.close()
    super.afterAll()
  }

  before { createTable }

  after { deleteTable }

} 
Example 57
Source File: CurrentEventsByTagTest1.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.query.query

import java.net.URI

import com.github.j5ik2o.akka.persistence.dynamodb.query.QueryJournalSpec
import com.github.j5ik2o.akka.persistence.dynamodb.utils.{ DynamoDBSpecSupport, RandomPortUtil }
import com.github.j5ik2o.reactive.aws.dynamodb.DynamoDbAsyncClient
import com.typesafe.config.{ Config, ConfigFactory }
import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
import software.amazon.awssdk.services.dynamodb.{ DynamoDbAsyncClient => JavaDynamoDbAsyncClient }

import scala.concurrent.duration._

abstract class CurrentEventsByTagTest1(config: Config) extends QueryJournalSpec(config) {

  it should "not find an event by tag for unknown tag" in {
    withTestActors() { (actor1, actor2, actor3) =>
      List(
        sendMessage(withTags("a", "one"), actor1),
        sendMessage(withTags("a", "two"), actor2),
        sendMessage(withTags("a", "three"), actor3)
      ).toTry should be a Symbol("success")

      withCurrentEventsByTag()("unknown", 0) { tp =>
        tp.request(Int.MaxValue)
        tp.expectComplete()
      }
    }
  }
}

object DynamoDBCurrentEventsByTagTest1 {
  val dynamoDBPort = RandomPortUtil.temporaryServerPort()
}

class DynamoDBCurrentEventsByTagTest1
    extends CurrentEventsByTagTest1(
      ConfigFactory
        .parseString(
          s"""
           |j5ik2o.dynamo-db-journal.dynamo-db-client {
           |  endpoint = "http://127.0.0.1:${DynamoDBCurrentEventsByTagTest1.dynamoDBPort}/"
           |}
           |
           |j5ik2o.dynamo-db-snapshot.dynamo-db-client {
           |  endpoint = "http://127.0.0.1:${DynamoDBCurrentEventsByTagTest1.dynamoDBPort}/"
           |}
           |j5ik2o.dynamo-db-read-journal {
           |  batch-size = 1
           |}
           |j5ik2o.dynamo-db-read-journal.dynamo-db-client {
           |  endpoint = "http://127.0.0.1:${DynamoDBCurrentEventsByTagTest1.dynamoDBPort}/"
           |}
           """.stripMargin
        ).withFallback(ConfigFactory.load("query-reference"))
    )
    with DynamoDBSpecSupport {

  override implicit val pc: PatienceConfig = PatienceConfig(30 seconds, 1 seconds)

  override protected lazy val dynamoDBPort: Int = DynamoDBCurrentEventsByTagTest1.dynamoDBPort

  val underlying: JavaDynamoDbAsyncClient = JavaDynamoDbAsyncClient
    .builder()
    .credentialsProvider(
      StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKeyId, secretAccessKey))
    )
    .endpointOverride(URI.create(dynamoDBEndpoint))
    .build()

  override def dynamoDbAsyncClient: DynamoDbAsyncClient = DynamoDbAsyncClient(underlying)

  override def afterAll(): Unit = {
    underlying.close()
    super.afterAll()
  }

  before { createTable }

  after { deleteTable }

} 
Example 58
Source File: CurrentEventsByTagTest4.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.query.query

import java.net.URI

import akka.pattern.ask
import akka.persistence.query.{ EventEnvelope, Sequence }
import com.github.j5ik2o.akka.persistence.dynamodb.query.QueryJournalSpec
import com.github.j5ik2o.akka.persistence.dynamodb.utils.{ DynamoDBSpecSupport, RandomPortUtil }
import com.github.j5ik2o.reactive.aws.dynamodb.DynamoDbAsyncClient
import com.typesafe.config.{ Config, ConfigFactory }
import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
import software.amazon.awssdk.services.dynamodb.{ DynamoDbAsyncClient => JavaDynamoDbAsyncClient }

import scala.concurrent.duration._

abstract class CurrentEventsByTagTest4(config: Config) extends QueryJournalSpec(config) {

  it should "persist and find a tagged event with one tag" in
  withTestActors() { (actor1, actor2, actor3) =>
    (actor1 ? withTags(1, "one2")).toTry should be a Symbol("success")

    withClue("query should find the event by tag") {
      withCurrentEventsByTag()("one2", 0) { tp =>
        tp.request(Int.MaxValue)
        tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => }
        tp.expectComplete()
      }
    }

    withClue("query should find the event by persistenceId") {
      withCurrentEventsByPersistenceId()("my-1", 1, 1) { tp =>
        tp.request(Int.MaxValue)
        tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => }
        tp.expectComplete()
      }
    }
  }
}

object DynamoDBCurrentEventsByTagTest4 {
  val dynamoDBPort = RandomPortUtil.temporaryServerPort()
}

class DynamoDBCurrentEventsByTagTest4
    extends CurrentEventsByTagTest4(
      ConfigFactory
        .parseString(
          s"""
             |j5ik2o.dynamo-db-journal{
             |  query-batch-size = 1
             |  dynamo-db-client {
             |    endpoint = "http://127.0.0.1:${DynamoDBCurrentEventsByTagTest4.dynamoDBPort}/"
             |  }
             |}
             |
             |j5ik2o.dynamo-db-snapshot.dynamo-db-client {
             |  endpoint = "http://127.0.0.1:${DynamoDBCurrentEventsByTagTest4.dynamoDBPort}/"
             |}
             |
             |j5ik2o.dynamo-db-read-journal {
             |  query-batch-size = 1
             |  dynamo-db-client {
             |    endpoint = "http://127.0.0.1:${DynamoDBCurrentEventsByTagTest4.dynamoDBPort}/"
             |  }
             |}
           """.stripMargin
        ).withFallback(ConfigFactory.load("query-reference"))
    )
    with DynamoDBSpecSupport {

  override implicit val pc: PatienceConfig = PatienceConfig(30 seconds, 1 seconds)

  override protected lazy val dynamoDBPort: Int = DynamoDBCurrentEventsByTagTest4.dynamoDBPort

  val underlying: JavaDynamoDbAsyncClient = JavaDynamoDbAsyncClient
    .builder()
    .credentialsProvider(
      StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKeyId, secretAccessKey))
    )
    .endpointOverride(URI.create(dynamoDBEndpoint))
    .build()

  override def dynamoDbAsyncClient: DynamoDbAsyncClient = DynamoDbAsyncClient(underlying)

  override def afterAll(): Unit = {
    underlying.close()
    super.afterAll()
  }

  before { createTable }

  after { deleteTable }

} 
Example 59
Source File: ConfigHelper.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.journal

import com.github.j5ik2o.akka.persistence.dynamodb.config.client.{ ClientType, ClientVersion }
import com.github.j5ik2o.akka.persistence.dynamodb.utils.ConfigRenderUtils
import com.typesafe.config.{ Config, ConfigFactory }

object ConfigHelper {

  def config(
      defaultResource: String,
      legacyConfigFormat: Boolean,
      legacyJournalMode: Boolean,
      dynamoDBPort: Int,
      clientVersion: ClientVersion.Value,
      clientType: ClientType.Value,
      journalRowDriverWrapperClassName: Option[String],
      kafkaPort: Option[Int]
  ): Config = {
    val configString = s"""
       |akka.persistence.journal.plugin = "j5ik2o.dynamo-db-journal"
       |akka.persistence.snapshot-store.plugin = "j5ik2o.dynamo-db-snapshot"
       |j5ik2o.dynamo-db-journal {
       |  legacy-config-format = $legacyConfigFormat
       |  shard-count = 1024
       |  queue-enable = true
       |  queue-overflow-strategy = fail
       |  queue-buffer-size = 1024
       |  queue-parallelism = 1
       |  write-parallelism = 1
       |  query-batch-size = 1024
       |  dynamo-db-client {
       |    region = "ap-northeast-1"
       |    endpoint = "http://127.0.0.1:${dynamoDBPort}/"
       |    client-version = "${clientVersion.toString.toLowerCase}"
       |    client-type = "${clientType.toString.toLowerCase()}"
       |  }
       |  ${if (journalRowDriverWrapperClassName.nonEmpty) {
                            s"""journal-row-driver-wrapper-class-name = "${journalRowDriverWrapperClassName.get}" """
                          } else ""}
       |  columns-def {
       |    sort-key-column-name = ${if (legacyJournalMode) "sequence-nr" else "skey"}
       |  }
       |}
       |
       |j5ik2o.dynamo-db-snapshot {
       |  dynamo-db-client {
       |    region = "ap-northeast-1"
       |    endpoint = "http://127.0.0.1:${dynamoDBPort}/"
       |  }
       |}
       |
       |j5ik2o.dynamo-db-read-journal {
       |  query-batch-size = 1
       |  dynamo-db-client {
       |    region = "ap-northeast-1"
       |    endpoint = "http://127.0.0.1:${dynamoDBPort}/"
       |  }
       |  columns-def {
       |    sort-key-column-name = ${if (legacyJournalMode) "sequence-nr" else "skey"}
       |  }
       |}
       """.stripMargin
    val config = ConfigFactory
      .parseString(
        configString
      ).withFallback(ConfigFactory.load(defaultResource))
    // println(ConfigRenderUtils.renderConfigToString(config))
    config
  }
} 
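
A hedged usage sketch (not in the original source; the port value is arbitrary and the journal-reference resource is assumed to be provided by the plugin's test resources): a spec can assemble its configuration through ConfigHelper and read back one of the generated values.

object ConfigHelperUsageSketch extends App {
  import com.github.j5ik2o.akka.persistence.dynamodb.config.client.{ ClientType, ClientVersion }

  val config = ConfigHelper.config(
    defaultResource = "journal-reference",
    legacyConfigFormat = false,
    legacyJournalMode = false,
    dynamoDBPort = 8000,
    clientVersion = ClientVersion.V2,
    clientType = ClientType.Async,
    journalRowDriverWrapperClassName = None,
    kafkaPort = None
  )

  println(config.getString("j5ik2o.dynamo-db-journal.dynamo-db-client.endpoint"))
}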
Example 60
Source File: DynamoDBJournalV1SyncSpec.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.journal

import java.net.URI

import akka.persistence.CapabilityFlag
import akka.persistence.journal.JournalSpec
import com.github.j5ik2o.akka.persistence.dynamodb.config.client.{ ClientType, ClientVersion }
import com.github.j5ik2o.akka.persistence.dynamodb.utils.{ DynamoDBSpecSupport, RandomPortUtil }
import com.github.j5ik2o.reactive.aws.dynamodb.DynamoDbAsyncClient
import com.typesafe.config.{ Config, ConfigFactory }
import org.scalatest.concurrent.ScalaFutures
import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
import software.amazon.awssdk.services.dynamodb.{ DynamoDbAsyncClient => JavaDynamoDbAsyncClient }

import scala.concurrent.duration._

object DynamoDBJournalV1SyncSpec {

  val dynamoDBPort: Int          = RandomPortUtil.temporaryServerPort()
  val legacyJournalMode: Boolean = false

}

class DynamoDBJournalV1SyncSpec
    extends JournalSpec(
      ConfigHelper
        .config(
          "journal-reference",
          legacyConfigFormat = false,
          legacyJournalMode = DynamoDBJournalV1SyncSpec.legacyJournalMode,
          dynamoDBPort = DynamoDBJournalV1SyncSpec.dynamoDBPort,
          clientVersion = ClientVersion.V1,
          clientType = ClientType.Sync,
          None,
          None
        )
    )
    with ScalaFutures
    with DynamoDBSpecSupport {
  override protected def supportsRejectingNonSerializableObjects: CapabilityFlag = CapabilityFlag.on()

  implicit val pc: PatienceConfig = PatienceConfig(30 seconds, 1 seconds)

  override protected lazy val dynamoDBPort: Int = DynamoDBJournalV1SyncSpec.dynamoDBPort

  override val legacyJournalTable: Boolean = DynamoDBJournalV1SyncSpec.legacyJournalMode

  val underlying: JavaDynamoDbAsyncClient = JavaDynamoDbAsyncClient
    .builder()
    .credentialsProvider(
      StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKeyId, secretAccessKey))
    )
    .endpointOverride(URI.create(dynamoDBEndpoint))
    .build()

  override def dynamoDbAsyncClient: DynamoDbAsyncClient = DynamoDbAsyncClient(underlying)

  override def beforeAll(): Unit = {
    super.beforeAll()
    createTable()
  }

  override def afterAll(): Unit = {
    deleteTable()
    super.afterAll()
  }

} 
Example 61
Source File: DynamoDBJournalV2SyncSpec.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.journal

import java.net.URI

import akka.persistence.CapabilityFlag
import akka.persistence.journal.JournalSpec
import com.github.j5ik2o.akka.persistence.dynamodb.config.client.{ ClientType, ClientVersion }
import com.github.j5ik2o.akka.persistence.dynamodb.utils.{ DynamoDBSpecSupport, RandomPortUtil }
import com.github.j5ik2o.reactive.aws.dynamodb.DynamoDbAsyncClient
import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.ScalaFutures
import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
import software.amazon.awssdk.services.dynamodb.{ DynamoDbAsyncClient => JavaDynamoDbAsyncClient }

import scala.concurrent.duration._

object DynamoDBJournalV2SyncSpec {
  val dynamoDBPort: Int          = RandomPortUtil.temporaryServerPort()
  val legacyJournalMode: Boolean = false
}

class DynamoDBJournalV2SyncSpec
    extends JournalSpec(
      ConfigHelper.config(
        "journal-reference",
        legacyConfigFormat = false,
        legacyJournalMode = DynamoDBJournalV2SyncSpec.legacyJournalMode,
        dynamoDBPort = DynamoDBJournalV2SyncSpec.dynamoDBPort,
        clientVersion = ClientVersion.V2,
        clientType = ClientType.Sync,
        None,
        None
      )
    )
    with ScalaFutures
    with DynamoDBSpecSupport {
  override protected def supportsRejectingNonSerializableObjects: CapabilityFlag = CapabilityFlag.on()

  implicit val pc: PatienceConfig = PatienceConfig(30 seconds, 1 seconds)

  override protected lazy val dynamoDBPort: Int = DynamoDBJournalV2SyncSpec.dynamoDBPort

  override val legacyJournalTable: Boolean = DynamoDBJournalV2SyncSpec.legacyJournalMode

  val underlying: JavaDynamoDbAsyncClient = JavaDynamoDbAsyncClient
    .builder()
    .credentialsProvider(
      StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKeyId, secretAccessKey))
    )
    .endpointOverride(URI.create(dynamoDBEndpoint))
    .build()

  override def dynamoDbAsyncClient: DynamoDbAsyncClient = DynamoDbAsyncClient(underlying)

  override def beforeAll(): Unit = {
    super.beforeAll()
    createTable()
  }

  override def afterAll(): Unit = {
    deleteTable()
    super.afterAll()
  }

} 
Example 62
Source File: DynamoDBJournalPerfSpec.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.journal

import java.net.URI

import akka.persistence.CapabilityFlag
import akka.persistence.journal.JournalPerfSpec
import com.github.j5ik2o.akka.persistence.dynamodb.utils.{ DynamoDBSpecSupport, RandomPortUtil }
import com.github.j5ik2o.reactive.aws.dynamodb.DynamoDbAsyncClient
import com.typesafe.config.ConfigFactory
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
import software.amazon.awssdk.services.dynamodb.{ DynamoDbAsyncClient => JavaDynamoDbAsyncClient }

import scala.concurrent.duration._

object DynamoDBJournalPerfSpec {
  val dynamoDBPort = RandomPortUtil.temporaryServerPort()
}

class DynamoDBJournalPerfSpec
    extends JournalPerfSpec(
      ConfigFactory
        .parseString(
          s"""
        |j5ik2o.dynamo-db-journal {
        |  shard-count = 2
        |  queue-buffer-size = 1024
        |  queue-parallelism = 1
        |  write-parallelism = 1
        |  query-batch-size = 1024
        |  dynamo-db-client {
        |    endpoint = "http://127.0.0.1:${DynamoDBJournalPerfSpec.dynamoDBPort}/"
        |  }
        |}
        |
        |j5ik2o.dynamo-db-snapshot.dynamo-db-client {
        |  endpoint = "http://127.0.0.1:${DynamoDBJournalPerfSpec.dynamoDBPort}/"
        |}
        |
        """.stripMargin
        ).withFallback(ConfigFactory.load("journal-reference"))
    )
    with BeforeAndAfterAll
    with ScalaFutures
    with DynamoDBSpecSupport {
  override protected def supportsRejectingNonSerializableObjects: CapabilityFlag = false

  
  override def measurementIterations: Int = 5

  override protected lazy val dynamoDBPort: Int = DynamoDBJournalPerfSpec.dynamoDBPort

  val underlying: JavaDynamoDbAsyncClient = JavaDynamoDbAsyncClient
    .builder()
    .credentialsProvider(
      StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKeyId, secretAccessKey))
    )
    .endpointOverride(URI.create(dynamoDBEndpoint))
    .build()

  override def dynamoDbAsyncClient: DynamoDbAsyncClient = DynamoDbAsyncClient(underlying)

  override def beforeAll(): Unit = {
    super.beforeAll()
    createTable()
  }

  override def afterAll(): Unit = {
    deleteTable()
    super.afterAll()
  }

} 
Example 63
Source File: SnapshotPluginConfig.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.config

import com.github.j5ik2o.akka.persistence.dynamodb.config.client.DynamoDBClientConfig
import com.github.j5ik2o.akka.persistence.dynamodb.metrics.{ MetricsReporter, MetricsReporterProvider }
import com.github.j5ik2o.akka.persistence.dynamodb.utils.{ ClassCheckUtils, LoggingSupport }
import com.typesafe.config.{ Config, ConfigFactory }
import net.ceedubs.ficus.Ficus._

object SnapshotPluginConfig extends LoggingSupport {

  val legacyConfigFormatKey               = "legacy-config-format"
  val tableNameKey                        = "table-name"
  val columnsDefKey                       = "columns-def"
  val consistentReadKey                   = "consistent-read"
  val metricsReporterClassNameKey         = "metrics-reporter-class-name"
  val metricsReporterProviderClassNameKey = "metrics-reporter-provider-class-name"
  val dynamoDbClientKey                   = "dynamo-db-client"

  val DefaultLegacyConfigFormat: Boolean              = false
  val DefaultLegacyConfigLayoutKey: Boolean           = false
  val DefaultTableName: String                        = "Snapshot"
  val DefaultConsistentRead: Boolean                  = false
  val DefaultMetricsReporterClassName: String         = classOf[MetricsReporter.None].getName
  val DefaultMetricsReporterProviderClassName: String = classOf[MetricsReporterProvider.Default].getName

  def fromConfig(config: Config): SnapshotPluginConfig = {
    logger.debug("config = {}", config)
    val legacyConfigFormat = config.getOrElse[Boolean](legacyConfigFormatKey, DefaultLegacyConfigFormat)
    logger.debug("legacy-config-format = {}", legacyConfigFormat)
    val result = SnapshotPluginConfig(
      sourceConfig = config,
      legacyConfigFormat,
      tableName = config.getOrElse[String](tableNameKey, DefaultTableName),
      columnsDefConfig =
        SnapshotColumnsDefConfig.fromConfig(config.getOrElse[Config](columnsDefKey, ConfigFactory.empty())),
      consistentRead = config.getOrElse[Boolean](consistentReadKey, DefaultConsistentRead),
      metricsReporterProviderClassName = {
        val className =
          config.getOrElse[String](metricsReporterProviderClassNameKey, DefaultMetricsReporterProviderClassName)
        ClassCheckUtils.requireClass(classOf[MetricsReporterProvider], className)
      },
      metricsReporterClassName = {
        val className = config.getAs[String](metricsReporterClassNameKey) // , DefaultMetricsReporterClassName)
        ClassCheckUtils.requireClass(classOf[MetricsReporter], className)
      },
      clientConfig = DynamoDBClientConfig
        .fromConfig(config.getOrElse[Config](dynamoDbClientKey, ConfigFactory.empty()), legacyConfigFormat)
    )
    logger.debug("result = {}", result)
    result
  }

}

final case class SnapshotPluginConfig(
    sourceConfig: Config,
    legacyConfigFormat: Boolean,
    tableName: String,
    columnsDefConfig: SnapshotColumnsDefConfig,
    consistentRead: Boolean,
    metricsReporterProviderClassName: String,
    metricsReporterClassName: Option[String],
    clientConfig: DynamoDBClientConfig
) extends PluginConfig {
  override val configRootPath: String = "j5ik2o.dynamo-db-snapshot"
} 
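
As a small illustrative sketch (not from the original file; it assumes the plugin's reference.conf or an equivalent application.conf block is on the classpath), the snapshot plugin config can be built from the j5ik2o.dynamo-db-snapshot subtree named by configRootPath above.

object SnapshotPluginConfigUsageSketch extends App {
  import com.typesafe.config.ConfigFactory
  import com.github.j5ik2o.akka.persistence.dynamodb.config.SnapshotPluginConfig

  val root           = ConfigFactory.load()
  val snapshotConfig = SnapshotPluginConfig.fromConfig(root.getConfig("j5ik2o.dynamo-db-snapshot"))

  println(snapshotConfig.tableName)      // "Snapshot" unless overridden
  println(snapshotConfig.consistentRead) // false unless overridden
}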
Example 64
Source File: DynamoDBSnapshotStoreSpec.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.snapshot

import java.net.URI

import akka.persistence.snapshot.SnapshotStoreSpec
import com.github.j5ik2o.akka.persistence.dynamodb.utils.{ DynamoDBSpecSupport, RandomPortUtil }
import com.github.j5ik2o.reactive.aws.dynamodb.DynamoDbAsyncClient
import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.ScalaFutures
import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient
import software.amazon.awssdk.services.dynamodb.{ DynamoDbAsyncClient => JavaDynamoDbAsyncClient }

import scala.concurrent.duration._

object DynamoDBSnapshotStoreSpec {
  val dynamoDBPort = RandomPortUtil.temporaryServerPort()
}

class DynamoDBSnapshotStoreSpec
    extends SnapshotStoreSpec(
      ConfigFactory
        .parseString(
          s"""
             |j5ik2o.dynamo-db-journal.dynamo-db-client {
             |  endpoint = "http://127.0.0.1:${DynamoDBSnapshotStoreSpec.dynamoDBPort}/"
             |}
             |
             |j5ik2o.dynamo-db-snapshot.dynamo-db-client {
             |  endpoint = "http://127.0.0.1:${DynamoDBSnapshotStoreSpec.dynamoDBPort}/"
             |}
             |
             |j5ik2o.dynamo-db-read-journal.dynamo-db-client {
             |  endpoint = "http://127.0.0.1:${DynamoDBSnapshotStoreSpec.dynamoDBPort}/"
             |}
         """.stripMargin
        ).withFallback(ConfigFactory.load("snapshot-reference"))
    )
    with ScalaFutures
    with DynamoDBSpecSupport {

  implicit val pc: PatienceConfig = PatienceConfig(30 seconds, 1 seconds)

  override protected lazy val dynamoDBPort: Int = DynamoDBSnapshotStoreSpec.dynamoDBPort

  val underlying: JavaDynamoDbAsyncClient = JavaDynamoDbAsyncClient
    .builder()
    .httpClient(NettyNioAsyncHttpClient.builder().maxConcurrency(1).build())
    .credentialsProvider(
      StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKeyId, secretAccessKey))
    )
    .endpointOverride(URI.create(dynamoDBEndpoint))
    .build()

  override def dynamoDbAsyncClient: DynamoDbAsyncClient = DynamoDbAsyncClient(underlying)

  before { createTable }

  after { deleteTable }

} 
Example 65
Source File: ClientsModule.scala    From daf-semantics   with Apache License 2.0 5 votes vote down vote up
package modules

import com.google.inject.ImplementedBy
import play.api.inject.ApplicationLifecycle
import javax.inject.Singleton
import javax.inject.Inject
import play.api.libs.ws.WSClient
import play.api.Application
import play.api.Environment
import play.api.Configuration
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import play.Logger
import clients.OntonetHubClient
import com.typesafe.config.ConfigFactory
import com.typesafe.config.ConfigRenderOptions

@ImplementedBy(classOf[ClientsModuleBase])
trait ClientsModule

@Singleton
class ClientsModuleBase @Inject() (lifecycle: ApplicationLifecycle,
                                   ws: WSClient,
                                   configuration: Configuration) extends ClientsModule {

  val conf_clients = configuration.underlying
    .getConfig("clients")

  val ontonethub_config = conf_clients.getConfig("ontonethub")

  // TODO: verify if default configurations are needed here
  val ontonethub = new OntonetHubClient(ws, ontonethub_config)

  // TESTING ................................................
  val options = ConfigRenderOptions.concise()
    .setComments(false).setOriginComments(false)
    .setFormatted(true).setJson(true)
  val json = ontonethub_config.root().render(options)
  // TESTING ................................................

  // when application starts...
  @Inject
  def onStart(
    app: Application,
    env: Environment)(implicit ec: ExecutionContext) {

    Logger.info("ClientsModuleBase START")

    println("\n\n\n\n\n\n")
    println(json)

  }

  // when application stops...
  lifecycle.addStopHook({ () =>

    Future.successful {

      Logger.info("ClientsModuleBase STOP")

    }

  })

} 
Example 66
Source File: CatalogStandardizationService.scala    From daf-semantics   with Apache License 2.0 5 votes vote down vote up
package it.almawave.kb.http.endpoints

import javax.inject.Singleton
import javax.ws.rs.Path
import org.slf4j.LoggerFactory
import it.almawave.kb.http.models.OntologyMetaModel
import com.typesafe.config.ConfigFactory
import java.nio.file.Paths
import it.almawave.linkeddata.kb.catalog.CatalogBox
import it.almawave.linkeddata.kb.utils.JSONHelper
import it.almawave.daf.standardization.refactoring.CatalogStandardizer

@Singleton
@Path("conf://api-catalog-config")
class CatalogStandardizationService {

  private val logger = LoggerFactory.getLogger(this.getClass)

  val conf = ConfigFactory.parseFile(Paths.get("./conf/catalog.conf").normalize().toFile())
  val catalog = new CatalogBox(conf)
  catalog.start()

  val _standardizer = CatalogStandardizer(catalog)
  _standardizer.start

  def stardardizer = _standardizer

  //  TODO: STOP?

} 
Example 67
Source File: MainSingleStandardization.scala    From daf-semantics   with Apache License 2.0 5 votes vote down vote up
package it.almawave.daf.standardization.refactoring

import org.slf4j.LoggerFactory
import java.nio.file.Paths
import it.almawave.linkeddata.kb.catalog.CatalogBox
import com.typesafe.config.ConfigFactory
import it.almawave.linkeddata.kb.utils.JSONHelper

import it.almawave.linkeddata.kb.catalog.VocabularyBox

object MainSingleStandardization extends App {

  private val logger = LoggerFactory.getLogger(this.getClass)

  val conf = ConfigFactory.parseFile(Paths.get("./conf/catalog.conf").normalize().toFile())

  val catalog = new CatalogBox(conf)
  catalog.start()

  //  val vocID = "legal-status"
  //  val vocID = "theme-subtheme-mapping"
  val vocID = "licences"
  val std: VocabularyStandardizer = CatalogStandardizer(catalog).getVocabularyStandardizerByID(vocID).get
  std.start

  //  println("\n\nCSV")
  //  std.toCSV()(System.out)
  //
  //  println("\n\nTREE")
  val tree = std.toJSONTree()
  val json_tree = JSONHelper.writeToString(tree)
  println(json_tree)

  println("\n\nMETA")
  val meta = std.getMetadata()
  val json_meta = JSONHelper.writeToString(meta)
  println(json_meta)

  std.stop
  catalog.stop()

  // TODO: verify the closing of all active connections

}

object MainStandardizationAll extends App {

  private val logger = LoggerFactory.getLogger(this.getClass)
  val conf = ConfigFactory.parseFile(Paths.get("./conf/catalog.conf").normalize().toFile())

  val catalog = new CatalogBox(conf)
  catalog.start()

  val std = CatalogStandardizer(catalog)
  std.start

  val list = std.getVocabularyStandardizersList()

  list.foreach { vstd =>
    //    println(s"\n\nCSV for ${vstd.vbox}")
    vstd.toCSV()(System.out)
  }

  std.stop
  catalog.stop()

  System.exit(0)
} 
Example 68
Source File: NO_MainAllStandardization.scala    From daf-semantics   with Apache License 2.0 5 votes vote down vote up
package it.almawave.daf.standardization.refactoring

import org.slf4j.LoggerFactory
import java.nio.file.Paths
import com.typesafe.config.ConfigFactory
import it.almawave.linkeddata.kb.catalog.CatalogBox
import scala.util.Try

object NO_MainAllStandardization extends App {

  private val logger = LoggerFactory.getLogger(this.getClass)

  val conf = ConfigFactory.parseFile(Paths.get("./conf/catalog.conf").normalize().toFile())

  val catalog = new CatalogBox(conf)
  catalog.start()

  CatalogStandardizer(catalog).getVocabularyStandardizersList()
    .zipWithIndex
    .slice(1, 2)
    .toList
    .foreach {
      case (std, i) =>

        Try {
          println(s"""\n\n$i: ${std.vbox}""")
          println("\n\nCSV_______________________________________")
          std.toCSV()(System.out)
          println("\n\n__________________________________________")
        }

    }

  catalog.stop()

} 
Example 69
Source File: KBModule.scala    From daf-semantics   with Apache License 2.0 5 votes vote down vote up
package modules

import javax.inject._

import play.api.inject.ApplicationLifecycle
import play.api.mvc._

import scala.concurrent.Future
import com.google.inject.ImplementedBy
import play.api.Play
import play.api.Application
import play.api.Environment
import play.api.Configuration
import scala.concurrent.ExecutionContext
import play.api.Logger
import it.almawave.linkeddata.kb.utils.ConfigHelper
import it.almawave.linkeddata.kb.repo._
import scala.concurrent.ExecutionContext.Implicits.global
import java.nio.file.Paths
import play.api.Mode
import java.io.File
import it.almawave.linkeddata.kb.repo.RDFRepository
import com.typesafe.config.ConfigFactory

@ImplementedBy(classOf[KBModuleBase])
trait KBModule

@Singleton
class KBModuleBase @Inject() (lifecycle: ApplicationLifecycle) extends KBModule {

  // TODO: SPI for dev / prod
  val kbrepo = RDFRepository.memory()

  val logger = Logger.underlyingLogger

  // when application starts...
  @Inject
  def onStart(
    env: Environment,
    configuration: Configuration)(implicit ec: ExecutionContext) {

    // get configs
    val app_type = configuration.underlying.getString("app.type")

    val data_dir = app_type match {
      case "dev"  => "./dist/data"
      case "prod" => "./data"
    }
    logger.debug(s"app_type: ${app_type}")
    logger.debug(s"data_dir: ${data_dir}")

    // starting VocabularyAPI service
    var conf_voc = ConfigFactory.parseFile(new File("./conf/semantic_repository.conf").getAbsoluteFile)
    conf_voc = ConfigHelper.injectParameters(conf_voc, ("data_dir", data_dir))

    kbrepo.configuration(conf_voc)

    logger.info("KBModule.START....")
    logger.debug("KBModule using configuration:\n" + ConfigHelper.pretty(conf_voc))

    println("KBModule using configuration:\n" + ConfigHelper.pretty(conf_voc))

    // this is needed for ensure proper connection(s) etc
    kbrepo.start()

    

    // CHECK the initial (total) triples count
    var triples = kbrepo.store.size()

    logger.info(s"KBModule> ${triples} triples loaded")

  }

  // when application stops...
  lifecycle.addStopHook({ () =>

    Future.successful {

      // this is useful for saving files, closing connections, release indexes, etc
      kbrepo.stop()
      logger.info("KBModule.STOP....")

    }

  })

} 
Example 70
Source File: TestingHttpApi.scala    From daf-semantics   with Apache License 2.0 5 votes vote down vote up
package it.almawave.linkeddata.kb.http

import play.api.inject.guice.GuiceApplicationBuilder
import org.junit.Test
import org.junit.After
import play.api.Application
import org.junit.Before
import it.almawave.linkeddata.kb.utils.JSONHelper
import play.api.libs.ws.WSClient
import org.asynchttpclient.DefaultAsyncHttpClient
import play.api.libs.ws.ssl.SystemConfiguration
import akka.stream.ActorMaterializer
import play.api.libs.ws.ahc.AhcWSClient
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import java.net.URL
import com.typesafe.config.ConfigFactory

class TestingHttpApi {

  var app: Application = null
  var conf = ConfigFactory.empty()
  var ws: WSClient = null
  var app_url = new URL("http://localhost:8080")

  @Test
  def testing_contexts() {

    //    curl -X GET http://localhost:8999/kb/v1/prefixes/lookup?prefix=no_pref 
    //    -H  "accept: application/json" 
    //    -H  "content-type: application/json"

    val fut = ws.url(s"http://localhost:8999/kb/v1/prefixes/lookup")
      .withHeaders(("accept", "application/json"))
      .withHeaders(("content-type", "application/json"))
      .withFollowRedirects(true)
      .withQueryString(("prefix", "muapit"))
      .get()

    val results = Await.result(fut, Duration.Inf)
    println(results.body)

  }

  @Before
  def before() {

    app = GuiceApplicationBuilder()
      .build()

    conf = app.configuration.underlying

    // play.app.local.url
    // play.server.http.address
    // play.server.http.port

    println(JSONHelper.writeToString(conf.root().unwrapped()))

    app_url = new URL(conf.getString("app.local.url"))

    println(s"\n\nrunning at ${app_url}")

    val materializer = ActorMaterializer()(app.actorSystem)
    ws = AhcWSClient()(materializer)

  }

  @After
  def after() {
    ws.close()
    app.stop()
  }

} 
Example 71
Source File: ApiBDD.scala    From daf-semantics   with Apache License 2.0 5 votes vote down vote up
package specs

import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import com.fasterxml.jackson.databind.ObjectMapper
import com.typesafe.config.ConfigFactory
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import play.api.test.WithBrowser

@RunWith(classOf[JUnitRunner])
class IntegrationSpec extends Specification {

  val host = "localhost"

  "semantic_repository" should {

    "expose swagger specification" in new WithBrowser {
      browser.goTo(s"http://${host}:${port}/spec/semantic_repository.yaml")
      browser.pageSource must haveSize(greaterThan(0))
      browser.pageSource must contain("semantic repository")
    }

    "list all existing contexts" in new WithBrowser {
      browser.goTo(s"http://${host}:${port}/kb/v1/contexts")
      browser.pageSource must haveSize(greaterThan(0))

      // DISABLED
      //      val ctxs = JSONHelper.parseString(browser.pageSource).toList
      //      ctxs.find { el => el.get("context").equals("http://xmlns.com/foaf/0.1/") }
      //      ctxs.find { el => el.get("triples").equals(631) }
    }

  }
}

object JSONHelper {

  val json_mapper = new ObjectMapper
  val json_reader = json_mapper.reader()

  val cc = ConfigFactory.empty()

  def parseString(json: String) = json_reader.readTree(json)

} 
Example 72
Source File: MetaCatalogProcessor.scala    From daf   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package it.gov.daf.ingestion.metacatalog

import com.typesafe.config.ConfigFactory
import play.api.libs.json._
import it.gov.daf.catalogmanager._
import it.gov.daf.catalogmanager.json._
import org.slf4j.{Logger, LoggerFactory}
import org.apache.commons.lang.StringEscapeUtils

//Get Logical_uri, process MetadataCatalog and get the required info
class MetaCatalogProcessor(metaCatalog: MetaCatalog) {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  val sftpDefPrefix = ConfigFactory.load().getString("ingmgr.sftpdef.prefixdir")

  
  def separator() = {
    metaCatalog.operational
      .input_src.sftp
      .flatMap(_.headOption)
      .flatMap(_.param)
      .flatMap(_.split(", ").reverse.headOption)
      .map(_.replace("sep=", ""))
      .getOrElse(",")
  }

  def fileFormatNifi(): String = {
    val inputSftp = metaCatalog.operational.input_src.sftp

    inputSftp match {
      case Some(s) =>
        val sftps: Seq[SourceSftp] = s.filter(x => x.name.equals("sftp_daf"))
        if (sftps.nonEmpty) sftps.head.param.getOrElse("")
        else ""

      case None => ""
    }
  }

  def ingPipelineNifi(): String = {
    ingPipeline.mkString(",")
  }

} 
Example 73
Source File: PipeInfoRead.scala    From daf   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package it.gov.daf.ingestion.pipelines

import java.io.FileInputStream
import com.typesafe.config.ConfigFactory
import ingestion_manager.yaml.PipelineInfo
import play.api.libs.json._
import data.PipelineClass.pipelineInfoReads

object PipelineInfoRead {
  val pipeInfoFile = ConfigFactory.load().getString("ingmgr.pipeinfo.datapath")

  def pipelineInfo(): List[PipelineInfo] = {
    val stream = new FileInputStream(pipeInfoFile)
    val pipeInfoOpt: Option[List[PipelineInfo]] = try { Json.parse(stream).asOpt[List[PipelineInfo]] } finally { stream.close() }
    pipeInfoOpt match {
      case Some(s) => s
      case None => List()
    }
  }

  def pipelineInfoByCat(category: String): List[PipelineInfo] = {
    val pipelineList = pipelineInfo()
    pipelineList.filter(_.category.equals(category))
  }
  def pipelineInfoById(id: String): List[PipelineInfo] = {
    val pipelineList = pipelineInfo()
    pipelineList.filter(_.id.equals(id))
  }

} 
Example 74
Source File: UriDataset.scala    From daf   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package it.gov.daf.catalogmanager.utilities.uri

import catalog_manager.yaml.MetaCatalog
import com.typesafe.config.ConfigFactory
import it.gov.daf.catalogmanager.utilities.datastructures.DatasetType
import play.api.Logger

import scala.util.{Failure, Success, Try}

case class UriDataset(
                       domain: String = "NO_DOMAIN",
                       typeDs: DatasetType.Value = DatasetType.RAW,
                       groupOwn: String = "NO_groupOwn",
                       owner: String = "NO_owner",
                       theme: String = "NO_theme",
                       subtheme :String = "NO_theme",
                       nameDs: String = "NO_nameDs") {

  val config = ConfigFactory.load()

  def getUri(): String = {
    domain + "://" + "dataset/" + typeDs + "/" + groupOwn + "/" + owner + "/" + theme + "/" + subtheme + "/" + nameDs
  }


  def getUrl(): String = {

    val basePath = config.getString("Inj-properties.hdfsBasePath")
    val baseDataPath = config.getString("Inj-properties.dataBasePath")
    typeDs match {
      case DatasetType.STANDARD => basePath + baseDataPath + "/" + typeDs + "/" + theme + "/" + subtheme + "/" + groupOwn + "/" + nameDs
      case DatasetType.ORDINARY => basePath + baseDataPath + "/" + typeDs + "/" + owner + "/" + theme + "/" + subtheme + "/" + groupOwn + "/" + nameDs
      case DatasetType.RAW => basePath + baseDataPath + "/" + typeDs + "/" + owner + "/" + theme + "/" + subtheme + "/" + groupOwn + "/" + nameDs
      case _ => "-1"
    }
  }
}


object UriDataset  {
  def apply(uri: String): UriDataset = {
    Try {
      val uri2split = uri.split("://")
      val uriParts = uri2split(1).split("/")
      new UriDataset(
        domain = uri2split(0),
        typeDs = DatasetType.withNameOpt(uriParts(1)).get,
        groupOwn = uriParts(2),
        owner = uriParts(3),
        theme = uriParts(4),
        subtheme = uriParts(5),
        nameDs = uriParts(6))
    } match {
      case Success(s) => s
      case Failure(err) =>
        Logger.error("Error while creating uri: " + uri + " - " + err.getMessage)
        UriDataset()
    }

  }

  def convertToUriDataset(schema: MetaCatalog): UriDataset =  {

      val typeDs = if (schema.operational.is_std)
        DatasetType.STANDARD
      else
        DatasetType.ORDINARY
      new UriDataset(
        domain = "daf",
        typeDs = typeDs,
        groupOwn = schema.operational.group_own,
        owner = schema.dcatapit.owner_org.get,
        theme  = schema.operational.theme,
        subtheme = schema.operational.subtheme,
        nameDs = schema.dataschema.avro.name
      )

  }

} 
Example 75
Source File: ScalastyleInspectionsGenerator.scala    From sonar-scala   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.mwz.sonar.scala.metadata.scalastyle

import java.io.InputStream
import java.nio.file.Paths

import com.mwz.sonar.scala.metadata.scalastyle._
import com.typesafe.config.{Config, ConfigFactory}
import org.scalastyle.{Level, _}
import sbt.Keys._
import sbt._

import scala.meta._
import scala.xml.{Node, NodeSeq, XML}


  def transform(source: Tree, inspections: Seq[ScalastyleInspection]): Tree = {
    val stringified: Seq[String] = inspections.collect {
      case inspection =>
        // Is there a better way of embedding multi-line text?
        val extraDescription = inspection.extraDescription.map(s => "\"\"\"" + s + "\"\"\"")
        val justification = inspection.justification.map(s => "\"\"\"" + s + "\"\"\"")
        val params = inspection.params.map { p =>
          s"""
             |ScalastyleParam(
             |  name = "${p.name}",
             |  typ = ${p.typ},
             |  label = "${p.label}",
             |  description = \"\"\"${p.description}\"\"\",
             |  default = \"\"\"${p.default}\"\"\"
             |)
           """.stripMargin
        }

        // It doesn't seem to be straightforward to automatically convert a collection
        // into a tree using scalameta, so I'm turning it into a String so it can be parsed,
        // which is easier than constructing the tree manually.
        // Totally doable with shapeless though, but it would be a bit of an overkill in this case.
        s"""
           |ScalastyleInspection(
           |  clazz = "${inspection.clazz}",
           |  id = "${inspection.id}",
           |  label = "${inspection.label}",
           |  description = "${inspection.description}",
           |  extraDescription = $extraDescription,
           |  justification = $justification,
           |  defaultLevel = ${inspection.defaultLevel},
           |  params = ${params.toString.parse[Term].get.syntax}
           |)
         """.stripMargin
    }

    // Transform the template file.
    val term: Term = stringified.toString.parse[Term].get
    source.transform {
      case q"val AllInspections: $tpe = $expr" =>
        q"val AllInspections: $tpe = $term"
    }
  }
} 
Example 76
Source File: LilaWsServer.scala    From lila-ws   with GNU Affero General Public License v3.0 5 votes vote down vote up
package lila.ws

import akka.actor.typed.{ ActorSystem, Scheduler }
import com.softwaremill.macwire._
import com.typesafe.config.{ Config, ConfigFactory }
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext

import util.Util.nowSeconds

object Boot extends App {

  lazy val config: Config                         = ConfigFactory.load
  lazy val clientSystem: ClientSystem             = ActorSystem(Clients.behavior, "clients")
  implicit def scheduler: Scheduler               = clientSystem.scheduler
  implicit def executionContext: ExecutionContext = clientSystem.executionContext

  lazy val mongo         = wire[Mongo]
  lazy val groupedWithin = wire[util.GroupedWithin]
  lazy val lightUserApi  = wire[LightUserApi]
  lazy val lilaRedis     = wire[Lila]
  lazy val lilaHandlers  = wire[LilaHandler]
  lazy val roundCrowd    = wire[RoundCrowd]
  lazy val roomCrowd     = wire[RoomCrowd]
  lazy val crowdJson     = wire[ipc.CrowdJson]
  lazy val users         = wire[Users]
  lazy val keepAlive     = wire[KeepAlive]
  lazy val lobby         = wire[Lobby]
  lazy val socialGraph   = wire[SocialGraph]
  lazy val friendList    = wire[FriendList]
  lazy val services      = wire[Services]
  lazy val controller    = wire[Controller]
  lazy val router        = wire[Router]
  lazy val seenAt        = wire[SeenAtUpdate]
  lazy val auth          = wire[Auth]
  lazy val nettyServer   = wire[netty.NettyServer]
  lazy val monitor       = wire[Monitor]

  wire[LilaWsServer].start
}

final class LilaWsServer(
    nettyServer: netty.NettyServer,
    handlers: LilaHandler, // must eagerly instantiate!
    lila: Lila,
    monitor: Monitor,
    scheduler: Scheduler
)(implicit ec: ExecutionContext) {

  def start(): Unit = {

    monitor.start()

    Bus.internal.subscribe(
      "users",
      {
        case ipc.LilaIn.ConnectUser(_, true) => // don't send to lila
        case msg: ipc.LilaIn.Site            => lila.emit.site(msg)
      }
    )

    scheduler.scheduleWithFixedDelay(30.seconds, 7211.millis) { () =>
      Bus.publish(_.all, ipc.ClientCtrl.Broom(nowSeconds - 30))
    }

    nettyServer.start() // blocks
  }
}

object LilaWsServer {

  val connections = new java.util.concurrent.atomic.AtomicInteger
} 
Example 77
Source File: File.scala    From nescala   with GNU General Public License v2.0 5 votes vote down vote up
package com.owlandrews.nescala.helpers

import com.owlandrews.nescala.Console

object File {
  import java.io.File
  import java.net.URL
  import java.io.{FileFilter, FileInputStream, FileOutputStream, ObjectInputStream, ObjectOutputStream}
  import javax.imageio.ImageIO

  import scala.util.Try
  import scala.xml.XML
  import scala.language.postfixOps

  import sys.process._

  import com.typesafe.config.ConfigFactory

  def Download(url: String, filename: String) = (for{
    url <- Try(new URL(url))
    conn <- Try(url.openConnection().connect())
    file <- Try(new File(filename))
  } yield Try(url  #> file !!)) map {x => new File(filename)}

  def Writer(filename: String)(op: java.io.PrintWriter => Unit) = {
    val p = new java.io.PrintWriter(new File(filename))
    try op(p)
    finally p.close()
  }

  def Write(filename: String, content: String) = {
    val res = new java.io.PrintWriter(new File(filename))
    res.write(content)
    res.close()
  }

  def Filter = new FileFilter {
    override def accept(pathname: File): Boolean = pathname.getName.toLowerCase.endsWith(".nes")
  }

  def Image(file:Try[File]) = file.map(ImageIO.read)

  def Image(filename:String) = Try(ImageIO.read(resource(filename)))

  def Xml(filename:String) = XML.load(resource(filename))

  def Config(filename:String) = {
    val file = new File(filename)
    if (file.exists()) ConfigFactory.parseFile(file)
    else ConfigFactory.empty()
  }

  def SaveState(console:Console) = {
    val fos = new FileOutputStream(s"$ApplicationFolder/${console.cartridge.CRC}.save")
    val oos = new ObjectOutputStream(fos)

    oos.writeObject(console)
    oos.close()
  }

  def LoadState(crc:String):Try[Console] = Try {
    val fis = new FileInputStream(s"$ApplicationFolder/$crc.save")
    val ois = new ObjectInputStreamWithCustomClassLoader(fis)

    val console = ois.readObject.asInstanceOf[Console]
    ois.close()
    console
  }

  // Taken from: https://gist.github.com/ramn/5566596
  private class ObjectInputStreamWithCustomClassLoader(fileInputStream: FileInputStream) extends ObjectInputStream(fileInputStream) {
    override def resolveClass(desc: java.io.ObjectStreamClass): Class[_] = {
      try { Class.forName(desc.getName, false, getClass.getClassLoader) }
      catch { case ex: ClassNotFoundException => super.resolveClass(desc) }
    }
  }

  lazy val ApplicationFolder: File = {
    val settingDirectory = System.getProperty("user.home") + "/.nescala"
    val settings = new java.io.File(settingDirectory)
    if (!settings.exists()) settings.mkdir()
    settings
  }

  private def resource(filename:String) = getClass.getResourceAsStream(filename)
} 
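
An illustrative sketch (the settings file name and the scale key are made up): because File.Config returns ConfigFactory.empty() when the settings file is missing, callers can safely layer it over defaults with withFallback.

object FileConfigUsageSketch extends App {
  import com.typesafe.config.ConfigFactory
  import com.owlandrews.nescala.helpers.File

  // Hypothetical defaults; File.Config falls back to an empty Config if the file is absent.
  val defaults = ConfigFactory.parseString("""scale = 2""")
  val settings = File
    .Config(s"${File.ApplicationFolder}/settings.conf")
    .withFallback(defaults)

  println(settings.getInt("scale")) // 2 when no settings file exists yet
}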
Example 78
Source File: SlickJdbcSpec.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.transport.amqpjdbc.slick.helpers

import com.typesafe.config.ConfigFactory
import org.scalatest._
import slick.jdbc.{HsqldbProfile, JdbcBackend, JdbcProfile}

trait SlickJdbcSpec extends fixture.FlatSpec {
  import scala.concurrent.ExecutionContext.Implicits.global

  private val _profile = HsqldbProfile

  protected def profile: JdbcProfile = _profile

  override protected def withFixture(test: OneArgTest): Outcome = {
    val config = ConfigFactory.load()
    val db = JdbcBackend.Database.forConfig("db", config)
    try {
      new DatabaseInitializer(db).initDatabase()
      test(createFixture(db))
    } finally {
      db.close()
    }
  }

  protected def createFixture(db: JdbcBackend.Database): FixtureParam
} 
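
For context, a hedged sketch of the kind of db block that JdbcBackend.Database.forConfig("db", config) expects; the HSQLDB settings below are illustrative and not taken from the project's test resources.

object SlickDbConfigSketch extends App {
  import com.typesafe.config.ConfigFactory
  import slick.jdbc.JdbcBackend

  // Minimal in-memory HSQLDB config; connectionPool is disabled so no pool library is required.
  val config = ConfigFactory.parseString(
    """
      |db {
      |  driver   = "org.hsqldb.jdbc.JDBCDriver"
      |  url      = "jdbc:hsqldb:mem:testdb"
      |  user     = "sa"
      |  password = ""
      |  connectionPool = disabled
      |}
      |""".stripMargin
  )

  val db = JdbcBackend.Database.forConfig("db", config)
  db.close()
}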
Example 79
Source File: HTTPResponseStream.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package akkahttp

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.common.{EntityStreamingSupport, JsonEntityStreamingSupport}
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives.{complete, get, logRequestResult, path, _}
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Flow, Sink, Source}
import com.typesafe.config.ConfigFactory
import spray.json.DefaultJsonProtocol

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}


object HTTPResponseStream extends App with DefaultJsonProtocol with SprayJsonSupport {
  implicit val system = ActorSystem("HTTPResponseStream")
  implicit val executionContext = system.dispatcher

  //JSON Protocol and streaming support
  final case class ExamplePerson(name: String)

  implicit def examplePersonFormat = jsonFormat1(ExamplePerson.apply)

  implicit val jsonStreamingSupport: JsonEntityStreamingSupport = EntityStreamingSupport.json()

  val (address, port) = ("127.0.0.1", 8080)
  server(address, port)
  client(address, port)

  def client(address: String, port: Int): Unit = {
    val requestParallelism = ConfigFactory.load.getInt("akka.http.host-connection-pool.max-connections")

    val requests: Source[HttpRequest, NotUsed] = Source
      .fromIterator(() =>
        Range(0, requestParallelism).map(i => HttpRequest(uri = Uri(s"http://$address:$port/download/$i"))).iterator
      )

    // Run singleRequest and completely consume response elements
    def runRequestDownload(req: HttpRequest) =
      Http()
        .singleRequest(req)
        .flatMap { response =>
          val unmarshalled: Future[Source[ExamplePerson, NotUsed]] = Unmarshal(response).to[Source[ExamplePerson, NotUsed]]
          val source: Source[ExamplePerson, Future[NotUsed]] = Source.futureSource(unmarshalled)
          source.via(processorFlow).runWith(printSink)
        }

    requests
      .mapAsync(requestParallelism)(runRequestDownload)
      .runWith(Sink.ignore)
  }


  val printSink = Sink.foreach[ExamplePerson] { each: ExamplePerson => println(s"Client processed element: $each") }

  val processorFlow: Flow[ExamplePerson, ExamplePerson, NotUsed] = Flow[ExamplePerson].map {
    each: ExamplePerson => {
      //println(s"Process: $each")
      each
    }
  }


  def server(address: String, port: Int): Unit = {

    def routes: Route = logRequestResult("httpecho") {
      path("download" / Segment) { id: String =>
        get {
          println(s"Server received request with id: $id, stream response...")
          extractRequest { r: HttpRequest =>
            val finishedWriting = r.discardEntityBytes().future
            onComplete(finishedWriting) { done =>
              // Limit the response by appending e.g. .take(5)
              val responseStream: Stream[ExamplePerson] = Stream.continually(ExamplePerson(s"request:$id"))
              complete(Source(responseStream).throttle(1, 1.second, 1, ThrottleMode.shaping))
            }
          }
        }
      }
    }

    val bindingFuture = Http().bindAndHandle(routes, address, port)
    bindingFuture.onComplete {
      case Success(b) =>
        println("Server started, listening on: " + b.localAddress)
      case Failure(e) =>
        println(s"Server could not bind to: $address:$port. Exception message: ${e.getMessage}")
        system.terminate()
    }
  }
} 
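
The client above sizes its request parallelism from akka.http.host-connection-pool.max-connections, a key that ships in Akka HTTP's reference configuration. A minimal sketch, assuming the default class loader, of overriding that key by layering a parsed string over ConfigFactory.load(); passing the result to ActorSystem("HTTPResponseStream", tuned) would also make the override visible to the connection pool.

import com.typesafe.config.ConfigFactory

// Hedged sketch: raises the pool limit for this run only; every other key keeps its default.
val tuned = ConfigFactory
  .parseString("akka.http.host-connection-pool.max-connections = 16")
  .withFallback(ConfigFactory.load())
val requestParallelism = tuned.getInt("akka.http.host-connection-pool.max-connections")
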
Example 80
Source File: BasicParams.scala    From spatial   with MIT License 5 votes vote down vote up
package spatial.tests.dse

import spatial.dsl._
import com.typesafe.config.ConfigFactory
import pureconfig._
import spatial.util.spatialConfig
import spatial.metadata.params._
import scala.reflect.ClassTag

class BasicParamsV1 extends BasicParams(1,4,64,8,true)
class BasicParamsV2 extends BasicParams(1,4,64,8,false)
class BasicParamsV3 extends BasicParams(2,4,64,8,true)


@spatial abstract class BasicParams(val op: scala.Int, val ip: scala.Int, val bs: scala.Int, val lp: scala.Int, pipeline: scala.Boolean) extends SpatialTest {
  override def dseModelArgs: Args = "640"
  override def finalModelArgs: Args = "640"
  override def runtimeArgs: Args = "640"
  type X = FixPt[TRUE,_32,_0]

  def dotproduct[T:Num](aIn: Array[T], bIn: Array[T]): T = {
    // Can be overridden using --param-path=fileName at the command line
    val OP = op (1 -> 3)
    val IP = ip (2 -> 2 -> 8)
    val B  = bs (32 -> 32 -> 192)
    val LP = lp (1, 2, 4, 8, 16)

    //saveParams(s"$SPATIAL_HOME/saved.param") // Store used params to file

    val size = aIn.length
    val sizePlus1 = size + 1; bound(sizePlus1) = 640

    val N = ArgIn[Int]
    val tileSizeAsArg = ArgIn[Int]
    setArg(N, size + 1)
    setArg(tileSizeAsArg, B)

    val a = DRAM[T](N)
    val b = DRAM[T](N)
    val out0 = ArgOut[T]
    setMem(a, aIn)
    setMem(b, bIn)

    Accel {
      val accO = Reg[T](0.to[T])
      if (pipeline) {
        out0 := Reduce(accO)(N by B par OP){i =>
          val aBlk = SRAM[T](B)
          val bBlk = SRAM[T](B)
          Parallel {
            Pipe{if (size > 0) aBlk load a(i::i+tileSizeAsArg par LP)}
            bBlk load b(i::i+B par LP)
          }
          val accI = Reg[T](0.to[T])
          Reduce(accI)(B par IP){ii => aBlk(ii) * bBlk(ii) }{_+_}
        }{_+_}
      }
      else {
        out0 := Sequential.Reduce(accO)(N by B par OP){i =>
          val aBlk = SRAM[T](B)
          val bBlk = SRAM[T](B)
          Parallel {
            aBlk load a(i::i+B par LP)
            bBlk load b(i::i+B par LP)
          }
          val accI = Reg[T](0.to[T])
          Reduce(accI)(B par IP){ii => aBlk(ii) * bBlk(ii) }{_+_}
        }{_+_}        
      }
    }
    getArg(out0)
  }


  def main(args: Array[String]): Unit = {
    val N = args(0).to[Int]
    val a = Array.fill(N){ random[X](4) }
    val b = Array.fill(N){ random[X](4) }

    val result0 = dotproduct(a, b)
    val gold = a.zip(b){_*_}.reduce{_+_}

    println("expected: " + gold)
    println("result0: " + result0)

    val cksum = gold == result0
    println("PASS: " + cksum + " (BasicParams)")
    assert(cksum)
  }
} 
Example 81
Source File: SqlContext.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component

import java.util.UUID

import com.typesafe.config.ConfigFactory
import io.eels.component.jdbc.{JdbcSink, JdbcSource}
import io.eels.datastream.DataStream

class SqlContext {
  Class.forName("org.h2.Driver")

  val config = ConfigFactory.load()
  val disk = config.getBoolean("eel.sqlContext.writeToDisk")
  val dataDirectory = config.getString("eel.sqlContext.dataDirectory")
  val ignoreCase = config.getBoolean("eel.sqlContext.ignoreCase").toString().toUpperCase()

  val uri = if (disk) {
    s"jdbc:h2:$dataDirectory/sqlcontext${UUID.randomUUID().toString().replace("-", "")};IGNORECASE=$ignoreCase;DB_CLOSE_DELAY=-1"
  } else {
    s"jdbc:h2:mem:sqlcontext${UUID.randomUUID().toString().replace("-", "")};IGNORECASE=$ignoreCase;DB_CLOSE_DELAY=-1"
  }

  def registerFrame(name: String, ds: DataStream): Unit = {
    ds.to(JdbcSink(uri, name).withCreateTable(true))
  }

  def sql(query: String): DataStream = JdbcSource(uri, query).toDataStream()
} 
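
A minimal sketch (not from the original source) of overriding the keys SqlContext reads before constructing it. System properties take precedence in ConfigFactory.load() once the cache is invalidated, the same pattern AvroSourceTest uses below for eel.avro.java.string; whether these particular overrides make sense depends on the surrounding application.

import com.typesafe.config.ConfigFactory

// Hedged sketch: the keys are exactly those read by SqlContext above.
System.setProperty("eel.sqlContext.writeToDisk", "false")
System.setProperty("eel.sqlContext.ignoreCase", "true")
ConfigFactory.invalidateCaches()
val ctx = new SqlContext() // builds an in-memory H2 URL with IGNORECASE=TRUE
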
Example 82
Source File: AvroDeserializer.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.avro

import com.typesafe.config.ConfigFactory
import io.eels.Row
import io.eels.schema.StructType
import org.apache.avro.Schema.Field
import org.apache.avro.generic.GenericRecord
import org.apache.avro.util.Utf8

import scala.collection.JavaConverters._


class AvroDeserializer(useJavaString: Boolean = ConfigFactory.load().getBoolean("eel.avro.java.string")) {

  val config = ConfigFactory.load()
  val deserializeAsNullable = config.getBoolean("eel.avro.deserializeAsNullable")
  var schema: StructType = null
  var fields: Array[Field] = null
  var range: Range = null

  def toScala(value: Any): Any = {
    value match {
      case record: GenericRecord => toValues(record)
      case utf8: Utf8 if useJavaString => utf8.toString
      case col: java.util.Collection[Any] => col.asScala.toVector.map(toScala)
      case map: java.util.Map[_, _] => map.asScala.toMap.map { case (k, v) => toScala(k) -> toScala(v) }
      case other => other
    }
  }

  def toValues(record: GenericRecord): Vector[Any] = {
    val vector = Vector.newBuilder[Any]
    for (k <- 0 until record.getSchema.getFields.size) {
      val value = record.get(k)
      vector += toScala(value)
    }
    vector.result
  }

  def toRow(record: GenericRecord): Row = {
    // take the schema from the first record
    if (schema == null) {
      schema = AvroSchemaFns.fromAvroSchema(record.getSchema, deserializeAsNullable)
    }
    Row(schema, toValues(record))
  }
} 
Example 83
Source File: JdbcSink.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.jdbc

import java.sql.{Connection, DriverManager}

import com.sksamuel.exts.Logging
import com.typesafe.config.ConfigFactory
import io.eels.Sink
import io.eels.component.jdbc.dialect.{GenericJdbcDialect, JdbcDialect}
import io.eels.schema.StructType
import com.sksamuel.exts.OptionImplicits._

object JdbcSink extends Logging {

  private val config = ConfigFactory.load()
  private val warnIfMissingRewriteBatchedStatements = config.getBoolean("eel.jdbc.sink.warnIfMissingRewriteBatchedStatements")

  def apply(url: String, table: String): JdbcSink = {
    if (!url.contains("rewriteBatchedStatements")) {
      if (warnIfMissingRewriteBatchedStatements) {
        logger.warn("JDBC connection string does not contain the property 'rewriteBatchedStatements=true' which can be a major performance boost when writing data via JDBC. " +
          "Add this property to your connection string, or to remove this warning set eel.jdbc.warnIfMissingRewriteBatchedStatements=false")
      }
    }
    JdbcSink(() => DriverManager.getConnection(url), table)
  }
}

case class JdbcSink(connFn: () => Connection,
                    table: String,
                    createTable: Boolean = false,
                    dropTable: Boolean = false,
                    batchSize: Int = 1000, // the number of rows before a commit is made
                    batchesPerCommit: Int = 0, // 0 means commit at the end, otherwise how many batches before a commit
                    dialect: Option[JdbcDialect] = None,
                    threads: Int = 4) extends Sink with Logging {

  private val config = ConfigFactory.load()
  private val bufferSize = config.getInt("eel.jdbc.sink.bufferSize")
  private val autoCommit = config.getBoolean("eel.jdbc.sink.autoCommit")

  def withCreateTable(createTable: Boolean): JdbcSink = copy(createTable = createTable)
  def withDropTable(dropTable: Boolean): JdbcSink = copy(dropTable = dropTable)
  def withBatchSize(batchSize: Int): JdbcSink = copy(batchSize = batchSize)
  def withThreads(threads: Int): JdbcSink = copy(threads = threads)
  def withBatchesPerCommit(commitSize: Int): JdbcSink = copy(batchesPerCommit = commitSize)
  def withDialect(dialect: JdbcDialect): JdbcSink = copy(dialect = dialect.some)

  override def open(schema: StructType) =
    new JdbcSinkWriter(schema, connFn, table, createTable, dropTable, dialect.getOrElse(new GenericJdbcDialect), threads, batchSize, batchesPerCommit, autoCommit, bufferSize)
} 
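
A minimal usage sketch (not from the original source): the MySQL URL is illustrative, and rewriteBatchedStatements=true is included purely so the companion object's warning above is not triggered; the fluent helpers defined on the case class tune batching and parallelism.

// Hedged sketch: placeholder connection string and table name.
val sink = JdbcSink("jdbc:mysql://localhost:3306/test?rewriteBatchedStatements=true", "people")
  .withCreateTable(true)
  .withBatchSize(500)
  .withThreads(2)
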
Example 84
Source File: ParquetWriterConfig.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.parquet

import com.sksamuel.exts.Logging
import com.sksamuel.exts.config.ConfigSupport
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.parquet.hadoop.ParquetWriter
import org.apache.parquet.hadoop.metadata.CompressionCodecName

case class ParquetWriterConfig(blockSize: Int,
                               pageSize: Int,
                               compressionCodec: CompressionCodecName,
                               enableDictionary: Boolean,
                               validating: Boolean)

object ParquetWriterConfig extends Logging with ConfigSupport {

  def apply(): ParquetWriterConfig = apply(ConfigFactory.load())
  def apply(config: Config): ParquetWriterConfig = {

    val blockSize: Int = config.getIntOrElse("eel.parquet.blockSize", ParquetWriter.DEFAULT_BLOCK_SIZE)
    val pageSize: Int = config.getIntOrElse("eel.parquet.pageSize", ParquetWriter.DEFAULT_PAGE_SIZE)
    val compressionCodec = config.getString("eel.parquet.compressionCodec").toLowerCase() match {
      case "gzip" => CompressionCodecName.GZIP
      case "lzo" => CompressionCodecName.LZO
      case "snappy" => CompressionCodecName.SNAPPY
      case _ => CompressionCodecName.UNCOMPRESSED
    }

    logger.debug(s"Parquet writer will use blockSize = $blockSize; pageSize = $pageSize; compressionCodec = $compressionCodec")
    ParquetWriterConfig(blockSize, pageSize, compressionCodec, true, true)
  }
} 
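
A minimal sketch (not from the original source) of exercising the apply(config) overload with an explicit codec layered over the loaded defaults; any codec string other than gzip, lzo or snappy falls through to UNCOMPRESSED, as the match above shows.

import com.typesafe.config.ConfigFactory

// Hedged sketch: withFallback keeps blockSize and pageSize at their loaded defaults.
val overridden = ConfigFactory
  .parseString("eel.parquet.compressionCodec = snappy")
  .withFallback(ConfigFactory.load())
val writerConfig = ParquetWriterConfig(overridden) // compressionCodec == SNAPPY
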
Example 85
Source File: AvroParquetRowWriter.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.parquet.avro

import com.sksamuel.exts.Logging
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.avro.Schema
import org.apache.avro.generic.GenericRecord
import org.apache.hadoop.fs.{FileSystem, Path}


class AvroParquetRowWriter(path: Path,
                           avroSchema: Schema)(implicit fs: FileSystem) extends Logging {

  private val config: Config = ConfigFactory.load()
  private val skipCrc = config.getBoolean("eel.parquet.skipCrc")
  logger.info(s"Parquet writer will skipCrc = $skipCrc")

  private val writer = AvroParquetWriterFn(path, avroSchema)

  def write(record: GenericRecord): Unit = {
    writer.write(record)
  }

  def close(): Unit = {
    writer.close()
    if (skipCrc) {
      val crc = new Path("." + path.toString() + ".crc")
      logger.debug("Deleting crc $crc")
      if (fs.exists(crc))
        fs.delete(crc, false)
    }
  }
} 
Example 86
Source File: AvroParquetSink.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.parquet.avro

import com.sksamuel.exts.Logging
import com.typesafe.config.ConfigFactory
import io.eels.component.avro.{AvroSchemaFns, RowSerializer}
import io.eels.schema.StructType
import io.eels.{Row, Sink, SinkWriter}
import org.apache.hadoop.fs.{FileSystem, Path}

object AvroParquetSink {
  def apply(path: String)(implicit fs: FileSystem): AvroParquetSink = AvroParquetSink(new Path(path))
}

case class AvroParquetSink(path: Path, overwrite: Boolean = false)(implicit fs: FileSystem) extends Sink with Logging {

  def withOverwrite(overwrite: Boolean): AvroParquetSink = copy(overwrite = overwrite)

  override def open(schema: StructType): SinkWriter = new SinkWriter {

    private val config = ConfigFactory.load()
    private val caseSensitive = config.getBoolean("eel.parquet.caseSensitive")

    if (overwrite && fs.exists(path))
      fs.delete(path, false)

    private val avroSchema = AvroSchemaFns.toAvroSchema(schema, caseSensitive = caseSensitive)
    private val writer = new AvroParquetRowWriter(path, avroSchema)
    private val serializer = new RowSerializer(avroSchema)

    override def write(row: Row): Unit = {
      this.synchronized {
        val record = serializer.serialize(row)
        writer.write(record)
      }
    }

    override def close(): Unit = {
      writer.close()
    }
  }
} 
Example 87
Source File: AvroSourceTest.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.avro

import java.nio.file.Paths

import com.typesafe.config.ConfigFactory
import io.eels.schema.{Field, StructType}
import org.apache.avro.util.Utf8
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.scalatest.{Matchers, WordSpec}

class AvroSourceTest extends WordSpec with Matchers {

  private implicit val conf = new Configuration()
  private implicit val fs = FileSystem.get(new Configuration())

  "AvroSource" should {
    "read schema" in {
      val people = AvroSource(Paths.get(getClass.getResource("/test.avro").toURI).toAbsolutePath)
      people.schema shouldBe StructType(Field("name", nullable = false), Field("job", nullable = false), Field("location", nullable = false))
    }
    "read strings as java.lang.String when eel.avro.java.string is true" in {
      System.setProperty("eel.avro.java.string", "true")
      ConfigFactory.invalidateCaches()
      val people = AvroSource(Paths.get(getClass.getResource("/test.avro").toURI).toAbsolutePath).toDataStream().toSet
      people.map(_.values) shouldBe Set(
        List("clint eastwood", "actor", "carmel"),
        List("elton john", "musician", "pinner"),
        List("issac newton", "scientist", "heaven")
      )
      System.setProperty("eel.avro.java.string", "false")
      ConfigFactory.invalidateCaches()
    }
    "read strings as utf8 when eel.avro.java.string is false" in {
      System.setProperty("eel.avro.java.string", "false")
      ConfigFactory.invalidateCaches()
      val people = AvroSource(Paths.get(getClass.getResource("/test.avro").toURI).toAbsolutePath).toDataStream().toSet
      people.map(_.values) shouldBe Set(
        List(new Utf8("clint eastwood"), new Utf8("actor"), new Utf8("carmel")),
        List(new Utf8("elton john"), new Utf8("musician"), new Utf8("pinner")),
        List(new Utf8("issac newton"), new Utf8("scientist"), new Utf8("heaven"))
      )
      System.setProperty("eel.avro.java.string", "true")
      ConfigFactory.invalidateCaches()
    }
  }
} 
Example 88
Source File: AvroDeserializerTest.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.avro

import com.typesafe.config.ConfigFactory
import io.eels.Row
import io.eels.schema._
import org.apache.avro.generic.GenericData
import org.scalatest.{Matchers, WordSpec}

class AvroDeserializerTest extends WordSpec with Matchers {

  private  val config = ConfigFactory.parseString(""" eel.avro.fillMissingValues = true """)

  "toRow" should {
    "create eel row from supplied avro record" in {
      val schema = StructType(Field("a", nullable = false), Field("b", nullable = false), Field("c", nullable = false))
      val record = new GenericData.Record(AvroSchemaFns.toAvroSchema(schema))
      record.put("a", "aaaa")
      record.put("b", "bbbb")
      record.put("c", "cccc")
      val row = new AvroDeserializer(true).toRow(record)
      row.schema shouldBe schema
      row shouldBe Row(schema, "aaaa", "bbbb", "cccc")
    }
    "support arrays" in {
      val schema = StructType(Field("a"), Field("b", ArrayType(BooleanType)))
      val record = new GenericData.Record(AvroSchemaFns.toAvroSchema(schema))
      record.put("a", "aaaa")
      record.put("b", Array(true, false))
      new AvroDeserializer().toRow(record).values.head shouldBe "aaaa"
      new AvroDeserializer().toRow(record).values.last.asInstanceOf[Array[Boolean]].toList shouldBe List(true, false)
    }
  }
} 
Example 89
Source File: KuduSink.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.kudu

import com.sksamuel.exts.Logging
import com.typesafe.config.{Config, ConfigFactory}
import io.eels.schema._
import io.eels.{Row, Sink, SinkWriter}
import org.apache.kudu.client.{CreateTableOptions, KuduClient}

import scala.collection.JavaConverters._

object KuduSinkConfig {
  def apply(): KuduSinkConfig = apply(ConfigFactory.load())
  def apply(config: Config): KuduSinkConfig = KuduSinkConfig(
    WriteMode.valueOf(config.getString("eel.kudu.write-mode"))
  )
}
case class KuduSinkConfig(writeMode: WriteMode)

case class KuduSink(tableName: String,
                    config: KuduSinkConfig)(implicit client: KuduClient) extends Sink with Logging {

  override def open(structType: StructType): SinkWriter = new SinkWriter {

    val schema = KuduSchemaFns.toKuduSchema(structType)

    private def deleteTable(): Unit = if (client.tableExists(tableName)) client.deleteTable(tableName)
    private def createTable(): Unit = {
      if (!client.tableExists(tableName)) {
        logger.debug(s"Creating table $tableName")
        val options = new CreateTableOptions()
          .setNumReplicas(1)
          .setRangePartitionColumns(structType.fields.filter(_.key).map(_.name).asJava)
        client.createTable(tableName, schema, options)
      }
    }


    config.writeMode match {
      case WriteMode.OVERWRITE =>
        deleteTable()
        createTable()
      case WriteMode.CREATE =>
        createTable()
      case _ =>
    }

    val table = client.openTable(tableName)
    val session = client.newSession()

    override def write(row: Row): Unit = {
      val insert = table.newInsert()
      val partial = insert.getRow
      for ((field, index) <- row.schema.fields.zipWithIndex) {
        val value = row.values(index)
        if (value == null) {
          partial.setNull(index)
        } else {
          field.dataType match {
            case BinaryType => KuduBinaryWriter.write(partial, index, value)
            case BooleanType => KuduBooleanWriter.write(partial, index, value)
            case _: ByteType => KuduByteWriter.write(partial, index, value)
            case DoubleType => KuduDoubleWriter.write(partial, index, value)
            case FloatType => KuduFloatWriter.write(partial, index, value)
            case _: IntType => KuduIntWriter.write(partial, index, value)
            case _: LongType => KuduLongWriter.write(partial, index, value)
            case _: ShortType => KuduShortWriter.write(partial, index, value)
            case StringType => KuduStringWriter.write(partial, index, value)
            case TimeMicrosType => KuduLongWriter.write(partial, index, value)
            case TimeMillisType => KuduLongWriter.write(partial, index, value)
            case TimestampMillisType => KuduLongWriter.write(partial, index, value)
          }
        }
      }
      session.apply(insert)
    }

    override def close(): Unit = {
      session.close()
    }
  }
}

object KuduSink {
  def apply(master: String, table: String, config: KuduSinkConfig = KuduSinkConfig()): KuduSink = {
    implicit val client = new KuduClient.KuduClientBuilder(master).build()
    KuduSink(table, config)
  }
} 
Example 90
Source File: HivePartitionScanner.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.hive

import com.sksamuel.exts.Logging
import com.typesafe.config.{Config, ConfigFactory}
import io.eels.component.hive.partition.PartitionMetaData
import io.eels.schema.PartitionConstraint
import org.apache.hadoop.fs.{FileSystem, LocatedFileStatus}

// scans partitions for files, returning the files and the meta data object for each partition
class HivePartitionScanner(implicit fs: FileSystem) extends Logging {

  private val config: Config = ConfigFactory.load()
  private val missingPartitionAction: String = config.getString("eel.hive.source.missingPartitionAction")

  def scan(partitions: Seq[PartitionMetaData],
           constraints: Seq[PartitionConstraint] = Nil): Map[PartitionMetaData, Seq[LocatedFileStatus]] = {
    logger.debug(s"Scanning ${partitions.size} partitions for applicable files ${partitions.map(_.location).mkString(", ").take(100)}")

    // first we filter out any partitions not matching the constraints
    val filteredPartitions = partitions.filter { meta =>
      constraints.forall(_.eval(meta.partition))
    }
    logger.debug(s"Filtered partitions: ${filteredPartitions.map(_.location).mkString(", ")})")

    // next, we check that the directories that the partitions point to actually exist
    // this will avoid a situation where a location exists in the metastore but not on disk
    val extantPartitions = filteredPartitions.filter { partition =>
      if (fs.exists(partition.location)) {
        true
      } else {
        if (missingPartitionAction == "error") {
          throw new IllegalStateException(s"Partition [${partition.name}] was specified in the hive metastore at [${partition.location}] but did not exist on disk. To disable these exceptions set eel.hive.source.missingPartitionAction=warn or eel.hive.source.missingPartitionAction=none")
        } else if (missingPartitionAction == "warn") {
          logger.warn(s"Partition [${partition.name}] was specified in the hive metastore at [${partition.location}] but did not exist on disk. To disable these warnings set eel.hive.source.missingPartitionAction=none")
          false
        } else {
          false
        }
      }
    }

    // next we grab all the data files from each of these partitions
    extantPartitions.map { meta =>
      meta -> HiveFileScanner(meta.location, false)
    }.toMap
  }
} 
Example 91
Source File: HiveFileScanner.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.hive

import com.sksamuel.exts.Logging
import com.typesafe.config.ConfigFactory
import io.eels.util.HdfsIterator
import org.apache.hadoop.fs.{FileSystem, LocatedFileStatus, Path}

// given a hadoop path, will look for files inside that path that match the
// configured settings for hidden files
// does not return directories
object HiveFileScanner extends Logging {

  private val config = ConfigFactory.load()
  private val ignoreHiddenFiles = config.getBoolean("eel.hive.source.ignoreHiddenFiles")
  private val hiddenFilePattern = config.getString("eel.hive.source.hiddenFilePattern")

  // returns true if the given file should be considered based on the config settings
  private def skip(file: LocatedFileStatus): Boolean = {
    file.getLen == 0L || ignoreHiddenFiles && file.getPath.getName.matches(hiddenFilePattern)
  }

  def apply(path: Path, recursive: Boolean)(implicit fs: FileSystem): Seq[LocatedFileStatus] = {
    logger.debug(s"Scanning $path, filtering=$ignoreHiddenFiles, pattern=$hiddenFilePattern")
    val files: List[LocatedFileStatus] = if (fs.exists(path)) {
      val files = fs.listFiles(path, recursive)
      HdfsIterator.remote(files)
          .filter(_.isFile)
          .filterNot(skip)
          .toList
    } else {
      Nil
    }
    logger.debug(s"Scanner found ${files.size} files")
    files
  }
} 
Example 92
Source File: HivePartitionPublisher.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.hive

import com.sksamuel.exts.Logging
import com.typesafe.config.ConfigFactory
import io.eels.Row
import io.eels.datastream.{Publisher, Subscriber, Subscription}
import io.eels.schema.StructType
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hive.metastore.IMetaStoreClient

import scala.util.control.NonFatal


class HivePartitionPublisher(dbName: String,
                             tableName: String,
                             projectionSchema: StructType,
                             partitionKeys: List[String], // partition keys for this table, used to map the partition values back to a map
                             dialect: HiveDialect // used to open up the files to check they exist if checkDataForPartitionOnlySources is true
                            )
                            (implicit fs: FileSystem,
                             client: IMetaStoreClient) extends Publisher[Seq[Row]] with Logging {

  private val config = ConfigFactory.load()

  // if this is true, then we will still check that some files exist for each partition, to avoid
  // a situation where the partitions have been created in the hive metastore, but no actual
  // data has been written using those yet.
  private val partitionPartFileCheck = config.getBoolean("eel.hive.source.checkDataForPartitionOnlySources")
  logger.info(s"eel.hive.source.checkDataForPartitionOnlySources=$partitionPartFileCheck")

  // returns true if the partition exists on disk
  private def isPartitionPhysical(part: org.apache.hadoop.hive.metastore.api.Partition): Boolean = {
    val location = new Path(part.getSd.getLocation)
    logger.debug(s"Checking that partition $location has been created on disk...")
    try {
      val exists = fs.exists(location)
      if (exists) {
        logger.debug("...exists")
      } else {
        logger.debug("...not found")
      }
      exists
    } catch {
      case NonFatal(e) =>
        logger.warn(s"Error reading $location", e)
        false
    }
  }

  override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = client.synchronized {
    try {

      import scala.collection.JavaConverters._

      // each row will contain just the values from the metastore
      val rows = client.listPartitions(dbName, tableName, Short.MaxValue).asScala.filter { part =>
        !partitionPartFileCheck || isPartitionPhysical(part)
      }.map { part =>
        // the partition values are assumed to be the same order as the supplied partition keys
        // first we build a map of the keys to values, then use that map to return a Row with
        // values in the order set by the fieldNames parameter
        val map = partitionKeys.zip(part.getValues.asScala).toMap
        Row(projectionSchema, projectionSchema.fieldNames.map(map(_)).toVector)
      }

      logger.debug(s"After scanning partitions and files we have ${rows.size} rows")
      subscriber.subscribed(Subscription.empty)
      rows.iterator.grouped(10).foreach(subscriber.next)
      subscriber.completed()
    } catch {
      case t: Throwable => subscriber.error(t)
    }
  }
} 
Example 93
Source File: OrcWriter.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.orc

import java.util.concurrent.atomic.AtomicInteger
import java.util.function.IntUnaryOperator

import com.sksamuel.exts.Logging
import com.typesafe.config.ConfigFactory
import io.eels.Row
import io.eels.schema.StructType
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector
import org.apache.orc.{OrcConf, OrcFile, TypeDescription}

import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer

// performs the actual write out of orc data, to be used by an orc sink
class OrcWriter(path: Path,
                structType: StructType,
                options: OrcWriteOptions)(implicit conf: Configuration) extends Logging {

  private val schema: TypeDescription = OrcSchemaFns.toOrcSchema(structType)
  logger.trace(s"Creating orc writer for schema $schema")

  private val batchSize = {
    val size = ConfigFactory.load().getInt("eel.orc.sink.batchSize")
    Math.max(Math.min(1024, size), 1)
  }
  logger.debug(s"Orc writer will use batchsize=$batchSize")

  private val buffer = new ArrayBuffer[Row](batchSize)
  private val serializers = schema.getChildren.asScala.map(OrcSerializer.forType).toArray
  private val batch = schema.createRowBatch(batchSize)

  OrcConf.COMPRESSION_STRATEGY.setString(conf, options.compressionStrategy.name)
  OrcConf.COMPRESS.setString(conf, options.compressionKind.name)
  options.encodingStrategy.map(_.name).foreach(OrcConf.ENCODING_STRATEGY.setString(conf, _))
  options.compressionBufferSize.foreach(OrcConf.BUFFER_SIZE.setLong(conf, _))
  private val woptions = OrcFile.writerOptions(conf).setSchema(schema)

  options.rowIndexStride.foreach { size =>
    woptions.rowIndexStride(size)
    logger.debug(s"Using stride size = $size")
  }

  if (options.bloomFilterColumns.nonEmpty) {
    woptions.bloomFilterColumns(options.bloomFilterColumns.mkString(","))
    logger.debug(s"Using bloomFilterColumns = $options.bloomFilterColumns")
  }
  private lazy val writer = OrcFile.createWriter(path, woptions)

  private val counter = new AtomicInteger(0)

  def write(row: Row): Unit = {
    buffer.append(row)
    if (buffer.size == batchSize)
      flush()
  }

  def records: Int = counter.get()

  def flush(): Unit = {

    def writecol[T <: ColumnVector](rowIndex: Int, colIndex: Int, row: Row): Unit = {
      val value = row.values(colIndex)
      val vector = batch.cols(colIndex).asInstanceOf[T]
      val serializer = serializers(colIndex).asInstanceOf[OrcSerializer[T]]
      serializer.writeToVector(rowIndex, vector, value)
    }

    // don't use foreach here, using old school for loops for perf
    for (rowIndex <- buffer.indices) {
      val row = buffer(rowIndex)
      for (colIndex <- batch.cols.indices) {
        writecol(rowIndex, colIndex, row)
      }
    }

    batch.size = buffer.size
    writer.addRowBatch(batch)
    counter.updateAndGet(new IntUnaryOperator {
      override def applyAsInt(operand: Int): Int = operand + batch.size
    })
    buffer.clear()
    batch.reset()
  }

  def close(): Long = {
    if (buffer.nonEmpty)
      flush()
    writer.close()
    val count = writer.getNumberOfRows
    logger.info(s"Orc writer wrote $count rows")
    count
  }
} 
Example 94
Source File: OrcSink.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.orc

import com.sksamuel.exts.Logging
import com.sksamuel.exts.OptionImplicits._
import com.sksamuel.exts.config.ConfigSupport
import com.typesafe.config.ConfigFactory
import io.eels.schema.StructType
import io.eels.{Row, Sink, SinkWriter}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.permission.FsPermission
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.orc.OrcFile.{CompressionStrategy, EncodingStrategy}
import org.apache.orc.OrcProto.CompressionKind

case class OrcWriteOptions(overwrite: Boolean = false,
                           compressionKind: CompressionKind,
                           compressionStrategy: CompressionStrategy,
                           compressionBufferSize: Option[Int],
                           encodingStrategy: Option[EncodingStrategy],
                           bloomFilterColumns: Seq[String] = Nil,
                           permission: Option[FsPermission] = None,
                           inheritPermissions: Option[Boolean] = None,
                           rowIndexStride: Option[Int] = None) {
  def withCompressionKind(kind: CompressionKind): OrcWriteOptions = copy(compressionKind = kind)
  def withCompressionStrategy(strategy: CompressionStrategy): OrcWriteOptions = copy(compressionStrategy = strategy)
  def withCompressionBufferSize(size: Int): OrcWriteOptions = copy(compressionBufferSize = size.some)
  def withEncodingStrategy(strategy: EncodingStrategy): OrcWriteOptions = copy(encodingStrategy = strategy.some)
  def withBloomFilterColumns(bloomFilterColumns: Seq[String]): OrcWriteOptions = copy(bloomFilterColumns = bloomFilterColumns)
  def withRowIndexStride(stride: Int): OrcWriteOptions = copy(rowIndexStride = stride.some)
  def withOverwrite(overwrite: Boolean): OrcWriteOptions = copy(overwrite = overwrite)
  def withPermission(permission: FsPermission): OrcWriteOptions = copy(permission = permission.some)
  def withInheritPermission(inheritPermissions: Boolean): OrcWriteOptions = copy(inheritPermissions = inheritPermissions.some)
}

object OrcWriteOptions extends ConfigSupport {

  // creates the options from the loaded Typesafe config (reference.conf defaults plus any overrides)
  def apply(): OrcWriteOptions = {
    val config = ConfigFactory.load()
    OrcWriteOptions(
      false,
      CompressionKind valueOf config.getString("eel.orc.writer.compression-kind"),
      CompressionStrategy valueOf config.getString("eel.orc.writer.compression-strategy"),
      config.getIntOpt("eel.orc.writer.compression-buffer-size"),
      config.getStringOpt("eel.orc.writer.encoding-strategy").map(EncodingStrategy.valueOf)
    )
  }
}

case class OrcSink(path: Path, options: OrcWriteOptions = OrcWriteOptions())
                  (implicit fs: FileSystem, conf: Configuration) extends Sink with Logging {

  // -- convenience options --
  def withCompressionKind(kind: CompressionKind): OrcSink = copy(options = options.copy(compressionKind = kind))
  def withCompressionStrategy(strategy: CompressionStrategy): OrcSink = copy(options = options.copy(compressionStrategy = strategy))
  def withCompressionBufferSize(size: Int): OrcSink = copy(options = options.copy(compressionBufferSize = size.some))
  def withEncodingStrategy(strategy: EncodingStrategy): OrcSink = copy(options = options.copy(encodingStrategy = strategy.some))
  def withBloomFilterColumns(bloomFilterColumns: Seq[String]): OrcSink = copy(options = options.copy(bloomFilterColumns = bloomFilterColumns))
  def withRowIndexStride(stride: Int): OrcSink = copy(options = options.copy(rowIndexStride = stride.some))
  def withOverwrite(overwrite: Boolean): OrcSink = copy(options = options.copy(overwrite = overwrite))
  def withPermission(permission: FsPermission): OrcSink = copy(options = options.copy(permission = permission.some))
  def withInheritPermission(inheritPermissions: Boolean): OrcSink = copy(options = options.copy(inheritPermissions = inheritPermissions.some))

  override def open(schema: StructType, n: Int): Seq[SinkWriter] = {
    if (n == 1) Seq(create(schema, path))
    else List.tabulate(n) { k => create(schema, new Path(path.getParent, path.getName + "_" + k)) }
  }

  override def open(schema: StructType): SinkWriter = create(schema, path)

  private def create(schema: StructType, path: Path): SinkWriter = new SinkWriter {

    if (options.overwrite && fs.exists(path))
      fs.delete(path, false)

    val writer = new OrcWriter(path, schema, options)

    override def write(row: Row): Unit = writer.write(row)
    
    override def close(): Unit = {
      writer.close()
      options.permission match {
        case Some(perm) => fs.setPermission(path, perm)
        case None =>
          if (options.inheritPermissions.getOrElse(false)) {
            val permission = fs.getFileStatus(path.getParent).getPermission
            fs.setPermission(path, permission)
          }
      }
    }
  }
} 
Example 95
Source File: ApplicationContext.scala    From redrock   with Apache License 2.0 5 votes vote down vote up
package com.powertrack

import com.typesafe.config.{Config, ConfigFactory}
import org.apache.hadoop.fs._
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.hive._


object ApplicationContext {

  object Config {
    // Global Application configuration
    val appConf: Config = ConfigFactory.load("redrock-app").getConfig("redrock")
    // Spark specific configuration
    val sparkConf: Config = appConf.getConfig("spark")
    // Elastic Search configuration
    val esConf: Config = appConf.getConfig("elasticsearch")
  }

  val sparkConf = new SparkConf()
  sparkConf.setAppName(Config.appConf.getString("appName") + " - Powertrack")
  sparkConf.set("spark.scheduler.mode", "FAIR")
  sparkConf.set("es.index.auto.create", "false")

  // Spark master resources
  sparkConf.set("spark.executor.memory", s"""${Config.sparkConf.getString("powertrack.executorMemory")}""") // scalastyle:ignore
  sparkConf.set("spark.ui.port", s"""${Config.sparkConf.getString("powertrack.sparkUIPort")}""")
  sparkConf.set("spark.cores.max", s"""${Config.sparkConf.getInt("powertrack.totalCores")}""")

  val sparkContext = new SparkContext(sparkConf)
  val sqlContext = new HiveContext(sparkContext)

  
  val hadoopFS: FileSystem = FileSystem.get(sparkContext.hadoopConfiguration)
} 
Example 96
Source File: ApplicationContext.scala    From redrock   with Apache License 2.0 5 votes vote down vote up
package com.decahose

import com.typesafe.config.{ConfigFactory, Config}
import org.apache.hadoop.fs._
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.hive._


object ApplicationContext {

  object Config {
    // Global Application configuration
    val appConf: Config = ConfigFactory.load("redrock-app").getConfig("redrock")
    // Spark specific configuration
    val sparkConf: Config = appConf.getConfig("spark")
    // Elastic Search configuration
    val esConf: Config = appConf.getConfig("elasticsearch")
  }


  val sparkConf = new SparkConf()
  // sparkConf.setMaster(masterNode)
  sparkConf.setAppName(ApplicationContext.Config.appConf.getString("appName") + " - Decahose")
  sparkConf.set("spark.scheduler.mode", "FAIR")

  // Spark master resources
  sparkConf.set("spark.executor.memory",s"""${ApplicationContext.Config.sparkConf.getString("decahose.executorMemory")}""") // scalastyle:ignore
  sparkConf.set("spark.ui.port",s"""${ApplicationContext.Config.sparkConf.getString("decahose.sparkUIPort")}""") // scalastyle:ignore
  sparkConf.set("spark.cores.max",s"""${ApplicationContext.Config.sparkConf.getInt("decahose.totalCores")}""") // scalastyle:ignore

  // Wait for Elasticsearch response
  sparkConf.set("spark.akka.heartbeat.interval", "10000s")
  sparkConf.set("spark.akka.heartbeat.pauses", "60000s")
  sparkConf.set("spark.akka.threads", "8")
  sparkConf.set("spark.akka.timeout", "1000s")

  // Do not allow schema inference. The schema must be defined in ES before starting the app
  sparkConf.set("es.index.auto.create", "false")
  sparkConf.set("es.batch.size.bytes", "300000000")
  sparkConf.set("es.batch.size.entries", "10000")
  sparkConf.set("es.batch.write.refresh", "false")
  sparkConf.set("es.batch.write.retry.count", "50")
  sparkConf.set("es.batch.write.retry.wait", "500")
  sparkConf.set("es.http.timeout", "5m")
  sparkConf.set("es.http.retries", "50")
  sparkConf.set("es.action.heart.beat.lead", "50")

  val sparkContext = new SparkContext(sparkConf)
  val sqlContext = new HiveContext(sparkContext)

  // config sqlContext
  sqlContext.setConf("spark.sql.shuffle.partitions", s"""${ApplicationContext.Config.sparkConf.getInt("partitionNumber")}""") // scalastyle:ignore
  sqlContext.setConf("spark.sql.codegen", "true")

  // provide access to HDFS file system
  val hadoopFS: FileSystem = FileSystem.get(sparkContext.hadoopConfiguration)
} 
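
A minimal sketch (not from the original source) of the HOCON shape ApplicationContext.Config reads from redrock-app.conf; every path below appears in the code above, while the values are placeholders.

import com.typesafe.config.ConfigFactory

// Hedged sketch: placeholder values only.
val sample = ConfigFactory.parseString(
  """redrock {
    |  appName = "RedRock"
    |  spark {
    |    partitionNumber = 8
    |    decahose { executorMemory = "2g", sparkUIPort = "4040", totalCores = 4 }
    |  }
    |  elasticsearch {}
    |}""".stripMargin)
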
Example 97
Source File: Build.scala    From sbt-reactive-app   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.rp.sbtreactiveapp.magic

import sbt.{ Attributed, File, IO }
import scala.collection.JavaConverters._
import java.net.URL
import com.typesafe.config.{ Config, ConfigFactory }

object Build {
  def annotate(prependInclude: Boolean, unmanagedConfigName: String, config: String): String =
    s"""|# Generated by sbt-reactive-app. To disable this, set the `prependRpConf` sbt key to `""`.
        |
        |""".stripMargin +
      (if (prependInclude) s"""include "$unmanagedConfigName"""" else "") +
      s"""|
          |$config""".stripMargin

  def withHeader(comment: String, config: String): String =
    s"""|# $comment
        |
        |$config""".stripMargin

  def extractRpToolingConf(
    managedConfigNames: Seq[String],
    dependencyClasspath: Seq[Attributed[File]],
    prependInclude: Boolean,
    unmanagedConfigName: String): String = {
    val dependencyClassLoader = new java.net.URLClassLoader(dependencyClasspath.files.map(_.toURI.toURL).toArray)

    val managedConfigs: List[URL] =
      managedConfigNames
        .flatMap(dependencyClassLoader.findResources(_).asScala)
        .toList

    annotate(
      prependInclude,
      unmanagedConfigName,
      (managedConfigs
        .foldLeft(Seq.empty[String]) {
          case (accum, conf) =>
            accum :+ withHeader(conf.toString, IO.readLinesURL(conf).mkString(IO.Newline))
        }).mkString(IO.Newline))
  }

  def makeConfig(dependencyClasspath: Seq[File]): Config = {
    val dependencyClassLoader = new java.net.URLClassLoader(dependencyClasspath.map(_.toURI.toURL).toArray)
    ConfigFactory.load(dependencyClassLoader)
  }
} 
Example 98
Source File: HelloAkka.scala    From sbt-reactive-app   with Apache License 2.0 5 votes vote down vote up
package hello.akka

import akka.cluster.Cluster
import akka.cluster.ClusterEvent._
import akka.actor.{ Actor, ActorSystem, Props }
import akka.discovery._
import com.typesafe.config.ConfigFactory

final case class Greet(name: String)

class GreeterActor extends Actor {
  val cluster = Cluster(context.system)

  override def preStart = {
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
      classOf[MemberEvent], classOf[UnreachableMember])
  }

  override def postStop = {
    cluster.unsubscribe(self)
  }

  def receive = {
    case Greet(name) =>
      println(s"Hello, $name")
    case MemberUp(member) =>
      println(s"Member up: $member")
    case MemberRemoved(member, previousStatus) =>
      println(s"Member down: $member")
    case _: MemberEvent => // ignore
  }
}

object HelloAkka {
  def main(args: Array[String]) = {
    startup()
  }

  def startup() = {
    val system = ActorSystem("ClusterSystem")
    val discovery = ServiceDiscovery(system).discovery
    val actor = system.actorOf(Props[GreeterActor], name = "GreeterActor")

    actor ! Greet("[unnamed]")
  }
} 
Example 99
Source File: SidechainSettingsReader.scala    From Sidechains-SDK   with MIT License 5 votes vote down vote up
package com.horizen

import java.io.File
import java.net.URL
import java.util.{Optional => JOptional}

import com.typesafe.config.{Config, ConfigFactory}
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.ArbitraryTypeReader._
import scorex.core.settings.{ScorexSettings, SettingsReaders}
import scorex.util.ScorexLogging

import scala.compat.java8.OptionConverters.toScala


object SidechainSettingsReader
  extends ScorexLogging
    with SettingsReaders
{
  protected val sidechainSettingsName = "sidechain-sdk-settings.conf"

  def fromConfig(config: Config): SidechainSettings = {
    val webSocketConnectorConfiguration = config.as[WebSocketSettings]("scorex.websocket")
    val scorexSettings = config.as[ScorexSettings]("scorex")
    val genesisSetting = config.as[GenesisDataSettings]("scorex.genesis")
    val backwardTransfer = config.as[withdrawalEpochCertificateSettings]("scorex.withdrawalEpochCertificate")
    val walletSetting = config.as[WalletSettings]("scorex.wallet")
    SidechainSettings(scorexSettings, genesisSetting, webSocketConnectorConfiguration, backwardTransfer, walletSetting)
  }

  def readConfigFromPath(userConfigPath: String, applicationConfigPath: Option[String]): Config = {

    val userConfigFile: File = new File(userConfigPath)

    val userConfig: Option[Config] = if (userConfigFile.exists()) {
      Some(ConfigFactory.parseFile(userConfigFile))
    } else None

    val applicationConfigURL: Option[URL] = applicationConfigPath.map(filename => new File(filename))
      .filter(_.exists()).map(_.toURI.toURL)
      .orElse(applicationConfigPath.map(r => getClass.getClassLoader.getResource(r)))

    val applicationConfig: Option[Config] = if (applicationConfigURL.isDefined) {
      Some(ConfigFactory.parseURL(applicationConfigURL.get))
    } else None

    var config: Config = ConfigFactory.defaultOverrides()

    if (userConfig.isDefined)
      config = config.withFallback(userConfig.get)

    if (applicationConfig.isDefined)
      config = config.withFallback(applicationConfig.get)

    config = config
      .withFallback(ConfigFactory.parseResources(sidechainSettingsName))
      .withFallback(ConfigFactory.defaultReference())
      .resolve()

    config
  }

  def readConfigFromPath(userConfigPath: String, applicationConfigPath: JOptional[String]) : Config =
    readConfigFromPath(userConfigPath, toScala(applicationConfigPath))

  def read(userConfigPath: String, applicationConfigPath: Option[String]) : SidechainSettings =
    fromConfig(readConfigFromPath(userConfigPath, applicationConfigPath))
} 
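
A minimal usage sketch (not from the original source): both paths are placeholders. Precedence follows the implementation above: defaultOverrides(), then the user config, then the application config, then the bundled sidechain-sdk-settings.conf, and finally defaultReference().

// Hedged sketch: placeholder paths; missing files are tolerated by readConfigFromPath,
// but fromConfig expects the scorex settings to resolve from the bundled resources.
val settings: SidechainSettings =
  SidechainSettingsReader.read("/etc/sidechain/user.conf", Some("my-app-settings.conf"))
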
Example 100
Source File: Settings.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encryBenchmark

import com.typesafe.config.ConfigFactory
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.ArbitraryTypeReader._

case class Settings(serializedAssetBenchSettings: SerializedAssetBenchSettings,
                    serializedMonetaryBenchSettings: SerializedMonetaryBenchSettings,
                    serializedDataBenchSettings: SerializedDataBenchSettings,
                    benchesSettings: BenchesSettings,
                    stateBenchSettings: StateBenchSettings,
                    historyBenchSettings: HistoryBenchSettings)

trait BenchSettings {
  lazy val benchSettings: Settings = Settings.read
}

object Settings {
  val configPath = "encry.benchmark"
  val read: Settings = ConfigFactory.load("application.conf").as[Settings](configPath)
}

case class SerializedAssetBenchSettings(totalBoxesNumber: Int, numberOfInputs: Int, numberOfOutputs: Int)

case class SerializedMonetaryBenchSettings(totalBoxesNumber: Int, numberOfInputs: Int, numberOfOutputs: Int)

case class SerializedDataBenchSettings(totalBoxesNumber: Int, numberOfInputs: Int, numberOfOutputs: Int, bytesQty: Int)

case class BenchesSettings(warmUpIterations: Int, measurementIterations: Int, measurementTime: Int, warmUpTime: Int)

case class StateBenchSettings(totalBoxesNumber: Int,
                              blocksNumber: Int,
                              transactionsNumberInEachBlock: Int,
                              numberOfInputsInOneTransaction: Int,
                              numberOfOutputsInOneTransaction: Int)

case class HistoryBenchSettings(blocksNumber: Int, transactionsNumber: Int) 
Example 101
Source File: Configs.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.it.configs

import com.typesafe.config.{Config, ConfigFactory}

object Configs {

  def mining(miningEnable: Boolean): Config = ConfigFactory.parseString(
    s"""
       |encry.node.mining=$miningEnable
    """.stripMargin
  )

  def offlineGeneration(offlineGeneration: Boolean): Config = ConfigFactory.parseString(
    s"""
       |encry.node.offlineGeneration=$offlineGeneration
    """.stripMargin
  )

  def nodeName(name: String): Config = ConfigFactory.parseString(
    s"""
       |encry.network.nodeName="$name"
    """.stripMargin
  )

  def miningDelay(miningDelay: Int): Config = ConfigFactory.parseString(
    s"""
       |encry.node.miningDelay=${miningDelay}s
    """.stripMargin
  )

  def knownPeers(peers: Seq[(String, Int)]): Config = ConfigFactory.parseString({
    val peerInfoSeq: Seq[String] = peers.map(n =>
      s"""
         |"${n._1}:${n._2}"
       """.stripMargin)
    val peerInfoStr: String = peerInfoSeq.mkString("[", ",", "]")
    s"""
       |encry.network.knownPeers=$peerInfoStr
     """.stripMargin
  })

  def mnemonicKey(key: String): Config = ConfigFactory.parseString(
    s"""
       |encry.wallet.seed="$key"
     """.stripMargin
  )
} 
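
A minimal sketch (not from the original source) of composing the fragments above for one test node; earlier configs take precedence, and defaultReference() supplies every key the fragments do not set, assuming the project's reference.conf is on the classpath.

import com.typesafe.config.{Config, ConfigFactory}

// Hedged sketch: placeholder node name and peer address.
val nodeConfig: Config = Configs.mining(true)
  .withFallback(Configs.nodeName("node-1"))
  .withFallback(Configs.knownPeers(Seq(("127.0.0.1", 9001))))
  .withFallback(ConfigFactory.defaultReference())
  .resolve()
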
Example 102
Source File: SparkConfig.scala    From gsoc_relationship   with Apache License 2.0 5 votes vote down vote up
package com.holmesprocessing.analytics.relationship

import java.io.File

import org.apache.spark.{SparkConf, SparkContext}
import com.typesafe.config.ConfigFactory

object SparkConfig {

  val config = ConfigFactory.parseFile(new File("./config/relationship.conf"))

  val hosts = "hosts"
  val username = "username"
  val password = "password"
  val keyspace = "keyspace"
  val analytics_knowledge_base = "analytics_knowledge_base"
  val analytics_mv_knowledge_base_by_feature = "analytics_mv_knowledge_base_by_feature"
  val analytics_primary_relationships = "analytics_primary_relationships"
  val results = "results"
  val results_meta = "results_meta"
  val results_data = "results_data"
  val objects_table = "objects_table"

  val appName = "relationship"
  val master = "localhost"

  val sparkconf = new SparkConf(true)
    .set("spark.cassandra.connection.host", hosts)
    .set("spark.cassandra.auth.username", username)
    .set("spark.cassandra.auth.password", password)

  val sc = new SparkContext(master, appName, sparkconf)
} 
Example 103
Source File: ConfigLoaderSpec.scala    From amadou   with Apache License 2.0 5 votes vote down vote up
package com.mediative.amadou

import org.scalatest.{WordSpec, Matchers}
import com.typesafe.config.ConfigFactory
import java.util.Properties

object ConfigLoaderSpec {
  case class Database(url: String, properties: Properties)
}

class ConfigLoaderSpec extends WordSpec with Matchers with ConfigLoader {
  import ConfigLoaderSpec.Database

  "propertiesValueReader" should {
    "load from given path" in {
      val config =
        ConfigFactory.parseString("""
        database {
          url = "jdbc:postgresql:testdb"
          properties = src/test/resources/config-reader-spec.properties
        }
      """)
      val db = config.as[Database]("database")
      db.properties.size should be(2)
      db.properties.getProperty("user") should be("john")
      db.properties.getProperty("pass") should be("secret")
    }

    "be empty when no path is given" in {
      val config = ConfigFactory.parseString("""
        database.url = "jdbc:postgresql:testdb"
      """)
      val db     = config.as[Database]("database")
      db.properties.isEmpty should be(true)
    }

    "fail when given path does not exist" in {
      val config =
        ConfigFactory.parseString("""
        database {
          url = "jdbc:postgresql:testdb"
          properties = src/test/resources/doesn-not-exists.properties
        }
      """)

      the[java.io.FileNotFoundException] thrownBy {
        config.as[Database]("database")
      } should have message "src/test/resources/doesn-not-exists.properties (No such file or directory)"
    }
  }
} 
Example 104
Source File: AkkaUtils.scala    From DataXServer   with Apache License 2.0 5 votes vote down vote up
package org.tianlangstudio.data.hamal.yarn.util

import akka.actor.{ActorSystem, ExtendedActorSystem}
import com.typesafe.config.ConfigFactory
import org.apache.log4j.{Level, Logger}
import org.tianlangstudio.data.hamal.core.{Constants, HamalConf}
import org.tianlangstudio.data.hamal.core.HamalConf


  def maxFrameSizeBytes(conf: HamalConf): Int = {
    val frameSizeInMB = conf.getInt("datax.akka.frameSize", 128)
    if (frameSizeInMB > AKKA_MAX_FRAME_SIZE_IN_MB) {
      throw new IllegalArgumentException(
        s"spark.akka.frameSize should not be greater than $AKKA_MAX_FRAME_SIZE_IN_MB MB")
    }
    frameSizeInMB * 1024 * 1024
  }


  def protocol(actorSystem: ActorSystem): String = {
    val akkaConf = actorSystem.settings.config
    val sslProp = "akka.remote.netty.tcp.enable-ssl"
    protocol(akkaConf.hasPath(sslProp) && akkaConf.getBoolean(sslProp))
  }

  def protocol(ssl: Boolean = false): String = {
    if (ssl) {
      "akka.ssl.tcp"
    } else {
      "akka.tcp"
    }
  }

  def address(
      protocol: String,
      systemName: String,
      host: String,
      port: Int,
      actorName: String): String = {

        address(protocol,
          systemName,
          s"$host:$port",
          actorName
        )
  }
  def address(
               protocol: String,
               systemName: String,
               hostPort: String,
               actorName: String): String = {
    s"$protocol://$systemName@$hostPort/user/$actorName"
  }
} 
Example 105
Source File: HamalConf.scala    From DataXServer   with Apache License 2.0 5 votes vote down vote up
package org.tianlangstudio.data.hamal.core

import com.typesafe.config.{Config, ConfigFactory}

/**
 * Created by zhuhq on 2016/4/28.
 */
class HamalConf(fileName:String = Constants.DATAX_MASTER_CONF_NAME) {

  val conf = ConfigFactory.load(fileName).withFallback(ConfigFactory.load())
  def withFallback(other:Config) = {
      conf.withFallback(other)
  }
  def getInt(name:String,defVal:Int) = {

    if(conf.hasPath(name)) {
      conf.getInt(name)
    }else {
      defVal
    }

  }

  def getString(name:String,defVal:String) = {
    if(conf.hasPath(name)) {
      conf.getString(name)
    }else {
      defVal
    }
  }
  def getString(name:String) = {
    conf.getString(name)
  }
  def getInt(name:String) = {
    conf.getInt(name)

  }
  def getBoolean(name:String,defVal:Boolean) = {
    if(conf.hasPath(name)) {
      conf.getBoolean(name)
    }else {
      defVal
    }
  }
  def getBoolean(name:String) = {
    conf.getBoolean(name)
  }
  def getConf = {
    conf
  }

} 
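
A minimal usage sketch (not from the original source): the key names below are illustrative, not taken from the project; getInt and getString fall back to the supplied default when a key is absent, exactly as implemented above.

// Hedged sketch: placeholder keys and defaults.
val hamalConf = new HamalConf()
val threads = hamalConf.getInt("datax.worker.threads", 4)
val master  = hamalConf.getString("datax.master.host", "localhost")
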
Example 106
Source File: CouchbasePluginSpec.scala    From akka-persistence-couchbase   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.couchbase.support

import akka.actor.ActorSystem
import akka.persistence.couchbase.{CouchbaseExtension, LoggingConfig}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Suite}

import scala.concurrent.Await
import scala.concurrent.duration._

object CouchbasePluginSpec {

  val config = ConfigFactory.parseString(
    """
      |akka {
      |  persistence {
      |    journal {
      |      plugin = "couchbase-journal"
      |    }
      |
      |    snapshot-store {
      |      plugin =  "couchbase-snapshot-store"
      |    }
      |
      |    journal-plugin-fallback {
      |      replay-filter {
      |        mode = warn
      |      }
      |    }
      |  }
      |
      |  test.single-expect-default = 10s
      |  loglevel = WARNING
      |  log-dead-letters = 0
      |  log-dead-letters-during-shutdown = off
      |  test.single-expect-default = 10s
      |}
      |
      |couchbase-replay {
      |
      |  batchSize = "4"
      |}
    """.stripMargin)
}

trait CouchbasePluginSpec
  extends Suite
    with BeforeAndAfter
    with BeforeAndAfterAll {

  System.setProperty("java.util.logging.config.class", classOf[LoggingConfig].getName)

  def system: ActorSystem

  def couchbase = CouchbaseExtension(system)

  before {
    assert(couchbase.journalBucket.bucketManager.flush())
    assert(couchbase.snapshotStoreBucket.bucketManager.flush())
  }

  override protected def afterAll(): Unit = {
    Await.result(system.terminate(), 10.seconds)
    super.afterAll()
  }
} 
Example 107
Source File: BasicServiceTest.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.oracle.infy.qa

import akka.testkit.TestProbe
import com.typesafe.config.ConfigFactory
import com.webtrends.harness.service.messages.GetMetaDetails
import com.webtrends.harness.service.meta.ServiceMetaDetails
import com.webtrends.harness.service.test.{BaseWookieeScalaTest, TestHarness}

class BasicServiceTest extends BaseWookieeScalaTest {
  override def config = ConfigFactory.empty()
  override def servicesMap = Some(Map("base" -> classOf[BasicService]))

  "BasicService" should {
    "start itself up" in {
      val probe = TestProbe()
      val testService = TestHarness.harness.get.getService("base")
      assert(testService.isDefined, "Basic Service was not registered")

      probe.send(testService.get, GetMetaDetails)
      ServiceMetaDetails(false) mustEqual probe.expectMsg(ServiceMetaDetails(false))
    }
  }
} 
Example 108
Source File: ConfigUtil.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.utils

import java.util.concurrent.TimeUnit

import akka.util.Timeout
import com.typesafe.config.{ConfigException, Config, ConfigFactory}

object ConfigUtil {

  lazy val referenceConfig = ConfigFactory.defaultReference

  
  def getDefaultTimeout(config:Config, path:String, default:Timeout, unit:TimeUnit=TimeUnit.SECONDS) : Timeout = {
    if (config.hasPath(path)) {
      val duration = config.getDuration(path, unit)
      Timeout(duration, unit)
    } else {
      default
    }
  }
} 
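A short sketch of how getDefaultTimeout behaves; the paths and fallback value are illustrative.

import java.util.concurrent.TimeUnit

import akka.util.Timeout
import com.typesafe.config.ConfigFactory

val cfg      = ConfigFactory.parseString("request-timeout = 5 s")
val fallback = Timeout(30, TimeUnit.SECONDS)
ConfigUtil.getDefaultTimeout(cfg, "request-timeout", fallback)  // Timeout of 5 seconds, read from the config
ConfigUtil.getDefaultTimeout(cfg, "missing-path", fallback)     // path absent, so the 30-second fallback is returned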
Example 109
Source File: HarnessActorSystem.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.app

import akka.actor.ActorSystem
import com.typesafe.config.{Config, ConfigFactory}
import com.webtrends.harness.component.ComponentManager
import com.webtrends.harness.logging.Logger
import com.webtrends.harness.service.ServiceManager

object HarnessActorSystem {

  lazy val loader = HarnessClassLoader(Thread.currentThread.getContextClassLoader)
  private val externalLogger = Logger.getLogger(this.getClass)

  def apply(config:Option[Config]=None): ActorSystem = {
    ActorSystem.create("server", getConfig(config), loader)
  }

  def getConfig(config:Option[Config]): Config = {
    val sysConfig = {
      if (config.isDefined) {
        config.get
      } else {
        val baseConfig = ConfigFactory.load(loader, "conf/application.conf")
        ConfigFactory.load(loader).withFallback(baseConfig).getConfig("wookiee-system")
      }
    }

    ComponentManager.loadComponentJars(sysConfig, loader)
    ConfigFactory.load

    externalLogger.debug("Loading the service configs")
    val configs = ServiceManager.loadConfigs(sysConfig)
    if (configs.nonEmpty) externalLogger.info(s"${configs.size} service config(s) have been loaded: ${configs.mkString(", ")}")

    externalLogger.debug("Loading the component configs")
    val compConfigs = ComponentManager.loadComponentInfo(sysConfig)
    if (compConfigs.nonEmpty) externalLogger.info(s"${compConfigs.size} component config(s) have been loaded: ${compConfigs.mkString(", ")}\nIf 0 could be due to config loaded from component JARs.")

    val allConfigs = configs ++ compConfigs

    // Build the hierarchy
    val conf = if (allConfigs.isEmpty) sysConfig
      else allConfigs.reduce(_.withFallback(_)).withFallback(sysConfig)
    conf.resolve()
  }
} 
Example 110
Source File: LoggingActorSpec.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.logging

import akka.actor.{ActorSystem, Props}
import akka.event.Logging.{InitializeLogger, LoggerInitialized}
import akka.testkit.{TestKit, TestProbe}
import com.typesafe.config.ConfigFactory
import com.webtrends.harness.TestKitSpecificationWithJUnit

class LoggingActorSpec extends TestKitSpecificationWithJUnit(ActorSystem("test", ConfigFactory.parseString( """logging.use-actor=off"""))) {

  val logger = system.actorOf(Props[LoggingActor])

  "Logging" should {
    "test logging initialization" in {
      val probe = TestProbe()
      probe.send(logger, InitializeLogger(null))
      LoggerInitialized must beEqualTo(probe.expectMsg(LoggerInitialized))
    }
  }

  step {
    TestKit.shutdownActorSystem(system)
  }
} 
Example 111
Source File: ConfigSpec.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness

import java.io.{BufferedWriter, File, FileWriter}
import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.TestProbe
import com.typesafe.config.ConfigFactory
import com.webtrends.harness.app.HarnessActor.ConfigChange
import com.webtrends.harness.config.ConfigWatcherActor
import com.webtrends.harness.health.{ComponentState, HealthComponent}
import com.webtrends.harness.service.messages.CheckHealth
import org.specs2.mutable.SpecificationWithJUnit

import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration.FiniteDuration
import scala.reflect.io.{Directory, Path}

class ConfigSpec extends SpecificationWithJUnit {
  implicit val dur = FiniteDuration(2, TimeUnit.SECONDS)
  new File("services/test/conf").mkdirs()
  implicit val sys = ActorSystem("system", ConfigFactory.parseString( """
    akka.actor.provider = "akka.actor.LocalActorRefProvider"
    services { path = "services" }
    """).withFallback(ConfigFactory.load))

  implicit val ec: ExecutionContextExecutor =  sys.dispatcher

  val probe = TestProbe()
  val parent = sys.actorOf(Props(new Actor {
    val child = context.actorOf(ConfigWatcherActor.props, "child")
    def receive = {
      case x if sender == child => probe.ref forward x
      case x => child forward x
    }
  }))

  sequential

  "config " should {
    "be in good health" in {
      probe.send(parent, CheckHealth)
      val msg = probe.expectMsgClass(classOf[HealthComponent])
      msg.state equals ComponentState.NORMAL
    }

    "detect changes in config" in {
      val file = new File("services/test/conf/test.conf")
      val bw = new BufferedWriter(new FileWriter(file))
      bw.write("test = \"value\"")
      bw.close()
      val msg = probe.expectMsgClass(classOf[ConfigChange])
      msg.isInstanceOf[ConfigChange]
    }
  }

  step {
    sys.terminate().onComplete { _ =>
        Directory(Path(new File("services"))).deleteRecursively()
    }
  }
} 
Example 112
Source File: BaseSpecTest.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.service.test

import akka.actor.ActorSystem
import ch.qos.logback.classic.Level
import com.typesafe.config.{Config, ConfigFactory}
import com.webtrends.harness.component.Component
import com.webtrends.harness.service.Service
import org.specs2.mutable.SpecificationLike
import org.scalatest.{MustMatchers, WordSpecLike}

import scala.concurrent.duration._

trait BaseWookieeTest {
  def config:Config = ConfigFactory.empty()
  def componentMap:Option[Map[String, Class[_<:Component]]] = None
  def servicesMap:Option[Map[String, Class[_<:Service]]] = None
  def logLevel: Level = Level.INFO
  def startupWait: FiniteDuration = 15 seconds

  TestHarness(config, servicesMap, componentMap, logLevel, startupWait)
  Thread.sleep(1000)
  implicit val system: ActorSystem = TestHarness.system.get
}

trait BaseWookieeSpecTest extends BaseWookieeTest with SpecificationLike
trait BaseWookieeScalaTest extends BaseWookieeTest with WordSpecLike with MustMatchers 
Example 113
Source File: ConfluentSchemaRegistry.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.avro.registry

import com.google.common.cache.{CacheBuilder, CacheLoader}
import com.typesafe.config.{Config, ConfigFactory}
import hydra.common.logging.LoggingAdapter
import io.confluent.kafka.schemaregistry.client.{
  CachedSchemaRegistryClient,
  MockSchemaRegistryClient,
  SchemaMetadata,
  SchemaRegistryClient
}

import scala.collection.JavaConverters._
import scala.concurrent.{ExecutionContext, Future}


case class ConfluentSchemaRegistry(
    registryClient: SchemaRegistryClient,
    registryUrl: String
) extends SchemaRegistryComponent {

  def getAllSubjects()(implicit ec: ExecutionContext): Future[Seq[String]] =
    Future(
      registryClient
        .getAllSubjects()
        .asScala
        .map(s => if (s.endsWith("-value")) s.dropRight(6) else s)
        .toSeq
    )

  def getById(id: Int, suffix: String = "-value")(
      implicit ec: ExecutionContext
  ): Future[SchemaMetadata] = Future {
    val schema = registryClient.getById(id)
    val subject = schema.getFullName + suffix
    registryClient.getLatestSchemaMetadata(subject)
  }
}

object ConfluentSchemaRegistry extends LoggingAdapter {

  import hydra.common.config.ConfigSupport._

  case class SchemaRegistryClientInfo(
      url: String,
      schemaRegistryMaxCapacity: Int
  )

  private val cachedClients = CacheBuilder
    .newBuilder()
    .build(
      new CacheLoader[SchemaRegistryClientInfo, ConfluentSchemaRegistry] {

        def load(info: SchemaRegistryClientInfo): ConfluentSchemaRegistry = {
          log.debug(s"Creating new schema registry client for ${info.url}")
          val client = if (info.url == "mock") {
            mockRegistry
          } else {
            new CachedSchemaRegistryClient(
              info.url,
              info.schemaRegistryMaxCapacity
            )
          }
          ConfluentSchemaRegistry(client, info.url)
        }
      }
    )

  val mockRegistry = new MockSchemaRegistryClient()

  def registryUrl(config: Config): String =
    config.getStringOpt("schema.registry.url")
      .getOrElse(throw new IllegalArgumentException("A schema registry url is required."))

  def forConfig(
      config: Config = ConfigFactory.load()
  ): ConfluentSchemaRegistry = {
    val identityMapCapacity =
      config.getIntOpt("max.schemas.per.subject").getOrElse(1000)
    cachedClients.get(
      SchemaRegistryClientInfo(registryUrl(config), identityMapCapacity)
    )
  }
} 
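A usage sketch with illustrative settings: forConfig caches one registry per (url, capacity) pair, and the literal url "mock" selects the shared MockSchemaRegistryClient.

import com.typesafe.config.ConfigFactory

val registryConfig = ConfigFactory.parseString(
  """
    |schema.registry.url = "mock"
    |max.schemas.per.subject = 500
  """.stripMargin)

val registry = ConfluentSchemaRegistry.forConfig(registryConfig)
// registry.registryClient is the shared mock client; omitting max.schemas.per.subject defaults the capacity to 1000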
Example 114
Source File: HydraIngestorRegistrySpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.bootstrap

import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import hydra.common.util.ActorUtils
import hydra.core.bootstrap.ReflectionsWrapper
import hydra.ingest.IngestorInfo
import hydra.ingest.services.IngestorRegistry
import hydra.ingest.services.IngestorRegistry.RegisterWithClass
import hydra.ingest.test.TestIngestor
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._

class HydraIngestorRegistrySpec
    extends TestKit(ActorSystem("HydraIngestorRegistrySpec"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ImplicitSender
    with ScalaFutures {

  override def afterAll =
    TestKit.shutdownActorSystem(
      system,
      verifySystemShutdown = true,
      duration = 10.seconds
    )

  val testRegistry =
    system.actorOf(Props[IngestorRegistry], "ingestor_registry")

  val cfg = ConfigFactory.parseString(
    "ingest.ingestor-registry.path=/user/ingestor_registry"
  )
  val registry = HydraIngestorRegistryClient(cfg)

  implicit val actorRefFactory = system

  ReflectionsWrapper.rescan()

  registry.registry ! RegisterWithClass(classOf[TestIngestor], "global")
  expectMsgType[IngestorInfo]

  describe("The Ingestor Registry") {
    it("uses the default registry if no config") {
      val path = HydraIngestorRegistryClient.registryPath(ConfigFactory.empty())
      path shouldBe s"/user/service/${ActorUtils.actorName(classOf[IngestorRegistry])}"
    }

    it("looks up an ingestor") {
      implicit val timeout = akka.util.Timeout(10.seconds)
      whenReady(registry.lookupIngestor("test_ingestor")) { i =>
        i.ingestors.size shouldBe 1
        i.ingestors(0).name shouldBe "test_ingestor"
        i.ingestors(0).path shouldBe testRegistry.path / "test_ingestor"
      }
    }
  }
} 
Example 115
Source File: DataSourceConnectionProviderSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.sql

import java.sql.SQLException
import java.util.Properties

import com.typesafe.config.ConfigFactory
import com.zaxxer.hikari.{HikariConfig, HikariDataSource}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers

import scala.collection.JavaConverters._


class DataSourceConnectionProviderSpec
    extends Matchers
    with AnyFlatSpecLike
    with BeforeAndAfterAll {

  val properties = new Properties
  val cfg = ConfigFactory.load().getConfig("db-cfg")
  cfg
    .entrySet()
    .asScala
    .foreach(e => properties.setProperty(e.getKey(), cfg.getString(e.getKey())))

  private val hikariConfig = new HikariConfig(properties)

  private val ds = new HikariDataSource(hikariConfig)

  override def afterAll() = ds.close()

  "The DataSourceConnectionProvider" should "establish a connection" in {
    val p = new DataSourceConnectionProvider(ds)
    p.getConnection().isValid(1) shouldBe true
  }

  it should "close the connection" in {
    val p = new DataSourceConnectionProvider(ds)
    p.close()
    intercept[SQLException](p.getConnection())
  }

  "The DriverManagerConnectionProvider" should "be configured properly" in {
    val config = ConfigFactory.parseString("""
        |connection.url = url
        |connection.user = test
        |connection.password = password
        |connection.max.retries = 20
        |connection.retry.backoff = 10s
      """.stripMargin)

    val c = DriverManagerConnectionProvider(config)
    c.password shouldBe "password"
    c.connectionUrl shouldBe "url"
    c.username shouldBe "test"
    c.retryBackoff.toSeconds shouldBe 10
    c.maxConnectionAttempts shouldBe 20
    c.close()
  }

  it should "return a new connection" in {
    val config = ConfigFactory.parseString(
      """
        |connection.url = "jdbc:h2:mem:test;DB_CLOSE_DELAY=-1"
        |connection.max.retries = 2
        |connection.retry.backoff = 1s
      """.stripMargin
    )

    val c = DriverManagerConnectionProvider(config)
    c.getConnection() should not be null
    c.getNewConnection() should not be null
    c.close()
    c.connection.isValid(2) shouldBe false
  }
} 
Example 116
Source File: JdbcWriterSettingsSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.sql

import com.typesafe.config.ConfigFactory
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpecLike


class JdbcWriterSettingsSpec extends Matchers with AnyFlatSpecLike {

  "The JdbcWriterSettings" should "be properly configured" in {
    val config = ConfigFactory.parseString("""
        |db.syntax = hydra.sql.NoOpSyntax
        |auto.evolve= true
        |
      """.stripMargin)

    val c = JdbcWriterSettings(config)
    c.dbSyntax shouldBe NoOpSyntax
    c.autoEvolve shouldBe true
  }

  it should "use defaults" in {
    val config = ConfigFactory.parseString("""
        |connection.url = url
        |connection.user = test
        |connection.password = password
        |dialect = hydra.sql.PostgresDialect
      """.stripMargin)

    val c = JdbcWriterSettings(config)
    c.autoEvolve shouldBe true
    c.batchSize shouldBe 3000
    c.dbSyntax shouldBe UnderscoreSyntax
    c.autoEvolve shouldBe true
  }

} 
Example 117
Source File: KafkaMetricsSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.kafka.transport

import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import hydra.core.transport.AckStrategy
import hydra.kafka.producer.KafkaRecordMetadata
import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll
import spray.json.DefaultJsonProtocol


class KafkaMetricsSpec
    extends TestKit(ActorSystem("hydra"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with DefaultJsonProtocol {

  import KafkaRecordMetadata._

  implicit val config = EmbeddedKafkaConfig(
    kafkaPort = 8092,
    zooKeeperPort = 3181,
    customBrokerProperties = Map(
      "auto.create.topics.enable" -> "false",
      "offsets.topic.replication.factor" -> "1"
    )
  )

  override def afterAll() = {
    super.afterAll()
    EmbeddedKafka.stop()
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)
  }

  override def beforeAll() = {
    super.beforeAll()
    EmbeddedKafka.start()
    EmbeddedKafka.createCustomTopic("metrics_topic")
  }

  describe("When using the KafkaMetrics object") {

    it("uses the NoOpMetrics") {
      KafkaMetrics(ConfigFactory.empty()) shouldBe NoOpMetrics
      KafkaMetrics(
        ConfigFactory.parseString("transports.kafka.metrics.enabled=false")
      ) shouldBe NoOpMetrics
    }

    it("uses the PublishMetrics") {
      import spray.json._
      val cfg = ConfigFactory.parseString(s"""
           | transports.kafka.metrics.topic = metrics_topic
           | transports.kafka.metrics.enabled=true""".stripMargin)
      val pm = KafkaMetrics(cfg)
      pm shouldBe a[PublishMetrics]
      val kmd = KafkaRecordMetadata(1, 1, "topic", 1, 1, AckStrategy.NoAck)
      pm.saveMetrics(kmd)
      EmbeddedKafka
        .consumeFirstStringMessageFrom("metrics_topic")
        .parseJson shouldBe kmd.toJson

    }
  }
} 
Example 118
Source File: ConfigSupport.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.common.config

import java.util.Properties
import java.util.concurrent.TimeUnit

import cats.implicits._
import com.typesafe.config.{Config, ConfigFactory, ConfigObject}

import scala.concurrent.duration.FiniteDuration
import scala.language.implicitConversions


trait ConfigSupport extends ConfigComponent {

  private val defaultConfig = ConfigFactory.load()

  val applicationName: String = defaultConfig.getString("application.name")

  val rootConfig: Config = defaultConfig

  val applicationConfig: Config = rootConfig.getConfig(applicationName)

}

object ConfigSupport {

  import scala.collection.JavaConverters._

  implicit def toMap(cfg: ConfigObject): Map[String, Object] = {
    cfg.toConfig
      .entrySet()
      .asScala
      .map({ entry => entry.getKey -> entry.getValue.unwrapped() })(
        collection.breakOut
      )
  }

  implicit def toMap(cfg: Config): Map[String, Object] = {
    cfg
      .entrySet()
      .asScala
      .map({ entry => entry.getKey -> entry.getValue.unwrapped() })(
        collection.breakOut
      )
  }

  implicit def toProps(map: Map[String, AnyRef]): Properties = {
    map.foldLeft(new Properties) {
      case (a, (k, v)) =>
        a.put(k, v)
        a
    }
  }

  implicit class ConfigImplicits(config: Config) {
    def getDurationOpt(path: String): Option[FiniteDuration] =
      getOptional(path, config.getDuration).map(d => FiniteDuration(d.toNanos, TimeUnit.NANOSECONDS))

    def getStringOpt(path: String): Option[String] =
      getOptional(path, config.getString)

    def getConfigOpt(path: String): Option[Config] =
      getOptional(path, config.getConfig)

    def getIntOpt(path: String): Option[Int] =
      getOptional(path, config.getInt)

    def getBooleanOpt(path: String): Option[Boolean] =
      getOptional(path, config.getBoolean)

    def getStringListOpt(path: String): Option[List[String]] =
      getOptional(path, config.getStringList).map(_.asScala.toList)

    private def getOptional[A](path: String, method: String => A): Option[A] = {
      if (config.hasPath(path)) {
        method(path).some
      } else {
        none
      }
    }
  }

} 
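A quick sketch of the optional getters added by ConfigImplicits; the keys are illustrative.

import com.typesafe.config.ConfigFactory
import hydra.common.config.ConfigSupport._

val cfg = ConfigFactory.parseString("kafka.producer.acks = all")

cfg.getStringOpt("kafka.producer.acks")  // Some("all")
cfg.getIntOpt("kafka.producer.batch")    // None: missing paths become None instead of throwing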
Example 119
Source File: NSDbMiniClusterConfigProvider.scala    From NSDb   with Apache License 2.0 5 votes vote down vote up
package io.radicalbit.nsdb.minicluster

import java.time.Duration

import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
import io.radicalbit.nsdb.common.configuration.NSDbConfigProvider

trait NSDbMiniClusterConfigProvider extends NSDbConfigProvider {

  def hostname: String
  def storageDir: String
  def passivateAfter: Duration
  def replicationFactor: Int

  override lazy val userDefinedConfig: Config =
    ConfigFactory
      .parseResources("nsdb-minicluster.conf")
      .withValue("nsdb.node.hostname", ConfigValueFactory.fromAnyRef(hostname))
      .withValue("nsdb.grpc.interface", ConfigValueFactory.fromAnyRef(hostname))
      .withValue("nsdb.http.interface", ConfigValueFactory.fromAnyRef(hostname))
      .withValue("nsdb.storage.base-path", ConfigValueFactory.fromAnyRef(storageDir))
      .withValue("nsdb.cluster.replication-factor", ConfigValueFactory.fromAnyRef(replicationFactor))
      .resolve()

  override lazy val lowLevelTemplateConfig: Config =
    mergeConf(userDefinedConfig,
              ConfigFactory.parseResources("application-native.conf"),
              ConfigFactory.parseResources("application-common.conf"))
} 
Example 120
Source File: KafkaSecurityManager.scala    From kafka-security-manager   with MIT License 5 votes vote down vote up
package com.github.simplesteph.ksm

import java.util.concurrent.atomic.AtomicBoolean

import com.github.simplesteph.ksm.grpc.KsmGrpcServer
import com.github.simplesteph.ksm.parser.CsvAclParser
import com.typesafe.config.ConfigFactory
import org.slf4j.LoggerFactory

import scala.util.{Failure, Success, Try}
import java.util.concurrent.{
  ExecutionException,
  Executors,
  ScheduledExecutorService,
  TimeUnit
}

object KafkaSecurityManager extends App {

  val log = LoggerFactory.getLogger(KafkaSecurityManager.getClass)

  val config = ConfigFactory.load()
  val appConfig: AppConfig = new AppConfig(config)

  var isCancelled: AtomicBoolean = new AtomicBoolean(false)
  var grpcServer: KsmGrpcServer = _
  var aclSynchronizer: AclSynchronizer = _
  val aclParser = new CsvAclParser(appConfig.Parser.csvDelimiter)
  val scheduler: ScheduledExecutorService = Executors.newScheduledThreadPool(1)

  if (appConfig.KSM.extract) {
    new ExtractAcl(appConfig.Authorizer.authorizer, aclParser).extract()
  } else {
    aclSynchronizer = new AclSynchronizer(
      appConfig.Authorizer.authorizer,
      appConfig.Source.sourceAcl,
      appConfig.Notification.notification,
      aclParser,
      appConfig.KSM.readOnly
    )

    Try {
      grpcServer = new KsmGrpcServer(
        aclSynchronizer,
        appConfig.GRPC.port,
        appConfig.GRPC.gatewayPort,
        appConfig.Feature.grpc
      )
      grpcServer.start()
    } match {
      case Success(_) =>
      case Failure(e) =>
        log.error("gRPC Server failed to start", e)
        shutdown()
    }

    Runtime.getRuntime.addShutdownHook(new Thread() {
      override def run(): Unit = {
        log.info("Received stop signal")
        shutdown()
      }
    })

    try {
      // If appConfig.KSM.refreshFrequencyMs is less than or equal to 0, the aclSynchronizer is run just once.
      if (appConfig.KSM.refreshFrequencyMs <= 0) {
        log.info("Single run mode: ACL will be synchronized once.")
        aclSynchronizer.run()
      } else {
        log.info(
          "Continuous mode: ACL will be synchronized every " + appConfig.KSM.refreshFrequencyMs + " ms."
        )
        val handle = scheduler.scheduleAtFixedRate(
          aclSynchronizer,
          0,
          appConfig.KSM.refreshFrequencyMs,
          TimeUnit.MILLISECONDS
        )
        handle.get
      }
    } catch {
      case e: ExecutionException =>
        log.error("unexpected exception", e)
    } finally {
      shutdown()
    }

  }

  def shutdown(): Unit = {
    log.info("Kafka Security Manager is shutting down...")
    isCancelled = new AtomicBoolean(true)
    aclSynchronizer.close()
    grpcServer.stop()
    scheduler.shutdownNow()
  }
} 
Example 121
Source File: OpenApiUtils.scala    From gospeak   with Apache License 2.0 5 votes vote down vote up
package gospeak.web.utils

import java.io.{File, FileNotFoundException}

import com.typesafe.config.{ConfigFactory, ConfigRenderOptions}
import play.api.libs.json.{JsValue, Json}
import gospeak.libs.scala.Extensions._

import scala.util.Try

object OpenApiUtils {
  val specPath = "app/gospeak/web/api/swagger/gospeak.openapi.conf"

  def loadSpec(): Try[JsValue] = {
    Try(new File(s"web/$specPath")).filterWith(_.exists(), f => new FileNotFoundException(f.getAbsolutePath))
      .orElse(Try(new File(specPath)).filterWith(_.exists(), f => new FileNotFoundException(f.getAbsolutePath)))
      .flatMap(loadSpec)
  }

  private def loadSpec(file: File): Try[JsValue] = {
    val spec = ConfigFactory.parseFile(file).resolve()
    val json = spec.root().render(ConfigRenderOptions.concise())
    Try(Json.parse(json))
  }
} 
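A call-site sketch, not part of the original file; the JSON field accessed below is only assumed to exist in the spec.

import scala.util.{Failure, Success}

OpenApiUtils.loadSpec() match {
  case Success(spec) => println((spec \ "info" \ "title").asOpt[String]) // assumes an info.title field in the spec
  case Failure(err)  => println(s"OpenAPI spec could not be loaded: ${err.getMessage}")
}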
Example 122
Source File: Values.scala    From gospeak   with Apache License 2.0 5 votes vote down vote up
package gospeak.web.testingutils

import java.util.UUID

import akka.stream.Materializer
import akka.stream.testkit.NoMaterializer
import com.danielasfregola.randomdatagenerator.RandomDataGenerator
import com.mohiva.play.silhouette.api.actions._
import com.mohiva.play.silhouette.api.util.Clock
import com.mohiva.play.silhouette.api.{LoginInfo, Silhouette, SilhouetteProvider, Environment => SilhouetteEnvironment}
import com.mohiva.play.silhouette.impl.authenticators.CookieAuthenticator
import com.mohiva.play.silhouette.impl.providers.SocialProviderRegistry
import com.mohiva.play.silhouette.test._
import com.typesafe.config.ConfigFactory
import gospeak.core.domain.User
import gospeak.core.services.storage.DbConf
import gospeak.core.testingutils.Generators._
import gospeak.infra.services.AvatarSrv
import gospeak.infra.services.email.InMemoryEmailSrv
import gospeak.infra.services.storage.sql.GsRepoSql
import gospeak.web.AppConf
import gospeak.web.auth.domain.{AuthUser, CookieEnv}
import gospeak.web.auth.services.{AuthRepo, AuthSrv}
import gospeak.web.domain.Breadcrumb
import gospeak.web.utils.{UserAwareReq, UserReq}
import play.api.mvc._
import play.api.test.CSRFTokenHelper._
import play.api.test.{CSRFTokenHelper, FakeRequest, Helpers}

import scala.concurrent.ExecutionContext.Implicits.global

object Values extends RandomDataGenerator {
  // play
  // private val playEnv = Environment.simple()
  // private val ctx = ApplicationLoader.Context.create(playEnv)
  // val app = new GospeakComponents(ctx)
  val cc: ControllerComponents = Helpers.stubControllerComponents()
  private val playBodyParsers = cc.parsers
  private val messagesApi = cc.messagesApi
  private val bodyParsers: BodyParsers.Default = new BodyParsers.Default(playBodyParsers)

  // silhouette
  private val user: User = random[User]
  private val loginInfo: LoginInfo = AuthSrv.loginInfo(user.email)
  private val identity: AuthUser = AuthUser(loginInfo, user, Seq())
  protected val clock = Clock()
  private val env: SilhouetteEnvironment[CookieEnv] = FakeEnvironment[CookieEnv](Seq(identity.loginInfo -> identity))
  private val securedAction: SecuredAction = new DefaultSecuredAction(new DefaultSecuredRequestHandler(new DefaultSecuredErrorHandler(messagesApi)), bodyParsers)
  private val unsecuredAction: UnsecuredAction = new DefaultUnsecuredAction(new DefaultUnsecuredRequestHandler(new DefaultUnsecuredErrorHandler(messagesApi)), bodyParsers)
  private val userAwareAction: UserAwareAction = new DefaultUserAwareAction(new DefaultUserAwareRequestHandler(), bodyParsers)
  val silhouette: Silhouette[CookieEnv] = new SilhouetteProvider(env, securedAction, unsecuredAction, userAwareAction)
  val unsecuredReqHeader: RequestHeader = FakeRequest().withCSRFToken
  val securedReqHeader: RequestHeader = FakeRequest().withAuthenticator(identity.loginInfo)(env).withCSRFToken
  protected implicit val mat: Materializer = NoMaterializer

  // app
  val conf: AppConf = AppConf.load(ConfigFactory.load()).get
  private val dbConf = DbConf.H2(s"jdbc:h2:mem:${UUID.randomUUID()};MODE=PostgreSQL;DATABASE_TO_UPPER=false;DB_CLOSE_DELAY=-1")
  val db: GsRepoSql = new GsRepoSql(dbConf, conf.gospeak)
  private val authRepo = new AuthRepo(db.user, db.group)
  val emailSrv = new InMemoryEmailSrv()
  val authSrv = AuthSrv(conf.auth, silhouette, db.user, db.userRequest, db.group, authRepo, clock, SocialProviderRegistry(Seq()), new AvatarSrv())

  // twirl
  private val req: Request[AnyContent] = CSRFTokenHelper.addCSRFToken(FakeRequest().withAuthenticator(identity.loginInfo)(env))
  private val authenticator: CookieAuthenticator = FakeAuthenticator(loginInfo)(env, req)
  private val r: SecuredRequest[CookieEnv, AnyContent] = SecuredRequest[CookieEnv, AnyContent](identity, authenticator, req)
  val userReq: UserReq[AnyContent] = UserReq.from(conf, messagesApi, r)
  val userAwareReq: UserAwareReq[AnyContent] = userReq.userAware
  val b: Breadcrumb = Breadcrumb(Seq())
} 
Example 123
Source File: ServerSettingsTemplate.scala    From akka-http-circe-json-template   with Apache License 2.0 5 votes vote down vote up
package com.vitorsvieira.http.config

import akka.actor.ActorSystem
import akka.event.{ LogSource, Logging }
import akka.stream.ActorMaterializer
import com.typesafe.config.{ Config, ConfigFactory }

import scala.concurrent.ExecutionContextExecutor

trait ServerSettingsTemplate {

  lazy private val config: Config = ConfigFactory.load()
  private val httpConfig: Config = config.getConfig("http")
  val httpInterface: String = httpConfig.getString("interface")
  val httpPort: Int = httpConfig.getInt("port")

  implicit val actorSystem: ActorSystem = ActorSystem("akka-http-circe-json")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val executionContext: ExecutionContextExecutor = actorSystem.dispatcher
  private implicit val logSource: LogSource[ServerSettingsTemplate] = (t: ServerSettingsTemplate) ⇒ t.getClass.getSimpleName
  private def logger(implicit logSource: LogSource[_ <: ServerSettingsTemplate]) = Logging(actorSystem, this.getClass)

  implicit val log = logger
}

object ServerSettingsTemplate extends ServerSettingsTemplate 
Example 124
Source File: Reserved.scala    From asura   with MIT License 5 votes vote down vote up
package asura.app.api.auth

import com.typesafe.config.ConfigFactory

import scala.collection.mutable

object Reserved {

  var groups: Set[String] = Set.empty

  def initReservedData(): Unit = {
    val config = ConfigFactory.load("reserved.conf")
    val groups = config.getStringList("asura.reserved.groups")
    val groupsSet = mutable.Set[String]()
    groups.forEach(g => groupsSet += g)
    Reserved.groups = groupsSet.toSet
  }

  def isReservedGroup(group: String): Boolean = {
    groups.contains(group)
  }
} 
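A sketch of the expected reserved.conf shape and a call site; the group names are made up.

// Hypothetical reserved.conf on the classpath:
//   asura.reserved.groups = ["admin", "system"]
Reserved.initReservedData()
Reserved.isReservedGroup("admin")   // true if listed in reserved.conf
Reserved.isReservedGroup("guests")  // false otherwise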
Example 125
Source File: ClusterManager.scala    From asura   with MIT License 5 votes vote down vote up
package asura.cluster

import akka.actor.{ActorRef, ActorSystem}
import asura.cluster.actor.MemberListenerActor
import com.typesafe.config.{Config, ConfigFactory}

object ClusterManager {

  private var isIndependentSystem = false
  var enabled = false
  var system: ActorSystem = null
  var clusterManagerActor: ActorRef = null

  def init(
            config: Config = ConfigFactory.load(),
            name: String = "ClusterSystem",
            actorSystem: ActorSystem = null
          ): Unit = {
    enabled = true
    system = if (null != actorSystem) {
      actorSystem
    } else {
      isIndependentSystem = true
      ActorSystem(name, config)
    }
    clusterManagerActor = system.actorOf(MemberListenerActor.props())
  }

  def shutdown(): Unit = {
    if (null != system && !isIndependentSystem) system.terminate()
  }
} 
Example 126
Source File: ClusterSpec.scala    From asura   with MIT License 5 votes vote down vote up
package asura.dubbo

import akka.actor.ActorSystem
import asura.cluster.actor.MemberListenerActor
import com.typesafe.config.ConfigFactory

object ClusterSpec {

  def main(args: Array[String]): Unit = {

    val config1 = ConfigFactory.parseString(config1Str)
    val system1 = ActorSystem("ClusterSystem", config1.resolve())
    val indigo1 = system1.actorOf(MemberListenerActor.props(), "indigo")

    val config2 = ConfigFactory.parseString(config2Str)
    val system2 = ActorSystem("ClusterSystem", config2.resolve())
  }

  val config1Str =
    """
      |akka {
      |  actor {
      |    provider = cluster
      |  }
      |  remote {
      |    log-remote-lifecycle-events = off
      |    artery {
      |      enabled = on
      |      transport = aeron-udp
      |      canonical.hostname = "127.0.0.1"
      |      canonical.port = 2551
      |    }
      |  }
      |  cluster {
      |    seed-nodes = [
      |      "akka://[email protected]:2551",
      |    ]
      |    roles = [
      |      "indigo"
      |    ]
      |  }
      |}
    """.stripMargin


  val config2Str =
    """
      |akka {
      |  actor {
      |    provider = cluster
      |  }
      |  remote {
      |    log-remote-lifecycle-events = off
      |    artery {
      |      enabled = on
      |      transport = aeron-udp
      |      canonical.hostname = "127.0.0.1"
      |      canonical.port = 2552
      |    }
      |  }
      |  cluster {
      |    seed-nodes = [
      |      "akka://[email protected]:2551",
      |    ]
      |    roles = [
      |      "indigo"
      |    ]
      |  }
      |}
    """.stripMargin
} 
Example 127
Source File: SystemSettings.scala    From Neutrino   with Apache License 2.0 5 votes vote down vote up
package com.ebay.neutrino.cluster

import java.io.File

import akka.actor._
import com.ebay.neutrino.NeutrinoCore
import com.ebay.neutrino.config.Configuration._
import com.ebay.neutrino.config.{Configuration, NeutrinoSettings}
import com.typesafe.config.{ConfigFactory, Config}


case class SystemSettings(
  enableApi:    Boolean,
  neutrino:     NeutrinoSettings,
  dataSource:   DataSourceSettings
)

object SystemSettings {

  // This config is already at 'ebay.neutrino'
  def apply(config: Config): SystemSettings =
    SystemSettings(
      config getBoolean "enable-api",
      NeutrinoSettings(config),
      DataSourceSettings(config getConfig "datasource")
    )
}


case class SystemConfigurationExtension(system: ExtendedActorSystem) extends Extension
{
  // Extract 'ebay.neutrino' config
  val config = Configuration.load(system.settings.config, "resolvers")

  // Load system-settings (including all component settings)
  val settings = SystemSettings(config)

  // Initialize our Neutrino-core
  val core = new NeutrinoCore(settings.neutrino)

  // Our use-specific state cluster topology (customized for SLB)
  val topology = {
    new SLBTopology(core)
  }


}


object SystemConfiguration extends ExtensionId[SystemConfigurationExtension] with ExtensionIdProvider {
  // Extension accessor; the object declaration is inferred from the lookup/createExtension overrides below.

  private val common = ConfigFactory.load("slb.conf")

  override def lookup() = SystemConfiguration

  override def createExtension(system: ExtendedActorSystem) = SystemConfigurationExtension(system)

  def load(filename: String): Config =
    filename match {
      case null => common
      case file => val slbFile = new File(filename)
        val slbConfig = ConfigFactory.parseFile(slbFile)
        if (slbConfig.isEmpty) {
          common
        } else {
          ConfigFactory.load(slbConfig)
        }
    }

  def system(filename: String): ActorSystem =
    ActorSystem("slb-cluster", load(filename))

  // Create an actor-system and return the attached configuration, all in one
  def apply(filename: String): SystemConfigurationExtension =
    SystemConfiguration(system(filename))

} 
Example 128
Source File: EchoServer.scala    From Neutrino   with Apache License 2.0 5 votes vote down vote up
package com.ebay.neutrino

import java.util.concurrent.TimeUnit

import akka.actor._
import akka.io.IO
import com.ebay.neutrino.util.Random
import com.typesafe.config.{Config, ConfigFactory}
import spray.can.Http
import spray.http._

import scala.concurrent.duration._


object EchoServer extends App {

  // Extract port from args, if provided
  val port = if (args.size > 0) args(0).toInt else 8081

  // Load our configuration from file and merge in the port parameter
  val config = ConfigFactory.parseString(s"echo-server.port = $port") withFallback ConfigFactory.load("echo.conf")
  val system = ActorSystem("echo-server", config)
  system.actorOf(Props[EchoServer], "echo-server")
}


class EchoServer extends Actor with ActorLogging {
  import scala.language.implicitConversions

  implicit val system = context.system
  val startup  = System.currentTimeMillis
  val settings = EchoServerSettings(system)

  //Use the system's dispatcher as ExecutionContext
  import system.dispatcher

  // Register connection service
  IO(Http) ! Http.Bind(self, interface = settings.host, port = settings.port)

  // Connection handling is elided in this excerpt; registering each inbound connection is the minimal spray-can handshake.
  def receive = {
    case _: Http.Connected => sender() ! Http.Register(self)
  }
}

case class EchoServerSettings(host: String, port: Int, random: Boolean, duration: FiniteDuration)
  extends Extension
{
  def latency = random match {
    case false => duration
    case true  => Random.nextMillis(duration)
  }
}

object EchoServerSettings {

  def apply(c: Config): EchoServerSettings = EchoServerSettings(
    c getString "host",
    c getInt "port",
    c getBoolean "random",
    c getDuration("duration", TimeUnit.MILLISECONDS) milliseconds
  )

  def apply(system: ActorSystem): EchoServerSettings =
    EchoServerSettings(system.settings.config getConfig "echo-server")

} 
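For reference, a sketch of the echo-server block that EchoServerSettings expects; the values are illustrative.

import com.typesafe.config.ConfigFactory

val echoConfig = ConfigFactory.parseString(
  """
    |echo-server {
    |  host     = "0.0.0.0"
    |  port     = 8081
    |  random   = false
    |  duration = 20 ms
    |}
  """.stripMargin)

val settings = EchoServerSettings(echoConfig getConfig "echo-server")
// settings.latency is a fixed 20 milliseconds here; with random = true it comes from Random.nextMillis(duration)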
Example 129
Source File: StreamSpecUtil.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.pattern.stream

import java.io.File
import java.nio.file.Files
import java.util.concurrent.atomic.AtomicInteger

import akka.stream.ThrottleMode
import akka.stream.scaladsl._
import com.typesafe.config.ConfigFactory
import net.openhft.chronicle.wire.{WireIn, WireOut}

import scala.concurrent.duration._
import scala.language.postfixOps
import scala.collection.JavaConverters._
import scala.util.Random

object StreamSpecUtil {
  val elementCount = 100000
  val failTestAt = elementCount * 3 / 10
  val elementsAfterFail = 100
  val flowRate = 1000
  val flowUnit = 10 millisecond
  val burstSize = 500
}

class StreamSpecUtil[T, S](outputPort: Int = 1) {

  import StreamSpecUtil._
  val outputPorts = outputPort
  val tempPath: File = Files.createTempDirectory("persistent_queue").toFile
  val totalProcessed = elementCount + elementsAfterFail

  val config = ConfigFactory.parseMap {
    Map(
      "persist-dir" -> s"${tempPath.getAbsolutePath}",
      "output-ports" -> s"$outputPorts",
      "roll-cycle" -> "TEST_SECONDLY".toLowerCase()
    ).asJava
  }

  val in = Source(1 to elementCount)
  lazy val atomicCounter = Vector.tabulate(outputPorts)(_ => new AtomicInteger(0))
  lazy val flowCounter = Flow[Any].map(_ => 1L).reduce(_ + _).toMat(Sink.head)(Keep.right)
  lazy val merge = Merge[S](outputPorts)
  lazy val throttle = Flow[S].throttle(flowRate, flowUnit, burstSize, ThrottleMode.shaping)
  lazy val throttleMore = Flow[S].throttle(flowRate * 9 / 10, flowUnit, burstSize, ThrottleMode.shaping)
  lazy val head = Sink.head[S]
  lazy val last = Sink.last[S]
  val minRandom = 100
  lazy val random = Random.nextInt(elementCount - minRandom - 1) + minRandom
  lazy val filterCounter = new AtomicInteger(0)
  lazy val filterARandomElement = Flow[Event[T]].map(e => (e, filterCounter.incrementAndGet())).filter(_._2 != random).map(_._1)

  def commitCounter(outputPortId: Int) = atomicCounter(outputPortId).incrementAndGet()

  def clean() = delete(tempPath)

  private def delete(file: File): Unit = {
    if (file.isDirectory)
      Option(file.listFiles).map(_.toList).getOrElse(Nil).foreach(delete)
    file.delete
  }
}

case class Person(name: String, age: Int)

class PersonSerializer extends QueueSerializer[Person] {

  override def readElement(wire: WireIn): Option[Person] = {
    for {
      name <- Option(wire.read().`object`(classOf[String]))
      age <- Option(wire.read().int32)
    } yield { Person(name, age) }
  }

  override def writeElement(element: Person, wire: WireOut): Unit = {
    wire.write().`object`(classOf[String], element.name)
    wire.write().int32(element.age)
  }
} 
Example 130
Source File: BroadcastBufferCommitOrderSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.pattern.stream

import akka.actor.ActorSystem
import akka.stream.{ClosedShape, ActorMaterializer}
import akka.stream.scaladsl.{GraphDSL, RunnableGraph}
import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.Eventually
import org.scalatest.{BeforeAndAfterAll, Matchers, FlatSpec}
import org.squbs.testkit.Timeouts._

import scala.concurrent.Await

class BroadcastBufferCommitOrderSpec extends FlatSpec with Matchers with BeforeAndAfterAll with Eventually {

  implicit val system = ActorSystem("BroadcastBufferCommitOrderSpec", PersistentBufferSpec.testConfig)
  implicit val mat = ActorMaterializer()
  implicit val serializer = QueueSerializer[Int]()
  import StreamSpecUtil._

  override def afterAll = {
    Await.ready(system.terminate(), awaitMax)
  }

  it should "fail when an out of order commit is attempted and commit-order-policy = strict" in {
    val util = new StreamSpecUtil[Int, Event[Int]](2)
    import util._
    val buffer = BroadcastBufferAtLeastOnce[Int](ConfigFactory.parseString("commit-order-policy = strict").withFallback(config))
    val streamGraph = RunnableGraph.fromGraph(GraphDSL.create(flowCounter) { implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        val commit = buffer.commit[Int]
        val bcBuffer = builder.add(buffer.async)
        val mr = builder.add(merge)
        in ~> bcBuffer ~> filterARandomElement ~> commit ~> mr ~> sink
        bcBuffer ~> commit ~> mr
        ClosedShape
    })
    val sinkF = streamGraph.run()
    Await.result(sinkF.failed, awaitMax) shouldBe an[CommitOrderException]
    clean()
  }

  it should "not fail when an out of order commit is attempted and commit-order-policy = lenient" in {
    val util = new StreamSpecUtil[Int, Event[Int]](2)
    import util._
    val buffer = BroadcastBufferAtLeastOnce[Int](ConfigFactory.parseString("commit-order-policy = lenient").withFallback(config))
    val streamGraph = RunnableGraph.fromGraph(GraphDSL.create(flowCounter) { implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        val commit = buffer.commit[Int]
        val bcBuffer = builder.add(buffer.async)
        val mr = builder.add(merge)
        in ~> bcBuffer ~> filterARandomElement ~> commit ~> mr ~> sink
        bcBuffer ~> commit ~> mr
        ClosedShape
    })

    val countFuture = streamGraph.run()
    val count = Await.result(countFuture, awaitMax)
    eventually { buffer.queue shouldBe 'closed }
    count shouldBe (elementCount * outputPorts - 1)
    println(s"Total records processed $count")

    clean()
  }
} 
Example 131
Source File: QueueConfigSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.pattern.stream

import com.typesafe.config.{ConfigException, ConfigFactory}
import net.openhft.chronicle.queue.RollCycles
import net.openhft.chronicle.wire.WireType
import org.scalatest.{FlatSpec, Matchers}


class QueueConfigSpec extends FlatSpec with Matchers {

  it should "properly read the configuration from config" in {

    val configText =
      """
        | persist-dir = /tmp/myQueue
        | roll-cycle = xlarge_daily
        | wire-type = compressed_binary
        | block-size = 80m
        | index-spacing = 8k
        | output-ports = 3
        | commit-order-policy = strict
      """.stripMargin
    val config = ConfigFactory.parseString(configText)
    val queueConfig = QueueConfig.from(config)
    queueConfig.persistDir.getAbsolutePath shouldBe "/tmp/myQueue"
    queueConfig.rollCycle shouldBe RollCycles.XLARGE_DAILY
    queueConfig.wireType shouldBe WireType.COMPRESSED_BINARY
    queueConfig.blockSize shouldBe (80 * 1024 * 1024)
    queueConfig.indexSpacing shouldBe (8 * 1024)
    queueConfig.indexCount shouldBe RollCycles.XLARGE_DAILY.defaultIndexCount
    queueConfig.isBuffered shouldBe false
    queueConfig.epoch shouldBe 0L
    queueConfig.outputPorts shouldBe 3
    queueConfig.commitOrderPolicy shouldBe Strict
  }

  it should "properly assume default configurations" in {

    val configText =
      """
        | persist-dir = /tmp/myQueue
      """.stripMargin
    val config = ConfigFactory.parseString(configText)
    val queueConfig = QueueConfig.from(config)
    queueConfig.persistDir.getAbsolutePath shouldBe "/tmp/myQueue"
    queueConfig.rollCycle shouldBe RollCycles.DAILY
    queueConfig.wireType shouldBe WireType.BINARY
    queueConfig.blockSize shouldBe (64 * 1024 * 1024)
    queueConfig.indexSpacing shouldBe RollCycles.DAILY.defaultIndexSpacing
    queueConfig.indexCount shouldBe RollCycles.DAILY.defaultIndexCount
    queueConfig.isBuffered shouldBe false
    queueConfig.epoch shouldBe 0L
    queueConfig.outputPorts shouldBe 1
    queueConfig.commitOrderPolicy shouldBe Lenient
  }

  it should "set commit order policy to lenient" in {
    val configText =
      """
        | persist-dir = /tmp/myQueue
        | roll-cycle = xlarge_daily
        | wire-type = compressed_binary
        | block-size = 80m
        | index-spacing = 8k
        | output-ports = 3
        | commit-order-policy = lenient
      """.stripMargin
    val config = ConfigFactory.parseString(configText)
    val queueConfig = QueueConfig.from(config)
    queueConfig.commitOrderPolicy shouldBe Lenient
  }

  it should "throw BadValue exception when commit-order-policy is set to an invalid value" in {
    val configText =
      """
        | persist-dir = /tmp/myQueue
        | roll-cycle = xlarge_daily
        | wire-type = compressed_binary
        | block-size = 80m
        | index-spacing = 8k
        | output-ports = 3
        | commit-order-policy = invalid
      """.stripMargin
    val config = ConfigFactory.parseString(configText)
    a [ConfigException.BadValue] should be thrownBy QueueConfig.from(config)
  }
} 
Example 132
Source File: PersistentBufferCommitOrderSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.pattern.stream

import akka.actor.ActorSystem
import akka.stream.scaladsl.{GraphDSL, RunnableGraph}
import akka.stream.{ActorMaterializer, ClosedShape}
import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.Eventually
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
import org.squbs.testkit.Timeouts._

import scala.concurrent.Await

class PersistentBufferCommitOrderSpec extends FlatSpec with Matchers with BeforeAndAfterAll with Eventually {

  implicit val system = ActorSystem("PersistentBufferCommitOrderSpec", PersistentBufferSpec.testConfig)
  implicit val mat = ActorMaterializer()
  implicit val serializer = QueueSerializer[Int]()
  import StreamSpecUtil._

  override def afterAll = {
    Await.ready(system.terminate(), awaitMax)
  }

  it should "fail when an out of order commit is attempted and commit-order-policy = strict" in {
    val util = new StreamSpecUtil[Int, Event[Int]]
    import util._
    val buffer = PersistentBufferAtLeastOnce[Int](ConfigFactory.parseString("commit-order-policy = strict").withFallback(config))
    val commit = buffer.commit[Int]

    val streamGraph = RunnableGraph.fromGraph(GraphDSL.create(flowCounter) { implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        in ~> buffer.async ~> filterARandomElement ~> commit ~> sink
        ClosedShape
    })
    val sinkF = streamGraph.run()
    Await.result(sinkF.failed, awaitMax) shouldBe an[CommitOrderException]
    clean()
  }

  it should "not fail when an out of order commit is attempted and commit-order-policy = lenient" in {
    val util = new StreamSpecUtil[Int, Event[Int]]
    import util._
    val buffer = PersistentBufferAtLeastOnce[Int](ConfigFactory.parseString("commit-order-policy = lenient").withFallback(config))
    val commit = buffer.commit[Int]

    val streamGraph = RunnableGraph.fromGraph(GraphDSL.create(flowCounter) { implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        in ~> buffer.async ~> filterARandomElement ~> commit ~> sink
        ClosedShape
    })

    val countFuture = streamGraph.run()
    val count = Await.result(countFuture, awaitMax)
    count shouldBe elementCount - 1
    eventually { buffer.queue shouldBe 'closed }

    clean()
  }
} 
Example 133
Source File: ClientFlowHttpsSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.httpclient

import java.io.InputStream
import java.security.{KeyStore, SecureRandom}
import javax.net.ssl.{KeyManagerFactory, SSLContext, TrustManagerFactory}

import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.http.scaladsl.{ConnectionContext, Http}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import akka.util.ByteString
import com.typesafe.config.ConfigFactory
import org.scalatest.{AsyncFlatSpec, BeforeAndAfterAll, Matchers}
import org.squbs.resolver.ResolverRegistry
import org.squbs.testkit.Timeouts._

import scala.concurrent.{Await, Future}
import scala.util.{Success, Try}

object ClientFlowHttpsSpec {

  val config = ConfigFactory.parseString(
    """
      |helloHttps {
      |  type = squbs.httpclient
      |  akka.ssl-config.loose.disableHostnameVerification = true
      |}
    """.stripMargin)

  implicit val system = ActorSystem("ClientFlowHttpsSpec", config)
  implicit val materializer = ActorMaterializer()

  ResolverRegistry(system).register[HttpEndpoint]("LocalhostHttpsEndpointResolver") { (name, _) =>
    name match {
      case "helloHttps" =>
        Some(HttpEndpoint(s"https://localhost:$port", Some(sslContext("exampletrust.jks", "changeit")), None))
      case _ => None
    }
  }

  import akka.http.scaladsl.server.Directives._
  import system.dispatcher

  val route =
    path("hello") {
      get {
        complete(HttpEntity(ContentTypes.`text/html(UTF-8)`, "Hello World!"))
      }
    }

  val serverBinding = Await.result(Http().bindAndHandle(route, "localhost", 0,
    ConnectionContext.https(sslContext("example.com.jks", "changeit"))), awaitMax)
  val port = serverBinding.localAddress.getPort
}

class ClientFlowHttpsSpec  extends AsyncFlatSpec with Matchers with BeforeAndAfterAll {

  import ClientFlowHttpsSpec._

  override def afterAll: Unit = {
    serverBinding.unbind() map {_ => system.terminate()}
  }

  it should "make a call to Hello Service" in {
    val clientFlow = ClientFlow[Int]("helloHttps")
    val responseFuture: Future[(Try[HttpResponse], Int)] =
      Source.single(HttpRequest(uri = "/hello") -> 42)
        .via(clientFlow)
        .runWith(Sink.head)

    val (Success(response), _) = Await.result(responseFuture, awaitMax)
    response.status should be (StatusCodes.OK)
    val entity = response.entity.dataBytes.runFold(ByteString(""))(_ ++ _) map(_.utf8String)
    entity map { e => e shouldEqual "Hello World!" }
  }
} 
Example 134
Source File: ClientFlowIdleTimeoutSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.httpclient

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source, TcpIdleTimeoutException}
import com.typesafe.config.ConfigFactory
import org.scalatest.{AsyncFlatSpec, BeforeAndAfterAll, Matchers}
import org.squbs.resolver.ResolverRegistry
import org.squbs.testkit.Timeouts.awaitMax

import scala.concurrent.{Await, Promise}
import scala.util.{Failure, Success}

object ClientFlowIdleTimeoutSpec {

  val config = ConfigFactory.parseString(
    """
      |akka {
      |  loggers = [
      |    "akka.event.Logging$DefaultLogger"
      |  ]
      |
      |  loglevel = "DEBUG"
      |
      |  http {
      |    server {
      |      idle-timeout = 240 s
      |      request-timeout = 120 s
      |    }
      |
      |    client.idle-timeout = 1 s
      |
      |    host-connection-pool.max-retries = 0
      |  }
      |}
    """.stripMargin)

  implicit val system = ActorSystem("ClientFlowIdleTimeoutSpec", config)
  implicit val materializer = ActorMaterializer()

  ResolverRegistry(system).register[HttpEndpoint]("LocalhostEndpointResolver") { (svcName, _) => svcName match {
    case "slow" => Some(HttpEndpoint(s"http://localhost:$port"))
    case _ => None
  }}

  import akka.http.scaladsl.server.Directives._
  import system.dispatcher

  val route =
    path("slow") {
      get {
        val promise = Promise[String]
        // Never completing the promise
        onComplete(promise.future) {
          case Success(value) => complete(HttpEntity(ContentTypes.`text/html(UTF-8)`, "Slow...!"))
          case Failure(ex)    => complete(HttpEntity(ContentTypes.`text/html(UTF-8)`, "Slow failed...!"))
        }
      }
    }

  val serverBinding = Await.result(Http().bindAndHandle(route, "localhost", 0), awaitMax)
  val port = serverBinding.localAddress.getPort
}

class ClientFlowIdleTimeoutSpec  extends AsyncFlatSpec with Matchers with BeforeAndAfterAll {

  import ClientFlowIdleTimeoutSpec._

  override def afterAll: Unit = {
    serverBinding.unbind() map {_ => system.terminate()}
  }

  it should "drop the connection after idle-timeout and resume the stream with new connections" in {
    val clientFlow = ClientFlow[Int]("slow")

    val result =
      Source(1 to 10)
        .map(HttpRequest(uri = "/slow") -> _)
        .via(clientFlow)
        .runWith(Sink.seq)

    result map { r =>
      val failures = r.map(_._1).filter(_.isFailure).map(_.failed)
      failures should have size 10
      failures.forall(_.get.isInstanceOf[TcpIdleTimeoutException]) shouldBe true
      r.map(_._2) should contain theSameElementsAs(1 to 10)
    }
  }
} 
Example 135
Source File: TestNGRouteTest.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.testkit.japi

import akka.actor.ActorSystem
import akka.event.Logging
import akka.http.javadsl.model.HttpRequest
import akka.http.javadsl.server.RouteResult
import akka.http.javadsl.testkit.{RouteTest, TestRouteResult}
import akka.stream.{ActorMaterializer, Materializer}
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.testng.TestNGSuiteLike
import org.testng.Assert
import org.testng.annotations.{AfterClass, BeforeClass}

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.util.control.NonFatal


trait TestNGRouteTestBase extends RouteTest with RouteDefinitionTest with TestNGSuiteLike  {
  protected def systemResource: SystemResource
  implicit def system: ActorSystem = systemResource.system
  implicit def materializer: Materializer = systemResource.materializer

  override protected def createTestRouteResultAsync(request: HttpRequest, result: Future[RouteResult]): TestRouteResult =
    new TestRouteResult(result, awaitDuration)(system.dispatcher, materializer) {
      protected def assertEquals(expected: AnyRef, actual: AnyRef, message: String): Unit =
        reportDetails {
          Assert.assertEquals(actual, expected, message)
        }

      protected def assertEquals(expected: Int, actual: Int, message: String): Unit =
        Assert.assertEquals(actual, expected, message)

      protected def assertTrue(predicate: Boolean, message: String): Unit =
        Assert.assertTrue(predicate, message)

      protected def fail(message: String): Unit = {
        Assert.fail(message)
        throw new IllegalStateException("Assertion should have failed")
      }

      def reportDetails[T](block: ⇒ T): T = {
        try block catch {
          case NonFatal(t) ⇒ throw new AssertionError(t.getMessage + "\n" +
            "  Request was:      " + request + "\n" +
            "  Route result was: " + result + "\n", t)
        }
      }
    }
}

abstract class TestNGRouteTest extends TestNGRouteTestBase {

  protected def additionalConfig: Config = ConfigFactory.empty()

  private[this] val _systemResource = new SystemResource(Logging.simpleName(getClass), additionalConfig)

  protected def systemResource: SystemResource = _systemResource

  @BeforeClass(alwaysRun=true)
  def setup(): Unit = {
    systemResource.before()
  }

  @AfterClass(alwaysRun=true)
  def teardown(): Unit = {
    systemResource.after()
  }
}

class SystemResource(name: String, additionalConfig: Config) {
  protected def config = additionalConfig.withFallback(ConfigFactory.load())
  protected def createSystem(): ActorSystem = ActorSystem(name, config)
  protected def createMaterializer(system: ActorSystem): ActorMaterializer = ActorMaterializer()(system)

  implicit def system: ActorSystem = _system
  implicit def materializer: ActorMaterializer = _materializer

  private[this] var _system: ActorSystem = null
  private[this] var _materializer: ActorMaterializer = null

  def before(): Unit = {
    require((_system eq null) && (_materializer eq null))
    _system = createSystem()
    _materializer = createMaterializer(_system)
  }
  def after(): Unit = {
    Await.result(_system.terminate(), 5.seconds)
    _system = null
    _materializer = null
  }
} 
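In the example above, SystemResource merges additionalConfig over ConfigFactory.load() via withFallback, so a concrete test only has to override additionalConfig. A minimal sketch of such a subclass follows; the class name and the setting are hypothetical and not part of the squbs sources:

import com.typesafe.config.{Config, ConfigFactory}

class MyConfiguredRouteTest extends TestNGRouteTest {
  // Hypothetical override: the parsed fragment is layered over the
  // configuration loaded by ConfigFactory.load() in SystemResource.
  override protected def additionalConfig: Config =
    ConfigFactory.parseString("akka.loglevel = WARNING")
}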
Example 136
Source File: PortGetterSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.testkit

import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest._
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex._
import scala.language.postfixOps

object PortGetterSpec {

  val dummyJarsDir = getClass.getClassLoader.getResource("").getPath

  val classPaths = Array(
    "PortGetterSpec"
  ) map (dummyJarsDir + "/" + _)

  def config(actorSystemName: String) = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = ${actorSystemName}
       |  ${JMX.prefixConfig} = true
       |}
       |
       |default-listener.bind-port = 0
       |
       |my-listener {
       |  type = squbs.listener
       |  bind-address = "0.0.0.0"
       |  full-address = false
       |  bind-port = 0
       |  secure = false
       |  need-client-auth = false
       |  ssl-context = default
       |}
    """.stripMargin
  )

  def boot(actorSystemName: String) = UnicomplexBoot(config(actorSystemName))
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()

}

class PortGetterSpec extends TestKit(PortGetterSpec.boot("portGetterSpec").actorSystem) with ImplicitSender
  with FlatSpecLike with Matchers with BeforeAndAfterAll with PortGetter {

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "PortGetter" should "retrieve the port" in {
    port should be > 0
    port shouldEqual port("default-listener")
  }
}

class PortGetterCustomListenerSpec extends TestKit(PortGetterSpec.boot("PortGetterCustomListenerSpec").actorSystem)
  with ImplicitSender with FlatSpecLike with Matchers with BeforeAndAfterAll with PortGetter {

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  override def listener = "my-listener"

  "PortGetter" should "retrieve the port" in {
    port should be > 0
  }

  "PortGetter" should "return the specified listener's port" in {
    port should not equal port("default-listener")
    port shouldEqual port("my-listener")
  }
} 
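PortGetterSpec builds its boot configuration entirely with ConfigFactory.parseString and binds each listener to port 0, so the operating system assigns free ports that PortGetter then looks up. Independently of UnicomplexBoot, a fragment like the my-listener block can be merged with the default configuration in the usual Typesafe Config way; a standalone sketch, not part of the spec:

import com.typesafe.config.ConfigFactory

val listenerFragment = ConfigFactory.parseString(
  """
    |my-listener {
    |  type = squbs.listener
    |  bind-port = 0
    |}
  """.stripMargin)

// withFallback layers the fragment over the application.conf/reference.conf defaults
val merged = listenerFragment.withFallback(ConfigFactory.load())
merged.getInt("my-listener.bind-port")  // 0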
Example 137
Source File: PerpetualStreamMergeHubJSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.stream

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, Uri}
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.{FlatSpecLike, Matchers}
import org.squbs.unicomplex.Timeouts.{awaitMax, _}
import org.squbs.unicomplex._

import scala.collection.mutable
import scala.concurrent.Await

object PerpetualStreamMergeHubJSpec {
  val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath
  val classPaths = Array("JavaPerpetualStreamMergeHubSpec") map (dummyJarsDir + "/" + _)

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = JavaPerpetualStreamMergeHubSpec
       |  ${JMX.prefixConfig} = true
       |}
       |default-listener.bind-port = 0
      """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {
      (name, config) => ActorSystem(name, config)
    }
    .scanComponents(classPaths)
    .start()
}

class PerpetualStreamMergeHubJSpec extends TestKit(PerpetualStreamMergeHubJSpec.boot.actorSystem)
  with FlatSpecLike with Matchers  {

  val portBindings = Await.result((Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]], awaitMax)
  val psActorName = "/user/JavaPerpetualStreamMergeHubSpec/perpetualStreamWithMergeHub"
  val actorRef = Await.result((system.actorSelection(psActorName) ? RetrieveMyMessageStorageActorRef).mapTo[ActorRef],
    awaitMax)
  val port = portBindings("default-listener")


  it should "connect streams with mergehub" in {

    implicit val ac = ActorMaterializer()
    Http().singleRequest(HttpRequest(uri = Uri(s"http://127.0.0.1:$port/mergehub"), entity = "10"))
    Http().singleRequest(HttpRequest(uri = Uri(s"http://127.0.0.1:$port/mergehub"), entity = "11"))

    awaitAssert {
      val messages = Await.result((actorRef ? RetrieveMyMessages).mapTo[mutable.Set[MyMessage]], awaitMax)
      messages should have size 2
      messages should contain(MyMessage(10))
      messages should contain(MyMessage(11))
    }
  }
} 
Example 138
Source File: UnicomplexActorPublisherJSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.stream

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.testkit.scaladsl.TestSource
import akka.testkit.TestKit
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest._
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex._

import scala.concurrent.duration._

object UnicomplexActorPublisherJSpec {
  val myConfig: Config = ConfigFactory.parseString(
    """
      | squbs.actorsystem-name = UnicomplexActorPublisherJSpec
    """.stripMargin)
  val boot = UnicomplexBoot(myConfig).createUsing((name, config) => ActorSystem(name, config))
    .scanResources("/")
    .initExtensions
    .start()
}

final class UnicomplexActorPublisherJSpec extends TestKit(UnicomplexActorPublisherJSpec.boot.actorSystem)
    with FlatSpecLike with Matchers with BeforeAndAfterAll {
  implicit val materializer = ActorMaterializer()
  val duration = 10.second

  val in = TestSource.probe[String]

  // expose probe port(s)
  val mat = new UnicomplexActorPublisherJ(system).runnableGraph()
  val (pub, sub) = mat.toScala
  val (pubIn, pubTrigger) = pub.toScala

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "UnicomplexTriggerJ" should "activate flow by unicomplex" in {
    // send 2 elements to in
    pubIn.sendNext("1")
    pubIn.sendNext("2")
    sub.request(2)
    sub.expectNext(duration, "1")
    sub.expectNext("2")

    // re-send Active to unicomplex trigger, flow continues
    sub.request(2)
    sub.expectNoMessage(remainingOrDefault)
    pubTrigger ! SystemState
    pubIn.sendNext("3")
    pubIn.sendNext("4")
    sub.expectNext("3", "4")
  }
} 
Example 139
Source File: UnicomplexActorPublisherSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.stream

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Keep
import akka.stream.testkit.scaladsl.{TestSink, TestSource}
import akka.testkit.TestKit
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest._
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex._

import scala.concurrent.duration._

object UnicomplexActorPublisherSpec {
  val myConfig: Config = ConfigFactory.parseString(
    """
      | squbs.actorsystem-name = UnicomplexActorPublisherSpec
    """.stripMargin)
  val boot = UnicomplexBoot(myConfig).createUsing((name, config) => ActorSystem(name, config))
    .scanResources("/")
    .initExtensions
    .start()
}

final class UnicomplexActorPublisherSpec extends TestKit(UnicomplexActorPublisherSpec.boot.actorSystem)
    with FlatSpecLike with Matchers with BeforeAndAfterAll {

  implicit val materializer = ActorMaterializer()
  val duration = 10.second

  val in = TestSource.probe[String]

  // expose probe port(s)
  val ((pubIn, pubTrigger), sub) = LifecycleManaged().source(in).toMat(TestSink.probe[String](system))(Keep.both).run()

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "UnicomplexTrigger" should "activate flow by unicomplex" in {
    // send 2 elements to in
    pubIn.sendNext("1")
    pubIn.sendNext("2")
    sub.request(2)
    sub.expectNext(duration, "1")
    sub.expectNext("2")

    // re-send Active to unicomplex trigger, flow continues
    sub.request(2)
    sub.expectNoMessage(remainingOrDefault)
    pubTrigger ! SystemState
    pubIn.sendNext("3")
    pubIn.sendNext("4")
    sub.expectNext("3", "4")
  }
} 
Example 140
Source File: JavaFlowSvcSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpEntity.Chunked
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, StatusCodes}
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.{AsyncFlatSpecLike, BeforeAndAfterAll, Matchers}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.Timeouts._

object JavaFlowSvcSpec {

  val classPaths = Array(getClass.getClassLoader.getResource("classpaths/JavaFlowSvc").getPath)

  val config = ConfigFactory.parseString(
    s"""
       |default-listener.bind-port = 0
       |squbs {
       |  actorsystem-name = JavaFlowSvcSpec
       |  ${JMX.prefixConfig} = true
       |}
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()
}

class JavaFlowSvcSpec extends TestKit(
  JavaFlowSvcSpec.boot.actorSystem) with AsyncFlatSpecLike with BeforeAndAfterAll with Matchers {

  implicit val am = ActorMaterializer()

  val portBindingsF = (Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]]
  val portF = portBindingsF map { bindings => bindings("default-listener") }

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  it should "handle a normal request" in {
    for {
      port <- portF
      response <- entityAsString(s"http://127.0.0.1:$port/javaflowsvc/ping")
    } yield {
      response shouldBe "pong"
    }
  }

  it should "handle a chunked request and be able to provide a chunked response" in {
    val requestChunks = Source.single("Hi this is a test")
      .mapConcat { s => s.split(' ').toList }
      .map(HttpEntity.ChunkStreamPart(_))

    for {
      port <- portF
      response <- post(s"http://127.0.0.1:$port/javaflowsvc/chunks",
                       Chunked(ContentTypes.`text/plain(UTF-8)`, requestChunks))
      responseString <- response.entity.dataBytes.map(_.utf8String).toMat(Sink.fold("") { _ + _})(Keep.right).run()
    } yield {
      response.entity shouldBe 'chunked
      responseString should be("Received 5 chunks and 13 bytes.\r\nThis is the last chunk!")
    }
  }

  it should "get an InternalServerError with blank response if Flow collapses" in {
    for {
      port <- portF
      errResp <- get(s"http://127.0.0.1:$port/javaflowsvc/throwit")
      respEntity <- errResp.entity.toStrict(awaitMax)
    } yield {
      errResp.status shouldBe StatusCodes.InternalServerError
      respEntity.data.utf8String shouldBe 'empty
    }
  }
} 
Example 141
Source File: JavaRouteSvcSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.{AsyncFlatSpecLike, BeforeAndAfterAll, Matchers}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.Timeouts._

object JavaRouteSvcSpec {

  val classPaths = Array(getClass.getClassLoader.getResource("classpaths/JavaRouteSvc").getPath)

  val config = ConfigFactory.parseString(
    s"""
       |default-listener.bind-port = 0
       |squbs {
       |  actorsystem-name = JavaRouteSvcSpec
       |  ${JMX.prefixConfig} = true
       |}
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()
}

class JavaRouteSvcSpec extends TestKit(
  JavaRouteSvcSpec.boot.actorSystem) with AsyncFlatSpecLike with BeforeAndAfterAll with Matchers {

  implicit val am = ActorMaterializer()

  val portBindingsF = (Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]]
  val portF = portBindingsF map { bindings => bindings("default-listener") }

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  it should "handle a normal request" in {
    for {
      port <- portF
      response <- entityAsString(s"http://127.0.0.1:$port/javaroutesvc/ping")
    } yield {
      response shouldBe "pong"
    }
  }

  it should "apply the rejection handler to the service" in {
    for {
      port <- portF
      response <- entityAsString(s"http://127.0.0.1:$port/javaroutesvc/reject")
    } yield {
      response shouldBe "rejected"
    }
  }

  it should "apply the exception handler to the service" in {
    for {
      port <- portF
      response <- entityAsString(s"http://127.0.0.1:$port/javaroutesvc/exception")
    } yield {
      response shouldBe "exception"
    }
  }
} 
Example 142
Source File: JavaFailedFlowSvcSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.pattern._
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.OptionValues._
import org.scalatest.{AsyncFlatSpecLike, Matchers}

import scala.util.Failure

object JavaFailedFlowSvcSpec {

  val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath

  val classPath = dummyJarsDir + "/JavaFailedFlowSvc/META-INF/squbs-meta.conf"

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = JavaFailedFlowSvcSpec
       |  ${JMX.prefixConfig} = true
       |}
       |default-listener.bind-port = 0
       |akka.http.server.remote-address-header = on
    """.stripMargin
  )

  import Timeouts._

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanResources(withClassPath = false, classPath)
    .start(startupTimeout)
}


class JavaFailedFlowSvcSpec extends TestKit(JavaFailedFlowSvcSpec.boot.actorSystem) with AsyncFlatSpecLike with Matchers {

  "The JavaFailedFlowSvc" should "fail" in {
    import Timeouts._
    Unicomplex(system).uniActor ? SystemState map { state =>
      state shouldBe Failed
    }
  }

  "The JavaFailedFlowSvc" should "expose errors" in {
    import Timeouts._
    (Unicomplex(system).uniActor ? ReportStatus).mapTo[StatusReport] map { report =>
      report.state shouldBe Failed
      val initTry = report.cubes.values.head._2.value.reports.values.head.value
      initTry should matchPattern { case Failure(e: InstantiationException) => }
    }
  }
} 
Example 143
Source File: RootCtxRouteSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.http.scaladsl.server.Route
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.Timeouts._

import scala.concurrent.Await

object RootCtxRouteSpec{

  val classPaths = Array(getClass.getClassLoader.getResource("classpaths/RootCtxRoute").getPath)

  val config = ConfigFactory.parseString(
    s"""
       |default-listener.bind-port = 0
       |squbs {
       |  actorsystem-name = RootCtxRouteSpec
       |  ${JMX.prefixConfig} = true
       |}
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()
}

class RootCtxRouteSpec extends TestKit(
  RootCtxRouteSpec.boot.actorSystem) with FlatSpecLike with Matchers with ImplicitSender with BeforeAndAfterAll {

  implicit val am = ActorMaterializer()

  val portBindings = Await.result((Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]], awaitMax)
  val port = portBindings("default-listener")

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "Route" should "handle request with empty web-context" in {
    Await.result(entityAsString(s"http://127.0.0.1:$port/ping"), awaitMax) should be("pong")
  }
}

class RootRoute extends RouteDefinition {
  override def route: Route = path("ping") {
    complete{"pong"}
  }
} 
Example 144
Source File: JavaFailedRouteSvcSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.pattern._
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.OptionValues._
import org.scalatest.{AsyncFlatSpecLike, Matchers}

import scala.util.Failure

object JavaFailedRouteSvcSpec {

  val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath

  val classPath = dummyJarsDir + "/JavaFailedRouteSvc/META-INF/squbs-meta.conf"

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = JavaFailedRouteSvcSpec
       |  ${JMX.prefixConfig} = true
       |}
       |default-listener.bind-port = 0
       |akka.http.server.remote-address-header = on
    """.stripMargin
  )

  import Timeouts._

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanResources(withClassPath = false, classPath)
    .start(startupTimeout)
}


class JavaFailedRouteSvcSpec extends TestKit(JavaFailedRouteSvcSpec.boot.actorSystem) with AsyncFlatSpecLike with Matchers {

  "The JavaFailedRouteSvc" should "fail" in {
    import Timeouts._
    Unicomplex(system).uniActor ? SystemState map { state =>
      state shouldBe Failed
    }
  }

  "The JavaFailedRouteSvc" should "expose errors" in {
    import Timeouts._
    (Unicomplex(system).uniActor ? ReportStatus).mapTo[StatusReport] map { report =>
      report.state shouldBe Failed
      val initTry = report.cubes.values.head._2.value.reports.values.head.value
      initTry should matchPattern { case Failure(e: InstantiationException) => }
    }
  }
} 
Example 145
Source File: StopAndStartCubeSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import java.util.concurrent.TimeUnit

import akka.actor.{ActorIdentity, ActorSystem, Identify}
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import org.squbs.lifecycle.GracefulStop

import scala.util.Try

object StopAndStartCubeSpec {
  val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath

  val classPaths = Array(
    "DummyCube",
    "DummyCubeSvc",
    "DummySvc"
  ) map (dummyJarsDir + "/" + _)

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = StopAndStartCubeSpec
       |  ${JMX.prefixConfig} = true
       |}
       |default-listener.bind-port = 0
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()
}

class StopAndStartCubeSpec extends TestKit(StopAndStartCubeSpec.boot.actorSystem)
with FlatSpecLike with Matchers with ImplicitSender with BeforeAndAfterAll {

  implicit val timeout: akka.util.Timeout =
    Try(System.getProperty("test.timeout").toLong) map { millis =>
      akka.util.Timeout(millis, TimeUnit.MILLISECONDS)
    } getOrElse Timeouts.askTimeout

  import Timeouts.awaitMax

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "Unicomplex" should "be able to stop a cube" in {
    Unicomplex(system).uniActor ! StopCube("DummyCube")
    within(awaitMax) {
      expectMsg(Ack)
    }
    system.actorSelection("/user/DummyCube") ! Identify("hello")
    within(awaitMax) {
      val id = expectMsgType[ActorIdentity]
      id.ref should be(None)
    }
  }

  "Unicomplex" should "not be able to stop a stopped cube" in {
    Unicomplex(system).uniActor ! StopCube("DummyCube")
    expectNoMessage()
  }

  "Unicomplex" should "be able to start a cube" in {
    Unicomplex(system).uniActor ! StartCube("DummyCube")
    within(awaitMax) {
      expectMsg(Ack)
    }
    system.actorSelection("/user/DummyCube") ! Identify("hello")
    within(awaitMax) {
      val id = expectMsgType[ActorIdentity]
      id.ref should not be None
    }
  }

  "Unicomplex" should "not be able to start a running cube" in {
    Unicomplex(system).uniActor ! StartCube("DummyCube")
    expectNoMessage()
  }

} 
Example 146
Source File: CubeActorErrorStatesSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import javax.management.ObjectName
import javax.management.openmbean.CompositeData

import akka.actor.{Actor, ActorSystem}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, Uri}
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.OptionValues._
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.Timeouts._

import scala.concurrent.Await

object CubeActorErrorStatesSpec{

  val classPaths = Array(getClass.getClassLoader.getResource("classpaths/CubeActorErrorStates").getPath)

  val config = ConfigFactory.parseString(
    s"""
       |default-listener.bind-port = 0
       |squbs {
       |  actorsystem-name = cubeActorErrorStatesSpec
       |  ${JMX.prefixConfig} = true
       |}
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()
}

class CubeActorErrorStatesSpec extends TestKit(CubeActorErrorStatesSpec.boot.actorSystem)
  with FlatSpecLike with Matchers with ImplicitSender with BeforeAndAfterAll {

  val portBindings = Await.result((Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]], awaitMax)
  val port = portBindings("default-listener")


  implicit val am = ActorMaterializer()

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "Route" should "handle request with empty web-context" in {
    Http().singleRequest(HttpRequest(uri = Uri(s"http://127.0.0.1:$port/test2?msg=1")))
    Thread.sleep(100)
    Http().singleRequest(HttpRequest(uri = Uri(s"http://127.0.0.1:$port/test1?msg=1")))
    Thread.sleep(100)
    Http().singleRequest(HttpRequest(uri = Uri(s"http://127.0.0.1:$port/test1?msg=2")))
    Thread.sleep(1000) // wait for the agent to get refreshed
    import org.squbs.unicomplex.JMX._
    val errorStates = get(new ObjectName(prefix(system) + cubeStateName + "CubeActorErrorStates"), "ActorErrorStates")
      .asInstanceOf[Array[CompositeData]]
    errorStates should have length 2
    val state1 = errorStates.find(_.get("actorPath") == "/user/CubeActorErrorStates/test1-CubeActorTest-handler").value
    state1.get("errorCount") shouldBe 2
    state1.get("latestException").asInstanceOf[String] should include ("test1:2")
    val state2 = errorStates.find(_.get("actorPath") == "/user/CubeActorErrorStates/test2-CubeActorTest-handler").value
    state2.get("errorCount") shouldBe 1
    state2.get("latestException").asInstanceOf[String] should include ("test2:1")
  }
}

class CubeActorTest extends Actor {
  override def receive: Receive = {
    case r: HttpRequest =>
      val msg = r.uri.query().get("msg").getOrElse("")
      throw new RuntimeException(s"${r.uri.path}:$msg")
  }
} 
Example 147
Source File: BadPipelineNameSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex.pipeline

import akka.actor.ActorSystem
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex._

import scala.concurrent.Await

object BadPipelineNameSpec {

  val classPaths = Array(getClass.getClassLoader.getResource("classpaths/pipeline/BadPipelineNameSpec").getPath)

  val config = ConfigFactory.parseString(
    s"""
       |default-listener.bind-port = 0
       |squbs {
       |  actorsystem-name = BadPipelineNameSpec
       |  ${JMX.prefixConfig} = true
       |}
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()
}

class BadPipelineNameSpec extends TestKit(
  BadPipelineNameSpec.boot.actorSystem) with FlatSpecLike with Matchers with ImplicitSender with BeforeAndAfterAll {

  implicit val am = ActorMaterializer()
  import Timeouts._

  val portBindings = Await.result((Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]], awaitMax)
  val port = portBindings("default-listener")

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "System state" should "be Failed" in {
    Unicomplex(system).uniActor ! ReportStatus
    val StatusReport(systemState, _, _) = expectMsgType[StatusReport]
    systemState should be(Failed)
  }

} 
Example 148
Source File: BadPipelineFactorySpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex.pipeline

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex._

object BadPipelineFactorySpec {

  val classPaths = Array(getClass.getClassLoader.getResource("classpaths/pipeline/BadPipelineFactorySpec").getPath)

  val config = ConfigFactory.parseString(
    s"""
       |default-listener.bind-port = 0
       |squbs {
       |  actorsystem-name = BadPipelineFactorySpec
       |  ${JMX.prefixConfig} = true
       |}
       |
       |dummyFlow {
       |  type = squbs.pipelineflow
       |  factory = org.squbs.unicomplex.pipeline.NotExists
       |}
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()
}

class BadPipelineFactorySpec extends TestKit(
  BadPipelineFactorySpec.boot.actorSystem) with FlatSpecLike with Matchers with ImplicitSender with BeforeAndAfterAll {

  implicit val am = ActorMaterializer()

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "System state" should "be Failed" in {
    Unicomplex(system).uniActor ! ReportStatus
    val StatusReport(systemState, _, _) = expectMsgType[StatusReport]
    systemState should be(Failed)
  }

} 
Example 149
Source File: ClassNotFoundSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.pattern._
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.OptionValues._
import org.scalatest.{AsyncFlatSpecLike, Matchers}

import scala.util.Failure

object ClassNotFoundSpec {

  val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath

  val classPath = dummyJarsDir + "/ClassNotFoundCube/META-INF/squbs-meta.conf"

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = ClassNotFoundSpec
       |  ${JMX.prefixConfig} = true
       |}
       |default-listener.bind-port = 0
       |akka.http.server.remote-address-header = on
    """.stripMargin
  )

  import Timeouts._

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanResources(withClassPath = false, classPath)
    .start(startupTimeout)
}


class ClassNotFoundSpec extends TestKit(ClassNotFoundSpec.boot.actorSystem) with AsyncFlatSpecLike with Matchers {

  it should "fail system when specified class is not found" in {
    import Timeouts._
    Unicomplex(system).uniActor ? SystemState map { state =>
      state shouldBe Failed
    }
  }

  it should "expose errors and fail cube when specified class is not found" in {
    import Timeouts._
    (Unicomplex(system).uniActor ? ReportStatus).mapTo[StatusReport] map { report =>
      report.state shouldBe Failed
      val initTry1 = report.cubes.values.head._2.value.reports.values.head.value
      initTry1 should matchPattern { case Failure(e: ClassNotFoundException) => }
      val initTry2 = report.cubes.values.head._2.value.reports.values.tail.head.value
      initTry2 should matchPattern { case Failure(e: ClassNotFoundException) => }

    }
  }
} 
Example 150
Source File: UnicomplexTimeoutSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.http.scaladsl.model.StatusCodes
import akka.stream.ActorMaterializer
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest._
import org.scalatest.concurrent.Waiters
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.Timeouts._

import scala.concurrent.Await

object UnicomplexTimeoutSpec {

  val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath

  val classPaths = Array(
    "DummySvcActor"
  ) map (dummyJarsDir + "/" + _)

  val aConfig = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = UnicomplexTimeoutSpec
       |  ${JMX.prefixConfig} = true
       |}
       |default-listener {
       |  bind-port = 0
       |}
       |akka.http.server {
       |  request-timeout = 3s
       |}
     """.stripMargin)

  val boot = UnicomplexBoot(aConfig)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()

}

class UnicomplexTimeoutSpec extends TestKit(UnicomplexTimeoutSpec.boot.actorSystem) with ImplicitSender
    with WordSpecLike with Matchers with BeforeAndAfterAll with Waiters {

  implicit val am = ActorMaterializer()
  import akka.pattern.ask
  val port = Await.result((Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]], awaitMax)("default-listener")

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "Unicomplex" must {

    "Cause a timeout event" in {
      system.settings.config getString "akka.http.server.request-timeout" should be ("3s")
      val response = Await.result(get(s"http://127.0.0.1:$port/dummysvcactor/timeout"), awaitMax)
      // TODO This test is not very useful yet. Explore how, and whether, we can intervene when a timeout occurs.
      // There may be scenarios where we want to do some work when a timeout happens, so having a hook
      // would be useful.
      response.status should be (StatusCodes.ServiceUnavailable)
    }
  }
} 
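The spec above reads the request-timeout back as the raw string "3s". The same value can also be read as a typed duration with the standard Config API; a small standalone sketch, independent of the spec:

import java.util.concurrent.TimeUnit
import com.typesafe.config.ConfigFactory

val cfg = ConfigFactory.parseString("akka.http.server.request-timeout = 3s")

cfg.getString("akka.http.server.request-timeout")                           // "3s"
cfg.getDuration("akka.http.server.request-timeout")                         // java.time.Duration of 3 seconds
cfg.getDuration("akka.http.server.request-timeout", TimeUnit.MILLISECONDS)  // 3000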
Example 151
Source File: FlowDefinitionSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.http.scaladsl.model.Uri.Path
import akka.http.scaladsl.model._
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Flow
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.Timeouts._

import scala.concurrent.Await

object FlowDefinitionSpec {

  val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath
  val classPath = dummyJarsDir + "/FlowDefinitionSpec/META-INF/squbs-meta.conf"

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = FlowDefinitionSpec
       |  ${JMX.prefixConfig} = true
       |}
       |default-listener.bind-port = 0
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanResources(withClassPath = false, classPath)
    .initExtensions.start()

}

class TestFlowDefinition extends FlowDefinition with WebContext {
  val firstPath = s"/$webContext/first"
  val secondPath = s"/$webContext/second"
  val thirdPath = s"/$webContext/third"

  @volatile var count = 0
  def flow = Flow[HttpRequest].map {
    case HttpRequest(HttpMethods.GET, Uri(_, _, Path(`firstPath`), _, _), _, _, _) =>
      count += 1
      HttpResponse(StatusCodes.OK, entity = count.toString)

    case HttpRequest(HttpMethods.GET, Uri(_, _, Path(`secondPath`), _, _), _, _, _) =>
      count += 1
      HttpResponse(StatusCodes.OK, entity = count.toString)

    case HttpRequest(HttpMethods.GET, Uri(_, _, Path(`thirdPath`), _, _), _, _, _) =>
      HttpResponse(StatusCodes.OK, entity = {count += 1; count.toString})
  }
}

class FlowDefinitionSpec extends TestKit(
  FlowDefinitionSpec.boot.actorSystem) with FlatSpecLike with Matchers with ImplicitSender with BeforeAndAfterAll {

  implicit val am = ActorMaterializer()

  val portBindings = Await.result((Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]], awaitMax)
  val port = portBindings("default-listener")

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "The test actor" should "return correct count value" in {
    // The behaviour is different from Spray: responses are no longer cached.
    Await.result(entityAsString(s"http://127.0.0.1:$port/flowdef/first"), awaitMax) should be ("1")
    Await.result(entityAsString(s"http://127.0.0.1:$port/flowdef/first"), awaitMax) should be ("2")
    Await.result(entityAsString(s"http://127.0.0.1:$port/flowdef/second"), awaitMax) should be ("3")
    Await.result(entityAsString(s"http://127.0.0.1:$port/flowdef/third"), awaitMax) should be ("4")
    Await.result(entityAsString(s"http://127.0.0.1:$port/flowdef/third"), awaitMax) should be ("5")
  }
} 
Example 152
Source File: ScanResourceSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import java.util.concurrent.TimeUnit
import javax.management.ObjectName

import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestKit}
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.Waiters
import org.scalatest.{BeforeAndAfterAll, Inspectors, Matchers, WordSpecLike}
import org.squbs.lifecycle.GracefulStop

import scala.util.Try

object ScanResourceSpec {

  val jmxPrefix = "ScanResourceSpec"

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = scanResourceSpec
       |  ${JMX.prefixConfig} = true
       |}
       |
       |default-listener.bind-port = 0
    """.stripMargin)

  implicit val akkaTimeout: Timeout =
    Try(System.getProperty("test.timeout").toLong) map { millis =>
      akka.util.Timeout(millis, TimeUnit.MILLISECONDS)
    } getOrElse Timeouts.askTimeout

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanResources()
    .initExtensions.start()
}

class ScanResourceSpec extends TestKit(ScanResourceSpec.boot.actorSystem) with ImplicitSender with WordSpecLike
    with Matchers with Inspectors with BeforeAndAfterAll with Waiters {

  import ScanResourceSpec._
  import system.dispatcher

  "The scanned resource" must {

    "have some actors started" in {
      val w = new Waiter

      system.actorSelection("/user/ScanResourceCube").resolveOne().onComplete { result =>
        w {
          assert(result.isSuccess)
        }
        w.dismiss()
      }
      w.await()
    }

    "expose proper cube state through MXBean" in {
      import org.squbs.unicomplex.JMX._
      val cubeName = "ScanResourceCube"
      val cubesName = new ObjectName(prefix(system) + cubeStateName + cubeName)
      get(cubesName, "Name") should be (cubeName)
      get(cubesName, "CubeState") should be ("Active")
      val wellKnownActors = get(cubesName, "WellKnownActors").asInstanceOf[String]
      println(wellKnownActors)
      wellKnownActors should include ("Actor[akka://scanResourceSpec/user/ScanResourceCube/Prepender#")
      wellKnownActors should include ("Actor[akka://scanResourceSpec/user/ScanResourceCube/Appender#")
    }
  }

  override protected def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }
} 
Example 153
Source File: FailedRouteSvcSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.pattern._
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.OptionValues._
import org.scalatest.{AsyncFlatSpecLike, Matchers}

import scala.util.Failure

object FailedRouteSvcSpec {

  val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath

  val classPath = dummyJarsDir + "/FailedRouteSvc/META-INF/squbs-meta.conf"

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = FailedRouteSvc
       |  ${JMX.prefixConfig} = true
       |}
       |default-listener.bind-port = 0
       |akka.http.server.remote-address-header = on
    """.stripMargin
  )

  import Timeouts._

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanResources(withClassPath = false, classPath)
    .start(startupTimeout)
}


class FailedRouteSvcSpec extends TestKit(FailedRouteSvcSpec.boot.actorSystem) with AsyncFlatSpecLike with Matchers {

  "The FailedRouteSvc" should "fail" in {
    import Timeouts._
    Unicomplex(system).uniActor ? SystemState map { state =>
      state shouldBe Failed
    }
  }

  "The FailedRouteSvc" should "expose errors" in {
    import Timeouts._
    (Unicomplex(system).uniActor ? ReportStatus).mapTo[StatusReport] map { report =>
      report.state shouldBe Failed
      val initTry = report.cubes.values.head._2.value.reports.values.head.value
      initTry should matchPattern { case Failure(e: InstantiationException) => }
    }
  }
}


class FailedRouteSvc(content: String) extends RouteDefinition {

  def route = path("ping") {
    complete(content)
  }
} 
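FailedRouteSvc takes a constructor parameter, which is presumably why the spec asserts an InstantiationException when the route is instantiated. For contrast, a hypothetical no-argument variant that would construct cleanly (a sketch, not part of the squbs test resources):

class WorkingRouteSvc extends RouteDefinition {

  // Same shape as FailedRouteSvc, but with no constructor parameter
  def route = path("ping") {
    complete("pong")
  }
}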
Example 154
Source File: RootCtxFlowSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import org.squbs.lifecycle.GracefulStop

import scala.concurrent.Await

object RootCtxFlowSpec{

  val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath
  val classPath = dummyJarsDir + "/RootCtxFlowSpec/META-INF/squbs-meta.conf"

  val config = ConfigFactory.parseString(
    s"""
       |default-listener.bind-port = 0
       |squbs {
       |  actorsystem-name = RootCtxFlowSpec
       |  ${JMX.prefixConfig} = true
       |}
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanResources(withClassPath = false, classPath)
    .initExtensions.start()
}

class RootCtxFlowSpec extends TestKit(
  RootCtxFlowSpec.boot.actorSystem) with FlatSpecLike with Matchers with ImplicitSender with BeforeAndAfterAll {

  implicit val am = ActorMaterializer()
  import org.squbs.unicomplex.Timeouts._

  val portBindings = Await.result((Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]], awaitMax)
  val port = portBindings("default-listener")

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "Flow" should "handle request with empty web-context" in {
    Await.result(entityAsString(s"http://127.0.0.1:$port/ping"), awaitMax) should be("pong")
  }
} 
Example 155
Source File: UnicomplexPortAutoSelectSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest._
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.Timeouts._

import scala.concurrent.Await
import scala.language.postfixOps

object UnicomplexPortAutoSelectSpec {

  val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath

  val classPaths = Array(
    "DummySvc"
  ) map (dummyJarsDir + "/" + _)

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = UnicomplexPortAutoSelectSpec
       |  ${JMX.prefixConfig} = true
       |}
       |
       |default-listener.bind-port = 0
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()

}

class UnicomplexPortAutoSelectSpec extends TestKit(UnicomplexPortAutoSelectSpec.boot.actorSystem) with ImplicitSender
  with FlatSpecLike with Matchers with BeforeAndAfterAll  {

  import akka.pattern.ask

  val portBindings = Await.result((Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]], awaitMax)

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "Unicomplex" should "let the system pick the port" in {
    portBindings("default-listener") should not be(8080)
    portBindings("default-listener") should not be(13000) // bind-port specified in src/test/resources/reference.conf
  }
} 
Example 156
Source File: RouteActorHandlerSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.http.scaladsl.server._
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.Timeouts._

import scala.concurrent.Await

object RouteActorHandlerSpec {

  val classPaths = Array(getClass.getClassLoader.getResource("classpaths/RouteActorHandler").getPath)

  val config = ConfigFactory.parseString(
    s"""
       |default-listener.bind-port = 0
       |squbs {
       |  actorsystem-name = RouteActorHandlerSpec
       |  ${JMX.prefixConfig} = true
       |}
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()
}

class RouteActorHandlerSpec extends TestKit(
  RouteActorHandlerSpec.boot.actorSystem) with FlatSpecLike with BeforeAndAfterAll with Matchers {

  implicit val am = ActorMaterializer()

  val portBindings = Await.result((Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]], awaitMax)
  val port = portBindings("default-listener")

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "Rejection handler" should "be applied to the route actor" in {
    Await.result(entityAsString(s"http://127.0.0.1:$port/ctx/reject"), awaitMax) should be("rejected")
  }

  "Exception handler" should "be applied to the route actor" in {
    Await.result(entityAsString(s"http://127.0.0.1:$port/ctx/exception"), awaitMax) should be("exception")
  }
}

class Service extends RouteDefinition {

  override def rejectionHandler: Option[RejectionHandler] = Some(RejectionHandler.newBuilder().handle {
    case ServiceRejection => complete("rejected")
  }.result())

  override def exceptionHandler: Option[ExceptionHandler] = Some(ExceptionHandler {
    case _: ServiceException => complete("exception")
  })

  override def route: Route = path("reject") {
    reject(ServiceRejection)
  } ~ path("exception") {
    ctx =>
      throw new ServiceException
  }

  object ServiceRejection extends Rejection

  class ServiceException extends Exception
} 
Example 157
Source File: UnicomplexTestModeOnSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest._
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.Timeouts._

import scala.concurrent.Await
import scala.language.postfixOps

object UnicomplexTestModeOnSpec {

  val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath

  val classPaths = Array(
    "DummySvc"
  ) map (dummyJarsDir + "/" + _)

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = unicomplexTestModeSpec
       |  ${JMX.prefixConfig} = true
       |}
       |
       |default-listener.bind-port = 0
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()

}

class UnicomplexTestModeOnSpec extends TestKit(UnicomplexTestModeOnSpec.boot.actorSystem) with ImplicitSender
  with FlatSpecLike with Matchers with BeforeAndAfterAll  {

  import akka.pattern.ask

  val portBindings = Await.result((Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]], awaitMax)

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "Unicomplex" should "let the system pick the port" in {
    portBindings("default-listener") should not be 8080
    portBindings("default-listener") should not be 13000 // bind-port specified in src/test/resources/reference.conf
  }
} 
Example 158
Source File: JavaRootSvcSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.{AsyncFlatSpecLike, BeforeAndAfterAll, Matchers}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.Timeouts._

object JavaRootSvcSpec {

  val classPaths = Array(getClass.getClassLoader.getResource("classpaths/JavaRootSvc").getPath)

  val config = ConfigFactory.parseString(
    s"""
       |default-listener.bind-port = 0
       |squbs {
       |  actorsystem-name = JavaRootSvcSpec
       |  ${JMX.prefixConfig} = true
       |}
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()
}

class JavaRootSvcSpec extends TestKit(
  JavaRootSvcSpec.boot.actorSystem) with AsyncFlatSpecLike with BeforeAndAfterAll with Matchers {

  implicit val am = ActorMaterializer()

  val portBindingsF = (Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]]
  val portF = portBindingsF map { bindings => bindings("default-listener") }

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  it should "handle a normal request" in {
    for {
      port <- portF
      response <- entityAsString(s"http://127.0.0.1:$port/ping")
    } yield {
      response shouldBe "pong"
    }
  }

  it should "apply the rejection handler to the service" in {
    for {
      port <- portF
      response <- entityAsString(s"http://127.0.0.1:$port/reject")
    } yield {
      response shouldBe "rejected"
    }
  }

  it should "apply the exception handler to the service" in {
    for {
      port <- portF
      response <- entityAsString(s"http://127.0.0.1:$port/exception")
    } yield {
      response shouldBe "exception"
    }
  }
} 
Example 159
Source File: StreamTestSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor._
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.FileIO
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.Waiters
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.Timeouts._

import scala.concurrent.Await

object StreamTestSpec {
  val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath

  val classPaths = Array(
    "StreamCube",
    "StreamSvc"
  ) map (dummyJarsDir + "/" + _)

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = StreamTestSpec
       |  ${JMX.prefixConfig} = true
       |}
       |default-listener.bind-port = 0
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions
    .start()
}

class StreamTestSpec extends TestKit(StreamTestSpec.boot.actorSystem) with ImplicitSender with WordSpecLike
    with Matchers with BeforeAndAfterAll with Waiters {

  implicit val am = ActorMaterializer()
  import system.dispatcher

  val portBindings = Await.result((Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]], awaitMax)
  val port = portBindings("default-listener")

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "UniComplex" must {

    "upload file with correct parts" in {

      val filePath =
        StreamTestSpec.getClass.getResource("/classpaths/StreamSvc/dummy.txt").getPath
      val file = new java.io.File(filePath)
      require(file.exists() && file.canRead)

      val chunkSize = 8192
      val responseF = Http().singleRequest(HttpRequest(HttpMethods.POST,
                                           uri = s"http://127.0.0.1:$port/streamsvc/file-upload",
                                           entity = HttpEntity(MediaTypes.`application/octet-stream`,
                                                               FileIO.fromPath(file.toPath, chunkSize))))

      val actualResponseEntity = Await.result(responseF flatMap extractEntityAsString, awaitMax)
      val expectedNumberOfChunks = Math.ceil(file.length.toDouble / chunkSize).toInt
      val expectedResponseEntity = s"Chunk Count: $expectedNumberOfChunks ByteCount: ${file.length}"
      actualResponseEntity should be (expectedResponseEntity)
    }
  }
} 
Example 160
Source File: JavaRouteNoHandlerSvcSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.{AsyncFlatSpecLike, BeforeAndAfterAll, Matchers}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.Timeouts._

object JavaRouteNoHandlerSvcSpec {

  val classPaths = Array(getClass.getClassLoader.getResource("classpaths/JavaRouteNoHandlerSvc").getPath)

  val config = ConfigFactory.parseString(
    s"""
       |default-listener.bind-port = 0
       |squbs {
       |  actorsystem-name = JavaRouteNoHandlerSvcSpec
       |  ${JMX.prefixConfig} = true
       |}
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()
}

class JavaRouteNoHandlerSvcSpec extends TestKit(
  JavaRouteNoHandlerSvcSpec.boot.actorSystem) with AsyncFlatSpecLike with BeforeAndAfterAll with Matchers {

  implicit val am = ActorMaterializer()

  val portBindingsF = (Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]]
  val portF = portBindingsF map { bindings => bindings("default-listener") }

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  it should "handle a normal request" in {
    for {
      port <- portF
      response <- entityAsString(s"http://127.0.0.1:$port/javaroutenohandlersvc/ping")
    } yield {
      response shouldBe "pong"
    }
  }

  it should "apply the rejection handler to the service" in {
    for {
      port <- portF
      response <- entityAsString(s"http://127.0.0.1:$port/javaroutenohandlersvc/reject")
    } yield {
      response shouldBe "There was an internal server error."
    }
  }

  it should "apply the exception handler to the service" in {
    for {
      port <- portF
      response <- entityAsString(s"http://127.0.0.1:$port/javaroutenohandlersvc/exception")
    } yield {
      response shouldBe "There was an internal server error."
    }
  }
} 
Example 161
Source File: InvalidPipelineFlowSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.pattern._
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.OptionValues._
import org.scalatest.{FlatSpecLike, Matchers}

import scala.concurrent.Await
import scala.util.Failure

object InvalidPipelineFlowSpec {

  val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath

  val classPath = dummyJarsDir + "/InvalidPipelineFlowSvc/META-INF/squbs-meta.conf"

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = InvalidPipelineFlowSpec
       |  ${JMX.prefixConfig} = true
       |}
       |default-listener.bind-port = 0
       |akka.http.server.remote-address-header = on
    """.stripMargin
  )

  import Timeouts._

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanResources(withClassPath = false, classPath)
    .start(startupTimeout)
}


class InvalidPipelineFlowSpec extends TestKit(InvalidPipelineFlowSpec.boot.actorSystem) with FlatSpecLike with Matchers {

  "The InvalidPipelineFlowSvc" should "fail" in {
    import Timeouts._
    Await.result(Unicomplex(system).uniActor ? SystemState, awaitMax) shouldBe Failed
  }

  "The InvalidPipelineFlowSvc" should "expose errors" in {
    import Timeouts._
    val report = Await.result((Unicomplex(system).uniActor ? ReportStatus).mapTo[StatusReport], awaitMax)
    report.state shouldBe Failed
    val initTry = report.cubes.values.head._2.value.reports.values.head.value
    initTry should matchPattern { case Failure(e: IllegalArgumentException) => }
  }
} 
Example 162
Source File: SystemSettingBeanSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import com.typesafe.config.ConfigFactory
import org.scalatest.{FlatSpecLike, Matchers}

import scala.collection.JavaConverters._

class SystemSettingBeanSpec extends FlatSpecLike with Matchers {

  "SystemSettingBean" should "read config correctly" in {
    val config = ConfigFactory.parseString(
      """
        |root {
        |  str: "1"
        |  number: 2
        |  bool: true
        |  nil: null
        |  map: {
        |    k1: "v1"
        |    k2: {
        |      k21: "v21"
        |      k22: [
        |        "v220"
        |      ]
        |    }
        |  }
        |  oneLevelList: ["v0", "v1", "v2"]
        |  listList: [
        |    ["v00", "v01", "v02"],
        |    ["v10", "v11"],
        |    ["v20", "v21"]
        |  ]
        |  listObject: [
        |    {
        |      k1: "v1"
        |    },
        |    {
        |      k2: "v2"
        |    }
        |  ]
        |}
      """.stripMargin)
    val bean = new SystemSettingBean(config)
    val settings = bean.getSystemSetting.asScala
    settings.length should be(19)
    settings.find(_.key.equals("root.str")).get.value should be("1")
    settings.find(_.key.equals("root.number")).get.value should be("2")
    settings.find(_.key.equals("root.nil")).get.value should be("null")
    settings.find(_.key.equals("root.bool")).get.value should be("true")
    settings.find(_.key.equals("root.map.k1")).get.value should be("v1")
    settings.find(_.key.equals("root.map.k2.k21")).get.value should be("v21")
    settings.find(_.key.equals("root.map.k2.k22[0]")).get.value should be("v220")
    settings.find(_.key.equals("root.oneLevelList[0]")).get.value should be("v0")
    settings.find(_.key.equals("root.oneLevelList[1]")).get.value should be("v1")
    settings.find(_.key.equals("root.oneLevelList[2]")).get.value should be("v2")
    settings.find(_.key.equals("root.listList[0][0]")).get.value should be("v00")
    settings.find(_.key.equals("root.listList[0][1]")).get.value should be("v01")
    settings.find(_.key.equals("root.listList[0][2]")).get.value should be("v02")
    settings.find(_.key.equals("root.listList[1][0]")).get.value should be("v10")
    settings.find(_.key.equals("root.listList[1][1]")).get.value should be("v11")
    settings.find(_.key.equals("root.listList[2][0]")).get.value should be("v20")
    settings.find(_.key.equals("root.listList[2][1]")).get.value should be("v21")
    settings.find(_.key.equals("root.listObject[0].k1")).get.value should be("v1")
    settings.find(_.key.equals("root.listObject[1].k2")).get.value should be("v2")
  }
} 
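A note on what the library itself provides here: Config.entrySet() already exposes flattened dotted key paths, but a list comes back as a single ConfigList entry, so the indexed keys asserted above (root.map.k2.k22[0], root.listList[1][0], ...) are evidently produced by SystemSettingBean itself. A minimal sketch of the library-level behaviour this builds on:

import com.typesafe.config.ConfigFactory
import scala.collection.JavaConverters._

val c = ConfigFactory.parseString(
  """
    |root {
    |  map.k1 = "v1"
    |  list = ["a", "b"]
    |}
  """.stripMargin)

c.entrySet().asScala.map(_.getKey)    // Set("root.map.k1", "root.list") -- the whole list is one entry
c.getStringList("root.list").asScala  // Buffer("a", "b")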
Example 163
Source File: FailedFlow1Spec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.pattern._
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.OptionValues._
import org.scalatest.{AsyncFlatSpecLike, Matchers}

import scala.util.Failure

object FailedFlow1Spec {

  val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath

  val classPath = dummyJarsDir + "/DummyFailedFlowSvc1/META-INF/squbs-meta.conf"

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = FailedFlow1Spec
       |  ${JMX.prefixConfig} = true
       |}
       |default-listener.bind-port = 0
       |akka.http.server.remote-address-header = on
    """.stripMargin
  )

  import Timeouts._

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanResources(withClassPath = false, classPath)
    .start(startupTimeout)
}


class FailedFlow1Spec extends TestKit(FailedFlow1Spec.boot.actorSystem) with AsyncFlatSpecLike with Matchers {

  "The DummyFailedFlowSvc1" should "fail" in {
    import Timeouts._
    Unicomplex(system).uniActor ? SystemState map { state =>
      state shouldBe Failed
    }
  }

  "The DummyFailedFlowSvc1" should "expose errors" in {
    import Timeouts._
    (Unicomplex(system).uniActor ? ReportStatus).mapTo[StatusReport] map { report =>
      report.state shouldBe Failed
      val initTry = report.cubes.values.head._2.value.reports.values.head.value
      initTry should matchPattern { case Failure(e: InstantiationException) => }
    }
  }
} 
Example 164
Source File: CircuitBreakerStateSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.streams.circuitbreaker

import java.lang.management.ManagementFactory
import javax.management.ObjectName

import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.{FlatSpecLike, Matchers}
import org.squbs.streams.circuitbreaker.impl.AtomicCircuitBreakerState

import scala.language.postfixOps

class CircuitBreakerStateSpec extends TestKit(ActorSystem("CircuitBreakerStateSpec")) with FlatSpecLike with Matchers {

  implicit val scheduler = system.scheduler
  import system.dispatcher

  import scala.concurrent.duration._

  it should "use default exponential backoff settings" in {
    AtomicCircuitBreakerState(
      "params-with-default-exponential-backoff",
      1,
      50.milliseconds,
      20.milliseconds)

    assertJmxValue("params-with-default-exponential-backoff", "MaxFailures", 1)
    assertJmxValue("params-with-default-exponential-backoff", "CallTimeout", "50 milliseconds")
    assertJmxValue("params-with-default-exponential-backoff", "ResetTimeout", "20 milliseconds")
    assertJmxValue("params-with-default-exponential-backoff", "MaxResetTimeout", "36500 days")
    assertJmxValue("params-with-default-exponential-backoff", "ExponentialBackoffFactor", 1.0)
  }

  it should "create circuit breaker state with provided exponential backoff settings" in {
    AtomicCircuitBreakerState(
      "params-with-custom-exponential-backoff",
      1,
      50.milliseconds,
      20.milliseconds,
      2.minutes,
      16.0)
    assertJmxValue("params-with-custom-exponential-backoff", "MaxFailures", 1)
    assertJmxValue("params-with-custom-exponential-backoff", "CallTimeout", "50 milliseconds")
    assertJmxValue("params-with-custom-exponential-backoff", "ResetTimeout", "20 milliseconds")
    assertJmxValue("params-with-custom-exponential-backoff", "MaxResetTimeout", "2 minutes")
    assertJmxValue("params-with-custom-exponential-backoff", "ExponentialBackoffFactor", 16.0)
  }

  it should "create circuit breaker state from configuration" in {
    val config = ConfigFactory.parseString(
      """
        |max-failures = 1
        |call-timeout = 50 ms
        |reset-timeout = 20 ms
        |max-reset-timeout = 1 minute
        |exponential-backoff-factor = 16.0
      """.stripMargin)

    AtomicCircuitBreakerState("from-config", config)
    assertJmxValue("from-config", "MaxFailures", 1)
    assertJmxValue("from-config", "CallTimeout", "50 milliseconds")
    assertJmxValue("from-config", "ResetTimeout", "20 milliseconds")
    assertJmxValue("from-config", "MaxResetTimeout", "1 minute")
    assertJmxValue("from-config", "ExponentialBackoffFactor", 16.0)
  }

  it should "fallback to default values when configuration is empty" in {
    AtomicCircuitBreakerState("empty-config", ConfigFactory.empty())
    assertJmxValue("empty-config", "MaxFailures", 5)
    assertJmxValue("empty-config", "CallTimeout", "1 second")
    assertJmxValue("empty-config", "ResetTimeout", "5 seconds")
    assertJmxValue("empty-config", "MaxResetTimeout", "36500 days")
    assertJmxValue("empty-config", "ExponentialBackoffFactor", 1.0)
  }

  def assertJmxValue(name: String, key: String, expectedValue: Any) = {
    val oName = ObjectName.getInstance(
      s"org.squbs.configuration:type=squbs.circuitbreaker,name=${ObjectName.quote(name)}")
    val actualValue = ManagementFactory.getPlatformMBeanServer.getAttribute(oName, key)
    actualValue shouldEqual expectedValue
  }
} 
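The config-driven tests above lean on Typesafe Config's fallback merging: whatever the caller supplies wins, and missing keys are filled from a defaults Config. A minimal sketch of that pattern, with default values that simply mirror what the "empty-config" assertions expect (how squbs wires the defaults internally is not shown here):

import com.typesafe.config.ConfigFactory

val defaults = ConfigFactory.parseString(
  """
    |max-failures = 5
    |call-timeout = 1 second
    |reset-timeout = 5 seconds
    |exponential-backoff-factor = 1.0
  """.stripMargin)

val provided  = ConfigFactory.parseString("max-failures = 1")
val effective = provided.withFallback(defaults)

effective.getInt("max-failures")                   // 1, taken from the provided config
effective.getDuration("call-timeout").toMillis     // 1000, filled in from the defaults
effective.getDouble("exponential-backoff-factor")  // 1.0, filled in from the defaults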
Example 165
Source File: NoWellKnownActorsSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.actorregistry

import java.lang.management.ManagementFactory
import javax.management.ObjectName

import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FunSpecLike, Matchers}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.JMX.prefix
import org.squbs.unicomplex._

object NoWellKnownActorsSpec {

  val classPaths = Array(getClass.getClassLoader.getResource("classpaths/NoWellKnownActorsCube").getPath)

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = ActorRegistryNoWellKnownActorsSpec
       |  ${JMX.prefixConfig} = true
       |}
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()
}

class NoWellKnownActorsSpec extends TestKit(NoWellKnownActorsSpec.boot.actorSystem)
  with ImplicitSender with FunSpecLike with Matchers with BeforeAndAfterAll {

  import NoWellKnownActorsSpec._

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  describe("ActorRegistry") {
    it ("should initialize even if there is no well-known actor in the classpath") {
      awaitAssert {
        boot.started shouldBe true
        Unicomplex(system).uniActor ! SystemState
        expectMsg(Active)
      }
    }

    it ("should show well-known actor count as zero") {
      val o = new ObjectName(prefix(boot.actorSystem) + "org.squbs.unicomplex:type=ActorRegistry")
      val count = ManagementFactory.getPlatformMBeanServer.getAttribute(o, "Count").asInstanceOf[Int]
      count should be (0)
    }
  }
} 
Example 166
Source File: MetronomeConfigTest.scala    From metronome   with Apache License 2.0 5 votes vote down vote up
package dcos.metronome

import com.typesafe.config.ConfigFactory
import org.scalatest.{FunSuite, GivenWhenThen, Matchers}
import play.api.Configuration

class MetronomeConfigTest extends FunSuite with Matchers with GivenWhenThen {
  private def fromConfig(cfg: String): MetronomeConfig =
    new MetronomeConfig(new Configuration(ConfigFactory.parseString(cfg)))

  test("Http and Https ports with valid parseable strings") {
    Given("http Port is a valid port string")
    val httpPort = "9000"
    val httpsPort = "9010"

    When("Config parser tries to extract it")
    val cfg = fromConfig(s"""
         | play.server.http.port="$httpPort"
         | play.server.https.port="$httpsPort"
       """.stripMargin)

    Then("Should return an integer of that given port")
    cfg.httpPort shouldEqual Some(9000)
    cfg.httpsPort shouldEqual 9010
  }

  test("Http overriden with `disabled`") {
    Given("http Port is `disabled`")
    val httpPort = "disabled"
    val httpsPort = "9010"

    When("Config parser tries to extract it")
    val cfg = fromConfig(s"""
         | play.server.http.port="$httpPort"
         | play.server.https.port="$httpsPort"
       """.stripMargin)

    Then("Http port should be None")
    cfg.httpPort shouldEqual None

    Then("Effective port should be https")
    cfg.effectivePort shouldEqual 9010
  }

  test("feature gpu_resources is enabled when gpu_scheduling_behavior is set") {

    Given("A config with gpu_scheduling_behavior")
    val cfg = fromConfig(s"""
         | metronome.gpu_scheduling_behavior="restricted"
       """.stripMargin)

    When("enabled features are requested")
    Then("features should contain gpu_resources")
    cfg.scallopConf.features.toOption.get.contains("gpu_resources") shouldEqual true
    And("gpu_scheduling_behavior must be set")
    cfg.scallopConf.gpuSchedulingBehavior.toOption.contains("restricted") shouldEqual true
  }

  test("feature gpu_resources is disabled when gpu_scheduling_behavior is not set") {

    Given("A config with gpu_scheduling_behavior")
    val cfg = fromConfig("")

    When("enabled features are requested")
    Then("features should not contain gpu_resources")
    cfg.scallopConf.features.toOption.get shouldEqual Set.empty
    And("gpu_scheduling_behavior must default to undefined")
    cfg.scallopConf.gpuSchedulingBehavior.toOption shouldEqual Some("undefined")
  }
} 
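The ports in this test are quoted strings, so whatever reads them has to decide how to interpret values such as "disabled". At the Typesafe Config level the behaviour looks roughly like this; MetronomeConfig's own handling of the "disabled" marker is not reproduced here:

import com.typesafe.config.ConfigFactory
import scala.util.Try

val cfg = ConfigFactory.parseString(""" play.server.http.port = "disabled" """)

cfg.getString("play.server.http.port")             // "disabled"
Try(cfg.getInt("play.server.http.port")).toOption   // None: not a number

// A quoted numeric string is still coerced by getInt:
ConfigFactory.parseString(""" play.server.https.port = "9010" """).getInt("play.server.https.port")  // 9010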
Example 167
Source File: db.scala    From lichess-db   with GNU Affero General Public License v3.0 5 votes vote down vote up
package lichess

import com.typesafe.config.ConfigFactory
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

import reactivemongo.api._
import reactivemongo.api.collections.bson.BSONCollection
import reactivemongo.bson._

import org.joda.time._

final class DB(
    val gameColl: BSONCollection,
    val analysisColl: BSONCollection,
    val userColl: BSONCollection
) {

  private val userProj = BSONDocument("username" -> true, "title" -> true)
  implicit private val lightUserBSONReader = new BSONDocumentReader[LightUser] {
    def read(doc: BSONDocument) = LightUser(
      id = doc.getAs[String]("_id").get,
      name = doc.getAs[String]("username").get,
      title = doc.getAs[String]("title")
    )
  }

  def users(gs: Seq[lila.game.Game]): Future[Seq[Users]] =
    userColl
      .find(
        BSONDocument(
          "_id" -> BSONDocument("$in" -> gs.flatMap(_.userIds).distinct)
        ),
        userProj
      )
      .cursor[LightUser](readPreference = ReadPreference.secondary)
      .collect[List](Int.MaxValue, Cursor.ContOnError[List[LightUser]]())
      .map { users =>
        def of(p: lila.game.Player) = p.userId.fold(LightUser("?", "?")) { uid =>
          users.find(_.id == uid) getOrElse LightUser(uid, uid)
        }
        gs.map { g =>
          Users(of(g.whitePlayer), of(g.blackPlayer))
        }
      }
}

object DB {

  private val config = ConfigFactory.load()

  val dbName   = "lichess"
  val collName = "game5"

  val uri       = config.getString("db.uri")
  val driver    = new AsyncDriver(Some(config.getConfig("mongo-async-driver")))
  val parsedUri = MongoConnection.fromString(uri)
  val conn      = parsedUri.flatMap(driver.connect)

  def get: Future[(DB, () => Unit)] =
    conn.flatMap(_.database(dbName)).map { db =>
      (
        new DB(
          gameColl = db collection "game5",
          analysisColl = db collection "analysis2",
          userColl = db collection "user4"
        ),
        (() => {
          driver.close()
        })
      )
    }

  implicit object BSONDateTimeHandler extends BSONHandler[BSONDateTime, DateTime] {
    def read(time: BSONDateTime) = new DateTime(time.value, DateTimeZone.UTC)
    def write(jdtime: DateTime)  = BSONDateTime(jdtime.getMillis)
  }

  def debug(v: BSONValue): String = v match {
    case d: BSONDocument => debugDoc(d)
    case d: BSONArray    => debugArr(d)
    case BSONString(x)   => x
    case BSONInteger(x)  => x.toString
    case BSONDouble(x)   => x.toString
    case BSONBoolean(x)  => x.toString
    case v               => v.toString
  }
  def debugArr(doc: BSONArray): String =
    doc.values.toList.map(debug).mkString("[", ", ", "]")
  def debugDoc(doc: BSONDocument): String =
    (doc.elements.toList map {
      case BSONElement(k, v) => s"$k: ${debug(v)}"
    }).mkString("{", ", ", "}")
} 
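ConfigFactory.load() here expects an application.conf on the classpath providing at least db.uri and the mongo-async-driver block that is handed to the ReactiveMongo driver. A stand-in for such a file, parsed from a string for illustration (the URI and the logging setting are invented):

import com.typesafe.config.ConfigFactory

val config = ConfigFactory.parseString(
  """
    |db.uri = "mongodb://localhost:27017/lichess"
    |mongo-async-driver {
    |  akka.loglevel = WARNING
    |}
  """.stripMargin)

config.getString("db.uri")              // "mongodb://localhost:27017/lichess"
config.getConfig("mongo-async-driver")  // sub-config passed to the driver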
Example 168
Source File: RunnerConfigUtils.scala    From cloudflow   with Apache License 2.0 5 votes vote down vote up
package cloudflow.blueprint

import com.typesafe.config.{ Config, ConfigFactory }

import scala.io.Source
import scala.util.{ Failure, Success, Try }

object RunnerConfigUtils {
  val StorageMountPathKey = "storage.mountPath"
  val MetadataName        = "metadata.name"
  val MetadataNamespace   = "metadata.namespace"
  val MetadataUid         = "metadata.uid"

  def addStorageConfig(config: Config, pvcVolumeMountPath: String): Config = {
    val storageConfig = ConfigFactory.parseString(s"""$StorageMountPathKey:"$pvcVolumeMountPath"""")
    config.withFallback(storageConfig)
  }

  def addPodRuntimeConfig(config: Config, downwardApiVolumeMountPath: String): Config = {
    val (name, namespace, uid) = getPodMetadata(downwardApiVolumeMountPath)
    val podRuntimeConfig       = ConfigFactory.parseString(s"""
                                                              |cloudflow.runner.pod: {
                                                              |  $MetadataName:"$name"
                                                              |  $MetadataNamespace:"$namespace"
                                                              |  $MetadataUid:"$uid"
                                                              |}
                                                              |""".stripMargin)
    config.withFallback(podRuntimeConfig)
  }

  def getPodMetadata(downwardApiVolumeMountPath: String): (String, String, String) = {
    val name      = readDownwardApi(downwardApiVolumeMountPath, MetadataName)
    val namespace = readDownwardApi(downwardApiVolumeMountPath, MetadataNamespace)
    val uid       = readDownwardApi(downwardApiVolumeMountPath, MetadataUid)
    (name, namespace, uid)
  }

  private def readDownwardApi(downwardApiVolumeMountPath: String, filename: String): String = {
    val path = s"$downwardApiVolumeMountPath/$filename"
    Try(Source.fromFile(path).getLines.mkString) match {
      case Success(contents) ⇒ contents
      case Failure(ex) ⇒
        throw new Exception(s"An error occurred while attempting to access the downward API volume mount with path '$path'", ex)
    }
  }
} 
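Both helpers above merge with withFallback, so any key already present in the incoming config wins over the generated storage or pod values. A minimal illustration of that ordering (the path reuses StorageMountPathKey, the values are invented):

import com.typesafe.config.ConfigFactory

val existing  = ConfigFactory.parseString("""storage.mountPath = "/mnt/from-deployment" """)
val generated = ConfigFactory.parseString("""storage.mountPath = "/mnt/generated" """)

existing.withFallback(generated).getString("storage.mountPath")  // "/mnt/from-deployment"
generated.withFallback(existing).getString("storage.mountPath")  // "/mnt/generated"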
Example 169
Source File: RunnerConfigSpec.scala    From cloudflow   with Apache License 2.0 5 votes vote down vote up
package cloudflow.blueprint.deployment

import collection.JavaConverters._

import com.typesafe.config.ConfigFactory
import org.scalatest._

class RunnerConfigSpec extends WordSpec with MustMatchers with OptionValues with EitherValues with Inspectors {

  "a RunnerConfig" should {
    "generate the correct JSON (one streamlet per deployment)" in {
      val runnerConfig = RunnerConfig(appId, appVersion, ingressDeployment, kafkaBootstrapServers)
      val config       = ConfigFactory.parseString(runnerConfig.data)

      val streamlet = config.getConfig("cloudflow.runner.streamlet")

      streamlet.getString("class_name") mustBe ingressDeployment.className
      streamlet.getString("streamlet_ref") mustBe ingressDeployment.streamletName

      val streamletContext = streamlet.getConfig("context")

      streamletContext.getString("app_id") mustBe appId
      streamletContext.getString("app_version") mustBe appVersion

      val portMappingConfig = streamletContext.getConfig("port_mappings")
      val ports = portMappingConfig
        .root()
        .entrySet()
        .asScala
        .map(_.getKey)
        .toVector

      ports must have size 1
      forExactly(1, ports) { port ⇒
        val topicConfig = portMappingConfig.getConfig(port)

        ingressDeployment.portMappings must contain(
          (
            port,
            Topic(
              topicConfig.getString("id"),
              topicConfig.getConfig("config")
            )
          )
        )
      }

      streamletContext.getConfig("config") mustBe ingressDeployment.config
    }
  }

  import cloudflow.blueprint._
  import BlueprintBuilder._

  case class Foo(name: String)
  case class Bar(name: String)

  val appId      = "monstrous-mite-12345"
  val appVersion = "42-abcdef0"
  val image      = "image-1"

  val agentPaths = Map(ApplicationDescriptor.PrometheusAgentKey -> "/app/prometheus/prometheus.jar")
  val kafkaBootstrapServers =
    "kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092,kafka-2.broker.kafka.svc.cluster.local:9092"

  val ingress   = randomStreamlet().asIngress[Foo].withServerAttribute
  val processor = randomStreamlet().asProcessor[Foo, Bar].withRuntime("spark")

  val ingressRef   = ingress.ref("ingress")
  val processorRef = processor.ref("processor")

  val blueprint = Blueprint()
    .define(Vector(ingress, processor))
    .use(ingressRef)
    .use(processorRef)
    .connect(Topic(id = "foos"), ingressRef.out, processorRef.in)
    .connect(Topic(id = "bars"), processorRef.out)

  val verifiedBlueprint = blueprint.verified.right.value
  val descriptor        = ApplicationDescriptor(appId, appVersion, image, verifiedBlueprint, agentPaths)

  val allDeployments      = descriptor.deployments
  val ingressDeployment   = allDeployments.find(_.streamletName == ingressRef.name).value
  val processorDeployment = allDeployments.find(_.streamletName == processorRef.name).value
} 
Example 170
Source File: FlinkStreamletKafkaSpec.scala    From cloudflow   with Apache License 2.0 5 votes vote down vote up
package cloudflow.flink

import com.typesafe.config.ConfigFactory
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api._

import cloudflow.streamlets.{ StreamletDefinition, StreamletShape }
import cloudflow.streamlets.avro.AvroOutlet
import cloudflow.flink.avro._
import cloudflow.flink.testkit._
import org.scalatest._


class FlinkStreamletKafkaSpec extends FlinkTestkit with WordSpecLike with Matchers with BeforeAndAfterAll {

  "FlinkIngress" ignore {
    "write streaming data from a source" in {
      @transient lazy val env = StreamExecutionEnvironment.getExecutionEnvironment
      configureCheckpoint(env)

      object FlinkIngress extends FlinkStreamlet {
        val out   = AvroOutlet[Data]("out", _.id.toString())
        val shape = StreamletShape(out)

        override def createLogic() = new FlinkStreamletLogic {
          override def buildExecutionGraph = {
            val data                  = (1 to 10).map(i ⇒ new Data(i, s"name$i"))
            val ins: DataStream[Data] = env.addSource(FlinkSource.CollectionSourceFunction(data))
            writeStream(out, ins)
          }
        }
      }

      val streamletDef = StreamletDefinition("appId", "appVersion", "FlinkIngress", "streamletClass", List(), List(), ConfigFactory.empty)
      val ctx          = new FlinkStreamletContextImpl(streamletDef, env, ConfigFactory.empty)
      FlinkIngress.setContext(ctx)
      FlinkIngress.run(ctx.config)
    }
  }

  private def configureCheckpoint(env: StreamExecutionEnvironment): Unit = {
    // start a checkpoint every 1000 ms
    env.enableCheckpointing(1000)
    // set mode to exactly-once (this is the default)
    env.getCheckpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)
    // make sure 500 ms of progress happen between checkpoints
    env.getCheckpointConfig.setMinPauseBetweenCheckpoints(500)
    // checkpoints have to complete within one minute, or are discarded
    env.getCheckpointConfig.setCheckpointTimeout(60000)
    // prevent the tasks from failing if an error happens in their checkpointing, the checkpoint will just be declined.
    env.getCheckpointConfig.setTolerableCheckpointFailureNumber(3)
    // allow only one checkpoint to be in progress at the same time
    env.getCheckpointConfig.setMaxConcurrentCheckpoints(1)
  }
} 
Example 171
Source File: ConfigInputChangeEventSpec.scala    From cloudflow   with Apache License 2.0 5 votes vote down vote up
package cloudflow.operator.event

import org.scalatest.{ ConfigMap ⇒ _, _ }
import cloudflow.operator.TestDeploymentContext
import com.typesafe.config.ConfigFactory

class ConfigInputChangeEventSpec
    extends WordSpec
    with MustMatchers
    with GivenWhenThen
    with EitherValues
    with OptionValues
    with Inspectors
    with TestDeploymentContext {

  "ConfigInputChangeEvent" should {
    "transform the config" in {
      val appConfig = ConfigFactory.parseString("""
      cloudflow {
        streamlets.logger {
          config-parameters {
            log-level = warning
            foo = bar
          }
          config {
            akka.loglevel = "DEBUG"
          }
          kubernetes {
            pods {
              pod {
                containers {
                  cloudflow {
                    env = [ 
                      { name = "JAVA_OPTS" 
                        value = "-XX:MaxRAMPercentage=40.0"
                      }
                    ]
                    # limited settings that we want to support
                    resources {
                      requests {
                        memory = "1G"
                      }
                    }
                  }
                }
              }
            }
          }
        }
        runtimes.akka.config {
          akka.loglevel = INFO
          akka.kafka.producer.parallelism = 15000
        }
      }
      cloudflow.streamlets.logger.config-parameters.log-level="info"
      cloudflow.streamlets.logger.config-parameters.msg-prefix="valid-logger"      
      """)
      import ConfigInputChangeEvent._

      val streamletName    = "logger"
      val runtimeConfig    = getGlobalRuntimeConfigAtStreamletPath("akka", streamletName, appConfig)
      val kubernetesConfig = getGlobalKubernetesConfigAtStreamletPath("akka", streamletName, appConfig)

      var loggerConfig = getMergedStreamletConfig(streamletName, appConfig, runtimeConfig, kubernetesConfig)
      loggerConfig = moveConfigParameters(loggerConfig, streamletName)
      loggerConfig = mergeRuntimeConfigToRoot(loggerConfig, streamletName)
      loggerConfig = mergeKubernetesConfigToRoot(loggerConfig, streamletName)

      loggerConfig.getString("cloudflow.streamlets.logger.log-level") mustBe "info"
      loggerConfig.getString("cloudflow.streamlets.logger.foo") mustBe "bar"
      loggerConfig.getString("cloudflow.streamlets.logger.msg-prefix") mustBe "valid-logger"
      loggerConfig.getInt("akka.kafka.producer.parallelism") mustBe 15000
      loggerConfig.getString("akka.loglevel") mustBe "DEBUG"
      loggerConfig.getMemorySize("kubernetes.pods.pod.containers.cloudflow.resources.requests.memory").toBytes mustBe 1024 * 1024 * 1024
    }
  }
} 
Example 172
Source File: StreamletDefinitionSpec.scala    From cloudflow   with Apache License 2.0 5 votes vote down vote up
package cloudflow.streamlets

import com.typesafe.config.ConfigFactory
import org.scalatest.{ MustMatchers, OptionValues, TryValues, WordSpec }

class StreamletDefinitionSpec extends WordSpec with MustMatchers with TryValues with OptionValues {

  "A valid StreamletConfig" should {
    val config          = ConfigFactory.load("config-map-sample.json")
    val streamletConfig = StreamletDefinition.read(config).get

    "the loaded instances must contain class, instance and port information" in {
      val expectedStreamlet = ("sensor-data", "cloudflow.examples.sensordata.SensorDataIngress$")
      streamletConfig.streamletRef must be(expectedStreamlet._1)
      streamletConfig.streamletClass must be(expectedStreamlet._2)
    }

    "a loaded instance must have port configuration" in {
      val ports = streamletConfig.portMappings
      val expectedPorts = Map(
        "accepted" -> Topic("accepted"),
        "rejected" -> Topic("rejected")
      )
      ports.foreach(portMapping ⇒ expectedPorts(portMapping.port) must be(portMapping.topic))
    }

    "a loaded instance must have its own configuration" in {
      val config = streamletConfig.config
      config.getInt("cloudflow.internal.server.container-port") must be(2049)
    }

    "a loaded instance must have the common configuration" in {
      config.getString("cloudflow.common.attribute") must be("value")
      config.getString("cloudflow.kafka.bootstrap-servers") must be("cloudflow-kafka.lightbend:9092")
    }

    "a loaded instance must not have runner configuration" in {
      val config = streamletConfig.config
      config.hasPath("runner") must be(false)
    }
  }
} 
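The spec loads a JSON file from the test classpath; since HOCON is a superset of JSON, ConfigFactory parses .json resources directly. A quick sketch of the two common entry points for that (load also merges reference.conf and system-property overrides, parseResources returns just the file contents):

import com.typesafe.config.ConfigFactory

val loaded = ConfigFactory.load("config-map-sample.json")            // merged with reference.conf and overrides
val parsed = ConfigFactory.parseResources("config-map-sample.json")  // just the resource, unmerged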
Example 173
Source File: ArtifactS3SaverTest.scala    From marvin-engine-executor   with Apache License 2.0 5 votes vote down vote up
package org.marvin.artifact.manager

import java.io.File

import akka.Done
import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import com.amazonaws.services.s3.AmazonS3
import com.amazonaws.services.s3.model.GetObjectRequest
import com.typesafe.config.ConfigFactory
import org.apache.hadoop.fs.Path
import org.marvin.artifact.manager.ArtifactSaver.{SaveToLocal, SaveToRemote}
import org.marvin.fixtures.MetadataMock
import org.marvin.model.EngineMetadata
import org.scalamock.scalatest.MockFactory
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}


class ArtifactS3SaverTest extends TestKit(
  ActorSystem("ArtifactS3SaverTest", ConfigFactory.parseString("""akka.loggers = ["akka.testkit.TestEventListener"]""")))
  with ImplicitSender with WordSpecLike with Matchers with BeforeAndAfterAll with MockFactory {

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }

  "s3 saver" should {
    "receive SaveToLocal message" in {
      val metadata = MetadataMock.simpleMockedMetadata()
      val _s3Client = mock[AmazonS3]
      val actor = system.actorOf(Props(new ArtifactS3SaverMock(metadata, _s3Client, true)))

      val protocol = "protocol"
      val artifactName = "model"

      (_s3Client.getObject(_ : GetObjectRequest, _ : File)).expects(*, *).once()

      actor ! SaveToLocal(artifactName, protocol)

      expectMsg(Done)
    }

    "receive SaveToRemote message" in {
      val metadata = MetadataMock.simpleMockedMetadata()
      val _s3Client = mock[AmazonS3]
      val actor = system.actorOf(Props(new ArtifactS3SaverMock(metadata, _s3Client, true)))

      val protocol = "protocol"
      val artifactName = "model"

      (_s3Client.putObject(_ : String, _: String, _ : File)).expects(metadata.s3BucketName, *, *).once()

      actor ! SaveToRemote(artifactName, protocol)

      expectMsg(Done)
    }
  }

    "call preStart method wth success" in {
      val metadata = MetadataMock.simpleMockedMetadata()
      try{
        system.actorOf(Props(new ArtifactS3Saver(metadata)))
        assert(true)
      }catch {
        case _: Throwable =>
          assert(false)
      }
    }

  class ArtifactS3SaverMock(metadata: EngineMetadata, _s3Client: AmazonS3, _isRemote: Boolean) extends ArtifactS3Saver(metadata) {
    def _preStart(): Unit = super.preStart()
    override def preStart(): Unit = {
      s3Client = _s3Client
    }

    override def validatePath(path: Path, isRemote: Boolean): Boolean = {
      if (_isRemote) true
      else false
    }
  }
} 
Example 174
Source File: MetricFilterSpec.scala    From play-prometheus-filters   with MIT License 5 votes vote down vote up
package com.github.stijndehaes.playprometheusfilters.filters

import com.github.stijndehaes.playprometheusfilters.metrics.CounterRequestMetrics.CounterRequestMetricBuilder
import com.github.stijndehaes.playprometheusfilters.metrics.{DefaultPlayUnmatchedDefaults, RequestMetric}
import com.github.stijndehaes.playprometheusfilters.mocks.MockController
import com.typesafe.config.ConfigFactory
import io.prometheus.client.CollectorRegistry
import org.scalatest.mockito.MockitoSugar
import org.scalatestplus.play.PlaySpec
import org.scalatestplus.play.guice.GuiceOneAppPerSuite
import play.api.Configuration
import play.api.mvc._
import play.api.test.Helpers._
import play.api.test.{DefaultAwaitTimeout, FakeRequest, FutureAwaits}

import scala.concurrent.ExecutionContext.Implicits.global

class MetricFilterSpec extends PlaySpec with MockitoSugar with Results with DefaultAwaitTimeout with FutureAwaits with GuiceOneAppPerSuite {

  val configuration = Configuration(ConfigFactory.parseString(
    """play-prometheus-filters.exclude.paths = ["/test"]"""
  ))

  "Filter constructor" should {
    "Get exclude paths from configuration" in {
      implicit val mat = app.materializer
      val filter = new MetricsFilter(configuration) {
        override val metrics = List.empty[RequestMetric[_, RequestHeader, Result]]
      }

      filter.excludePaths must have size 1 // only check size since cannot compare Regex's
    }
  }

  "Apply method" should {
    "skip metrics for excluded paths" in {
      implicit val mat = app.materializer
      val collectorRegistry = mock[CollectorRegistry]
      val filter = new MetricsFilter(configuration) {
        override val metrics = List(
          CounterRequestMetricBuilder.build(collectorRegistry, DefaultPlayUnmatchedDefaults)
        )
      }

      val rh = FakeRequest("GET", "/test")
      val action = new MockController(stubControllerComponents()).ok

      await(filter(action)(rh).run())

      val metrics = filter.metrics(0).metric.collect()
      metrics must have size 1
      val samples = metrics.get(0).samples
      samples.size() mustBe 0 // expect no metrics
    }
  }
} 
Example 175
Source File: CorsBenchmark.scala    From akka-http-cors   with Apache License 2.0 5 votes vote down vote up
package ch.megard.akka.http.cors

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.model.headers.{Origin, `Access-Control-Request-Method`}
import akka.http.scaladsl.model.{HttpMethods, HttpRequest}
import akka.http.scaladsl.server.Directives
import akka.http.scaladsl.unmarshalling.Unmarshal
import ch.megard.akka.http.cors.scaladsl.CorsDirectives
import ch.megard.akka.http.cors.scaladsl.settings.CorsSettings
import com.typesafe.config.ConfigFactory
import org.openjdk.jmh.annotations._

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}

@State(Scope.Benchmark)
@OutputTimeUnit(TimeUnit.SECONDS)
@BenchmarkMode(Array(Mode.Throughput))
class CorsBenchmark extends Directives with CorsDirectives {
  private val config = ConfigFactory.parseString("akka.loglevel = ERROR").withFallback(ConfigFactory.load())

  implicit private val system: ActorSystem  = ActorSystem("CorsBenchmark", config)
  implicit private val ec: ExecutionContext = scala.concurrent.ExecutionContext.global

  private val http         = Http()
  private val corsSettings = CorsSettings.default

  private var binding: ServerBinding        = _
  private var request: HttpRequest          = _
  private var requestCors: HttpRequest      = _
  private var requestPreflight: HttpRequest = _

  @Setup
  def setup(): Unit = {
    val route = {
      path("baseline") {
        get {
          complete("ok")
        }
      } ~ path("cors") {
        cors(corsSettings) {
          get {
            complete("ok")
          }
        }
      }
    }
    val origin = Origin("http://example.com")

    binding = Await.result(http.bindAndHandle(route, "127.0.0.1", 0), 1.second)
    val base = s"http://${binding.localAddress.getHostString}:${binding.localAddress.getPort}"

    request = HttpRequest(uri = base + "/baseline")
    requestCors = HttpRequest(
      method = HttpMethods.GET,
      uri = base + "/cors",
      headers = List(origin)
    )
    requestPreflight = HttpRequest(
      method = HttpMethods.OPTIONS,
      uri = base + "/cors",
      headers = List(origin, `Access-Control-Request-Method`(HttpMethods.GET))
    )
  }

  @TearDown
  def shutdown(): Unit = {
    val f = for {
      _ <- http.shutdownAllConnectionPools()
      _ <- binding.terminate(1.second)
      _ <- system.terminate()
    } yield ()
    Await.ready(f, 5.seconds)
  }

  @Benchmark
  def baseline(): Unit = {
    val f = http.singleRequest(request).flatMap(r => Unmarshal(r.entity).to[String])
    assert(Await.result(f, 1.second) == "ok")
  }

  @Benchmark
  def default_cors(): Unit = {
    val f = http.singleRequest(requestCors).flatMap(r => Unmarshal(r.entity).to[String])
    assert(Await.result(f, 1.second) == "ok")
  }

  @Benchmark
  def default_preflight(): Unit = {
    val f = http.singleRequest(requestPreflight).flatMap(r => Unmarshal(r.entity).to[String])
    assert(Await.result(f, 1.second) == "")
  }
} 
Example 176
Source File: WithTimeoutSpec.scala    From swave   with Mozilla Public License 2.0 5 votes vote down vote up
package swave.core.impl.stages

import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._
import swave.core._

final class WithTimeoutSpec extends SwaveSpec {

  val config = ConfigFactory.parseString {
    """swave.core.dispatcher.definition {
      |  default {
      |    type = thread-pool
      |    thread-pool.fixed-pool-size = 1
      |  }
      |  disp0.thread-pool.fixed-pool-size = 1
      |  disp1.thread-pool.fixed-pool-size = 1
      |  disp2.thread-pool.fixed-pool-size = 1
      |}""".stripMargin
  }
  implicit val env = StreamEnv(config = Some(config))

  "withIdleTimeout must" - {
    implicit val testTimeout = Timeout(1.second)

    "be transparent if upstream rate is sufficient" taggedAs NotOnTravis in {
      Spout.fromIterable(1 to 10).throttle(1, per = 50.millis).withIdleTimeout(100.millis) should produceSeq(1 to 10)
    }

    "fail with StreamTimeoutException if upstream rate is insufficient" taggedAs NotOnTravis in {
      Spout
        .fromIterable(1 to 10)
        .delay(x ⇒ if (x == 3) 200.millis else Duration.Zero)
        .withIdleTimeout(100.millis) should produceErrorLike {
        case x: StreamTimeoutException ⇒
          x.getMessage shouldEqual "No elements passed in the last 100 milliseconds"
      }
    }
  }

  "withInitialTimeout must" - {
    implicit val testTimeout = Timeout(1.second)

    "be transparent if first element arrives quickly enough" taggedAs NotOnTravis in {
      Spout
        .fromIterable(1 to 10)
        .delay(x ⇒ if (x == 3) 200.millis else Duration.Zero)
        .withInitialTimeout(100.millis) should produceSeq(1 to 10)
    }

    "fail with StreamTimeoutException if first element is overly delayed" taggedAs NotOnTravis in {
      Spout
        .fromIterable(1 to 10)
        .delay(x ⇒ if (x == 1) 200.millis else Duration.Zero)
        .withInitialTimeout(100.millis) should produceErrorLike {
        case x: StreamTimeoutException ⇒
          x.getMessage shouldEqual "The first element was not received within 100 milliseconds"
      }
    }
  }

  "withCompletionTimeout must" - {
    implicit val testTimeout = Timeout(1.second)

    "be transparent if stream completes quickly enough" taggedAs NotOnTravis in {
      Spout.fromIterable(1 to 10).withCompletionTimeout(100.millis) should produceSeq(1 to 10)
    }

    "fail with StreamTimeoutException if stream doesn't complete within timeout" taggedAs NotOnTravis in {
      Spout
        .fromIterable(1 to 10)
        .throttle(1, per = 20.millis)
        .withCompletionTimeout(100.millis) should produceErrorLike {
        case x: StreamTimeoutException ⇒
          x.getMessage shouldEqual "The stream was not completed within 100 milliseconds"
      }
    }
  }
} 
Example 177
Source File: PluginRegistry.scala    From recogito2   with Apache License 2.0 5 votes vote down vote up
package plugins

import com.typesafe.config.{Config, ConfigFactory}
import java.io.File
import play.api.Logger
import scala.concurrent.{ExecutionContext, Future}
import scala.io.Source

object PluginRegistry {
  
  private val PLUGIN_DIR = new File("plugins") 

  Logger.info("Loading plugin configurations:")
  
  private val configs: Seq[(Config, File)] = 
    findFilesRecursive("plugin.conf", PLUGIN_DIR)
      .map { file => 
        val config = ConfigFactory.parseFile(file) 
        val dir = file.getParentFile
        (config, dir)
      }
  
  configs.foreach { case (c, dir) =>
    Logger.info(s"  ${c.getString("extends")}.${c.getString("id")}")
  }
  
  Logger.info(s"${configs.size} configurations found")
  
  
  private def findFilesRecursive(name: String, dir: File): Seq[File] = {
    if (dir.exists) {
      val all = dir.listFiles

      val dirs = all.filter(_.isDirectory)
      val files = all.filter(_.isFile)

      val matchingFiles = files.filter(_.getName == name)

      matchingFiles ++ dirs.flatMap(dir => findFilesRecursive(name, dir))
    } else Seq()
  }
          
  def listConfigs(extensionPoint: String): Seq[Config] =
    configs
      .filter(_._1.getString("extends").equalsIgnoreCase(extensionPoint))
      .map(_._1)

  private def readFile(extensionPoint: String, id: String, filename: String)(implicit ctx: ExecutionContext) =
    scala.concurrent.blocking {
      Future {
        configs.find { case (config, dir) => 
          config.getString("extends").equalsIgnoreCase(extensionPoint) &&
          config.getString("id").equalsIgnoreCase(id)
        } map { case (config, dir) => 
          val file = new File(dir, filename)
          Source.fromFile(file).getLines.mkString("\n")
        }
      }
    }  

  def loadPlugin(extensionPoint: String, id: String)(implicit ctx: ExecutionContext): Future[Option[String]] =
    readFile(extensionPoint, id, "plugin.js")

  def loadCSS(extensionPoint: String, id: String)(implicit ctx: ExecutionContext): Future[Option[String]] =
    readFile(extensionPoint, id, "plugin.css")

} 
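Each plugin.conf only needs the two keys the registry reads, extends and id. A hypothetical file content, parsed from a string here the same way ConfigFactory.parseFile would parse it from disk (the extension point and id are invented):

import com.typesafe.config.ConfigFactory

val pluginConf = ConfigFactory.parseString(
  """
    |extends = "document.annotation"
    |id      = "my-sample-plugin"
  """.stripMargin)

pluginConf.getString("extends")  // "document.annotation"
pluginConf.getString("id")       // "my-sample-plugin"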
Example 178
Source File: KappaTagging.scala    From Mastering-Spark-for-Data-Science   with MIT License 5 votes vote down vote up
package io.gzet.tagging

import com.typesafe.config.ConfigFactory
import io.gzet.tagging.gdelt.GdeltTagger
import io.gzet.tagging.twitter.TwitterHIS
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.twitter.TwitterUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}
import twitter4j.Status
import twitter4j.auth.OAuthAuthorization
import twitter4j.conf.ConfigurationBuilder

object KappaTagging {

  final val config = ConfigFactory.load().getConfig("io.gzet.kappa")
  final val esNodes = config.getString("esNodes")
  final val batchSize = config.getInt("batchSize")

  def main(args: Array[String]) = {

    val sparkConf = new SparkConf().setAppName("GDELT Kappa tagging")
    val ssc = new StreamingContext(sparkConf, Seconds(batchSize))
    val sc = ssc.sparkContext

    // Create a counter that can be shared across batches
    val batchId = sc.longAccumulator("GZET")

    val twitterStream = createTwitterStream(ssc, Array[String]())
    val twitterProcessor = new TwitterHIS()
    twitterProcessor.train(twitterStream, batchId)

    val gdeltStream = createGdeltStream(ssc)
    val gdeltProcessor = new GdeltTagger()
    gdeltProcessor.predict(gdeltStream, batchId)

    ssc.start()
    ssc.awaitTermination()
  }

  private def createTwitterStream(ssc: StreamingContext, filters: Array[String]): DStream[Status] = {
    TwitterUtils.createStream(
      ssc,
      getTwitterConfiguration,
      filters
    )
  }

  private def getTwitterConfiguration = {
    val builder = new ConfigurationBuilder()
    builder.setOAuthConsumerKey(config.getString("apiKey"))
    builder.setOAuthConsumerSecret(config.getString("apiSecret"))
    builder.setOAuthAccessToken(config.getString("tokenKey"))
    builder.setOAuthAccessTokenSecret(config.getString("tokenSecret"))
    val configuration = builder.build()
    Some(new OAuthAuthorization(configuration))
  }

  private def createGdeltStream(ssc: StreamingContext) = {
    val topics = Map(
      config.getString("kafkaTopic") -> config.getInt("kafkaTopicPartition")
    )
    KafkaUtils.createStream(
      ssc,
      config.getString("zkQuorum"),
      config.getString("kafkaGroupId"),
      topics
    ).map(_._2)
  }

} 
Example 179
Source File: GdeltTagger.scala    From Mastering-Spark-for-Data-Science   with MIT License 5 votes vote down vote up
package io.gzet.tagging.gdelt

import java.text.SimpleDateFormat
import java.util.Date

import com.typesafe.config.ConfigFactory
import io.gzet.tagging.classifier.Classifier
import io.gzet.tagging.html.HtmlHandler
import io.gzet.tagging.html.HtmlHandler.Content
import org.apache.spark.Accumulator
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.util.LongAccumulator
import org.elasticsearch.spark._

class GdeltTagger() extends Serializable {

  val config = ConfigFactory.load().getConfig("io.gzet.kappa")
  val isoSdf = "yyyy-MM-dd HH:mm:ss"
  val esIndex = config.getString("gdeltIndex")
  val vectorSize = config.getInt("vectorSize")
  val minProba = config.getDouble("minProba")

  def predict(gdeltStream: DStream[String], batchId: LongAccumulator) = {

    // Extract HTML content
    val gdeltContent = fetchHtmlContent(gdeltStream)

    // Predict each RDD
    gdeltContent foreachRDD { batch =>

      batch.cache()
      val count = batch.count()

      if (count > 0) {

        if (Classifier.model.isDefined) {
          val labels = Classifier.model.get.labels

          // Predict HashTags using latest Twitter model
          val textRdd = batch.map(_.body.get)
          val predictions = Classifier.predictProbabilities(textRdd)
          val taggedGdelt = batch.zip(predictions) map { case (content, probabilities) =>
            val validLabels = probabilities filter { case (label, probability) =>
              probability > minProba
            }

            val labels = validLabels.toSeq
              .sortBy(_._2)
              .reverse
              .map(_._1)

            (content, labels)
          }

          // Saving articles to Elasticsearch
          taggedGdelt map { case (content, hashTags) =>
            gdeltToJson(content, hashTags.toArray)
          } saveToEs esIndex

        } else {

          // Saving articles to Elasticsearch
          batch map { content =>
            gdeltToJson(content, Array())
          } saveToEs esIndex
        }

      }

      batch.unpersist(blocking = false)
    }
  }

  private def gdeltToJson(content: Content, hashTags: Array[String]) = {
    val sdf = new SimpleDateFormat(isoSdf)
    Map(
      "time" -> sdf.format(new Date()),
      "body" -> content.body.get,
      "url" -> content.url,
      "tags" -> hashTags,
      "title" -> content.title
    )
  }

  private def fetchHtmlContent(urlStream: DStream[String]) = {
    urlStream.map(_ -> 1).groupByKey().map(_._1) mapPartitions { urls =>
      val sdf = new SimpleDateFormat(isoSdf)
      val htmlHandler = new HtmlHandler()
      val goose = htmlHandler.getGooseScraper
      urls map { url =>
        htmlHandler.fetchUrl(goose, url, sdf)
      }
    } filter { content =>
      content.isDefined &&
        content.get.body.isDefined &&
        content.get.body.get.length > 255
    } map { content =>
      content.get
    }
  }
} 
Example 180
Source File: AnalyzerSvc.scala    From Mastering-Spark-for-Data-Science   with MIT License 5 votes vote down vote up
package svc

import com.typesafe.config.ConfigFactory
import io.gzet.recommender.Audio

class AnalyzerSvc() {

  val config = ConfigFactory.load()
  val cassandraHost = config.getString("cassandra.host")
  val cassandraPort = config.getInt("cassandra.port")
  val sampleSize = config.getDouble("gzet.sample.size")
  val minMatch = config.getDouble("gzet.min.match")
  val dao = new CassandraDao(cassandraHost, cassandraPort)

  def analyze(audio: Audio): Option[String] = {

    val samples = audio.sampleByTime(sampleSize)
    val hashes = samples.map(_.hash)
    val hashResults = hashes.map(dao.findSongsByHash).reduce(_ ++ _)

    if(hashResults.isEmpty) {
      None: Option[String]
    } else {
      val songIds = hashResults.groupBy(s => s).mapValues(_.length).toList.sortBy(_._2).reverse
      val (bestId, bestMatch) = songIds.head
      val score = bestMatch / hashes.size.toDouble
      if(score > minMatch) {
        dao.getSongName(bestId)
      } else {
        None: Option[String]
      }
    }
  }
} 
Example 181
Source File: SparkSvc.scala    From Mastering-Spark-for-Data-Science   with MIT License 5 votes vote down vote up
package svc

import java.io.StringWriter
import java.net.{HttpURLConnection, URL}

import com.typesafe.config.ConfigFactory
import io.gzet.recommender.Node
import org.apache.commons.io.IOUtils
import play.api.Logger
import play.api.libs.json._

class SparkSvc() {

  val config = ConfigFactory.load()
  val host = config.getString("spark.job.server.host")
  val port = config.getInt("spark.job.server.port")
  val timeout = config.getInt("spark.job.server.timeout")
  val appName = config.getString("spark.job.server.app")
  val context = config.getString("spark.job.server.context")
  val indexJob = config.getString("spark.job.index")
  val playlistJob = config.getString("spark.job.playlist")
  val playlistRecommendJob = config.getString("spark.job.personalized.playlist")

  private def getConnection(endpoint: String, params: Option[String]) = {
    try {
      val url = new URL(endpoint)
      val connection = url.openConnection().asInstanceOf[HttpURLConnection]
      connection.setDoOutput(true)
      connection.setRequestMethod("POST")
      connection.setRequestProperty("Accept", "application/json")
      if(params.isDefined){
        val os = connection.getOutputStream
        os.write(params.get.getBytes())
        os.flush()
        os.close()
      }
      val inputStream = connection.getInputStream
      val writer = new StringWriter()
      IOUtils.copy(inputStream, writer, "UTF-8")
      val ret = writer.toString
      Json.parse(ret)
    } catch {
      case e: Exception =>
        throw new Exception("Job Failed: " + e.getMessage)
    }
  }

  private def parseResponse(json: JsValue) : String = {
    val jobId = (json \ "result" \ "jobId").asOpt[String]
    if(jobId.isDefined){
      s"Job submitted [${jobId.get}]"
    } else {
      val message = (json \ "result" \ "message").asOpt[String]
      if(message.isDefined){
        throw new Exception(s"Job failed: ${message.get}")
      }
      throw new Exception("Could not find Spark job id")
    }
  }

  def index(path: String): String = {
    Logger.info("Submitting INDEX job")
    val url = s"http://$host:$port/jobs?appName=$appName&classPath=$indexJob&context=$context"
    val params = "input.dir=\"" + path + "\""
    val json = getConnection(url, Some(params))
    parseResponse(json)
  }

  def playlist() = {
    Logger.info("Submitting PLAYLIST job")
    val url = s"http://$host:$port/jobs?appName=$appName&classPath=$playlistJob&context=$context"
    val json = getConnection(url, None)
    parseResponse(json)
  }

  def playlist(id: Long) = {
    Logger.info("Submitting RECOMMEND job")
    val url = s"http://$host:$port/jobs?appName=$appName&classPath=$playlistRecommendJob&context=$context&sync=true&timeout=$timeout"
    val params = s"song.id=$id"
    val json: JsValue = getConnection(url, Some(params))
    val array = (json \ "result").as[Array[String]]
    array.map({line =>
      val Array(id, pr, song) = line.split(",").take(3)
      Node(id.toLong, song, pr.toDouble)
    }).toList
  }

} 
Example 182
Source File: Playlist.scala    From Mastering-Spark-for-Data-Science   with MIT License 5 votes vote down vote up
package controllers

import com.typesafe.config.ConfigFactory
import play.api.Logger
import play.api.mvc.{Action, Controller}
import svc.{AnalyzerSvc, CassandraDao, SparkSvc}

object Playlist extends Controller {

  val config = ConfigFactory.load()
  val minTime = config.getInt("gzet.min.time")
  val maxTime = config.getInt("gzet.max.time")
  val cassandraHost = config.getString("cassandra.host")
  val cassandraPort = config.getInt("cassandra.port")
  val sampleSize = config.getDouble("gzet.sample.size")
  val minMatch = config.getDouble("gzet.min.match")

  val dao = new CassandraDao(cassandraHost, cassandraPort)
  val analyzer = new AnalyzerSvc()
  val spark = new SparkSvc()

  def index = Action { implicit request =>
    val playlist = models.Playlist(dao.getNodes)
    Logger.info(s"Database is currently ${playlist.nodes.size} songs long")
    Ok(views.html.playlist(playlist))
  }

  def personalize(id: Long) = Action { implicit request =>
    if(models.Playlist(dao.getNodes).nodes.isEmpty) {
      Redirect(routes.Playlist.index()).flashing("warning" -> s"Could not run personalized page rank on empty indices")
    } else {
      val name = dao.getSongName(id)
      if(name.isEmpty) {
        Redirect(routes.Playlist.index()).flashing("error" -> s"Could not find song for id [$id]")
      } else {
        try {
          Logger.info(s"Running a personalize Page Rank for id [$id] and song [$name]")
          val nodes = spark.playlist(id)
          val playlist = models.Playlist(nodes, name)
          Ok(views.html.playlist(playlist))
        } catch {
          case e: Exception =>
            Redirect(routes.Playlist.index()).flashing("error" -> e.getMessage)
        }
      }
    }
  }

  def submit = Action { implicit request =>
    try {
      dao.dropPlaylist
      val jobId = spark.playlist()
      Redirect(routes.Playlist.index()).flashing("success" -> jobId)
    } catch {
      case e: Exception =>
        Redirect(routes.Playlist.index()).flashing("error" -> e.getMessage)
    }
  }

} 
Example 183
Source File: Analyze.scala    From Mastering-Spark-for-Data-Science   with MIT License 5 votes vote down vote up
package controllers

import java.io.{File, FileInputStream}
import java.util.UUID

import com.typesafe.config.ConfigFactory
import io.gzet.recommender.Audio
import models.Songs
import play.api.Logger
import play.api.mvc._
import svc.{AnalyzerSvc, CassandraDao, SparkSvc}

object Analyze extends Controller {

  val config = ConfigFactory.load()
  val minTime = config.getInt("gzet.min.time")
  val maxTime = config.getInt("gzet.max.time")
  val cassandraHost = config.getString("cassandra.host")
  val cassandraPort = config.getInt("cassandra.port")
  val sampleSize = config.getDouble("gzet.sample.size")
  val minMatch = config.getDouble("gzet.min.match")

  val dao = new CassandraDao(cassandraHost, cassandraPort)
  val analyzer = new AnalyzerSvc()
  val spark = new SparkSvc()

  def index = Action { implicit request =>
    val songs = Songs(dao.getSongs)
    Logger.info(s"Database is currently ${songs.songs.size} songs long")
    Ok(views.html.analyze("Select a wav file to analyze")(songs))
  }

  def submit = Action(parse.multipartFormData) { request =>
    val songs = Songs(dao.getSongs)
    Logger.info(s"Database is currently ${songs.songs.size} songs long")
    if(songs.songs.isEmpty) {
      Redirect(routes.Analyze.index()).flashing("warning" -> s"Library is currently empty. Please index new records")
    } else {
      request.body.file("song").map { upload =>
        val fileName = upload.filename
        Logger.info(s"Processing file $fileName")
        val file = new File(s"/tmp/${UUID.randomUUID()}")
        upload.ref.moveTo(file)
        try {
          val song = process(file)
          if(song.isEmpty) {
            Redirect(routes.Analyze.index()).flashing("warning" -> s"Could not match any record for [$fileName]")
          } else {
            val songName = song.get
            Logger.info(s"Found song [$songName]")
            Redirect(routes.Analyze.index()).flashing("success" -> songName)
          }
        } catch {
          case e: Exception =>
            Redirect(routes.Analyze.index()).flashing("error" -> e.getMessage)
        }
      }.getOrElse {
        Redirect(routes.Analyze.index()).flashing("error" -> "Missing file")
      }
    }
  }

  def process(file: File) = {
    val is = new FileInputStream(file)
    val audio = Audio.processSong(is, minTime, maxTime)
    Logger.info(audio.toString)
    file.delete()
    analyzer.analyze(audio)
  }

} 
Example 184
Source File: Index.scala    From Mastering-Spark-for-Data-Science   with MIT License 5 votes vote down vote up
package controllers

import com.typesafe.config.ConfigFactory
import models.{Library, Songs}
import org.apache.commons.lang3.StringUtils
import play.api.Logger
import play.api.data.Form
import play.api.data.Forms._
import play.api.mvc._
import svc.{AnalyzerSvc, CassandraDao, SparkSvc}

object Index extends Controller {

  val config = ConfigFactory.load()
  val minTime = config.getInt("gzet.min.time")
  val maxTime = config.getInt("gzet.max.time")
  val cassandraHost = config.getString("cassandra.host")
  val cassandraPort = config.getInt("cassandra.port")
  val sampleSize = config.getDouble("gzet.sample.size")
  val minMatch = config.getDouble("gzet.min.match")

  val dao = new CassandraDao(cassandraHost, cassandraPort)
  val analyzer = new AnalyzerSvc()
  val spark = new SparkSvc()

  val indexForm: Form[Library] = Form(mapping("path" -> text)(Library.apply)(Library.unapply))

  def index = Action { implicit request =>
    val songs = Songs(dao.getSongs)
    Logger.info(s"Database is currently ${songs.songs.size} songs long")
    Ok(views.html.index(indexForm)(songs))
  }

  def submit = Action { implicit request =>
    indexForm.bindFromRequest.fold(
      errors =>
        Redirect(routes.Index.index()).flashing("error" -> s"Missing path"),
      index =>
        try {
          if(StringUtils.isNotEmpty(index.path)) {
            Logger.info("Dropping database")
            dao.dropSongs
            dao.dropPlaylist
            Logger.info("Submitting job")
            val jobId = spark.index(index.path)
            Redirect(routes.Index.index()).flashing("success" -> jobId)
          } else {
            Redirect(routes.Index.index()).flashing("error" -> s"Missing path")
          }
        } catch {
          case e: Exception =>
            Redirect(routes.Index.index()).flashing("error" -> e.getMessage)
        }
    )
  }
} 
Example 185
Source File: GzetLoader.scala    From Mastering-Spark-for-Data-Science   with MIT License 5 votes vote down vote up
package io.gzet.community

import com.typesafe.config.ConfigFactory
import io.gzet.community.accumulo.{AccumuloLoader, AccumuloConfig}
import io.gzet.community.elasticsearch.{ESReader, ESConfig}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession

object GzetLoader {

  Logger.getLogger("org").setLevel(Level.OFF)
  Logger.getLogger("akka").setLevel(Level.OFF)

  def main(args: Array[String]) = {

    val spark = SparkSession.builder()
      .appName("communities-loader")
      .getOrCreate()

    val sc = spark.sparkContext

    val blacklist = args.mkString(" ").split(",").map(_.trim).toSet
    val config = ConfigFactory.load()

    val esField = config.getString("io.gzet.elasticsearch.field")
    val esConf = ESConfig(
      config.getString("io.gzet.elasticsearch.nodes"),
      config.getInt("io.gzet.elasticsearch.port"),
      config.getString("io.gzet.elasticsearch.index")
    )

    val accumuloTable = config.getString("io.gzet.accumulo.table")
    val accumuloConf = AccumuloConfig(
      config.getString("io.gzet.accumulo.instance"),
      config.getString("io.gzet.accumulo.user"),
      config.getString("io.gzet.accumulo.password"),
      config.getString("io.gzet.accumulo.zookeeper")
    )

    val reader = new ESReader(esConf)
    val personsRdd = reader.loadPersons(sc, esField)
    personsRdd.cache()

    val writer = new AccumuloLoader(accumuloConf)
    writer.persist(sc, accumuloTable, personsRdd, blacklist)

  }

} 
Example 186
Source File: SimpleConfig.scala    From Mastering-Spark-for-Data-Science   with MIT License 5 votes vote down vote up
package io.gzet.timeseries

import java.util.UUID

import com.typesafe.config.ConfigFactory

import scala.collection.JavaConversions._
import scala.util.Try

trait SimpleConfig {

  lazy val conf = ConfigFactory.load()
  lazy val batchSize = Try(conf.getInt("batchSize")).getOrElse(10)
  lazy val windowSize = Try(conf.getInt("windowSize")).getOrElse(60)
  lazy val checkpointDir = Try(conf.getString("checkpoint")).getOrElse(s"file:///tmp/${UUID.randomUUID()}")

  lazy val timely = conf.getConfig("timely")
  lazy val timelyHost = Try(timely.getString("ip")).getOrElse("localhost")
  lazy val timelyPort = Try(timely.getInt("port")).getOrElse(54321)

  lazy val twitter = conf.getConfig("twitter")
  lazy val twitterFilter = twitter.getStringList("tags").toList
  lazy val twitterApiKey = twitter.getString("apiKey")
  lazy val twitterApiSecret = twitter.getString("apiSecret")
  lazy val twitterTokenKey = twitter.getString("tokenKey")
  lazy val twitterTokenSecret = twitter.getString("tokenSecret")

} 
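As a hedged illustration, not from the project's sources: because conf is a lazy val, the trait above can be exercised against an in-memory config instead of application.conf by overriding that member. The twitter and timely values below are placeholders, and the printed defaults come from the Try(...).getOrElse fallbacks in the trait.

import com.typesafe.config.ConfigFactory
import io.gzet.timeseries.SimpleConfig

object SimpleConfigSketch extends App with SimpleConfig {
  // Only the keys without defaults are supplied; everything else falls back.
  override lazy val conf = ConfigFactory.parseString(
    """
      |timely { ip = "10.0.0.1" }
      |twitter {
      |  tags        = ["#spark", "#scala"]
      |  apiKey      = "key"
      |  apiSecret   = "secret"
      |  tokenKey    = "token"
      |  tokenSecret = "token-secret"
      |}
    """.stripMargin)

  println(s"batch=$batchSize window=$windowSize")  // 10 and 60 (defaults)
  println(s"timely=$timelyHost:$timelyPort")       // 10.0.0.1:54321
  println(twitterFilter)                           // List(#spark, #scala)
}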
Example 187
Source File: Main.scala    From akka-ddd-cqrs-es-example   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.bank.readModelUpdater

import akka.actor.ActorSystem
import com.github.j5ik2o.bank.adaptor.dao.BankAccountReadModelFlowsImpl
import com.github.j5ik2o.bank.adaptor.readJournal.JournalReaderImpl
import com.github.j5ik2o.bank.useCase.BankAccountReadModelUseCase
import com.typesafe.config.ConfigFactory
import slick.basic.DatabaseConfig
import slick.jdbc.JdbcProfile

object Main extends App {
  val rootConfig      = ConfigFactory.load()
  implicit val system = ActorSystem("bank-system", config = rootConfig)
  val dbConfig        = DatabaseConfig.forConfig[JdbcProfile](path = "slick", rootConfig)

  new BankAccountReadModelUseCase(new BankAccountReadModelFlowsImpl(dbConfig.profile, dbConfig.db),
                                  new JournalReaderImpl())
    .execute()

  sys.addShutdownHook {
    system.terminate()
  }
} 
Example 188
Source File: Main.scala    From akka-ddd-cqrs-es-example   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.bank.apiServer

import akka.actor.{ ActorRef, ActorSystem }
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import com.github.j5ik2o.bank.adaptor.aggregate.{ BankAccountAggregateFlowsImpl, ShardedBankAccountAggregates }
import com.github.j5ik2o.bank.adaptor.controller.Routes
import com.github.j5ik2o.bank.adaptor.dao.BankAccountReadModelFlowsImpl
import com.github.j5ik2o.bank.adaptor.generator.IdGenerator
import com.github.j5ik2o.bank.adaptor.readJournal.JournalReaderImpl
import com.github.j5ik2o.bank.useCase.{ BankAccountAggregateUseCase, BankAccountReadModelUseCase }
import com.typesafe.config.{ Config, ConfigFactory }
import pureconfig._
import slick.basic.DatabaseConfig
import slick.jdbc.JdbcProfile

import scala.concurrent.ExecutionContextExecutor

object Main extends App {
  val rootConfig: Config                    = ConfigFactory.load()
  val dbConfig: DatabaseConfig[JdbcProfile] = DatabaseConfig.forConfig[JdbcProfile](path = "slick", rootConfig)

  implicit val system: ActorSystem                        = ActorSystem("bank-system", config = rootConfig)
  implicit val materializer: ActorMaterializer            = ActorMaterializer()
  implicit val executionContext: ExecutionContextExecutor = system.dispatcher

  val bankAccountIdGenerator = IdGenerator.ofBankAccountId(dbConfig.profile, dbConfig.db)

  val bankAccountAggregatesRef: ActorRef =
    system.actorOf(ShardedBankAccountAggregates.props, ShardedBankAccountAggregates.name)

  val bankAccountAggregateUseCase: BankAccountAggregateUseCase = new BankAccountAggregateUseCase(
    new BankAccountAggregateFlowsImpl(bankAccountAggregatesRef)
  )

  val bankAccountReadModelUseCase: BankAccountReadModelUseCase =
    new BankAccountReadModelUseCase(new BankAccountReadModelFlowsImpl(dbConfig.profile, dbConfig.db),
                                    new JournalReaderImpl())

  val routes: Routes = Routes(bankAccountIdGenerator, bankAccountAggregateUseCase, bankAccountReadModelUseCase)

  val ApiServerConfig(host, port) =
    loadConfigOrThrow[ApiServerConfig](system.settings.config.getConfig("bank.api-server"))

  val bindingFuture = Http().bindAndHandle(routes.root, host, port)

  sys.addShutdownHook {
    bindingFuture
      .flatMap(_.unbind())
      .onComplete(_ => system.terminate())
  }
} 
Example 189
package com.github.j5ik2o.bank.adaptor.serialization

import akka.actor.ActorSystem
import akka.serialization.SerializationExtension
import com.github.j5ik2o.bank.adaptor.util.ActorSpec
import com.github.j5ik2o.bank.domain.model._
import com.typesafe.config.ConfigFactory
import org.sisioh.baseunits.scala.money.Money
import org.sisioh.baseunits.scala.timeutil.Clock

class BankAccountEventJSONSerializerSpec
    extends ActorSpec(
      ActorSystem("BankAccountEventJSONSerializerSpec", ConfigFactory.load("bank-account-aggregate-spec.conf"))
    ) {
  val extension = SerializationExtension(system)

  "BankAccountEventJSONSerializer" - {
    "should encode CreateEvent" in {
      val serializer    = extension.serializerFor(classOf[BankAccountOpened])
      val now           = Clock.now
      val expectedEvent = BankAccountOpened(BankAccountId(1L), BankAccountName("test-1"), now)
      val byteArray     = serializer.toBinary(expectedEvent)
      val event         = serializer.fromBinary(byteArray, Some(classOf[BankAccountOpened]))
      event shouldBe expectedEvent
    }
    "should encode UpdateEvent" in {
      val serializer    = extension.serializerFor(classOf[BankAccountEventUpdated])
      val now           = Clock.now
      val expectedEvent = BankAccountEventUpdated(BankAccountId(1L), BankAccountName("test-1"), now)
      val byteArray     = serializer.toBinary(expectedEvent)
      val event         = serializer.fromBinary(byteArray, Some(classOf[BankAccountEventUpdated]))
      event shouldBe expectedEvent
    }
    "should encode DepositEvent" in {
      val serializer    = extension.serializerFor(classOf[BankAccountDeposited])
      val now           = Clock.now
      val expectedEvent = BankAccountDeposited(BankAccountId(1L), Money.yens(100), now)
      val byteArray     = serializer.toBinary(expectedEvent)
      val event         = serializer.fromBinary(byteArray, Some(classOf[BankAccountDeposited]))
      event shouldBe expectedEvent
    }
    "should encode WithdrawEvent" in {
      val serializer    = extension.serializerFor(classOf[BankAccountWithdrawn])
      val now           = Clock.now
      val expectedEvent = BankAccountWithdrawn(BankAccountId(1L), Money.yens(100), now)
      val byteArray     = serializer.toBinary(expectedEvent)
      val event         = serializer.fromBinary(byteArray, Some(classOf[BankAccountWithdrawn]))
      event shouldBe expectedEvent
    }
    "should encode DestroyEvent" in {
      val serializer    = extension.serializerFor(classOf[BankAccountClosed])
      val now           = Clock.now
      val expectedEvent = BankAccountClosed(BankAccountId(1L), now)
      val byteArray     = serializer.toBinary(expectedEvent)
      val event         = serializer.fromBinary(byteArray, Some(classOf[BankAccountClosed]))
      event shouldBe expectedEvent
    }
  }
} 
Example 190
package com.github.j5ik2o.bank.adaptor.useCase

import akka.actor.ActorSystem
import com.github.j5ik2o.bank.adaptor.aggregate.{ BankAccountAggregate, BankAccountAggregateFlowsImpl }
import com.github.j5ik2o.bank.adaptor.dao.BankAccountReadModelFlowsImpl
import com.github.j5ik2o.bank.adaptor.readJournal.JournalReaderImpl
import com.github.j5ik2o.bank.adaptor.util.{ ActorSpec, BankAccountSpecSupport, FlywayWithMySQLSpecSupport }
import com.github.j5ik2o.bank.domain.model.{ BankAccountId, BankAccountName }
import com.github.j5ik2o.bank.useCase.{ BankAccountAggregateUseCase, BankAccountReadModelUseCase }
import com.github.j5ik2o.bank.useCase.BankAccountAggregateUseCase.Protocol._
import com.github.j5ik2o.scalatestplus.db.{ MySQLdConfig, UserWithPassword }
import com.typesafe.config.ConfigFactory
import com.wix.mysql.distribution.Version.v5_6_21
import org.scalatest.time.{ Millis, Seconds, Span }
import org.sisioh.baseunits.scala.money.Money

import scala.concurrent.duration._

class BankAccountReadModelUseCaseImplSpec
    extends ActorSpec(
      ActorSystem("BankAccountReadModelUseCaseImplSpec", ConfigFactory.load("bank-account-use-case-spec.conf"))
    )
    with FlywayWithMySQLSpecSupport
    with BankAccountSpecSupport {

  override implicit val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(200, Millis))

  override protected lazy val mySQLdConfig: MySQLdConfig = MySQLdConfig(
    version = v5_6_21,
    port = Some(12345),
    userWithPassword = Some(UserWithPassword("bank", "passwd")),
    timeout = Some((30 seconds) * sys.env.getOrElse("SBT_TEST_TIME_FACTOR", "1").toDouble)
  )

  import system.dispatcher

  "BankAccountReadModelUseCaseImpl" - {
    "should be able to read read-model" in {
      val id           = bankAccountIdGenerator.generateId().futureValue
      val aggregateRef = system.actorOf(BankAccountAggregate.props, BankAccountAggregate.name(id))
      val bankAccountReadModelUseCase = new BankAccountReadModelUseCase(
        new BankAccountReadModelFlowsImpl(dbConfig.profile, dbConfig.db),
        new JournalReaderImpl()
      )
      bankAccountReadModelUseCase.execute()
      createDomainEvents(id,
                         new BankAccountAggregateUseCase(
                           new BankAccountAggregateFlowsImpl(aggregateRef)
                         ))
      awaitAssert(
        {
          val resolveBankAccountEventsSucceeded = bankAccountReadModelUseCase
            .resolveBankAccountEventsById(ResolveBankAccountEventsRequest(id))
            .futureValue
            .asInstanceOf[ResolveBankAccountEventsSucceeded]
          resolveBankAccountEventsSucceeded.bankAccountId shouldBe id
          resolveBankAccountEventsSucceeded.events.head.`type` shouldBe "deposit"
          resolveBankAccountEventsSucceeded.events.head.amount shouldBe 1000
          resolveBankAccountEventsSucceeded.events.head.currencyCode shouldBe "JPY"
        },
        3 seconds,
        50 milliseconds
      )
    }
  }

  private def createDomainEvents(id: BankAccountId, bankAccountAggregateUseCase: BankAccountAggregateUseCase) = {
    val openBankAccountSucceeded = bankAccountAggregateUseCase
      .openBankAccount(OpenBankAccountRequest(id, BankAccountName("test-1")))
      .futureValue
      .asInstanceOf[OpenBankAccountSucceeded]
    openBankAccountSucceeded.bankAccountId shouldBe id
    val depositSucceeded =
      bankAccountAggregateUseCase
        .addBankAccountEvent(DepositRequest(id, Money.yens(1000L)))
        .futureValue
        .asInstanceOf[DepositSucceeded]
    depositSucceeded.bankAccountId shouldBe id
  }
} 
Example 191
Source File: K8SFixture.scala    From skuber   with Apache License 2.0 5 votes vote down vote up
package skuber

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import org.scalatest.{FutureOutcome, fixture}
import skuber.api.client._
import com.typesafe.config.ConfigFactory
import skuber.api.client.impl.KubernetesClientImpl

trait K8SFixture extends fixture.AsyncFlatSpec {

  override type FixtureParam =  K8SRequestContext

  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  implicit val dispatcher = system.dispatcher

  val config = ConfigFactory.load()

  override def withFixture(test: OneArgAsyncTest): FutureOutcome = {
    val k8s = k8sInit(config)
    complete {
      withFixture(test.toNoArgAsyncTest(k8s))
    } lastly {
      k8s.close
    }
  }
} 
Example 192
Source File: PodLogSpec.scala    From skuber   with Apache License 2.0 5 votes vote down vote up
package skuber

import java.time.ZonedDateTime

import akka.stream.scaladsl.TcpIdleTimeoutException
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, Matchers}
import org.scalatest.concurrent.Eventually
import skuber.Pod.LogQueryParams
import skuber.json.format._

import scala.concurrent.Await
import scala.concurrent.duration._

class PodLogSpec extends K8SFixture with Eventually with Matchers with BeforeAndAfterAll {
  val podName: String = java.util.UUID.randomUUID().toString

  behavior of "PodLog"

  val idleTimeout = 3.seconds
  override val config = ConfigFactory.parseString(s"skuber.pod-log.idle-timeout=${idleTimeout.toSeconds}s").withFallback(ConfigFactory.load())

  override def beforeAll(): Unit = {
    super.beforeAll()

    val k8s = k8sInit(config)
    Await.result(k8s.create(getNginxPod(podName, "1.7.9")), 3.second)
    // Give the pod time to start running
    Thread.sleep(3000)
    k8s.close
  }

  override def afterAll(): Unit = {
    val k8s = k8sInit(config)
    Await.result(k8s.delete[Pod](podName), 3.second)
    Thread.sleep(3000)
    k8s.close

    super.afterAll()
  }

  it should "get log of a pod" in { k8s =>
    k8s.getPodLogSource(podName, LogQueryParams(follow = Some(false))).flatMap { source =>
      source.map(_.utf8String).runReduce(_ + _).map { s =>
        assert(s == "foo\n")
      }
    }
  }

  it should "tail log of a pod and timeout after a while" in { k8s =>
    var log = ""
    val start = ZonedDateTime.now()
    k8s.getPodLogSource(podName, LogQueryParams(follow = Some(true))).flatMap { source =>
      source.map(_.utf8String).runForeach(log += _)
    }.failed.map { case e: TcpIdleTimeoutException =>
      val msgPattern = s"TCP idle-timeout encountered on connection to [^,]+, no bytes passed in the last ${idleTimeout}"
      assert(e.getMessage.matches(msgPattern), s"""["${e.getMessage}"] does not match ["${msgPattern}"]""")
      assert(log == "foo\n")
      assert(ZonedDateTime.now().isAfter(start.plusSeconds(idleTimeout.toSeconds)))
    }
  }

  def getNginxContainer(version: String): Container = Container(
    name = "ubuntu", image = "nginx:" + version,
    command = List("sh"),
    args = List("-c", s"""echo "foo"; trap exit TERM; sleep infinity & wait""")
  )

  def getNginxPod(name: String, version: String): Pod = {
    val container = getNginxContainer(version)
    val podSpec = Pod.Spec(containers = List(container))
    Pod.named(name).copy(spec = Some(podSpec))
  }
} 
Example 193
Source File: Boot.scala    From unicorn   with Apache License 2.0 5 votes vote down vote up
package unicorn.rhino

import scala.concurrent.duration._
import akka.actor.{ActorSystem, Props}
import akka.routing.FromConfig
import akka.io.IO
import spray.can.Http
import akka.pattern.ask
import akka.util.Timeout
import com.typesafe.config.ConfigFactory


object Boot extends App {

  // we need an ActorSystem to host our application in
  implicit val actorSystem = ActorSystem("unicorn-rhino")

  // create a pool of RhinoActors
  val service = actorSystem.actorOf(FromConfig.props(Props[RhinoActor]), "rhino-router")

  val conf = ConfigFactory.load()
  val serverPort = conf.getInt("spray.can.server.port")

  val ip = if (System.getProperty("loopback.only") != null) "127.0.0.1" else "0.0.0.0"
  IO(Http) ! Http.Bind(service, interface = ip, port = serverPort)
} 
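A hedged sketch, not from the unicorn project: spray.can.server.port is the key read above, and because the router is created with FromConfig.props, Akka looks up its router settings under akka.actor.deployment at the actor's path. The router type and pool size shown here are placeholders.

import com.typesafe.config.ConfigFactory

object RhinoConfigSketch extends App {
  // Illustrative values only; the real project ships its own application.conf.
  val config = ConfigFactory.parseString(
    """
      |spray.can.server.port = 8080
      |akka.actor.deployment {
      |  /rhino-router {
      |    router          = round-robin-pool
      |    nr-of-instances = 4
      |  }
      |}
    """.stripMargin)

  println(config.getInt("spray.can.server.port"))                          // 8080
  println(config.getString("akka.actor.deployment./rhino-router.router"))  // round-robin-pool
}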
Example 194
Source File: UnitSpec.scala    From sbt-coursier   with Apache License 2.0 5 votes vote down vote up
package t

import com.typesafe.config.ConfigFactory
import org.scalatest.{ MustMatchers, WordSpec }

class UnitSpec extends WordSpec with MustMatchers {
  def conf = ConfigFactory.defaultReference()

  "Config" should {
    "return Akka HTTP server provider" in {
      val serverProvider = conf.getString("play.server.provider")
      serverProvider mustBe "play.core.server.AkkaHttpServerProvider"
    }

    "be able to load Netty settings" in {
      val nettyTransport = conf.getString("play.server.netty.transport")
      nettyTransport mustBe "jdk"
    }
  }
} 
Example 195
Source File: DogStatsDReporterSpec.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.metrics.reporting

import java.util.concurrent.TimeUnit

import com.github.vonnagy.service.container.AkkaTestkitSpecs2Support
import com.typesafe.config.ConfigFactory
import org.coursera.metrics.datadog.transport.Transport
import org.specs2.mock.Mockito
import org.specs2.mutable.SpecificationLike

import scala.concurrent.duration.{FiniteDuration, _}

class DogStatsDReporterSpec extends AkkaTestkitSpecs2Support with SpecificationLike with Mockito {

  "The DatadogReporter reporter" should {

    "report metrics when triggered by the scheduler" in {

      implicit val conf = ConfigFactory.parseString(
        """
         {
          enabled=on
          host="localhost"
          port=8125
          reporting-interval=10ms
          metric-prefix = "pref"
          tags = ["boo", "hoo"]
          api-key = "abc123"
        }
        """)

      val dogStatsDReporter = spy(new DogStatsDReporter)

      val transport = mock[Transport]
      dogStatsDReporter.getTransport returns transport

      val rptr = mock[org.coursera.metrics.datadog.DatadogReporter]
      dogStatsDReporter.getReporter returns rptr

      dogStatsDReporter.start(FiniteDuration(2, TimeUnit.MILLISECONDS))
      there was after(100.millisecond).atLeastOne(dogStatsDReporter).report()

      dogStatsDReporter.tags must containAllOf(Seq("boo", "hoo", "app:container-service", "version:1.0.0.N/A"))
      dogStatsDReporter.prefix must be equalTo "pref"

      dogStatsDReporter.stop
      there was one(transport).close()
    }
  }

} 
Example 196
Source File: StatsDReporterSpec.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.metrics.reporting

import java.util.concurrent.TimeUnit

import com.github.jjagged.metrics.reporting.statsd.StatsD
import com.github.vonnagy.service.container.AkkaTestkitSpecs2Support
import com.typesafe.config.ConfigFactory
import org.specs2.mock.Mockito
import org.specs2.mutable.SpecificationLike

import scala.concurrent.duration._


class StatsDReporterSpec extends AkkaTestkitSpecs2Support with SpecificationLike with Mockito {

  "The StatsDReporter reporter" should {

    "report metrics when triggered by the scheduler" in {

      implicit val conf = ConfigFactory.parseString(
        """
         {
          enabled=on
          reporting-interval=10ms
          host="localhost"
          port=9092
          metric-prefix = "pref"
        }
        """)

      val statsdReporter = spy(new StatsDReporter)
      val statsD = mock[StatsD]
      statsdReporter.getStatsD returns statsD

      val rptr = mock[com.github.jjagged.metrics.reporting.StatsDReporter]
      statsdReporter.getReporter returns rptr

      statsdReporter.start(FiniteDuration(2, TimeUnit.MILLISECONDS))
      there was after(100.millisecond).atLeastOne(statsdReporter).report()

      statsdReporter.stop
      there was one(statsD).close()
    }
  }

} 
Example 197
Source File: ContainerBuilder.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container

import akka.actor.{ActorSystem, Props}
import com.github.vonnagy.service.container.core.CoreConfig
import com.github.vonnagy.service.container.health.HealthCheck
import com.github.vonnagy.service.container.http.routing.RoutedEndpoints
import com.github.vonnagy.service.container.listener.ContainerLifecycleListener
import com.github.vonnagy.service.container.service.ContainerService
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}


case class ContainerBuilder(
                             endpoints: Seq[Class[_ <: RoutedEndpoints]] = Seq.empty,
                             healthChecks: Seq[HealthCheck] = Seq.empty,
                             props: Seq[(String, Props)] = Seq.empty,
                             listeners: Seq[ContainerLifecycleListener] = Seq.empty,
                             config: Config = ConfigFactory.empty,
                             name: String = "service-container",
                             system: Option[ActorSystem] = None
                           ) extends CoreConfig {

  def withConfig(conf: Config): ContainerBuilder = copy(config = conf)

  def withRoutes(routes: Class[_ <: RoutedEndpoints]*): ContainerBuilder = copy(endpoints = routes)

  def withConfigValue(name: String, value: Any): ContainerBuilder =
    copy(config = this.config.withValue(name, ConfigValueFactory.fromAnyRef(value)))

  def withHealthChecks(checks: HealthCheck*): ContainerBuilder = copy(healthChecks = checks)

  def withActors(actors: (String, Props)*): ContainerBuilder = copy(props = actors)

  def withListeners(obs: ContainerLifecycleListener*): ContainerBuilder = copy(listeners = obs)

  def withActorSystem(sys: ActorSystem): ContainerBuilder = copy(system = Some(sys))

  def withName(name: String): ContainerBuilder = copy(name = name)

  def build: ContainerService = {
    implicit val actorSystem = system.getOrElse(ActorSystem.create(name, getConfig(Some(config))))
    val svc = new ContainerService(endpoints, healthChecks, props, listeners, name) with App
    svc
  }

  def validateConfig(paths: String*) = {
    paths.foreach { path =>
      if (!config.hasPath(path)) {
        throw new MissingConfigException(s"Missing required config property: '$path'.")
      }
    }
  }
}

class MissingConfigException(s: String) extends RuntimeException(s) 
Example 198
Source File: CoreConfig.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.core

import java.io.File

import com.typesafe.config.{Config, ConfigFactory}

trait CoreConfig {

  
  /**
    * Resolve the effective configuration: an explicitly supplied Config takes
    * precedence, then the config.file system property or conf/application.conf,
    * with the default ConfigFactory.load() as the final fallback.
    */
  def getConfig(config: Option[Config]): Config = {

    val sysConfig = System.getProperty("config.file") match {
      // If we were not passed a Config then check to see if a config file
      // exists within a conf directory under the application context
      case null if new File("conf/application.conf").exists => ConfigFactory.load("conf/application.conf")
      // Load the default
      case null => ConfigFactory.load()
      // If there is a system property for the file then use that
      case f => ConfigFactory.parseFile(new File(f))
    }

    (config match {
      case Some(conf) => conf.withFallback(sysConfig)
      case None => sysConfig
    }).withFallback(ConfigFactory.load()).resolve()
  }

} 
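A hedged usage sketch, not part of the library's sources: mixing in CoreConfig and calling getConfig shows the precedence described above, where an explicitly supplied Config wins over the fallback chain. The container.http.port path is only an illustrative key.

import com.typesafe.config.ConfigFactory
import com.github.vonnagy.service.container.core.CoreConfig

object CoreConfigSketch extends App with CoreConfig {
  // An explicit config is layered on top of the fallback chain and resolved.
  val overrides = ConfigFactory.parseString("container.http.port = 9999")
  val resolved  = getConfig(Some(overrides))
  println(resolved.getInt("container.http.port"))   // 9999

  // Without an explicit config only the system/default sources are consulted.
  val defaults = getConfig(None)
  println(defaults.hasPath("container.http.port"))
}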
Example 199
Source File: MetricsReportingManagerSpec.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.metrics.reporting

import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestProbe}
import com.github.vonnagy.service.container.AkkaTestkitSpecs2Support
import com.github.vonnagy.service.container.health.{GetHealth, HealthInfo, HealthState}
import com.typesafe.config.ConfigFactory
import org.specs2.mutable.SpecificationLike


class MetricsReportingManagerSpec
    extends AkkaTestkitSpecs2Support(ActorSystem("default",
      ConfigFactory.parseString("container.metrics.reporters.Slf4j.enabled=on")))
    with SpecificationLike {

  // Run in order
  sequential

  "The MetricsReportingManager" should {

    val probe = TestProbe()
    val act = TestActorRef[MetricsReportingManager](MetricsReportingManager.props())

    "be able to load the defined reporters" in {
      act.underlyingActor.reporters.size must be equalTo (1)
    }

    "be able to report it's health" in {
      probe.send(act, GetHealth)
      probe.expectMsgClass(classOf[HealthInfo]) must beEqualTo(HealthInfo("metrics-reporting",
        HealthState.OK, "The system is currently managing 1 metrics reporters",
        Some(List("com.github.vonnagy.service.container.metrics.reporting.Slf4jReporter")), List()))
    }

    "be able to stop the running reporters" in {
      act.underlyingActor.stopReporters
      act.underlyingActor.reporters.size must be equalTo (0)
    }


  }
} 
Example 200
Source File: HttpServiceSpec.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.http

import akka.actor.{ActorSystem, Props}
import akka.testkit.{TestActorRef, TestProbe}
import com.github.vonnagy.service.container.{AkkaTestkitSpecs2Support, TestUtils}
import com.github.vonnagy.service.container.health.HealthState
import com.typesafe.config.ConfigFactory
import org.specs2.mutable.SpecificationLike

class HttpServiceSpec extends AkkaTestkitSpecs2Support(ActorSystem("test", {
  val http = TestUtils.temporaryServerHostnameAndPort()
  val https = TestUtils.temporaryServerHostnameAndPort()

  ConfigFactory.parseString(
    s"""
      container.http.interface="${http._2}"
      container.http.port=${http._3}
      container.https.interface="${https._2}"
      container.https.port=${https._3}
    """)})) with SpecificationLike {

  sequential
  val probe = TestProbe()
  val act = TestActorRef[HttpService](Props(new HttpService(Nil)), probe.testActor, "service")

  "The HttpService" should {

    "be able to check the services health before it is started" in {
      act.underlyingActor.getHttpHealth must not be null
      act.underlyingActor.getHttpHealth.state must be equalTo HealthState.CRITICAL
    }

    "be able to start and Http service on a specified port" in {
      act.underlyingActor.httpSettings.isEmpty must beFalse
      act.underlyingActor.httpServer.isEmpty must beTrue
      probe.send(act, HttpStart)
      val msg = probe.expectMsg(HttpStarted)
      msg must be equalTo HttpStarted

      act.underlyingActor.httpServer.size must be equalTo(2)
    }

    "be able to check the services health after it is started" in {
      act.underlyingActor.getHttpHealth must not be null
      act.underlyingActor.getHttpHealth.state must be equalTo HealthState.OK
    }

    "be able to stop the Http service" in  {
      act.underlyingActor.stopHttpServer
      val msg = probe.expectMsg(HttpStopped)
      msg must be equalTo HttpStopped
      act.underlyingActor.httpServer.isEmpty must beTrue
    }
  }

}