org.scalatest.time.Seconds Scala Examples

The following examples show how to use org.scalatest.time.Seconds. Each example is taken from the open-source project and source file named in the header above it.
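Before the project examples, here is a minimal, self-contained sketch of the two usual patterns: building a Span from Seconds for a suite-wide PatienceConfig, and passing an explicit per-call timeout to eventually. The class name is illustrative and the imports assume the ScalaTest 3.1+ package layout; older projects below use org.scalatest.FlatSpec-style imports instead.

import org.scalatest.concurrent.Eventually
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Seconds, Span}

class SecondsUsageSketch extends AnyFlatSpec with Matchers with Eventually {

  // Suite-wide patience: retry for up to 5 seconds, checking every 100 millis.
  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = scaled(Span(5, Seconds)), interval = scaled(Span(100, Millis)))

  "eventually" should "retry an assertion until it passes or the timeout elapses" in {
    var attempts = 0
    // A per-call timeout can also be given explicitly, as many examples below do.
    eventually(timeout(Span(3, Seconds))) {
      attempts += 1
      attempts should be >= 3
    }
  }
}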
Example 1
Source File: IntegrationTest.scala    From kmq   with Apache License 2.0
package com.softwaremill.kmq.redelivery

import java.time.Duration
import java.util.Random

import akka.actor.ActorSystem
import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ConsumerSettings, ProducerMessage, ProducerSettings, Subscriptions}
import akka.stream.ActorMaterializer
import akka.testkit.TestKit
import com.softwaremill.kmq._
import com.softwaremill.kmq.redelivery.infrastructure.KafkaSpec
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringDeserializer
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}

import scala.collection.mutable.ArrayBuffer

class IntegrationTest extends TestKit(ActorSystem("test-system")) with FlatSpecLike with KafkaSpec with BeforeAndAfterAll with Eventually with Matchers {

  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  "KMQ" should "resend message if not committed" in {
    val bootstrapServer = s"localhost:${testKafkaConfig.kafkaPort}"
    val kmqConfig = new KmqConfig("queue", "markers", "kmq_client", "kmq_redelivery", Duration.ofSeconds(1).toMillis,
    1000)

    val consumerSettings = ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
      .withBootstrapServers(bootstrapServer)
      .withGroupId(kmqConfig.getMsgConsumerGroupId)
      .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

    val markerProducerSettings = ProducerSettings(system,
      new MarkerKey.MarkerKeySerializer(), new MarkerValue.MarkerValueSerializer())
      .withBootstrapServers(bootstrapServer)
      .withProperty(ProducerConfig.PARTITIONER_CLASS_CONFIG, classOf[ParititionFromMarkerKey].getName)
    val markerProducer = markerProducerSettings.createKafkaProducer()

    val random = new Random()

    lazy val processedMessages = ArrayBuffer[String]()
    lazy val receivedMessages = ArrayBuffer[String]()

    val control = Consumer.committableSource(consumerSettings, Subscriptions.topics(kmqConfig.getMsgTopic)) // 1. get messages from topic
      .map { msg =>
        ProducerMessage.Message(
          new ProducerRecord[MarkerKey, MarkerValue](kmqConfig.getMarkerTopic, MarkerKey.fromRecord(msg.record), new StartMarker(kmqConfig.getMsgTimeoutMs)), msg)
      }
      .via(Producer.flow(markerProducerSettings, markerProducer)) // 2. write the "start" marker
      .map(_.message.passThrough)
      .mapAsync(1) { msg =>
        msg.committableOffset.commitScaladsl().map(_ => msg.record) // 3. commit offsets (this should be batched)
      }
      .map { msg =>
        receivedMessages += msg.value
        msg
      }
      .filter(_ => random.nextInt(5) != 0) // 4. "process" the message, dropping ~20% at random so they get redelivered
      .map { processedMessage =>
        processedMessages += processedMessage.value
        new ProducerRecord[MarkerKey, MarkerValue](kmqConfig.getMarkerTopic, MarkerKey.fromRecord(processedMessage), EndMarker.INSTANCE)
      }
      .to(Producer.plainSink(markerProducerSettings, markerProducer)) // 5. write "end" markers
      .run()

    val redeliveryHook = RedeliveryTracker.start(new KafkaClients(bootstrapServer), kmqConfig)

    val messages = (0 to 20).map(_.toString)
    messages.foreach(msg => sendToKafka(kmqConfig.getMsgTopic, msg))

    eventually {
      receivedMessages.size should be > processedMessages.size
      processedMessages.sortBy(_.toInt).distinct shouldBe messages
    }(PatienceConfig(timeout = Span(15, Seconds)), implicitly)

    redeliveryHook.close()
    control.shutdown()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    TestKit.shutdownActorSystem(system)
  }
} 
Example 2
Source File: JobStatusFlusherSpec.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist.master.execution.status

import java.util.concurrent.atomic.AtomicReference

import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestKit}
import io.hydrosphere.mist.master.JobDetails.Status
import io.hydrosphere.mist.master.Messages.StatusMessages._
import io.hydrosphere.mist.master.{ActorSpec, JobDetails, TestData}
import io.hydrosphere.mist.master.logging.JobLogger
import mist.api.data._
import org.scalatest.concurrent.Eventually
import org.scalatest.{FunSpecLike, Matchers}
import org.scalatest.prop.TableDrivenPropertyChecks._
import org.scalatest.time.{Seconds, Span}

import scala.concurrent.{Future, Promise}

class JobStatusFlusherSpec extends ActorSpec("job-status-flusher") with TestData with Eventually {

  it("should flush status correctly") {
    val initial = Promise[JobDetails]

    val updateResult = new AtomicReference[Option[JobDetails]](None)
    val props = JobStatusFlusher.props(
      id = "id",
      get = (_) => initial.future,
      update = (d: JobDetails) => { updateResult.set(Some(d)); Future.successful(()) },
      loggerF = _ => JobLogger.NOOP
    )
    val flusher = TestActorRef(props)

    flusher ! ReportedEvent.plain(QueuedEvent("id"))
    flusher ! ReportedEvent.plain(StartedEvent("id", System.currentTimeMillis()))
    flusher ! ReportedEvent.plain(FinishedEvent("id", System.currentTimeMillis(), JsNumber(42)))

    initial.success(mkDetails(JobDetails.Status.Initialized))

    eventually(timeout(Span(3, Seconds))) {
      val value = updateResult.get
      value.isDefined shouldBe true

      val d = value.get
      d.status shouldBe JobDetails.Status.Finished
    }
  }

  describe("event conversion") {

    val baseDetails = mkDetails(JobDetails.Status.Initialized)

    val expected = Table(
      ("event", "details"),
      (QueuedEvent("id"), baseDetails.copy(status = Status.Queued)),
      (StartedEvent("id", 1), baseDetails.copy(status = Status.Started, startTime = Some(1))),
      (CancellingEvent("id", 1), baseDetails.copy(status = Status.Cancelling)),
      (CancelledEvent("id", 1), baseDetails.copy(status = Status.Canceled, endTime = Some(1))),
      (FinishedEvent("id", 1, JsMap("1" -> JsNumber(2))),
        baseDetails.copy(
          status = Status.Finished,
          endTime = Some(1),
          jobResult =
            Some(
              Right(JsMap("1" -> JsNumber(2)))
            )
        )),
      (FailedEvent("id", 1, "error"),
        baseDetails.copy(status = Status.Failed, endTime = Some(1), jobResult = Some(Left("error")))),
      (WorkerAssigned("id", "workerId"), baseDetails.copy(workerId = Some("workerId")))
    )

    it("should correct update job details") {
      forAll(expected) { (e: UpdateStatusEvent, d: JobDetails) =>
        JobStatusFlusher.applyStatusEvent(baseDetails, e) shouldBe d
      }
    }
  }
} 
Example 3
Source File: OutputCommitCoordinatorIntegrationSuite.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.scheduler

import org.apache.hadoop.mapred.{FileOutputCommitter, TaskAttemptContext}
import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.time.{Seconds, Span}

import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkFunSuite, TaskContext}
import org.apache.spark.util.Utils


class OutputCommitCoordinatorIntegrationSuite
  extends SparkFunSuite
  with LocalSparkContext
  with TimeLimits {

  // Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
  implicit val defaultSignaler: Signaler = ThreadSignaler

  override def beforeAll(): Unit = {
    super.beforeAll()
    val conf = new SparkConf()
      .set("spark.hadoop.outputCommitCoordination.enabled", "true")
      .set("spark.hadoop.mapred.output.committer.class",
        classOf[ThrowExceptionOnFirstAttemptOutputCommitter].getCanonicalName)
    sc = new SparkContext("local[2, 4]", "test", conf)
  }

  test("exception thrown in OutputCommitter.commitTask()") {
    // Regression test for SPARK-10381
    failAfter(Span(60, Seconds)) {
      val tempDir = Utils.createTempDir()
      try {
        sc.parallelize(1 to 4, 2).map(_.toString).saveAsTextFile(tempDir.getAbsolutePath + "/out")
      } finally {
        Utils.deleteRecursively(tempDir)
      }
    }
  }
}

private class ThrowExceptionOnFirstAttemptOutputCommitter extends FileOutputCommitter {
  override def commitTask(context: TaskAttemptContext): Unit = {
    val ctx = TaskContext.get()
    if (ctx.attemptNumber < 1) {
      throw new java.io.FileNotFoundException("Intentional exception")
    }
    super.commitTask(context)
  }
} 
Example 4
Source File: AkkaDiscoveryNameResolverProviderSpec.scala    From akka-grpc   with Apache License 2.0
package akka.grpc.internal

import java.net.URI
import java.net.InetSocketAddress
import java.util.{ List => JList }

import scala.concurrent.ExecutionContext.Implicits._
import scala.concurrent.Future
import scala.concurrent.Promise
import scala.concurrent.duration._
import scala.collection.immutable

import io.grpc.Attributes
import io.grpc.NameResolver.Listener
import io.grpc.EquivalentAddressGroup

import akka.actor.ActorSystem
import akka.discovery.Lookup
import akka.discovery.ServiceDiscovery
import akka.discovery.ServiceDiscovery.Resolved
import akka.discovery.ServiceDiscovery.ResolvedTarget
import akka.testkit.TestKit

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.wordspec.AnyWordSpecLike

class AkkaDiscoveryNameResolverProviderSpec
    extends TestKit(ActorSystem())
    with AnyWordSpecLike
    with Matchers
    with ScalaFutures {

  implicit override val patienceConfig =
    PatienceConfig(timeout = scaled(Span(2, Seconds)), interval = scaled(Span(5, Millis)))

  "AkkaDiscoveryNameResolverProviderSpec" should {
    "provide a NameResolver that uses the supplied serviceName" in {
      val serviceName = "testServiceName"
      val discovery = new ServiceDiscovery() {
        override def lookup(lookup: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] = {
          lookup.serviceName should be(serviceName)
          Future.successful(Resolved(serviceName, immutable.Seq(ResolvedTarget("10.0.0.3", Some(4312), None))))
        }
      }
      val provider = new AkkaDiscoveryNameResolverProvider(
        discovery,
        443,
        portName = None,
        protocol = None,
        resolveTimeout = 3.seconds)

      val resolver = provider.newNameResolver(new URI("//" + serviceName), null)

      val addressGroupsPromise = Promise[List[EquivalentAddressGroup]]
      val listener = new Listener() {
        override def onAddresses(addresses: JList[EquivalentAddressGroup], attributes: Attributes): Unit = {
          import scala.collection.JavaConverters._
          addressGroupsPromise.success(addresses.asScala.toList)
        }
        override def onError(error: io.grpc.Status): Unit = ???
      }
      resolver.start(listener)
      val addressGroups = addressGroupsPromise.future.futureValue
      addressGroups.size should be(1)
      val addresses = addressGroups(0).getAddresses()
      addresses.size should be(1)
      val address = addresses.get(0).asInstanceOf[InetSocketAddress]
      address.getHostString() should be("10.0.0.3")
      address.getPort() should be(4312)
    }
  }

} 
Example 5
Source File: AkkaDiscoveryNameResolverSpec.scala    From akka-grpc   with Apache License 2.0
package akka.grpc.internal

import java.net.InetSocketAddress

import akka.actor.ActorSystem
import akka.grpc.{ GrpcClientSettings, GrpcServiceException }
import akka.testkit.TestKit
import io.grpc.Status
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.wordspec.AnyWordSpecLike

import scala.collection.JavaConverters._

class AkkaDiscoveryNameResolverSpec
    extends TestKit(ActorSystem())
    with AnyWordSpecLike
    with Matchers
    with ScalaFutures {
  implicit val ex = system.dispatcher
  implicit override val patienceConfig =
    PatienceConfig(timeout = scaled(Span(2, Seconds)), interval = scaled(Span(5, Millis)))

  "The AkkaDiscovery-backed NameResolver" should {
    "correctly report an error for an unknown hostname" in {
      val host = "example.invalid"
      val resolver = AkkaDiscoveryNameResolver(GrpcClientSettings.connectToServiceAt(host, 80))
      val probe = new NameResolverListenerProbe()

      resolver.start(probe)

      val exception = probe.future.failed.futureValue.asInstanceOf[GrpcServiceException]
      exception shouldBe a[GrpcServiceException]
      exception.status.getCode shouldBe Status.UNKNOWN.getCode
      // FIXME: This description is not portable - it arises from native function response, which differs by OS
      // exception.status.getDescription should equal(host + ": Name or service not known")
    }

    "support serving a static host/port" in {
      // Unfortunately it needs to be an actually resolvable address...
      val host = "akka.io"
      val port = 4040
      val resolver = AkkaDiscoveryNameResolver(GrpcClientSettings.connectToServiceAt(host, port))
      val probe = new NameResolverListenerProbe()

      resolver.start(probe)

      val addresses = probe.future.futureValue match {
        case Seq(addressGroup) => addressGroup.getAddresses
        case _                 => fail("Expected a single address group")
      }
      addresses.asScala.toSeq match {
        case Seq(address: InetSocketAddress) =>
          address.getPort should be(port)
          address.getAddress.getHostName should be(host)
        case other =>
          fail(s"Expected a single InetSocketAddress, got $other")
      }
    }
  }
} 
Example 6
Source File: GrpcExceptionHandlerSpec.scala    From akka-grpc   with Apache License 2.0
package akka.grpc.scaladsl

import akka.actor.ActorSystem
import akka.grpc.GrpcServiceException
import akka.grpc.internal.{ GrpcProtocolNative, GrpcResponseHelpers, Identity }
import akka.grpc.scaladsl.GrpcExceptionHandler.defaultMapper
import akka.http.scaladsl.model.HttpEntity._
import akka.http.scaladsl.model.HttpResponse
import akka.stream.ActorMaterializer
import io.grpc.Status
import org.scalatest._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.wordspec.AnyWordSpec

import scala.concurrent.{ ExecutionException, Future }

class GrpcExceptionHandlerSpec extends AnyWordSpec with Matchers with ScalaFutures with BeforeAndAfterAll {
  implicit val system = ActorSystem("Test")
  implicit val materializer = ActorMaterializer()
  implicit override val patienceConfig =
    PatienceConfig(timeout = scaled(Span(2, Seconds)), interval = scaled(Span(5, Millis)))
  implicit val writer = GrpcProtocolNative.newWriter(Identity)

  val expected: Function[Throwable, Status] = {
    case e: ExecutionException =>
      if (e.getCause == null) Status.INTERNAL
      else expected(e.getCause)
    case grpcException: GrpcServiceException => grpcException.status
    case _: NotImplementedError              => Status.UNIMPLEMENTED
    case _: UnsupportedOperationException    => Status.UNIMPLEMENTED
    case _                                   => Status.INTERNAL
  }

  val otherTypes: Seq[Throwable] = Seq(
    new GrpcServiceException(status = Status.DEADLINE_EXCEEDED),
    new NotImplementedError,
    new UnsupportedOperationException,
    new NullPointerException,
    new RuntimeException)

  val executionExceptions: Seq[Throwable] =
    otherTypes.map(new ExecutionException(_)) :+ new ExecutionException("doh", null)

  "defaultMapper" should {
    (otherTypes ++ executionExceptions).foreach { e =>
      val exp = expected(e)
      s"Map $e to $exp" in {
        defaultMapper(system)(e).status shouldBe exp
      }
    }
  }

  "default(defaultMapper)" should {
    (otherTypes ++ executionExceptions).foreach { e =>
      s"Correctly map $e" in {
        val exp = GrpcResponseHelpers.status(defaultMapper(system)(e))
        val expChunks = getChunks(exp)
        val act = GrpcExceptionHandler.from(defaultMapper(system))(system, writer)(e).futureValue
        val actChunks = getChunks(act)
        // compare status and chunks separately, because HttpResponse instances aren't equal
        act.status shouldBe exp.status
        actChunks.toString shouldEqual expChunks.toString
      }
    }
  }

  def getChunks(resp: HttpResponse): Seq[ChunkStreamPart] =
    (resp.entity match {
      case Chunked(_, chunks) =>
        chunks.runFold(Seq.empty[ChunkStreamPart]) { case (seq, chunk) => seq :+ chunk }
      case _ => Future.successful(Seq.empty[ChunkStreamPart])
    }).futureValue

  override def afterAll(): Unit = {
    super.afterAll()
    system.terminate()
  }
} 
Example 7
Source File: PgSpec.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist

import com.zaxxer.hikari.HikariConfig
import io.hydrosphere.mist.core.CommonData.{Action, JobParams}
import io.hydrosphere.mist.master.{DbConfig, JobDetails}
import io.hydrosphere.mist.master.JobDetails.Source
import io.hydrosphere.mist.master.store.{HikariDataSourceTransactor, HikariJobRepository, JobRepository, PgJobRequestSql}
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, FunSpec, Matchers}
import mist.api.data.JsMap

import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration

class PgSpec extends FunSpec
  with BeforeAndAfterAll
  with Matchers
  with Eventually {
  
  implicit override val patienceConfig = PatienceConfig(timeout = scaled(Span(2, Seconds)), interval = scaled(Span(1, Seconds)))
  
  var pgContainer: TestContainer = _
  var repo: HikariJobRepository = _
  
  override def beforeAll = {
    pgContainer = TestContainer.run(DockerImage("postgres","latest"), Map(5433 -> 5432))
    
    val cfg = DbConfig.JDBCDbConfig(
      10,
      "org.postgresql.Driver",
      "jdbc:postgresql://localhost:5433/postgres",
      Some("postgres"),
      Some("postgres"),
      true
    )
    
    repo = JobRepository.create(cfg) match {
      case Left(e) => throw e
      case Right(r) => r
    }
  }
  override def afterAll = {
    pgContainer.close()
    repo.shutdown()
  }
  
  private def await[A](f: Future[A]): A = Await.result(f, Duration.Inf)
  
  
  it("remove") {
    val details = fixtureJobDetails("id")
    await(repo.remove(details.jobId))
    await(repo.get(details.jobId)) shouldBe None
  }
  
  it("update") {
    val details = fixtureJobDetails("id")
    await(repo.update(details))
    await(repo.get(details.jobId)) shouldBe Some(details)
  }
  
  it("clear") {
    (1 to 10).foreach(i => await(repo.update(fixtureJobDetails(s"jobId $i"))))
    await(repo.clear())
    await(repo.getAll(10, 0)).size shouldBe 0
  }
  
  it("filter by status") {
    (1 to 2).foreach(i => {
      val details = fixtureJobDetails(s"jobId $i", JobDetails.Status.Started)
      await(repo.update(details))
    })
    await(repo.update(fixtureJobDetails("ignore")))
    
    val runningJobs = repo.filteredByStatuses(List(JobDetails.Status.Started))
    await(runningJobs).size shouldBe 2
  }
  
  it("decode failure") {
    val details = fixtureJobDetails("failed").withFailure("Test Error")
    await(repo.update(details))
    await(repo.get("failed")) shouldBe Some(details)
  }
  
  
  // Helper functions
  private def fixtureJobDetails(
    jobId: String,
    status: JobDetails.Status = JobDetails.Status.Initialized): JobDetails = {
    JobDetails(
      params = JobParams("path", "className", JsMap.empty, Action.Execute),
      jobId = jobId,
      source = Source.Http,
      function = "function",
      context = "context",
      externalId = None,
      status = status
    )
  }
} 
Example 8
Source File: WorkerRunnerSpec.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist.master.execution.workers

import java.util.concurrent.atomic.AtomicBoolean

import io.hydrosphere.mist.core.CommonData.WorkerInitInfo
import io.hydrosphere.mist.core.MockitoSugar
import io.hydrosphere.mist.master.execution.workers.starter.{WorkerProcess, WorkerStarter}
import io.hydrosphere.mist.master.execution.{SpawnSettings, workers}
import io.hydrosphere.mist.master.{ActorSpec, FilteredException, TestData}
import io.hydrosphere.mist.utils.akka.ActorRegHub
import org.mockito.Mockito._
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}

import scala.concurrent.duration._
import scala.concurrent.{Await, Future, Promise}

class WorkerRunnerSpec extends ActorSpec("worker-runner") with TestData with MockitoSugar with Eventually {

  describe("default runner") {

    def mkSpawnSettings(starter: WorkerStarter): SpawnSettings = SpawnSettings(
      runnerCmd = starter,
      timeout = 10 seconds,
      readyTimeout = 10 seconds,
      akkaAddress = "akkaAddr",
      logAddress = "logAddr",
      httpAddress = "httpAddr",
      maxArtifactSize = 100L
    )

    it("should run worker") {
      val starter = new WorkerStarter {
        override def onStart(name: String, initInfo: WorkerInitInfo): WorkerProcess = WorkerProcess.NonLocal
        override def stopAction: StopAction = StopAction.Remote
      }
      val regHub = mock[ActorRegHub]
      when(regHub.waitRef(any[String], any[Duration])).thenSuccess(null)

      val termination = Promise[Unit]
      val runner = new workers.WorkerRunner.DefaultRunner(
        spawn = mkSpawnSettings(starter),
        regHub = regHub,
        connect = (_, _, _, _, _) => Future.successful(WorkerConnection("id", null, workerLinkData, termination.future))
      )

      Await.result(runner("id", FooContext), Duration.Inf)
    }

    it("should call onStop if connect was failed") {
      val check = new AtomicBoolean(false)

      val runnerCmd = new WorkerStarter {
        override def onStart(name: String, initInfo: WorkerInitInfo): WorkerProcess = WorkerProcess.NonLocal
        override def stopAction: StopAction = StopAction.CustomFn(_ => check.set(true))
      }

      val regHub = mock[ActorRegHub]
      when(regHub.waitRef(any[String], any[Duration])).thenFailure(FilteredException())

      val runner = new workers.WorkerRunner.DefaultRunner(
        spawn = mkSpawnSettings(runnerCmd),
        regHub = regHub,
        connect = (_, _, _, _, _) => Future.failed(FilteredException())
      )

      intercept[Throwable] {
        Await.result(runner("id", FooContext), Duration.Inf)
      }

      eventually(timeout(Span(3, Seconds))) {
        check.get shouldBe true
      }
    }

  }
} 
Example 9
Source File: WorkerHubSpec.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist.master.execution.workers

import akka.actor.ActorRef
import io.hydrosphere.mist.core.{CommonData, MockitoSugar}
import io.hydrosphere.mist.master.TestData
import io.hydrosphere.mist.master.models.ContextConfig
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{FunSpec, Matchers}

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future, Promise}

class WorkerHubSpec extends FunSpec with Matchers with TestData with Eventually with MockitoSugar {

  it("should mirror connections") {
    val termination = Promise[Unit]

    val runner = new WorkerRunner {
      override def apply(id: String, ctx: ContextConfig): Future[WorkerConnection] =
        Future.successful(WorkerConnection(id, null, workerLinkData.copy(name = id), termination.future))
    }
    val hub = new WorkerHub(runner, TestConnector.apply)

    val connector = hub.start("id", FooContext)

    Await.result(connector.askConnection(), Duration.Inf)

    eventually(timeout(Span(3, Seconds))) {
      hub.workerConnections().size shouldBe 1
    }
    termination.success(())
    eventually(timeout(Span(3, Seconds))) {
      hub.workerConnections().size shouldBe 0
    }
  }


  case class TestConnector(
    id: String,
    ctx: ContextConfig,
    runner: WorkerRunner) extends WorkerConnector {

    import scala.concurrent.ExecutionContext.Implicits.global

    def askConnection(): Future[PerJobConnection] = runner(id, ctx).map(conn => mock[PerJobConnection])

    def warmUp(): Unit = ()

    def shutdown(force: Boolean): Future[Unit] = ???

    def whenTerminated(): Future[Unit] = ???

    def releaseConnection(connectionId: String): Unit = ???
  }
} 
Example 10
Source File: StoreFlusherSpec.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist.master.execution.status

import java.util.concurrent.atomic.AtomicReference

import akka.testkit.TestActorRef
import io.hydrosphere.mist.master.Messages.StatusMessages._
import io.hydrosphere.mist.master.logging.JobLogger
import io.hydrosphere.mist.master.{ActorSpec, JobDetails, TestData}
import mist.api.data.JsNumber
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}

import scala.concurrent.{Future, Promise}

class StoreFlusherSpec extends ActorSpec("store-flusher") with TestData with Eventually {

  it("should flush job statuses") {
    val initial1 = Promise[JobDetails]
    val initial2 = Promise[JobDetails]

    val updateResult1 = new AtomicReference[Option[JobDetails]](None)
    val updateResult2 = new AtomicReference[Option[JobDetails]](None)
    val props = StoreFlusher.props(
      get = (id: String) => id match {
        case "1" => initial1.future
        case "2" => initial2.future
      },
      update = (d: JobDetails) => {
        d.jobId match {
          case "1" => updateResult1.set(Some(d))
          case "2" => updateResult2.set(Some(d))
        }
        Future.successful(())
      },
      jobLoggerF = _ => JobLogger.NOOP
    )
    val flusher = TestActorRef(props)

    Seq("1", "2").foreach(id => {
      flusher ! ReportedEvent.plain(QueuedEvent(id))
      flusher ! ReportedEvent.plain(StartedEvent(id, System.currentTimeMillis()))
      flusher ! ReportedEvent.plain(FinishedEvent(id, System.currentTimeMillis(), JsNumber(42)))
    })
    initial1.success(mkDetails(JobDetails.Status.Initialized).copy(jobId = "1"))
    initial2.success(mkDetails(JobDetails.Status.Initialized).copy(jobId = "2"))

    def test(ref: AtomicReference[Option[JobDetails]]): Unit = {
      val value = ref.get
      value.isDefined shouldBe true

      val d = value.get
      d.status shouldBe JobDetails.Status.Finished
    }

    eventually(timeout(Span(3, Seconds))) {
      test(updateResult1)
      test(updateResult2)
    }
  }
} 
Example 11
Source File: StatusReporterSpec.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist.master.execution.status

import io.hydrosphere.mist.core.MockitoSugar
import io.hydrosphere.mist.master.Messages.StatusMessages.{QueuedEvent, UpdateStatusEvent}
import io.hydrosphere.mist.master.logging.{JobLogger, LogService}
import io.hydrosphere.mist.master.store.JobRepository
import io.hydrosphere.mist.master.{ActorSpec, EventsStreamer, JobDetails, TestData}
import org.mockito.Mockito._
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}

class StatusReporterSpec extends ActorSpec("status-reporter") with TestData with Eventually with MockitoSugar {

  it("should flush and stream") {
    val repo = mock[JobRepository]
    when(repo.get(any[String])).thenSuccess(Some(mkDetails(JobDetails.Status.Initialized)))
    when(repo.update(any[JobDetails])).thenSuccess(())
    val logService = mock[LogService]
    when(logService.getJobLogger(any[String]))
      .thenReturn(JobLogger.NOOP)

    val streamer = mock[EventsStreamer]
    val reporter = StatusReporter.reporter(repo, streamer, logService)

    reporter.reportPlain(QueuedEvent("id"))

    verify(streamer).push(any[UpdateStatusEvent])

    eventually(timeout(Span(3, Seconds))) {
      verify(repo).get(any[String])
      verify(repo).update(any[JobDetails])
    }
  }

} 
Example 12
Source File: ZkJobSpecRepositoryTest.scala    From metronome   with Apache License 2.0
package dcos.metronome
package repository.impl.kv

import dcos.metronome.model.{JobId, JobSpec}
import dcos.metronome.utils.state.PersistentStoreWithNestedPathsSupport
import dcos.metronome.utils.test.Mockito
import org.scalatest.FunSuite
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}

import concurrent.Future

class ZkJobSpecRepositoryTest extends FunSuite with Mockito with ScalaFutures {

  override implicit def patienceConfig: PatienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(500, Millis))

  test("delete") {
    val f = new Fixture
    f.store.delete(any).returns(Future.successful(true))

    f.repository.delete(f.jobId).futureValue

    verify(f.store).delete("job-runs/foo.bar")
    verify(f.store).delete("job-specs/foo.bar")
  }

  test("create") {
    val f = new Fixture

    f.store.createPath(any).returns(Future.successful(()))

    f.repository.create(f.jobId, f.jobSpec).failed.futureValue

    verify(f.store).createPath("job-runs/foo.bar")
    verify(f.store).create(eq("job-specs/foo.bar"), any)
  }

  class Fixture {
    val ec = scala.concurrent.ExecutionContext.global

    val store: PersistentStoreWithNestedPathsSupport = mock[PersistentStoreWithNestedPathsSupport]
    val repository = new ZkJobSpecRepository(store, ec)

    val jobId = JobId("foo.bar")
    val jobSpec = JobSpec(jobId)
  }
} 
Example 13
Source File: OutputCommitCoordinatorIntegrationSuite.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.scheduler

import org.apache.hadoop.mapred.{FileOutputCommitter, TaskAttemptContext}
import org.scalatest.concurrent.Timeouts
import org.scalatest.time.{Span, Seconds}

import org.apache.spark.{SparkConf, SparkContext, LocalSparkContext, SparkFunSuite, TaskContext}
import org.apache.spark.util.Utils


class OutputCommitCoordinatorIntegrationSuite
  extends SparkFunSuite
  with LocalSparkContext
  with Timeouts {

  override def beforeAll(): Unit = {
    super.beforeAll()
    val conf = new SparkConf()
      .set("master", "local[2,4]")
      .set("spark.speculation", "true")
      .set("spark.hadoop.mapred.output.committer.class",
        classOf[ThrowExceptionOnFirstAttemptOutputCommitter].getCanonicalName)
    sc = new SparkContext("local[2, 4]", "test", conf)
  }

  test("exception thrown in OutputCommitter.commitTask()") {//异常抛出
    // Regression test for SPARK-10381
    failAfter(Span(60, Seconds)) {
      val tempDir = Utils.createTempDir()
      try {
        sc.parallelize(1 to 4, 2).map(_.toString).saveAsTextFile(tempDir.getAbsolutePath + "/out")
      } finally {
        Utils.deleteRecursively(tempDir)
      }
    }
  }
}

private class ThrowExceptionOnFirstAttemptOutputCommitter extends FileOutputCommitter {
  override def commitTask(context: TaskAttemptContext): Unit = {
    val ctx = TaskContext.get()
    if (ctx.attemptNumber < 1) {
      throw new java.io.FileNotFoundException("Intentional exception")
    }
    super.commitTask(context)
  }
} 
Example 14
Source File: ToolConstants.scala    From scala-debugger   with Apache License 2.0
package test

import org.scalatest.Tag
import org.scalatest.time.{Milliseconds, Seconds, Span}


object ToolConstants {
  val DefaultMaxInputQueueSize = 50000
  val DefaultMaxOutputQueueSize = 50000
  val AccumulationTimeout = Span(500, Milliseconds)
  val EventuallyTimeout = Span(10, Seconds)
  val EventuallyInterval = Span(5, Milliseconds)
  val NewInputLineTimeout = Span(10, Seconds)
  val NextOutputLineTimeout = Span(5, Seconds)
  val NoWindows = Tag("NoWindows")
} 
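Constants like these plug directly into Eventually's timeout and interval parameters. A hypothetical caller (the spec class below is illustrative, not part of scala-debugger, and uses the ScalaTest 3.1+ import layout):

import org.scalatest.concurrent.Eventually
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
import test.ToolConstants

class ToolConstantsUsageSketch extends AnyFunSpec with Matchers with Eventually {
  it("retries using the shared timeout and interval constants") {
    eventually(timeout(ToolConstants.EventuallyTimeout), interval(ToolConstants.EventuallyInterval)) {
      1 + 1 shouldBe 2 // stand-in for an assertion against the tool under test
    }
  }
}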
Example 15
Source File: SparkLauncherErrorHandlingTest.scala    From seahorse   with Apache License 2.0
package ai.deepsense.sessionmanager

import scalaz.Failure

import com.google.inject.Guice
import org.scalatest.Inside
import org.scalatest.concurrent.Futures
import org.scalatest.time.{Second, Seconds, Span}

import ai.deepsense.commons.StandardSpec
import ai.deepsense.sessionmanager.service.sessionspawner.sparklauncher.clusters.SeahorseSparkLauncher
import ai.deepsense.sessionmanager.service.sessionspawner.sparklauncher.spark.SparkArgumentParser.UnknownOption
import ai.deepsense.sessionmanager.service.sessionspawner.sparklauncher.{SparkLauncherConfig, SparkLauncherError}

class SparkLauncherErrorHandlingTest extends StandardSpec with Futures with Inside {

  import ai.deepsense.sessionmanager.service.TestData._

  private implicit val patience = PatienceConfig(Span(10, Seconds), Span(1, Second))
  private val sparkLauncherConfig = {
    val injector = Guice.createInjector(new SessionManagerAppModule())
    injector.getInstance(classOf[SparkLauncherConfig])
  }

  "Unknown opt handling (pre spark launcher)" in {
    val clusterDetails = someClusterDetails.copy (
      params = Some("--non-existing-parameter some-value")
    )
    val creating = SeahorseSparkLauncher(Nil, sparkLauncherConfig, clusterDetails)
    inside(creating) { case Failure(error) =>
      error shouldBe an [UnknownOption]
    }
  }

  "Unknown illegal conf key in params" in {
    val clusterDetails = someClusterDetails.copy (
      params = Some("--conf not-spark.executor.extraJavaOptions=-XX:+PrintGCDetails")
    )
    val creating = SeahorseSparkLauncher(Nil, sparkLauncherConfig, clusterDetails)
    inside(creating) { case Failure(error) =>
      error shouldBe an [SparkLauncherError]
      error.getMessage should include ("'key' must start with 'spark.'")
    }

  }

} 
Example 16
Source File: OutputCommitCoordinatorIntegrationSuite.scala    From iolap   with Apache License 2.0
package org.apache.spark.scheduler

import org.apache.hadoop.mapred.{FileOutputCommitter, TaskAttemptContext}
import org.scalatest.concurrent.Timeouts
import org.scalatest.time.{Span, Seconds}

import org.apache.spark.{SparkConf, SparkContext, LocalSparkContext, SparkFunSuite, TaskContext}
import org.apache.spark.util.Utils


class OutputCommitCoordinatorIntegrationSuite
  extends SparkFunSuite
  with LocalSparkContext
  with Timeouts {

  override def beforeAll(): Unit = {
    super.beforeAll()
    val conf = new SparkConf()
      .set("master", "local[2,4]")
      .set("spark.hadoop.outputCommitCoordination.enabled", "true")
      .set("spark.hadoop.mapred.output.committer.class",
        classOf[ThrowExceptionOnFirstAttemptOutputCommitter].getCanonicalName)
    sc = new SparkContext("local[2, 4]", "test", conf)
  }

  test("exception thrown in OutputCommitter.commitTask()") {
    // Regression test for SPARK-10381
    failAfter(Span(60, Seconds)) {
      val tempDir = Utils.createTempDir()
      try {
        sc.parallelize(1 to 4, 2).map(_.toString).saveAsTextFile(tempDir.getAbsolutePath + "/out")
      } finally {
        Utils.deleteRecursively(tempDir)
      }
    }
  }
}

private class ThrowExceptionOnFirstAttemptOutputCommitter extends FileOutputCommitter {
  override def commitTask(context: TaskAttemptContext): Unit = {
    val ctx = TaskContext.get()
    if (ctx.attemptNumber < 1) {
      throw new java.io.FileNotFoundException("Intentional exception")
    }
    super.commitTask(context)
  }
} 
Example 17
Source File: OutputCommitCoordinatorIntegrationSuite.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.scheduler

import org.apache.hadoop.mapred.{FileOutputCommitter, TaskAttemptContext}
import org.scalatest.concurrent.Timeouts
import org.scalatest.time.{Seconds, Span}

import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkFunSuite, TaskContext}
import org.apache.spark.util.Utils


class OutputCommitCoordinatorIntegrationSuite
  extends SparkFunSuite
  with LocalSparkContext
  with Timeouts {

  override def beforeAll(): Unit = {
    super.beforeAll()
    val conf = new SparkConf()
      .set("spark.hadoop.outputCommitCoordination.enabled", "true")
      .set("spark.hadoop.mapred.output.committer.class",
        classOf[ThrowExceptionOnFirstAttemptOutputCommitter].getCanonicalName)
    sc = new SparkContext("local[2, 4]", "test", conf)
  }

  test("exception thrown in OutputCommitter.commitTask()") {
    // Regression test for SPARK-10381
    failAfter(Span(60, Seconds)) {
      val tempDir = Utils.createTempDir()
      try {
        sc.parallelize(1 to 4, 2).map(_.toString).saveAsTextFile(tempDir.getAbsolutePath + "/out")
      } finally {
        Utils.deleteRecursively(tempDir)
      }
    }
  }
}

private class ThrowExceptionOnFirstAttemptOutputCommitter extends FileOutputCommitter {
  override def commitTask(context: TaskAttemptContext): Unit = {
    val ctx = TaskContext.get()
    if (ctx.attemptNumber < 1) {
      throw new java.io.FileNotFoundException("Intentional exception")
    }
    super.commitTask(context)
  }
} 
Example 18
Source File: TestAggregateSpec.scala    From akka-cqrs   with Apache License 2.0
package com.productfoundry.akka.cluster

import akka.actor.Props
import com.productfoundry.akka.PassivationConfig
import com.productfoundry.akka.cqrs.{AggregateStatus, AggregateFactory, AggregateIdResolution, EntityIdResolution}
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Millis, Span}
import test.support.ClusterSpec
import test.support.ClusterConfig._

class TestAggregateSpecMultiJvmNode1 extends TestAggregateSpec
class TestAggregateSpecMultiJvmNode2 extends TestAggregateSpec

object TestActorFactory extends AggregateFactory[TestAggregate] {
  override def props(config: PassivationConfig): Props = {
    Props(classOf[TestAggregate], config)
  }
}

class TestAggregateSpec extends ClusterSpec with Eventually {

  implicit def entityIdResolution: EntityIdResolution[TestAggregate] = new AggregateIdResolution[TestAggregate]()

  implicit def aggregateFactory: AggregateFactory[TestAggregate] = TestActorFactory

  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(Span(5, Seconds)),
    interval = scaled(Span(100, Millis))
  )

  "Test aggregate" must {

    "given cluster joined" in {
      setupSharedJournal()
      joinCluster()
    }

    enterBarrier("when")

    val entityContext = new ClusterSingletonEntityContext(system)

    "send all commands to same aggregate" in {

      def test(): Unit = {
        val aggregate = entityContext.entitySupervisorFactory[TestAggregate].getOrCreate
        val id = TestId("1")

        aggregate ! Count(id)
        expectMsgType[AggregateStatus.Success]

        eventually {
          aggregate ! GetCount(id)
          expectMsgType[GetCountResult].count shouldBe 2
        }
      }

      on(node1)(test())

      on(node2)(test())
    }
  }
} 
Example 19
Source File: HedgeFundSpec.scala    From CSYE7200_Old   with MIT License
package edu.neu.coe.csye7200

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import edu.neu.coe.csye7200.actors.SymbolQuery
import org.scalatest.concurrent.{Futures, ScalaFutures}
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{FlatSpec, Matchers, _}

import scala.concurrent.Future
import scala.util.{Failure, Success}


class HedgeFundSpec extends FlatSpec with Matchers with
  Futures with ScalaFutures with TryValues with Inside {

  behavior of "SymbolQuery"

  it should "work" in {
    import scala.concurrent.duration._
    implicit val system: ActorSystem = ActorSystem("HedgeFund")
    implicit val timeout: Timeout = Timeout(30.seconds)
    val ay = HedgeFund.startup(ConfigFactory.load())
    ay should matchPattern { case Success(_) => }
    val qf = ay match {
      case Success(q) => q ? SymbolQuery("MSFT", List("name", "symbol", "price", "GF", "t", "l"))
      case Failure(x) => Future.failed(x)
    }
    whenReady(qf, org.scalatest.concurrent.PatienceConfiguration.Timeout(Span(10, Seconds))) { q => println(q) }
  }

} 
Example 20
Source File: PaginationSupportSpec.scala    From vamp   with Apache License 2.0
package io.vamp.persistence

import io.vamp.common.akka.ExecutionContextProvider
import io.vamp.common.http.OffsetResponseEnvelope
import org.junit.runner.RunWith
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.junit.JUnitRunner
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ FlatSpec, Matchers }

import scala.concurrent.{ ExecutionContext, Future }

@RunWith(classOf[JUnitRunner])
class PaginationSupportSpec extends FlatSpec with Matchers with PaginationSupport with ScalaFutures with ExecutionContextProvider {

  case class ResponseEnvelope(response: List[Int], total: Long, page: Int, perPage: Int) extends OffsetResponseEnvelope[Int]

  implicit def executionContext = ExecutionContext.global

  implicit val defaultPatience = PatienceConfig(timeout = Span(3, Seconds), interval = Span(100, Millis))

  "PaginationSupport" should "collect all from single page" in {
    val list = (1 to 5).toList

    val source = (page: Int, perPage: Int) ⇒ Future {
      ResponseEnvelope(list.take(perPage), list.size, page, perPage)
    }

    whenReady(consume(allPages(source, 5)))(_ shouldBe list)
  }

  it should "collect all from multiple pages" in {
    val list = (1 to 15).toList

    val source = (page: Int, perPage: Int) ⇒ Future {
      ResponseEnvelope(list.slice((page - 1) * perPage, page * perPage), list.size, page, perPage)
    }

    whenReady(consume(allPages(source, 5)))(_ shouldBe list)
  }

  it should "collect all from multiple pages without round total / per page" in {
    val list = (1 to 17).toList

    val source = (page: Int, perPage: Int) ⇒ Future {
      ResponseEnvelope(list.slice((page - 1) * perPage, page * perPage), list.size, page, perPage)
    }

    whenReady(consume(allPages(source, 5)))(_ shouldBe list)
  }
} 
Example 21
Source File: SQLQuerySpec.scala    From scruid   with Apache License 2.0
package ing.wbaa.druid

import java.time.{ LocalDateTime, ZonedDateTime }

import akka.stream.scaladsl.Sink
import ing.wbaa.druid.SQL._
import ing.wbaa.druid.client.CirceDecoders
import io.circe.generic.auto._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.wordspec.AnyWordSpec

class SQLQuerySpec extends AnyWordSpec with Matchers with ScalaFutures with CirceDecoders {
  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(20, Seconds), interval = Span(5, Millis))
  private val totalNumberOfEntries  = 39244
  private val usOnlyNumberOfEntries = 528

  implicit val config = DruidConfig()
  implicit val mat    = config.client.actorMaterializer

  case class Result(hourTime: ZonedDateTime, count: Int)

  "SQL query" should {

    val sqlQuery: SQLQuery = dsql"""
      |SELECT FLOOR(__time to HOUR) AS hourTime, count(*) AS "count"
      |FROM wikipedia
      |WHERE "__time" BETWEEN TIMESTAMP '2015-09-12 00:00:00' AND TIMESTAMP '2015-09-13 00:00:00'
      |GROUP BY 1
      |""".stripMargin

    "successfully be interpreted by Druid" in {
      val resultsF = sqlQuery.execute()
      whenReady(resultsF) { response =>
        response.list[Result].map(_.count).sum shouldBe totalNumberOfEntries
      }
    }

    "support streaming" in {
      val resultsF = sqlQuery.streamAs[Result]().runWith(Sink.seq)

      whenReady(resultsF) { results =>
        results.map(_.count).sum shouldBe totalNumberOfEntries
      }
    }
  }

  "SQL parameterized query" should {

    val fromDateTime   = LocalDateTime.of(2015, 9, 12, 0, 0, 0, 0)
    val untilDateTime  = fromDateTime.plusDays(1)
    val countryIsoCode = "US"

    val sqlQuery: SQLQuery =
      dsql"""
      |SELECT FLOOR(__time to HOUR) AS hourTime, count(*) AS "count"
      |FROM wikipedia
      |WHERE "__time" BETWEEN ${fromDateTime} AND ${untilDateTime} AND countryIsoCode = ${countryIsoCode}
      |GROUP BY 1
      |""".stripMargin

    "be expressed as a parameterized query with three parameters" in {
      sqlQuery.query.count(_ == '?') shouldBe 3
      sqlQuery.parameters.size shouldBe 3

      sqlQuery.parameters(0) shouldBe SQLQueryParameter(SQLQueryParameterType.Timestamp,
                                                        "2015-09-12 00:00:00")
      sqlQuery.parameters(1) shouldBe SQLQueryParameter(SQLQueryParameterType.Timestamp,
                                                        "2015-09-13 00:00:00")
      sqlQuery.parameters(2) shouldBe SQLQueryParameter(SQLQueryParameterType.Varchar, "US")
    }

    "successfully be interpreted by Druid" in {
      val resultsF = sqlQuery.execute()
      whenReady(resultsF) { response =>
        response.list[Result].map(_.count).sum shouldBe usOnlyNumberOfEntries
      }
    }

    "support streaming" in {
      val resultsF = sqlQuery.streamAs[Result]().runWith(Sink.seq)

      whenReady(resultsF) { results =>
        results.map(_.count).sum shouldBe usOnlyNumberOfEntries
      }

    }

  }
} 
Example 22
Source File: FuturesTest.scala    From courscala   with Apache License 2.0
package org.coursera.common.concurrent

import org.junit.Test
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.junit.AssertionsForJUnit
import org.scalatest.time.Millis
import org.scalatest.time.Seconds
import org.scalatest.time.Span

import scala.concurrent.Future

class FuturesTest extends AssertionsForJUnit with ScalaFutures {

  implicit override val patienceConfig =
    PatienceConfig(timeout = scaled(Span(1, Seconds)), interval = scaled(Span(20, Millis)))

  import scala.concurrent.ExecutionContext.Implicits.global

  @Test
  def map(): Unit = {
    val raw = Map("a" -> Future.successful(1), "b" -> Future.successful(2))
    val expected = Map("a" -> 1, "b" -> 2)

    assertResult(expected)(Futures.map(raw).futureValue)
  }

  @Test
  def extract(): Unit = {
    val Futures.Extract(future1, future2) = Futures.immediate((1, 2))

    assertResult(1)(future1.futureValue)
    assertResult(2)(future2.futureValue)
  }

} 
Example 23
Source File: EmbeddedKafkaSpecSupport.scala    From embedded-kafka-schema-registry   with MIT License
package net.manub.embeddedkafka.schemaregistry

import java.net.{InetAddress, Socket}

import net.manub.embeddedkafka.schemaregistry.EmbeddedKafkaSpecSupport.{
  Available,
  NotAvailable,
  ServerStatus
}
import org.scalatest.Assertion
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.wordspec.AnyWordSpecLike

import scala.util.{Failure, Success, Try}

trait EmbeddedKafkaSpecSupport
    extends AnyWordSpecLike
    with Matchers
    with Eventually
    with IntegrationPatience {

  implicit val config: PatienceConfig =
    PatienceConfig(Span(1, Seconds), Span(100, Milliseconds))

  def expectedServerStatus(port: Int, expectedStatus: ServerStatus): Assertion =
    eventually {
      status(port) shouldBe expectedStatus
    }

  private def status(port: Int): ServerStatus = {
    Try(new Socket(InetAddress.getByName("localhost"), port)) match {
      case Failure(_) => NotAvailable
      case Success(_) => Available
    }
  }
}

object EmbeddedKafkaSpecSupport {
  sealed trait ServerStatus
  case object Available    extends ServerStatus
  case object NotAvailable extends ServerStatus
} 
Example 24
Source File: GlowBaseTest.scala    From glow   with Apache License 2.0
package io.projectglow.sql

import htsjdk.samtools.util.Log
import org.apache.spark.sql.SparkSession
import org.apache.spark.{DebugFilesystem, SparkConf}
import org.scalatest.concurrent.{AbstractPatienceConfiguration, Eventually}
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.{Args, FunSuite, Status, Tag}

import io.projectglow.Glow
import io.projectglow.SparkTestShim.SharedSparkSessionBase
import io.projectglow.common.{GlowLogging, TestUtils}
import io.projectglow.sql.util.BGZFCodec

abstract class GlowBaseTest
    extends FunSuite
    with SharedSparkSessionBase
    with GlowLogging
    with GlowTestData
    with TestUtils
    with JenkinsTestPatience {

  override protected def sparkConf: SparkConf = {
    super
      .sparkConf
      .set("spark.hadoop.io.compression.codecs", classOf[BGZFCodec].getCanonicalName)
  }

  override def initializeSession(): Unit = ()

  override protected implicit def spark: SparkSession = {
    val sess = SparkSession.builder().config(sparkConf).master("local[2]").getOrCreate()
    Glow.register(sess)
    SparkSession.setActiveSession(sess)
    Log.setGlobalLogLevel(Log.LogLevel.ERROR)
    sess
  }

  protected def gridTest[A](testNamePrefix: String, testTags: Tag*)(params: Seq[A])(
      testFun: A => Unit): Unit = {
    for (param <- params) {
      test(testNamePrefix + s" ($param)", testTags: _*)(testFun(param))
    }
  }

  override def afterEach(): Unit = {
    DebugFilesystem.assertNoOpenStreams()
    eventually {
      assert(spark.sparkContext.getPersistentRDDs.isEmpty)
      assert(spark.sharedState.cacheManager.isEmpty, "Cache not empty.")
    }
    super.afterEach()
  }

  override def runTest(testName: String, args: Args): Status = {
    logger.info(s"Running test '$testName'")
    val res = super.runTest(testName, args)
    if (res.succeeds()) {
      logger.info(s"Done running test '$testName'")
    } else {
      logger.info(s"Done running test '$testName' with a failure")
    }
    res
  }

  protected def withSparkConf[T](configs: Map[String, String])(f: => T): T = {
    val initialConfigValues = configs.keys.map(k => (k, spark.conf.getOption(k)))
    try {
      configs.foreach { case (k, v) => spark.conf.set(k, v) }
      f
    } finally {
      initialConfigValues.foreach {
        case (k, Some(v)) => spark.conf.set(k, v)
        case (k, None) => spark.conf.unset(k)
      }
    }
  }
}


trait JenkinsTestPatience extends AbstractPatienceConfiguration with Eventually {

  final override implicit val patienceConfig: PatienceConfig =
    if (sys.env.get("JENKINS_HOST").nonEmpty) {
      // increase the timeout on jenkins where parallelizing causes things to be very slow
      PatienceConfig(Span(10, Seconds), Span(50, Milliseconds))
    } else {
      // use the default timeout on local machines so failures don't hang for a long time
      PatienceConfig(Span(5, Seconds), Span(15, Milliseconds))
    }
} 
Example 25
Source File: AuthenticationRouterSpec.scala    From akka-http-rest-api   with MIT License
package authentication

import akka.actor.ActorSystem
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Route
import core.ErrorWrapper
import token.TokenRepository
import user.UserRepository
import utils.{FlatSpecWithRedis, FlatSpecWithSql, BaseRoutesSpec}
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import UserAuthJsonProtocol._
import core.CommonJsonProtocol._
import org.scalatest.time.{Millis, Seconds, Span}

class AuthenticationRouterSpec extends BaseRoutesSpec with FlatSpecWithSql with FlatSpecWithRedis {
  spec =>

  override implicit val actorSystem: ActorSystem = spec.system

  val tokenRepo = new TokenRepository
  val userRepo = new UserRepository
  val userAuthRepo = new UserAuthRepository
  val userAuthService = new UserAuthService(tokenRepo, userAuthRepo, userRepo)

  val authRouter = new AuthenticationRouter with TestRoutesSupport {
    override val userAuthService = spec.userAuthService

    override implicit val redis = spec.redis
  }

  val routes = Route.seal(authRouter.authenticationRoutes)

  // how long it should wait before declaring that the future has timed out
  implicit val defaultPatience = PatienceConfig(timeout = Span(2, Seconds), interval = Span(50, Millis))

  "POST /register" should "register new user" in {
    Post("/auth/register", UserRegistrationRequest("[email protected]", "password", "Jan", "Kowalski", 1, 0)) ~> routes ~> check {
      userAuthRepo.findByUserEmail("[email protected]").futureValue should be('defined)
      status should be(StatusCodes.Created)
    }
  }

  "POST /register with invalid data" should "result in an error" in {
    Post("/auth/register") ~> routes ~> check {
      status should be(StatusCodes.BadRequest)
    }
  }

  "POST /register with an existing email" should "return 409 with an error message" in {
    userAuthService.register(UserRegistrationRequest("jan@example.com", "password", "Jan", "Kowalski", 1, 0)).futureValue

    Post("/auth/register", UserRegistrationRequest("[email protected]", "password", "Jan", "Kowalski", 1, 0)) ~> routes ~> check {
      status should be(StatusCodes.Conflict)
      entityAs[ErrorWrapper].userMessage should be("E-mail already in use")
    }
  }

  "POST /login with valid data" should "result in login user" in {
    userAuthService.register(UserRegistrationRequest("jan@example.com", "password", "Jan", "Kowalski", 1, 0)).futureValue

    Post("/auth/login", UserLoginRequest("[email protected]", "password")) ~> routes ~> check {
      status should be(StatusCodes.OK)
    }
  }

  "POST /login with invalid data" should "result in an error" in {
    Post("/auth/login") ~> routes ~> check {
      status should be(StatusCodes.BadRequest)
    }
  }

  "POST /login with invalid password" should "result in an error" in {
    userAuthService.register(UserRegistrationRequest("jan@example.com", "password", "Jan", "Kowalski", 1, 0)).futureValue

    Post("/auth/login", UserLoginRequest("[email protected]", "pass")) ~> routes ~> check {
      status should be(StatusCodes.BadRequest)
      entityAs[ErrorWrapper].userMessage should be("Password is invalid")
    }
  }

  "POST /login with unknown email" should "result in an error" in {
    Post("/auth/login", UserLoginRequest("[email protected]", "password")) ~> routes ~> check {
      status should be(StatusCodes.BadRequest)
      entityAs[ErrorWrapper].userMessage should be("E-mail does not exist")
    }
  }

  "POST /auth/whatever" should "not be bound to /auth - reject unmatchedPath request" in {
    Post("/auth/whatever") ~> routes ~> check {
      status should be(StatusCodes.NotFound)
    }
  }
} 
Example 26
Source File: LocalIntegrationTest.scala    From kafka-examples   with Apache License 2.0
package com.cloudera.streaming.refapp

import java.sql.Timestamp

import org.scalatest.Matchers._
import org.scalatest.concurrent.Eventually._
import org.scalatest.time.{Seconds, Span}

import org.apache.spark.sql.Encoders

class LocalIntegrationTest extends IntegrationTestBase {

  test("Integration test with one kafka and one spark instance embedded in the same JVM") {

    val inputDir = "src/test/resources/samples"

    val spark = EmbeddedSpark.sparkSession

    val fileSource = new FileSources(spark, inputDir)
    val kafkaConfig = EmbeddedKafkaBroker.defaultKafkaConfig
    val kafkaSource = new KafkaSource(spark, kafkaConfig)

    val application = new Application(
      spark,
      Sources(
        statesFromCluster = fileSource.jsonFile("states"),
        customersFromCluster = fileSource.jsonFile("customers"),
        vendorsFromCluster = fileSource.jsonFile("vendors"),
        customersFromStream = kafkaSource.jsonStreamWithKafkaTimestamp("customer"),
        vendorsFromStream = kafkaSource.jsonStreamWithTimestampFromMessage("vendor", "update_timestamp"),
        transactionsFromStream = kafkaSource.jsonStreamWithTimestampFromMessage("transaction", "event_timestamp")
      ),
      Sinks(
        invalidTransactions = Memory.memorySink("invalidTransactions"),
        validTransactions = Memory.memorySink("validTransactions"),
        customerOrphans = Memory.memorySink("customerOrphans"),
        vendorOrphans = Memory.memorySink("vendorOrphans"),
        customers = Memory.memorySink("customers"),
        vendors = Memory.memorySink("vendors"),
        transactionsOperationalMetadata = Memory.memorySink("transactionsOperationalMetadata")
      ))

    application.start()

    eventually(timeout(Span(20, Seconds)), interval(Span(5, Seconds))) {
      EmbeddedKafkaBroker.publishStringMessageToKafka(
        "transaction",
        """{
          "transaction_id": "1",
          "customer_id": 1,
          "vendor_id": 1,
          "event_state": "CREATED",
          "event_timestamp": "2018-11-12 09:42:00",
          "price": "100",
          "card_type": "Credit"}""")
      EmbeddedKafkaBroker.publishStringMessageToKafka(
        "transaction",
        """{
          "transaction_id": "21",
          "customer_id": 100,
          "vendor_id": 2,
          "event_state": "SWIPED",
          "event_timestamp": "2018-11-13 09:45:01",
          "price": "100",
          "card_type": "Debit"}""")

      val validTransactionsQuery = application.streamingQueries.validTransactions
      validTransactionsQuery.processAllAvailable()
      val currentContent = spark.table("validTransactions").as[Transaction](Encoders.product).collect()

      currentContent.shouldBe(
        Array(
          Transaction(
            transaction_id = "1",
            customer_id = Some(1),
            vendor_id = Some(1),
            event_state = Some("CREATED"),
            event_timestamp = Timestamp.valueOf("2018-11-12 09:42:00"),
            price = Some("100"),
            card_type = Some("Credit")),
          Transaction(
            transaction_id = "21",
            customer_id = Some(100),
            vendor_id = Some(2),
            event_state = Some("SWIPED"),
            event_timestamp = Timestamp.valueOf("2018-11-13 09:45:01"),
            price = Some("100"),
            card_type = Some("Debit"))
        ))
    }
  }
} 
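
Usage note: Example 26 drives `eventually` with an explicit `timeout` and polling `interval`, both built from `Span(n, Seconds)`. A minimal self-contained sketch of the same idiom, assuming ScalaTest 3.1+ (suite and value names are illustrative):

import java.util.concurrent.atomic.AtomicBoolean

import org.scalatest.concurrent.Eventually
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Seconds, Span}

class PollingSketchSpec extends AnyFlatSpec with Matchers with Eventually {
  "a slow side effect" should "become visible within the timeout" in {
    val ready = new AtomicBoolean(false)
    new Thread(() => { Thread.sleep(1000); ready.set(true) }).start()

    // Retry the assertion every second, giving up after 10 seconds overall.
    eventually(timeout(Span(10, Seconds)), interval(Span(1, Seconds))) {
      ready.get shouldBe true
    }
  }
}
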
Example 27
Source File: HiveOrcTest.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.landoop.streamreactor.hive.it

import java.util.concurrent.TimeUnit

import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Seconds, Span}
import org.scalatest.wordspec.AnyWordSpec

import scala.io.Source

class HiveOrcTest extends AnyWordSpec with Matchers with PersonTestData with Eventually with HiveTests {

  private implicit val patience: PatienceConfig = PatienceConfig(Span(120, Seconds), Span(10, Seconds))

  "Hive" should {
    "write non partitioned orc records" in {
      val count = 10000L

      val topic = createTopic()
      val taskDef = Source.fromInputStream(getClass.getResourceAsStream("/hive_sink_task_no_partitions-orc.json")).getLines().mkString("\n")
        .replace("{{TOPIC}}", topic)
        .replace("{{TABLE}}", topic)
        .replace("{{NAME}}", topic)
      postTask(taskDef)

      val producer = stringStringProducer()
      writeRecords(producer, topic, JacksonSupport.mapper.writeValueAsString(person), count)
      producer.close(30, TimeUnit.SECONDS)

      // we should now have 10000 records in Hive, which we can verify via JDBC
      eventually {
        withConn { conn =>
          val stmt = conn.createStatement
          val rs = stmt.executeQuery(s"select count(*) from $topic")
          rs.next()
          rs.getLong(1) shouldBe count
        }
      }

      stopTask(topic)
    }
  }
} 
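
Usage note: the key move in Example 27 is declaring an implicit `PatienceConfig` once, so every bare `eventually { ... }` in the suite inherits the 120-second timeout and 10-second interval. A stripped-down sketch under the same assumption (ScalaTest 3.1+, hypothetical suite name):

import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Seconds, Span}
import org.scalatest.wordspec.AnyWordSpec

class SuiteWidePatienceSketch extends AnyWordSpec with Matchers with Eventually {
  // Applies to every eventually block in this suite: poll each 10s, up to 120s.
  private implicit val patience: PatienceConfig =
    PatienceConfig(timeout = Span(120, Seconds), interval = Span(10, Seconds))

  "a long-running job" should {
    "finish within two minutes" in {
      val visibleAt = System.currentTimeMillis() + 5000
      eventually { // no explicit timeout needed; the implicit applies
        System.currentTimeMillis() should be >= visibleAt
      }
    }
  }
}
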
Example 28
Source File: StdinForSystemSpec.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package system

import org.apache.toree.kernel.protocol.v5.client.SparkKernelClient
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Milliseconds, Span}
import org.scalatest.{BeforeAndAfterAll, FunSpec, Matchers}
import test.utils.root.{SparkKernelClientDeployer, SparkKernelDeployer}

// NOTE: the original class declaration and fixtures were lost in extraction;
// the wrapper below is a plausible reconstruction (the field values and the
// deployer call are assumptions, not verbatim upstream code).
class StdinForSystemSpec extends FunSpec with Matchers
  with BeforeAndAfterAll with Eventually {

  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(Span(10, Seconds)),
    interval = scaled(Span(50, Milliseconds))
  )

  private val TestReplyString = "some value" // assumed reply payload

  private lazy val client: SparkKernelClient =
    SparkKernelClientDeployer.startInstance // assumed deployer API

  describe("Stdin for System") {
    describe("when the kernel requests input") {
      ignore("should receive input based on the client's response function") {
        var response: String = ""
        client.setResponseFunction((_, _) => TestReplyString)

        // Read in a chunk of data (our reply string) and return it as a string
        // to be verified by the test
        client.execute(
          """
            |var result: Array[Byte] = Array()
            |val in = kernel.in
            |do {
            |    result = result :+ in.read().toByte
            |} while(in.available() > 0)
            |new String(result)
          """.stripMargin
        ).onResult { result =>
          response = result.data("text/plain")
        }.onError { _ =>
          fail("Client execution to trigger kernel input request failed!")
        }

        eventually {
          response should contain (TestReplyString)
        }
      }
    }
  }

} 
Example 29
Source File: OutputCommitCoordinatorIntegrationSuite.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.scheduler

import org.apache.hadoop.mapred.{FileOutputCommitter, TaskAttemptContext}
import org.scalatest.concurrent.Timeouts
import org.scalatest.time.{Span, Seconds}

import org.apache.spark.{SparkConf, SparkContext, LocalSparkContext, SparkFunSuite, TaskContext}
import org.apache.spark.util.Utils


class OutputCommitCoordinatorIntegrationSuite
  extends SparkFunSuite
  with LocalSparkContext
  with Timeouts {

  override def beforeAll(): Unit = {
    super.beforeAll()
    val conf = new SparkConf()
      .set("master", "local[2,4]")
      .set("spark.speculation", "true")
      .set("spark.hadoop.mapred.output.committer.class",
        classOf[ThrowExceptionOnFirstAttemptOutputCommitter].getCanonicalName)
    sc = new SparkContext("local[2, 4]", "test", conf)
  }

  test("exception thrown in OutputCommitter.commitTask()") {
    // Regression test for SPARK-10381
    failAfter(Span(60, Seconds)) {
      val tempDir = Utils.createTempDir()
      try {
        sc.parallelize(1 to 4, 2).map(_.toString).saveAsTextFile(tempDir.getAbsolutePath + "/out")
      } finally {
        Utils.deleteRecursively(tempDir)
      }
    }
  }
}

private class ThrowExceptionOnFirstAttemptOutputCommitter extends FileOutputCommitter {
  override def commitTask(context: TaskAttemptContext): Unit = {
    val ctx = TaskContext.get()
    if (ctx.attemptNumber < 1) {
      throw new java.io.FileNotFoundException("Intentional exception")
    }
    super.commitTask(context)
  }
} 
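
Usage note: both Spark suites in this listing use `failAfter(Span(60, Seconds))` from `org.scalatest.concurrent.Timeouts` to abort a hanging body. In ScalaTest 3.x the trait was renamed `TimeLimits` and resolves an implicit `Signaler`; a minimal sketch against the newer API:

import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.time.{Seconds, Span}

class TimeLimitSketch extends AnyFlatSpec with TimeLimits {
  // Interrupt the test thread when the limit is hit.
  private implicit val signaler: Signaler = ThreadSignaler

  "a bounded operation" should "finish within 60 seconds" in {
    failAfter(Span(60, Seconds)) {
      Thread.sleep(100) // anything still running after 60s fails the test
    }
  }
}
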
Example 30
Source File: CassandraSpec.scala    From troy   with Apache License 2.0 5 votes vote down vote up
package troy
package macros

import java.util

import com.datastax.driver.core.{ Session, Cluster }
import org.cassandraunit.CQLDataLoader
import org.cassandraunit.dataset.CQLDataSet
import org.cassandraunit.dataset.cql.ClassPathCQLDataSet
import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.scalatest.concurrent.ScalaFutures
import org.scalatest._
import org.scalatest.time.{ Minute, Seconds, Span }
import scala.concurrent.duration._

trait CassandraSpec extends FlatSpec with BeforeAndAfterAll with BeforeAndAfterEach with ScalaFutures with Matchers {
  def port: Int = 9142
  def host: String = "127.0.0.1"

  private lazy val cluster = new Cluster.Builder().addContactPoints(host).withPort(port).build()
  implicit lazy val session: Session = cluster.connect()
  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(1, Minute), interval = Span(5, Seconds))

  def testDataFixtures: String = ""
  private lazy val fixtures = StringCQLDataSet(testDataFixtures, false, false, "test")
  private lazy val schema = Seq(new ClassPathCQLDataSet("schema/01.cql"), new ClassPathCQLDataSet("schema/02.cql"))

  override protected def beforeAll(): Unit = {
    EmbeddedCassandraServerHelper.startEmbeddedCassandra(1.minute.toMillis)
    loadClean()
    super.beforeAll()
  }

  override protected def afterAll(): Unit = {
    session.close()
    cluster.close()
    super.afterAll()
  }

  def loadClean() = {
    EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()
    loadData((schema :+ fixtures): _*)
  }

  def loadData(datasets: CQLDataSet*) = {
    val loader = new CQLDataLoader(session)
    datasets.foreach(loader.load)
  }
}

object Helpers {
  def splitStatements(statements: String) =
    statements.split(";").map(_.trim).filter(!_.isEmpty)
}

case class StringCQLDataSet(
    cqlStatements: String,
    isKeyspaceCreation: Boolean,
    isKeyspaceDeletion: Boolean,
    getKeyspaceName: String
) extends CQLDataSet {
  lazy val getCQLStatements = util.Arrays.asList(Helpers.splitStatements(cqlStatements): _*)

} 
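
Usage note: Example 30 pairs `ScalaFutures` with an overridden `patienceConfig` so that `futureValue` waits up to a minute (the stock default is 150 ms) while the embedded Cassandra warms up. A compact sketch of just that mechanism (ScalaTest 3.1+; the future is a stand-in for real work):

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Minute, Seconds, Span}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

class FuturePatienceSketch extends AnyFlatSpec with Matchers with ScalaFutures {
  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = Span(1, Minute), interval = Span(5, Seconds))

  "futureValue" should "block until completion or until patience runs out" in {
    val slowStartup = Future { Thread.sleep(500); "ready" }
    slowStartup.futureValue shouldBe "ready" // fails only after ~1 minute
  }
}
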
Example 31
Source File: XMLParsingStopSpec.scala    From akka-xml-parser   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.akka.xml

import akka.stream.scaladsl.{Keep, Source}
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.mock.MockitoSugar
import org.scalatest.time.{Millis, Seconds, Span}

class XMLParsingStopSpec extends FlatSpec
  with Matchers
  with ScalaFutures
  with MockitoSugar
  with Eventually
  with XMLParserFixtures {

  val f = fixtures
  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(5, Millis))

  import f._

  it should "Stop parsing when the passed in xPath is encountered" in {

    val source = Source(ParserTestHelpers.getBrokenMessage(ParserTestHelpers.sa100.toString, 100))

    val paths = Seq[XMLInstruction](
      XMLExtract(Seq("GovTalkMessage", "Header", "MessageDetails", "Class")),
      XMLExtract(Seq("GovTalkMessage", "Header", "MessageDetails", "Qualifier")),
      XMLExtract(Seq("GovTalkMessage", "Header", "MessageDetails", "Function")),
      XMLExtract(Seq("GovTalkMessage", "Body", "IRenvelope", "MTR", "SA100", "YourPersonalDetails", "NationalInsuranceNumber")), //This is in the body, will not be parsed
      XMLStopParsing(Seq("GovTalkMessage", "Body"))
    )

    val expected = Set(
      XMLElement(List("GovTalkMessage", "Header", "MessageDetails", "Class"), Map(), Some("HMRC-SA-SA100")),
      XMLElement(List("GovTalkMessage", "Header", "MessageDetails", "Function"), Map(), Some("submit")),
      XMLElement(List("GovTalkMessage", "Header", "MessageDetails", "Qualifier"), Map(), Some("request"))
    )

    whenReady(source.runWith(parseToXMLElements(paths))) { r =>
      r.filterNot(a => a.value == Some(FastParsingStage.STREAM_SIZE)) shouldBe expected
    }

    whenReady(source.runWith(parseToByteString(paths))) { r =>
      whenReady(source.toMat(collectByteString)(Keep.right).run()) { t =>
        r shouldBe t
      }
    }
  }

  it should "Notify if the payload exceeded the maximum allowed size" in {
    val source = Source(ParserTestHelpers.getBrokenMessage(ParserTestHelpers.sa100.toString, 100))

    val paths = Seq[XMLInstruction](XMLExtract(Seq("GovTalkMessage", "Header", "MessageDetails", "Class")))
    val expected = Set(
      XMLElement(List("GovTalkMessage", "Header", "MessageDetails", "Class"), Map(), Some("HMRC-SA-SA100")),
      XMLElement(List(), Map(), Some("Stream max size"))
    )

    whenReady(source.runWith(parseToXMLElements(paths, Some(200)))) { r =>
      r.filterNot(a => a.value == Some(FastParsingStage.STREAM_SIZE)) shouldBe expected
    }

    whenReady(source.runWith(parseToByteString(paths))) { r =>
      whenReady(source.toMat(collectByteString)(Keep.right).run()) { t =>
        r shouldBe t
      }
    }
  }


} 
Example 32
Source File: XMLParserXMLExtractNamespaceSpec.scala    From akka-xml-parser   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.akka.xml

import akka.stream.scaladsl.{Keep, Source}
import akka.util.ByteString
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.mock.MockitoSugar
import org.scalatest.time.{Millis, Seconds, Span}

class XMLParserXMLExtractNamespaceSpec extends FlatSpec
  with Matchers
  with ScalaFutures
  with MockitoSugar
  with Eventually
  with XMLParserFixtures {

  val f = fixtures
  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(5, Millis))

  import f._

  behavior of "CompleteChunkStage#parser"


  it should "Parse and extract several non-default namespaces" in {

    val testXMLX =
      <ns5:GovTalkMessage
      xmlns:ns0="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/13-14/2"
      xmlns:ns2="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/15-16/1"
      xmlns:ns5="http://www.govtalk.gov.uk/CM/envelope"
      xmlns:ns1="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/14-15/1"
      xmlns:ns3="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/16-17/1"
      xmlns:ns4="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/17-18/1"
      xmlns="">
        <ns5:EnvelopeVersion>2.0</ns5:EnvelopeVersion>
        <ns5:Header></ns5:Header>
        <ns5:GovTalkDetails></ns5:GovTalkDetails>
      </ns5:GovTalkMessage>

    val source = Source(List(ByteString(testXMLX.toString())))


    val paths = Seq[XMLInstruction](
      XMLExtract(Seq("GovTalkMessage"), Map("xmlns:ns2" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/15-16/1")),
      XMLExtract(Seq("GovTalkMessage"), Map("xmlns:BLABLA" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/13-14/2")),
      XMLExtract(Seq("GovTalkMessage"), Map("xmlns" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/17-18/1")),
      XMLExtract(Seq("GovTalkMessage"), Map("xmlns" -> "http://www.govtalk.gov.uk/CM/envelope"))
    )

    val expected = Set(
      XMLElement(List("GovTalkMessage"), Map("xmlns:ns2" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/15-16/1"), Some("")),
      XMLElement(List("GovTalkMessage"), Map("xmlns:ns0" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/13-14/2"), Some("")),
      XMLElement(List("GovTalkMessage"), Map("xmlns:ns4" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/17-18/1"), Some("")),
      XMLElement(List("GovTalkMessage"), Map("xmlns:ns5" -> "http://www.govtalk.gov.uk/CM/envelope"), Some("")),
      XMLElement(List(), Map(CompleteChunkStage.STREAM_SIZE -> "681"), Some(CompleteChunkStage.STREAM_SIZE))
    )

    whenReady(source.runWith(parseToXMLElements(paths))) { r =>
      r shouldBe expected
    }

    whenReady(source.runWith(parseToByteString(paths))) { r =>
      whenReady(source.toMat(collectByteString)(Keep.right).run()) { t =>
        r shouldBe t
      }
    }
  }
} 
Example 33
Source File: ControllerSpec.scala    From akka-ddd-cqrs-es-example   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.bank.adaptor.util

import akka.http.scaladsl.model.{ HttpEntity, MediaTypes }
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.testkit.TestKitBase
import akka.util.ByteString
import com.github.j5ik2o.scalatestplus.db.{ MySQLdConfig, UserWithPassword }
import com.wix.mysql.distribution.Version.v5_6_21
import io.circe.Encoder
import io.circe.syntax._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.prop.PropertyChecks
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ BeforeAndAfterAll, FreeSpecLike, Matchers }

import scala.concurrent.duration._

object ControllerSpec {

  implicit class JsonOps[A](val self: A) extends AnyVal {
    def toEntity(implicit enc: Encoder[A]): HttpEntity.Strict =
      HttpEntity(MediaTypes.`application/json`, ByteString(self.asJson.noSpaces))
  }

}

abstract class ControllerSpec
    extends FreeSpecLike
    with PropertyChecks
    with Matchers
    with BeforeAndAfterAll
    with ScalaFutures
    with FlywayWithMySQLSpecSupport
    with ScalatestRouteTest
    with TestKitBase {
  override implicit val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = Span(10, Seconds), interval = Span(200, Millis))

  override def afterAll: Unit = cleanUp()

  override protected lazy val mySQLdConfig: MySQLdConfig = MySQLdConfig(
    version = v5_6_21,
    port = Some(12345),
    userWithPassword = Some(UserWithPassword("bank", "passwd")),
    timeout = Some((30 seconds) * sys.env.getOrElse("SBT_TEST_TIME_FACTOR", "1").toDouble)
  )

} 
Example 34
Source File: BankAccountReadModelUseCaseImplSpec.scala    From akka-ddd-cqrs-es-example   with MIT License
package com.github.j5ik2o.bank.adaptor.useCase

import akka.actor.ActorSystem
import com.github.j5ik2o.bank.adaptor.aggregate.{ BankAccountAggregate, BankAccountAggregateFlowsImpl }
import com.github.j5ik2o.bank.adaptor.dao.BankAccountReadModelFlowsImpl
import com.github.j5ik2o.bank.adaptor.readJournal.JournalReaderImpl
import com.github.j5ik2o.bank.adaptor.util.{ ActorSpec, BankAccountSpecSupport, FlywayWithMySQLSpecSupport }
import com.github.j5ik2o.bank.domain.model.{ BankAccountId, BankAccountName }
import com.github.j5ik2o.bank.useCase.{ BankAccountAggregateUseCase, BankAccountReadModelUseCase }
import com.github.j5ik2o.bank.useCase.BankAccountAggregateUseCase.Protocol._
import com.github.j5ik2o.scalatestplus.db.{ MySQLdConfig, UserWithPassword }
import com.typesafe.config.ConfigFactory
import com.wix.mysql.distribution.Version.v5_6_21
import org.scalatest.time.{ Millis, Seconds, Span }
import org.sisioh.baseunits.scala.money.Money

import scala.concurrent.duration._

class BankAccountReadModelUseCaseImplSpec
    extends ActorSpec(
      ActorSystem("BankAccountReadModelUseCaseImplSpec", ConfigFactory.load("bank-account-use-case-spec.conf"))
    )
    with FlywayWithMySQLSpecSupport
    with BankAccountSpecSupport {

  override implicit val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(200, Millis))

  override protected lazy val mySQLdConfig: MySQLdConfig = MySQLdConfig(
    version = v5_6_21,
    port = Some(12345),
    userWithPassword = Some(UserWithPassword("bank", "passwd")),
    timeout = Some((30 seconds) * sys.env.getOrElse("SBT_TEST_TIME_FACTOR", "1").toDouble)
  )

  import system.dispatcher

  "BankAccountReadModelUseCaseImpl" - {
    "should be able to read read-model" in {
      val id           = bankAccountIdGenerator.generateId().futureValue
      val aggregateRef = system.actorOf(BankAccountAggregate.props, BankAccountAggregate.name(id))
      val bankAccountReadModelUseCase = new BankAccountReadModelUseCase(
        new BankAccountReadModelFlowsImpl(dbConfig.profile, dbConfig.db),
        new JournalReaderImpl()
      )
      bankAccountReadModelUseCase.execute()
      createDomainEvents(id,
                         new BankAccountAggregateUseCase(
                           new BankAccountAggregateFlowsImpl(aggregateRef)
                         ))
      awaitAssert(
        {
          val resolveBankAccountEventsSucceeded = bankAccountReadModelUseCase
            .resolveBankAccountEventsById(ResolveBankAccountEventsRequest(id))
            .futureValue
            .asInstanceOf[ResolveBankAccountEventsSucceeded]
          resolveBankAccountEventsSucceeded.bankAccountId shouldBe id
          resolveBankAccountEventsSucceeded.events.head.`type` shouldBe "deposit"
          resolveBankAccountEventsSucceeded.events.head.amount shouldBe 1000
          resolveBankAccountEventsSucceeded.events.head.currencyCode shouldBe "JPY"
        },
        3 seconds,
        50 milliseconds
      )
    }
  }

  private def createDomainEvents(id: BankAccountId, bankAccountAggregateUseCase: BankAccountAggregateUseCase) = {
    val openBankAccountSucceeded = bankAccountAggregateUseCase
      .openBankAccount(OpenBankAccountRequest(id, BankAccountName("test-1")))
      .futureValue
      .asInstanceOf[OpenBankAccountSucceeded]
    openBankAccountSucceeded.bankAccountId shouldBe id
    val depositSucceeded =
      bankAccountAggregateUseCase
        .addBankAccountEvent(DepositRequest(id, Money.yens(1000L)))
        .futureValue
        .asInstanceOf[DepositSucceeded]
    depositSucceeded.bankAccountId shouldBe id
  }
} 
Example 35
Source File: ClusterSoakSpec.scala    From akka-kubernetes-tests   with Apache License 2.0 5 votes vote down vote up
package akka.cluster.soak
import akka.actor.ActorSystem
import akka.discovery.ServiceDiscovery.Resolved
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, StatusCodes}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.kubernetes.soak.Tests.{ResponseTimeNanos, Target}
import akka.kubernetes.soak.{StatsJsonSupport, TestResults}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{Matchers, WordSpec}
import akka.util.PrettyDuration._

import scala.collection.immutable
import scala.concurrent.Future
import scala.concurrent.duration._

class ClusterSoakSpec(endpoints: Resolved)(implicit system: ActorSystem)
    extends WordSpec
    with StatsJsonSupport
    with ScalaFutures
    with Matchers {

  import system.dispatcher
  implicit val mat = ActorMaterializer()
  implicit override val patienceConfig = PatienceConfig(timeout = Span(30, Seconds), interval = Span(2, Seconds))
  val log = Logging(system, getClass)

  "The Clustered service" should {

    "not have had any failures" in {

      val responses: immutable.Seq[TestResults] = Source(endpoints.addresses)
        .mapAsyncUnordered(10) { rt =>
          log.info("Hitting {}", rt.host)
          val request = HttpRequest(uri = s"http://${rt.host}:${rt.port.getOrElse(8080)}/stats")
          for {
            response <- Http().singleRequest(request)
            entity <- response.entity.toStrict(1.second)
            results <- response.status match {
              case StatusCodes.OK =>
                Unmarshal(entity).to[TestResults]
              case unexpected =>
                Future.failed(
                  new RuntimeException(s"Unexpected response code: $unexpected body: ${entity.data.utf8String}")
                )
            }
          } yield results
        }
        .runWith(Sink.seq)
        .futureValue

      log.info("{} nodes tested", responses.size)

      val maxJoinTimes =
        responses.map(_.joiningTime).sorted.reverse.take(5).map(_.nanos.pretty)

      log.info("Max join times: {}", maxJoinTimes)

      val maxResponseTimePerNode: immutable.Seq[(Target, ResponseTimeNanos)] =
        responses.map(_.lastResult.responses.maxBy(_._2))

      val averageResponseTimesPerNode = responses
        .map((eachNode: TestResults) => {
          val total = eachNode.lastResult.responses.map(_._2).sum.nanos
          val count = eachNode.lastResult.responses.size
          total / count
        })
        .sorted
        .reverse

      log.info("All response times: {}", responses)
      log.info("Slowest response times across all node pings: {}",
               maxResponseTimePerNode.sortBy(_._2).reverse.take(5).map(_._2.nanos.pretty))
      log.info("Slowest average response times across all node pings: {}",
               averageResponseTimesPerNode.take(5).map(_.pretty))

      responses.filter(_.testsFailed != 0) shouldEqual Nil

      withClue("Response took longer than 2 seconds. Do some investigation") {
        responses.filter(_.lastResult.responses.exists(_._2.nanos > 2.seconds)) shouldEqual Nil
      }

      withClue("Found unreachable events") {
        responses.filter(_.memberUnreachableEvents != 0) shouldEqual Nil
      }

      withClue("Found downed events") {
        responses.filter(_.memberDownedEvents != 0) shouldEqual Nil
      }
    }
  }

} 
Example 36
Source File: AkkaKubernetesSpec.scala    From akka-kubernetes-tests   with Apache License 2.0 5 votes vote down vote up
package akka.kubernetes.sample

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, StatusCodes}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.management.cluster.{ClusterHttpManagementJsonProtocol, ClusterMembers}
import akka.stream.ActorMaterializer
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpec}

class AkkaKubernetesSpec extends WordSpec with BeforeAndAfterAll with ScalaFutures with Matchers with ClusterHttpManagementJsonProtocol with Eventually {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()

  implicit override val patienceConfig = PatienceConfig(timeout = Span(60, Seconds), interval = Span(2, Seconds))

  val target = system.settings.config.getString("akka.k8s.target")
  val clusterSize = system.settings.config.getInt("akka.k8s.cluster-size")
  val deployedVersion = system.settings.config.getString("akka.k8s.deployment-version")

  val log = system.log

  log.info("Running with target {} clusterSize {} version {}", target, clusterSize, deployedVersion)

  "Version deployed" should {
    "should have been updated" in {
      eventually {
        val response = Http().singleRequest(HttpRequest(uri = s"$target/version")).futureValue
        val reportedVersion = Unmarshal(response.entity).to[String].futureValue
        log.info("Reported version is: {}", reportedVersion)
        reportedVersion shouldEqual deployedVersion
      }
    }
  }

  "Cluster formation" should {

    "work" in {
      eventually {
        val response = Http().singleRequest(HttpRequest(uri = s"$target/cluster/members")).futureValue
        response.status shouldEqual StatusCodes.OK

        val clusterMembers: ClusterMembers = Unmarshal(response).to[ClusterMembers].futureValue
        withClue("Latest response: " + clusterMembers) {
          clusterMembers.members.size shouldEqual clusterSize
          clusterMembers.unreachable shouldEqual Seq.empty
        }
        log.info("Current cluster members: {}", clusterMembers)
      }
    }
  }

  "Akka Boss (singleton)" should {

    "say hello" in {
      val response = Http().singleRequest(HttpRequest(uri = s"$target/boss")).futureValue
      response.status shouldEqual StatusCodes.OK
    }
  }

  "Akka members (sharding)" should {
    "do some work" in {
      val response = Http().singleRequest(HttpRequest(uri = s"$target/team-member/johan")).futureValue
      response.status shouldEqual StatusCodes.OK
    }
  }

  override protected def afterAll(): Unit = {
    system.terminate()
  }
} 
Example 37
Source File: FutureUtilSpec.scala    From graphcool-framework   with Apache License 2.0 5 votes vote down vote up
package cool.graph.utils.future

import org.scalatest.{Matchers, WordSpec}
import cool.graph.utils.future.FutureUtils._
import org.scalatest.concurrent.ScalaFutures._
import org.scalatest.time.{Millis, Seconds, Span}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

class FutureUtilSpec extends WordSpec with Matchers {
  val patienceConfig = PatienceConfig(timeout = Span(5, Seconds), interval = Span(5, Millis))

  "runSequentially" should {
    "run all given futures in sequence" in {

      val testList = List[() => Future[Long]](
        () => { Thread.sleep(500); Future.successful(System.currentTimeMillis()) },
        () => { Thread.sleep(250); Future.successful(System.currentTimeMillis()) },
        () => { Thread.sleep(100); Future.successful(System.currentTimeMillis()) }
      )

      val values: Seq[Long] = testList.runSequentially.futureValue(patienceConfig)
      (values, values.tail).zipped.forall((a, b) => a < b) shouldBe true
    }
  }

  "andThenFuture" should {

    "Should work correctly in error and success cases" in {
      val f1 = Future.successful(100)
      val f2 = Future.failed(new Exception("This is a test"))

      whenReady(
        f1.andThenFuture(
          handleSuccess = x => Future.successful("something"),
          handleFailure = e => Future.successful("another something")
        )) { res =>
        res should be(100)
      }

      whenReady(
        f2.andThenFuture(
            handleSuccess = (x: Int) => Future.successful("something"),
            handleFailure = e => Future.successful("another something")
          )
          .failed) { res =>
        res shouldBe a[Exception]
      }
    }
  }

} 
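
Usage note: unlike most suites here, Example 37 keeps its `PatienceConfig` non-implicit and passes it to `futureValue` explicitly, which is handy when only a few calls need a relaxed timeout. A sketch of that per-call style (ScalaTest 3.1+; names invented):

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.wordspec.AnyWordSpec

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

class PerCallPatienceSketch extends AnyWordSpec with Matchers with ScalaFutures {
  private val relaxed = PatienceConfig(timeout = Span(5, Seconds), interval = Span(5, Millis))

  "futureValue" should {
    "accept an explicit PatienceConfig for a single call" in {
      val slow = Future { Thread.sleep(300); 7 }
      slow.futureValue(relaxed) shouldBe 7 // other calls keep the strict default
    }
  }
}
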
Example 38
Source File: ArrayFunctionsIT.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse.dsl

import com.crobox.clickhouse.ClickhouseClientSpec
import com.crobox.clickhouse.dsl.execution.ClickhouseQueryExecutor
import com.crobox.clickhouse.dsl.language.ClickhouseTokenizerModule
import com.crobox.clickhouse.internal.QuerySettings
import com.crobox.clickhouse.testkit.ClickhouseSpec
import org.scalatest.time.{Millis, Seconds, Span}

import scala.concurrent.Future

class ArrayFunctionsIT extends ClickhouseClientSpec with ClickhouseSpec {
  implicit lazy val chExecutor: ClickhouseQueryExecutor            = ClickhouseQueryExecutor.default(clickClient)
  implicit lazy val clickhouseTokenizer: ClickhouseTokenizerModule = new ClickhouseTokenizerModule {}

  override implicit def patienceConfig =
    PatienceConfig(timeout = scaled(Span(10, Seconds)), interval = scaled(Span(20, Millis)))

  private def execute(query: Query): Future[String] = {
    implicit val settings: QuerySettings = QuerySettings()
    clickClient.query(clickhouseTokenizer.toSql(query.internalQuery, None)).map(_.trim)
  }

  it should "arrayFunction: has" in {
    execute(select(has(Array(1, 2, 3, 4), 2))).futureValue.toInt should be(1)
  }

  it should "arrayFunction: hasAny" in {
    execute(select(hasAny(Array(1, 2, 3, 4), Array(2)))).futureValue.toInt should be(1)
    execute(select(hasAny(Array(1, 2, 3, 4), Array(5)))).futureValue.toInt should be(0)
    execute(select(hasAny(Array(1, 2, 3, 4), Array(1,2)))).futureValue.toInt should be(1)
    execute(select(hasAny(Array(1, 2, 3, 4), Array(1,5)))).futureValue.toInt should be(1)
  }

  it should "arrayFunction: hasAll" in {
    execute(select(hasAll(Array(1, 2, 3, 4), Array(1,2)))).futureValue.toInt should be(1)
    execute(select(hasAll(Array(1, 2, 3, 4), Array(1,5)))).futureValue.toInt should be(0)
  }

  it should "arrayFunction: resize" in {
    execute(select(arrayResize(Array(1, 2, 3, 4), 3, 0))).futureValue should be("[1,2,3]")
    execute(select(arrayResize(Array(1, 2, 3, 4), 4, 0))).futureValue should be("[1,2,3,4]")
    execute(select(arrayResize(Array(1, 2, 3, 4), 5, 0))).futureValue should be("[1,2,3,4,0]")

    execute(select(arrayResize(Array("a", "b", "c", "d"), 3, "z"))).futureValue should be("['a','b','c']")
    execute(select(arrayResize(Array("a", "b", "c", "d"), 4, "z"))).futureValue should be("['a','b','c','d']")
    execute(select(arrayResize(Array("a", "b", "c", "d"), 5, "z"))).futureValue should be("['a','b','c','d','z']")
  }
} 
Example 39
Source File: EmbeddedKafkaSpecSupport.scala    From embedded-kafka   with MIT License 5 votes vote down vote up
package net.manub.embeddedkafka

import java.net.{InetAddress, Socket}

import net.manub.embeddedkafka.EmbeddedKafkaSpecSupport.{
  Available,
  NotAvailable,
  ServerStatus
}
import org.scalatest.Assertion
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.wordspec.AnyWordSpecLike

import scala.util.{Failure, Success, Try}

trait EmbeddedKafkaSpecSupport
    extends AnyWordSpecLike
    with Matchers
    with Eventually
    with IntegrationPatience {

  implicit val config: PatienceConfig =
    PatienceConfig(Span(1, Seconds), Span(100, Milliseconds))

  def expectedServerStatus(port: Int, expectedStatus: ServerStatus): Assertion =
    eventually {
      status(port) shouldBe expectedStatus
    }

  private def status(port: Int): ServerStatus = {
    Try(new Socket(InetAddress.getByName("localhost"), port)) match {
      case Failure(_) => NotAvailable
      case Success(_) => Available
    }
  }
}

object EmbeddedKafkaSpecSupport {
  sealed trait ServerStatus
  case object Available    extends ServerStatus
  case object NotAvailable extends ServerStatus
} 
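
Usage note: Example 39 mixes in `IntegrationPatience`, which raises the default patience to a 15-second timeout with a 150-millisecond interval, and then shadows it with its own implicit `config` anyway; only the explicit implicit is used. A sketch relying on `IntegrationPatience` alone:

import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

class IntegrationPatienceSketch extends AnyWordSpec with Matchers
    with Eventually with IntegrationPatience {

  "a retried check" should {
    "use the integration defaults instead of the stock 150ms timeout" in {
      var attempts = 0
      eventually {
        attempts += 1
        attempts should be >= 3 // passes on the third poll
      }
    }
  }
}
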
Example 40
Source File: XmlrpcConnection.scala    From xmlrpc   with MIT License 5 votes vote down vote up
package xmlrpc

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.util.Timeout
import org.scalatest.FunSpec
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import xmlrpc.protocol.XmlrpcProtocol

import scala.concurrent.duration._
import scala.language.postfixOps
import scalaz.{Success, Failure}

class XmlrpcConnection extends FunSpec with ScalaFutures {
  // Xmlrpc imports
  import Xmlrpc._
  import XmlrpcProtocol._

  // Scalatest setup
  implicit val default: PatienceConfig = PatienceConfig(timeout = Span(5, Seconds), interval = Span(500, Millis))

  // Xmlrpc setup: the server is up, but it's a public test server found on the Internet, not ours
  implicit val testServer = XmlrpcServer("http://betty.userland.com/RPC2")

  // Spray setup
  implicit val system = ActorSystem()
  implicit val ma = ActorMaterializer()
  implicit val timeout = Timeout(5 seconds)
  import system.dispatcher

  describe("The connection with a XML-RPC server") {
    it("should invoke the test method successfully in the server") {
      val invocation = invokeMethod[Int, String]("examples.getStateName", 41).underlying
      val responseMessage = "South Dakota"

      whenReady(invocation) {
        case Success(value) => assertResult(responseMessage) {value}
        case Failure(errors) => fail("Errors when deserializing\n" + errors)
      }
    }
  }
} 
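
Usage note: `whenReady` above resolves its patience from the implicit `PatienceConfig` declared at the top of the suite. The same shape in isolation (ScalaTest 3.1+; the future is a placeholder for the RPC call):

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.time.{Millis, Seconds, Span}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

class WhenReadySketch extends AnyFunSpec with ScalaFutures {
  implicit val defaultPatience: PatienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(500, Millis))

  describe("whenReady") {
    it("runs the block once the future completes") {
      val invocation = Future { 41 + 1 }
      whenReady(invocation) { value =>
        assert(value == 42)
      }
    }
  }
}
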
Example 41
Source File: PostgresServiceSpec.scala    From docker-it-scala   with MIT License 5 votes vote down vote up
package com.whisk.docker

import com.spotify.docker.client.DefaultDockerClient
import com.whisk.docker.impl.spotify.SpotifyDockerFactory
import com.whisk.docker.scalatest.DockerTestKit
import org.scalatest.time.{Second, Seconds, Span}
import org.scalatest.{FlatSpec, Matchers}

class PostgresServiceSpec
    extends FlatSpec
    with Matchers
    with DockerTestKit
    with DockerPostgresService {

  implicit val pc = PatienceConfig(Span(20, Seconds), Span(1, Second))

  override implicit val dockerFactory: DockerFactory = new SpotifyDockerFactory(
    DefaultDockerClient.fromEnv().build())

  "postgres node" should "be ready with log line checker" in {
    isContainerReady(postgresContainer).futureValue shouldBe true
  }
} 
Example 42
Source File: AllAtOnceSpec.scala    From docker-it-scala   with MIT License 5 votes vote down vote up
package com.whisk.docker

import com.whisk.docker.impl.spotify.DockerKitSpotify
import org.scalatest.time.{Second, Seconds, Span}
import org.scalatest.{FlatSpec, Matchers}

class AllAtOnceSpec
    extends FlatSpec
    with Matchers
    with DockerKitSpotify
    with DockerElasticsearchService
    with DockerCassandraService
    with DockerNeo4jService
    with DockerMongodbService
    with PingContainerKit {

  implicit val pc = PatienceConfig(Span(20, Seconds), Span(1, Second))

  "all containers" should "be ready at the same time" in {
    dockerContainers.map(_.image).foreach(println)
    dockerContainers.forall(c => isContainerReady(c).futureValue) shouldBe true
  }
} 
Example 43
Source File: MongodbServiceSpec.scala    From docker-it-scala   with MIT License 5 votes vote down vote up
package com.whisk.docker

import com.whisk.docker.impl.spotify.DockerKitSpotify
import com.whisk.docker.scalatest.DockerTestKit
import org.scalatest.time.{Second, Seconds, Span}
import org.scalatest.{FlatSpec, Matchers}

class MongodbServiceSpec
    extends FlatSpec
    with Matchers
    with DockerTestKit
    with DockerKitSpotify
    with DockerMongodbService {

  implicit val pc = PatienceConfig(Span(20, Seconds), Span(1, Second))

  "mongodb node" should "be ready with log line checker" in {
    isContainerReady(mongodbContainer).futureValue shouldBe true
    mongodbContainer.getPorts().futureValue.get(27017) should not be empty
    mongodbContainer.getIpAddresses().futureValue should not be Seq.empty
  }
} 
Example 44
Source File: CassandraServiceSpec.scala    From docker-it-scala   with MIT License 5 votes vote down vote up
package com.whisk.docker

import com.github.dockerjava.core.DefaultDockerClientConfig
import com.github.dockerjava.netty.NettyDockerCmdExecFactory
import com.whisk.docker.impl.dockerjava.{Docker, DockerJavaExecutorFactory}
import com.whisk.docker.scalatest.DockerTestKit
import org.scalatest.time.{Second, Seconds, Span}
import org.scalatest.{FlatSpec, Matchers}

class CassandraServiceSpec
    extends FlatSpec
    with Matchers
    with DockerCassandraService
    with DockerTestKit {

  implicit val pc = PatienceConfig(Span(20, Seconds), Span(1, Second))

  override implicit val dockerFactory: DockerFactory = new DockerJavaExecutorFactory(
    new Docker(DefaultDockerClientConfig.createDefaultConfigBuilder().build(),
               factory = new NettyDockerCmdExecFactory()))

  "cassandra node" should "be ready with log line checker" in {
    isContainerReady(cassandraContainer).futureValue shouldBe true
  }
} 
Example 45
Source File: SeleniumTest.scala    From udash-core   with Apache License 2.0 5 votes vote down vote up
package io.udash.web

import java.util.concurrent.TimeUnit

import org.openqa.selenium.firefox.{FirefoxDriver, FirefoxOptions}
import org.openqa.selenium.remote.RemoteWebDriver
import org.openqa.selenium.{Dimension, WebElement}
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

private trait ServerConfig {
  def init(): Unit
  def createUrl(part: String): String
  def destroy(): Unit
}

// Doesn't launch embedded guide app server
private final class ExternalServerConfig(urlPrefix: String) extends ServerConfig {
  require(!urlPrefix.endsWith("/"))

  override def createUrl(part: String): String = {
    require(part.startsWith("/"))
    urlPrefix + part
  }

  override def init(): Unit = {}
  override def destroy(): Unit = {}
}

// Launches embedded guide server
private final class InternalServerConfig extends ServerConfig {
  private val server = Launcher.createApplicationServer()

  override def init(): Unit = server.start()

  override def destroy(): Unit = server.stop()

  override def createUrl(part: String): String = {
    require(part.startsWith("/"))
    s"http://127.0.0.2:${server.port}$part"
  }
}

abstract class SeleniumTest extends AnyWordSpec with Matchers with BeforeAndAfterAll with BeforeAndAfterEach with Eventually {
  override implicit val patienceConfig: PatienceConfig = PatienceConfig(scaled(Span(10, Seconds)), scaled(Span(50, Millis)))

  protected final val driver: RemoteWebDriver = new FirefoxDriver(new FirefoxOptions().setHeadless(true))
  driver.manage().timeouts().implicitlyWait(200, TimeUnit.MILLISECONDS)
  driver.manage().window().setSize(new Dimension(1440, 800))

  protected final def findElementById(id: String): WebElement = eventually {
    driver.findElementById(id)
  }

  protected def url: String

  private val server: ServerConfig = new InternalServerConfig

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    server.init()
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    driver.get(server.createUrl(url))
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    server.destroy()
    driver.close()
  }
} 
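
Usage note: Example 45 wraps its spans in `scaled(...)`, which multiplies them by ScalaTest's span scale factor (settable with the runner's `-F` option), giving slow CI machines proportionally more headroom. A sketch:

import org.scalatest.concurrent.Eventually
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Seconds, Span}

class ScaledPatienceSketch extends AnyFlatSpec with Matchers with Eventually {
  // With -F 2.0 passed to the runner these become 20s / 100ms.
  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(scaled(Span(10, Seconds)), scaled(Span(50, Millis)))

  "a slow condition" should "get proportionally more time under -F" in {
    val start = System.currentTimeMillis()
    eventually {
      (System.currentTimeMillis() - start) should be >= 200L
    }
  }
}
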
Example 46
Source File: AccessTokenSpec.scala    From akka-http-oauth2-client   with Apache License 2.0 5 votes vote down vote up
package com.github.dakatsuka.akka.http.oauth2.client

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{ HttpEntity, HttpResponse, StatusCodes }
import akka.http.scaladsl.model.ContentTypes.`application/json`
import akka.stream.{ ActorMaterializer, Materializer }
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ BeforeAndAfterAll, DiagrammedAssertions, FlatSpec }

import scala.concurrent.{ Await, ExecutionContext }
import scala.concurrent.duration.Duration

class AccessTokenSpec extends FlatSpec with DiagrammedAssertions with ScalaFutures with BeforeAndAfterAll {
  implicit val system: ActorSystem        = ActorSystem()
  implicit val ec: ExecutionContext       = system.dispatcher
  implicit val materializer: Materializer = ActorMaterializer()
  implicit val defaultPatience: PatienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(700, Millis))

  override def afterAll(): Unit = {
    Await.ready(system.terminate(), Duration.Inf)
  }

  behavior of "AccessToken"

  it should "apply from HttpResponse" in {
    val accessToken  = "xxx"
    val tokenType    = "bearer"
    val expiresIn    = 86400
    val refreshToken = "yyy"

    val httpResponse = HttpResponse(
      status = StatusCodes.OK,
      headers = Nil,
      entity = HttpEntity(
        `application/json`,
        s"""
           |{
           |  "access_token": "$accessToken",
           |  "token_type": "$tokenType",
           |  "expires_in": $expiresIn,
           |  "refresh_token": "$refreshToken"
           |}
         """.stripMargin
      )
    )

    val result = AccessToken(httpResponse)

    whenReady(result) { token =>
      assert(token.accessToken == accessToken)
      assert(token.tokenType == tokenType)
      assert(token.expiresIn == expiresIn)
      assert(token.refreshToken.contains(refreshToken))
    }
  }
} 
Example 48
Source File: StringFunctionsIT.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse.dsl

import java.util.UUID

import com.crobox.clickhouse.{ClickhouseClientSpec, TestSchemaClickhouseQuerySpec}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import spray.json.DefaultJsonProtocol.{jsonFormat, _}
import spray.json.RootJsonFormat

class StringFunctionsIT
    extends ClickhouseClientSpec
    with TestSchemaClickhouseQuerySpec
    with ScalaFutures {

  private val columnString = "oneem,twoem,threeem"
  override val table2Entries: Seq[Table2Entry] =
    Seq(Table2Entry(UUID.randomUUID(), columnString, randomInt, randomString, None))

  override implicit def patienceConfig =
    PatienceConfig(timeout = scaled(Span(10, Seconds)), interval = scaled(Span(20, Millis)))

  case class Result(result: String)
  implicit val resultFormat: RootJsonFormat[Result] = jsonFormat[String, Result](Result.apply, "result")

  it should "split by character" in {
    val resultRows =
      chExecutor.execute[Result](select(arrayJoin(splitByChar(",", col1)) as "result") from TwoTestTable).futureValue.rows
    resultRows.length shouldBe 3
    resultRows.map(_.result) should contain theSameElementsAs Seq("oneem", "twoem", "threeem")
  }

  it should "split by string" in {
    val resultRows =
      chExecutor.execute[Result](select(arrayJoin(splitByString("em,", col1)) as "result") from TwoTestTable).futureValue.rows
    resultRows.length shouldBe 3
    resultRows.map(_.result) should contain theSameElementsAs Seq("one", "two", "threeem")
  }

  it should "concatenate string back" in {
    val resultRows =
      chExecutor
        .execute[Result](select(arrayStringConcat(splitByChar(",", col1), ",") as "result") from TwoTestTable)
        .futureValue
        .rows
    resultRows.length shouldBe 1
    resultRows.map(_.result).head shouldBe columnString

  }

} 
Example 49
Source File: AggregationFunctionsIT.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse.dsl

import java.util.UUID

import com.crobox.clickhouse.TestSchemaClickhouseQuerySpec
import com.crobox.clickhouse.ClickhouseClientSpec
import org.scalatest.time.{Millis, Seconds, Span}
import spray.json.DefaultJsonProtocol._
import spray.json.RootJsonFormat

class AggregationFunctionsIT
    extends ClickhouseClientSpec
    with TestSchemaClickhouseQuerySpec {

  private val entries = 200145
  override val table1Entries: Seq[Table1Entry] =
    Seq.fill(entries)(Table1Entry(UUID.randomUUID(), numbers = Seq(1, 2, 3)))
  override val table2Entries: Seq[Table2Entry] =
    Seq.fill(entries)(Table2Entry(UUID.randomUUID(), randomString, randomInt, randomString, None))

  override implicit def patienceConfig =
    PatienceConfig(timeout = scaled(Span(10, Seconds)), interval = scaled(Span(20, Millis)))

  "Combinators" should "apply for aggregations" in {
    case class Result(columnResult: String) {
      def result = columnResult.toInt
    }
    implicit val resultFormat: RootJsonFormat[Result] = jsonFormat[String, Result](Result.apply, "result")
    val resultSimple = chExecutor
      .execute[Result](select(uniq(shieldId) as "result") from OneTestTable)
      .futureValue
    val resultExact = chExecutor
      .execute[Result](select(uniqExact(shieldId) as "result") from OneTestTable)
      .futureValue
    resultSimple.rows.head.result shouldBe (entries +- entries / 100)
    resultSimple.rows.head.result should not be entries
    resultExact.rows.head.result shouldBe entries
  }

  it should "run quantiles" in {
    case class Result(result: Seq[Int])
    implicit val resultFormat: RootJsonFormat[Result] = jsonFormat[Seq[Int], Result](Result.apply, "result")
    val result = chExecutor
      .execute[Result](
        select(quantiles(col2, 0.1F, 0.2F, 0.3F, 0.4F, 0.5F, 0.99F) as ref[Seq[Int]]("result")) from TwoTestTable
      )
      .futureValue
    result.rows.head.result should have length 6
  }

  it should "run for each" in {
    case class Result(result: Seq[String])
    implicit val resultFormat: RootJsonFormat[Result] = jsonFormat[Seq[String], Result](Result.apply, "result")
    val result = chExecutor
      .execute[Result](
        select(forEach[Int, TableColumn[Seq[Int]], Double](numbers) { column =>
          sum(column)
        } as "result") from OneTestTable
      )
      .futureValue
    val queryResult = result.rows.head.result.map(_.toInt)
    queryResult should have length 3
    queryResult should contain theSameElementsAs Seq(entries, entries * 2, entries * 3)
  }

} 
Example 50
Source File: KinesisProducerIntegrationSpec.scala    From reactive-kinesis   with Apache License 2.0 5 votes vote down vote up
package com.weightwatchers.reactive.kinesis

import java.io.File

import com.amazonaws.services.kinesis.producer.{KinesisProducer => AWSKinesisProducer}
import com.typesafe.config.ConfigFactory
import com.weightwatchers.reactive.kinesis.common.{
  KinesisSuite,
  KinesisTestConsumer,
  TestCredentials
}
import com.weightwatchers.reactive.kinesis.consumer.KinesisConsumer.ConsumerConf
import com.weightwatchers.reactive.kinesis.models.ProducerEvent
import com.weightwatchers.reactive.kinesis.producer.{KinesisProducer, ProducerConf}
import org.scalatest.concurrent.Eventually
import org.scalatest.mockito.MockitoSugar
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, FreeSpec, Matchers}

import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Random

//scalastyle:off magic.number
class KinesisProducerIntegrationSpec
    extends FreeSpec
    with Matchers
    with MockitoSugar
    with BeforeAndAfterAll
    with Eventually
    with KinesisSuite {

  implicit val ece = scala.concurrent.ExecutionContext.global

  val TestStreamNrOfMessagesPerShard: Long = 0

  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(100, Millis))

  "The KinesisProducer" - {

    "Should publish a message to a stream" in new withKinesisConfForApp(
      "int-test-stream-producer-1"
    ) {

      val conf     = producerConf()
      val producer = KinesisProducer(conf)

      val existingRecordCount = testConsumer.retrieveRecords(conf.streamName, 10).size

      val event = ProducerEvent("1234", Random.alphanumeric.take(10).mkString)
      producer.addUserRecord(event)

      eventually {
        val records: Seq[String] = testConsumer.retrieveRecords(conf.streamName, 10)
        records.size shouldBe (existingRecordCount + 1)
        records should contain(
          new String(event.payload.array(), java.nio.charset.StandardCharsets.UTF_8)
        )
      }
    }
  }
}

//scalastyle:on 
Example 51
Source File: HydraMetricsSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.core.monitor

import akka.japi.Option.Some
import kamon.Kamon
import kamon.metric.{Counter, Gauge}
import org.scalamock.scalatest.proxy.MockFactory
import org.scalatest.{BeforeAndAfterAll, _}
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Seconds, Span}
import scalacache.guava.GuavaCache

import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Random, Try}

class HydraMetricsSpec
    extends Matchers
    with AnyFlatSpecLike
    with Eventually
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with MockFactory
    with ScalaFutures {

  import HydraMetrics._
  import scalacache.modes.try_._

  implicit override val patienceConfig =
    PatienceConfig(
      timeout = scaled(Span(2, Seconds)),
      interval = scaled(Span(5, Millis))
    )

  override def beforeEach() = {
    gaugesCache.removeAll()
    countersCache.removeAll()
    histogramsCache.removeAll()
  }

  override def afterAll = Try(Kamon.stopModules())

  val lookup = "lookup.xyz"
  val lookup2 = "lookup.abc"

  def generateTags: Seq[(String, String)] = Seq("tag1" -> "Everything's fine.")

  "An object mixing in HydraMetrics" should
    "create new counters with new lookup keys + metric names" in {
    shouldCreateNewMetric[Counter](incrementCounter _, countersCache)
  }

  it should
    "create new gauges with new lookup keys + metric names" in {
    shouldCreateNewMetric[Gauge](incrementGauge _, gaugesCache)
  }

  it should "lookup existing counters" in {
    shouldLookupExistingMetric[Counter](incrementCounter _, countersCache)
  }

  it should
    "lookup an existing gauge" in {
    shouldLookupExistingMetric[Gauge](decrementGauge _, gaugesCache)
  }

  it should
    "lookup an existing histogram" in {
    val f = recordToHistogram _

    whenReady(f(lookup, "histogram.metric", 100, generateTags)) { r =>
      whenReady(f(lookup, "histogram.metric", 100, generateTags)) { x =>
        r shouldEqual x
      }
    }
  }

  private def shouldCreateNewMetric[A](
      f: (String, String, => Seq[(String, String)]) => Unit,
      cache: GuavaCache[A]
  ) = {
    cache.get(lookup).map { result => result shouldBe None }

    f(lookup, "metric" + Random.nextInt(Integer.MAX_VALUE), generateTags)

    cache.get(lookup).map { result => result shouldBe a[Some[_]] }
  }

  private def shouldLookupExistingMetric[A](
      f: (String, String, => Seq[(String, String)]) => Unit,
      cache: GuavaCache[A]
  ) = {
    val metric = "metric" + Random.nextInt(Integer.MAX_VALUE)

    f(lookup, metric, generateTags) shouldEqual f(lookup, metric, generateTags)
  }
} 
Example 52
Source File: IngestorRegistrarSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.services

import java.util.concurrent.TimeUnit

import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.testkit.{ImplicitSender, TestKit}
import akka.util.Timeout
import hydra.common.util.ActorUtils
import hydra.ingest.services.IngestorRegistrar.UnregisterAll
import hydra.ingest.services.IngestorRegistry.{
  FindAll,
  FindByName,
  LookupResult
}
import hydra.ingest.test.TestIngestor
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.time.{Seconds, Span}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._


class IngestorRegistrarSpec
    extends TestKit(ActorSystem("IngestorRegistrarSpec"))
    with Matchers
    with AnyFunSpecLike
    with ImplicitSender
    with ScalaFutures
    with BeforeAndAfterAll
    with Eventually {

  override def afterAll =
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)

  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(10, Seconds), interval = Span(1, Seconds))

  val registry = system.actorOf(Props[IngestorRegistry], "ingestor_registry")

  val act = system.actorOf(Props[IngestorRegistrar])

  implicit val timeout = Timeout(3, TimeUnit.SECONDS)

  describe("The ingestor registrar actor") {
    it("registers from classpath on bootstrap") {
      eventually {
        whenReady(
          (registry ? FindByName(ActorUtils.actorName(classOf[TestIngestor])))
            .mapTo[LookupResult]
        ) { i =>
          i.ingestors.size shouldBe 1
          i.ingestors(0).name shouldBe ActorUtils.actorName(
            classOf[TestIngestor]
          )
        }
      }
    }

    it("unregisters") {
      act ! UnregisterAll
      eventually {
        whenReady((registry ? FindAll).mapTo[LookupResult]) { i =>
          i.ingestors.size shouldBe 0
        }
      }
    }
  }
} 
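
Usage note: Example 52 nests `whenReady` inside `eventually`, re-issuing the ask-and-assert cycle until the registry settles. A condensed sketch of that combination (ScalaTest 3.1+; the query is a stand-in for the actor ask):

import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Seconds, Span}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

class RetryingFutureSketch extends AnyFunSpec with Matchers with Eventually with ScalaFutures {
  // One config covers both eventually and whenReady/futureValue.
  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = Span(10, Seconds), interval = Span(1, Seconds))

  describe("eventually + whenReady") {
    it("re-issues the query until the expected state appears") {
      val startedAt = System.currentTimeMillis()
      def query(): Future[Long] = Future(System.currentTimeMillis() - startedAt)

      eventually { // retried per the implicit patience
        whenReady(query()) { elapsedMs => elapsedMs should be >= 2000L }
      }
    }
  }
}
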
Example 53
Source File: KafkaIntSpec.scala    From kafka-configurator   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package common

import cakesolutions.kafka.testkit.KafkaServer
import kafka.utils.ZkUtils
import org.apache.kafka.clients.admin.AdminClient
import org.apache.kafka.clients.admin.AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.PatienceConfiguration
import org.scalatest.time.{ Millis, Seconds, Span }

import scala.collection.JavaConverters._
import scala.concurrent.duration._

abstract class KafkaIntSpec extends BaseSpec with BeforeAndAfterAll with PatienceConfiguration {

  override implicit val patienceConfig = PatienceConfig(Span(3, Seconds), Span(250, Millis))

  val kafkaServer = new KafkaServer()
  val kafkaPort = kafkaServer.kafkaPort

  val zkSessionTimeout = 30 seconds
  val zkConnectionTimeout = 30 seconds

  lazy val zkUtils = ZkUtils(s"localhost:${kafkaServer.zookeeperPort}", zkSessionTimeout.toMillis.toInt,
    zkConnectionTimeout.toMillis.toInt, isZkSecurityEnabled = false)

  lazy val kafkaAdminClient = AdminClient.create(Map[String, AnyRef](
    BOOTSTRAP_SERVERS_CONFIG -> s"localhost:$kafkaPort"
  ).asJava)

  override def beforeAll() = kafkaServer.startup()

  override def afterAll() = {
    kafkaAdminClient.close()
    zkUtils.close()
    kafkaServer.close()
  }

} 
Example 54
Source File: OutputCommitCoordinatorIntegrationSuite.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.scheduler

import org.apache.hadoop.mapred.{FileOutputCommitter, TaskAttemptContext}
import org.scalatest.concurrent.Timeouts
import org.scalatest.time.{Seconds, Span}

import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkFunSuite, TaskContext}
import org.apache.spark.util.Utils


class OutputCommitCoordinatorIntegrationSuite
  extends SparkFunSuite
  with LocalSparkContext
  with Timeouts {

  override def beforeAll(): Unit = {
    super.beforeAll()
    val conf = new SparkConf()
      .set("spark.hadoop.outputCommitCoordination.enabled", "true")
      .set("spark.hadoop.mapred.output.committer.class",
        classOf[ThrowExceptionOnFirstAttemptOutputCommitter].getCanonicalName)
    sc = new SparkContext("local[2, 4]", "test", conf)
  }

  test("exception thrown in OutputCommitter.commitTask()") {
    // Regression test for SPARK-10381
    failAfter(Span(60, Seconds)) {
      val tempDir = Utils.createTempDir()
      try {
        sc.parallelize(1 to 4, 2).map(_.toString).saveAsTextFile(tempDir.getAbsolutePath + "/out")
      } finally {
        Utils.deleteRecursively(tempDir)
      }
    }
  }
}

private class ThrowExceptionOnFirstAttemptOutputCommitter extends FileOutputCommitter {
  override def commitTask(context: TaskAttemptContext): Unit = {
    val ctx = TaskContext.get()
    if (ctx.attemptNumber < 1) {
      throw new java.io.FileNotFoundException("Intentional exception")
    }
    super.commitTask(context)
  }
} 
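
The failAfter guard here comes from org.scalatest.concurrent.Timeouts, which later ScalaTest releases deprecated in favour of TimeLimits. A minimal sketch of the same guard under the newer API, assuming ScalaTest 3.x (the sleep is illustrative); TimeLimits additionally requires an implicit Signaler that says how to interrupt the overrunning code:

import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.time.{Seconds, Span}

class TimeLimitSketch extends AnyFunSuite with TimeLimits {

  // ThreadSignaler interrupts the test thread when the limit is exceeded.
  implicit val signaler: Signaler = ThreadSignaler

  test("fails if the body outlives the limit") {
    failAfter(Span(2, Seconds)) {
      Thread.sleep(100) // well under the limit, so the test passes
    }
  }
}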
Example 55
Source File: ApplicationSpec.scala    From Scala-Programming-Projects   with MIT License
import org.scalatest.Matchers._
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Seconds, Span}
import org.scalatestplus.play.PlaySpec
import org.scalatestplus.play.guice.GuiceOneServerPerSuite
import play.api.libs.ws.WSClient
import play.api.test.Helpers._
import scala.concurrent.duration._



class ApplicationSpec extends PlaySpec with ScalaFutures with GuiceOneServerPerSuite {
  "Application" should {
    val wsClient = app.injector.instanceOf[WSClient]
    val myPublicAddress = s"localhost:$port"

    "send 404 on a bad request" in {
      val testURL = s"http://$myPublicAddress/boom"

      whenReady(wsClient.url(testURL).get(), Timeout(1.second)) { response =>
        response.status mustBe NOT_FOUND
      }
    }

    "render the index page" in {
      val testURL = s"http://$myPublicAddress/"

      whenReady(wsClient.url(testURL).get(), Timeout(1.second)) { response =>
        response.status mustBe OK
        response.contentType should include("text/html")
      }
    }
  }
} 
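
Timeout(1.second) here is a per-call override: it applies only to that whenReady and leaves the suite's implicit PatienceConfig untouched. Passing a Scala duration works because ScalaTest appears to supply an implicit FiniteDuration-to-Span conversion; the explicit form builds the Timeout from a Span directly. A pure-ScalaTest sketch of the equivalent value:

import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.time.{Seconds, Span}

object TimeoutSketch {
  // Equivalent to Timeout(1.second), without leaning on the Duration-to-Span conversion:
  val oneSecond: Timeout = Timeout(Span(1, Seconds))
}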
Example 56
Source File: ViewTestSupport.scala    From ddd-leaven-akka-v2   with MIT License
package ecommerce.sales.view

import com.typesafe.config.Config
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.slf4j.Logger
import org.slf4j.LoggerFactory.getLogger
import pl.newicom.dddd.view.sql.SqlViewStore
import slick.dbio._

import scala.concurrent.ExecutionContext
import slick.jdbc.H2Profile

trait ViewTestSupport extends BeforeAndAfterAll with ScalaFutures {
  this: Suite =>

  def config: Config
  lazy val viewStore = new SqlViewStore(config)
  val log: Logger = getLogger(getClass)

  implicit val profile = H2Profile

  implicit class ViewStoreAction[A](a: DBIO[A])(implicit ex: ExecutionContext) {
    private val future = viewStore.run(a)

    def run(): Unit = future.map(_ => ()).futureValue
    def result: A = future.futureValue
  }

  def ensureSchemaDropped: DBIO[Unit]
  def ensureSchemaCreated: DBIO[Unit]

  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(Span(10, Seconds)),
    interval = scaled(Span(200, Millis))
  )

  override def beforeAll(): Unit = {
    viewStore.run {
      ensureSchemaDropped >> ensureSchemaCreated
    }.futureValue
  }

} 
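
The scaled(...) wrapper multiplies a Span by ScalaTest's span scale factor, which defaults to 1.0, so these timings can be stretched globally (for a slow CI machine, say) without editing any suite. The factor is normally set through the Runner's -F argument; a hedged sbt example, with the factor value purely illustrative:

// build.sbt: double every scaled Span when running tests
Test / testOptions += Tests.Argument(TestFrameworks.ScalaTest, "-F", "2.0")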
Example 57
Source File: DispatcherTest.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.akkastreams.dispatcher

import java.util.concurrent.atomic.AtomicReference

import akka.NotUsed
import akka.stream.scaladsl.{Keep, Sink}
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import com.daml.platform.akkastreams.dispatcher.SubSource.OneAfterAnother
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.{ExecutionContextExecutor, Future}

//TODO: merge/review the tests we have around the Dispatcher!
class DispatcherTest extends WordSpec with AkkaBeforeAndAfterAll with Matchers with ScalaFutures {

  override implicit def patienceConfig: PatienceConfig =
    PatienceConfig(scaled(Span(10, Seconds)), scaled(Span(250, Milliseconds)))

  "A Dispatcher" should {
    "not race when creating new subscriptions" in {
      // The test setup here is a little different from the above tests,
      // because we wanted to be specific about emitted pairs and use of Thread.sleep.

      implicit val ec: ExecutionContextExecutor = materializer.executionContext

      val elements = new AtomicReference(Map.empty[Int, Int])
      def readElement(i: Int): Future[Int] = Future {
        Thread.sleep(10) // In a previous version of Dispatcher, this sleep caused a race condition.
        elements.get()(i)
      }
      def readSuccessor(i: Int): Int = i + 1

      // compromise between catching flakes and not taking too long
      0 until 25 foreach { _ =>
        val d = Dispatcher("test", 0, 0)

        // Verify that the results are what we expected
        val subscriptions = 1 until 10 map { i =>
          elements.updateAndGet(m => m + (i -> i))
          d.signalNewHead(i)
          d.startingAt(i - 1, OneAfterAnother(readSuccessor, readElement))
            .toMat(Sink.collection)(Keep.right[NotUsed, Future[Seq[(Int, Int)]]])
            .run()
        }

        d.close()

        subscriptions.zip(1 until 10) foreach {
          case (f, i) =>
            whenReady(f) { vals =>
              vals.map(_._1) should contain theSameElementsAs (i to 9)
              vals.map(_._2) should contain theSameElementsAs (i until 10)
            }
        }
      }
    }
  }
} 
Example 58
Source File: SecurityTest.scala    From Conseil   with Apache License 2.0
package tech.cryptonomic.conseil.api.security

import akka.http.scaladsl.testkit.ScalatestRouteTest
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{Matchers, WordSpec}
import tech.cryptonomic.conseil.api.security.Security.SecurityApi

class SecurityTest extends WordSpec with Matchers with ScalatestRouteTest with ScalaFutures {

  implicit override val patienceConfig = PatienceConfig(timeout = Span(2, Seconds), interval = Span(20, Millis))

  "The SecurityApi" should {

      "valid itself" in {
        SecurityApi(Set.empty, None).isValid shouldBe false
        SecurityApi(Set.empty, Some(false)).isValid shouldBe false

        SecurityApi(Set("some-key"), Some(false)).isValid shouldBe true
        SecurityApi(Set("some-key"), None).isValid shouldBe true
        SecurityApi(Set.empty, Some(true)).isValid shouldBe true
        SecurityApi(Set("some-key"), Some(true)).isValid shouldBe true
      }

      "validate a given key" in {
        SecurityApi(Set("some-key"), None).validateApiKey(Some("some-key")).futureValue shouldBe true
        SecurityApi(Set("some-key"), Some(true)).validateApiKey(Some("some-key")).futureValue shouldBe true

        SecurityApi(Set.empty, None).validateApiKey(Some("some-key")).futureValue shouldBe false
        SecurityApi(Set.empty, Some(true)).validateApiKey(Some("some-key")).futureValue shouldBe false

        SecurityApi(Set.empty, None).validateApiKey(None).futureValue shouldBe false
        SecurityApi(Set.empty, Some(true)).validateApiKey(None).futureValue shouldBe true
      }

    }
} 
Example 59
Source File: AcceptanceSpecPatience.scala    From renku   with Apache License 2.0
package ch.renku.acceptancetests.tooling

import org.scalatest.concurrent.{AbstractPatienceConfiguration, PatienceConfiguration}
import org.scalatest.time.{Millis, Seconds, Span}

trait AcceptanceSpecPatience extends AbstractPatienceConfiguration { this: PatienceConfiguration =>

  implicit abstract override val patienceConfig: PatienceConfig = PatienceConfig(
    timeout  = scaled(Span(AcceptanceSpecPatience.WAIT_SCALE * 30, Seconds)),
    interval = scaled(Span(150, Millis))
  )
}

object AcceptanceSpecPatience {
  // Scale all wait times by a constant value.
  val WAIT_SCALE = 2
} 
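
The abstract override modifier together with the PatienceConfiguration self-type makes this a stackable trait: it can only be mixed into a suite that already provides a patienceConfig, whose definition it then replaces. A minimal sketch of mixing it in, assuming a recent ScalaTest (the suite name is illustrative); Eventually extends PatienceConfiguration, which satisfies the self-type:

import ch.renku.acceptancetests.tooling.AcceptanceSpecPatience
import org.scalatest.concurrent.Eventually
import org.scalatest.wordspec.AnyWordSpec

// AcceptanceSpecPatience replaces the patienceConfig that Eventually brings in.
class LandingPageSpec extends AnyWordSpec with Eventually with AcceptanceSpecPatience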
Example 60
Source File: Page.scala    From renku   with Apache License 2.0
package ch.renku.acceptancetests.pages

import ch.renku.acceptancetests.pages.Page._
import ch.renku.acceptancetests.pages.RenkuPage.RenkuBaseUrl
import ch.renku.acceptancetests.tooling._
import eu.timepit.refined.W
import eu.timepit.refined.api.Refined
import eu.timepit.refined.collection.NonEmpty
import eu.timepit.refined.string._
import org.openqa.selenium.{By, WebDriver, WebElement}
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{Matchers => ScalatestMatchers}
import org.scalatestplus.selenium.WebBrowser

import scala.concurrent.duration._
import scala.language.{implicitConversions, postfixOps}

abstract class Page[Url <: BaseUrl] extends ScalatestMatchers with Eventually with AcceptanceSpecPatience {

  val path:  Path
  val title: Title
  def pageReadyElement(implicit webDriver: WebDriver): Option[WebElement]
  def url(implicit baseUrl:                Url): String = s"$baseUrl$path"

  protected implicit def toWebElement(element: WebBrowser.Element): WebElement =
    element.underlying
  protected implicit def toMaybeWebElement(maybeElement: Option[WebBrowser.Element]): Option[WebElement] =
    maybeElement.map(_.underlying)

  protected implicit class ElementOps(element: WebBrowser.Element) {

    def parent: WebElement = element.findElement(By.xpath("./.."))

    def enterValue(value: String): Unit = value foreach { char =>
      element.sendKeys(char.toString) sleep (100 millis)
    }
  }

  protected implicit class WebElementOps(element: WebElement) {

    def enterValue(value: String): Unit = value foreach { char =>
      element.sendKeys(char.toString) sleep (100 millis)
    }
  }

  object sleep {
    def apply(duration: Duration): Unit = Page.SleepThread(duration)
  }

  protected implicit class OperationOps(unit: Unit) {
    def sleep(duration: Duration): Unit = Page.SleepThread(duration)
  }

  protected def waitUpTo(duration: Duration): PatienceConfig =
    PatienceConfig(
      // Wait up to WAIT_SCALE * the given duration for this operation
      timeout  = scaled(Span(AcceptanceSpecPatience.WAIT_SCALE * duration.toSeconds, Seconds)),
      interval = scaled(Span(2, Seconds))
    )
}

object Page {
  type Path  = String Refined StartsWith[W.`"/"`.T]
  type Title = String Refined NonEmpty

  // Use a unique name to avoid problems on case-insensitive and preserving file systems
  object SleepThread {
    def apply(duration: Duration): Unit = Thread sleep duration.toMillis
  }
}

abstract class RenkuPage extends Page[RenkuBaseUrl]

object RenkuPage {
  case class RenkuBaseUrl(value: String Refined Url) extends BaseUrl(value)
} 
Example 61
Source File: UsesActorSystem.scala    From scala-commons   with MIT License
package com.avsystem.commons
package redis

import akka.actor.ActorSystem
import org.scalatest.concurrent.PatienceConfiguration
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.Await
import scala.concurrent.duration._


trait UsesActorSystem extends BeforeAndAfterAll with PatienceConfiguration { this: Suite =>
  implicit lazy val actorSystem: ActorSystem = ActorSystem()
  implicit def executionContext: ExecutionContext = actorSystem.dispatcher

  override implicit def patienceConfig: PatienceConfig =
    PatienceConfig(scaled(Span(10, Seconds)), scaled(Span(10, Milliseconds)))

  override protected def afterAll(): Unit = {
    Await.ready(actorSystem.terminate(), Duration.Inf)
    super.afterAll()
  }

  def wait(duration: FiniteDuration): Future[Unit] =
    if (duration == Duration.Zero) Future.successful(()) else {
      val promise = Promise[Unit]()
      actorSystem.scheduler.scheduleOnce(duration)(promise.success(()))
      promise.future
    }

  def waitUntil(predicate: => Future[Boolean], retryInterval: FiniteDuration): Future[Unit] =
    predicate.flatMap { r =>
      if (r) Future.successful(())
      else wait(retryInterval).flatMap(_ => waitUntil(predicate, retryInterval))
    }

  def waitFor[T](future: => Future[T])(condition: T => Boolean, retryInterval: FiniteDuration): Future[T] =
    future.flatMap { value =>
      if (condition(value)) Future.successful(value)
      else wait(retryInterval).flatMap(_ => waitFor(future)(condition, retryInterval))
    }
} 
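
wait, waitUntil and waitFor build a non-blocking retry loop out of the actor system's scheduler: each miss schedules another probe instead of parking a thread. A hypothetical usage of waitUntil, assuming a recent ScalaTest (the flipping predicate is purely illustrative):

import com.avsystem.commons.redis.UsesActorSystem
import org.scalatest.funsuite.AnyFunSuite

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

class RetryLoopSketch extends AnyFunSuite with UsesActorSystem {

  test("waitUntil polls a Future-valued predicate until it turns true") {
    @volatile var calls = 0
    def ready: Future[Boolean] = Future { calls += 1; calls >= 3 }

    // Resolves after roughly three probes spaced 10 ms apart.
    Await.result(waitUntil(ready, 10.millis), 5.seconds)
  }
}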
Example 62
Source File: HttpDeleteSpec.scala    From http-verbs   with Apache License 2.0
package uk.gov.hmrc.http

import akka.actor.ActorSystem
import com.typesafe.config.Config
import org.mockito.ArgumentCaptor
import org.mockito.Matchers.{any, eq => is}
import org.mockito.Mockito._
import org.scalatest.concurrent.PatienceConfiguration.{Interval, Timeout}
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.wordspec.AnyWordSpecLike
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.mockito.MockitoSugar
import uk.gov.hmrc.http.hooks.HttpHook

import scala.concurrent.{ExecutionContext, Future}

import uk.gov.hmrc.http.HttpReads.Implicits._

class HttpDeleteSpec extends AnyWordSpecLike with Matchers with MockitoSugar with CommonHttpBehaviour {

  import ExecutionContext.Implicits.global

  class StubbedHttpDelete(doDeleteResult: Future[HttpResponse], doDeleteWithHeaderResult: Future[HttpResponse]) extends HttpDelete with ConnectionTracingCapturing {
    val testHook1                                   = mock[HttpHook]
    val testHook2                                   = mock[HttpHook]
    val hooks                                       = Seq(testHook1, testHook2)
    override def configuration: Option[Config]      = None
    override protected def actorSystem: ActorSystem = ActorSystem("test-actor-system")

    def appName: String = ???

    def doDelete(url: String, headers: Seq[(String, String)])(implicit hc: HeaderCarrier, ec: ExecutionContext) =
      doDeleteResult
  }

  "HttpDelete" should {
    "be able to return plain responses" in {
      val response   = HttpResponse(200, testBody)
      val testDelete = new StubbedHttpDelete(Future.successful(response), Future.successful(response))
      testDelete.DELETE[HttpResponse](url, Seq("foo" -> "bar")).futureValue shouldBe response
    }

    "be able to return objects deserialised from JSON" in {
      val testDelete = new StubbedHttpDelete(Future.successful(HttpResponse(200, """{"foo":"t","bar":10}""")),
        Future.successful(HttpResponse(200, """{"foo":"t","bar":10}""")))
      testDelete
        .DELETE[TestClass](url, Seq("foo" -> "bar"))
        .futureValue(Timeout(Span(2, Seconds)), Interval(Span(15, Millis))) shouldBe TestClass("t", 10)
    }

    behave like anErrorMappingHttpCall("DELETE", (url, responseF) => new StubbedHttpDelete(responseF, responseF).DELETE[HttpResponse](url, Seq("foo" -> "bar")))
    behave like aTracingHttpCall("DELETE", "DELETE", new StubbedHttpDelete(defaultHttpResponse, defaultHttpResponse)) { _.DELETE[HttpResponse](url, Seq("foo" -> "bar")) }

    "Invoke any hooks provided" in {
      val dummyResponse       = HttpResponse(200, testBody)
      val dummyResponseFuture = Future.successful(dummyResponse)
      val dummyHeader         = Future.successful(dummyResponse)
      val testDelete          = new StubbedHttpDelete(dummyResponseFuture, dummyHeader)

      testDelete.DELETE[HttpResponse](url, Seq("header" -> "foo")).futureValue

      val respArgCaptor1 = ArgumentCaptor.forClass(classOf[Future[HttpResponse]])
      val respArgCaptor2 = ArgumentCaptor.forClass(classOf[Future[HttpResponse]])

      verify(testDelete.testHook1).apply(is(url), is("DELETE"), is(None), respArgCaptor1.capture())(any(), any())
      verify(testDelete.testHook2).apply(is(url), is("DELETE"), is(None), respArgCaptor2.capture())(any(), any())

      // verifying directly without ArgumentCaptor didn't work as Futures were different instances
      // e.g. Future.successful(5) != Future.successful(5)
      respArgCaptor1.getValue.futureValue shouldBe dummyResponse
      respArgCaptor2.getValue.futureValue shouldBe dummyResponse
    }
  }
} 
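
The JSON-deserialisation test above shows the other per-call knob: futureValue accepts an explicit Timeout and Interval that apply to that single await only, while every other futureValue in the suite keeps the implicit configuration. Distilled into a self-contained sketch (pure ScalaTest; the future is illustrative):

import org.scalatest.concurrent.PatienceConfiguration.{Interval, Timeout}
import org.scalatest.concurrent.ScalaFutures._
import org.scalatest.time.{Millis, Seconds, Span}

import scala.concurrent.Future

object OneOffPatienceSketch extends App {
  // This await polls every 15 ms and gives up after 2 s, regardless of any implicit config.
  val n = Future.successful(1).futureValue(Timeout(Span(2, Seconds)), Interval(Span(15, Millis)))
  println(n) // prints 1
}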
Example 63
Source File: SystemSpec.scala    From ddd-leaven-akka-v2   with MIT License
package ecommerce.tests.e2e

import ecommerce.invoicing.ReceivePayment
import ecommerce.sales._
import ecommerce.shipping.ShippingSerializationHintsProvider
import ecommerce.tests.e2e.SystemSpec._
import org.json4s.Formats
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}
import pl.newicom.dddd.utils.UUIDSupport.uuid7

class SystemSpec extends TestDriver with Eventually {

  implicit override val patienceConfig: PatienceConfig = PatienceConfig(
    timeout = scaled(Span(10, Seconds)),
    interval = scaled(Span(2, Seconds))
  )

  "Ecommerce system" should {
    val reservationId = new ReservationId(uuid7)
    val invoiceId     = reservationId
    val customerId    = uuid7

    using(sales_write) { implicit b =>
      "create reservation" in eventually {
        POST command {
          CreateReservation(reservationId, customerId)
        }
      }
    }

    using(sales_read) { implicit b =>
      "respond to reservation/{reservationId} query" in eventually {
        GET / s"reservation/$reservationId"
      }
    }

    using(sales_write) { implicit b =>
      "reserve product" in eventually {
        val product = Product(
          productId = uuid7,
          name = "DDDD For Dummies - 7th Edition",
          productType = ProductType.Standard,
          price = Money(10.0)
        )

        POST command {
          ReserveProduct(reservationId, product, quantity = 1)
        }
      }

      "confirm reservation" in eventually {
        POST command {
          ConfirmReservation(reservationId)
        }
      }
    }

    using(invoicing_write) { implicit b =>
      "pay" in eventually {
        POST command {
          ReceivePayment(invoiceId, reservationId.value, Money(10.0), paymentId = "230982342")
        }
      }
    }

    using(shipping_read) { implicit b =>
      "respond to /shipment/order/{orderId}" in eventually {
        GET / s"shipment/order/$reservationId"
      }
    }
  }
}
import pl.newicom.dddd.serialization.JsonSerHints._

object SystemSpec {

  val sales     = EndpointConfig(path = "ecommerce/sales")
  val invoicing = EndpointConfig(path = "ecommerce/invoicing")
  val shipping  = EndpointConfig(path = "ecommerce/shipping")

  val sales_write: EndpointConfig     = sales.copy(port = 9100)
  val invoicing_write: EndpointConfig = invoicing.copy(port = 9200)

  val sales_read: EndpointConfig    = sales.copy(port = 9110)
  val shipping_read: EndpointConfig = shipping.copy(port = 9310)

  implicit val formats: Formats =
    new SalesSerializationHintsProvider().hints() ++
      new ShippingSerializationHintsProvider().hints()

} 
Example 64
Source File: ViewTestSupport.scala    From ddd-leaven-akka-v2   with MIT License
package ecommerce.sales.view

import com.typesafe.config.Config
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.slf4j.Logger
import org.slf4j.LoggerFactory.getLogger
import pl.newicom.dddd.view.sql.SqlViewStore
import slick.dbio._
import slick.jdbc.H2Profile

import scala.concurrent.ExecutionContext

trait ViewTestSupport extends BeforeAndAfterAll with ScalaFutures {
  this: Suite =>

  def config: Config
  lazy val viewStore = new SqlViewStore(config)
  val log: Logger = getLogger(getClass)

  implicit val profile = H2Profile

  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(Span(5, Seconds)),
    interval = scaled(Span(200, Millis))
  )

  implicit class ViewStoreAction[A](a: DBIO[A])(implicit ex: ExecutionContext) {
    private val future = viewStore.run(a)

    def run(): Unit = future.map(_ => ()).futureValue
    def result: A = future.futureValue
  }

  def ensureSchemaDropped: DBIO[Unit]
  def ensureSchemaCreated: DBIO[Unit]

  override def beforeAll(): Unit = {
    val setup = viewStore.run {
      ensureSchemaDropped >> ensureSchemaCreated
    }
    assert(setup.isReadyWithin(Span(5, Seconds)))
  }

} 
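
Compared with Example 56, this version of the trait awaits schema setup with isReadyWithin instead of futureValue: isReadyWithin blocks for at most the given Span and returns false on timeout rather than throwing, so it slots directly into an assert. A pure-ScalaTest sketch of the difference (the future is illustrative):

import org.scalatest.concurrent.ScalaFutures._
import org.scalatest.time.{Seconds, Span}

import scala.concurrent.Future

object ReadinessSketch extends App {
  val f = Future.successful(())
  assert(f.isReadyWithin(Span(5, Seconds))) // false on timeout, no exception
  f.futureValue                             // would throw TestFailedException on timeout instead
}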
Example 65
Source File: EventsByPersistenceIdFastForwardSpec.scala    From akka-persistence-cassandra   with Apache License 2.0
package akka.persistence.cassandra.query

import java.util.UUID

import akka.persistence.PersistentRepr
import akka.persistence.cassandra.{ CassandraLifecycle, CassandraSpec }
import akka.stream.scaladsl.Keep
import akka.stream.testkit.scaladsl.TestSink
import com.typesafe.config.ConfigFactory
import org.scalatest.time.{ Milliseconds, Seconds, Span }

object EventsByPersistenceIdFastForwardSpec {

  // separate from EventsByPersistenceIdWithControlSpec since it needs the refreshing enabled
  val config = ConfigFactory.parseString(s"""
    akka.persistence.cassandra.journal.keyspace=EventsByPersistenceIdFastForwardSpec
    akka.persistence.cassandra.query.refresh-interval = 250ms
    akka.persistence.cassandra.query.max-result-size-query = 2
    akka.persistence.cassandra.journal.target-partition-size = 15
    """).withFallback(CassandraLifecycle.config)
}

class EventsByPersistenceIdFastForwardSpec
    extends CassandraSpec(EventsByPersistenceIdFastForwardSpec.config)
    with DirectWriting {

  override implicit val patience = PatienceConfig(timeout = Span(5, Seconds), interval = Span(100, Milliseconds))

  "be able to fast forward when currently looking for missing sequence number" in {
    val w1 = UUID.randomUUID().toString
    val evt1 = PersistentRepr("e-1", 1L, "f", "", writerUuid = w1)
    writeTestEvent(evt1)

    val src = queries.eventsByPersistenceIdWithControl("f", 0L, Long.MaxValue)
    val (futureControl, probe) = src.map(_.event).toMat(TestSink.probe[Any])(Keep.both).run()
    val control = futureControl.futureValue
    probe.request(5)

    val evt3 = PersistentRepr("e-3", 3L, "f", "", writerUuid = w1)
    writeTestEvent(evt3)

    probe.expectNext("e-1")

    system.log.debug("Sleeping for query to go into look-for-missing-seqnr-mode")
    Thread.sleep(2000)

    // then we fast forward past the gap
    control.fastForward(3L)
    probe.expectNext("e-3")

    val evt2 = PersistentRepr("e-2", 2L, "f", "", writerUuid = w1)
    val evt4 = PersistentRepr("e-4", 4L, "f", "", writerUuid = w1)
    writeTestEvent(evt2)
    writeTestEvent(evt4)
    probe.expectNext("e-4")

    probe.cancel()
  }
} 
Example 66
Source File: EventCollaborationSpec.scala    From akka-stream-eventsourcing   with Apache License 2.0
package com.github.krasserm.ases

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink}
import akka.testkit.TestKit
import com.github.krasserm.ases.log.{KafkaEventLog, KafkaSpec}
import org.apache.kafka.common.TopicPartition
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{Matchers, WordSpecLike}

import scala.collection.immutable.Seq

class EventCollaborationSpec extends TestKit(ActorSystem("test")) with WordSpecLike with Matchers with ScalaFutures with StreamSpec with KafkaSpec {
  import EventSourcingSpec._

  implicit val pc = PatienceConfig(timeout = Span(5, Seconds), interval = Span(10, Millis))

  val emitterId1 = "processor1"
  val emitterId2 = "processor2"

  val kafkaEventLog: KafkaEventLog =
    new log.KafkaEventLog(host, port)

  def processor(emitterId: String, topicPartition: TopicPartition): Flow[Request, Response, NotUsed] =
    EventSourcing(emitterId, 0, requestHandler, eventHandler).join(kafkaEventLog.flow(topicPartition))

  "A group of EventSourcing stages" when {
    "joined with a shared event log" can {
      "collaborate via publish-subscribe" in {
        val topicPartition = new TopicPartition("p-1", 0)    // shared topic partition
        val (pub1, sub1) = probes(processor(emitterId1, topicPartition)) // processor 1
        val (pub2, sub2) = probes(processor(emitterId2, topicPartition)) // processor 2

        pub1.sendNext(Increment(3))
        // Both processors receive event but
        // only processor 1 creates response
        sub1.requestNext(Response(3))

        pub2.sendNext(Increment(-4))
        // Both processors receive event but
        // only processor 2 creates response
        sub2.requestNext(Response(-1))

        // consume and verify events emitted by both processors
        kafkaEventLog.source[Incremented](topicPartition).via(log.replayed).map {
          case Durable(event, eid, _, sequenceNr) => (event, eid, sequenceNr)
        }.runWith(Sink.seq).futureValue should be(Seq(
          (Incremented(3), emitterId1, 0L),
          (Incremented(-4), emitterId2, 1L)
        ))
      }
    }
  }
} 
Example 67
Source File: KafkaEventLogSpec.scala    From akka-stream-eventsourcing   with Apache License 2.0
package com.github.krasserm.ases.log

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.testkit.TestKit
import com.github.krasserm.ases._
import org.apache.kafka.common.TopicPartition
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{Matchers, WordSpecLike}

import scala.collection.immutable.Seq

class KafkaEventLogSpec extends TestKit(ActorSystem("test")) with WordSpecLike with Matchers with ScalaFutures with StreamSpec with KafkaSpec {
  implicit val pc = PatienceConfig(timeout = Span(5, Seconds), interval = Span(10, Millis))

  val kafkaEventLog: KafkaEventLog = new KafkaEventLog(host, port)

  "A Kafka event log" must {
    "provide a sink for writing events and a source for delivering replayed events" in {
      val topicPartition = new TopicPartition("p-1", 0)
      val events = Seq("a", "b", "c").map(Emitted(_, emitterId))
      val expected = durables(events).map(Delivered(_)) :+ Recovered

      Source(events).runWith(kafkaEventLog.sink(topicPartition)).futureValue
      kafkaEventLog.source[String](topicPartition).take(4).runWith(Sink.seq).futureValue should be(expected)
    }
    "provide a flow with an input port for writing events and and output port for delivering replayed and live events" in {
      val topicPartition = new TopicPartition("p-2", 0)
      val events1 = Seq("a", "b", "c").map(Emitted(_, emitterId))
      val events2 = Seq("d", "e", "f").map(Emitted(_, emitterId))
      val expected = (durables(events1).map(Delivered(_)) :+ Recovered) ++ durables(events2, offset = 3).map(Delivered(_))

      Source(events1).runWith(kafkaEventLog.sink(topicPartition)).futureValue
      Source(events2).via(kafkaEventLog.flow(topicPartition)).take(7).runWith(Sink.seq).futureValue should be(expected)
    }
    "provide a source that only delivers events of compatible types" in {
      val topicPartition = new TopicPartition("p-3", 0)
      val events = Seq("a", "b", 1, 2).map(Emitted(_, emitterId))
      val expected = durables(events).drop(2).map(Delivered(_)) :+ Recovered

      Source(events).runWith(kafkaEventLog.sink(topicPartition)).futureValue
      kafkaEventLog.source[Int](topicPartition).take(3).runWith(Sink.seq).futureValue should be(expected)
    }
  }
} 
Example 68
Source File: MultiDcSpec.scala    From akka-management   with Apache License 2.0
package akka.management.cluster

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{ HttpRequest, StatusCodes }
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.management.scaladsl.ManagementRouteProviderSettings
import akka.stream.ActorMaterializer
import akka.testkit.SocketUtil
import com.typesafe.config.ConfigFactory
import org.scalatest.{ Matchers, WordSpec }
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.time.{ Millis, Seconds, Span }

class MultiDcSpec
    extends WordSpec
    with Matchers
    with ScalaFutures
    with ClusterHttpManagementJsonProtocol
    with Eventually {

  implicit val patience: PatienceConfig = PatienceConfig(timeout = Span(10, Seconds), interval = Span(50, Millis))

  val config = ConfigFactory.parseString(
    """
      |akka.actor.provider = "cluster"
      |akka.remote.log-remote-lifecycle-events = off
      |akka.remote.netty.tcp.hostname = "127.0.0.1"
      |#akka.loglevel = DEBUG
    """.stripMargin
  )

  "Http cluster management" must {
    "allow multiple DCs" in {
      val Vector(httpPortA, portA, portB) = SocketUtil.temporaryServerAddresses(3, "127.0.0.1").map(_.getPort)
      val dcA = ConfigFactory.parseString(
        s"""
           |akka.management.http.hostname = "127.0.0.1"
           |akka.management.http.port = $httpPortA
           |akka.cluster.seed-nodes = ["akka.tcp://MultiDcSystem@127.0.0.1:$portA"]
           |akka.cluster.multi-data-center.self-data-center = "DC-A"
           |akka.remote.netty.tcp.port = $portA
          """.stripMargin
      )
      val dcB = ConfigFactory.parseString(
        s"""
           |akka.cluster.seed-nodes = ["akka.tcp://MultiDcSystem@127.0.0.1:$portA"]
           |akka.cluster.multi-data-center.self-data-center = "DC-B"
           |akka.remote.netty.tcp.port = $portB
          """.stripMargin
      )

      implicit val dcASystem = ActorSystem("MultiDcSystem", config.withFallback(dcA))
      val dcBSystem = ActorSystem("MultiDcSystem", config.withFallback(dcB))
      implicit val materializer = ActorMaterializer()

      val routeSettings =
        ManagementRouteProviderSettings(selfBaseUri = s"http://127.0.0.1:$httpPortA", readOnly = false)

      try {
        Http()
          .bindAndHandle(ClusterHttpManagementRouteProvider(dcASystem).routes(routeSettings), "127.0.0.1", httpPortA)
          .futureValue

        eventually {
          val response =
            Http().singleRequest(HttpRequest(uri = s"http://127.0.0.1:$httpPortA/cluster/members")).futureValue
          response.status should equal(StatusCodes.OK)
          val members = Unmarshal(response.entity).to[ClusterMembers].futureValue
          members.members.size should equal(2)
          members.members.map(_.status) should equal(Set("Up"))
        }
      } finally {
        dcASystem.terminate()
        dcBSystem.terminate()
      }
    }
  }
} 
Example 69
Source File: ConsulDiscoverySpec.scala    From akka-management   with Apache License 2.0
package akka.cluster.bootstrap.discovery

import java.net.InetAddress

import akka.actor.ActorSystem
import akka.discovery.ServiceDiscovery.ResolvedTarget
import akka.discovery.consul.ConsulServiceDiscovery
import akka.testkit.TestKitBase
import com.google.common.net.HostAndPort
import com.orbitz.consul.Consul
import com.orbitz.consul.model.catalog.ImmutableCatalogRegistration
import com.orbitz.consul.model.health.ImmutableService
import com.pszymczyk.consul.{ ConsulProcess, ConsulStarterBuilder }
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike }

import scala.concurrent.duration._

class ConsulDiscoverySpec extends WordSpecLike with Matchers with BeforeAndAfterAll with TestKitBase with ScalaFutures {

  private val consul: ConsulProcess = ConsulStarterBuilder.consulStarter().withHttpPort(8500).build().start()

  "Consul Discovery" should {
    "work for defaults" in {
      val consulAgent =
        Consul.builder().withHostAndPort(HostAndPort.fromParts(consul.getAddress, consul.getHttpPort)).build()
      consulAgent
        .catalogClient()
        .register(
          ImmutableCatalogRegistration
            .builder()
            .service(
              ImmutableService
                .builder()
                .addTags(s"system:${system.name}", "akka-management-port:1234")
                .address("127.0.0.1")
                .id("test")
                .service("test")
                .port(1235)
                .build()
            )
            .node("testNode")
            .address("localhost")
            .build()
        )

      val lookupService = new ConsulServiceDiscovery(system)
      val resolved = lookupService.lookup("test", 10.seconds).futureValue
      resolved.addresses should contain(
        ResolvedTarget(
          host = "127.0.0.1",
          port = Some(1234),
          address = Some(InetAddress.getByName("127.0.0.1"))
        )
      )
    }
  }

  override def afterAll(): Unit = {
    super.afterAll()
    consul.close()
  }

  override implicit lazy val system: ActorSystem = ActorSystem("test")

  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = scaled(Span(30, Seconds)), interval = scaled(Span(50, Millis)))

} 
Example 70
Source File: HttpContactPointRoutesSpec.scala    From akka-management   with Apache License 2.0
package akka.management.cluster.bootstrap.contactpoint

import akka.cluster.{ Cluster, ClusterEvent }
import akka.event.NoLogging
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.management.cluster.bootstrap.ClusterBootstrapSettings
import akka.testkit.{ SocketUtil, TestProbe }
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ Matchers, WordSpecLike }

class HttpContactPointRoutesSpec
    extends WordSpecLike
    with Matchers
    with ScalatestRouteTest
    with HttpBootstrapJsonProtocol
    with Eventually {

  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = scaled(Span(3, Seconds)), interval = scaled(Span(50, Millis)))

  override def testConfigSource =
    s"""
    akka {
      remote {
        netty.tcp {
          hostname = "127.0.0.1"
          port = ${SocketUtil.temporaryServerAddress("127.0.0.1").getPort}
        }
      }
    }
    """.stripMargin

  "Http Bootstrap routes" should {

    val settings = ClusterBootstrapSettings(system.settings.config, NoLogging)
    val httpBootstrap = new HttpClusterBootstrapRoutes(settings)

    "empty list if node is not part of a cluster" in {
      ClusterBootstrapRequests.bootstrapSeedNodes("") ~> httpBootstrap.routes ~> check {
        responseAs[String] should include(""""seedNodes":[]""")
      }
    }

    "include seed nodes when part of a cluster" in {
      val cluster = Cluster(system)
      cluster.join(cluster.selfAddress)

      val p = TestProbe()
      cluster.subscribe(p.ref, ClusterEvent.InitialStateAsEvents, classOf[ClusterEvent.MemberUp])
      val up = p.expectMsgType[ClusterEvent.MemberUp]
      up.member should ===(cluster.selfMember)

      eventually {
        ClusterBootstrapRequests.bootstrapSeedNodes("") ~> httpBootstrap.routes ~> check {
          val response = responseAs[HttpBootstrapJsonProtocol.SeedNodes]
          response.seedNodes should !==(Set.empty)
          response.seedNodes.map(_.node) should contain(cluster.selfAddress)
        }
      }
    }
  }

} 
Example 71
Source File: LeaseSpec.scala    From akka-management   with Apache License 2.0
package akka.coordination.lease.kubernetes

import akka.actor.ActorSystem
import akka.coordination.lease.TimeoutSettings
import akka.coordination.lease.kubernetes.internal.KubernetesApiImpl
import akka.coordination.lease.scaladsl.LeaseProvider
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.time.{ Milliseconds, Seconds, Span }
import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpec }


abstract class LeaseSpec() extends WordSpec with ScalaFutures with BeforeAndAfterAll with Matchers with Eventually {

  def system: ActorSystem

  implicit val patience = PatienceConfig(Span(3, Seconds), Span(500, Milliseconds))

  lazy val underTest = LeaseProvider(system)
  // for cleanup
  val config = system.settings.config.getConfig(KubernetesLease.configPath)
  lazy val k8sApi = new KubernetesApiImpl(system, KubernetesSettings(config, TimeoutSettings(config)))
  val leaseName = "lease-1"
  val client1 = "client1"
  val client2 = "client2"

  // two leases instances for the same lease name
  lazy val lease1Client1 = underTest.getLease(leaseName, "akka.coordination.lease.kubernetes", client1)
  lazy val lease1Client2 = underTest.getLease(leaseName, "akka.coordination.lease.kubernetes", client2)

  "A lease" should {

    "be different instances" in {
      assert(lease1Client1 ne lease1Client2)
    }

    "work" in {
      lease1Client1.acquire().futureValue shouldEqual true
      lease1Client1.checkLease() shouldEqual true
    }

    "be reentrant" in {
      lease1Client1.acquire().futureValue shouldEqual true
      lease1Client1.checkLease() shouldEqual true
      lease1Client2.checkLease() shouldEqual false
    }

    "not allow another client to acquire the lease" in {
      lease1Client2.acquire().futureValue shouldEqual false
      lease1Client2.checkLease() shouldEqual false
    }

    "maintain the lease for a prolonged period" in {
      lease1Client1.acquire().futureValue shouldEqual true
      lease1Client1.checkLease() shouldEqual true
      Thread.sleep(200)
      lease1Client1.checkLease() shouldEqual true
      Thread.sleep(200)
      lease1Client1.checkLease() shouldEqual true
      Thread.sleep(200)
      lease1Client1.checkLease() shouldEqual true
    }

    "not allow another client to release the lease" in {
      lease1Client2.release().failed.futureValue.getMessage shouldEqual "Tried to release a lease that is not acquired"
    }

    "allow removing the lease" in {
      lease1Client1.release().futureValue shouldEqual true
      eventually {
        lease1Client1.checkLease() shouldEqual false
      }
    }

    "allow a new client to get the lease once released" in {
      lease1Client2.acquire().futureValue shouldEqual true
      lease1Client2.checkLease() shouldEqual true
      lease1Client1.checkLease() shouldEqual false
    }
  }

} 
Example 72
Source File: OutputCommitCoordinatorIntegrationSuite.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.scheduler

import org.apache.hadoop.mapred.{FileOutputCommitter, TaskAttemptContext}
import org.scalatest.concurrent.Timeouts
import org.scalatest.time.{Seconds, Span}

import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkFunSuite, TaskContext}
import org.apache.spark.util.Utils


class OutputCommitCoordinatorIntegrationSuite
  extends SparkFunSuite
  with LocalSparkContext
  with Timeouts {

  override def beforeAll(): Unit = {
    super.beforeAll()
    val conf = new SparkConf()
      .set("spark.hadoop.outputCommitCoordination.enabled", "true")
      .set("spark.hadoop.mapred.output.committer.class",
        classOf[ThrowExceptionOnFirstAttemptOutputCommitter].getCanonicalName)
    sc = new SparkContext("local[2, 4]", "test", conf)
  }

  test("exception thrown in OutputCommitter.commitTask()") {
    // Regression test for SPARK-10381
    failAfter(Span(60, Seconds)) {
      val tempDir = Utils.createTempDir()
      try {
        sc.parallelize(1 to 4, 2).map(_.toString).saveAsTextFile(tempDir.getAbsolutePath + "/out")
      } finally {
        Utils.deleteRecursively(tempDir)
      }
    }
  }
}

private class ThrowExceptionOnFirstAttemptOutputCommitter extends FileOutputCommitter {
  override def commitTask(context: TaskAttemptContext): Unit = {
    val ctx = TaskContext.get()
    if (ctx.attemptNumber < 1) {
      throw new java.io.FileNotFoundException("Intentional exception")
    }
    super.commitTask(context)
  }
} 
Example 73
Source File: ProducerStubSpec.scala    From lagom   with Apache License 2.0
package com.lightbend.lagom.scaladsl.testkit

import com.lightbend.lagom.scaladsl.api.broker.Topic
import com.lightbend.lagom.scaladsl.server.LagomApplicationContext
import com.lightbend.lagom.scaladsl.server.LocalServiceLocator
import com.lightbend.lagom.scaladsl.testkit.services._
import org.scalatest.concurrent.Eventually
import org.scalatest.time.Seconds
import org.scalatest.time.Span
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AsyncWordSpec

class ProducerStubSpec extends AsyncWordSpec with Matchers with BeforeAndAfterAll with Eventually {
  var producerStub: ProducerStub[AlphaEvent] = _

  private val stubbedApplication: LagomApplicationContext => DownstreamApplication = { ctx =>
    new DownstreamApplication(ctx) with LocalServiceLocator with TestTopicComponents {
      val stubFactory = new ProducerStubFactory(actorSystem, materializer)
      producerStub = stubFactory.producer[AlphaEvent](AlphaService.TOPIC_ID)
      override lazy val alphaService = new AlphaServiceStub(producerStub)
    }
  }

  "The ProducerStub" should {
    "send message to consuming services" in ServiceTest.withServer(ServiceTest.defaultSetup.withCluster())(
      stubbedApplication
    ) { server =>
      implicit val exCtx = server.application.actorSystem.dispatcher
      producerStub.send(AlphaEvent(22))
      eventually(timeout(Span(5, Seconds))) {
        server.serviceClient
          .implement[CharlieService]
          .messages
          .invoke()
          .map { response =>
            response should ===(Seq(ReceivedMessage("A", 22)))
          }
          .recover {
            case t: Throwable => fail(t)
          }
      }
    }
  }
}

class AlphaServiceStub(stub: ProducerStub[AlphaEvent]) extends AlphaService {
  override def messages: Topic[AlphaEvent] = stub.topic
}