org.scalatest.BeforeAndAfterAll Scala Examples

The following examples show how to use org.scalatest.BeforeAndAfterAll. Each one is taken from an open-source project; the source file, project, and license are noted above each example.
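Before the project examples, here is a minimal, self-contained sketch of the lifecycle (the suite and resource are illustrative): beforeAll acquires a suite-wide resource once before the first test, afterAll releases it once after the last test, and both call super so the trait stays stackable.

import org.scalatest.{BeforeAndAfterAll, FunSuite}

class TempFileSuite extends FunSuite with BeforeAndAfterAll {
  private var resource: java.io.File = _

  override def beforeAll(): Unit = {
    super.beforeAll()
    resource = java.io.File.createTempFile("suite", ".tmp") // acquired once for the whole suite
  }

  override def afterAll(): Unit = {
    try resource.delete()    // released once, after the last test
    finally super.afterAll() // keep the trait stackable
  }

  test("the resource exists while tests run") {
    assert(resource.exists())
  }
}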
Example 1
Source File: WireMockHelper.scala    From pertax-frontend   with Apache License 2.0
package util

import com.github.tomakehurst.wiremock.WireMockServer
import com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}

trait WireMockHelper extends BeforeAndAfterAll with BeforeAndAfterEach {
  this: Suite =>

  protected val server: WireMockServer = new WireMockServer(wireMockConfig().dynamicPort())

  override def beforeAll(): Unit = {
    server.start()
    super.beforeAll()
  }

  override def beforeEach(): Unit = {
    server.resetAll()
    super.beforeEach()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    server.stop()
  }
} 
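A usage sketch for the trait above (the suite name and endpoint are illustrative, not part of the project): the server starts once in beforeAll, is reset before each test, and stops after the last test, so each test can stub endpoints against the dynamically assigned port.

import com.github.tomakehurst.wiremock.client.WireMock.{aResponse, get, urlEqualTo}
import org.scalatest.FunSuite
import util.WireMockHelper

class PingSpec extends FunSuite with WireMockHelper {
  test("a stubbed endpoint answers on the dynamic port") {
    server.stubFor(get(urlEqualTo("/ping")).willReturn(aResponse().withStatus(200)))
    // point the code under test at s"http://localhost:${server.port()}"
  }
}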
Example 2
Source File: IntegrationTest.scala    From kmq   with Apache License 2.0
package com.softwaremill.kmq.redelivery

import java.time.Duration
import java.util.Random

import akka.actor.ActorSystem
import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ConsumerSettings, ProducerMessage, ProducerSettings, Subscriptions}
import akka.stream.ActorMaterializer
import akka.testkit.TestKit
import com.softwaremill.kmq._
import com.softwaremill.kmq.redelivery.infrastructure.KafkaSpec
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringDeserializer
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}

import scala.collection.mutable.ArrayBuffer

class IntegrationTest extends TestKit(ActorSystem("test-system"))
    with FlatSpecLike with KafkaSpec with BeforeAndAfterAll with Eventually with Matchers {

  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  "KMQ" should "resend message if not committed" in {
    val bootstrapServer = s"localhost:${testKafkaConfig.kafkaPort}"
    val kmqConfig = new KmqConfig("queue", "markers", "kmq_client", "kmq_redelivery",
      Duration.ofSeconds(1).toMillis, 1000)

    val consumerSettings = ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
      .withBootstrapServers(bootstrapServer)
      .withGroupId(kmqConfig.getMsgConsumerGroupId)
      .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

    val markerProducerSettings = ProducerSettings(system,
      new MarkerKey.MarkerKeySerializer(), new MarkerValue.MarkerValueSerializer())
      .withBootstrapServers(bootstrapServer)
      .withProperty(ProducerConfig.PARTITIONER_CLASS_CONFIG, classOf[ParititionFromMarkerKey].getName)
    val markerProducer = markerProducerSettings.createKafkaProducer()

    val random = new Random()

    lazy val processedMessages = ArrayBuffer[String]()
    lazy val receivedMessages = ArrayBuffer[String]()

    val control = Consumer.committableSource(consumerSettings, Subscriptions.topics(kmqConfig.getMsgTopic)) // 1. get messages from topic
      .map { msg =>
        ProducerMessage.Message(
          new ProducerRecord[MarkerKey, MarkerValue](kmqConfig.getMarkerTopic, MarkerKey.fromRecord(msg.record), new StartMarker(kmqConfig.getMsgTimeoutMs)), msg)
      }
      .via(Producer.flow(markerProducerSettings, markerProducer)) // 2. write the "start" marker
      .map(_.message.passThrough)
      .mapAsync(1) { msg =>
        msg.committableOffset.commitScaladsl().map(_ => msg.record) // this should be batched
      }
      .map { msg =>
        receivedMessages += msg.value
        msg
      }
      .filter(_ => random.nextInt(5) != 0)
      .map { processedMessage =>
        processedMessages += processedMessage.value
        new ProducerRecord[MarkerKey, MarkerValue](kmqConfig.getMarkerTopic, MarkerKey.fromRecord(processedMessage), EndMarker.INSTANCE)
      }
      .to(Producer.plainSink(markerProducerSettings, markerProducer)) // 5. write "end" markers
      .run()

    val redeliveryHook = RedeliveryTracker.start(new KafkaClients(bootstrapServer), kmqConfig)

    val messages = (0 to 20).map(_.toString)
    messages.foreach(msg => sendToKafka(kmqConfig.getMsgTopic, msg))

    eventually {
      receivedMessages.size should be > processedMessages.size
      processedMessages.sortBy(_.toInt).distinct shouldBe messages
    }(PatienceConfig(timeout = Span(15, Seconds)), implicitly)

    redeliveryHook.close()
    control.shutdown()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    TestKit.shutdownActorSystem(system)
  }
} 
Example 3
Source File: SqlUnitTest.scala    From SparkUnitTestingExamples   with Apache License 2.0
package com.cloudera.sa.spark.unittest.sql

import org.apache.spark.sql.Row
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSuite}

import scala.collection.mutable

class SqlUnitTest extends FunSuite
    with BeforeAndAfterEach with BeforeAndAfterAll {

  @transient var sc: SparkContext = null
  @transient var hiveContext: HiveContext = null

  override def beforeAll(): Unit = {

    val envMap = Map[String,String](("Xmx", "512m"))

    val sparkConfig = new SparkConf()
    sparkConfig.set("spark.broadcast.compress", "false")
    sparkConfig.set("spark.shuffle.compress", "false")
    sparkConfig.set("spark.shuffle.spill.compress", "false")
    sparkConfig.set("spark.io.compression.codec", "lzf")
    sc = new SparkContext("local[2]", "unit test", sparkConfig)
    hiveContext = new HiveContext(sc)
  }

  override def afterAll(): Unit = {
    sc.stop()
    super.afterAll()
  }

  test("Test table creation and summing of counts") {
    val personRDD = sc.parallelize(Seq(Row("ted", 42, "blue"),
      Row("tj", 11, "green"),
      Row("andrew", 9, "green")))

    hiveContext.sql("create table person (name string, age int, color string)")

    val emptyDataFrame = hiveContext.sql("select * from person limit 0")

    val personDataFrame = hiveContext.createDataFrame(personRDD, emptyDataFrame.schema)
    personDataFrame.registerTempTable("tempPerson")

    val ageSumDataFrame = hiveContext.sql("select sum(age) from tempPerson")

    val localAgeSum = ageSumDataFrame.take(10)

    assert(localAgeSum(0).get(0) == 62, "The sum of age should equal 62 but it equaled " + localAgeSum(0).get(0))
  }
} 
Example 4
Source File: PostgresIT.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.persistence

import com.codahale.metrics.{MetricRegistry, SharedMetricRegistries}
import com.daml.dec.DirectExecutionContext
import com.daml.logging.LoggingContext.newLoggingContext
import com.daml.metrics.Metrics
import com.daml.platform.configuration.ServerRole
import com.daml.platform.store.FlywayMigrations
import com.daml.platform.store.dao.{HikariJdbcConnectionProvider, JdbcConnectionProvider}
import com.daml.resources.Resource
import com.daml.testing.postgresql.PostgresAroundAll
import org.scalatest.{AsyncWordSpec, BeforeAndAfterAll, Matchers}

import scala.concurrent.Await
import scala.concurrent.duration.DurationInt

class PostgresIT extends AsyncWordSpec with Matchers with PostgresAroundAll with BeforeAndAfterAll {

  private var connectionProviderResource: Resource[JdbcConnectionProvider] = _
  private var connectionProvider: JdbcConnectionProvider = _
  private val metrics = new Metrics(SharedMetricRegistries.getOrCreate("PostgresIT"))

  override def beforeAll(): Unit = {
    super.beforeAll()
    newLoggingContext { implicit logCtx =>
      connectionProviderResource = HikariJdbcConnectionProvider
        .owner(
          ServerRole.Testing(getClass),
          postgresDatabase.url,
          maxConnections = 4,
          new MetricRegistry,
        )
        .acquire()(DirectExecutionContext)
      connectionProvider = Await.result(connectionProviderResource.asFuture, 10.seconds)
    }
  }

  override protected def afterAll(): Unit = {
    Await.result(connectionProviderResource.release(), 10.seconds)
    super.afterAll()
  }

  "Postgres" when {
    "running queries using Hikari" should {
      "be accessible" in {
        connectionProvider.runSQL(metrics.test.db) { conn =>
          val resultSet = conn.createStatement().executeQuery("SELECT 1")
          resultSet.next()
          val result = resultSet.getInt(1)
          result shouldEqual 1
        }
      }
    }
  }

  "Flyway" should {
    "execute initialisation script" in {
      newLoggingContext { implicit logCtx =>
        new FlywayMigrations(postgresDatabase.url).migrate()(DirectExecutionContext)
      }.map { _ =>
        connectionProvider.runSQL(metrics.test.db) { conn =>
          def checkTableExists(table: String) = {
            val resultSet = conn.createStatement().executeQuery(s"SELECT * from $table")
            resultSet.next shouldEqual false
          }

          def checkTableDoesNotExist(table: String) = {
            val resultSet = conn.createStatement().executeQuery(s"SELECT to_regclass('$table')")
            resultSet.next shouldEqual true
            Option(resultSet.getString(1)) shouldEqual Option.empty[String]
            resultSet.wasNull() shouldEqual true
          }

          checkTableExists("parameters")
          checkTableExists("configuration_entries")

          checkTableExists("participant_command_completions")
          checkTableExists("participant_command_submissions")
          checkTableExists("participant_contract_witnesses")
          checkTableExists("participant_contracts")
          checkTableExists("participant_events")

          checkTableExists("parties")
          checkTableExists("party_entries")

          checkTableExists("packages")
          checkTableExists("package_entries")

          checkTableDoesNotExist("participant_event_flat_transaction_witnesses")
          checkTableDoesNotExist("participant_event_transaction_tree_witnesses")
        }
      }
    }
  }
} 
Example 5
Source File: MetricsAround.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox

import com.codahale.metrics.MetricRegistry
import org.scalatest.BeforeAndAfterAll

trait MetricsAround extends BeforeAndAfterAll {
  self: org.scalatest.Suite =>

  @volatile protected var metrics: MetricRegistry = _

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    metrics = new MetricRegistry
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
  }
} 
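A usage sketch (suite and metric names are illustrative): every run of the suite sees a fresh MetricRegistry, assigned in beforeAll before any test executes.

import com.daml.platform.sandbox.MetricsAround
import org.scalatest.WordSpec

class CounterSpec extends WordSpec with MetricsAround {
  "a counter" should {
    "start from zero in a fresh registry" in {
      val counter = metrics.counter("requests")
      counter.inc()
      assert(counter.getCount == 1)
    }
  }
}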
Example 6
Source File: DropRepeatedSpec.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.server.api

import akka.actor.ActorSystem
import akka.pattern.pipe
import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.testkit.{TestKit, TestProbe}
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.collection.immutable
import scala.concurrent.ExecutionContext

final class DropRepeatedSpec
    extends TestKit(ActorSystem(classOf[DropRepeatedSpec].getSimpleName))
    with WordSpecLike
    with Matchers
    with BeforeAndAfterAll {

  private[this] implicit val materializer: Materializer = Materializer(system)
  private[this] implicit val executionContext: ExecutionContext = materializer.executionContext

  override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }

  "DropRepeated" should {
    "drop repeated elements" in {
      val probe = TestProbe()
      val input = immutable.Seq(1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5)

      val _ = Source(input)
        .via(DropRepeated())
        .runWith(Sink.seq)
        .pipeTo(probe.ref)
        .failed
        .foreach(fail(_))

      probe.expectMsg(Vector(1, 2, 3, 4, 5))
    }

    "does not drop duplicate elements that are not repeated" in {
      val probe = TestProbe()
      val input = immutable.Seq(1, 1, 2, 2, 1, 1, 2, 2)

      val _ = Source(input)
        .via(DropRepeated())
        .runWith(Sink.seq)
        .pipeTo(probe.ref)
        .failed
        .foreach(fail(_))

      probe.expectMsg(Vector(1, 2, 1, 2))
    }
  }
} 
Example 7
Source File: AuthorizationTest.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.http

import java.nio.file.Files

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.daml.auth.TokenHolder
import com.daml.bazeltools.BazelRunfiles.rlocation
import com.daml.grpc.adapter.{AkkaExecutionSequencerPool, ExecutionSequencerFactory}
import com.daml.http.util.TestUtil.requiredFile
import com.daml.ledger.api.auth.{AuthServiceStatic, Claim, ClaimPublic, Claims}
import com.daml.ledger.client.LedgerClient
import org.scalatest.{AsyncFlatSpec, BeforeAndAfterAll, Matchers}
import org.slf4j.LoggerFactory

import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal

final class AuthorizationTest extends AsyncFlatSpec with BeforeAndAfterAll with Matchers {

  private val dar = requiredFile(rlocation("docs/quickstart-model.dar"))
    .fold(e => throw new IllegalStateException(e), identity)

  private val testId: String = this.getClass.getSimpleName

  implicit val asys: ActorSystem = ActorSystem(testId)
  implicit val mat: Materializer = Materializer(asys)
  implicit val aesf: ExecutionSequencerFactory = new AkkaExecutionSequencerPool(testId)(asys)
  implicit val ec: ExecutionContext = asys.dispatcher

  private val publicToken = "public"
  private val emptyToken = "empty"
  private val mockedAuthService = Option(AuthServiceStatic {
    case `publicToken` => Claims(Seq[Claim](ClaimPublic))
    case `emptyToken` => Claims(Nil)
  })

  private val accessTokenFile = Files.createTempFile("Extractor", "AuthSpec")
  private val tokenHolder = Option(new TokenHolder(accessTokenFile))

  private def setToken(string: String): Unit = {
    val _ = Files.write(accessTokenFile, string.getBytes())
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    try {
      Files.delete(accessTokenFile)
    } catch {
      case NonFatal(e) =>
        LoggerFactory
          .getLogger(classOf[AuthorizationTest])
          .warn("Unable to delete temporary token file", e)
    }
  }

  protected def withLedger[A] =
    HttpServiceTestFixture
      .withLedger[A](List(dar), testId, Option(publicToken), mockedAuthService) _

  private def packageService(client: LedgerClient): PackageService =
    new PackageService(HttpService.loadPackageStoreUpdates(client.packageClient, tokenHolder))

  behavior of "PackageService against an authenticated sandbox"

  it should "fail immediately if the authorization is insufficient" in withLedger { client =>
    setToken(emptyToken)
    packageService(client).reload.failed.map(_ => succeed)
  }

  it should "succeed if the authorization is sufficient" in withLedger { client =>
    setToken(publicToken)
    packageService(client).reload.map(_ => succeed)
  }

} 
Example 8
Source File: HttpServiceIntegrationTest.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.http

import java.io.File
import java.nio.file.Files

import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpMethods, HttpRequest, StatusCodes, Uri}
import com.daml.http.Statement.discard
import com.daml.http.util.TestUtil.writeToFile
import org.scalacheck.Gen
import org.scalatest.{Assertion, BeforeAndAfterAll}

import scala.concurrent.Future

class HttpServiceIntegrationTest extends AbstractHttpServiceIntegrationTest with BeforeAndAfterAll {

  private val staticContent: String = "static"

  private val staticContentDir: File =
    Files.createTempDirectory("integration-test-static-content").toFile

  override def staticContentConfig: Option[StaticContentConfig] =
    Some(StaticContentConfig(prefix = staticContent, directory = staticContentDir))

  override def jdbcConfig: Option[JdbcConfig] = None

  private val expectedDummyContent: String = Gen
    .listOfN(100, Gen.identifier)
    .map(_.mkString(" "))
    .sample
    .getOrElse(throw new IllegalStateException(s"Cannot create dummy text content"))

  private val dummyFile: File =
    writeToFile(new File(staticContentDir, "dummy.txt"), expectedDummyContent).get
  require(dummyFile.exists)

  override protected def afterAll(): Unit = {
    // clean up temp directory
    discard { dummyFile.delete() }
    discard { staticContentDir.delete() }
    super.afterAll()
  }

  "should serve static content from configured directory" in withHttpService { (uri: Uri, _, _) =>
    Http()
      .singleRequest(
        HttpRequest(
          method = HttpMethods.GET,
          uri = uri.withPath(Uri.Path(s"/$staticContent/${dummyFile.getName}"))))
      .flatMap { resp =>
        discard { resp.status shouldBe StatusCodes.OK }
        val bodyF: Future[String] = getResponseDataBytes(resp, debug = false)
        bodyF.flatMap { body =>
          body shouldBe expectedDummyContent
        }
      }: Future[Assertion]
  }

  "Forwarded" - {
    import Endpoints.Forwarded
    "can 'parse' sample" in {
      Forwarded("for=192.168.0.1;proto=http;by=192.168.0.42").proto should ===(Some("http"))
    }

    "can 'parse' quoted sample" in {
      Forwarded("for=192.168.0.1;proto = \"https\" ;by=192.168.0.42").proto should ===(
        Some("https"))
    }
  }
} 
Example 9
Source File: SuiteResourceManagement.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}

trait SuiteResource[+T] {
  protected def suiteResource: Resource[T]
}


trait SuiteResourceManagement {}

trait SuiteResourceManagementAroundAll
    extends SuiteResource[Any]
    with SuiteResourceManagement
    with BeforeAndAfterAll {
  self: Suite =>

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    suiteResource.setup()
  }

  override protected def afterAll(): Unit = {
    suiteResource.close()
    super.afterAll()
  }
}

trait SuiteResourceManagementAroundEach
    extends SuiteResource[Any]
    with SuiteResourceManagement
    with BeforeAndAfterEach {
  self: Suite =>

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    suiteResource.setup()
  }

  override protected def afterEach(): Unit = {
    suiteResource.close()
    super.afterEach()
  }
} 
Example 10
Source File: AkkaBeforeAndAfterAll.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import java.util.concurrent.Executors

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.daml.grpc.adapter.{AkkaExecutionSequencerPool, ExecutionSequencerFactory}
import com.google.common.util.concurrent.ThreadFactoryBuilder
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.slf4j.LoggerFactory

import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, ExecutionContext}

trait AkkaBeforeAndAfterAll extends BeforeAndAfterAll {
  self: Suite =>
  private val logger = LoggerFactory.getLogger(getClass)

  protected def actorSystemName: String = this.getClass.getSimpleName

  private implicit lazy val executionContext: ExecutionContext =
    ExecutionContext.fromExecutorService(
      Executors.newSingleThreadExecutor(
        new ThreadFactoryBuilder()
          .setDaemon(true)
          .setNameFormat(s"$actorSystemName-thread-pool-worker-%d")
          .setUncaughtExceptionHandler((thread, _) =>
            logger.error(s"got an uncaught exception on thread: ${thread.getName}"))
          .build()))

  protected implicit lazy val system: ActorSystem =
    ActorSystem(actorSystemName, defaultExecutionContext = Some(executionContext))

  protected implicit lazy val materializer: Materializer = Materializer(system)

  protected implicit lazy val executionSequencerFactory: ExecutionSequencerFactory =
    new AkkaExecutionSequencerPool(poolName = actorSystemName, actorCount = 1)

  override protected def afterAll(): Unit = {
    executionSequencerFactory.close()
    materializer.shutdown()
    Await.result(system.terminate(), 30.seconds)
    super.afterAll()
  }
} 
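A usage sketch (the stream is illustrative): mixing in the trait gives a suite a lazily created ActorSystem and Materializer, both of which are torn down in afterAll.

import akka.stream.scaladsl.{Sink, Source}
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.Await
import scala.concurrent.duration.DurationInt

class StreamSuite extends WordSpec with Matchers with AkkaBeforeAndAfterAll {
  "a source" should {
    "be runnable with the suite's materializer" in {
      val sum = Await.result(Source(1 to 10).runWith(Sink.fold(0)(_ + _)), 5.seconds)
      sum shouldBe 55
    }
  }
}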
Example 11
Source File: ReferenceServiceAndClientSpecHttp.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter.operation

import com.daml.grpc.adapter.client.ReferenceClientCompatibilityCheck
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpec}
import java.net.InetSocketAddress

class ReferenceServiceAndClientHttpSpec
    extends WordSpec
    with Matchers
    with BeforeAndAfterAll
    with ScalaFutures
    with ReferenceClientCompatibilityCheck
    with ReferenceServiceFixture {

  "Reference service" when {

    "testing with reference client" should {
      behave like referenceClientCompatible(clientStub)
    }
  }
  override def socketAddress = Some(new InetSocketAddress("127.0.0.1", 0))
} 
Example 12
Source File: ReferenceServiceAndClientSpecInProcess.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter.operation

import com.daml.grpc.adapter.client.ReferenceClientCompatibilityCheck
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpec}

class ReferenceServiceAndClientSpecInProcess
    extends WordSpec
    with Matchers
    with BeforeAndAfterAll
    with ScalaFutures
    with ReferenceClientCompatibilityCheck
    with ReferenceServiceFixture {

  "Reference service" when {

    "testing with reference client" should {
      behave like referenceClientCompatible(clientStub)
    }
  }

  override def socketAddress = None
} 
Example 13
Source File: AkkaClientWithReferenceServiceSpecBase.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter.operation

import akka.stream.scaladsl.Sink
import com.daml.grpc.adapter.client.ReferenceClientCompatibilityCheck
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.grpc.adapter.{ExecutionSequencerFactory, TestExecutionSequencerFactory}
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import com.daml.platform.hello.HelloRequest
import io.grpc.StatusRuntimeException
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpec}

import java.net.SocketAddress

abstract class AkkaClientWithReferenceServiceSpecBase(
    override protected val socketAddress: Option[SocketAddress])
    extends WordSpec
    with Matchers
    with BeforeAndAfterAll
    with AkkaBeforeAndAfterAll
    with ScalaFutures
    with ReferenceClientCompatibilityCheck
    with AkkaClientCompatibilityCheck
    with ReferenceServiceFixture {

  protected implicit val esf: ExecutionSequencerFactory = TestExecutionSequencerFactory.instance

  "Akka client" when {

    "testing with reference service" should {
      behave like akkaClientCompatible(clientStub)
    }

    "handle request errors when server streaming" in {
      val elemsF = ClientAdapter
        .serverStreaming(HelloRequest(-1), clientStub.serverStreaming)
        .runWith(Sink.ignore)

      whenReady(elemsF.failed)(_ shouldBe a[StatusRuntimeException])
    }

  }
} 
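A sketch of concrete subclasses (mirroring Examples 11 and 12; the class names are illustrative): the abstract base is parameterized only by the optional socket address, so an HTTP variant and an in-process variant differ in a single constructor argument.

import java.net.InetSocketAddress

class AkkaClientWithReferenceServiceHttpSpec
    extends AkkaClientWithReferenceServiceSpecBase(
      Some(new InetSocketAddress("127.0.0.1", 0))) // port 0: let the OS pick a free port

class AkkaClientWithReferenceServiceInProcessSpec
    extends AkkaClientWithReferenceServiceSpecBase(None)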
Example 14
Source File: AkkaTest.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.testing

import java.util
import java.util.concurrent.{Executors, ScheduledExecutorService}

import akka.NotUsed
import akka.actor.{ActorSystem, Scheduler}
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.Materializer
import akka.util.ByteString
import com.daml.grpc.adapter.{ExecutionSequencerFactory, SingleThreadExecutionSequencerPool}
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
import com.typesafe.scalalogging.LazyLogging
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContextExecutor, Future}
import scala.util.control.NonFatal

trait AkkaTest extends BeforeAndAfterAll with LazyLogging { self: Suite =>
  // TestEventListener is needed for log testing
  private val loggers =
    util.Arrays.asList("akka.event.slf4j.Slf4jLogger", "akka.testkit.TestEventListener")
  protected implicit val sysConfig: Config = ConfigFactory
    .load()
    .withValue("akka.loggers", ConfigValueFactory.fromIterable(loggers))
    .withValue("akka.logger-startup-timeout", ConfigValueFactory.fromAnyRef("30s"))
    .withValue("akka.stdout-loglevel", ConfigValueFactory.fromAnyRef("INFO"))
  protected implicit val system: ActorSystem = ActorSystem("test", sysConfig)
  protected implicit val ec: ExecutionContextExecutor =
    system.dispatchers.lookup("test-dispatcher")
  protected implicit val scheduler: Scheduler = system.scheduler
  protected implicit val schedulerService: ScheduledExecutorService =
    Executors.newSingleThreadScheduledExecutor()
  protected implicit val materializer: Materializer = Materializer(system)
  protected implicit val esf: ExecutionSequencerFactory =
    new SingleThreadExecutionSequencerPool("testSequencerPool")
  protected val timeout: FiniteDuration = 2.minutes
  protected val shortTimeout: FiniteDuration = 5.seconds

  protected def await[T](fun: => Future[T]): T = Await.result(fun, timeout)

  protected def awaitShort[T](fun: => Future[T]): T = Await.result(fun, shortTimeout)

  protected def drain(source: Source[ByteString, NotUsed]): ByteString = {
    val futureResult: Future[ByteString] = source.runFold(ByteString.empty) { (a, b) =>
      a.concat(b)
    }
    awaitShort(futureResult)
  }

  protected def drain[A, B](source: Source[A, B]): Seq[A] = {
    val futureResult: Future[Seq[A]] = source.runWith(Sink.seq)
    awaitShort(futureResult)
  }

  override protected def afterAll(): Unit = {
    try {
      val _ = await(system.terminate())
    } catch {
      case NonFatal(_) => ()
    }
    schedulerService.shutdownNow()
    super.afterAll()
  }
} 
Example 15
Source File: ScalaUtilIT.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.binding.util

import java.util.concurrent.{Executors, ScheduledExecutorService}

import com.daml.ledger.client.binding.util.ScalaUtil.FutureOps
import org.scalatest.concurrent.AsyncTimeLimitedTests
import org.scalatest.time.Span
import org.scalatest.time.SpanSugar._
import org.scalatest.{AsyncWordSpec, BeforeAndAfterAll, Matchers}

import scala.concurrent.{Future, Promise, TimeoutException}

class ScalaUtilIT
    extends AsyncWordSpec
    with AsyncTimeLimitedTests
    with Matchers
    with BeforeAndAfterAll {

  implicit val scheduler: ScheduledExecutorService = Executors.newSingleThreadScheduledExecutor()

  override def afterAll(): Unit = {
    scheduler.shutdownNow()
    super.afterAll()
  }

  "FutureOps" can {

    "future with timeout" should {

      "fail Future with TimoutException after specified duration" in {
        val promise = Promise[Unit]() // never completes
        val future = promise.future.timeout("name", 1000.millis, 100.millis)
        recoverToSucceededIf[TimeoutException](future)
      }

      "be able to complete within specified duration" in {
        val future = Future {
          "result"
        }.timeoutWithDefaultWarn("name", 1.second)

        future.map(_ shouldBe "result")
      }

    }

  }
  override lazy val timeLimit: Span = 10.seconds
} 
Example 16
Source File: UtilTest.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.codegen

import com.daml.lf.data.Ref.{QualifiedName, PackageId}

import java.io.IOException
import java.nio.file.attribute.BasicFileAttributes
import java.nio.file.{FileVisitResult, Files, Path, SimpleFileVisitor}
import com.daml.lf.{iface => I}

import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
import org.scalatest.prop.GeneratorDrivenPropertyChecks

class UtilTest extends UtilTestHelpers with GeneratorDrivenPropertyChecks {

  val packageInterface =
    I.Interface(packageId = PackageId.assertFromString("abcdef"), typeDecls = Map.empty)
  val scalaPackageParts = Array("com", "digitalasset")
  val scalaPackage: String = scalaPackageParts.mkString(".")
  val util =
    lf.LFUtil(
      scalaPackage,
      I.EnvironmentInterface fromReaderInterfaces packageInterface,
      outputDir.toFile)

  def damlScalaName(damlNameSpace: Array[String], name: String): util.DamlScalaName =
    util.DamlScalaName(damlNameSpace, name)

  behavior of "Util"

  it should "mkDamlScalaName for a Contract named Test" in {
    val result = util.mkDamlScalaNameFromDirsAndName(Array(), "Test")
    result shouldEqual damlScalaName(Array.empty, "Test")
    result.packageName shouldEqual scalaPackage
    result.qualifiedName shouldEqual (scalaPackage + ".Test")
  }

  it should "mkDamlScalaName for a Template names foo.bar.Test" in {
    val result = util.mkDamlScalaName(Util.Template, QualifiedName assertFromString "foo.bar:Test")
    result shouldEqual damlScalaName(Array("foo", "bar"), "Test")
    result.packageName shouldEqual (scalaPackage + ".foo.bar")
    result.qualifiedName shouldEqual (scalaPackage + ".foo.bar.Test")
  }

  "partitionEithers" should "equal scalaz separate in simple cases" in forAll {
    iis: List[Either[Int, Int]] =>
      import scalaz.syntax.monadPlus._, scalaz.std.list._, scalaz.std.either._
      Util.partitionEithers(iis) shouldBe iis.separate
  }

}

abstract class UtilTestHelpers extends FlatSpec with Matchers with BeforeAndAfterAll {

  val outputDir = Files.createTempDirectory("codegenUtilTest")

  override protected def afterAll(): Unit = {
    super.afterAll()
    deleteRecursively(outputDir)
  }

  def deleteRecursively(dir: Path): Unit = {
    Files.walkFileTree(
      dir,
      new SimpleFileVisitor[Path] {
        override def postVisitDirectory(dir: Path, exc: IOException) = {
          Files.delete(dir)
          FileVisitResult.CONTINUE
        }

        override def visitFile(file: Path, attrs: BasicFileAttributes) = {
          Files.delete(file)
          FileVisitResult.CONTINUE
        }
      }
    )
    ()
  }
} 
Example 17
Source File: PostgresAroundEach.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.testing.postgresql

import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}

trait PostgresAroundEach
    extends PostgresAroundSuite
    with BeforeAndAfterAll
    with BeforeAndAfterEach {
  self: Suite =>

  override protected def beforeAll(): Unit = {
    // We start PostgreSQL before calling `super` because _generally_ the database needs to be up
    // before everything else.
    connectToPostgresqlServer()
    super.beforeAll()
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    disconnectFromPostgresqlServer()
  }

  override protected def beforeEach(): Unit = {
    // We create the database before calling `super` for the same reasons as above.
    createNewDatabase()
    super.beforeEach()
  }

  override protected def afterEach(): Unit = {
    super.afterEach()
    dropDatabase()
  }
} 
Example 18
Source File: PostgresAroundAll.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.testing.postgresql

import org.scalatest.{BeforeAndAfterAll, Suite}

trait PostgresAroundAll extends PostgresAroundSuite with BeforeAndAfterAll {
  self: Suite =>

  override protected def beforeAll(): Unit = {
    // We start PostgreSQL before calling `super` because _generally_ the database needs to be up
    // before everything else.
    connectToPostgresqlServer()
    createNewDatabase()
    super.beforeAll()
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    disconnectFromPostgresqlServer()
  }
} 
Example 19
Source File: TestTableStatsSinglePathMain.scala    From Spark.TableStatsExample   with Apache License 2.0
package com.cloudera.sa.examples.tablestats


import org.apache.spark.{SparkContext, SparkConf}
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{StringType, LongType, StructField, StructType}
import org.scalatest.{FunSuite, BeforeAndAfterEach, BeforeAndAfterAll}


class TestTableStatsSinglePathMain extends FunSuite with BeforeAndAfterEach with BeforeAndAfterAll {
  test("run table stats on sample data") {

    val sparkConfig = new SparkConf()
    sparkConfig.set("spark.broadcast.compress", "false")
    sparkConfig.set("spark.shuffle.compress", "false")
    sparkConfig.set("spark.shuffle.spill.compress", "false")
    var sc = new SparkContext("local", "test", sparkConfig)
    try {
      val sqlContext = new org.apache.spark.sql.SQLContext(sc)

      val schema =
        StructType(
          Array(
            StructField("id", LongType, true),
            StructField("name", StringType, true),
            StructField("age", LongType, true),
            StructField("gender", StringType, true),
            StructField("height", LongType, true),
            StructField("job_title", StringType, true)
          )
        )

      val rowRDD = sc.parallelize(Array(
        Row(1L, "Name.1", 20L, "M", 6L, "dad"),
        Row(2L, "Name.2", 20L, "F", 5L, "mom"),
        Row(3L, "Name.3", 20L, "F", 5L, "mom"),
        Row(4L, "Name.4", 20L, "M", 5L, "mom"),
        Row(5L, "Name.5", 10L, "M", 4L, "kid"),
        Row(6L, "Name.6", 8L, "M", 3L, "kid")))

      val df = sqlContext.createDataFrame(rowRDD, schema)

      val firstPassStats = TableStatsSinglePathMain.getFirstPassStat(df)

      assertResult(6L)(firstPassStats.columnStatsMap(0).maxLong)
      assertResult(1L)(firstPassStats.columnStatsMap(0).minLong)
      assertResult(21L)(firstPassStats.columnStatsMap(0).sumLong)
      assertResult(3L)(firstPassStats.columnStatsMap(0).avgLong)

      assertResult(2)(firstPassStats.columnStatsMap(3).topNValues.topNCountsForColumnArray.length)

      firstPassStats.columnStatsMap(3).topNValues.topNCountsForColumnArray.foreach { r =>
        if (r._1.equals("M")) {
          assertResult(4L)(r._2)
        } else if (r._1.equals("F")) {
          assertResult(2L)(r._2)
        } else {
          throw new RuntimeException("Unknown gender: " + r._1)
        }
      }
    } finally {
      sc.stop()
    }
  }
} 
Example 20
Source File: TestDB.scala    From slick-jdbc-extension-scala   with MIT License
package com.github.tarao
package slickjdbc
package helper

import scala.language.implicitConversions
import scala.concurrent.duration.Duration
import org.scalatest.{FunSpec, BeforeAndAfterAll, BeforeAndAfterEach}
import slick.jdbc.H2Profile.api.Database

case class Timeout(duration: Duration)
object Timeout {
  implicit val forever: Timeout = Timeout(Duration.Inf)
}

class DBRunner(val db: Database) {
  import scala.concurrent.{Future, Await}
  import slick.driver.H2Driver.api.Database
  import slick.dbio.{DBIOAction, NoStream, Effect}

  def run[R](a: DBIOAction[R, NoStream, Nothing])(implicit
    timeout: Timeout
  ): R = Await.result(db.run(a), timeout.duration)

  def close = db.close
}

object FreshId {
  var id = 0
  def apply() = { id = max; id }
  def max = { id + 1 }
}

trait Repository {
  def db: DBRunner
}

trait TestDB extends BeforeAndAfterAll with BeforeAndAfterEach {
  self: FunSpec =>

  lazy val config = {
    import com.typesafe.config.{ConfigFactory, ConfigValueFactory => V}
    import slick.jdbc.JdbcDataSource

    // Rewrite the database name to a thread-local one so that writes from
    // multiple test threads running in parallel won't conflict with each other.
    val c = ConfigFactory.load.getConfig("h2memtest")
    val name = "test" + Thread.currentThread.getId
    val url = c.getString("url").replaceFirst("""\btest\b""", name)
    c.withValue("url", V.fromAnyRef(url))
  }

  lazy val db = new DBRunner(Database.forConfig("", config))

  override def beforeAll = {
    import slick.driver.H2Driver.api._

    db.run { sqlu"""
      CREATE TABLE IF NOT EXISTS entry (
        entry_id BIGINT NOT NULL PRIMARY KEY,
        url VARCHAR(2048) NOT NULL UNIQUE
      )
    """ }

    db.run { sqlu"""
      CREATE TABLE IF NOT EXISTS ids (
        id BIGINT NOT NULL PRIMARY KEY
      )
    """ }

    super.beforeAll
  }

  override def afterAll = {
    db.close
    super.afterAll
  }
} 
Example 21
Source File: LocalClusterSparkContext.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.mllib.util

import org.scalatest.{BeforeAndAfterAll, Suite}

import org.apache.spark.{SparkConf, SparkContext}

trait LocalClusterSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local-cluster[2, 1, 1024]")
      .setAppName("test-cluster")
      .set("spark.rpc.message.maxSize", "1") // set to 1MB to detect direct serialization of data
    sc = new SparkContext(conf)
  }

  override def afterAll() {
    try {
      if (sc != null) {
        sc.stop()
      }
    } finally {
      super.afterAll()
    }
  }
} 
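A usage sketch (the test body is illustrative; local-cluster mode assumes a Spark test environment): a suite mixing in the trait gets a SparkContext backed by a two-worker local cluster, created in beforeAll and stopped in afterAll.

import org.apache.spark.mllib.util.LocalClusterSparkContext
import org.scalatest.FunSuite

class CountSuite extends FunSuite with LocalClusterSparkContext {
  test("counting works across the local cluster") {
    assert(sc.parallelize(1 to 100, 4).count() === 100)
  }
}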
Example 22
Source File: KafkaStreamSuite.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.streaming.kafka

import scala.collection.mutable
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Random

import kafka.serializer.StringDecoder
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.Eventually

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext}

class KafkaStreamSuite extends SparkFunSuite with Eventually with BeforeAndAfterAll {
  private var ssc: StreamingContext = _
  private var kafkaTestUtils: KafkaTestUtils = _

  override def beforeAll(): Unit = {
    kafkaTestUtils = new KafkaTestUtils
    kafkaTestUtils.setup()
  }

  override def afterAll(): Unit = {
    if (ssc != null) {
      ssc.stop()
      ssc = null
    }

    if (kafkaTestUtils != null) {
      kafkaTestUtils.teardown()
      kafkaTestUtils = null
    }
  }

  test("Kafka input stream") {
    val sparkConf = new SparkConf().setMaster("local[4]").setAppName(this.getClass.getSimpleName)
    ssc = new StreamingContext(sparkConf, Milliseconds(500))
    val topic = "topic1"
    val sent = Map("a" -> 5, "b" -> 3, "c" -> 10)
    kafkaTestUtils.createTopic(topic)
    kafkaTestUtils.sendMessages(topic, sent)

    val kafkaParams = Map("zookeeper.connect" -> kafkaTestUtils.zkAddress,
      "group.id" -> s"test-consumer-${Random.nextInt(10000)}",
      "auto.offset.reset" -> "smallest")

    val stream = KafkaUtils.createStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaParams, Map(topic -> 1), StorageLevel.MEMORY_ONLY)
    val result = new mutable.HashMap[String, Long]()
    stream.map(_._2).countByValue().foreachRDD { r =>
      r.collect().foreach { kv =>
        result.synchronized {
          val count = result.getOrElseUpdate(kv._1, 0) + kv._2
          result.put(kv._1, count)
        }
      }
    }

    ssc.start()

    eventually(timeout(10000 milliseconds), interval(100 milliseconds)) {
      assert(result.synchronized { sent === result })
    }
  }
} 
Example 23
Source File: KafkaClusterSuite.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.streaming.kafka

import scala.util.Random

import kafka.common.TopicAndPartition
import org.scalatest.BeforeAndAfterAll

import org.apache.spark.SparkFunSuite

class KafkaClusterSuite extends SparkFunSuite with BeforeAndAfterAll {
  private val topic = "kcsuitetopic" + Random.nextInt(10000)
  private val topicAndPartition = TopicAndPartition(topic, 0)
  private var kc: KafkaCluster = null

  private var kafkaTestUtils: KafkaTestUtils = _

  override def beforeAll() {
    kafkaTestUtils = new KafkaTestUtils
    kafkaTestUtils.setup()

    kafkaTestUtils.createTopic(topic)
    kafkaTestUtils.sendMessages(topic, Map("a" -> 1))
    kc = new KafkaCluster(Map("metadata.broker.list" -> kafkaTestUtils.brokerAddress))
  }

  override def afterAll() {
    if (kafkaTestUtils != null) {
      kafkaTestUtils.teardown()
      kafkaTestUtils = null
    }
  }

  test("metadata apis") {
    val leader = kc.findLeaders(Set(topicAndPartition)).right.get(topicAndPartition)
    val leaderAddress = s"${leader._1}:${leader._2}"
    assert(leaderAddress === kafkaTestUtils.brokerAddress, "didn't get leader")

    val parts = kc.getPartitions(Set(topic)).right.get
    assert(parts(topicAndPartition), "didn't get partitions")

    val err = kc.getPartitions(Set(topic + "BAD"))
    assert(err.isLeft, "getPartitions for a nonexistent topic should be an error")
  }

  test("leader offset apis") {
    val earliest = kc.getEarliestLeaderOffsets(Set(topicAndPartition)).right.get
    assert(earliest(topicAndPartition).offset === 0, "didn't get earliest")

    val latest = kc.getLatestLeaderOffsets(Set(topicAndPartition)).right.get
    assert(latest(topicAndPartition).offset === 1, "didn't get latest")
  }

  test("consumer offset apis") {
    val group = "kcsuitegroup" + Random.nextInt(10000)

    val offset = Random.nextInt(10000)

    val set = kc.setConsumerOffsets(group, Map(topicAndPartition -> offset))
    assert(set.isRight, "didn't set consumer offsets")

    val get = kc.getConsumerOffsets(group, Set(topicAndPartition)).right.get
    assert(get(topicAndPartition) === offset, "didn't get consumer offsets")
  }
} 
Example 24
Source File: TestHiveSingleton.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.sql.hive.test

import org.scalatest.BeforeAndAfterAll

import org.apache.spark.sql.SparkSession
import org.apache.spark.SparkFunSuite


trait TestHiveSingleton extends SparkFunSuite with BeforeAndAfterAll {
  protected val spark: SparkSession = TestHive.sparkSession
  protected val hiveContext: TestHiveContext = TestHive

  protected override def afterAll(): Unit = {
    try {
      hiveContext.reset()
    } finally {
      super.afterAll()
    }
  }

} 
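A usage sketch (the query is illustrative; running it assumes the Hive test harness is on the classpath): suites extend the trait to share the single TestHive session, which is reset once in afterAll.

import org.apache.spark.sql.hive.test.TestHiveSingleton

class SimpleHiveQuerySuite extends TestHiveSingleton {
  test("runs SQL against the shared TestHive session") {
    assert(spark.sql("SELECT 1 + 1").head().getInt(0) === 2)
  }
}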
Example 25
Source File: HiveSerDeSuite.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.sql.hive.execution

import org.scalatest.BeforeAndAfterAll

import org.apache.spark.sql.hive.test.TestHive


class HiveSerDeSuite extends HiveComparisonTest with BeforeAndAfterAll {
  override def beforeAll(): Unit = {
    import TestHive._
    import org.apache.hadoop.hive.serde2.RegexSerDe
    super.beforeAll()
    TestHive.setCacheTables(false)
    sql(s"""CREATE TABLE IF NOT EXISTS sales (key STRING, value INT)
       |ROW FORMAT SERDE '${classOf[RegexSerDe].getCanonicalName}'
       |WITH SERDEPROPERTIES ("input.regex" = "([^ ]*)\t([^ ]*)")
       """.stripMargin)
    sql(s"LOAD DATA LOCAL INPATH '${getHiveFile("data/files/sales.txt")}' INTO TABLE sales")
  }

  // table sales is not a cached table, and will be cleared after reset
  createQueryTest("Read with RegexSerDe", "SELECT * FROM sales", false)

  createQueryTest(
    "Read and write with LazySimpleSerDe (tab separated)",
    "SELECT * from serdeins")

  createQueryTest("Read with AvroSerDe", "SELECT * FROM episodes")

  createQueryTest("Read Partitioned with AvroSerDe", "SELECT * FROM episodes_part")
} 
Example 26
Source File: ConcurrentHiveSuite.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.sql.hive.execution

import org.scalatest.BeforeAndAfterAll

import org.apache.spark.{SparkConf, SparkContext, SparkFunSuite}
import org.apache.spark.sql.hive.test.TestHiveContext

class ConcurrentHiveSuite extends SparkFunSuite with BeforeAndAfterAll {
  ignore("multiple instances not supported") {
    test("Multiple Hive Instances") {
      (1 to 10).map { i =>
        val conf = new SparkConf()
        conf.set("spark.ui.enabled", "false")
        val ts =
          new TestHiveContext(new SparkContext("local", s"TestSQLContext$i", conf))
        ts.sparkSession.sql("SHOW TABLES").collect()
        ts.sparkSession.sql("SELECT * FROM src").collect()
        ts.sparkSession.sql("SHOW TABLES").collect()
      }
    }
  }
} 
Example 27
Source File: ListTablesSuite.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.sql.hive

import org.scalatest.BeforeAndAfterAll

import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.hive.test.TestHiveSingleton

class ListTablesSuite extends QueryTest with TestHiveSingleton with BeforeAndAfterAll {
  import hiveContext._
  import hiveContext.implicits._

  val df = sparkContext.parallelize((1 to 10).map(i => (i, s"str$i"))).toDF("key", "value")

  override def beforeAll(): Unit = {
    super.beforeAll()
    // The catalog in HiveContext is a case insensitive one.
    sessionState.catalog.createTempView(
      "ListTablesSuiteTable", df.logicalPlan, overrideIfExists = true)
    sql("CREATE TABLE HiveListTablesSuiteTable (key int, value string)")
    sql("CREATE DATABASE IF NOT EXISTS ListTablesSuiteDB")
    sql("CREATE TABLE ListTablesSuiteDB.HiveInDBListTablesSuiteTable (key int, value string)")
  }

  override def afterAll(): Unit = {
    try {
      sessionState.catalog.dropTable(
        TableIdentifier("ListTablesSuiteTable"), ignoreIfNotExists = true, purge = false)
      sql("DROP TABLE IF EXISTS HiveListTablesSuiteTable")
      sql("DROP TABLE IF EXISTS ListTablesSuiteDB.HiveInDBListTablesSuiteTable")
      sql("DROP DATABASE IF EXISTS ListTablesSuiteDB")
    } finally {
      super.afterAll()
    }
  }

  test("get all tables of current database") {
    Seq(tables(), sql("SHOW TABLes")).foreach {
      case allTables =>
        // We are using default DB.
        checkAnswer(
          allTables.filter("tableName = 'listtablessuitetable'"),
          Row("", "listtablessuitetable", true))
        checkAnswer(
          allTables.filter("tableName = 'hivelisttablessuitetable'"),
          Row("default", "hivelisttablessuitetable", false))
        assert(allTables.filter("tableName = 'hiveindblisttablessuitetable'").count() === 0)
    }
  }

  test("getting all tables with a database name") {
    Seq(tables("listtablessuiteDb"), sql("SHOW TABLes in listTablesSuitedb")).foreach {
      case allTables =>
        checkAnswer(
          allTables.filter("tableName = 'listtablessuitetable'"),
          Row("", "listtablessuitetable", true))
        assert(allTables.filter("tableName = 'hivelisttablessuitetable'").count() === 0)
        checkAnswer(
          allTables.filter("tableName = 'hiveindblisttablessuitetable'"),
          Row("listtablessuitedb", "hiveindblisttablessuitetable", false))
    }
  }
} 
Example 28
Source File: UISeleniumSuite.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.sql.hive.thriftserver

import scala.util.Random

import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.openqa.selenium.WebDriver
import org.openqa.selenium.htmlunit.HtmlUnitDriver
import org.scalatest.{BeforeAndAfterAll, Matchers}
import org.scalatest.concurrent.Eventually._
import org.scalatest.selenium.WebBrowser
import org.scalatest.time.SpanSugar._

import org.apache.spark.ui.SparkUICssErrorHandler

class UISeleniumSuite
  extends HiveThriftJdbcTest
  with WebBrowser with Matchers with BeforeAndAfterAll {

  implicit var webDriver: WebDriver = _
  var server: HiveThriftServer2 = _
  val uiPort = 20000 + Random.nextInt(10000)
  override def mode: ServerMode.Value = ServerMode.binary

  override def beforeAll(): Unit = {
    webDriver = new HtmlUnitDriver {
      getWebClient.setCssErrorHandler(new SparkUICssErrorHandler)
    }
    super.beforeAll()
  }

  override def afterAll(): Unit = {
    if (webDriver != null) {
      webDriver.quit()
    }
    super.afterAll()
  }

  override protected def serverStartCommand(port: Int) = {
    val portConf = if (mode == ServerMode.binary) {
      ConfVars.HIVE_SERVER2_THRIFT_PORT
    } else {
      ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT
    }

    s"""$startScript
        |  --master local
        |  --hiveconf hive.root.logger=INFO,console
        |  --hiveconf ${ConfVars.METASTORECONNECTURLKEY}=$metastoreJdbcUri
        |  --hiveconf ${ConfVars.METASTOREWAREHOUSE}=$warehousePath
        |  --hiveconf ${ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST}=localhost
        |  --hiveconf ${ConfVars.HIVE_SERVER2_TRANSPORT_MODE}=$mode
        |  --hiveconf $portConf=$port
        |  --driver-class-path ${sys.props("java.class.path")}
        |  --conf spark.ui.enabled=true
        |  --conf spark.ui.port=$uiPort
     """.stripMargin.split("\\s+").toSeq
  }

  ignore("thrift server ui test") {
    withJdbcStatement { statement =>
      val baseURL = s"http://localhost:$uiPort"

      val queries = Seq(
        "CREATE TABLE test_map(key INT, value STRING)",
        s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_map")

      queries.foreach(statement.execute)

      eventually(timeout(10 seconds), interval(50 milliseconds)) {
        go to baseURL
        find(cssSelector("""ul li a[href*="sql"]""")) should not be None
      }

      eventually(timeout(10 seconds), interval(50 milliseconds)) {
        go to (baseURL + "/sql")
        find(id("sessionstat")) should not be None
        find(id("sqlstat")) should not be None

        // check whether the statements exist
        queries.foreach { line =>
          findAll(cssSelector("""ul table tbody tr td""")).map(_.text).toList should contain (line)
        }
      }
    }
  }
} 
Example 29
Source File: IOEncryptionSuite.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.deploy.yarn

import java.io._
import java.nio.charset.StandardCharsets
import java.security.PrivilegedExceptionAction
import java.util.UUID

import org.apache.hadoop.security.{Credentials, UserGroupInformation}
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Matchers}

import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.config._
import org.apache.spark.serializer._
import org.apache.spark.storage._

class IOEncryptionSuite extends SparkFunSuite with Matchers with BeforeAndAfterAll
  with BeforeAndAfterEach {
  private[this] val blockId = new TempShuffleBlockId(UUID.randomUUID())
  private[this] val conf = new SparkConf()
  private[this] val ugi = UserGroupInformation.createUserForTesting("testuser", Array("testgroup"))
  private[this] val serializer = new KryoSerializer(conf)

  override def beforeAll(): Unit = {
    System.setProperty("SPARK_YARN_MODE", "true")
    ugi.doAs(new PrivilegedExceptionAction[Unit]() {
      override def run(): Unit = {
        conf.set(IO_ENCRYPTION_ENABLED, true)
        val creds = new Credentials()
        SecurityManager.initIOEncryptionKey(conf, creds)
        SparkHadoopUtil.get.addCurrentUserCredentials(creds)
      }
    })
  }

  override def afterAll(): Unit = {
    SparkEnv.set(null)
    System.clearProperty("SPARK_YARN_MODE")
  }

  override def beforeEach(): Unit = {
    super.beforeEach()
  }

  override def afterEach(): Unit = {
    super.afterEach()
    conf.set("spark.shuffle.compress", false.toString)
    conf.set("spark.shuffle.spill.compress", false.toString)
  }

  test("IO encryption read and write") {
    ugi.doAs(new PrivilegedExceptionAction[Unit] {
      override def run(): Unit = {
        conf.set(IO_ENCRYPTION_ENABLED, true)
        conf.set("spark.shuffle.compress", false.toString)
        conf.set("spark.shuffle.spill.compress", false.toString)
        testYarnIOEncryptionWriteRead()
      }
    })
  }

  test("IO encryption read and write with shuffle compression enabled") {
    ugi.doAs(new PrivilegedExceptionAction[Unit] {
      override def run(): Unit = {
        conf.set(IO_ENCRYPTION_ENABLED, true)
        conf.set("spark.shuffle.compress", true.toString)
        conf.set("spark.shuffle.spill.compress", true.toString)
        testYarnIOEncryptionWriteRead()
      }
    })
  }

  private[this] def testYarnIOEncryptionWriteRead(): Unit = {
    val plainStr = "hello world"
    val outputStream = new ByteArrayOutputStream()
    val serializerManager = new SerializerManager(serializer, conf)
    val wrappedOutputStream = serializerManager.wrapStream(blockId, outputStream)
    wrappedOutputStream.write(plainStr.getBytes(StandardCharsets.UTF_8))
    wrappedOutputStream.close()

    val encryptedBytes = outputStream.toByteArray
    val encryptedStr = new String(encryptedBytes)
    assert(plainStr !== encryptedStr)

    val inputStream = new ByteArrayInputStream(encryptedBytes)
    val wrappedInputStream = serializerManager.wrapStream(blockId, inputStream)
    val decryptedBytes = new Array[Byte](1024)
    val len = wrappedInputStream.read(decryptedBytes)
    val decryptedStr = new String(decryptedBytes, 0, len, StandardCharsets.UTF_8)
    assert(decryptedStr === plainStr)
  }
} 
Example 30
Source File: SharedSparkContext.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark

import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatest.Suite


trait SharedSparkContext extends BeforeAndAfterAll with BeforeAndAfterEach { self: Suite =>

  @transient private var _sc: SparkContext = _

  def sc: SparkContext = _sc

  var conf = new SparkConf(false)

  override def beforeAll() {
    super.beforeAll()
    _sc = new SparkContext(
      "local[4]", "test", conf.set("spark.hadoop.fs.file.impl", classOf[DebugFilesystem].getName))
  }

  override def afterAll() {
    try {
      LocalSparkContext.stop(_sc)
      _sc = null
    } finally {
      super.afterAll()
    }
  }

  protected override def beforeEach(): Unit = {
    super.beforeEach()
    DebugFilesystem.clearOpenStreams()
  }

  protected override def afterEach(): Unit = {
    super.afterEach()
    DebugFilesystem.assertNoOpenStreams()
  }
} 
Example 31
Source File: SparkFunSuite.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark

// scalastyle:off
import java.io.File

import org.scalatest.{BeforeAndAfterAll, FunSuite, Outcome}

import org.apache.spark.internal.Logging
import org.apache.spark.util.AccumulatorContext


abstract class SparkFunSuite extends FunSuite with BeforeAndAfterAll with Logging {

  final protected override def withFixture(test: NoArgTest): Outcome = {
    val testName = test.text
    val suiteName = this.getClass.getName
    val shortSuiteName = suiteName.replaceAll("org.apache.spark", "o.a.s")
    try {
      logInfo(s"\n\n===== TEST OUTPUT FOR $shortSuiteName: '$testName' =====\n")
      test()
    } finally {
      logInfo(s"\n\n===== FINISHED $shortSuiteName: '$testName' =====\n")
    }
  }

} 
Example 32
Source File: MasterWebUISuite.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy.master.ui

import java.io.DataOutputStream
import java.net.{HttpURLConnection, URL}
import java.nio.charset.StandardCharsets
import java.util.Date

import scala.collection.mutable.HashMap

import org.mockito.Mockito.{mock, times, verify, when}
import org.scalatest.BeforeAndAfterAll

import org.apache.spark.{SecurityManager, SparkConf, SparkFunSuite}
import org.apache.spark.deploy.DeployMessages.{KillDriverResponse, RequestKillDriver}
import org.apache.spark.deploy.DeployTestUtils._
import org.apache.spark.deploy.master._
import org.apache.spark.rpc.{RpcEndpointRef, RpcEnv}


class MasterWebUISuite extends SparkFunSuite with BeforeAndAfterAll {

  val conf = new SparkConf
  val securityMgr = new SecurityManager(conf)
  val rpcEnv = mock(classOf[RpcEnv])
  val master = mock(classOf[Master])
  val masterEndpointRef = mock(classOf[RpcEndpointRef])
  when(master.securityMgr).thenReturn(securityMgr)
  when(master.conf).thenReturn(conf)
  when(master.rpcEnv).thenReturn(rpcEnv)
  when(master.self).thenReturn(masterEndpointRef)
  val masterWebUI = new MasterWebUI(master, 0)

  override def beforeAll() {
    super.beforeAll()
    masterWebUI.bind()
  }

  override def afterAll() {
    masterWebUI.stop()
    super.afterAll()
  }

  test("kill application") {
    val appDesc = createAppDesc()
    // use new start date so it isn't filtered by UI
    val activeApp = new ApplicationInfo(
      new Date().getTime, "app-0", appDesc, new Date(), null, Int.MaxValue)

    when(master.idToApp).thenReturn(HashMap[String, ApplicationInfo]((activeApp.id, activeApp)))

    val url = s"http://localhost:${masterWebUI.boundPort}/app/kill/"
    val body = convPostDataToString(Map(("id", activeApp.id), ("terminate", "true")))
    val conn = sendHttpRequest(url, "POST", body)
    conn.getResponseCode

    // Verify the master was called to remove the active app
    verify(master, times(1)).removeApplication(activeApp, ApplicationState.KILLED)
  }

  test("kill driver") {
    val activeDriverId = "driver-0"
    val url = s"http://localhost:${masterWebUI.boundPort}/driver/kill/"
    val body = convPostDataToString(Map(("id", activeDriverId), ("terminate", "true")))
    val conn = sendHttpRequest(url, "POST", body)
    conn.getResponseCode

    // Verify that master was asked to kill driver with the correct id
    verify(masterEndpointRef, times(1)).ask[KillDriverResponse](RequestKillDriver(activeDriverId))
  }

  private def convPostDataToString(data: Map[String, String]): String = {
    (for ((name, value) <- data) yield s"$name=$value").mkString("&")
  }

  
  private def sendHttpRequest(
      url: String,
      method: String,
      body: String = ""): HttpURLConnection = {
    val conn = new URL(url).openConnection().asInstanceOf[HttpURLConnection]
    conn.setRequestMethod(method)
    if (body.nonEmpty) {
      conn.setDoOutput(true)
      conn.setRequestProperty("Content-Type", "application/x-www-form-urlencoded")
      conn.setRequestProperty("Content-Length", Integer.toString(body.length))
      val out = new DataOutputStream(conn.getOutputStream)
      out.write(body.getBytes(StandardCharsets.UTF_8))
      out.close()
    }
    conn
  }
} 
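One caveat in the helper above: convPostDataToString joins raw name=value pairs, which is fine for ids like app-0 or driver-0 but breaks as soon as a value contains reserved characters. A hedged variant that URL-encodes both sides; the method name is ours, not the suite's:

import java.net.URLEncoder
import java.nio.charset.StandardCharsets

private def encodePostData(data: Map[String, String]): String =
  data.map { case (name, value) =>
    // Escape reserved characters so they survive the form-encoded POST body
    URLEncoder.encode(name, StandardCharsets.UTF_8.name()) + "=" +
      URLEncoder.encode(value, StandardCharsets.UTF_8.name())
  }.mkString("&")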
Example 33
Source File: SortShuffleSuite.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark

import java.io.File

import scala.collection.JavaConverters._

import org.apache.commons.io.FileUtils
import org.apache.commons.io.filefilter.TrueFileFilter
import org.scalatest.BeforeAndAfterAll

import org.apache.spark.rdd.ShuffledRDD
import org.apache.spark.serializer.{JavaSerializer, KryoSerializer}
import org.apache.spark.shuffle.sort.SortShuffleManager
import org.apache.spark.util.Utils

class SortShuffleSuite extends ShuffleSuite with BeforeAndAfterAll {

  // This test suite should run all tests in ShuffleSuite with sort-based shuffle.

  private var tempDir: File = _

  override def beforeAll() {
    super.beforeAll()
    conf.set("spark.shuffle.manager", "sort")
  }

  override def beforeEach(): Unit = {
    super.beforeEach()
    tempDir = Utils.createTempDir()
    conf.set("spark.local.dir", tempDir.getAbsolutePath)
  }

  override def afterEach(): Unit = {
    try {
      Utils.deleteRecursively(tempDir)
    } finally {
      super.afterEach()
    }
  }

  test("SortShuffleManager properly cleans up files for shuffles that use the serialized path") {
    sc = new SparkContext("local", "test", conf)
    // Create a shuffled RDD and verify that it actually uses the new serialized map output path
    val rdd = sc.parallelize(1 to 10, 1).map(x => (x, x))
    val shuffledRdd = new ShuffledRDD[Int, Int, Int](rdd, new HashPartitioner(4))
      .setSerializer(new KryoSerializer(conf))
    val shuffleDep = shuffledRdd.dependencies.head.asInstanceOf[ShuffleDependency[_, _, _]]
    assert(SortShuffleManager.canUseSerializedShuffle(shuffleDep))
    ensureFilesAreCleanedUp(shuffledRdd)
  }

  test("SortShuffleManager properly cleans up files for shuffles that use the deserialized path") {
    sc = new SparkContext("local", "test", conf)
    // Create a shuffled RDD and verify that it actually uses the old deserialized map output path
    val rdd = sc.parallelize(1 to 10, 1).map(x => (x, x))
    val shuffledRdd = new ShuffledRDD[Int, Int, Int](rdd, new HashPartitioner(4))
      .setSerializer(new JavaSerializer(conf))
    val shuffleDep = shuffledRdd.dependencies.head.asInstanceOf[ShuffleDependency[_, _, _]]
    assert(!SortShuffleManager.canUseSerializedShuffle(shuffleDep))
    ensureFilesAreCleanedUp(shuffledRdd)
  }

  private def ensureFilesAreCleanedUp(shuffledRdd: ShuffledRDD[_, _, _]): Unit = {
    def getAllFiles: Set[File] =
      FileUtils.listFiles(tempDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE).asScala.toSet
    val filesBeforeShuffle = getAllFiles
    // Force the shuffle to be performed
    shuffledRdd.count()
    // Ensure that the shuffle actually created files that will need to be cleaned up
    val filesCreatedByShuffle = getAllFiles -- filesBeforeShuffle
    // Parenthesize the expected value so the matcher actually runs; a bare
    // `should be` followed by the Set on the next line compiles but asserts nothing.
    filesCreatedByShuffle.map(_.getName) should be (Set("shuffle_0_0_0.data", "shuffle_0_0_0.index"))
    // Check that the cleanup actually removes the files
    sc.env.blockManager.master.removeShuffle(0, blocking = true)
    for (file <- filesCreatedByShuffle) {
      assert (!file.exists(), s"Shuffle file $file was not cleaned up")
    }
  }
} 
Example 34
Source File: SparkListenerWithClusterSuite.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.scheduler

import scala.collection.mutable

import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll}

import org.apache.spark.{LocalSparkContext, SparkContext, SparkFunSuite}
import org.apache.spark.scheduler.cluster.ExecutorInfo


class SparkListenerWithClusterSuite extends SparkFunSuite with LocalSparkContext
  with BeforeAndAfter with BeforeAndAfterAll {

  // Length of time to wait while draining listener events.
  val WAIT_TIMEOUT_MILLIS = 10000

  before {
    sc = new SparkContext("local-cluster[2,1,1024]", "SparkListenerSuite")
  }

  test("SparkListener sends executor added message") {
    val listener = new SaveExecutorInfo
    sc.addSparkListener(listener)

    // This test will check if the number of executors received by "SparkListener" is the same as
    // the number of all executors, so we need to wait until all executors are up
    sc.jobProgressListener.waitUntilExecutorsUp(2, 60000)

    val rdd1 = sc.parallelize(1 to 100, 4)
    val rdd2 = rdd1.map(_.toString)
    rdd2.setName("Target RDD")
    rdd2.count()

    sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
    assert(listener.addedExecutorInfo.size == 2)
    assert(listener.addedExecutorInfo("0").totalCores == 1)
    assert(listener.addedExecutorInfo("1").totalCores == 1)
  }

  private class SaveExecutorInfo extends SparkListener {
    val addedExecutorInfo = mutable.Map[String, ExecutorInfo]()

    override def onExecutorAdded(executor: SparkListenerExecutorAdded) {
      addedExecutorInfo(executor.executorId) = executor.executorInfo
    }
  }
} 
Example 35
Source File: DiskBlockManagerSuite.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.storage

import java.io.{File, FileWriter}

import scala.language.reflectiveCalls

import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.util.Utils

class DiskBlockManagerSuite extends SparkFunSuite with BeforeAndAfterEach with BeforeAndAfterAll {
  private val testConf = new SparkConf(false)
  private var rootDir0: File = _
  private var rootDir1: File = _
  private var rootDirs: String = _

  var diskBlockManager: DiskBlockManager = _

  override def beforeAll() {
    super.beforeAll()
    rootDir0 = Utils.createTempDir()
    rootDir1 = Utils.createTempDir()
    rootDirs = rootDir0.getAbsolutePath + "," + rootDir1.getAbsolutePath
  }

  override def afterAll() {
    try {
      Utils.deleteRecursively(rootDir0)
      Utils.deleteRecursively(rootDir1)
    } finally {
      super.afterAll()
    }
  }

  override def beforeEach() {
    super.beforeEach()
    val conf = testConf.clone
    conf.set("spark.local.dir", rootDirs)
    diskBlockManager = new DiskBlockManager(conf, deleteFilesOnStop = true)
  }

  override def afterEach() {
    try {
      diskBlockManager.stop()
    } finally {
      super.afterEach()
    }
  }

  test("basic block creation") {
    val blockId = new TestBlockId("test")
    val newFile = diskBlockManager.getFile(blockId)
    writeToFile(newFile, 10)
    assert(diskBlockManager.containsBlock(blockId))
    newFile.delete()
    assert(!diskBlockManager.containsBlock(blockId))
  }

  test("enumerating blocks") {
    val ids = (1 to 100).map(i => TestBlockId("test_" + i))
    val files = ids.map(id => diskBlockManager.getFile(id))
    files.foreach(file => writeToFile(file, 10))
    assert(diskBlockManager.getAllBlocks.toSet === ids.toSet)
  }

  def writeToFile(file: File, numBytes: Int) {
    val writer = new FileWriter(file, true)
    for (i <- 0 until numBytes) writer.write(i)
    writer.close()
  }
} 
Example 36
Source File: SerializerSpecHelper.scala    From BigDL   with Apache License 2.0 5 votes vote down vote up
package com.intel.analytics.bigdl.utils.serializer

import java.io.{File}
import java.lang.reflect.Modifier

import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.nn.ops.{Exp => ExpOps, Pow => PowOps, Select => SelectOps, Sum => SumOps, Tile => TileOps}
import com.intel.analytics.bigdl.nn.tf.{DecodeGif => DecodeGifOps, DecodeJpeg => DecodeJpegOps, DecodePng => DecodePngOps, DecodeRaw => DecodeRawOps}
import com.intel.analytics.bigdl.utils.RandomGenerator.RNG
import com.intel.analytics.bigdl.utils.tf.loaders.{Pack => _}
import com.intel.analytics.bigdl.utils.{Shape => KShape}
import org.reflections.Reflections
import org.reflections.scanners.SubTypesScanner
import org.reflections.util.{ClasspathHelper, ConfigurationBuilder, FilterBuilder}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import scala.collection.JavaConverters._
import scala.collection.mutable


abstract class SerializerSpecHelper extends FlatSpec with Matchers with BeforeAndAfterAll{

  val postFix = "bigdl"
  val excludedClass = new mutable.HashSet[String]()
  val excludedPackage = new mutable.HashSet[String]()

  private val expected = new mutable.HashSet[String]()
  val tested = new mutable.HashSet[String]()

  private var executedCount = 0

  protected def getPackage(): String = ""

  protected def addExcludedClass(): Unit = {}

  protected def addExcludedPackage(): Unit = {}

  protected def getExpected(): mutable.Set[String] = expected

  override protected def beforeAll() = {
    addExcludedClass
    addExcludedPackage
    val filterBuilder = new FilterBuilder()
    excludedPackage.foreach(filterBuilder.excludePackage(_))
    val reflections = new Reflections(new ConfigurationBuilder()
      .filterInputsBy(filterBuilder)
      .setUrls(ClasspathHelper.forPackage(getPackage()))
      .setScanners(new SubTypesScanner()))
    val subTypes = reflections.getSubTypesOf(classOf[AbstractModule[_, _, _]])
      .asScala.filter(sub => !Modifier.isAbstract(sub.getModifiers)).
      filter(sub => !excludedClass.contains(sub.getName))
    subTypes.foreach(sub => expected.add(sub.getName))
  }

  protected def runSerializationTest(module : AbstractModule[_, _, Float],
                                   input : Activity, cls: Class[_] = null) : Unit = {
    runSerializationTestWithMultiClass(module, input,
      if (cls == null) Array(module.getClass) else Array(cls))
  }

  protected def runSerializationTestWithMultiClass(module : AbstractModule[_, _, Float],
      input : Activity, classes: Array[Class[_]]) : Unit = {
    val name = module.getName
    val serFile = File.createTempFile(name, postFix)
    val originForward = module.evaluate().forward(input)

    ModulePersister.saveToFile[Float](serFile.getAbsolutePath, null, module.evaluate(), true)
    RNG.setSeed(1000)
    val loadedModule = ModuleLoader.loadFromFile[Float](serFile.getAbsolutePath)

    val afterLoadForward = loadedModule.forward(input)

    if (serFile.exists) {
      serFile.delete
    }

    afterLoadForward should be (originForward)
    classes.foreach(cls => {
      if (getExpected.contains(cls.getName)) {
        tested.add(cls.getName)
      }
    })
  }


  override protected def afterAll() = {
    println(s"total ${getExpected.size}, remaining ${getExpected.size - tested.size}")
    tested.filter(!getExpected.contains(_)).foreach(t => {
      println(s"$t do not need to be tested")
    })
    getExpected.foreach(exp => {
      require(tested.contains(exp), s" $exp not included in the test!")
    })
  }
} 
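A hypothetical subclass sketch showing how the helper above is driven: getPackage() scopes the reflection scan in beforeAll, and each runSerializationTest call marks its module class as covered. Since afterAll requires every discovered module to be covered, a real subclass would also populate excludedClass/excludedPackage; the module and package here are illustrative assumptions:

import com.intel.analytics.bigdl.nn.Linear
import com.intel.analytics.bigdl.tensor.Tensor

class LinearSerializerSpec extends SerializerSpecHelper {
  // Limit the classpath scan to the package whose modules this suite covers.
  override protected def getPackage(): String = "com.intel.analytics.bigdl.nn"

  "Linear" should "survive a save/load round trip" in {
    val linear = Linear[Float](10, 2).setName("linear")
    runSerializationTest(linear, Tensor[Float](10).rand())
  }
}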
Example 37
Source File: MLlibTestSparkContext.scala    From spark-lp   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.mllib.util

import org.scalatest.{BeforeAndAfterAll, Suite}

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext


trait MLlibTestSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _
  @transient var sqlContext: SQLContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("MLlibUnitTest")
    sc = new SparkContext(conf)
    sc.setLogLevel("WARN")
    sqlContext = new SQLContext(sc)
  }

  override def afterAll() {
    sqlContext = null
    if (sc != null) {
      sc.stop()
    }
    sc = null
    super.afterAll()
  }
} 
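Because of the self-type (self: Suite =>), the trait can only be mixed into a ScalaTest suite. An illustrative consumer; the suite name and data are ours:

import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.scalatest.FunSuite

class DataFrameSmokeSuite extends FunSuite with MLlibTestSparkContext {
  test("sqlContext is initialized by beforeAll") {
    val df = sqlContext.createDataFrame(Seq((1, "a"), (2, "b"))).toDF("id", "label")
    assert(df.count() === 2L)
  }
}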
Example 38
Source File: TestBooleanCompressSuite.scala    From CarbonDataLearning   with GNU General Public License v3.0 5 votes vote down vote up
package org.github.xubo245.carbonDataLearning.booleanDataType

import java.io.File

import org.apache.carbondata.core.util.CarbonProperties
import org.apache.spark.sql.Row
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}

class TestBooleanCompressSuite extends QueryTest with BeforeAndAfterEach with BeforeAndAfterAll {
  val rootPath = new File(this.getClass.getResource("/").getPath
    + "../..").getCanonicalPath

  override def beforeEach(): Unit = {
    sql("drop table if exists boolean_table")
  }

  override def afterAll(): Unit = {
    sql("drop table if exists boolean_table")
    assert(BooleanFile.deleteFile(randomBoolean))
  }

  val pathOfManyDataType = s"$rootPath/src/test/resources/bool/supportBooleanBigFile.csv"
  val pathOfOnlyBoolean = s"$rootPath/src/test/resources/bool/supportBooleanBigFileOnlyBoolean.csv"
  val randomBoolean = s"$rootPath/src/test/resources/bool/supportRandomBooleanBigFile.csv"
  val trueNum = 10000000

  override def beforeAll(): Unit = {
    assert(BooleanFile.createBooleanFileRandom(randomBoolean, trueNum, 0.4))
    CarbonProperties.getInstance()
      .addProperty("carbon.storelocation", s"$rootPath/target/warehouse/")

  }

  test("test boolean compress rate: random file") {
    sql(
      s"""
         | CREATE TABLE boolean_table(
         | booleanField BOOLEAN
         | )
         | STORED BY 'carbondata'
       """.stripMargin)

    sql(
      s"""
         | LOAD DATA LOCAL INPATH '${randomBoolean}'
         | INTO TABLE boolean_table
         | options('FILEHEADER'='booleanField')
           """.stripMargin)

    sql("select * from boolean_table").show(100)
    sql("select count(*) from boolean_table").show()
    sql("select count(*) from boolean_table where booleanField= true").show()
    sql("select count(*) from boolean_table where booleanField= false").show()
    checkAnswer(
      sql("select count(*) from boolean_table"),
      Row(trueNum))
  }

} 
Example 39
Source File: TestSpec.scala    From akka-serialization-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.serialization.SerializationExtension
import akka.stream.{ ActorMaterializer, Materializer }
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.prop.PropertyChecks
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, GivenWhenThen, Matchers }

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try

trait TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with ScalaFutures
    with BeforeAndAfterAll
    with Eventually
    with PropertyChecks
    with AkkaPersistenceQueries
    with AkkaStreamUtils
    with InMemoryCleanup {

  implicit val timeout: Timeout = Timeout(10.seconds)
  implicit val system: ActorSystem = ActorSystem()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val mat: Materializer = ActorMaterializer()
  implicit val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 50.seconds)
  val serialization = SerializationExtension(system)

  implicit class FutureToTry[T](f: Future[T]) {
    def toTry: Try[T] = Try(f.futureValue)
  }

  def killActors(actors: ActorRef*): Unit = {
    val probe = TestProbe()
    actors.foreach { actor ⇒
      probe watch actor
      actor ! PoisonPill
      probe expectTerminated actor
    }
  }

  override protected def afterAll(): Unit = {
    system.terminate()
    system.whenTerminated.toTry should be a 'success
  }
} 
Example 40
Source File: StatsClientSpec.scala    From opencensus-scala   with Apache License 2.0 5 votes vote down vote up
package io.opencensus.scala.akka.http

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import io.opencensus.scala.http.testSuite.MockStats
import io.opencensus.scala.stats.{Distribution, MeasurementDouble}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.flatspec.AsyncFlatSpec
import org.scalatest.matchers.should.Matchers

import scala.concurrent.Future

class StatsClientSpec
    extends AsyncFlatSpec
    with BeforeAndAfterAll
    with Matchers {
  implicit val system: ActorSystem = ActorSystem()

  def statsClientWithMock: (StatsClient, MockStats) = {
    val mockStats = new MockStats
    val client = new StatsClient {
      override private[http] val stats = mockStats
    }

    (client, mockStats)
  }

  it should "register the correct view" in {
    val (client, mock) = statsClientWithMock

    val doRequest =
      client.recorded(_ => Future.successful(HttpResponse()), "routeName")

    doRequest(HttpRequest()).flatMap(_.discardEntityBytes().future()).map { _ =>
      mock.registeredViews should have length 1

      val roundtripLatency = mock.registeredViews.head

      roundtripLatency.name shouldBe "opencensus.io/http/client/roundtrip_latency"
      roundtripLatency.measure.name shouldBe "opencensus.io/http/client/roundtrip_latency"
      roundtripLatency.aggregation shouldBe a[Distribution]
    }
  }
  it should "record the correct measure value" in {
    val (client, mock) = statsClientWithMock

    val doRequest =
      client.recorded(_ => Future.successful(HttpResponse()), "routeName")

    doRequest(HttpRequest()).flatMap(_.discardEntityBytes().future()).map { _ =>
      val (measurement, _) = mock.recordedMeasurements.head

      measurement match {
        case MeasurementDouble(measure, value) =>
          measure.name shouldBe "opencensus.io/http/client/roundtrip_latency"
          value.toInt shouldBe >(0)
        case other => fail(s"Expected MeasurementDouble got $other")
      }
    }
  }

  it should "record the correct measure tags" in {
    val (client, mock) = statsClientWithMock

    val doRequest =
      client.recorded(_ => Future.successful(HttpResponse()), "routeName")

    doRequest(HttpRequest()).flatMap(_.discardEntityBytes().future()).map { _ =>
      mock.recordedMeasurements should have length 1
      val (_, tags) = mock.recordedMeasurements.head

      val tagsKeyValues =
        tags.map(tag => (tag.key.getName, tag.value.asString()))

      val expectedTags = List(
        "http_client_method" -> "GET",
        "http_client_route"  -> "routeName",
        "http_client_status" -> "200"
      )

      tagsKeyValues should contain theSameElementsAs expectedTags
    }
  }

  override def afterAll(): Unit = {
    system.terminate()
    super.afterAll()
  }
} 
Example 41
Source File: SparkFunSuite.scala    From spark-alchemy   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark

// scalastyle:off
import java.io.File

import scala.annotation.tailrec
import org.apache.log4j.{Appender, Level, Logger}
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, BeforeAndAfterEach, FunSuite, Outcome, Suite}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.Tests.IS_TESTING
import org.apache.spark.util.{AccumulatorContext, Utils}


abstract class SparkFunSuite extends FunSuite with BeforeAndAfterAll with BeforeAndAfterEach with Logging {

  protected def withLogAppender(
    appender: Appender,
    loggerName: Option[String] = None,
    level: Option[Level] = None)(
    f: => Unit): Unit = {
    val logger = loggerName.map(Logger.getLogger).getOrElse(Logger.getRootLogger)
    val restoreLevel = logger.getLevel
    logger.addAppender(appender)
    if (level.isDefined) {
      logger.setLevel(level.get)
    }
    try f finally {
      logger.removeAppender(appender)
      if (level.isDefined) {
        logger.setLevel(restoreLevel)
      }
    }
  }
} 
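A sketch of how withLogAppender can be used to capture log output in a test; the buffering appender below is illustrative, not part of the original suite:

import org.apache.log4j.spi.LoggingEvent
import org.apache.log4j.{AppenderSkeleton, Level}
import scala.collection.mutable.ArrayBuffer

class BufferingAppender extends AppenderSkeleton {
  val events = ArrayBuffer.empty[LoggingEvent]
  override def append(event: LoggingEvent): Unit = events += event
  override def close(): Unit = ()
  override def requiresLayout(): Boolean = false
}

// Inside a suite that has the helper above in scope:
//   val appender = new BufferingAppender
//   withLogAppender(appender, level = Some(Level.WARN)) {
//     logWarning("disk almost full")
//   }
//   assert(appender.events.exists(_.getRenderedMessage.contains("disk almost full")))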
Example 42
Source File: FilterTopFeaturesProcessTest.scala    From incubator-s2graph   with Apache License 2.0 5 votes vote down vote up
package org.apache.s2graph.s2jobs.wal.process

import com.holdenkarau.spark.testing.DataFrameSuiteBase
import org.apache.s2graph.s2jobs.task.TaskConf
import org.apache.s2graph.s2jobs.wal.transformer.DefaultTransformer
import org.apache.s2graph.s2jobs.wal.{DimValCountRank, WalLogAgg}
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}

class FilterTopFeaturesProcessTest extends FunSuite with Matchers with BeforeAndAfterAll with DataFrameSuiteBase {
  import org.apache.s2graph.s2jobs.wal.TestData._

  test("test filterTopKsPerDim.") {
    import spark.implicits._
    val featureDf = spark.createDataset(featureDictExpected).map { x =>
      (x.dimVal.dim, x.dimVal.value, x.count, x.rank)
    }.toDF("dim", "value", "count", "rank")

    val maxRankPerDim = spark.sparkContext.broadcast(Map.empty[String, Int])

    // filter nothing because all features have rank < 10
    val filtered = FilterTopFeaturesProcess.filterTopKsPerDim(featureDf, maxRankPerDim, 10)

    val real = filtered.orderBy("dim", "rank").map(DimValCountRank.fromRow).collect()
    real.zip(featureDictExpected).foreach { case (real, expected) =>
        real shouldBe expected
    }
    // filter rank >= 2
    val filtered2 = FilterTopFeaturesProcess.filterTopKsPerDim(featureDf, maxRankPerDim, 2)
    val real2 = filtered2.orderBy("dim", "rank").map(DimValCountRank.fromRow).collect()
    real2 shouldBe featureDictExpected.filter(_.rank < 2)
  }


  test("test filterWalLogAgg.") {
    import spark.implicits._
    val walLogAgg = spark.createDataset(aggExpected)
    val featureDf = spark.createDataset(featureDictExpected).map { x =>
      (x.dimVal.dim, x.dimVal.value, x.count, x.rank)
    }.toDF("dim", "value", "count", "rank")
    val maxRankPerDim = spark.sparkContext.broadcast(Map.empty[String, Int])

    val transformers = Seq(DefaultTransformer(TaskConf.Empty))
    // filter nothing. so input, output should be same.
    val featureFiltered = FilterTopFeaturesProcess.filterTopKsPerDim(featureDf, maxRankPerDim, 10)
    val validFeatureHashKeys = FilterTopFeaturesProcess.collectDistinctFeatureHashes(spark, featureFiltered)
    val validFeatureHashKeysBCast = spark.sparkContext.broadcast(validFeatureHashKeys)
    val real = FilterTopFeaturesProcess.filterWalLogAgg(spark, walLogAgg, transformers, validFeatureHashKeysBCast)
      .collect().sortBy(_.from)

    real.zip(aggExpected).foreach { case (real, expected) =>
      real shouldBe expected
    }
  }

  test("test entire process. filter nothing.") {
    import spark.implicits._
    val df = spark.createDataset(aggExpected).toDF()
    val featureDf = spark.createDataset(featureDictExpected).map { x =>
      (x.dimVal.dim, x.dimVal.value, x.count, x.rank)
    }.toDF("dim", "value", "count", "rank")

    val inputKey = "input"
    val featureDictKey = "feature"
    // filter nothing since we did not specify maxRankPerDim and defaultMaxRank.
    val taskConf = new TaskConf(name = "test", `type` = "test",
      inputs = Seq(inputKey, featureDictKey),
      options = Map(
        "featureDict" -> featureDictKey,
        "walLogAgg" -> inputKey
      )
    )
    val inputMap = Map(inputKey -> df, featureDictKey -> featureDf)
    val job = new FilterTopFeaturesProcess(taskConf)
    val filtered = job.execute(spark, inputMap)
      .orderBy("from")
      .as[WalLogAgg]
      .collect()

    filtered.zip(aggExpected).foreach { case (real, expected) =>
      real shouldBe expected
    }

  }
} 
Example 43
Source File: BuildTopFeaturesProcessTest.scala    From incubator-s2graph   with Apache License 2.0 5 votes vote down vote up
package org.apache.s2graph.s2jobs.wal.process

import com.holdenkarau.spark.testing.DataFrameSuiteBase
import org.apache.s2graph.s2jobs.task.TaskConf
import org.apache.s2graph.s2jobs.wal.DimValCountRank
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}

class BuildTopFeaturesProcessTest extends FunSuite with Matchers with BeforeAndAfterAll with DataFrameSuiteBase {

  import org.apache.s2graph.s2jobs.wal.TestData._

  test("test entire process.") {
    import spark.implicits._
    val df = spark.createDataset(aggExpected).toDF()

    val taskConf = new TaskConf(name = "test", `type` = "test", inputs = Seq("input"),
      options = Map("minUserCount" -> "0")
    )
    val job = new BuildTopFeaturesProcess(taskConf)


    val inputMap = Map("input" -> df)
    val featureDicts = job.execute(spark, inputMap)
      .orderBy("dim", "rank")
      .map(DimValCountRank.fromRow)
      .collect()

    featureDicts shouldBe featureDictExpected

  }
} 
Example 44
Source File: WalLogAggregateProcessTest.scala    From incubator-s2graph   with Apache License 2.0 5 votes vote down vote up
package org.apache.s2graph.s2jobs.wal.process

import com.holdenkarau.spark.testing.DataFrameSuiteBase
import org.apache.s2graph.s2jobs.task.TaskConf
import org.apache.s2graph.s2jobs.wal._
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}

class WalLogAggregateProcessTest extends FunSuite with Matchers with BeforeAndAfterAll with DataFrameSuiteBase {
  import org.apache.s2graph.s2jobs.wal.TestData._

  test("test entire process") {
    import spark.sqlContext.implicits._

    val edges = spark.createDataset(walLogsLs).toDF()
    val processKey = "agg"
    val inputMap = Map(processKey -> edges)

    val taskConf = new TaskConf(name = "test", `type` = "agg", inputs = Seq(processKey),
      options = Map("maxNumOfEdges" -> "10")
    )

    val job = new WalLogAggregateProcess(taskConf = taskConf)
    val processed = job.execute(spark, inputMap)

    processed.printSchema()
    processed.orderBy("from").as[WalLogAgg].collect().zip(aggExpected).foreach { case (real, expected) =>
      real shouldBe expected
    }
  }

} 
Example 45
Source File: SchemaTest.scala    From incubator-s2graph   with Apache License 2.0 5 votes vote down vote up
package org.apache.s2graph.core.schema

import org.apache.s2graph.core.TestCommonWithModels
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}

import scala.concurrent.ExecutionContext

class SchemaTest extends FunSuite with Matchers with TestCommonWithModels with BeforeAndAfterAll {
  override def beforeAll(): Unit = {
    initTests()
  }

  override def afterAll(): Unit = {
    graph.shutdown()
  }

  test("test Label.findByName") {
    val labelOpt = Label.findByName(labelName, useCache = false)
    println(labelOpt)
    labelOpt.isDefined shouldBe true
    val indices = labelOpt.get.indices
    indices.size > 0 shouldBe true
    println(indices)
    val defaultIndexOpt = labelOpt.get.defaultIndex
    println(defaultIndexOpt)
    defaultIndexOpt.isDefined shouldBe true
    val metas = labelOpt.get.metaProps
    println(metas)
    metas.size > 0 shouldBe true
    val srcService = labelOpt.get.srcService
    println(srcService)
    val tgtService = labelOpt.get.tgtService
    println(tgtService)
    val service = labelOpt.get.service
    println(service)
    val srcColumn = labelOpt.get.srcService
    println(srcColumn)
    val tgtColumn = labelOpt.get.tgtService
    println(tgtColumn)
  }

  test("serialize/deserialize Schema.") {
    import scala.collection.JavaConverters._
    val originalMap = Schema.safeUpdateCache.asMap().asScala
    val newCache = Schema.fromBytes(config, Schema.toBytes())(ExecutionContext.Implicits.global)
    val newMap = newCache.asMap().asScala

    originalMap.size shouldBe newMap.size
    originalMap.keySet shouldBe newMap.keySet

    originalMap.keySet.foreach { key =>
      val (originalVal, _, _) = originalMap(key)
      val (newVal, _, _) = newMap(key)

      originalVal shouldBe newVal
    }
  }
} 
Example 46
Source File: CounterEtlFunctionsSpec.scala    From incubator-s2graph   with Apache License 2.0 5 votes vote down vote up
package org.apache.s2graph.counter.loader.core

import com.typesafe.config.ConfigFactory
import org.apache.s2graph.core.schema.{Label, Service}
import org.apache.s2graph.core.types.HBaseType
import org.apache.s2graph.core.{S2Graph, Management}
import org.apache.s2graph.counter.models.DBModel
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import scala.concurrent.ExecutionContext.Implicits.global

class CounterEtlFunctionsSpec extends FlatSpec with BeforeAndAfterAll with Matchers {
  val config = ConfigFactory.load()
  val cluster = config.getString("hbase.zookeeper.quorum")
  DBModel.initialize(config)

  val graph = new S2Graph(config)(global)
  val management = new Management(graph)

  override def beforeAll: Unit = {
    management.createService("test", cluster, "test", 1, None, "gz")
    management.createLabel("test_case", "test", "src", "string", "test", "tgt", "string", true, "test", Nil, Nil, "weak", None, None, HBaseType.DEFAULT_VERSION, false, "gz")
  }

  override def afterAll: Unit = {
    Label.delete(Label.findByName("test_case", false).get.id.get)
    Service.delete(Service.findByName("test", false).get.id.get)
  }

  "CounterEtlFunctions" should "parsing log" in {
    val data =
      """
        |1435107139287	insert	e	aaPHfITGUU0B_150212123559509	abcd	test_case	{"cateid":"100110102","shopid":"1","brandid":""}
        |1435106916136	insert	e	Tgc00-wtjp2B_140918153515441	efgh	test_case	{"cateid":"101104107","shopid":"2","brandid":""}
      """.stripMargin.trim.split('\n')
    val items = {
      for {
        line <- data
        item <- CounterEtlFunctions.parseEdgeFormat(line)
      } yield {
        item.action should equal("test_case")
        item
      }
    }

    items should have size 2
  }
} 
Example 47
Source File: MutateRouteSpec.scala    From incubator-s2graph   with Apache License 2.0 5 votes vote down vote up
package org.apache.s2graph.http

import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model._
import akka.http.scaladsl.testkit.ScalatestRouteTest
import com.typesafe.config.ConfigFactory
import org.apache.s2graph.core.Management.JsonModel.Prop
import org.apache.s2graph.core.S2Graph
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpec}
import org.slf4j.LoggerFactory
import play.api.libs.json.{JsValue, Json}

class MutateRouteSpec extends WordSpec with Matchers with PlayJsonSupport with ScalaFutures with ScalatestRouteTest with S2GraphMutateRoute with BeforeAndAfterAll {

  import scala.collection.JavaConverters._

  val dbUrl = "jdbc:h2:file:./var/metastore_mutate_route;MODE=MYSQL;AUTO_SERVER=true"
  val config =
    ConfigFactory.parseMap(Map("db.default.url" -> dbUrl).asJava)
  lazy val s2graph = new S2Graph(config.withFallback(ConfigFactory.load()))
  override val logger = LoggerFactory.getLogger(this.getClass)

  override def afterAll(): Unit = {
    s2graph.shutdown(true)
  }

  lazy val routes = mutateRoute

  val serviceName = "kakaoFavorites"
  val columnName = "userName"

  "MutateRoute" should {

    "be able to insert vertex (POST /mutate/vertex/insert)" in {
      s2graph.management.createService(serviceName, "localhost", s"${serviceName}-dev", 1, None)
      s2graph.management.createServiceColumn(serviceName, columnName, "string", Seq(Prop("age", "0", "integer")))

      // {"timestamp": 10, "serviceName": "s2graph", "columnName": "user", "id": 1, "props": {}}
      val param = Json.obj(
        "timestamp" -> 10,
        "serviceName" -> serviceName,
        "columnName" -> columnName,
        "id" -> "user_a",
        "props" -> Json.obj(
          "age" -> 20
        )
      )

      val entity = Marshal(param).to[MessageEntity].futureValue
      val request = Post("/vertex/insert").withEntity(entity)

      request ~> routes ~> check {
        status should ===(StatusCodes.OK)
        contentType should ===(ContentTypes.`application/json`)

        val response = entityAs[JsValue]
        response should ===(Json.toJson(Seq(true)))
      }
    }
  }
} 
Example 48
Source File: AdminRouteSpec.scala    From incubator-s2graph   with Apache License 2.0 5 votes vote down vote up
package org.apache.s2graph.http

import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model._
import akka.http.scaladsl.testkit.ScalatestRouteTest
import com.typesafe.config.ConfigFactory
import org.apache.s2graph.core.Management.JsonModel.Prop
import org.apache.s2graph.core.S2Graph
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpec}
import org.slf4j.LoggerFactory
import play.api.libs.json.{JsString, JsValue, Json}

class AdminRoutesSpec extends WordSpec with Matchers with ScalaFutures with ScalatestRouteTest with S2GraphAdminRoute with BeforeAndAfterAll {
  import scala.collection.JavaConverters._

  val dbUrl = "jdbc:h2:file:./var/metastore_admin_route;MODE=MYSQL;AUTO_SERVER=true"
  val config =
    ConfigFactory.parseMap(Map("db.default.url" -> dbUrl).asJava)
  lazy val s2graph = new S2Graph(config.withFallback(ConfigFactory.load()))
  override val logger = LoggerFactory.getLogger(this.getClass)

  override def afterAll(): Unit = {
    s2graph.shutdown(true)
  }

  lazy val routes = adminRoute

  val serviceName = "kakaoFavorites"
  val columnName = "userName"

  "AdminRoute" should {
    "be able to create service (POST /createService)" in {
      val serviceParam = Json.obj(
        "serviceName" -> serviceName,
        "compressionAlgorithm" -> "gz"
      )

      val serviceEntity = Marshal(serviceParam).to[MessageEntity].futureValue
      val request = Post("/createService").withEntity(serviceEntity)

      request ~> routes ~> check {
        status should ===(StatusCodes.Created)
        contentType should ===(ContentTypes.`application/json`)

        val response = entityAs[JsValue]

        (response \\ "name").head should ===(JsString("kakaoFavorites"))
        (response \\ "status").head should ===(JsString("ok"))
      }
    }

    "return service if present (GET /getService/{serviceName})" in {
      val request = HttpRequest(uri = s"/getService/$serviceName")

      request ~> routes ~> check {
        status should ===(StatusCodes.OK)
        contentType should ===(ContentTypes.`application/json`)

        val response = entityAs[JsValue]

        (response \\ "name").head should ===(JsString("kakaoFavorites"))
      }
    }

    "be able to create serviceColumn (POST /createServiceColumn)" in {
      val serviceColumnParam = Json.obj(
        "serviceName" -> serviceName,
        "columnName" -> columnName,
        "columnType" -> "string",
        "props" -> Json.toJson(
          Seq(
            Json.obj("name" -> "age", "defaultValue" -> "-1", "dataType" -> "integer")
          )
        )
      )

      val serviceColumnEntity = Marshal(serviceColumnParam).to[MessageEntity].futureValue
      val request = Post("/createServiceColumn").withEntity(serviceColumnEntity)

      request ~> routes ~> check {
        status should ===(StatusCodes.Created)
        contentType should ===(ContentTypes.`application/json`)

        val response = entityAs[JsValue]

        (response \\ "serviceName").head should ===(JsString("kakaoFavorites"))
        (response \\ "columnName").head should ===(JsString("userName"))
        (response \\ "status").head should ===(JsString("ok"))
      }
    }
  }
} 
Example 49
Source File: MatcherSpec.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.actors

import akka.actor.ActorSystem
import akka.testkit.TestKitBase
import com.typesafe.config.ConfigFactory
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.settings.loadConfig
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}

abstract class MatcherSpec(_actorSystemName: String) extends AnyWordSpecLike with MatcherSpecLike {
  protected def actorSystemName: String = _actorSystemName
}

trait MatcherSpecLike extends TestKitBase with Matchers with BeforeAndAfterAll with BeforeAndAfterEach with ScorexLogging {
  this: Suite =>

  protected def actorSystemName: String

  implicit override lazy val system: ActorSystem = ActorSystem(
    actorSystemName,
    loadConfig(ConfigFactory.empty())
  )

  override protected def afterAll(): Unit = {
    super.afterAll()
    shutdown(system)
  }
} 
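An illustrative concrete spec built on MatcherSpec; TestKitBase supplies testActor as the implicit sender, so expectMsg observes the echo reply. The actor and spec names are ours:

import akka.actor.{Actor, Props}

class EchoSpec extends MatcherSpec("echo-spec") {
  "an echo actor" should {
    "reply with whatever it receives" in {
      val echo = system.actorOf(Props(new Actor {
        def receive = { case m => sender() ! m } // bounce every message back
      }))
      echo ! "ping" // implicit sender is testActor from TestKitBase
      expectMsg("ping")
    }
  }
}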
Example 50
Source File: ExchangeTransactionCreatorSpecification.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.model

import com.wavesplatform.dex.domain.asset.Asset
import com.wavesplatform.dex.domain.bytes.ByteStr
import com.wavesplatform.dex.domain.crypto
import com.wavesplatform.dex.domain.crypto.Proofs
import com.wavesplatform.dex.domain.order.Order
import com.wavesplatform.dex.domain.order.OrderOps._
import com.wavesplatform.dex.domain.transaction.ExchangeTransactionV2
import com.wavesplatform.dex.domain.utils.EitherExt2
import com.wavesplatform.dex.{MatcherSpecBase, NoShrink}
import org.scalacheck.Gen
import org.scalamock.scalatest.PathMockFactory
import org.scalatest.matchers.should.Matchers
import org.scalatest.prop.TableDrivenPropertyChecks
import org.scalatest.wordspec.AnyWordSpec
import org.scalatest.{Assertion, BeforeAndAfterAll}
import org.scalatestplus.scalacheck.{ScalaCheckPropertyChecks => PropertyChecks}

import scala.concurrent.ExecutionContext.Implicits.global

class ExchangeTransactionCreatorSpecification
    extends AnyWordSpec
    with Matchers
    with MatcherSpecBase
    with BeforeAndAfterAll
    with PathMockFactory
    with PropertyChecks
    with NoShrink
    with TableDrivenPropertyChecks {

  private def getExchangeTransactionCreator(hasMatcherScript: Boolean = false,
                                            hasAssetScripts: Asset => Boolean = _ => false): ExchangeTransactionCreator = {
    new ExchangeTransactionCreator(MatcherAccount, matcherSettings.exchangeTxBaseFee, hasMatcherScript, hasAssetScripts)
  }

  "ExchangeTransactionCreator" should {
    "create an ExchangeTransactionV2" when {
      (List(1, 2, 3) ++ List(1, 2, 3)).combinations(2).foreach {
        case List(counterVersion, submittedVersion) =>
          s"counterVersion=$counterVersion, submittedVersion=$submittedVersion" in {
            val counter   = buy(wavesBtcPair, 100000, 0.0008, matcherFee = Some(2000L), version = counterVersion.toByte)
            val submitted = sell(wavesBtcPair, 100000, 0.0007, matcherFee = Some(1000L), version = submittedVersion.toByte)

            val tc = getExchangeTransactionCreator()
            val oe = mkOrderExecutedRaw(submitted, counter)

            tc.createTransaction(oe).explicitGet() shouldBe a[ExchangeTransactionV2]
          }
      }
    }

    "take fee from order executed event" when {
      "orders are matched fully" in {
        val preconditions = for { ((_, buyOrder), (_, sellOrder)) <- orderV3MirrorPairGenerator } yield (buyOrder, sellOrder)
        test(preconditions)
      }

      "orders are matched partially" in {
        val preconditions = for { ((_, buyOrder), (senderSell, sellOrder)) <- orderV3MirrorPairGenerator } yield {
          val sellOrderWithUpdatedAmount = sellOrder.updateAmount(sellOrder.amount / 2)
          val newSignature               = crypto.sign(senderSell, sellOrderWithUpdatedAmount.bodyBytes())
          val correctedSellOrder         = sellOrderWithUpdatedAmount.updateProofs(Proofs(Seq(ByteStr(newSignature))))

          (buyOrder, correctedSellOrder)
        }

        test(preconditions)
      }

      def test(preconditions: Gen[(Order, Order)]): Assertion = forAll(preconditions) {
        case (buyOrder, sellOrder) =>
          val tc = getExchangeTransactionCreator()
          val oe = mkOrderExecutedRaw(buyOrder, sellOrder)
          val tx = tc.createTransaction(oe).explicitGet()

          tx.buyMatcherFee shouldBe oe.submittedExecutedFee
          tx.sellMatcherFee shouldBe oe.counterExecutedFee
      }
    }
  }
} 
Example 51
Source File: MatcherSuiteBase.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.it

import java.nio.charset.StandardCharsets
import java.util.concurrent.ThreadLocalRandom

import cats.instances.FutureInstances
import com.wavesplatform.dex.asset.DoubleOps
import com.wavesplatform.dex.domain.account.KeyPair
import com.wavesplatform.dex.domain.asset.Asset
import com.wavesplatform.dex.domain.bytes.ByteStr
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.it.api.BaseContainersKit
import com.wavesplatform.dex.it.api.node.HasWavesNode
import com.wavesplatform.dex.it.config.{GenesisConfig, PredefinedAccounts, PredefinedAssets}
import com.wavesplatform.dex.it.dex.HasDex
import com.wavesplatform.dex.it.matchers.ItMatchers
import com.wavesplatform.dex.it.test.InformativeTestStart
import com.wavesplatform.dex.it.waves.{MkWavesEntities, ToWavesJConversions}
import com.wavesplatform.dex.test.matchers.DiffMatcherWithImplicits
import com.wavesplatform.dex.waves.WavesFeeConstants
import com.wavesplatform.it.api.ApiExtensions
import org.scalatest.concurrent.Eventually
import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.matchers.should.Matchers
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, CancelAfterFailure}

import scala.concurrent.duration.DurationInt

trait MatcherSuiteBase
    extends AnyFreeSpec
    with Matchers
    with CancelAfterFailure
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with BaseContainersKit
    with HasDex
    with HasWavesNode
    with MkWavesEntities
    with ApiExtensions
    with ItMatchers
    with DoubleOps
    with WavesFeeConstants
    with PredefinedAssets
    with PredefinedAccounts
    with DiffMatcherWithImplicits
    with InformativeTestStart
    with FutureInstances
    with ToWavesJConversions
    with ScorexLogging {

  GenesisConfig.setupAddressScheme()

  override protected val moduleName: String = "dex-it"

  override implicit def patienceConfig: PatienceConfig = super.patienceConfig.copy(timeout = 30.seconds, interval = 1.second)

  override protected def beforeAll(): Unit = {
    log.debug(s"Perform beforeAll")
    kafkaServer.foreach { _ =>
      createKafkaTopic(dexRunConfig.getString("waves.dex.events-queue.kafka.topic"))
    }
    wavesNode1.start()
    dex1.start()
  }

  override protected def afterAll(): Unit = {
    log.debug(s"Perform afterAll")
    stopBaseContainers()
    super.afterAll()
  }

  def createAccountWithBalance(balances: (Long, Asset)*): KeyPair = {
    val account = KeyPair(ByteStr(s"account-test-${ThreadLocalRandom.current().nextInt()}".getBytes(StandardCharsets.UTF_8)))

    balances.foreach {
      case (balance, asset) =>
        assert(
          wavesNode1.api.balance(alice, asset) >= balance,
          s"Alice doesn't have enough balance in ${asset.toString} to make a transfer"
        )
        broadcastAndAwait(mkTransfer(alice, account.toAddress, balance, asset))
    }
    account
  }
} 
Example 52
Source File: IntegrationSuiteBase.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.grpc.integration

import com.wavesplatform.dex.asset.DoubleOps
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.it.api.BaseContainersKit
import com.wavesplatform.dex.it.api.node.{HasWavesNode, NodeApiExtensions}
import com.wavesplatform.dex.it.config.{GenesisConfig, PredefinedAccounts, PredefinedAssets}
import com.wavesplatform.dex.it.test.InformativeTestStart
import com.wavesplatform.dex.it.waves.{MkWavesEntities, ToWavesJConversions}
import com.wavesplatform.dex.test.matchers.DiffMatcherWithImplicits
import com.wavesplatform.dex.waves.WavesFeeConstants
import org.scalatest.concurrent.Eventually
import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.matchers.should.Matchers
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}

import scala.concurrent.duration.DurationInt

trait IntegrationSuiteBase
    extends AnyFreeSpec
    with Matchers
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with BaseContainersKit
    with HasWavesNode
    with MkWavesEntities
    with WavesFeeConstants
    with NodeApiExtensions
    with PredefinedAssets
    with PredefinedAccounts
    with DoubleOps
    with DiffMatcherWithImplicits
    with InformativeTestStart
    with ToWavesJConversions
    with ScorexLogging {

  GenesisConfig.setupAddressScheme()

  override protected val moduleName: String = "waves-integration-it"

  override implicit def patienceConfig: PatienceConfig = super.patienceConfig.copy(timeout = 30.seconds, interval = 1.second)

  override protected def beforeAll(): Unit = {
    log.debug(s"Perform beforeAll")
    wavesNode1.start()
  }

  override protected def afterAll(): Unit = {
    log.debug(s"Perform afterAll")
    stopBaseContainers()
    super.afterAll()
  }
} 
Example 53
Source File: BlockchainCacheSpecification.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.grpc.integration.caches

import java.time.Duration
import java.util.concurrent.{ConcurrentHashMap, ExecutorService, Executors}

import mouse.any.anySyntaxMouse
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

import scala.collection.JavaConverters._
import scala.concurrent._

class BlockchainCacheSpecification extends AnyWordSpecLike with Matchers with BeforeAndAfterAll {

  private val executor: ExecutorService                          = Executors.newCachedThreadPool
  implicit private val blockingContext: ExecutionContextExecutor = ExecutionContext.fromExecutor(executor)

  private class BlockchainCacheTest(loader: String => Future[String], expiration: Option[Duration], invalidationPredicate: String => Boolean)
      extends BlockchainCache[String, String](loader, expiration, invalidationPredicate)

  private def createCache(loader: String => Future[String],
                          expiration: Option[Duration] = None,
                          invalidationPredicate: String => Boolean = _ => false): BlockchainCacheTest = {
    new BlockchainCacheTest(loader, expiration, invalidationPredicate)
  }

  override def afterAll(): Unit = {
    super.afterAll()
    executor.shutdownNow()
  }

  private val andThenAwaitTimeout = 300

  "BlockchainCache" should {

    "not keep failed futures" in {

      val goodKey = "good key"
      val badKey  = "gRPC Error"

      val keyAccessMap = new ConcurrentHashMap[String, Int] unsafeTap (m => { m.put(goodKey, 0); m.put(badKey, 0) })
      val gRPCError    = new RuntimeException("gRPC Error occurred")

      val cache =
        createCache(
          key => {
            (if (key == badKey) Future.failed(gRPCError) else Future.successful(s"value = $key")) unsafeTap { _ =>
              keyAccessMap.computeIfPresent(key, (_, prev) => prev + 1)
            }
          }
        )

      val badKeyAccessCount = 10

      Await.result(
        (1 to badKeyAccessCount).foldLeft { Future.successful("") } { (prev, _) =>
          for {
            _ <- prev
            _ <- cache get goodKey
            r <- cache get badKey recover { case _ => "sad" }
          } yield { Thread.sleep(andThenAwaitTimeout); r }
        },
        scala.concurrent.duration.Duration.Inf
      )

      keyAccessMap.get(goodKey) shouldBe 1
      keyAccessMap.get(badKey) should be > 1
    }

    "not keep values according to the predicate" in {

      val goodKey = "111"
      val badKey  = "222"

      val keyAccessMap = new ConcurrentHashMap[String, Int](Map(goodKey -> 0, badKey -> 0).asJava)

      val cache = createCache(
        key => { keyAccessMap.computeIfPresent(key, (_, prev) => prev + 1); Future.successful(key) },
        invalidationPredicate = _.startsWith("2")
      )

      Await.result(
        (1 to 10).foldLeft { Future.successful("") } { (prev, _) =>
          for {
            _ <- prev
            _ <- cache get goodKey
            r <- cache get badKey
          } yield blocking { Thread.sleep(andThenAwaitTimeout); r }
        },
        scala.concurrent.duration.Duration.Inf
      )

      keyAccessMap.get(goodKey) shouldBe 1
      keyAccessMap.get(badKey) should be > 1
    }
  }
} 
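The expiration parameter of the factory above is not exercised by these tests. A hedged sketch of wiring it up inside the same spec; the 100 ms window is an illustrative assumption. Entries older than the window are dropped, so a later get invokes the loader again:

  private val expiringCache = createCache(
    key => Future.successful(s"value = $key"),  // loader
    expiration = Some(Duration.ofMillis(100))   // evict entries after 100 ms
  )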
Example 54
Source File: HasWebSockets.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.it.api.websockets

import java.lang
import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.wavesplatform.dex.api.ws.connection.{WsConnection, WsConnectionOps}
import com.wavesplatform.dex.api.ws.entities.{WsBalances, WsOrder}
import com.wavesplatform.dex.api.ws.protocol.{WsAddressSubscribe, WsInitial, WsOrderBookSubscribe}
import com.wavesplatform.dex.domain.account.KeyPair
import com.wavesplatform.dex.domain.asset.{Asset, AssetPair}
import com.wavesplatform.dex.error.ErrorFormatterContext
import com.wavesplatform.dex.it.config.PredefinedAssets
import com.wavesplatform.dex.it.docker.DexContainer
import com.wavesplatform.dex.test.matchers.DiffMatcherWithImplicits
import mouse.any._
import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.duration._

trait HasWebSockets extends BeforeAndAfterAll with HasJwt with WsConnectionOps with WsMessageOps {
  _: Suite with Eventually with Matchers with DiffMatcherWithImplicits with PredefinedAssets =>

  implicit protected val system: ActorSystem        = ActorSystem()
  implicit protected val materializer: Materializer = Materializer.matFromSystem(system)
  implicit protected val efc: ErrorFormatterContext = assetDecimalsMap.apply

  protected def getWsStreamUri(dex: DexContainer): String = s"ws://127.0.0.1:${dex.restApiAddress.getPort}/ws/v0"

  protected val knownWsConnections: ConcurrentHashMap.KeySetView[WsConnection, lang.Boolean] =
    ConcurrentHashMap.newKeySet[WsConnection]()

  protected def addConnection(connection: WsConnection): Unit = knownWsConnections.add(connection)

  protected def mkWsAddressConnection(client: KeyPair,
                                      dex: DexContainer,
                                      keepAlive: Boolean = true,
                                      subscriptionLifetime: FiniteDuration = 1.hour): WsConnection = {
    val jwt        = mkJwt(client, lifetime = subscriptionLifetime)
    val connection = mkDexWsConnection(dex, keepAlive)
    connection.send(WsAddressSubscribe(client.toAddress, WsAddressSubscribe.defaultAuthType, jwt))
    connection
  }

  protected def mkWsOrderBookConnection(assetPair: AssetPair, dex: DexContainer, depth: Int = 1): WsConnection = {
    val connection = mkDexWsConnection(dex)
    connection.send(WsOrderBookSubscribe(assetPair, depth))
    connection
  }

  protected def mkWsInternalConnection(dex: DexContainer, keepAlive: Boolean = true): WsConnection =
    mkWsConnection(s"${getWsStreamUri(dex)}/internal", keepAlive)

  protected def mkDexWsConnection(dex: DexContainer, keepAlive: Boolean = true): WsConnection =
    mkWsConnection(getWsStreamUri(dex), keepAlive)

  protected def mkWsConnection(uri: String, keepAlive: Boolean = true): WsConnection = {
    new WsConnection(uri, keepAlive) unsafeTap { wsc =>
      addConnection(wsc)
      eventually { wsc.collectMessages[WsInitial] should have size 1 }
      wsc.clearMessages()
    }
  }

  protected def assertChanges(c: WsConnection, squash: Boolean = true)(expBs: Map[Asset, WsBalances]*)(expOs: WsOrder*): Unit = {
    eventually {
      if (squash) {
        c.balanceChanges.size should be <= expBs.size
        c.balanceChanges.squashed should matchTo { expBs.toList.squashed }
        c.orderChanges.size should be <= expOs.size
        c.orderChanges.squashed should matchTo { expOs.toList.squashed }
      } else {
        c.balanceChanges should matchTo(expBs)
        c.orderChanges should matchTo(expOs)
      }
    }

    c.clearMessages()
  }

  protected def cleanupWebSockets(): Unit = {
    if (!knownWsConnections.isEmpty) {
      knownWsConnections.forEach { _.close() }
      materializer.shutdown()
    }
  }

  override def afterAll(): Unit = {
    super.afterAll()
    cleanupWebSockets()
  }
} 
Example 55
Source File: ClusterSingletonHelperTest.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.cluster

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.testkit.{TestKit, TestProbe}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FunSuiteLike, Matchers}
import org.slf4j.LoggerFactory

import scala.util.Random

object ClusterSingletonHelperTest {
  val port = 20000 + Random.nextInt(20000)
}

class ClusterSingletonHelperTest (_system:ActorSystem) extends TestKit(_system) with FunSuiteLike with Matchers with BeforeAndAfterAll with BeforeAndAfter {

  def this() = this(ActorSystem("test-actor-system", ConfigFactory.parseString(
      s"""akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
          |akka.remote.enabled-transports = ["akka.remote.netty.tcp"]
          |akka.remote.netty.tcp.hostname="localhost"
          |akka.remote.netty.tcp.port=${ClusterSingletonHelperTest.port}
          |akka.cluster.seed-nodes = ["akka.tcp://test-actor-system@localhost:${ClusterSingletonHelperTest.port}"]
    """.stripMargin
    ).withFallback(ConfigFactory.load("application-test.conf"))))

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }

  val log = LoggerFactory.getLogger(getClass)


  test("start and communicate with cluster-singleton") {


    val started = TestProbe()
    val proxy = ClusterSingletonHelper.startClusterSingleton(system, Props(new OurClusterSingleton(started.ref)), "ocl")
    started.expectMsg("started")
    val sender = TestProbe()
    sender.send(proxy, "ping")
    sender.expectMsg("pong")

  }
}

class OurClusterSingleton(started:ActorRef) extends Actor {

  started ! "started"
  def receive = {
    case "ping" => sender ! "pong"
  }
} 
Example 56
package no.nextgentel.oss.akkatools.aggregate.aggregateTest_usingAggregateStateBase

import java.util.UUID

import akka.actor.{ActorPath, ActorSystem, Props}
import akka.persistence.{DeleteMessagesFailure, DeleteMessagesSuccess, SaveSnapshotFailure, SaveSnapshotSuccess, SnapshotMetadata, SnapshotOffer}
import akka.testkit.{TestKit, TestProbe}
import com.typesafe.config.ConfigFactory
import no.nextgentel.oss.akkatools.aggregate._
import no.nextgentel.oss.akkatools.testing.AggregateTesting
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FunSuiteLike, Matchers}
import org.slf4j.LoggerFactory



  override def onSnapshotOffer(offer: SnapshotOffer): Unit = {
    state = offer.snapshot.asInstanceOf[StringState]
  }

  override def acceptSnapshotRequest(req: SaveSnapshotOfCurrentState): Boolean = {
    if (state == StringState("WAT")) {
      state = StringState("SAVED")
      true
    }
    else {
      state = StringState("WAT") //So it works second time
      false
    }
  }

  override def onSnapshotSuccess(success: SaveSnapshotSuccess): Unit = {
    state = StringState("SUCCESS_SNAP")
  }

  override def onSnapshotFailure(failure: SaveSnapshotFailure): Unit = {
    state = StringState("FAIL_SNAP")
  }

  override def onDeleteMessagesSuccess(success: DeleteMessagesSuccess): Unit = {
    state = StringState("SUCCESS_MSG")
  }

  override def onDeleteMessagesFailure(failure: DeleteMessagesFailure): Unit = {
    state = StringState("FAIL_MSG")
  }

  // Used as prefix/base when constructing the persistenceId to use - the unique ID is extracted at runtime from the actorPath, which is constructed by the Sharding-coordinator
  override def persistenceIdBase(): String = "/x/"
}

case class StringEv(data: String)

case class StringState(data:String) extends AggregateStateBase[StringEv, StringState] {
  override def transitionState(event: StringEv): StateTransition[StringEv, StringState] =
    StateTransition(StringState(event.data))
} 
Example 57
Source File: ActorWithDMSupportTest.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.persistence

import java.util.concurrent.TimeUnit

import akka.actor.{Props, ActorSystem}
import akka.testkit.{TestProbe, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Matchers, FunSuiteLike}

import scala.concurrent.duration.FiniteDuration

class ActorWithDMSupportTest(_system:ActorSystem) extends TestKit(_system) with FunSuiteLike with Matchers with BeforeAndAfterAll with BeforeAndAfter {
  def this() = this(ActorSystem("ActorWithDMSupportTest", ConfigFactory.load("application-test.conf")))

  test("success with dm") {
    val a = system.actorOf(Props(new TestActorWithDMSupport()))
    val s = TestProbe()

    // send raw
    s.send(a, "sendok")
    s.expectMsg("ok")

    // send via dm and withNewPayload
    val dm = DurableMessage(1L, "sendok", s.ref.path)
    s.send(a, dm)
    s.expectMsg(dm.withNewPayload("ok"))

    // send raw - do nothing
    s.send(a, "silent")


    // send silent - wait for confirm
    s.send(a, DurableMessage(1L, "silent", s.ref.path))
    s.expectMsg( DurableMessageReceived(1,None) )


    // send noconfirm - with dm
    s.send(a, DurableMessage(1L, "no-confirm", s.ref.path))
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

    // send noconfirm - with dm
    s.send(a, DurableMessage(1L, "no-confirm-custom", s.ref.path))
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

    // send noconfirm - without dm
    s.send(a, "no-confirm")
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

    // send noconfirm - without dm
    s.send(a, "no-confirm-custom")
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

  }


}

class TestActorWithDMSupport extends ActorWithDMSupport {
  // All raw messages or payloads in DMs are passed to this function.
  override def receivePayload = {
    case "sendok" =>
      send(sender.path, "ok")
    case "silent" =>
      Unit
    case "no-confirm" =>
      throw new LogWarningAndSkipDMConfirmException("something went wrong")
    case "no-confirm-custom" =>
      throw new CustomLogWarningAndSkipDMConfirm()
  }
}

class CustomLogWarningAndSkipDMConfirm extends Exception("") with LogWarningAndSkipDMConfirm 
Example 58
Source File: AffinityPropagationSuite.scala    From SparkAffinityPropagation   with MIT License 5 votes vote down vote up
package org.viirya.spark.ml

import scala.collection.mutable

import org.scalatest.{BeforeAndAfterAll, FunSuite, Suite}

import org.viirya.spark.ml.AffinityPropagation._

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.graphx.{Edge, Graph}

class AffinityPropagationSuite extends FunSuite with BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("AffinityPropagationUnitTest")
    sc = new SparkContext(conf)
  }

  override def afterAll() {
    try {
      if (sc != null) {
        sc.stop()
      }
      sc = null
    } finally {
      super.afterAll()
    }
  }  

  test("affinity propagation") {
    
    val similarities = Seq[(Long, Long, Double)](
      (0, 1, 1.0), (1, 0, 1.0), (0, 2, 1.0), (2, 0, 1.0), (0, 3, 1.0), (3, 0, 1.0),
      (1, 2, 1.0), (2, 1, 1.0), (2, 3, 1.0), (3, 2, 1.0))
    val expected = Array(
      Array(0.0,     1.0/3.0, 1.0/3.0, 1.0/3.0),
      Array(1.0/2.0,     0.0, 1.0/2.0,     0.0),
      Array(1.0/3.0, 1.0/3.0,     0.0, 1.0/3.0),
      Array(1.0/2.0,     0.0, 1.0/2.0,     0.0))
    val s = constructGraph(sc.parallelize(similarities, 2), true, false)
    s.edges.collect().foreach { case Edge(i, j, x) =>
      assert(math.abs(x.similarity - expected(i.toInt)(j.toInt)) < 1e-14)
    }
  }
} 
Example 59
Source File: HomeControllerSpec.scala    From phantom-activator-template   with Apache License 2.0 5 votes vote down vote up
package controllers

import org.scalatest.{BeforeAndAfterAll, MustMatchers, WordSpec}
import org.scalatestplus.play.guice.GuiceOneAppPerTest
import org.slf4j.LoggerFactory
import play.api.test.Helpers._
import play.api.test._


class HomeControllerSpec extends WordSpec with GuiceOneAppPerTest with MustMatchers with BeforeAndAfterAll {

  private val logger = LoggerFactory.getLogger("embedded-cassandra")

  override protected def beforeAll(): Unit = {
    EmbeddedCassandra.start(logger)
  }

  override protected def afterAll(): Unit = {
    EmbeddedCassandra.cleanup(logger)
  }

  "Application" should {

    "render the index page" in {
      val result = route(app, FakeRequest(GET, "/")).get
      status(result) must equal(OK)
      contentAsString(result) must include("Spring Bud")
    }
  }

} 
Example 60
Source File: PailDataSourceSpec.scala    From utils   with Apache License 2.0 5 votes vote down vote up
package com.indix.utils.spark.pail

import java.util

import com.backtype.hadoop.pail.{PailFormatFactory, PailSpec, PailStructure}
import com.backtype.support.{Utils => PailUtils}
import com.google.common.io.Files
import org.apache.commons.io.FileUtils
import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterAll, FlatSpec}
import org.scalatest.Matchers._

import scala.collection.JavaConverters._
import scala.util.Random

case class User(name: String, age: Int)

class UserPailStructure extends PailStructure[User] {
  override def isValidTarget(dirs: String*): Boolean = true

  override def getType: Class[_] = classOf[User]

  override def serialize(user: User): Array[Byte] = PailUtils.serialize(user)

  override def getTarget(user: User): util.List[String] = List(user.age % 10).map(_.toString).asJava

  override def deserialize(serialized: Array[Byte]): User = PailUtils.deserialize(serialized).asInstanceOf[User]
}

class PailDataSourceSpec extends FlatSpec with BeforeAndAfterAll with PailDataSource {
  private var spark: SparkSession = _

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    spark = SparkSession.builder().master("local[2]").appName("PailDataSource").getOrCreate()
  }

  val userPailSpec = new PailSpec(PailFormatFactory.SEQUENCE_FILE, new UserPailStructure)

  "PailBasedReaderWriter" should "read/write user records from/into pail" in {
    val output = Files.createTempDir()
    val users = (1 to 100).map { index => User(s"foo$index", Random.nextInt(40))}
    spark.sparkContext.parallelize(users)
      .saveAsPail(output.getAbsolutePath, userPailSpec)

    val input = output.getAbsolutePath
    val total = spark.sparkContext.pailFile[User](input)
      .map(u => u.name)
      .count()

    total should be(100)
    FileUtils.deleteDirectory(output)
  }
} 
Example 61
Source File: ParquetAvroDataSourceSpec.scala    From utils   with Apache License 2.0 5 votes vote down vote up
package com.indix.utils.spark.parquet

import java.io.File

import com.google.common.io.Files
import com.indix.utils.spark.parquet.avro.ParquetAvroDataSource
import org.apache.commons.io.FileUtils
import org.apache.parquet.hadoop.metadata.CompressionCodecName
import org.apache.spark.sql.SparkSession
import org.scalactic.Equality
import org.scalatest.Matchers.{be, convertToAnyShouldWrapper, equal}
import org.scalatest.{BeforeAndAfterAll, FlatSpec}
import java.util.{Arrays => JArrays}

case class SampleAvroRecord(a: Int, b: String, c: Seq[String], d: Boolean, e: Double, f: collection.Map[String, String], g: Array[Byte])

class ParquetAvroDataSourceSpec extends FlatSpec with BeforeAndAfterAll with ParquetAvroDataSource {
  private var spark: SparkSession = _
  implicit val sampleAvroRecordEq = new Equality[SampleAvroRecord] {
    override def areEqual(left: SampleAvroRecord, b: Any): Boolean = b match {
      case right: SampleAvroRecord =>
        left.a == right.a &&
          left.b == right.b &&
          Equality.default[Seq[String]].areEqual(left.c, right.c) &&
          left.d == right.d &&
          left.e == right.e &&
          Equality.default[collection.Map[String, String]].areEqual(left.f, right.f) &&
          JArrays.equals(left.g, right.g)
      case _ => false
    }
  }

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    spark = SparkSession.builder().master("local[2]").appName("ParquetAvroDataSource").getOrCreate()
  }

  override protected def afterAll(): Unit = {
    try {
      spark.sparkContext.stop()
    } finally {
      super.afterAll()
    }
  }

  "AvroBasedParquetDataSource" should "read/write avro records as ParquetData" in {

    val outputLocation = Files.createTempDir().getAbsolutePath + "/output"

    val sampleRecords: Seq[SampleAvroRecord] = Seq(
      SampleAvroRecord(1, "1", List("a1"), true, 1.0d, Map("a1" -> "b1"), "1".getBytes),
      SampleAvroRecord(2, "2", List("a2"), false, 2.0d, Map("a2" -> "b2"), "2".getBytes),
      SampleAvroRecord(3, "3", List("a3"), true, 3.0d, Map("a3" -> "b3"), "3".getBytes),
      SampleAvroRecord(4, "4", List("a4"), true, 4.0d, Map("a4" -> "b4"), "4".getBytes),
      SampleAvroRecord(5, "5", List("a5"), false, 5.0d, Map("a5" -> "b5"), "5".getBytes)
    )

    val sampleDf = spark.createDataFrame(sampleRecords)

    sampleDf.rdd.saveAvroInParquet(outputLocation, sampleDf.schema, CompressionCodecName.GZIP)

    val sparkVal = spark

    import sparkVal.implicits._

    val records: Array[SampleAvroRecord] = spark.read.parquet(outputLocation).as[SampleAvroRecord].collect()

    records.length should be(5)
    // Compare element-wise via shouldEqual so that the custom Equality defined above
    // is applied (a bare === only produces a Boolean, and default equality cannot
    // compare the Array[Byte] field)
    // Ref - https://github.com/scalatest/scalatest/issues/491
    records.sortBy(_.a).zip(sampleRecords.sortBy(_.a)).foreach { case (actual, expected) =>
      actual shouldEqual expected
    }

    FileUtils.deleteDirectory(new File(outputLocation))
  }

} 
Example 62
Source File: DynamoDBJournalPerfSpec.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.journal

import java.net.URI

import akka.persistence.CapabilityFlag
import akka.persistence.journal.JournalPerfSpec
import com.github.j5ik2o.akka.persistence.dynamodb.utils.{ DynamoDBSpecSupport, RandomPortUtil }
import com.github.j5ik2o.reactive.aws.dynamodb.DynamoDbAsyncClient
import com.typesafe.config.ConfigFactory
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
import software.amazon.awssdk.services.dynamodb.{ DynamoDbAsyncClient => JavaDynamoDbAsyncClient }

import scala.concurrent.duration._

object DynamoDBJournalPerfSpec {
  val dynamoDBPort = RandomPortUtil.temporaryServerPort()
}

class DynamoDBJournalPerfSpec
    extends JournalPerfSpec(
      ConfigFactory
        .parseString(
          s"""
        |j5ik2o.dynamo-db-journal {
        |  shard-count = 2
        |  queue-buffer-size = 1024
        |  queue-parallelism = 1
        |  write-parallelism = 1
        |  query-batch-size = 1024
        |  dynamo-db-client {
        |    endpoint = "http://127.0.0.1:${DynamoDBJournalPerfSpec.dynamoDBPort}/"
        |  }
        |}
        |
        |j5ik2o.dynamo-db-snapshot.dynamo-db-client {
        |  endpoint = "http://127.0.0.1:${DynamoDBJournalPerfSpec.dynamoDBPort}/"
        |}
        |
        """.stripMargin
        ).withFallback(ConfigFactory.load("journal-reference"))
    )
    with BeforeAndAfterAll
    with ScalaFutures
    with DynamoDBSpecSupport {
  override protected def supportsRejectingNonSerializableObjects: CapabilityFlag = false

  
  override def measurementIterations: Int = 5

  override protected lazy val dynamoDBPort: Int = DynamoDBJournalPerfSpec.dynamoDBPort

  val underlying: JavaDynamoDbAsyncClient = JavaDynamoDbAsyncClient
    .builder()
    .credentialsProvider(
      StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKeyId, secretAccessKey))
    )
    .endpointOverride(URI.create(dynamoDBEndpoint))
    .build()

  override def dynamoDbAsyncClient: DynamoDbAsyncClient = DynamoDbAsyncClient(underlying)

  override def beforeAll(): Unit = {
    super.beforeAll()
    createTable()
  }

  override def afterAll(): Unit = {
    deleteTable()
    super.afterAll()
  }

} 
Example 63
Source File: AkkaQuickstartSpec.scala    From didactic-computing-machine   with GNU Affero General Public License v3.0 5 votes vote down vote up
//#full-example
package com.lightbend.akka.sample

import org.scalatest.{ BeforeAndAfterAll, FlatSpecLike, Matchers }
import akka.actor.{ Actor, Props, ActorSystem }
import akka.testkit.{ ImplicitSender, TestKit, TestActorRef, TestProbe }
import scala.concurrent.duration._
import Greeter._
import Printer._

//#test-classes
class AkkaQuickstartSpec(_system: ActorSystem)
  extends TestKit(_system)
  with Matchers
  with FlatSpecLike
  with BeforeAndAfterAll {
  //#test-classes

  def this() = this(ActorSystem("AkkaQuickstartSpec"))

  override def afterAll: Unit = {
    shutdown(system)
  }

  //#first-test
  //#specification-example
  "A Greeter Actor" should "pass on a greeting message when instructed to" in {
    //#specification-example
    val testProbe = TestProbe()
    val helloGreetingMessage = "hello"
    val helloGreeter = system.actorOf(Greeter.props(helloGreetingMessage, testProbe.ref))
    val greetPerson = "Akka"
    helloGreeter ! WhoToGreet(greetPerson)
    helloGreeter ! Greet
    testProbe.expectMsg(500 millis, Greeting(s"$helloGreetingMessage, $greetPerson"))
  }
  //#first-test
}
//#full-example 
Example 64
Source File: KafkaIntSpec.scala    From kafka-configurator   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package common

import cakesolutions.kafka.testkit.KafkaServer
import kafka.utils.ZkUtils
import org.apache.kafka.clients.admin.AdminClient
import org.apache.kafka.clients.admin.AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.PatienceConfiguration
import org.scalatest.time.{ Millis, Seconds, Span }

import scala.collection.JavaConverters._
import scala.concurrent.duration._

abstract class KafkaIntSpec extends BaseSpec with BeforeAndAfterAll with PatienceConfiguration {

  override implicit val patienceConfig = PatienceConfig(Span(3, Seconds), Span(250, Millis))

  val kafkaServer = new KafkaServer()
  val kafkaPort = kafkaServer.kafkaPort

  val zkSessionTimeout = 30 seconds
  val zkConnectionTimeout = 30 seconds

  lazy val zkUtils = ZkUtils(s"localhost:${kafkaServer.zookeeperPort}", zkSessionTimeout.toMillis.toInt,
    zkConnectionTimeout.toMillis.toInt, isZkSecurityEnabled = false)

  lazy val kafkaAdminClient = AdminClient.create(Map[String, AnyRef](
    BOOTSTRAP_SERVERS_CONFIG -> s"localhost:$kafkaPort"
  ).asJava)

  override def beforeAll() = kafkaServer.startup()

  override def afterAll() = {
    kafkaAdminClient.close()
    zkUtils.close()
    kafkaServer.close()
  }

} 
Example 65
Source File: PipeDecisionTest.scala    From sddf   with GNU General Public License v3.0 5 votes vote down vote up
package de.unihamburg.vsis.sddf.test.classification

import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import org.scalatest.BeforeAndAfterAll
import org.scalatest.FunSuite

import de.unihamburg.vsis.sddf.SddfContext.Duplicate
import de.unihamburg.vsis.sddf.SddfContext.NoDuplicate
import de.unihamburg.vsis.sddf.SddfContext.SymPairSim
import de.unihamburg.vsis.sddf.classification.PipeClassificationDecisionTree
import de.unihamburg.vsis.sddf.classification.PipeClassificationNaiveBayes
import de.unihamburg.vsis.sddf.classification.PipeClassificationSvm
import de.unihamburg.vsis.sddf.pipe.context.SddfPipeContext
import de.unihamburg.vsis.sddf.reading.SymPair
import de.unihamburg.vsis.sddf.reading.Tuple
import de.unihamburg.vsis.sddf.test.util.LocalSparkContext

class PipeClassificationTest extends FunSuite with LocalSparkContext with BeforeAndAfterAll{
  
  var input: (SymPairSim, RDD[LabeledPoint]) = _
  
  override def beforeAll() {
    super.beforeAll()
    val tuple1 = Tuple("test1","test1","test1")
    tuple1.id = 1
    val tuple2 = Tuple("test2","test2","test2")
    tuple2.id = 2
    val tuple3 = Tuple("hans","franz","wurst")
    tuple3.id = 3
    
    val symPairSim: SymPairSim = sc.parallelize(Seq(
      (new SymPair(tuple1, tuple2), Array(1D,1D,0D))
      ,(new SymPair(tuple2, tuple3), Array(0D,0D,1D))
    ))
    
    val trainingData: RDD[LabeledPoint] = sc.parallelize(Seq(
      LabeledPoint(label = Duplicate, features = Vectors.dense(Array(0.99,1.0,0.0)))
      ,LabeledPoint(label = Duplicate, features = Vectors.dense(Array(1.0,1.0,0.0)))
      ,LabeledPoint(label = Duplicate, features = Vectors.dense(Array(1.0,0.875,0.0)))
      ,LabeledPoint(label = Duplicate, features = Vectors.dense(Array(1.0,1.0,0.1)))
      ,LabeledPoint(label = Duplicate, features = Vectors.dense(Array(1.0,0.89,0.0)))
      
      ,LabeledPoint(label = NoDuplicate, features = Vectors.dense(Array(0.1,0.0,1.0)))
      ,LabeledPoint(label = NoDuplicate, features = Vectors.dense(Array(0.0,0.2,1.0)))
      ,LabeledPoint(label = NoDuplicate, features = Vectors.dense(Array(0.06,0.0,0.89)))
      ,LabeledPoint(label = NoDuplicate, features = Vectors.dense(Array(0.21,0.19,0.91)))
    ))
    
    input = (symPairSim, trainingData)
  }

  override def afterAll() {
    super.afterAll()
  }

  test("naive bayes classification test") {
    val classificationPipe = new PipeClassificationNaiveBayes()
    implicit val pipeContext = new SddfPipeContext()
    val result = classificationPipe.run(input)
    assert(result.count === 1)
  }
  
  test("svm classification test") {
    val classificationPipe = new PipeClassificationSvm()
    implicit val pipeContext = new SddfPipeContext()
    val result = classificationPipe.run(input)
    assert(result.count === 1)
  }

  test("decision tree classification test") {
    val classificationPipe = new PipeClassificationDecisionTree()
    implicit val pipeContext = new SddfPipeContext()
    val result = classificationPipe.run(input)
    assert(result.count === 1)
  }

} 
Example 66
Source File: LocalSparkContext.scala    From sddf   with GNU General Public License v3.0 5 votes vote down vote up
package de.unihamburg.vsis.sddf.test.util

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Suite

trait LocalSparkContext extends BeforeAndAfterAll { self: Suite =>

  @transient var sc: SparkContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("SddF Test")
      .set("spark.ui.enabled", "false")
    sc = new SparkContext(conf)
  }

  override def afterAll() {
    if (sc != null) {
      sc.stop()
    }
    super.afterAll()
  }

} 
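A minimal sketch of mixing this trait into a suite (the class name and assertion are hypothetical); beforeAll/afterAll manage the SparkContext lifecycle, so the test body can use sc directly:

import org.scalatest.FunSuite

class WordCountSuite extends FunSuite with LocalSparkContext {
  test("reduceByKey counts words") {
    val counts = sc.parallelize(Seq("a", "b", "a"))
      .map(word => (word, 1))   // pair each word with a count of 1
      .reduceByKey(_ + _)       // sum the counts per word
      .collectAsMap()
    assert(counts("a") == 2)
  }
}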
Example 67
Source File: MLlibTestSparkContext.scala    From bisecting-kmeans   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.mllib.util

import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfterAll, Suite}

trait MLlibTestSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _
  @transient var sqlContext: SQLContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("MLlibUnitTest")
    sc = new SparkContext(conf)
    sqlContext = new SQLContext(sc)
  }

  override def afterAll() {
    sqlContext = null
    if (sc != null) {
      sc.stop()
    }
    sc = null
    super.afterAll()
  }
} 
Example 68
Source File: JdbcQueryServiceSpec.scala    From daf   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package daf.dataset.query.jdbc

import cats.effect.IO
import cats.instances.list.catsStdInstancesForList
import daf.dataset.query.{ Count, GroupByClause, Gt, NamedColumn, Query, SelectClause, ValueColumn, WhereClause }
import daf.instances.H2TransactorInstance
import doobie.free.KleisliInterpreter
import doobie.free.connection.AsyncConnectionIO
import doobie.implicits.{ toConnectionIOOps, toSqlInterpolator }
import doobie.util.query.Query0
import doobie.util.transactor.{ Strategy, Transactor }
import doobie.util.update.Update
import org.apache.commons.dbcp.BasicDataSource
import org.scalatest.{ BeforeAndAfterAll, MustMatchers, WordSpec }

class JdbcQueryServiceSpec extends WordSpec with MustMatchers with BeforeAndAfterAll {

  private lazy val service = new JdbcQueryService(null, None) with H2TransactorInstance

  override def beforeAll(): Unit = JdbcQueries.prepare.transact { service.transactor("") }.unsafeRunSync() match {
    case (_     , rows) if rows == 0   => throw new RuntimeException("Unable to start test: [rows] were not created")
    case (_, _)                        => // do nothing
  }

  "A jdbc query service" must {

    "run queries" in  {
      service.exec(JdbcQueries.select, "user", "").map { _.toCsv.toList }.get must be {
        List(
          """"COUNTRY", "COUNTS"""",
          """"Italy", 2""",
          """"Netherlands", 1"""
        )
      }
    }
  }
}

object JdbcQueries {

  type User = (String, String, Int, String)

  private def createTransactor(dataSource: BasicDataSource) = Transactor[IO, BasicDataSource](
    dataSource, a => IO(a.getConnection), KleisliInterpreter[IO].ConnectionInterpreter, Strategy.void
  )

  val ddl =
    sql"""
      CREATE TABLE user(
        id VARCHAR,
        username VARCHAR,
        age SMALLINT,
        country VARCHAR
      )
    """.update.run

  Query0.apply("").stream

  val insert =
    Update[User]("INSERT INTO user(id, username, age, country) VALUES (?, ?, ?, ?)").updateMany[List] {
      List(
        ("id1", "user1", 42, "Italy"),
        ("id2", "user2", 32, "Italy"),
        ("id3", "user3", 27, "Italy"),
        ("id4", "user4", 33, "Netherlands")
      )
    }

  val prepare = for {
    table  <- JdbcQueries.ddl
    insert <- JdbcQueries.insert
  } yield (table, insert)

  val select = Query(
    select  = SelectClause {
      Seq(
        NamedColumn("country"), Count(NamedColumn("id")) as "counts"
      )
    },
    where   = Some {
      WhereClause { Gt(NamedColumn("age"), ValueColumn(30)) }
    },
    groupBy = Some {
      GroupByClause { Seq(NamedColumn("country")) }
    },
    having = None,
    limit  = None
  )

} 
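For reference, the select Query AST above should render to SQL along these lines; this is an assumption inferred from the select/where/groupBy clauses and the expected CSV output, not a literal from the original source:

// Assumed SQL equivalent of the `select` Query AST above
val expectedSql =
  """SELECT country, COUNT(id) AS counts
    |FROM user
    |WHERE age > 30
    |GROUP BY country""".stripMargin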
Example 69
Source File: DatasetFunctionsSpec.scala    From daf   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package daf.dataset

import java.io.ByteArrayInputStream

import akka.stream.ActorMaterializer
import akka.stream.scaladsl.StreamConverters
import controllers.modules.TestAbstractModule
import daf.filesystem.MergeStrategy
import daf.instances.{ AkkaInstance, ConfigurationInstance }
import org.scalatest.{ BeforeAndAfterAll, MustMatchers, WordSpecLike }

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Random

class DatasetFunctionsSpec extends TestAbstractModule
  with WordSpecLike
  with MustMatchers
  with BeforeAndAfterAll
  with ConfigurationInstance
  with AkkaInstance {

  implicit lazy val executionContext = actorSystem.dispatchers.lookup("akka.actor.test-dispatcher")

  protected implicit lazy val materializer = ActorMaterializer.create { actorSystem }

  override def beforeAll() = {
    startAkka()
  }

  def data = (1 to 5) .map { i =>
    Random.alphanumeric.grouped(20).take(5).map { s => s"$i - ${s.mkString}" }.toStream :+ defaultSeparator
  }

  def stream = MergeStrategy.coalesced {
    data.map { iter =>
      new ByteArrayInputStream(
        iter.mkString(defaultSeparator).getBytes("UTF-8")
      )
    }
  }

  def source = StreamConverters.fromInputStream(() => stream, 5)

  "Source manipulation" must {

    "convert to a string source" in {
      Await.result(
        wrapDefault { asStringSource(source) }.runFold("") { _ + _ },
        5.seconds
      ).split(defaultSeparator).length must be { 25 }
    }

    "convert to a json source" in {
      Await.result(
        wrapJson { asStringSource(source) }.runFold("") { _ + _ },
        5.seconds
      ).split(jsonSeparator).length must be { 25 }
    }

  }

} 
Example 70
Source File: MergeStrategySpec.scala    From daf   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package daf.filesystem

import java.io.{ Closeable, InputStream }
import java.util.Scanner

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{ FSDataInputStream, FSDataOutputStream, FileSystem, Path }
import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpec }

import scala.collection.convert.decorateAsScala._
import scala.util.{ Random, Try }

class MergeStrategySpec extends WordSpec with Matchers with BeforeAndAfterAll {

  private implicit val fileSystem = FileSystem.getLocal(new Configuration)

  private val numFiles = 10

  private val baseDir = "test-dir".asHadoop

  private val workingDir = baseDir / f"merge-strategy-spec-${Random.nextInt(10000)}%05d"

  private def safely[A <: Closeable, U](f: A => U) = { stream: A =>
    val attempt = Try { f(stream) }
    stream.close()
    attempt
  }

  private def readFile(path: Path) = safely[FSDataInputStream, Seq[String]] { _.scanner.asScala.toSeq } apply fileSystem.open(path)

  private def readFiles = Try {
    fileSystem.listStatus(workingDir).toSeq.flatMap { status => readFile(status.getPath).get }
  }

  private def openFiles = Try {
    fileSystem.listStatus(workingDir).toSeq.map { status => fileSystem.open(status.getPath) }
  }

  private def createFile(fileName: String) = safely[FSDataOutputStream, Unit] { stream =>
    Random.alphanumeric.grouped(200).take(10).map { randomSplits(_) }.foreach { row =>
      stream.writeUTF { row.mkString("", ",", "\n") }
    }
  } apply fileSystem.create { workingDir / fileName }

  private def randomSplits(chars: Stream[Char], strings: Seq[String] = Seq.empty): Seq[String] = chars.splitAt { Random.nextInt(10) + 5 } match {
    case (head, tail) if tail.isEmpty => head.drop(1).mkString +: strings
    case (head, tail)                 => randomSplits(tail, head.mkString +: strings)
  }

  private def createWorkingDir = Try { fileSystem.mkdirs(workingDir) }

  private def createFiles = Try {
    0 until numFiles foreach { index => createFile(s"test-file-$index").get } // this is relatively nasty, and should be handled in a `traverse`
  }

  private def prepareData = for {
    _ <- createWorkingDir
    _ <- createFiles
  } yield ()

  private def purgeData = Try { fileSystem.delete(workingDir, true) }

  override def beforeAll() = prepareData.get

  override def afterAll() = purgeData.get

  "MergeStrategies info" when {

    "given compressed format files" must {

      "throw an exception" in {
        an[IllegalArgumentException] must be thrownBy MergeStrategies.find { FileInfo(workingDir / "test-file-0", 0, FileDataFormats.raw, FileCompressionFormats.gzip) }
      }
    }

    "given data as csv" must {

      "drop one line and merge the rest" in {
        safely[InputStream, Seq[String]] { new Scanner(_).asScala.toList }.andThen { attempt =>
          for {
            merged   <- attempt
            expected <- readFiles
          } merged.size should be { expected.size - numFiles + 1 }
        } apply MergeStrategies.csv.merge { openFiles.get }
      }
    }

    "given data as json" must {

      "just merge the files into one" in {
        safely[InputStream, Seq[String]] { new Scanner(_).asScala.toList }.andThen { attempt =>
          for {
            merged   <- attempt
            expected <- readFiles
          } merged.size should be { expected.size }
        } apply MergeStrategies.json.merge { openFiles.get }
      }

    }
  }
} 
Example 71
Source File: HDFSBase.scala    From daf   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package daf.util

import better.files.{ File, _ }
import daf.util.DataFrameClasses.{ Address, Person }
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.hdfs.{ HdfsConfiguration, MiniDFSCluster }
import org.apache.hadoop.test.PathUtils
import org.apache.spark.sql.{ SaveMode, SparkSession }
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, Matchers }
import org.slf4j.LoggerFactory

import scala.util.{ Failure, Random, Try }

abstract class HDFSBase extends FlatSpec with Matchers with BeforeAndAfterAll {

  var miniCluster: Try[MiniDFSCluster] = Failure[MiniDFSCluster](new Exception)

  var fileSystem: Try[FileSystem] = Failure[FileSystem](new Exception)

  val sparkSession: SparkSession = SparkSession.builder().master("local").getOrCreate()

  val alogger = LoggerFactory.getLogger(this.getClass)

  val (testDataPath, confPath) = {
    val testDataPath = s"${PathUtils.getTestDir(this.getClass).getCanonicalPath}/MiniCluster"
    val confPath = s"$testDataPath/conf"
    (
      testDataPath.toFile.createIfNotExists(asDirectory = true, createParents = false),
      confPath.toFile.createIfNotExists(asDirectory = true, createParents = false)
    )
  }

  def pathAvro = "opendata/test.avro"
  def pathParquet = "opendata/test.parquet"
  def pathCsv = "opendata/test.csv"

  def getSparkSession = sparkSession

  override def beforeAll(): Unit = {

    val conf = new HdfsConfiguration()
    conf.setBoolean("dfs.permissions", true)
    System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA)

    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath.pathAsString)
    //FileUtil.fullyDelete(testDataPath.toJava)

    conf.set(s"hadoop.proxyuser.${System.getProperties.get("user.name")}.groups", "*")
    conf.set(s"hadoop.proxyuser.${System.getProperties.get("user.name")}.hosts", "*")

    val builder = new MiniDFSCluster.Builder(conf)
    miniCluster = Try(builder.build())
    fileSystem = miniCluster.map(_.getFileSystem)
    fileSystem.foreach(fs => {
      val confFile: File = confPath / "hdfs-site.xml"
      for { os <- confFile.newOutputStream.autoClosed } fs.getConf.writeXml(os)
    })

    writeDf()
  }

  override def afterAll(): Unit = {
    miniCluster.foreach(_.shutdown(true))
    val _ = testDataPath.parent.parent.delete(true)
    sparkSession.stop()
  }

  
  private def writeDf(): Unit = {
    import sparkSession.implicits._

    alogger.info(s"TestDataPath ${testDataPath.toJava.getAbsolutePath}")
    alogger.info(s"ConfPath ${confPath.toJava.getAbsolutePath}")
    val persons = (1 to 10).map(i => Person(s"Andy$i", Random.nextInt(85), Address("Via Ciccio Cappuccio")))
    val caseClassDS = persons.toDS()
    caseClassDS.write.format("parquet").mode(SaveMode.Overwrite).save(pathParquet)
    caseClassDS.write.format("com.databricks.spark.avro").mode(SaveMode.Overwrite).save(pathAvro)
    //writing directly the Person dataframe generates an exception
    caseClassDS.toDF.select("name", "age").write.format("csv").mode(SaveMode.Overwrite).option("header", "true").save(pathCsv)
  }
}

object DataFrameClasses {

  final case class Address(street: String)

  final case class Person(name: String, age: Int, address: Address)
} 
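A minimal sketch of a concrete suite built on HDFSBase (the class name and count check are hypothetical); it reads back the parquet file that writeDf() saves during beforeAll:

class HdfsReadBackSpec extends HDFSBase {
  "The embedded cluster" should "serve the parquet data written in beforeAll" in {
    val df = getSparkSession.read.parquet(pathParquet)
    df.count() shouldBe 10 // writeDf() persists 10 Person rows
  }
}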
Example 72
Source File: KuduMiniCluster.scala    From daf   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package it.teamdigitale.miniclusters

import org.apache.kudu.client.{KuduClient, MiniKuduCluster}
import org.apache.kudu.client.KuduClient.KuduClientBuilder
import org.apache.kudu.client.MiniKuduCluster.MiniKuduClusterBuilder
import org.apache.kudu.spark.kudu.KuduContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
import org.apache.logging.log4j.LogManager

class KuduMiniCluster extends AutoCloseable {
  val alogger = LogManager.getLogger(this.getClass)

  var kuduMiniCluster: MiniKuduCluster = _
  var kuduClient: KuduClient = _
  var kuduContext: KuduContext = _
  var sparkSession = SparkSession.builder().appName(s"test-${System.currentTimeMillis()}").master("local[*]").getOrCreate()

  def start() {
    alogger.info("Starting KUDU mini cluster")

    System.setProperty(
      "binDir",
      s"${System.getProperty("user.dir")}/src/test/kudu_executables/${sun.awt.OSInfo.getOSType().toString.toLowerCase}"
    )


    kuduMiniCluster = new MiniKuduClusterBuilder()
      .numMasters(1)
      .numTservers(3)
      .build()

    val envMap = Map[String, String](("Xmx", "512m"))

    kuduClient = new KuduClientBuilder(kuduMiniCluster.getMasterAddresses).build()
    assert(kuduMiniCluster.waitForTabletServers(1))

    kuduContext = new KuduContext(kuduMiniCluster.getMasterAddresses, sparkSession.sparkContext)

  }

  override def close() {
    alogger.info("Ending KUDU mini cluster")
    kuduClient.shutdown()
    kuduMiniCluster.shutdown()
    sparkSession.close()
  }
}

object KuduMiniCluster {

  def main(args: Array[String]): Unit = {
    val kudu = new KuduMiniCluster()
    kudu.start()

    println(s"MASTER KUDU ${kudu.kuduMiniCluster.getMasterAddresses}")

    // Block forever so the mini cluster stays up until the process is killed
    while (true) {}
  }

} 
Example 73
Source File: KuduEventsHandlerSpec.scala    From daf   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package it.teamdigitale.storage

import java.io.File
import java.util.concurrent.TimeUnit

import org.apache.kudu.spark.kudu._
import it.teamdigitale.miniclusters.KuduMiniCluster
import it.teamdigitale.config.IotIngestionManagerConfig.KuduConfig
import it.teamdigitale.managers.IotIngestionManager
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
import it.gov.daf.iotingestion.event.Event
import it.teamdigitale.EventModel.{EventToKuduEvent, EventToStorableEvent}
import org.apache.logging.log4j.LogManager

import scala.util.{Failure, Success, Try}

class KuduEventsHandlerSpec extends FlatSpec with Matchers with BeforeAndAfterAll {

  val logger = LogManager.getLogger(this.getClass)
  val kuduCluster = new KuduMiniCluster()

  val metrics: Seq[Try[Event]] = Range(0,100).map(x => Success( Event(
    version = 1L,
    id = x + "metric",
    ts = System.currentTimeMillis() + x ,
    event_type_id = 0,
    location = "41.1260529:16.8692905",
    source = "http://domain/sensor/url",
    body = Option("""{"rowdata": "this json should contain row data"}""".getBytes()),
    event_subtype_id = Some("Via Cernaia(TO)"),
    attributes = Map("value" -> x.toString)
  )))

  val rdd = kuduCluster.sparkSession.sparkContext.parallelize(metrics)


  "KuduEventsHandler" should "store correctly data" in {

   val metricsRDD = rdd
      .map(event => EventToStorableEvent(event))
      .flatMap(e => e.toOption)
      .map(se => EventToKuduEvent(se)).flatMap(e => e.toOption)

    val metricsDF = kuduCluster.sparkSession.createDataFrame(metricsRDD)

    val kuduConfig = KuduConfig(kuduCluster.kuduMiniCluster.getMasterAddresses, "TestEvents", 2)

    KuduEventsHandler.getOrCreateTable(kuduCluster.kuduContext, kuduConfig)
    KuduEventsHandler.write(metricsDF, kuduCluster.kuduContext, kuduConfig)

    val df = kuduCluster.sparkSession.sqlContext
      .read
      .options(Map("kudu.master" -> kuduConfig.masterAdresses,"kudu.table" -> kuduConfig.eventsTableName))
      .kudu

    df.count shouldBe 100

  }

  "KuduEventsHandler" should "handle redundant data" in {

    val metricsRDD = rdd
      .map(event => EventToStorableEvent(event))
      .flatMap(e => e.toOption)
      .map(se => EventToKuduEvent(se))
      .flatMap(e => e.toOption)

    val metricsDF = kuduCluster.sparkSession.createDataFrame(metricsRDD)

    val kuduConfig = KuduConfig(kuduCluster.kuduMiniCluster.getMasterAddresses, "TestEventsDuplicate", 2)
    KuduEventsHandler.getOrCreateTable(kuduCluster.kuduContext, kuduConfig)

    KuduEventsHandler.write(metricsDF, kuduCluster.kuduContext, kuduConfig)
    KuduEventsHandler.write(metricsDF, kuduCluster.kuduContext, kuduConfig)

    val df = kuduCluster.sparkSession.sqlContext
      .read
      .options(Map("kudu.master" -> kuduConfig.masterAdresses,"kudu.table" -> kuduConfig.eventsTableName))
      .kudu

    df.count shouldBe 100

  }

  override def beforeAll() {
    kuduCluster.start()
  }

  override def afterAll() {
    kuduCluster.close()
  }

} 
Example 74
Source File: EmbeddedKafkaUnavailableSpec.scala    From scalatest-embedded-kafka   with MIT License 5 votes vote down vote up
package net.manub.embeddedkafka

import org.apache.kafka.common.serialization.StringSerializer
import org.scalatest.BeforeAndAfterAll
import org.scalatest.tagobjects.Slow

class EmbeddedKafkaUnavailableSpec
    extends EmbeddedKafkaSpecSupport
    with EmbeddedKafka
    with BeforeAndAfterAll {

  "the publishToKafka method" should {
    "throw a KafkaUnavailableException when Kafka is unavailable when trying to publish" in {
      a[KafkaUnavailableException] shouldBe thrownBy {
        implicit val serializer = new StringSerializer()
        publishToKafka("non_existing_topic", "a message")
      }
    }
  }

  "the consumeFirstStringMessageFrom method" should {
    "throw a KafkaUnavailableException when there's no running instance of Kafka" taggedAs Slow ignore {
      // TODO: This test is *really* slow. The request.max.timeout.ms in the underlying consumer should be changed.
      a[KafkaUnavailableException] shouldBe thrownBy {
        consumeFirstStringMessageFrom("non_existing_topic")
      }
    }
  }
} 
Example 75
Source File: MLlibTestSparkContext.scala    From spark-tfocs   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.mllib.util

import org.scalatest.{ BeforeAndAfterAll, Suite }

import org.apache.spark.{ SparkConf, SparkContext }
import org.apache.spark.sql.SQLContext


trait MLlibTestSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _
  @transient var sqlContext: SQLContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("MLlibUnitTest")
    sc = new SparkContext(conf)
    sc.setLogLevel("WARN")
    sqlContext = new SQLContext(sc)
  }

  override def afterAll() {
    sqlContext = null
    if (sc != null) {
      sc.stop()
    }
    sc = null
    super.afterAll()
  }
} 
Example 76
Source File: MakingNestedTableTest.scala    From SparkUnitTestingExamples   with Apache License 2.0 5 votes vote down vote up
package com.cloudera.sa.spark.unittest.sql

import org.apache.spark.sql.Row
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.hive.HiveContext
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSuite}

object MakingNestedTableTest   extends FunSuite with
  BeforeAndAfterEach with BeforeAndAfterAll {

  @transient var sc: SparkContext = null
  @transient var hiveContext: HiveContext = null

  override def beforeAll(): Unit = {

    val envMap = Map[String, String](("Xmx", "512m"))

    val sparkConfig = new SparkConf()
    sparkConfig.set("spark.broadcast.compress", "false")
    sparkConfig.set("spark.shuffle.compress", "false")
    sparkConfig.set("spark.shuffle.spill.compress", "false")
    sparkConfig.set("spark.io.compression.codec", "lzf")
    sc = new SparkContext("local[2]", "unit test", sparkConfig)
    hiveContext = new HiveContext(sc)
  }

  override def afterAll(): Unit = {
    sc.stop()
  }

  test("Test table creation and summing of counts") {

    val loanRDD = sc.parallelize(Seq(Row("100", "100000000"),
                                      Row("101", "100000000"),
                                      Row("102", "100000000")))

    val partiesRDD = sc.parallelize(Seq(Row("100", "ted"),
      Row("101", "bob", "42"),
      Row("101", "cat", "42"),
      Row("102", "Jen", "42"),
      Row("102", "Jenny", "42"),
      Row("102", "Ed", "42")))

    //loan
    hiveContext.sql("create table loan (id string, amount string) as parquet")
    val emptyLoanDF = hiveContext.sql("select * from loan limit 0;")
    val loanDF = hiveContext.createDataFrame(loanRDD, emptyLoanDF.schema)
    loanDF.registerTempTable("loanTmp")
    hiveContext.sql("insert into loan select * from loanTmp")

    //parties
    hiveContext.sql("create table party (loan_id string, name string, age string) as parquet")
    val emptyPartyDF = hiveContext.sql("select * from party limit 0;")
    val partyDF = hiveContext.createDataFrame(partiesRDD, emptyPartyDF.schema)
    partyDF.registerTempTable("partyTmp")
    hiveContext.sql("insert into party select * from partyTmp")

    val keyValueParty = hiveContext.sql("select * from party").map(r => {
      //Key Value
      (r.getString(r.fieldIndex("loan_id")), Seq(r))
    }).reduceByKey((a, b) => {
      a ++ b
    })

    val keyValueLoan = hiveContext.sql("select * from loan").map(r => {
      //Key Value
      (r.getString(r.fieldIndex("id")), r.getString(r.fieldIndex("amount")))
    })

    val nestedRDD = keyValueLoan.join(keyValueParty).map(r => {
      val loanId = r._1
      val loanAmount = r._2._1
      val seqOfParties = r._2._2.map(r => {
        Row(r.getString(r.fieldIndex("name")),
        r.getString(r.fieldIndex("age")))
      })

      Row(loanId, loanAmount, seqOfParties)
    })

    hiveContext.sql("create table nested (" +
      "loan_id string, " +
      "amount string, " +
      "party <array<struct<" +
      "  name: String," +
      "  age: String>>" +
      ") as parquet")

    val emptyNestedDF = hiveContext.sql("select * from nested limit 0;")
    val nestedDF = hiveContext.createDataFrame(nestedRDD, emptyNestedDF.schema)
    nestedDF.registerTempTable("nestedTmp")
    hiveContext.sql("insert into nested select * from nestedTmp")


  }
} 
Example 77
Source File: StreamingUnitTest.scala    From SparkUnitTestingExamples   with Apache License 2.0 5 votes vote down vote up
package com.cloudera.sa.spark.unittest.streaming

import org.apache.spark.rdd.RDD
import org.apache.spark.streaming._
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSuite}

import scala.collection.mutable.Queue

class StreamingUnitTest extends FunSuite with
BeforeAndAfterEach with BeforeAndAfterAll{

  @transient var sc: SparkContext = null
  @transient var ssc: StreamingContext = null

  override def beforeAll(): Unit = {

    val envMap = Map[String,String](("Xmx", "512m"))

    val sparkConfig = new SparkConf()
    sparkConfig.set("spark.broadcast.compress", "false")
    sparkConfig.set("spark.shuffle.compress", "false")
    sparkConfig.set("spark.shuffle.spill.compress", "false")
    sparkConfig.set("spark.io.compression.codec", "lzf")
    sc = new SparkContext("local[2]", "unit test", sparkConfig)
    ssc = new StreamingContext(sc, Milliseconds(200))
  }

  override def afterAll(): Unit = {
    sc.stop()
  }

  test("Streaming word count") {

    val firstBatchRDD = sc.parallelize(Seq("a", "b", "c"))
    val secondBatchRDD = sc.parallelize(Seq("a", "e"))
    val thirdBatchRDD = sc.parallelize(Seq("b", "c", "e", "f"))
    val forthBatchRDD = sc.parallelize(Seq("a", "e"))

    val queue = new Queue[RDD[String]]

    queue.+=(firstBatchRDD)
    queue.+=(secondBatchRDD)
    queue.+=(thirdBatchRDD)
    queue.+=(forthBatchRDD)

    println(queue)

    val startTime = System.currentTimeMillis()

    val dstream = new TestableQueueInputDStream(ssc, queue, true, sc.makeRDD(Seq[String](), 1))
    //ssc.queueStream(queue)

    dstream.checkpoint(Seconds(100))

    val batchTotals:DStream[(String, Int)] = dstream.map(r => (r, 1)).reduceByKey(_ + _)

    val streamTotals = batchTotals.updateStateByKey(
      (seq:Seq[Int], opt:Option[Int]) => {
        if (!seq.isEmpty) {
          val totalCountForNew = seq.reduce(_ + _)
          if (opt.isEmpty) {
            Option(totalCountForNew)
          } else {
            Option(opt.get + totalCountForNew)
          }
        } else {
          opt
        }
    })

    streamTotals.foreachRDD(rdd => {

    })

    ssc.checkpoint("./tmp")
    ssc.start()
    ssc.awaitTerminationOrTimeout(2000)

    val endTime = System.currentTimeMillis()

    val rddList = streamTotals.slice(new Time(startTime), new Time(endTime))

    rddList(0).collect().foreach(println)
    assert(rddList(0).collect().filter(r => r._1.equals("a"))(0)._2 == 1)
    rddList(1).collect().foreach(println)
    assert(rddList(1).collect().filter(r => r._1.equals("a"))(0)._2  == 2)
    rddList(2).collect().foreach(println)
    assert(rddList(2).collect().filter(r => r._1.equals("a"))(0)._2  == 2)
    rddList(3).collect().foreach(println)
    assert(rddList(3).collect().filter(r => r._1.equals("a"))(0)._2  == 3)
  }
} 
Example 78
Source File: CoreUnitTest.scala    From SparkUnitTestingExamples   with Apache License 2.0 5 votes vote down vote up
package com.cloudera.sa.spark.unittest.core

import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSuite}

import scala.collection.mutable

class CoreUnitTest extends FunSuite with
BeforeAndAfterEach with BeforeAndAfterAll{

  @transient var sc: SparkContext = null

  override def beforeAll(): Unit = {

    val envMap = Map[String,String](("Xmx", "512m"))

    val sparkConfig = new SparkConf()
    sparkConfig.set("spark.broadcast.compress", "false")
    sparkConfig.set("spark.shuffle.compress", "false")
    sparkConfig.set("spark.shuffle.spill.compress", "false")
    sparkConfig.set("spark.io.compression.codec", "lzf")
    sc = new SparkContext("local[2]", "unit test", sparkConfig)
  }

  override def afterAll(): Unit = {
    sc.stop()
  }

  test("Test word count") {
    val quotesRDD = sc.parallelize(Seq("Courage is not simply one of the virtues, but the form of every virtue at the testing point",
      "We have a very active testing community which people don't often think about when you have open source",
      "Program testing can be used to show the presence of bugs, but never to show their absence",
      "Simple systems are not feasible because they require infinite testing",
      "Testing leads to failure, and failure leads to understanding"))

    val wordCountRDD = quotesRDD.flatMap(r => r.split(' ')).
      map(r => (r.toLowerCase, 1)).
      reduceByKey((a,b) => a + b)

    val wordMap = new mutable.HashMap[String, Int]()
    wordCountRDD.take(100).
      foreach{case(word, count) => wordMap.put(word, count)}
    //Note this is better than foreach(r => wordMap.put(r._1, r._2))

    assert(wordMap.get("to").get == 4, "The word count for 'to' should had been 4 but it was " + wordMap.get("to").get)
    assert(wordMap.get("testing").get == 5, "The word count for 'testing' should had been 5 but it was " + wordMap.get("testing").get)
    assert(wordMap.get("is").get == 1, "The word count for 'is' should had been 1 but it was " + wordMap.get("is").get)
  }
} 
Example 79
Source File: BaseSolrCloudTest.scala    From pulse   with Apache License 2.0 5 votes vote down vote up
package io.phdata.pulse.solr

import java.io.File

import org.apache.solr.client.solrj.impl.{ CloudSolrClient, ZkClientClusterStateProvider }
import org.apache.solr.client.solrj.response.UpdateResponse
import org.apache.solr.common.SolrInputDocument
import org.scalatest.{ BeforeAndAfterAll, FunSuite }

trait BaseSolrCloudTest extends FunSuite with BeforeAndAfterAll {
  val TEST_CONF_NAME              = "testconf"
  val miniSolrCloudCluster        = TestUtil.miniSolrCloudCluster()
  val solrClient: CloudSolrClient = miniSolrCloudCluster.getSolrClient

  def solrService =
    SolrProvider.fromClient(List(miniSolrCloudCluster.getZkServer.getZkAddress), solrClient)

  def addDocument(document: Map[String, String]): UpdateResponse =
    solrClient.add(mapToSolrDocument(document))

  def mapToSolrDocument(event: Map[String, _]): SolrInputDocument = {
    val doc = new SolrInputDocument()
    event.foreach(field => doc.addField(field._1, field._2))
    doc
  }

  override def beforeAll(): Unit = {
    val zkProvider = new ZkClientClusterStateProvider(solrClient.getZkStateReader)

    zkProvider.uploadConfig(
      new File(System.getProperty("user.dir") + "/test-config/solr7/conf").toPath,
      TEST_CONF_NAME)
  }
} 
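A minimal sketch of a suite built on this trait; the collection name is an assumption, since the trait uploads the config set but leaves collection creation to the suite:

import org.apache.solr.client.solrj.SolrQuery

class DocumentIndexSuite extends BaseSolrCloudTest {
  test("an added document can be queried back") {
    solrClient.setDefaultCollection("test_collection") // assumed, pre-created collection
    addDocument(Map("id" -> "1", "level" -> "ERROR"))
    solrClient.commit()
    val response = solrClient.query(new SolrQuery("level:ERROR"))
    assert(response.getResults.getNumFound >= 1)
  }
}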
Example 80
Source File: BaseSolrCloudTest.scala    From pulse   with Apache License 2.0 5 votes vote down vote up
package io.phdata.pulse.solr

import java.io.File

import io.phdata.pulse.common.SolrServiceImpl
import org.apache.solr.client.solrj.impl.CloudSolrServer
import org.apache.solr.client.solrj.response.UpdateResponse
import org.apache.solr.cloud.MiniSolrCloudCluster
import org.apache.solr.common.SolrInputDocument
import org.scalatest.{ BeforeAndAfterAll, FunSuite }

trait BaseSolrCloudTest extends FunSuite with BeforeAndAfterAll {
  val TEST_CONF_NAME                             = "testconf"
  val miniSolrCloudCluster: MiniSolrCloudCluster = TestUtil.miniSolrCloudCluster()
  val solrClient: CloudSolrServer                = miniSolrCloudCluster.getSolrClient

  override def beforeAll(): Unit =
    miniSolrCloudCluster.getSolrClient.uploadConfig(
      new File(System.getProperty("user.dir") + "/test-config/solr4/conf").toPath,
      TEST_CONF_NAME)

  def addDocument(document: Map[String, String]): UpdateResponse =
    solrClient.add(mapToSolrDocument(document))

  def solrService: SolrServiceImpl =
    SolrProvider.fromClient(List(miniSolrCloudCluster.getZkServer.getZkAddress), solrClient)

  def mapToSolrDocument(event: Map[String, _]): SolrInputDocument = {
    val doc = new SolrInputDocument()
    event.foreach(field => doc.addField(field._1, field._2))
    doc
  }
} 
Example 81
Source File: SqlAlertTriggerTest.scala    From pulse   with Apache License 2.0 5 votes vote down vote up
package io.phdata.pulse.alertengine.trigger

import java.sql.{ DriverManager, Statement }

import io.phdata.pulse.alertengine.{ AlertsDb, TestObjectGenerator }
import io.phdata.pulse.solr.TestUtil
import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach, FunSuite }

class SqlAlertTriggerTest extends FunSuite with BeforeAndAfterEach with BeforeAndAfterAll {
  private val applicationName: String = "sql_test_" + TestUtil.randomIdentifier()
  private val dbUrl                   = s"jdbc:h2:mem:$applicationName;DB_CLOSE_DELAY=-1"

  override def beforeEach(): Unit = {
    super.beforeEach()
    AlertsDb.reset()
    prepareDatabase()
  }

  override def afterAll(): Unit =
    withStatement(statement => statement.execute("DROP ALL OBJECTS DELETE FILES;"))

  private def withStatement(function: Statement => Unit): Unit = {
    val connection = DriverManager.getConnection(dbUrl)
    try {
      val statement = connection.createStatement()
      try {
        function.apply(statement)
      } finally {
        statement.close()
      }
    } finally {
      connection.close()
    }
  }

  private def prepareDatabase(): Unit =
    withStatement { statement =>
      statement.execute("DROP ALL OBJECTS DELETE FILES;")
      statement.execute(s"""CREATE TABLE $applicationName (
           |id int not null,
           |error boolean not null,
           |message varchar(255) not null,
           |);""".stripMargin)
    }

  test("query returns matching documents") {
    withStatement { statement =>
      statement.execute(s"""INSERT INTO $applicationName (id, error, message) VALUES
           |(1, true, 'sad'),
           |(3, true, 'very sad'),
           |(2, false, 'happy');""".stripMargin)
    }
    val alertRule =
      TestObjectGenerator.alertRule(
        query = s"""select * from $applicationName
           |where error = true
           |order by id""".stripMargin,
        retryInterval = 1,
        resultThreshold = Some(1),
        alertProfiles = List("[email protected]")
      )
    val expectedDocuments = Seq(
      Map("id" -> 1, "error" -> true, "message" -> "sad"),
      Map("id" -> 3, "error" -> true, "message" -> "very sad")
    )

    val trigger = new SqlAlertTrigger(dbUrl)
    val result  = trigger.query(applicationName, alertRule)
    assertResult(expectedDocuments)(result)
  }

  test("query returns no documents") {
    val alertRule = TestObjectGenerator.alertRule(query = s"select * from $applicationName")

    val trigger = new SqlAlertTrigger(dbUrl)
    assertResult(Seq.empty)(trigger.query(applicationName, alertRule))
  }

  test("invalid query") {
    val alertRule = TestObjectGenerator.alertRule()

    val trigger = new SqlAlertTrigger(dbUrl)
    assertThrows[Exception](trigger.query(applicationName, alertRule))
  }

  test("connection with options") {
    val alertRule = TestObjectGenerator.alertRule(query = s"select * from $applicationName")

    val trigger = new SqlAlertTrigger(dbUrl, dbOptions = Map("hello" -> "stuff"))
    trigger.query(applicationName, alertRule)
  }

  test("dbUrl null") {
    assertThrows[IllegalArgumentException](new SqlAlertTrigger(null))
  }

  test("dbUrl empty") {
    assertThrows[IllegalArgumentException](new SqlAlertTrigger(""))
  }

} 
Example 82
Source File: LiigaJournalistSpec.scala    From avoin-voitto   with MIT License 5 votes vote down vote up
package liigavoitto.journalist

import org.scalatest.{BeforeAndAfterAll, MustMatchers, WordSpecLike}

import scala.util.Try

class LiigaJournalistSpec
  extends WordSpecLike
  with TestUtils
  with BeforeAndAfterAll
  with MustMatchers
  with MockData {

  "LiigaJournalist" must {
    "create an article with language" in {
      val data = md

      val finnishRes = LiigaJournalist.createArticle(data, "fi")
      assert(finnishRes.isDefined)
      assert(finnishRes.get.language == "fi")

      val swedishRes = LiigaJournalist.createArticle(data, "sv")
      assert(swedishRes.isDefined)
      assert(swedishRes.get.language == "sv")
    }
  }
} 
Example 83
Source File: ServerSpec.scala    From seals   with Apache License 2.0 5 votes vote down vote up
package com.example.server

import java.util.concurrent.Executors

import scala.concurrent.ExecutionContext

import cats.effect.{ IO, Blocker, ContextShift }

import org.scalatest.{ FlatSpec, Matchers, BeforeAndAfterAll }

import fs2.{ Stream, Chunk }

import scodec.bits._
import scodec.Codec

import dev.tauri.seals.scodec.Codecs._

import com.example.proto._

class ServerSpec extends FlatSpec with Matchers with BeforeAndAfterAll {

  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  val ex = Executors.newCachedThreadPool()
  val ec = ExecutionContext.fromExecutor(ex)
  val bl = Blocker.liftExecutionContext(ec)
  val (sg, closeSg) = fs2.io.tcp.SocketGroup[IO](bl).allocated.unsafeRunSync()

  override def afterAll(): Unit = {
    super.afterAll()
    closeSg.unsafeRunSync()
    ex.shutdown()
  }

  "Server" should "respond to a request" in {
    val responses: Vector[Response] = Stream(
      Server.serve(Server.port, sg).drain,
      client(Server.port)
    ).parJoin(Int.MaxValue).take(1L).compile.toVector.unsafeRunSync()
    responses should === (Vector(Ok))
  }

  def client(port: Int): Stream[IO, Response] = {
    Stream.resource(sg.client[IO](Server.addr(port))).flatMap { socket =>
      val bvs: Stream[IO, BitVector] = Stream(Codec[Request].encode(ReSeed(56)).require)
      val bs: Stream[IO, Byte] = bvs.flatMap { bv =>
        Stream.chunk(Chunk.bytes(bv.bytes.toArray))
      }
      val read = bs.through(socket.writes(Server.timeout)).drain.onFinalize(socket.endOfOutput) ++
        socket.reads(Server.bufferSize, Server.timeout).chunks.map(ch => BitVector.view(ch.toArray))
      read.fold(BitVector.empty)(_ ++ _).map(bv => Codec[Response].decode(bv).require.value)
    }
  }
} 
Example 84
Source File: CassandraServerSpecLike.scala    From Spark2Cassandra   with Apache License 2.0 5 votes vote down vote up
package com.github.jparkie.spark.cassandra

import java.net.{ InetAddress, InetSocketAddress }

import com.datastax.driver.core.Session
import com.datastax.spark.connector.cql.CassandraConnector
import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.scalatest.{ BeforeAndAfterAll, Suite }

trait CassandraServerSpecLike extends BeforeAndAfterAll { this: Suite =>
  // Remove protected modifier because of SharedSparkContext.
  override def beforeAll(): Unit = {
    super.beforeAll()

    EmbeddedCassandraServerHelper.startEmbeddedCassandra()
  }

  // Remove protected modifier because of SharedSparkContext.
  override def afterAll(): Unit = {
    EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()

    super.afterAll()
  }

  def getClusterName: String = {
    EmbeddedCassandraServerHelper.getClusterName
  }

  def getHosts: Set[InetAddress] = {
    val temporaryAddress =
      new InetSocketAddress(EmbeddedCassandraServerHelper.getHost, EmbeddedCassandraServerHelper.getNativeTransportPort)
        .getAddress

    Set(temporaryAddress)
  }

  def getNativeTransportPort: Int = {
    EmbeddedCassandraServerHelper.getNativeTransportPort
  }

  def getRpcPort: Int = {
    EmbeddedCassandraServerHelper.getRpcPort
  }

  def getCassandraConnector: CassandraConnector = {
    CassandraConnector(hosts = getHosts, port = getNativeTransportPort)
  }

  def createKeyspace(session: Session, keyspace: String): Unit = {
    session.execute(
      s"""CREATE KEYSPACE "$keyspace"
          |WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };
       """.stripMargin
    )
  }
} 
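A minimal sketch of how a concrete suite might mix in CassandraServerSpecLike; the suite name and keyspace are illustrative assumptions, not part of the original project:

import org.scalatest.FunSuite

class EmbeddedCassandraSpec extends FunSuite with CassandraServerSpecLike {
  test("creates a keyspace on the embedded server") {
    // withSessionDo borrows a session from the connector and releases it afterwards.
    getCassandraConnector.withSessionDo { session =>
      createKeyspace(session, "test_ks")
    }
  }
}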
Example 85
Source File: GenericTestSpec.scala    From spark-ml-serving   with Apache License 2.0 5 votes vote down vote up
package io.hydrosphere.spark_ml_serving

import io.hydrosphere.spark_ml_serving.common.LocalData
import org.apache.spark.SparkConf
import org.apache.spark.ml.linalg.{Matrix, Vector}
import org.apache.spark.mllib.linalg.{Matrix => OldMatrix, Vector => OldVector}
import org.apache.spark.ml.{Pipeline, PipelineStage}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.scalatest.{BeforeAndAfterAll, FunSpec}

trait GenericTestSpec extends FunSpec with BeforeAndAfterAll {
  val conf = new SparkConf()
    .setMaster("local[2]")
    .setAppName("test")
    .set("spark.ui.enabled", "false")

  val session: SparkSession = SparkSession.builder().config(conf).getOrCreate()

  def modelPath(modelName: String): String = s"./target/test_models/${session.version}/$modelName"

  def test(
    name: String,
    data: => DataFrame,
    steps: => Seq[PipelineStage],
    columns: => Seq[String],
    accuracy: Double = 0.01
  ) = {
    val path = modelPath(name.toLowerCase())
    var validation = LocalData.empty
    var localPipelineModel = Option.empty[LocalPipelineModel]

    it("should train") {
      val pipeline = new Pipeline().setStages(steps.toArray)
      val pipelineModel = pipeline.fit(data)
      validation = LocalData.fromDataFrame(pipelineModel.transform(data))
      pipelineModel.write.overwrite().save(path)
    }

    it("should load local version") {
      localPipelineModel = Some(LocalPipelineModel.load(path))
      assert(localPipelineModel.isDefined)
    }

    it("should transform LocalData") {
      val localData = LocalData.fromDataFrame(data)
      val model = localPipelineModel.get
      val result = model.transform(localData)
      columns.foreach { col =>
        val resCol = result
          .column(col)
          .getOrElse(throw new IllegalArgumentException("Result column is absent"))
        val valCol = validation
          .column(col)
          .getOrElse(throw new IllegalArgumentException("Validation column is absent"))
        resCol.data.zip(valCol.data).foreach {
          case (r: Seq[Number @unchecked], v: Seq[Number @unchecked]) if r.head.isInstanceOf[Number] && v.head.isInstanceOf[Number] =>
            r.zip(v).foreach {
              case (ri, vi) =>
                assert(ri.doubleValue() - vi.doubleValue() <= accuracy, s"$ri - $vi > $accuracy")
            }
          case (r: Number, v: Number) =>
            assert(r.doubleValue() - v.doubleValue() <= accuracy, s"$r - $v > $accuracy")
          case (r, n) =>
            assert(r === n)
        }
        result.column(col).foreach { resData =>
          resData.data.foreach { resRow =>
            if (resRow.isInstanceOf[Seq[_]]) {
              assert(resRow.isInstanceOf[List[_]], resRow)
            } else if (resRow.isInstanceOf[Vector] || resRow.isInstanceOf[OldVector] || resRow
              .isInstanceOf[Matrix] || resRow.isInstanceOf[OldMatrix]) {
              assert(false, s"SparkML type detected. Column: $col, value: $resRow")
            }
          }
        }
      }
    }
  }

  def modelTest(
    data: => DataFrame,
    steps: => Seq[PipelineStage],
    columns: => Seq[String],
    accuracy: Double = 0.01
  ): Unit = {
    lazy val name = steps.map(_.getClass.getSimpleName).foldLeft("") {
      case ("", b) => b
      case (a, b) => a + "-" + b
    }

    describe(name) {
      test(name, data, steps, columns, accuracy)
    }
  }
} 
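A sketch of how a concrete suite might drive the modelTest helper above; the DataFrame contents and the StandardScaler stage are illustrative assumptions:

import org.apache.spark.ml.feature.StandardScaler
import org.apache.spark.ml.linalg.Vectors

class StandardScalerSpec extends GenericTestSpec {
  import session.implicits._

  // Generates a describe block named after the stage classes, containing
  // the train / load / transform tests defined in GenericTestSpec.
  modelTest(
    data = Seq((0, Vectors.dense(1.0, 2.0)), (1, Vectors.dense(3.0, 4.0))).toDF("id", "features"),
    steps = Seq(new StandardScaler().setInputCol("features").setOutputCol("scaled")),
    columns = Seq("scaled")
  )
}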
Example 86
Source File: TaskRepositoryOnRDBSpec.scala    From ddd-on-scala   with MIT License 5 votes vote down vote up
package crossroad0201.dddonscala.infrastructure.task

import crossroad0201.dddonscala.adapter.infrastructure.rdb.ScalikeJdbcSessionHolder
import crossroad0201.dddonscala.adapter.infrastructure.rdb.task.TaskRepositoryOnRDB
import crossroad0201.dddonscala.domain.UnitOfWork
import crossroad0201.dddonscala.domain.task._
import crossroad0201.dddonscala.domain.user.UserId
import org.scalatest.{BeforeAndAfterAll, GivenWhenThen, Inside, Matchers}
import org.scalatest.fixture.WordSpec
import scalikejdbc.scalatest.AutoRollback
import scalikejdbc._
import scalikejdbc.config.DBs

import scala.util.{Failure, Success}

class TaskRepositoryOnRDBSpec
    extends WordSpec
    with GivenWhenThen
    with Matchers
    with Inside
    with BeforeAndAfterAll
    with AutoRollback {

  override protected def beforeAll() = DBs.setupAll

  override protected def afterAll() = DBs.closeAll

  override def fixture(implicit session: DBSession) {
    sql"""INSERT INTO tasks VALUES ('TESTTASK001', 'テストタスク1', 'OPENED', 'USER001', NULL, 1)""".update.apply

    sql"""INSERT INTO tasks VALUES ('TESTTASK002', 'テストタスク2', 'CLOSED', 'USER001', 'USER002', 1)""".update.apply
    sql"""INSERT INTO task_comments VALUES (1, 'TESTTASK002', 'ひとつめのコメント', 'USER001')""".update.apply
    sql"""INSERT INTO task_comments VALUES (2, 'TESTTASK002', 'ふたつめのコメント', 'USER002')""".update.apply
  }

  "get" when {
    "タスクが存在する" should {
      "タスクが返される" in { implicit dbs =>
        new WithFixture {
          Given("存在するタスクID")
          val taskId = "TESTTASK002"

          Then("タスクを取得する")
          val actual = get(TaskId(taskId))
          println(s"Actual: $actual")

          When("タスクが返される")
          inside(actual) {
            case Success(Some(aTask)) =>
              aTask.id should be(TaskId("TESTTASK002"))
              aTask.name should be(TaskName("テストタスク2"))
              aTask.state should be(TaskState.Closed)
              aTask.authorId should be(UserId("USER001"))
              aTask.assignment should be(Assigned(UserId("USER002")))
          }
        }
      }
    }
  }

  trait WithFixture extends TaskRepositoryOnRDB {
    implicit def dbSessionAsUnitOfWork(implicit dbs: DBSession): UnitOfWork =
      new UnitOfWork with ScalikeJdbcSessionHolder {
        override val dbSession = dbs
      }
  }

} 
Example 87
Source File: BytesUtilsSuite.scala    From Backup-Repo   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hbase

import org.apache.spark.Logging
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.sql.types._
import org.apache.spark.sql.hbase.types.HBaseBytesType
import org.apache.spark.sql.hbase.util.BytesUtils
import org.scalatest.{BeforeAndAfterAll, FunSuite}

class BytesUtilsSuite extends FunSuite with BeforeAndAfterAll with Logging {
  test("Bytes Ordering Test") {
    val s = Seq(-257, -256, -255, -129, -128, -127, -64, -16, -4, -1,
      0, 1, 4, 16, 64, 127, 128, 129, 255, 256, 257)
    val result = s.map(i => (i, BytesUtils.create(IntegerType).toBytes(i)))
      .sortWith((f, s) =>
      HBaseBytesType.ordering.gt(
        f._2.asInstanceOf[HBaseBytesType.InternalType],
        s._2.asInstanceOf[HBaseBytesType.InternalType]))
    assert(result.map(a => a._1) == s.sorted.reverse)
  }

  def compare(a: Array[Byte], b: Array[Byte]): Int = {
    // Lexicographic comparison of unsigned byte values: the first differing
    // byte decides the ordering, and masking with & 0xff must stay in Int
    // arithmetic (casting back to Byte would reintroduce signed values).
    val length = Math.min(a.length, b.length)
    for (i <- 0 until length) {
      val diff = (a(i) & 0xff) - (b(i) & 0xff)
      if (diff != 0) {
        return diff
      }
    }
    0
  }

  test("Bytes Utility Test") {
    assert(BytesUtils.toBoolean(BytesUtils.create(BooleanType)
      .toBytes(input = true), 0) === true)
    assert(BytesUtils.toBoolean(BytesUtils.create(BooleanType)
      .toBytes(input = false), 0) === false)

    assert(BytesUtils.toDouble(BytesUtils.create(DoubleType).toBytes(12.34d), 0)
      === 12.34d)
    assert(BytesUtils.toDouble(BytesUtils.create(DoubleType).toBytes(-12.34d), 0)
      === -12.34d)

    assert(BytesUtils.toFloat(BytesUtils.create(FloatType).toBytes(12.34f), 0)
      === 12.34f)
    assert(BytesUtils.toFloat(BytesUtils.create(FloatType).toBytes(-12.34f), 0)
      === -12.34f)

    assert(BytesUtils.toInt(BytesUtils.create(IntegerType).toBytes(12), 0)
      === 12)
    assert(BytesUtils.toInt(BytesUtils.create(IntegerType).toBytes(-12), 0)
      === -12)

    assert(BytesUtils.toLong(BytesUtils.create(LongType).toBytes(1234L), 0)
      === 1234L)
    assert(BytesUtils.toLong(BytesUtils.create(LongType).toBytes(-1234L), 0)
      === -1234L)

    assert(BytesUtils.toShort(BytesUtils.create(ShortType)
      .toBytes(12.asInstanceOf[Short]), 0) === 12)
    assert(BytesUtils.toShort(BytesUtils.create(ShortType)
      .toBytes(-12.asInstanceOf[Short]), 0) === -12)

    assert(BytesUtils.toUTF8String(BytesUtils.create(StringType).toBytes("abc"), 0, 3)
      === UTF8String("abc"))
    assert(BytesUtils.toUTF8String(BytesUtils.create(StringType).toBytes(""), 0, 0) === UTF8String(""))

    assert(BytesUtils.toByte(BytesUtils.create(ByteType)
      .toBytes(5.asInstanceOf[Byte]), 0) === 5)
    assert(BytesUtils.toByte(BytesUtils.create(ByteType)
      .toBytes(-5.asInstanceOf[Byte]), 0) === -5)

    assert(compare(BytesUtils.create(IntegerType).toBytes(128),
      BytesUtils.create(IntegerType).toBytes(-128)) > 0)
  }

  test("byte array plus one") {
    var byteArray = Array[Byte](0x01.toByte, 127.toByte)
    assert(Bytes.compareTo(BytesUtils.addOne(byteArray), Array[Byte](0x01.toByte, 0x80.toByte)) == 0)

    byteArray = Array[Byte](0xff.toByte, 0xff.toByte)
    assert(BytesUtils.addOne(byteArray) == null)

    byteArray = Array[Byte](0x02.toByte, 0xff.toByte)
    assert(Bytes.compareTo(BytesUtils.addOne(byteArray), Array[Byte](0x03.toByte, 0x00.toByte)) == 0)
  }

  test("float comparison") {
    val f1 = BytesUtils.create(FloatType).toBytes(-1.23f)
    val f2 = BytesUtils.create(FloatType).toBytes(100f)
    assert(Bytes.compareTo(f1, f2) < 0)
  }
} 
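As a quick worked check of the unsigned comparison in compare above: the byte 0x80 is -128 as a signed Byte but 128 after masking with & 0xff, so (0x80 & 0xff) - (0x7f & 0xff) = 128 - 127 = 1 > 0, which is why a byte-wise unsigned compare orders 0x80 above 0x7f even though -128 < 127 as signed values.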
Example 88
Source File: AmqpSubscriberPerfSpec.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.transport.amqp

import akka.Done
import akka.actor.{Actor, ActorSystem, Props}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.testkit.{TestKit, TestProbe}
import dispatch.url
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Ignore}
import rhttpc.transport.{Deserializer, InboundQueueData, OutboundQueueData, Serializer}

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.{Random, Try}

@Ignore
class AmqpSubscriberPerfSpec extends TestKit(ActorSystem("AmqpSubscriberPerfSpec")) with FlatSpecLike with BeforeAndAfterAll {
  import system.dispatcher

  implicit val materializer = ActorMaterializer()

  implicit def serializer[Msg] = new Serializer[Msg] {
    override def serialize(obj: Msg): String = obj.toString
  }

  implicit def deserializer[Msg] = new Deserializer[Msg] {
    override def deserialize(value: String): Try[Msg] = Try(value.asInstanceOf[Msg])
  }

  val queueName = "request"
  val outboundQueueData = OutboundQueueData(queueName, autoDelete = true, durability = false)
  val inboundQueueData = InboundQueueData(queueName, batchSize = 10, parallelConsumers = 10, autoDelete = true, durability = false)
  val count = 100

  private val interface = "localhost"
  private val port = 8081

  def handle(request: HttpRequest) = {
    val delay = 5 + Random.nextInt(10)
    after(delay.seconds, system.scheduler)(Future.successful(HttpResponse()))
  }

  it should "have a good throughput" in {
    val bound = Await.result(
      Http().bindAndHandleAsync(
        handle, interface, port
      ),
      5.seconds
    )
    val http = dispatch.Http()
//      .configure(_.setMaxConnections(count)
//        .setExecutorService(Executors.newFixedThreadPool(count)))

    val connection = Await.result(AmqpConnectionFactory.connect(system), 5 seconds)
    val transport = AmqpTransport(
      connection = connection
    )
    val publisher = transport.publisher[String](outboundQueueData)
    val probe = TestProbe()
    val actor = system.actorOf(Props(new Actor {
      override def receive: Receive = {
        case str: String =>
          http(url(s"http://$interface:$port") OK identity).map(_ => Done).pipeTo(self)(sender())
        case Done =>
          probe.ref ! Done
          sender() ! Done
      }
    }))
    val subscriber = transport.subscriber[String](inboundQueueData, actor)
    subscriber.start()

    try {
      measureMeanThroughput(count) {
        (1 to count).foreach { _ => publisher.publish("x") }

        probe.receiveWhile(10 minutes, messages = count) { case a => a }
      }
    } finally {
      Await.result(subscriber.stop(), 5.seconds)
      connection.close(5 * 1000)
      Await.result(bound.unbind(), 5.seconds)
    }
  }

  def measureMeanThroughput(count: Int)(consume: => Unit) = {
    val before = System.currentTimeMillis()
    consume
    val msgsPerSecond = count / ((System.currentTimeMillis() - before).toDouble / 1000)
    println(s"Throughput was: $msgsPerSecond msgs/sec")
  }

  override protected def afterAll(): Unit = {
    shutdown()
  }
} 
Example 89
Source File: AvroParquetReaderFnTest.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.parquet

import java.util.UUID

import io.eels.component.avro.AvroSchemaFns
import io.eels.component.parquet.avro.AvroParquetReaderFn
import io.eels.schema.{DoubleType, Field, LongType, StructType}
import org.apache.avro.SchemaBuilder
import org.apache.avro.generic.{GenericData, GenericRecord}
import org.apache.avro.util.Utf8
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.parquet.avro.AvroParquetWriter
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpec}

class AvroParquetReaderFnTest extends WordSpec with Matchers with BeforeAndAfterAll {

  private implicit val conf = new Configuration()
  private implicit val fs = FileSystem.get(new Configuration())

  private val path = new Path(UUID.randomUUID().toString())

  override def afterAll(): Unit = {
    val fs = FileSystem.get(new Configuration())
    fs.delete(path, false)
  }

  private val avroSchema = SchemaBuilder.record("com.chuckle").fields()
    .requiredString("str").requiredLong("looong").requiredDouble("dooble").endRecord()

  private val writer = AvroParquetWriter.builder[GenericRecord](path)
    .withSchema(avroSchema)
    .build()

  private val record = new GenericData.Record(avroSchema)
  record.put("str", "wibble")
  record.put("looong", 999L)
  record.put("dooble", 12.34)
  writer.write(record)
  writer.close()

  val schema = StructType(Field("str"), Field("looong", LongType(true), true), Field("dooble", DoubleType, true))

  "AvroParquetReaderFn" should {
    "support projections on doubles" in {

      val reader = AvroParquetReaderFn(path, None, Option(AvroSchemaFns.toAvroSchema(schema.removeField("looong"))))
      val record = reader.read()
      reader.close()

      record.get("str").asInstanceOf[Utf8].toString shouldBe "wibble"
      record.get("dooble") shouldBe 12.34
    }
    "support projections on longs" in {

      val reader = AvroParquetReaderFn(path, None, Option(AvroSchemaFns.toAvroSchema(schema.removeField("str"))))
      val record = reader.read()
      reader.close()

      record.get("looong") shouldBe 999L
    }
    "support full projections" in {

      val reader = AvroParquetReaderFn(path, None, Option(AvroSchemaFns.toAvroSchema(schema)))
      val record = reader.read()
      reader.close()

      record.get("str").asInstanceOf[Utf8].toString shouldBe "wibble"
      record.get("looong") shouldBe 999L
      record.get("dooble") shouldBe 12.34

    }
    "support non projections" in {

      val reader = AvroParquetReaderFn(path, None, None)
      val group = reader.read()
      reader.close()

      group.get("str").asInstanceOf[Utf8].toString shouldBe "wibble"
      group.get("looong") shouldBe 999L
      group.get("dooble") shouldBe 12.34

    }
  }
} 
Example 90
Source File: HiveDynamicPartitionTest.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.hive

import java.io.File

import io.eels.component.hive.partition.DynamicPartitionStrategy
import io.eels.datastream.DataStream
import io.eels.schema.{Field, Partition, StructType}
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}

import scala.util.Try

class HiveDynamicPartitionTest extends FunSuite with Matchers with BeforeAndAfterAll {

  import HiveConfig._

  val dbname = HiveTestUtils.createTestDatabase
  val table = "dynp_test_" + System.currentTimeMillis()

  val schema = StructType(Field("a"), Field("b"))

  Try {
    HiveTable(dbname, table).create(schema, Seq("a"))
  }

  override def afterAll(): Unit = Try {
    HiveTable(dbname, table).drop()
  }

  test("dynamic partition strategy should create new partitions") {
    assume(new File(s"$basePath/core-site.xml").exists)
    HiveTable(dbname, table).partitionValues("a") shouldBe Set.empty
    DataStream.fromValues(schema, Seq(Seq("1", "2"), Seq("3", "4"))).to(HiveSink(dbname, table))
    HiveTable(dbname, table).partitionValues("a") shouldBe Set("1", "3")
  }

  test("skip partition if partition already exists") {
    assume(new File(s"$basePath/core-site.xml").exists)
    new DynamicPartitionStrategy().ensurePartition(Partition("a" -> "1"), dbname, table, false, client)
    new DynamicPartitionStrategy().ensurePartition(Partition("a" -> "1"), dbname, table, false, client)
  }
} 
Example 91
Source File: HivePartitionConstraintTest.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.hive

import java.io.File

import io.eels.datastream.DataStream
import io.eels.schema.{Field, PartitionConstraint, StringType, StructType}
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}

import scala.util.Try

class HivePartitionConstraintTest extends FunSuite with Matchers with BeforeAndAfterAll {

  import HiveConfig._

  val dbname = HiveTestUtils.createTestDatabase
  private val table = "constraints_test_" + System.currentTimeMillis()

  override def afterAll(): Unit = Try {
    HiveTable(dbname, table).drop()
  }

  val schema = StructType(
    Field("state", StringType),
    Field("city", StringType)
  )

  Try {
    DataStream.fromValues(schema, Seq(
      Seq("iowa", "des moines"),
      Seq("iowa", "iow city"),
      Seq("maine", "augusta")
    )).to(HiveSink(dbname, table).withCreateTable(true, Seq("state")))
  }

  test("hive source with partition constraint should return matching data") {
    assume(new File(s"$basePath/core-site.xml").exists)

    HiveSource(dbname, table)
      .addPartitionConstraint(PartitionConstraint.equals("state", "iowa"))
      .toDataStream()
      .collect.size shouldBe 2
  }

  test("hive source with non-existing partitions in constraint should return no data") {
    assume(new File(s"$basePath/core-site.xml").exists)

    HiveSource(dbname, table)
      .addPartitionConstraint(PartitionConstraint.equals("state", "pa"))
      .toDataStream()
      .collect.size shouldBe 0
  }
} 
Example 92
Source File: HiveStatsTest.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.hive

import java.io.File

import io.eels.Row
import io.eels.datastream.DataStream
import io.eels.schema._
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}

import scala.util.{Random, Try}

class HiveStatsTest extends FunSuite with Matchers with BeforeAndAfterAll {

  import HiveConfig._

  private val dbname = HiveTestUtils.createTestDatabase
  private val table = "stats_test_" + System.currentTimeMillis()
  private val partitioned_table = "stats_test2_" + System.currentTimeMillis()

  val schema = StructType(
    Field("a", StringType),
    Field("b", IntType.Signed)
  )

  def createRow = Row(schema, Seq(Random.shuffle(List("a", "b", "c")).head, Random.shuffle(List(1, 2, 3, 4, 5)).head))

  val amount = 10000

  override def afterAll(): Unit = Try {
    HiveTable(dbname, table).drop()
    HiveTable(dbname, partitioned_table).drop()
  }

  Try {
    DataStream.fromIterator(schema, Iterator.continually(createRow).take(amount))
      .to(HiveSink(dbname, table).withCreateTable(true), 4)

    DataStream.fromIterator(schema, Iterator.continually(createRow).take(amount))
      .to(HiveSink(dbname, partitioned_table).withCreateTable(true, Seq("a")), 4)
  }

  test("stats should return row counts for a non-partitioned table") {
    assume(new File(s"$basePath/core-site.xml").exists)
    HiveTable(dbname, table).stats().count shouldBe amount
  }

  test("stats should return row counts for a partitioned table") {
    assume(new File(s"$basePath/core-site.xml").exists)
    HiveTable(dbname, partitioned_table).stats().count shouldBe amount
  }

  test("stats should throw exception when constraints specified on a non-partitioned table") {
    assume(new File(s"$basePath/core-site.xml").exists)
    intercept[RuntimeException] {
      val constraints = Seq(PartitionConstraint.equals("a", "b"))
      HiveTable(dbname, table).stats().count(constraints)
    }
  }

  test("stats should support row count constraints for a partitioned table") {
    assume(new File(s"$basePath/core-site.xml").exists)
    val constraints = Seq(PartitionConstraint.equals("a", "b"))
    HiveTable(dbname, partitioned_table).stats().count(constraints) > 0 shouldBe true
    HiveTable(dbname, partitioned_table).stats().count(constraints) should be < amount.toLong
  }

  test("stats should support min and max for a non-partitioned tabled") {
    assume(new File(s"$basePath/core-site.xml").exists)
    HiveTable(dbname, table).stats.max("b") shouldBe 5
    HiveTable(dbname, table).stats.min("b") shouldBe 1
  }

  test("stats should support min and max for a partitioned table") {
    assume(new File(s"$basePath/core-site.xml").exists)
    HiveTable(dbname, partitioned_table).stats.max("b") shouldBe 5
    HiveTable(dbname, partitioned_table).stats.min("b") shouldBe 1
  }
} 
Example 93
Source File: OrcPredicateTest.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.orc

import java.io.{File, FilenameFilter}

import io.eels.Predicate
import io.eels.datastream.DataStream
import io.eels.schema.{Field, LongType, StringType, StructType}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

class OrcPredicateTest extends FlatSpec with Matchers with BeforeAndAfterAll {
  cleanUpResidualOrcTestFiles

  val schema = StructType(
    Field("name", StringType, nullable = true),
    Field("city", StringType, nullable = true),
    Field("age", LongType.Signed, nullable = true)
  )

  val values = Vector.fill(1000) {
    Vector("sam", "middlesbrough", 37)
  } ++ Vector.fill(1000) {
    Vector("laura", "iowa city", 24)
  }

  val ds = DataStream.fromValues(schema, values)

  implicit val conf = new Configuration()
  implicit val fs = FileSystem.get(new Configuration())
  val path = new Path("test.orc")

  if (fs.exists(path))
    fs.delete(path, false)

  new File(path.toString).deleteOnExit()

  ds.to(OrcSink(path).withRowIndexStride(1000))

  override protected def afterAll(): Unit = fs.delete(path, false)

  "OrcSource" should "support string equals predicates" in {
    conf.set("eel.orc.predicate.row.filter", "false")
    val rows = OrcSource(path).withPredicate(Predicate.equals("name", "sam")).toDataStream().collect
    rows.map(_.values).toSet shouldBe Set(Vector("sam", "middlesbrough", 37L))
  }

  it should "support gt predicates" in {
    conf.set("eel.orc.predicate.row.filter", "false")
    val rows = OrcSource(path).withPredicate(Predicate.gt("age", 30L)).toDataStream().collect
    rows.map(_.values).toSet shouldBe Set(Vector("sam", "middlesbrough", 37L))
  }

  it should "support lt predicates" in {
    conf.set("eel.orc.predicate.row.filter", "false")
    val rows = OrcSource(path).withPredicate(Predicate.lt("age", 30)).toDataStream().collect
    rows.map(_.values).toSet shouldBe Set(Vector("laura", "iowa city", 24L))
  }

  it should "enable row level filtering with predicates by default" in {
    conf.set("eel.orc.predicate.row.filter", "true")
    val rows = OrcSource(path).withPredicate(Predicate.equals("name", "sam")).toDataStream().collect
    rows.head.schema shouldBe schema
    rows.head.values shouldBe Vector("sam", "middlesbrough", 37L)
  }

  private def cleanUpResidualOrcTestFiles = {
    new File(".").listFiles(new FilenameFilter {
      override def accept(dir: File, name: String): Boolean = {
        (name.startsWith("test_") && name.endsWith(".orc")) || (name.startsWith(".test_") && name.endsWith(".orc.crc"))
      }
    }).foreach(_.delete())
  }
} 
Example 94
Source File: KafkaSinkTest.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.kafka

import java.util
import java.util.{Properties, UUID}

import io.eels.Row
import io.eels.datastream.DataStream
import io.eels.schema.{Field, StringType, StructType}
import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}
import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.clients.producer.KafkaProducer
import org.apache.kafka.common.serialization.{Deserializer, Serializer}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import scala.collection.JavaConverters._
import scala.util.Try

class KafkaSinkTest extends FlatSpec with Matchers with BeforeAndAfterAll {

  implicit val kafkaConfig = EmbeddedKafkaConfig(
    kafkaPort = 6001,
    zooKeeperPort = 6000
  )
  Try {
    EmbeddedKafka.start()
  }

  val schema = StructType(
    Field("name", StringType, nullable = true),
    Field("location", StringType, nullable = true)
  )

  val ds = DataStream.fromValues(
    schema,
    Seq(
      Vector("clint eastwood", UUID.randomUUID().toString),
      Vector("elton john", UUID.randomUUID().toString)
    )
  )

  "KafkaSink" should "support default implicits" ignore {

    val topic = "mytopic-" + System.currentTimeMillis()

    val properties = new Properties()
    properties.put("bootstrap.servers", s"localhost:${kafkaConfig.kafkaPort}")
    properties.put("group.id", "test")
    properties.put("auto.offset.reset", "earliest")

    val producer = new KafkaProducer[String, Row](properties, StringSerializer, RowSerializer)
    val sink = KafkaSink(topic, producer)

    val consumer = new KafkaConsumer[String, String](properties, StringDeserializer, StringDeserializer)
    consumer.subscribe(util.Arrays.asList(topic))

    ds.to(sink)
    producer.close()

    val records = consumer.poll(4000)
    records.iterator().asScala.map(_.value).toList shouldBe ds.collect.map {
      case Row(_, values) => values.mkString(",")
    }.toList
  }
}

object RowSerializer extends Serializer[Row] {
  override def configure(configs: util.Map[String, _], isKey: Boolean): Unit = ()
  override def serialize(topic: String, data: Row): Array[Byte] = data.values.mkString(",").getBytes
  override def close(): Unit = ()
}

object StringSerializer extends Serializer[String] {
  override def configure(configs: util.Map[String, _], isKey: Boolean): Unit = ()
  override def close(): Unit = ()
  override def serialize(topic: String, data: String): Array[Byte] = data.getBytes
}

object StringDeserializer extends Deserializer[String] {
  override def configure(configs: util.Map[String, _], isKey: Boolean): Unit = ()
  override def close(): Unit = ()
  override def deserialize(topic: String, data: Array[Byte]): String = new String(data)
} 
Example 95
Source File: LocalSparkContext.scala    From flint   with Apache License 2.0 5 votes vote down vote up
package com.twosigma.flint

import org.apache.spark.SparkContext
import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach }
import org.scalatest.Suite


trait LocalSparkContext extends BeforeAndAfterEach with BeforeAndAfterAll {

  self: Suite =>

  @transient var sc: SparkContext = _

  override def beforeAll() {
    super.beforeAll()
  }

  override def afterEach() {
    resetSparkContext()
    super.afterEach()
  }

  def resetSparkContext(): Unit = {
    LocalSparkContext.stop(sc)
    sc = null
  }
}

object LocalSparkContext {
  def stop(sc: SparkContext) {
    if (sc != null) {
      sc.stop()
    }
    System.clearProperty("spark.driver.port")
  }
} 
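A minimal sketch of a suite using LocalSparkContext; because the trait only handles teardown, the suite creates sc itself in beforeEach (names here are illustrative):

import org.apache.spark.SparkContext
import org.scalatest.FunSuite

class WordCountSpec extends FunSuite with LocalSparkContext {
  override def beforeEach(): Unit = {
    super.beforeEach()
    sc = new SparkContext("local[2]", "word-count-spec")
  }

  test("counts repeated words") {
    val counts = sc.parallelize(Seq("a", "b", "a")).countByValue()
    assert(counts("a") == 2L)
  }
}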
Example 96
Source File: SKRSpec.scala    From spark-kafka-writer   with Apache License 2.0 5 votes vote down vote up
package com.github.benfradet.spark.kafka.writer

import java.util.concurrent.atomic.AtomicInteger

import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.scalatest.concurrent.Eventually
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}

import scala.collection.mutable.ArrayBuffer
import scala.util.Random
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

case class Foo(a: Int, b: String)

trait SKRSpec
  extends AnyWordSpec
  with Matchers
  with BeforeAndAfterEach
  with BeforeAndAfterAll
  with Eventually {

  val sparkConf = new SparkConf()
    .setMaster("local[1]")
    .setAppName(getClass.getSimpleName)

  var ktu: KafkaTestUtils = _
  override def beforeAll(): Unit = {
    ktu = new KafkaTestUtils
    ktu.setup()
  }
  override def afterAll(): Unit = {
    SKRSpec.callbackTriggerCount.set(0)
    if (ktu != null) {
      ktu.tearDown()
      ktu = null
    }
  }

  var topic: String = _
  var ssc: StreamingContext = _
  var spark: SparkSession = _
  override def afterEach(): Unit = {
    if (ssc != null) {
      ssc.stop()
      ssc = null
    }
    if (spark != null) {
      spark.stop()
      spark = null
    }
  }
  override def beforeEach(): Unit = {
    ssc = new StreamingContext(sparkConf, Seconds(1))
    spark = SparkSession.builder
      .config(sparkConf)
      .getOrCreate()
    topic = s"topic-${Random.nextInt()}"
    ktu.createTopics(topic)
  }

  def collect(ssc: StreamingContext, topic: String): ArrayBuffer[String] = {
    val kafkaParams = Map(
      "bootstrap.servers" -> ktu.brokerAddress,
      "auto.offset.reset" -> "earliest",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "test-collect"
    )
    val results = new ArrayBuffer[String]
    KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Set(topic), kafkaParams)
    ).map(_.value())
      .foreachRDD { rdd =>
        results ++= rdd.collect()
        ()
      }
    results
  }

  val producerConfig = Map(
    "bootstrap.servers" -> "127.0.0.1:9092",
    "key.serializer" -> classOf[StringSerializer].getName,
    "value.serializer" -> classOf[StringSerializer].getName
  )
}

object SKRSpec {
  val callbackTriggerCount = new AtomicInteger()
} 
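A sketch of how the collect helper above is typically used inside a test in this trait; the produced message and timeout are assumptions, and Span/Seconds come from org.scalatest.time:

val results = collect(ssc, topic)
ssc.start()
// ... write "hello" to `topic` via the producer under test ...
eventually(timeout(Span(30, Seconds))) {
  results should contain("hello")
}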
Example 97
Source File: PointDStreamExtensionsSpec.scala    From reactiveinflux-spark   with Apache License 2.0 5 votes vote down vote up
package com.pygmalios.reactiveinflux.extensions

import com.holdenkarau.spark.testing.StreamingActionBase
import com.pygmalios.reactiveinflux.spark._
import com.pygmalios.reactiveinflux._
import org.apache.spark.streaming.dstream.DStream
import org.junit.runner.RunWith
import org.scalatest.BeforeAndAfterAll
import org.scalatest.junit.JUnitRunner

@RunWith(classOf[JUnitRunner])
class PointDStreamExtensionsSpec extends StreamingActionBase
  with BeforeAndAfterAll {
  import PointRDDExtensionsSpec._

  override def beforeAll: Unit = {
    super.beforeAll
    withInflux(_.create())
  }

  override def afterAll: Unit = {
    withInflux(_.drop())
    super.afterAll
  }

  test("write single point to Influx") {
    val points = List(point1)

    // Execute
    runAction(Seq(points), (dstream: DStream[Point]) => dstream.saveToInflux())

    // Assert
    val result = withInflux(
      _.query(Query(s"SELECT * FROM $measurement1")).result.singleSeries)

    assert(result.rows.size == 1)

    val row = result.rows.head
    assert(row.time == point1.time)
    assert(row.values.size == 5)
  }
} 
Example 98
Source File: Step4_SecondaryPersistenceSpec.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package kvstore

import akka.testkit.TestKit
import akka.testkit.ImplicitSender
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Matchers
import org.scalatest.FunSuiteLike
import akka.actor.ActorSystem
import akka.testkit.TestProbe
import scala.concurrent.duration._
import Arbiter._
import Persistence._
import org.scalactic.ConversionCheckedTripleEquals

class Step4_SecondaryPersistenceSpec extends TestKit(ActorSystem("Step4SecondaryPersistenceSpec"))
    with FunSuiteLike
    with BeforeAndAfterAll
    with Matchers
    with ConversionCheckedTripleEquals
    with ImplicitSender
    with Tools {

  override def afterAll(): Unit = {
    system.shutdown()
  }

  test("case1: Secondary should not acknowledge snapshots until persisted") {
    import Replicator._

    val arbiter = TestProbe()
    val persistence = TestProbe()
    val replicator = TestProbe()
    val secondary = system.actorOf(Replica.props(arbiter.ref, probeProps(persistence)), "case1-secondary")
    val client = session(secondary)

    arbiter.expectMsg(Join)
    arbiter.send(secondary, JoinedSecondary)

    client.get("k1") should ===(None)

    replicator.send(secondary, Snapshot("k1", Some("v1"), 0L))
    val persistId = persistence.expectMsgPF() {
      case Persist("k1", Some("v1"), id) => id
    }

    withClue("secondary replica should already serve the received update while waiting for persistence: ") {
      client.get("k1") should ===(Some("v1"))
    }

    replicator.expectNoMsg(500.milliseconds)

    persistence.reply(Persisted("k1", persistId))
    replicator.expectMsg(SnapshotAck("k1", 0L))
    client.get("k1") should ===(Some("v1"))
  }

  test("case2: Secondary should retry persistence in every 100 milliseconds") {
    import Replicator._

    val arbiter = TestProbe()
    val persistence = TestProbe()
    val replicator = TestProbe()
    val secondary = system.actorOf(Replica.props(arbiter.ref, probeProps(persistence)), "case2-secondary")
    val client = session(secondary)

    arbiter.expectMsg(Join)
    arbiter.send(secondary, JoinedSecondary)

    client.get("k1") should ===(None)

    replicator.send(secondary, Snapshot("k1", Some("v1"), 0L))
    val persistId = persistence.expectMsgPF() {
      case Persist("k1", Some("v1"), id) => id
    }

    withClue("secondary replica should already serve the received update while waiting for persistence: ") {
      client.get("k1") should ===(Some("v1"))
    }

    // Persistence should be retried
    persistence.expectMsg(200.milliseconds, Persist("k1", Some("v1"), persistId))
    persistence.expectMsg(200.milliseconds, Persist("k1", Some("v1"), persistId))

    replicator.expectNoMsg(500.milliseconds)

    persistence.reply(Persisted("k1", persistId))
    replicator.expectMsg(SnapshotAck("k1", 0L))
    client.get("k1") should ===(Some("v1"))
  }

} 
Example 99
Source File: IntegrationSpec.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package kvstore

import akka.actor.{ Actor, Props, ActorRef, ActorSystem }
import akka.testkit.{ TestProbe, ImplicitSender, TestKit }
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, Matchers }
import scala.concurrent.duration._
import org.scalatest.FunSuiteLike
import org.scalactic.ConversionCheckedTripleEquals

class IntegrationSpec(_system: ActorSystem) extends TestKit(_system)
    with FunSuiteLike
    with Matchers
    with BeforeAndAfterAll
    with ConversionCheckedTripleEquals
    with ImplicitSender
    with Tools {

  import Replica._
  import Replicator._
  import Arbiter._

  def this() = this(ActorSystem("ReplicatorSpec"))

  override def afterAll: Unit = system.shutdown()

}
Example 100
Source File: Step6_NewSecondarySpec.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package kvstore

import akka.testkit.TestKit
import akka.testkit.ImplicitSender
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Matchers
import org.scalatest.FunSuiteLike
import akka.actor.ActorSystem
import akka.testkit.TestProbe
import Arbiter._
import Replicator._
import org.scalactic.ConversionCheckedTripleEquals

class Step6_NewSecondarySpec extends TestKit(ActorSystem("Step6NewSecondarySpec"))
  with FunSuiteLike
  with BeforeAndAfterAll
  with Matchers
  with ConversionCheckedTripleEquals
  with ImplicitSender
  with Tools {

  override def afterAll(): Unit = {
    system.shutdown()
  }

  test("case1: Primary must start replication to new replicas") {
    val arbiter = TestProbe()
    val primary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case1-primary")
    val user = session(primary)
    val secondary = TestProbe()

    arbiter.expectMsg(Join)
    arbiter.send(primary, JoinedPrimary)

    user.setAcked("k1", "v1")
    arbiter.send(primary, Replicas(Set(primary, secondary.ref)))

    secondary.expectMsg(Snapshot("k1", Some("v1"), 0L))
    secondary.reply(SnapshotAck("k1", 0L))

    val ack1 = user.set("k1", "v2")
    secondary.expectMsg(Snapshot("k1", Some("v2"), 1L))
    secondary.reply(SnapshotAck("k1", 1L))
    user.waitAck(ack1)

    val ack2 = user.remove("k1")
    secondary.expectMsg(Snapshot("k1", None, 2L))
    secondary.reply(SnapshotAck("k1", 2L))
    user.waitAck(ack2)
  }

  test("case2: Primary must stop replication to removed replicas and stop Replicator") {
    val arbiter = TestProbe()
    val primary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case2-primary")
    val user = session(primary)
    val secondary = TestProbe()

    arbiter.expectMsg(Join)
    arbiter.send(primary, JoinedPrimary)
    arbiter.send(primary, Replicas(Set(primary, secondary.ref)))

    val ack1 = user.set("k1", "v1")
    secondary.expectMsg(Snapshot("k1", Some("v1"), 0L))
    val replicator = secondary.lastSender
    secondary.reply(SnapshotAck("k1", 0L))
    user.waitAck(ack1)

    watch(replicator)
    arbiter.send(primary, Replicas(Set(primary)))
    expectTerminated(replicator)
  }

  test("case3: Primary must stop replication to removed replicas and waive their outstanding acknowledgements") {
    val arbiter = TestProbe()
    val primary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case3-primary")
    val user = session(primary)
    val secondary = TestProbe()

    arbiter.expectMsg(Join)
    arbiter.send(primary, JoinedPrimary)
    arbiter.send(primary, Replicas(Set(primary, secondary.ref)))

    val ack1 = user.set("k1", "v1")
    secondary.expectMsg(Snapshot("k1", Some("v1"), 0L))
    secondary.reply(SnapshotAck("k1", 0L))
    user.waitAck(ack1)

    val ack2 = user.set("k1", "v2")
    secondary.expectMsg(Snapshot("k1", Some("v2"), 1L))
    arbiter.send(primary, Replicas(Set(primary)))
    user.waitAck(ack2)
  }

} 
Example 101
Source File: Step1_PrimarySpec.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package kvstore

import akka.testkit.TestKit
import akka.actor.ActorSystem
import org.scalatest.FunSuiteLike
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Matchers
import akka.testkit.ImplicitSender
import akka.testkit.TestProbe
import scala.concurrent.duration._
import kvstore.Persistence.{ Persisted, Persist }
import kvstore.Replica.OperationFailed
import kvstore.Replicator.{ Snapshot }
import scala.util.Random
import scala.util.control.NonFatal
import org.scalactic.ConversionCheckedTripleEquals

class Step1_PrimarySpec extends TestKit(ActorSystem("Step1PrimarySpec"))
    with FunSuiteLike
    with BeforeAndAfterAll
    with Matchers
    with ConversionCheckedTripleEquals
    with ImplicitSender
    with Tools {

  override def afterAll(): Unit = {
    system.shutdown()
  }

  import Arbiter._

  test("case1: Primary (in isolation) should properly register itself to the provided Arbiter") {
    val arbiter = TestProbe()
    system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case1-primary")

    arbiter.expectMsg(Join)
  }

  test("case2: Primary (in isolation) should react properly to Insert, Remove, Get") {
    val arbiter = TestProbe()
    val primary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case2-primary")
    val client = session(primary)

    arbiter.expectMsg(Join)
    arbiter.send(primary, JoinedPrimary)

    client.getAndVerify("k1")
    client.setAcked("k1", "v1")
    client.getAndVerify("k1")
    client.getAndVerify("k2")
    client.setAcked("k2", "v2")
    client.getAndVerify("k2")
    client.removeAcked("k1")
    client.getAndVerify("k1")
  }

}
Example 102
Source File: Step3_ReplicatorSpec.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package kvstore

import akka.testkit.{ TestProbe, TestKit, ImplicitSender }
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Matchers
import org.scalatest.FunSuiteLike
import akka.actor.ActorSystem
import scala.concurrent.duration._
import kvstore.Arbiter.{ JoinedSecondary, Join }
import kvstore.Persistence.{ Persisted, Persist }
import kvstore.Replicator.{ SnapshotAck, Snapshot, Replicate }
import org.scalactic.ConversionCheckedTripleEquals

class Step3_ReplicatorSpec extends TestKit(ActorSystem("Step3ReplicatorSpec"))
    with FunSuiteLike
    with BeforeAndAfterAll
    with Matchers
    with ConversionCheckedTripleEquals
    with ImplicitSender
    with Tools {

  override def afterAll(): Unit = {
    system.shutdown()
  }

  test("case1: Replicator should send snapshots when asked to replicate") {
    val secondary = TestProbe()
    val replicator = system.actorOf(Replicator.props(secondary.ref), "case1-replicator")

    replicator ! Replicate("k1", Some("v1"), 0L)
    secondary.expectMsg(Snapshot("k1", Some("v1"), 0L))
    secondary.ignoreMsg({ case Snapshot(_, _, 0L) => true })
    secondary.reply(SnapshotAck("k1", 0L))

    replicator ! Replicate("k1", Some("v2"), 1L)
    secondary.expectMsg(Snapshot("k1", Some("v2"), 1L))
    secondary.ignoreMsg({ case Snapshot(_, _, 1L) => true })
    secondary.reply(SnapshotAck("k1", 1L))

    replicator ! Replicate("k2", Some("v1"), 2L)
    secondary.expectMsg(Snapshot("k2", Some("v1"), 2L))
    secondary.ignoreMsg({ case Snapshot(_, _, 2L) => true })
    secondary.reply(SnapshotAck("k2", 2L))

    replicator ! Replicate("k1", None, 3L)
    secondary.expectMsg(Snapshot("k1", None, 3L))
    secondary.reply(SnapshotAck("k1", 3L))
  }

  test("case2: Replicator should retry until acknowledged by secondary") {
    val secondary = TestProbe()
    val replicator = system.actorOf(Replicator.props(secondary.ref), "case2-replicator")

    replicator ! Replicate("k1", Some("v1"), 0L)
    secondary.expectMsg(Snapshot("k1", Some("v1"), 0L))
    secondary.expectMsg(300.milliseconds, Snapshot("k1", Some("v1"), 0L))
    secondary.expectMsg(300.milliseconds, Snapshot("k1", Some("v1"), 0L))

    secondary.reply(SnapshotAck("k1", 0L))
  }

} 
Example 103
Source File: SidechainNodeViewHolderTest.scala    From Sidechains-SDK   with MIT License 5 votes vote down vote up
package com.horizen.actors

import java.util.concurrent.TimeUnit

import akka.actor.{ActorRef, ActorSystem}
import akka.pattern.ask
import akka.testkit.TestKit
import akka.util.Timeout
import com.horizen.SidechainNodeViewHolder.ReceivableMessages.GetDataFromCurrentSidechainNodeView
import com.horizen.fixtures.SidechainNodeViewHolderFixture
import com.horizen.node.SidechainNodeView
import org.scalatest.{BeforeAndAfterAll, FunSuiteLike}

import scala.concurrent._
import scala.concurrent.duration._
import org.scalatest._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner


@RunWith(classOf[JUnitRunner])
class SidechainNodeViewHolderTest extends Suites(
  new SidechainNodeViewHolderTest1,
  new SidechainNodeViewHolderTest2
)

@RunWith(classOf[JUnitRunner])
class SidechainNodeViewHolderTest1
  extends TestKit(ActorSystem("testsystem"))
  with FunSuiteLike
  with BeforeAndAfterAll
  with SidechainNodeViewHolderFixture
{

  implicit val timeout = Timeout(5, TimeUnit.SECONDS)

  override def afterAll: Unit = {
    //info("Actor system is shutting down...")
    TestKit.shutdownActorSystem(system)
  }

  test ("Test1") {
    def f(v: SidechainNodeView) = v
    val sidechainNodeViewHolderRef: ActorRef = getSidechainNodeViewHolderRef
    val nodeView = (sidechainNodeViewHolderRef ? GetDataFromCurrentSidechainNodeView(f))
      .mapTo[SidechainNodeView]

    assert(Await.result(nodeView, 5 seconds) != null)
  }

  test("Test2") {
  }

}

@RunWith(classOf[JUnitRunner])
class SidechainNodeViewHolderTest2
  extends TestKit(ActorSystem("testSystem"))
  with FeatureSpecLike
  with BeforeAndAfterAll
  with Matchers
  with SidechainNodeViewHolderFixture
{

  implicit val timeout = Timeout(5, TimeUnit.SECONDS)

  override def afterAll: Unit = {
    //info("Actor system is shutting down...")
    TestKit.shutdownActorSystem(system)
  }

  feature("Actor1") {
    scenario("Scenario 1"){
      system should not be(null)

      def f(v: SidechainNodeView) = v
      val sidechainNodeViewHolderRef: ActorRef = getSidechainNodeViewHolderRef
      val nodeView = (sidechainNodeViewHolderRef ? GetDataFromCurrentSidechainNodeView(f))
        .mapTo[SidechainNodeView]

      Await.result(nodeView, 5 seconds) should not be(null)

    }
  }
} 
Example 104
Source File: BlackListTests.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.network

import java.net.{InetAddress, InetSocketAddress}

import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestProbe}
import encry.modifiers.InstanceFactory
import encry.network.BlackList.BanReason._
import encry.network.PeerConnectionHandler.{ConnectedPeer, Outgoing}
import encry.network.PeerConnectionHandler.ReceivableMessages.CloseConnection
import encry.network.PeersKeeper.BanPeer
import encry.settings.TestNetSettings
import org.encryfoundation.common.network.BasicMessagesRepo.Handshake
import org.scalatest.{BeforeAndAfterAll, Matchers, OneInstancePerTest, WordSpecLike}
import scala.concurrent.duration._

class BlackListTests extends WordSpecLike
  with Matchers
  with BeforeAndAfterAll
  with InstanceFactory
  with OneInstancePerTest
  with TestNetSettings {

  implicit val system: ActorSystem = ActorSystem()

  override def afterAll(): Unit = system.terminate()

  val knowPeersSettings = testNetSettings.copy(
    network = settings.network.copy(
      knownPeers = List(new InetSocketAddress("172.16.11.11", 9001)),
      connectOnlyWithKnownPeers = Some(true)
    ),
    blackList = settings.blackList.copy(
      banTime = 2 seconds,
      cleanupTime = 3 seconds
    ))

  "Peers keeper" should {
    "handle ban peer message correctly" in {
      val peersKeeper: TestActorRef[PeersKeeper] = TestActorRef[PeersKeeper](PeersKeeper.props(knowPeersSettings, TestProbe().ref, TestProbe().ref))
      val address: InetSocketAddress = new InetSocketAddress("0.0.0.0", 9000)
      val peerHandler: TestProbe = TestProbe()
      val connectedPeer: ConnectedPeer = ConnectedPeer(
        address,
        peerHandler.ref,
        Outgoing,
        Handshake(protocolToBytes(knowPeersSettings.network.appVersion), "test node", Some(address), System.currentTimeMillis())
      )
      peersKeeper ! BanPeer(connectedPeer, SpamSender)
      peerHandler.expectMsg(CloseConnection)
      peersKeeper.underlyingActor.blackList.contains(address.getAddress) shouldBe true
    }
    "cleanup black list by scheduler correctly" in {
      val peersKeeper: TestActorRef[PeersKeeper] = TestActorRef[PeersKeeper](PeersKeeper.props(knowPeersSettings, TestProbe().ref, TestProbe().ref))
      val address: InetSocketAddress = new InetSocketAddress("0.0.0.0", 9000)
      val peerHandler: TestProbe = TestProbe()
      val connectedPeer: ConnectedPeer = ConnectedPeer(
        address,
        peerHandler.ref,
        Outgoing,
        Handshake(protocolToBytes(knowPeersSettings.network.appVersion), "test node", Some(address), System.currentTimeMillis())
      )
      peersKeeper ! BanPeer(connectedPeer, SentPeersMessageWithoutRequest)
      Thread.sleep(6000)
      peersKeeper.underlyingActor.blackList.contains(address.getAddress) shouldBe false
    }
    "don't remove peer from black list before ban time expired" in {
      val peersKeeper: TestActorRef[PeersKeeper] = TestActorRef[PeersKeeper](PeersKeeper.props(knowPeersSettings, TestProbe().ref, TestProbe().ref))
      val address: InetSocketAddress = new InetSocketAddress("0.0.0.0", 9000)
      val peerHandler: TestProbe = TestProbe()
      val connectedPeer: ConnectedPeer = ConnectedPeer(
        address,
        peerHandler.ref,
        Outgoing,
        Handshake(protocolToBytes(knowPeersSettings.network.appVersion), "test node", Some(address), System.currentTimeMillis())
      )
      Thread.sleep(4000)
      peersKeeper ! BanPeer(connectedPeer, CorruptedSerializedBytes)
      Thread.sleep(2000)
      peersKeeper.underlyingActor.blackList.contains(address.getAddress) shouldBe true
    }
  }
} 
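The Thread.sleep-based checks above are sensitive to machine load; a sketch of the cleanup assertion rewritten with ScalaTest's Eventually, which polls until the condition holds or the timeout expires (an assumption: the original suite does not mix in this trait):

import org.scalatest.concurrent.Eventually._
import org.scalatest.time.{Seconds, Span}

eventually(timeout(Span(10, Seconds))) {
  peersKeeper.underlyingActor.blackList.contains(address.getAddress) shouldBe false
}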
Example 105
Source File: MemoryPoolTests.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.view.mempool

import akka.actor.ActorSystem
import akka.testkit.{ TestActorRef, TestProbe }
import com.typesafe.scalalogging.StrictLogging
import encry.modifiers.InstanceFactory
import encry.settings.{ EncryAppSettings, TestNetSettings }
import encry.utils.NetworkTimeProvider
import encry.view.mempool.MemoryPool.{ NewTransaction, TransactionsForMiner }
import org.scalatest.{ BeforeAndAfterAll, Matchers, OneInstancePerTest, WordSpecLike }

import scala.concurrent.duration._

class MemoryPoolTests
    extends WordSpecLike
    with Matchers
    with InstanceFactory
    with BeforeAndAfterAll
    with OneInstancePerTest
    with TestNetSettings
    with StrictLogging {

  implicit val system: ActorSystem = ActorSystem()

  override def afterAll(): Unit = system.terminate()

  val timeProvider: NetworkTimeProvider = new NetworkTimeProvider(testNetSettings.ntp)

  "MemoryPool" should {
    "add new unique transactions" in {
      val mempool                = MemoryPoolStorage.empty(testNetSettings, timeProvider)
      val transactions           = genValidPaymentTxs(10)
      val (newMempool, validTxs) = mempool.validateTransactions(transactions)
      newMempool.size shouldBe 10
      validTxs.map(_.encodedId).forall(transactions.map(_.encodedId).contains) shouldBe true
    }
    "reject not unique transactions" in {
      val mempool                          = MemoryPoolStorage.empty(testNetSettings, timeProvider)
      val transactions                     = genValidPaymentTxs(10)
      val (newMempool, validTxs)           = mempool.validateTransactions(transactions)
      val (newMempoolAgain, validTxsAgain) = newMempool.validateTransactions(validTxs)
      newMempoolAgain.size shouldBe 10
      validTxsAgain.size shouldBe 0
    }
    "mempoolMaxCapacity works correct" in {
      val mempool                = MemoryPoolStorage.empty(testNetSettings, timeProvider)
      val transactions           = genValidPaymentTxs(11)
      val (newMempool, validTxs) = mempool.validateTransactions(transactions)
      newMempool.size shouldBe 10
      validTxs.size shouldBe 10
    }
    "getTransactionsForMiner works fine" in {
      val mempool         = MemoryPoolStorage.empty(testNetSettings, timeProvider)
      val transactions    = (0 until 10).map(k => coinbaseAt(k))
      val (newMempool, _) = mempool.validateTransactions(transactions)
      val (uPool, txs)    = newMempool.getTransactionsForMiner
      uPool.size shouldBe 0
      txs.map(_.encodedId).forall(transactions.map(_.encodedId).contains) shouldBe true
      transactions.map(_.encodedId).forall(txs.map(_.encodedId).contains) shouldBe true
    }
  }
  "Mempool actor" should {
    "send transactions to miner" in {
      val miner1 = TestProbe()
      val mempool1: TestActorRef[MemoryPool] =
        TestActorRef[MemoryPool](MemoryPool.props(testNetSettings, timeProvider, miner1.ref, Some(TestProbe().ref)))
      val transactions1 = (0 until 4).map(coinbaseAt)
      transactions1.foreach(mempool1 ! NewTransaction(_))
      mempool1.underlyingActor.memoryPool.size shouldBe 4
      logger.info(s"generated: ${transactions1.map(_.encodedId)}")
      miner1.expectMsg(20.seconds, TransactionsForMiner(transactions1))
    }
  }
} 
Example 106
Source File: TokenizerSuite.scala    From spark-nkp   with Apache License 2.0 5 votes vote down vote up
package com.github.uosdmlab.nkp

import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.feature.{CountVectorizer, IDF}
import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfter, FunSuite}


class TokenizerSuite extends FunSuite with BeforeAndAfterAll with BeforeAndAfter {

  private var tokenizer: Tokenizer = _

  private val spark: SparkSession =
    SparkSession.builder()
      .master("local[2]")
      .appName("Tokenizer Suite")
      .getOrCreate

  spark.sparkContext.setLogLevel("WARN")

  import spark.implicits._

  override protected def afterAll(): Unit = {
    try {
      spark.stop
    } finally {
      super.afterAll()
    }
  }

  before {
    tokenizer = new Tokenizer()
      .setInputCol("text")
      .setOutputCol("words")
  }

  private val df = spark.createDataset(
    Seq(
      "아버지가방에들어가신다.",
      "사랑해요 제플린!",
      "스파크는 재밌어",
      "나는야 데이터과학자",
      "데이터야~ 놀자~"
    )
  ).toDF("text")

  test("Default parameters") {
    assert(tokenizer.getFilter sameElements Array.empty[String])
  }

  test("Basic operation") {
    val words = tokenizer.transform(df)

    assert(df.count == words.count)
    assert(words.schema.fieldNames.contains(tokenizer.getOutputCol))
  }

  test("POS filter") {
    val nvTokenizer = new Tokenizer()
      .setInputCol("text")
      .setOutputCol("nvWords")
      .setFilter("N", "V")

    val words = tokenizer.transform(df).join(nvTokenizer.transform(df), "text")

    assert(df.count == words.count)
    assert(words.schema.fieldNames.contains(nvTokenizer.getOutputCol))
    assert(words.where(s"SIZE(${tokenizer.getOutputCol}) < SIZE(${nvTokenizer.getOutputCol})").count == 0)
  }

  test("TF-IDF pipeline") {
    tokenizer.setFilter("N")

    val cntVec = new CountVectorizer()
      .setInputCol("words")
      .setOutputCol("tf")

    val idf = new IDF()
      .setInputCol("tf")
      .setOutputCol("tfidf")

    val pipe = new Pipeline()
      .setStages(Array(tokenizer, cntVec, idf))

    val pipeModel = pipe.fit(df)

    val result = pipeModel.transform(df)

    assert(result.count == df.count)

    val fields = result.schema.fieldNames
    assert(fields.contains(tokenizer.getOutputCol))
    assert(fields.contains(cntVec.getOutputCol))
    assert(fields.contains(idf.getOutputCol))

    result.show
  }
} 
Example 107
Source File: RDDCollectionOpsSpec.scala    From kontextfrei   with Apache License 2.0 5 votes vote down vote up
package com.danielwestheide.kontextfrei

import com.danielwestheide.kontextfrei.rdd.RDDOpsSupport
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.scalatest.BeforeAndAfterAll

class RDDCollectionOpsSpec
    extends DCollectionOpsProperties[RDD]
    with BeforeAndAfterAll {
  implicit val sparkContext = new SparkContext("local[2]", "dcollection-spec")
  override implicit val ops: DCollectionOps[RDD] =
    RDDOpsSupport.rddCollectionOps
  override protected def afterAll(): Unit = {
    sparkContext.stop()
  }
} 
Example 108
Source File: BugDemonstrationTest.scala    From spark-tsne   with Apache License 2.0 5 votes vote down vote up
package com.github.saurfang.spark.tsne

import org.apache.spark.mllib.linalg.{Vectors, Vector}
import org.apache.spark.mllib.stat.{MultivariateStatisticalSummary, Statistics}
import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}


class BugDemonstrationTest extends FunSuite with Matchers with BeforeAndAfterAll {
  private var sparkSession : SparkSession = _
  override def beforeAll(): Unit = {
    super.beforeAll()
    sparkSession = SparkSession.builder().appName("BugTests").master("local[2]").getOrCreate()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    sparkSession.stop()
  }

  test("This demonstrates a bug was fixed in tsne-spark 2.1") {
    val sc = sparkSession.sparkContext

    val observations = sc.parallelize(
      Seq(
        Vectors.dense(1.0, 10.0, 100.0),
        Vectors.dense(2.0, 20.0, 200.0),
        Vectors.dense(3.0, 30.0, 300.0)
      )
    )

    // Compute column summary statistics.
    val summary: MultivariateStatisticalSummary = Statistics.colStats(observations)
    val expectedMean = Vectors.dense(2.0,20.0,200.0)
    val resultMean = summary.mean
    assertEqualEnough(resultMean, expectedMean)
    val expectedVariance = Vectors.dense(1.0,100.0,10000.0)
    assertEqualEnough(summary.variance, expectedVariance)
    val expectedNumNonZeros = Vectors.dense(3.0, 3.0, 3.0)
    assertEqualEnough(summary.numNonzeros, expectedNumNonZeros)
  }

  private def assertEqualEnough(sample: Vector, expected: Vector): Unit = {
    expected.toArray.zipWithIndex.foreach{ case(d: Double, i: Int) =>
      sample(i) should be (d +- 1E-12)
    }
  }
} 
Example 109
Source File: CouchbasePluginSpec.scala    From akka-persistence-couchbase   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.couchbase.support

import akka.actor.ActorSystem
import akka.persistence.couchbase.{CouchbaseExtension, LoggingConfig}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Suite}

import scala.concurrent.Await
import scala.concurrent.duration._

object CouchbasePluginSpec {

  val config = ConfigFactory.parseString(
    """
      |akka {
      |  persistence {
      |    journal {
      |      plugin = "couchbase-journal"
      |    }
      |
      |    snapshot-store {
      |      plugin =  "couchbase-snapshot-store"
      |    }
      |
      |    journal-plugin-fallback {
      |      replay-filter {
      |        mode = warn
      |      }
      |    }
      |  }
      |
      |  test.single-expect-default = 10s
      |  loglevel = WARNING
      |  log-dead-letters = 0
      |  log-dead-letters-during-shutdown = off
      |}
      |
      |couchbase-replay {
      |
      |  batchSize = "4"
      |}
    """.stripMargin)
}

trait CouchbasePluginSpec
  extends Suite
    with BeforeAndAfter
    with BeforeAndAfterAll {

  System.setProperty("java.util.logging.config.class", classOf[LoggingConfig].getName)

  def system: ActorSystem

  def couchbase = CouchbaseExtension(system)

  before {
    assert(couchbase.journalBucket.bucketManager.flush())
    assert(couchbase.snapshotStoreBucket.bucketManager.flush())
  }

  override protected def afterAll(): Unit = {
    Await.result(system.terminate(), 10.seconds)
    super.afterAll()
  }
} 
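CouchbasePluginSpec declares only the abstract system member, so a concrete suite has to supply the actor system. A hypothetical suite (the class name is illustrative, not from the project) could wire it up like this:

import akka.actor.ActorSystem
import org.scalatest.WordSpecLike

// Hypothetical usage sketch: the before hook flushes both buckets and
// afterAll terminates the system, so the suite body only adds tests.
class MyCouchbaseJournalSpec extends WordSpecLike with CouchbasePluginSpec {
  override val system: ActorSystem =
    ActorSystem("my-couchbase-spec", CouchbasePluginSpec.config)
}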
Example 110
Source File: SharedSparkContext.scala    From tispark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark

import org.apache.spark.SharedSparkContext._
import org.apache.spark.sql.internal.StaticSQLConf
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}

trait SharedSparkContext extends BeforeAndAfterAll with BeforeAndAfterEach { self: Suite =>

  protected var _isHiveEnabled: Boolean = false
  protected var conf: SparkConf = new SparkConf(false)

  def sc: SparkContext = _sc

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    if (_sc != null) {
      SharedSparkContext.stop()
    }
    initializeContext()
  }

  protected def initializeContext(): Unit =
    synchronized {
      if (null == _sc) {
        conf.set("spark.sql.test.key", "true")
        if (_isHiveEnabled) {
          conf.set(StaticSQLConf.CATALOG_IMPLEMENTATION, "hive")
        }
        _sc = new SparkContext("local[4]", "tispark-integration-test", conf)
      }
    }

  override protected def afterAll(): Unit = {
    try {
      SharedSparkContext.stop()
    } finally {
      super.afterAll()
    }
  }
}

object SharedSparkContext {

  @transient private var _sc: SparkContext = _

  def stop(): Unit =
    synchronized {
      if (_sc != null) {
        _sc.stop()
        _sc = null
      }
      // To avoid RPC rebinding to the same port, since it doesn't unbind immediately on shutdown
      System.clearProperty("spark.driver.port")
    }

} 
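A suite that mixes in SharedSparkContext gets one SparkContext per suite through sc. A hypothetical test (illustrative only, not part of the tispark sources) might look like:

import org.apache.spark.SharedSparkContext
import org.scalatest.FunSuite

// Hypothetical usage sketch: sc is created in beforeAll and stopped in
// afterAll by the trait above, so tests just use it.
class WordCountSuite extends FunSuite with SharedSparkContext {
  test("counts words with the shared context") {
    val counts = sc.parallelize(Seq("a", "b", "a"))
      .map(word => (word, 1))
      .reduceByKey(_ + _)
      .collectAsMap()
    assert(counts == Map("a" -> 2, "b" -> 1))
  }
}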
Example 111
Source File: IngestionActorsSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.services

import hydra.common.util.ActorUtils
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpecLike


class IngestionActorsSpec
    extends Matchers
    with AnyFlatSpecLike
    with BeforeAndAfterAll {

  "The ingestion actors sequence" should "contain all actors" in {
    IngestionActors.services.map(_._1) shouldBe Seq(
      ActorUtils.actorName[IngestionHandlerGateway],
      ActorUtils.actorName[TransportRegistrar],
      ActorUtils.actorName[IngestorRegistry],
      ActorUtils.actorName[IngestorRegistrar]
    )
  }
} 
Example 112
Source File: IngestorRegistrarSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.services

import java.util.concurrent.TimeUnit

import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.testkit.{ImplicitSender, TestKit}
import akka.util.Timeout
import hydra.common.util.ActorUtils
import hydra.ingest.services.IngestorRegistrar.UnregisterAll
import hydra.ingest.services.IngestorRegistry.{
  FindAll,
  FindByName,
  LookupResult
}
import hydra.ingest.test.TestIngestor
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.time.{Seconds, Span}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._


class IngestorRegistrarSpec
    extends TestKit(ActorSystem("IngestorRegistrarSpec"))
    with Matchers
    with AnyFunSpecLike
    with ImplicitSender
    with ScalaFutures
    with BeforeAndAfterAll
    with Eventually {

  override def afterAll =
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)

  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(10, Seconds), interval = Span(1, Seconds))

  val registry = system.actorOf(Props[IngestorRegistry], "ingestor_registry")

  val act = system.actorOf(Props[IngestorRegistrar])

  implicit val timeout = Timeout(3, TimeUnit.SECONDS)

  describe("The ingestor registrar actor") {
    it("registers from classpath on bootstrap") {
      eventually {
        whenReady(
          (registry ? FindByName(ActorUtils.actorName(classOf[TestIngestor])))
            .mapTo[LookupResult]
        ) { i =>
          i.ingestors.size shouldBe 1
          i.ingestors(0).name shouldBe ActorUtils.actorName(
            classOf[TestIngestor]
          )
        }
      }
    }

    it("unregisters") {
      act ! UnregisterAll
      eventually {
        whenReady((registry ? FindAll).mapTo[LookupResult]) { i =>
          i.ingestors.size shouldBe 0
        }
      }
    }
  }
} 
Example 113
Source File: IngestionSocketActorSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.services

import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.BeforeAndAfterAll
import akka.actor.ActorSystem
import akka.actor.Props
import akka.testkit.TestProbe

class IngestionSocketActorSpec
    extends AnyFlatSpecLike
    with Matchers
    with BeforeAndAfterAll {

  private implicit val system: ActorSystem = ActorSystem()

  override def afterAll(): Unit = {
    system.terminate()
  }

  private def getIngestActorRef = system.actorOf(Props[IngestionSocketActor])

  it should "ack the init message in waiting state" in {
    val ingestActor = getIngestActorRef
    val probe = TestProbe()
    ingestActor.tell(SocketInit, probe.ref)
    probe.expectMsg(SocketAck)
  }

  it should "ack the init message in initialized state" in {
    val ingestActor = getIngestActorRef
    val probe = TestProbe()
    ingestActor ! SocketStarted(probe.ref)
    ingestActor.tell(SocketInit, probe.ref)
    probe.expectMsg(SocketAck)
  }

  private def testIngestionMessageAck(ingestionMessages: IncomingMessage*) = {
    it should s"ack the incoming messages of form: $ingestionMessages" in {
      val ingestActor = getIngestActorRef
      val probe = TestProbe()
      ingestActor ! SocketStarted(probe.ref)
      ingestActor.tell(SocketInit, probe.ref)
      probe.expectMsg(SocketAck)
      ingestionMessages.foreach { ingestionMessage =>
        ingestActor.tell(ingestionMessage, probe.ref)
        probe.expectMsgClass(classOf[SimpleOutgoingMessage])
        probe.expectMsg(SocketAck)
      }
    }
  }

  testIngestionMessageAck(IncomingMessage("-c HELP"))
  testIngestionMessageAck(IncomingMessage("-c SET hydra-ack = replicated"))
  testIngestionMessageAck(IncomingMessage("-c WHAT"))

} 
Example 114
Source File: RequestFactoriesSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.bootstrap

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer
import akka.testkit.TestKit
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._

class RequestFactoriesSpec
    extends TestKit(ActorSystem("RequestFactoriesSpec"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ScalaFutures {

  override def afterAll =
    TestKit.shutdownActorSystem(
      system,
      verifySystemShutdown = true,
      duration = 10.seconds
    )

  import RequestFactories._

  describe("The RequestFactories") {
    it("build a Hydra request from an HTTP request") {
      val hr = HttpRequest(entity = "test")
      val hydraReq = createRequest("1", hr)
      whenReady(hydraReq) { r => r.payload shouldBe "test" }
    }
  }
} 
Example 115
Source File: HydraIngestorRegistrySpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.bootstrap

import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import hydra.common.util.ActorUtils
import hydra.core.bootstrap.ReflectionsWrapper
import hydra.ingest.IngestorInfo
import hydra.ingest.services.IngestorRegistry
import hydra.ingest.services.IngestorRegistry.RegisterWithClass
import hydra.ingest.test.TestIngestor
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._

class HydraIngestorRegistrySpec
    extends TestKit(ActorSystem("HydraIngestorRegistrySpec"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ImplicitSender
    with ScalaFutures {

  override def afterAll =
    TestKit.shutdownActorSystem(
      system,
      verifySystemShutdown = true,
      duration = 10.seconds
    )

  val testRegistry =
    system.actorOf(Props[IngestorRegistry], "ingestor_registry")

  val cfg = ConfigFactory.parseString(
    "ingest.ingestor-registry.path=/user/ingestor_registry"
  )
  val registry = HydraIngestorRegistryClient(cfg)

  implicit val actorRefFactory = system

  ReflectionsWrapper.rescan()

  registry.registry ! RegisterWithClass(classOf[TestIngestor], "global")
  expectMsgType[IngestorInfo]

  describe("The Ingestor Registry") {
    it("uses the default registry if no config") {
      val path = HydraIngestorRegistryClient.registryPath(ConfigFactory.empty())
      path shouldBe s"/user/service/${ActorUtils.actorName(classOf[IngestorRegistry])}"
    }

    it("looks up an ingestor") {
      implicit val timeout = akka.util.Timeout(10.seconds)
      whenReady(registry.lookupIngestor("test_ingestor")) { i =>
        i.ingestors.size shouldBe 1
        i.ingestors(0).name shouldBe "test_ingestor"
        i.ingestors(0).path shouldBe testRegistry.path / "test_ingestor"
      }
    }
  }
} 
Example 116
Source File: DataSourceConnectionProviderSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.sql

import java.sql.SQLException
import java.util.Properties

import com.typesafe.config.ConfigFactory
import com.zaxxer.hikari.{HikariConfig, HikariDataSource}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers

import scala.collection.JavaConverters._


class DataSourceConnectionProviderSpec
    extends Matchers
    with AnyFlatSpecLike
    with BeforeAndAfterAll {

  val properties = new Properties
  val cfg = ConfigFactory.load().getConfig("db-cfg")
  cfg
    .entrySet()
    .asScala
    .foreach(e => properties.setProperty(e.getKey(), cfg.getString(e.getKey())))

  private val hikariConfig = new HikariConfig(properties)

  private val ds = new HikariDataSource(hikariConfig)

  override def afterAll() = ds.close()

  "The DataSourceConnectionProvider" should "establish a connection" in {
    val p = new DataSourceConnectionProvider(ds)
    p.getConnection().isValid(1) shouldBe true
  }

  it should "close the connection" in {
    val p = new DataSourceConnectionProvider(ds)
    p.close()
    intercept[SQLException](p.getConnection())
  }

  "The DriverManagerConnectionProvider" should "be configured properly" in {
    val config = ConfigFactory.parseString("""
        |connection.url = url
        |connection.user = test
        |connection.password = password
        |connection.max.retries = 20
        |connection.retry.backoff = 10s
      """.stripMargin)

    val c = DriverManagerConnectionProvider(config)
    c.password shouldBe "password"
    c.connectionUrl shouldBe "url"
    c.username shouldBe "test"
    c.retryBackoff.toSeconds shouldBe 10
    c.maxConnectionAttempts shouldBe 20
    c.close()
  }

  it should "return a new connection" in {
    val config = ConfigFactory.parseString(
      """
        |connection.url = "jdbc:h2:mem:test;DB_CLOSE_DELAY=-1"
        |connection.max.retries = 2
        |connection.retry.backoff = 1s
      """.stripMargin
    )

    val c = DriverManagerConnectionProvider(config)
    c.getConnection() should not be null
    c.getNewConnection() should not be null
    c.close()
    c.connection.isValid(2) shouldBe false
  }
} 
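The suite above pulls its pool settings from a db-cfg block via ConfigFactory.load(), but the backing file is not shown. As an assumption, a config block using standard HikariCP property names would satisfy it; in the parseString style the document already uses:

import com.typesafe.config.ConfigFactory

// Hypothetical equivalent of the expected application.conf content
// (keys are standard HikariCP properties; values are placeholders):
val cfg = ConfigFactory.parseString("""
    |db-cfg {
    |  jdbcUrl  = "jdbc:h2:mem:test;DB_CLOSE_DELAY=-1"
    |  username = "test"
    |  password = ""
    |}
  """.stripMargin).getConfig("db-cfg")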
Example 117
Source File: RabbitIngestorSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.rabbit

import akka.actor.{ActorSystem, Props}
import akka.testkit.TestActors.ForwardActor
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import hydra.core.ingest.HydraRequest
import hydra.core.protocol._
import hydra.core.transport.{AckStrategy, HydraRecord}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._

class RabbitIngestorSpec
    extends TestKit(ActorSystem("rabbit-ingestor-spec"))
    with Matchers
    with AnyFunSpecLike
    with ImplicitSender
    with BeforeAndAfterAll {

  val ingestor = system.actorOf(Props[RabbitIngestor])

  val probe = TestProbe()

  val rabbitTransport =
    system.actorOf(Props(new ForwardActor(probe.ref)), "rabbit_transport")

  override def afterAll =
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)

  describe("When using the rabbit ingestor") {
    it("Joins if exchange provided") {
      val request = HydraRequest(
        "123",
        "{'name': 'test'}",
        None,
        Map(RabbitRecord.HYDRA_RABBIT_EXCHANGE -> "test.exchange")
      )
      ingestor ! Publish(request)
      expectMsg(10.seconds, Join)
    }

    it("Joins if queue provided") {
      val request = HydraRequest(
        "123",
        "{'name': 'test'}",
        None,
        Map(RabbitRecord.HYDRA_RABBIT_QUEUE -> "test.queue")
      )
      ingestor ! Publish(request)
      expectMsg(10.seconds, Join)
    }

    it("Ignores") {
      val request = HydraRequest("123", "test string")
      ingestor ! Publish(request)
      expectMsg(10.seconds, Ignore)
    }

    it("transports") {
      ingestor ! Ingest(
        TestRecord("test", "test", "", AckStrategy.NoAck),
        AckStrategy.NoAck
      )
      probe.expectMsg(
        Produce(
          TestRecord("test", "test", "", AckStrategy.NoAck),
          self,
          AckStrategy.NoAck
        )
      )
    }
  }
}

case class TestRecord(
    destination: String,
    payload: String,
    key: String,
    ackStrategy: AckStrategy
) extends HydraRecord[String, String] 
Example 118
Source File: KafkaAdminAlgebraSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.kafka.algebras

import akka.actor.ActorSystem
import cats.effect.{ContextShift, IO}
import cats.implicits._
import hydra.kafka.util.KafkaUtils.TopicDetails
import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

import scala.concurrent.ExecutionContext

final class KafkaAdminAlgebraSpec
    extends AnyWordSpecLike
    with Matchers
    with BeforeAndAfterAll
    with EmbeddedKafka {

  private val port = 8023

  implicit private val kafkaConfig: EmbeddedKafkaConfig =
    EmbeddedKafkaConfig(kafkaPort = port, zooKeeperPort = 3027)

  implicit private val contextShift: ContextShift[IO] =
    IO.contextShift(ExecutionContext.global)

  implicit private val system: ActorSystem = ActorSystem(
    "kafka-client-spec-system"
  )

  override def beforeAll(): Unit = {
    super.beforeAll()
    EmbeddedKafka.start()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    EmbeddedKafka.stop()
  }

  (for {
    live <- KafkaAdminAlgebra
      .live[IO](s"localhost:$port")
    test <- KafkaAdminAlgebra.test[IO]
  } yield {
    runTests(live)
    runTests(test, isTest = true)
  }).unsafeRunSync()

  private def runTests(kafkaClient: KafkaAdminAlgebra[IO], isTest: Boolean = false): Unit = {
    (if (isTest) "KafkaAdmin#test" else "KafkaAdmin#live") must {
      "create a topic" in {
        val topicName = "Topic1"
        val topicDetails = TopicDetails(3, 1.toShort)
        (kafkaClient.createTopic(topicName, topicDetails) *> kafkaClient
          .describeTopic(topicName)
          .map {
            case Some(topic) =>
              topic.name shouldBe topicName
              topic.numberPartitions shouldBe topicDetails.numPartitions
            case None => fail("Found None when a Topic was Expected")
          }).unsafeRunSync()
      }

      "list all topics" in {
        kafkaClient.getTopicNames.unsafeRunSync() shouldBe List("Topic1")
      }

      "delete a topic" in {
        val topicToDelete = "topic_to_delete"
        (for {
          _ <- kafkaClient.createTopic(topicToDelete, TopicDetails(1, 1))
          _ <- kafkaClient.deleteTopic(topicToDelete)
          maybeTopic <- kafkaClient.describeTopic(topicToDelete)
        } yield maybeTopic should not be defined).unsafeRunSync()
      }
    }
  }
} 
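Note how the suite body above is produced: runTests is invoked once per interpreter inside unsafeRunSync() while the class is being constructed, so ScalaTest registers both the live and the in-memory variant of every test. A reduced sketch of the same registration pattern (all names are illustrative):

import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

// Sketch only: register the same tests twice against two implementations
// by calling a registration method from the constructor.
class AdditionSpec extends AnyWordSpecLike with Matchers {
  private def runTests(label: String, add: (Int, Int) => Int): Unit =
    label must {
      "add two numbers" in {
        add(1, 2) shouldBe 3
      }
    }

  runTests("naive", _ + _)
  runTests("overflow-checked", (a, b) => Math.addExact(a, b))
}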
Example 119
Source File: KafkaConsumerProxySpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.kafka.consumer

import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import hydra.kafka.consumer.KafkaConsumerProxy._
import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}
import org.apache.kafka.common.TopicPartition
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._


class KafkaConsumerProxySpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ImplicitSender {

  implicit val config =
    EmbeddedKafkaConfig(kafkaPort = 8092, zooKeeperPort = 3181)

  override def beforeAll() = {
    super.beforeAll()
    EmbeddedKafka.start()
    EmbeddedKafka.createCustomTopic("test-consumer1")
    EmbeddedKafka.createCustomTopic("test-consumer2")
  }

  override def afterAll() = {
    super.afterAll()
    EmbeddedKafka.stop()
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)
  }

  lazy val kafkaProxy = system.actorOf(Props[KafkaConsumerProxy])

  describe("When using KafkaConsumerProxy") {
    it("gets latest offsets for a topic") {
      kafkaProxy ! GetLatestOffsets("test-consumer1")
      expectMsg(
        10.seconds,
        LatestOffsetsResponse(
          "test-consumer1",
          Map(new TopicPartition("test-consumer1", 0) -> 0L)
        )
      )
    }

    it("lists topics") {
      kafkaProxy ! ListTopics
      expectMsgPF(10.seconds) {
        case ListTopicsResponse(topics) =>
          topics.keys should contain allOf ("test-consumer1", "test-consumer2")
      }
    }

    it("gets partition info") {
      kafkaProxy ! GetPartitionInfo("test-consumer2")
      expectMsgPF(10.seconds) {
        case PartitionInfoResponse(topic, response) =>
          topic shouldBe "test-consumer2"
          response.map(p => p.partition()) shouldBe Seq(0)
      }
    }

    it("handles errors") {
      kafkaProxy ! GetPartitionInfo("test-consumer-unknown")
      expectMsgPF(10.seconds) {
        case PartitionInfoResponse(topic, response) =>
          response(0).leader().idString shouldBe "0"
          topic should startWith("test-consumer-unknown")
      }
    }
  }
} 
Example 120
Source File: KafkaMetricsSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.kafka.transport

import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import hydra.core.transport.AckStrategy
import hydra.kafka.producer.KafkaRecordMetadata
import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll
import spray.json.DefaultJsonProtocol


class KafkaMetricsSpec
    extends TestKit(ActorSystem("hydra"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with DefaultJsonProtocol {

  import KafkaRecordMetadata._

  implicit val config = EmbeddedKafkaConfig(
    kafkaPort = 8092,
    zooKeeperPort = 3181,
    customBrokerProperties = Map(
      "auto.create.topics.enable" -> "false",
      "offsets.topic.replication.factor" -> "1"
    )
  )

  override def afterAll() = {
    super.afterAll()
    EmbeddedKafka.stop()
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)
  }

  override def beforeAll() = {
    super.beforeAll()
    EmbeddedKafka.start()
    EmbeddedKafka.createCustomTopic("metrics_topic")
  }

  describe("When using the KafkaMetrics object") {

    it("uses the NoOpMetrics") {
      KafkaMetrics(ConfigFactory.empty()) shouldBe NoOpMetrics
      KafkaMetrics(
        ConfigFactory.parseString("transports.kafka.metrics.enabled=false")
      ) shouldBe NoOpMetrics
    }

    it("uses the PublishMetrics") {
      import spray.json._
      val cfg = ConfigFactory.parseString(s"""
           | transports.kafka.metrics.topic = metrics_topic
           | transports.kafka.metrics.enabled=true""".stripMargin)
      val pm = KafkaMetrics(cfg)
      pm shouldBe a[PublishMetrics]
      val kmd = KafkaRecordMetadata(1, 1, "topic", 1, 1, AckStrategy.NoAck)
      pm.saveMetrics(kmd)
      EmbeddedKafka
        .consumeFirstStringMessageFrom("metrics_topic")
        .parseJson shouldBe kmd.toJson

    }
  }
} 
Example 121
Source File: LoggingAdapterSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.common.logging

import akka.actor.{Actor, ActorSystem}
import akka.testkit.{TestActorRef, TestKit, TestProbe}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll


class LoggingAdapterSpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll {

  override def afterAll = TestKit.shutdownActorSystem(system)

  describe("The logging adapter") {

    it("allows an actor to use the logger") {

      val act = TestActorRef(new Actor with ActorLoggingAdapter {
        override def receive = {
          case _ => log.info("got it"); sender ! "got it"
        }
      }, "logger-test")

      act.underlyingActor.log.getName shouldBe "akka.testkit.TestActorRef"

      // Send a message and make sure we get a response back
      val probe = TestProbe()
      probe.send(act, "test")
      probe.expectMsgType[String] shouldBe "got it"
    }
  }
} 
Example 122
Source File: ActorConfigSupportSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.common.config

import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestKit}
import hydra.common.testing.DummyActor
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll


class ActorConfigSupportSpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ConfigSupport {

  val dummy = TestActorRef[DummyActor]

  override def afterAll = TestKit.shutdownActorSystem(system)

  describe("When mixing the trait in an actor") {
    it("has the correct actor name") {
      dummy.underlyingActor.thisActorName shouldBe "dummy_actor"
    }

    it("has the correct actor config") {
      dummy.underlyingActor.actorConfig shouldBe rootConfig.getConfig(
        "hydraTest.actors.dummy_actor"
      )
    }

  }
} 
Example 123
Source File: TransportOpsSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.core.ingest

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.TestActors.ForwardActor
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import com.pluralsight.hydra.reflect.DoNotScan
import hydra.core.akka.ActorInitializationException
import hydra.core.protocol.{IngestorError, Produce}
import hydra.core.test.TestRecordFactory
import hydra.core.transport.AckStrategy.NoAck
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._


class TransportOpsSpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ImplicitSender
    with ScalaFutures {

  override def afterAll() = TestKit.shutdownActorSystem(system)

  val supervisor = TestProbe()

  val tm = TestProbe()

  val transport =
    system.actorOf(Props(new ForwardActor(tm.ref)), "test-transport")

  describe("TransportOps") {
    it("looks up a transport") {
      val t =
        system.actorOf(Props(classOf[TestTransportIngestor], supervisor.ref))
      t ! "hello"
      expectMsg("hi!")
    }

    it("won't initialize if transport can't be found") {
      val t = system.actorOf(Props[TestTransportIngestorError])
      t ! "hello"
      expectNoMessage()
    }

    it("transports a record") {
      val req = HydraRequest("123", "test-produce")
      val t =
        system.actorOf(Props(classOf[TestTransportIngestor], supervisor.ref))
      t ! req
      whenReady(TestRecordFactory.build(req))(r =>
        tm.expectMsg(Produce(r, self, NoAck))
      )
    }
  }
}

@DoNotScan
class TestTransportIngestor(supervisor: ActorRef)
    extends Ingestor
    with TransportOps {

  override val recordFactory = TestRecordFactory

  override def initTimeout = 500 millis

  ingest {
    case "hello" => sender ! "hi!"
    case req: HydraRequest =>
      val record = Await.result(TestRecordFactory.build(req), 3.seconds)
      transport(record, NoAck)
  }

  override def transportName = "test-transport"
}

class TestTransportIngestorError extends Ingestor with TransportOps {
  override val recordFactory = TestRecordFactory

  override def transportName = "test-transport-unknown"
} 
Example 124
Source File: TransportCallbackSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.core.transport

import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import hydra.core.protocol.{RecordNotProduced, RecordProduced}
import hydra.core.test.{TestRecord, TestRecordMetadata}
import hydra.core.transport.Transport.{Confirm, TransportError}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll
import scala.concurrent.duration._

class TransportCallbackSpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ImplicitSender {

  private val ingestor = TestProbe()
  private val supervisor = TestProbe()

  override def afterAll(): Unit = {
    super.afterAll()
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)
  }

  describe("Transports Acks") {
    it("handles empty callbacks") {
      NoCallback.onCompletion(
        -1,
        None,
        Some(new IllegalArgumentException("test"))
      )
      ingestor.expectNoMessage(3 seconds)
      supervisor.expectNoMessage(3 seconds)
    }

    it("handles simple/transport only callbacks") {
      val probe = TestProbe()
      new TransportSupervisorCallback(probe.ref)
        .onCompletion(-11, None, Some(new IllegalArgumentException("test")))
      ingestor.expectNoMessage(3 seconds)
      supervisor.expectNoMessage(3 seconds)
      probe.expectMsg(TransportError(-11))

      new TransportSupervisorCallback(probe.ref).onCompletion(
        -11,
        Some(TestRecordMetadata(1, 0, "", AckStrategy.NoAck)),
        None
      )
      ingestor.expectNoMessage(3 seconds)
      supervisor.expectNoMessage(3 seconds)
      probe.expectMsg(Confirm(-11))
    }

    it("handles ingestor callbacks") {
      val rec = TestRecord("OK", "1", "test", AckStrategy.NoAck)
      val transport = TestProbe()
      val cb = new IngestorCallback[String, String](
        rec,
        ingestor.ref,
        supervisor.ref,
        transport.ref
      )

      cb.onCompletion(
        1,
        Some(TestRecordMetadata(1, 0, "", AckStrategy.NoAck)),
        None
      )
      ingestor.expectMsgPF() {
        case RecordProduced(md, sup) =>
          sup shouldBe supervisor.ref
          md shouldBe a[TestRecordMetadata]
      }
      transport.expectMsg(Confirm(1))

      cb.onCompletion(1, None, Some(new IllegalArgumentException("test")))
      ingestor.expectMsgPF() {
        case RecordNotProduced(r, e, s) =>
          r shouldBe rec
          e.getMessage shouldBe "test"
          s shouldBe supervisor.ref
      }
      transport.expectMsg(TransportError(1))
    }
  }
} 
Example 125
Source File: ComposeReceiveSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.core.akka

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpecLike

class ComposeReceiveSpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFlatSpecLike
    with BeforeAndAfterAll
    with ImplicitSender {

  override def afterAll =
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)

  "The ComposingReceiveTrait" should "compose" in {
    system.actorOf(Props[TestBaseActor]) ! "foo"
    expectMsg("bar")

    system.actorOf(Props[TestComposeActor]) ! "foo"
    expectMsg("new-bar")
  }

}

trait TestBase extends Actor with ComposingReceive {

  override def baseReceive = {
    case "foo" => sender ! "bar"
  }
}

class TestBaseActor extends TestBase {
  compose(Actor.emptyBehavior)
}

class TestComposeActor extends TestBase {
  compose {
    case "foo" => sender ! "new-bar"
  }
} 
Example 126
Source File: HydraMetricsSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.core.monitor

import akka.japi.Option.Some
import kamon.Kamon
import kamon.metric.{Counter, Gauge}
import org.scalamock.scalatest.proxy.MockFactory
import org.scalatest._
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Seconds, Span}
import scalacache.guava.GuavaCache

import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Random, Try}

class HydraMetricsSpec
    extends Matchers
    with AnyFlatSpecLike
    with Eventually
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with MockFactory
    with ScalaFutures {

  import HydraMetrics._
  import scalacache.modes.try_._

  implicit override val patienceConfig =
    PatienceConfig(
      timeout = scaled(Span(2, Seconds)),
      interval = scaled(Span(5, Millis))
    )

  override def beforeEach() = {
    gaugesCache.removeAll()
    countersCache.removeAll()
    histogramsCache.removeAll()
  }

  override def afterAll = Try(Kamon.stopModules())

  val lookup = "lookup.xyz"
  val lookup2 = "lookup.abc"

  def generateTags: Seq[(String, String)] = Seq("tag1" -> "Everything's fine.")

  "An object mixing in HydraMetrics" should
    "create new counters with new lookup keys + metric names" in {
    shouldCreateNewMetric[Counter](incrementCounter _, countersCache)
  }

  it should
    "create new gauges with new lookup keys + metric names" in {
    shouldCreateNewMetric[Gauge](incrementGauge _, gaugesCache)
  }

  it should "lookup existing counters" in {
    shouldLookupExistingMetric[Counter](incrementCounter _, countersCache)
  }

  it should
    "lookup an existing gauge" in {
    shouldLookupExistingMetric[Gauge](decrementGauge _, gaugesCache)
  }

  it should
    "lookup an existing histogram" in {
    val f = recordToHistogram _

    whenReady(f(lookup, "histogram.metric", 100, generateTags)) { r =>
      whenReady(f(lookup, "histogram.metric", 100, generateTags)) { x =>
        r shouldEqual x
      }
    }
  }

  private def shouldCreateNewMetric[A](
      f: (String, String, => Seq[(String, String)]) => Unit,
      cache: GuavaCache[A]
  ) = {
    cache.get(lookup).map { result => result shouldBe None }

    f(lookup, "metric" + Random.nextInt(Integer.MAX_VALUE), generateTags)

    cache.get(lookup).map { result => result shouldBe a[Some[_]] }
  }

  private def shouldLookupExistingMetric[A](
      f: (String, String, => Seq[(String, String)]) => Unit,
      cache: GuavaCache[A]
  ) = {
    val metric = "metric" + Random.nextInt(Integer.MAX_VALUE)

    f(lookup, metric, generateTags) shouldEqual f(lookup, metric, generateTags)
  }
} 
Example 127
Source File: ReflectionsWrapperSpec.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.core.bootstrap

import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpecLike
import test.scan.TestServiceProvider


class ReflectionsWrapperSpec
    extends Matchers
    with AnyFlatSpecLike
    with BeforeAndAfterAll {

  "The ReflectionsWrapper object" should "load package list from configs in" in {
    //scan-packages
    ReflectionsWrapper.scanPkgs should contain allOf ("hydra", "test.scan")
  }

  it should "load by subtype" in {
    ReflectionsWrapper.reflections.getSubTypesOf(classOf[ServiceProvider]) should contain(
      classOf[TestServiceProvider]
    )
  }
} 
Example 128
Source File: BaseAkkaSpec.scala    From incubator-retired-iota   with Apache License 2.0 5 votes vote down vote up
package org.apache.iota.fey

import java.nio.file.Paths

import akka.actor.{ActorIdentity, ActorRef, ActorSystem, Identify, Props}
import akka.testkit.{EventFilter, TestEvent, TestProbe}
import com.typesafe.config.ConfigFactory
import org.scalatest.BeforeAndAfterAll
import play.api.libs.json._

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.Await

class BaseAkkaSpec extends BaseSpec with BeforeAndAfterAll with LoggingTest {

  //Load default configuration for Fey when running tests
  resetCapturedLogs()
  CONFIG.loadUserConfiguration(Paths.get(TestSetup.configTest.toURI()).toFile().getAbsolutePath)
  TestSetup.setup()

  val systemName = "FEY-TEST"
  implicit val system = ActorSystem(systemName, ConfigFactory.parseString("""akka.loggers = ["akka.testkit.TestEventListener"]"""))
  system.eventStream.publish(TestEvent.Mute(EventFilter.debug()))
  system.eventStream.publish(TestEvent.Mute(EventFilter.info()))
  system.eventStream.publish(TestEvent.Mute(EventFilter.warning()))
  system.eventStream.publish(TestEvent.Mute(EventFilter.error()))

  val globalIdentifierName = "GLOBAL-IDENTIFIER"
  val globalIdentifierRef = system.actorOf(Props[IdentifyFeyActors],globalIdentifierName)

  override protected def afterAll(): Unit = {
    //Force reload of GenericActor's jar
    Utils.loadedJars.remove("fey-test-actor.jar")
    Monitor.events.removeAllNodes()
    Await.ready(system.terminate(), 20.seconds)
  }

  implicit class TestProbeOps(probe: TestProbe) {

    def expectActor(path: String, max: FiniteDuration = 3.seconds): ActorRef = {
      probe.within(max) {
        var actor = null: ActorRef
        probe.awaitAssert {
          (probe.system actorSelection path).tell(Identify(path), probe.ref)
          probe.expectMsgPF(100 milliseconds) {
            case ActorIdentity(`path`, Some(ref)) => actor = ref
          }
        }
        actor
      }
    }

    def expectActorInSystem(path: String, lookInSystem: ActorSystem, max: FiniteDuration = 3.seconds): ActorRef = {
      probe.within(max) {
        var actor = null: ActorRef
        probe.awaitAssert {
          (lookInSystem actorSelection path).tell(Identify(path), probe.ref)
          probe.expectMsgPF(100 milliseconds) {
            case ActorIdentity(`path`, Some(ref)) => actor = ref
          }
        }
        actor
      }
    }

    def verifyActorTermination(actor: ActorRef)(implicit system: ActorSystem): Unit = {
      val watcher = TestProbe()
      watcher.watch(actor)
      watcher.expectTerminated(actor)
    }

    def notExpectActor(path: String, max: FiniteDuration = 3.seconds): Unit = {
      probe.within(max) {
        probe.awaitAssert {
          (probe.system actorSelection path).tell(Identify(path), probe.ref)
          probe.expectMsgPF(100 milliseconds) {
            case ActorIdentity(`path`, None) =>
          }
        }
      }
    }

    def isThreadRunning(threadName: String): Boolean = {
      Thread.getAllStackTraces.keySet().toArray
        .map(_.asInstanceOf[Thread])
        .find(_.getName == threadName) match {
        case Some(thread) => thread.isAlive
        case None => false
      }
    }
  }

  // Utility functions
  def getJSValueFromString(json: String): JsValue = {
    Json.parse(json)
  }

} 
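The TestProbeOps implicit class turns path-based actor lookup into assertion helpers. A hypothetical spec built on BaseAkkaSpec (illustrative only, not from the project) would use it like this:

import akka.testkit.TestProbe

// Hypothetical usage sketch: expectActor retries the Identify handshake
// until an actor exists at the given path, then returns its ActorRef.
class GlobalIdentifierSpec extends BaseAkkaSpec {
  val probe = TestProbe()
  val identifier = probe.expectActor(s"/user/$globalIdentifierName")
}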
Example 129
Source File: MultiNodeReplicationEndpoint.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate

import akka.actor._
import akka.remote.testkit.MultiNodeSpec

import org.scalatest.BeforeAndAfterAll

trait MultiNodeReplicationEndpoint extends BeforeAndAfterAll { this: MultiNodeSpec with MultiNodeWordSpec =>
  def logName: String = {
    val cn = getClass.getSimpleName
    cn.substring(0, cn.lastIndexOf("MultiJvm"))
  }

  def createEndpoint(endpointId: String, connections: Set[ReplicationConnection]): ReplicationEndpoint =
    createEndpoint(endpointId, Set(logName), connections)

  def createEndpoint(endpointId: String, logNames: Set[String], connections: Set[ReplicationConnection], activate: Boolean = true): ReplicationEndpoint = {
    val endpoint = new ReplicationEndpoint(endpointId, logNames, id => logProps(id), connections)
    if (activate) endpoint.activate()
    endpoint
  }

  implicit class RichAddress(address: Address) {
    def toReplicationConnection: ReplicationConnection =
      ReplicationConnection(address.host.get, address.port.get, address.system)
  }

  implicit class RichReplicationEndpoint(endpoint: ReplicationEndpoint) {
    def log: ActorRef =
      endpoint.logs(logName)

    def logId: String =
      endpoint.logId(logName)
  }

  def logProps(logId: String): Props
} 
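Inside a concrete MultiNodeSpec that mixes this trait in, the RichAddress conversion keeps endpoint wiring terse. A hedged sketch (the role name and endpoint id are illustrative):

// Sketch only, written as it would appear inside a MultiNodeSpec body:
// turn another node's address into a replication connection and build
// the local endpoint from it.
val remoteConnection = node(RoleName("nodeB")).address.toReplicationConnection
val endpoint = createEndpoint("endpointA", Set(remoteConnection))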
Example 130
Source File: MultiNodeSupportCassandra.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate

import java.io.File

import akka.actor.Props
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeSpec

import com.rbmhtechnology.eventuate.log.cassandra._

import org.apache.commons.io.FileUtils
import org.scalatest.BeforeAndAfterAll

trait MultiNodeSupportCassandra extends BeforeAndAfterAll { this: MultiNodeSpec with MultiNodeWordSpec =>
  val coordinator = RoleName("nodeA")

  def cassandraDir: String =
    MultiNodeEmbeddedCassandra.DefaultCassandraDir

  def logProps(logId: String): Props =
    CassandraEventLog.props(logId)

  override def atStartup(): Unit = {
    if (isNode(coordinator)) {
      MultiNodeEmbeddedCassandra.start(cassandraDir)
      Cassandra(system)
    }
    enterBarrier("startup")
  }

  override def afterAll(): Unit = {
    // get all config data before shutting down node
    val snapshotRootDir = new File(system.settings.config.getString("eventuate.snapshot.filesystem.dir"))

    // shut down node
    super.afterAll()

    // clean database and delete snapshot files
    if (isNode(coordinator)) {
      FileUtils.deleteDirectory(snapshotRootDir)
      MultiNodeEmbeddedCassandra.clean()
    }
  }
} 
Example 131
Source File: VertxAdapterSpec.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.rbmhtechnology.eventuate.adapter.vertx.api.{ EventProducer, VertxAdapterConfig }
import com.rbmhtechnology.eventuate.log.EventLogWriter
import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog
import com.rbmhtechnology.eventuate.utilities._
import com.rbmhtechnology.eventuate.{ LocationCleanupLeveldb, ReplicationEndpoint }
import com.typesafe.config.Config
import org.scalatest.{ BeforeAndAfterAll, MustMatchers, WordSpecLike }

import scala.collection.immutable.Seq

object VertxAdapterSpec {
  case class Event(id: String)

  val Config = TestConfig.withReplayBatchSize(10)
}

class VertxAdapterSpec extends TestKit(ActorSystem("test", VertxAdapterSpec.Config))
  with WordSpecLike with MustMatchers with BeforeAndAfterAll with StopSystemAfterAll with LocationCleanupLeveldb
  with VertxEnvironment with VertxEventBusProbes {

  import VertxAdapterSpec._
  import utilities._

  val logName = "logA"
  val adapterId = "adapter1"
  var storage: ActorStorageProvider = _
  var endpoint: ReplicationEndpoint = _

  override def config: Config = VertxAdapterSpec.Config

  override def beforeAll(): Unit = {
    super.beforeAll()
    storage = new ActorStorageProvider(adapterId)
    endpoint = new ReplicationEndpoint(id = "1", logNames = Set(logName), logFactory = logId => LeveldbEventLog.props(logId), connections = Set())
  }

  "A VertxAdapter" must {
    "read events from an inbound log and deliver them to the Vert.x eventbus" in {
      val log = endpoint.logs(logName)
      val adapterConfig = VertxAdapterConfig()
        .addProducer(EventProducer.fromLog(log)
          .publishTo {
            case _ => endpoint1.address
          }
          .as("adapter1"))
        .registerDefaultCodecFor(classOf[Event])

      val vertxAdapter = VertxAdapter(adapterConfig, vertx, storage)
      val logWriter = new EventLogWriter("w1", endpoint.logs(logName))

      endpoint.activate()
      vertxAdapter.start()

      logWriter.write(Seq(Event("1"))).await.head

      storage.expectRead(replySequenceNr = 0)
      storage.expectWrite(sequenceNr = 1)

      endpoint1.probe.expectVertxMsg(body = Event("1"))

      logWriter.write(Seq(Event("2"))).await

      storage.expectWrite(sequenceNr = 2)

      endpoint1.probe.expectVertxMsg(body = Event("2"))

      logWriter.write(Seq(Event("3"), Event("4"))).await

      storage.expectWriteAnyOf(sequenceNrs = Seq(3, 4))

      endpoint1.probe.expectVertxMsg(body = Event("3"))
      endpoint1.probe.expectVertxMsg(body = Event("4"))
    }
  }
} 
Example 132
Source File: MultiNodeSupportLeveldb.scala    From eventuate   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate

import java.io.File

import akka.actor.Props
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeSpec

import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog

import org.apache.commons.io.FileUtils
import org.scalatest.BeforeAndAfterAll

trait MultiNodeSupportLeveldb extends BeforeAndAfterAll { this: MultiNodeSpec with MultiNodeWordSpec =>
  val coordinator = RoleName("nodeA")

  def logProps(logId: String): Props =
    LeveldbEventLog.props(logId)

  override def afterAll(): Unit = {
    // get all config data before shutting down node
    val snapshotRootDir = new File(system.settings.config.getString("eventuate.snapshot.filesystem.dir"))
    val logRootDir = new File(system.settings.config.getString("eventuate.log.leveldb.dir"))

    // shut down node
    super.afterAll()

    // delete log and snapshot files
    if (isNode(coordinator)) {
      FileUtils.deleteDirectory(snapshotRootDir)
      FileUtils.deleteDirectory(logRootDir)
    }
  }
} 
Example 133
Source File: AkkaUnitTestLike.scala    From reactive-kinesis   with Apache License 2.0 5 votes vote down vote up
package com.weightwatchers.reactive.kinesis.common

import akka.actor.{ActorSystem, Scheduler}
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKitBase
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.ExecutionContextExecutor


trait AkkaUnitTestLike extends TestKitBase with ScalaFutures with BeforeAndAfterAll {
  self: Suite =>

  implicit lazy val config: Config                = ConfigFactory.load("sample.conf")
  implicit lazy val system: ActorSystem           = ActorSystem(suiteName, config)
  implicit lazy val scheduler: Scheduler          = system.scheduler
  implicit lazy val mat: Materializer             = ActorMaterializer()
  implicit lazy val ctx: ExecutionContextExecutor = system.dispatcher

  abstract override def afterAll(): Unit = {
    super.afterAll()
    // intentionally shutdown the actor system last.
    system.terminate().futureValue
  }
} 
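A suite mixing in AkkaUnitTestLike inherits a lazily created ActorSystem named after the suite, plus a materializer and dispatcher, all torn down last in afterAll. A hypothetical stream test (illustrative only):

import akka.stream.scaladsl.{Sink, Source}
import org.scalatest.{FreeSpec, Matchers}

// Hypothetical usage sketch: the implicit materializer and the
// futureValue helper both come from AkkaUnitTestLike.
class SimpleStreamSpec extends FreeSpec with Matchers with AkkaUnitTestLike {
  "a simple source" - {
    "materializes to the expected sequence" in {
      Source(1 to 3).runWith(Sink.seq).futureValue shouldBe Seq(1, 2, 3)
    }
  }
}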
Example 134
Source File: KinesisProducerIntegrationSpec.scala    From reactive-kinesis   with Apache License 2.0 5 votes vote down vote up
package com.weightwatchers.reactive.kinesis

import java.io.File

import com.amazonaws.services.kinesis.producer.{KinesisProducer => AWSKinesisProducer}
import com.typesafe.config.ConfigFactory
import com.weightwatchers.reactive.kinesis.common.{
  KinesisSuite,
  KinesisTestConsumer,
  TestCredentials
}
import com.weightwatchers.reactive.kinesis.consumer.KinesisConsumer.ConsumerConf
import com.weightwatchers.reactive.kinesis.models.ProducerEvent
import com.weightwatchers.reactive.kinesis.producer.{KinesisProducer, ProducerConf}
import org.scalatest.concurrent.Eventually
import org.scalatest.mockito.MockitoSugar
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, FreeSpec, Matchers}

import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Random

//scalastyle:off magic.number
class KinesisProducerIntegrationSpec
    extends FreeSpec
    with Matchers
    with MockitoSugar
    with BeforeAndAfterAll
    with Eventually
    with KinesisSuite {

  implicit val ece = scala.concurrent.ExecutionContext.global

  val TestStreamNrOfMessagesPerShard: Long = 0

  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(100, Millis))

  "The KinesisProducer" - {

    "Should publish a message to a stream" in new withKinesisConfForApp(
      "int-test-stream-producer-1"
    ) {

      val conf     = producerConf()
      val producer = KinesisProducer(conf)

      val existingRecordCount = testConsumer.retrieveRecords(conf.streamName, 10).size

      val event = ProducerEvent("1234", Random.alphanumeric.take(10).mkString)
      producer.addUserRecord(event)

      eventually {
        val records: Seq[String] = testConsumer.retrieveRecords(conf.streamName, 10)
        records.size shouldBe (existingRecordCount + 1)
        records should contain(
          new String(event.payload.array(), java.nio.charset.StandardCharsets.UTF_8)
        )
      }
    }
  }
}

//scalastyle:on 
Example 135
Source File: TypesafeConfigExtensionsSpec.scala    From reactive-kinesis   with Apache License 2.0 5 votes vote down vote up
package com.weightwatchers.reactive.kinesis.utils

import com.typesafe.config.ConfigFactory
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{BeforeAndAfterAll, FreeSpec, Matchers}


class TypesafeConfigExtensionsSpec
    extends FreeSpec
    with Matchers
    with MockitoSugar
    with BeforeAndAfterAll {

  val kplConfig = ConfigFactory.parseString("""
      |kpl {
      |   AggregationEnabled = true
      |   AggregationMaxCount = 4294967295
      |   AggregationMaxSize = 51200
      |   CollectionMaxCount = 500
      |}
      |
    """.stripMargin).getConfig("kpl")

  //scalastyle:off magic.number
  "The RichConfig" - {

    "Should convert typesafe config key values into Java Properties" in {

      import TypesafeConfigExtensions._

      val javaProperties = kplConfig.toProperties

      javaProperties.size() should equal(4)
      javaProperties.getProperty("AggregationEnabled") should equal("true")
      javaProperties.getProperty("AggregationMaxCount") should equal("4294967295")
      javaProperties.getProperty("AggregationMaxSize") should equal("51200")
      javaProperties.getProperty("CollectionMaxCount") should equal("500")

    }
  }
  //scalastyle:on
} 
Example 136
Source File: MainServiceSpec.scala    From akka-api-gateway-example   with MIT License 5 votes vote down vote up
package jp.co.dzl.example.akka.api

import akka.http.scaladsl.Http
import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{ BeforeAndAfterAll, Matchers, FlatSpec }

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class MainServiceSpec extends FlatSpec with Matchers with BeforeAndAfterAll with ScalaFutures with MainService {
  override protected def afterAll: Unit = {
    Await.result(system.terminate(), Duration.Inf)
  }

  it should "inject configuration of http" in {
    val config = ConfigFactory.load()

    host shouldEqual config.getString("http.listen.host")
    port shouldEqual config.getInt("http.listen.port")
  }

  it should "bind and handle" in {
    val http = Http().bindAndHandle(handler.routes, host, port)
    http.futureValue.localAddress.getPort shouldEqual port
    http.futureValue.unbind()
  }
} 
Example 137
Source File: HttpClientSpec.scala    From akka-api-gateway-example   with MIT License 5 votes vote down vote up
package jp.co.dzl.example.akka.api.service

import akka.actor.ActorSystem
import akka.stream.scaladsl.Flow
import org.scalatest.{ BeforeAndAfterAll, Matchers, FlatSpec }

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class HttpClientSpec extends FlatSpec with Matchers with BeforeAndAfterAll {
  implicit val system = ActorSystem("http-client-spec")
  implicit val executor = system.dispatcher

  override protected def afterAll: Unit = {
    Await.result(system.terminate(), Duration.Inf)
  }

  "#conectionHttps" should "return outgoing connection flow" in {
    val httpClient = new HttpClientImpl(system)
    val connection = httpClient.connectionHttps("127.0.0.1", 8000, 5)

    connection shouldBe a[Flow[_, _, _]]
  }
} 
Example 138
Source File: GitHubSpec.scala    From akka-api-gateway-example   with MIT License 5 votes vote down vote up
package jp.co.dzl.example.akka.api.service

import akka.actor.ActorSystem
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.model.{ HttpMethods, HttpRequest, HttpResponse }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Source }
import akka.stream.testkit.scaladsl.TestSink
import org.scalamock.scalatest.MockFactory
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, Matchers }

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class GitHubSpec extends FlatSpec with Matchers with ScalaFutures with BeforeAndAfterAll with MockFactory {
  implicit val system = ActorSystem("github-spec")
  implicit val executor = system.dispatcher
  implicit val materializer = ActorMaterializer()

  override protected def afterAll: Unit = {
    Await.result(system.terminate(), Duration.Inf)
  }

  "#from" should "merge original headers to github request" in {
    val github = new GitHubImpl("127.0.0.1", 8000, 5, mock[HttpClient])
    val request = HttpRequest(HttpMethods.GET, "/")
      .addHeader(RawHeader("host", "dummy"))
      .addHeader(RawHeader("timeout-access", "dummy"))

    val result = Source.single(HttpRequest(HttpMethods.GET, "/v1/github/users/xxxxxx"))
      .via(github.from(request))
      .runWith(TestSink.probe[HttpRequest])
      .request(1)
      .expectNext()

    result.headers.filter(_.lowercaseName() == "host") shouldBe empty
    result.headers.filter(_.lowercaseName() == "timeout-access") shouldBe empty
    result.headers.filter(_.lowercaseName() == "x-forwarded-host") shouldNot be(empty)
  }

  "#send" should "connect using http client" in {
    val httpResponse = HttpResponse()
    val httpClient = mock[HttpClient]
    (httpClient.connectionHttps _).expects(*, *, *).returning(Flow[HttpRequest].map(_ => httpResponse))

    val github = new GitHubImpl("127.0.0.1", 8000, 5, httpClient)
    val result = Source.single(HttpRequest(HttpMethods.GET, "/"))
      .via(github.send)
      .runWith(TestSink.probe[HttpResponse])
      .request(1)
      .expectNext()

    result shouldBe httpResponse
  }
} 
Example 139
Source File: TestSchemaClickhouseQuerySpec.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse

import com.crobox.clickhouse.dsl.TestSchema
import com.crobox.clickhouse.dsl.execution.ClickhouseQueryExecutor
import com.crobox.clickhouse.dsl.schemabuilder.{CreateTable, Engine}
import com.crobox.clickhouse.testkit.ClickhouseSpec
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.ExecutionContext

trait TestSchemaClickhouseQuerySpec extends ClickhouseSpec with BeforeAndAfterAll with TestSchema with ScalaFutures {
  this: Suite =>
  val table1Entries: Seq[Table1Entry] = Seq()
  val table2Entries: Seq[Table2Entry] = Seq()

  implicit val ec: ExecutionContext

  implicit lazy val chExecutor: ClickhouseQueryExecutor = ClickhouseQueryExecutor.default(clickClient)

  override def beforeAll(): Unit = {
    super.beforeAll()
    val tables = for {
      _ <- clickClient.execute(
        CreateTable(OneTestTable, Engine.Memory, ifNotExists = true).query
      )
      _ <- clickClient.execute(
        CreateTable(
          TwoTestTable,
          Engine.Memory,
          ifNotExists = true
        ).query
      )
    } yield {}
    whenReady(tables) { _ =>
      val inserts = for {
        _ <- table1Entries.into(OneTestTable)
        _ <- table2Entries.into(TwoTestTable)
      } yield {}
      inserts.futureValue
    }
  }

  override def afterAll(): Unit = {
    super.afterAll()
  }
} 
Example 140
Source File: ClickhouseClientSpec.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse

import java.util.UUID

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKit
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}
import scala.util.Random

abstract class ClickhouseClientSpec(val config: Config = ConfigFactory.load())
    extends TestKit(ActorSystem("clickhouseClientTestSystem", config.getConfig("crobox.clickhouse.client")))
    with AnyFlatSpecLike
    with Matchers
    with BeforeAndAfterAll
    with ScalaFutures {

  implicit val materializer: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher

  override implicit def patienceConfig: PatienceConfig  = PatienceConfig(1.seconds, 50.millis)

  override protected def afterAll(): Unit = {
    try super.afterAll()
    finally Await.result(system.terminate(), 10.seconds)
  }

  def randomUUID: UUID =
    UUID.randomUUID

  def randomString: String =
    Random.alphanumeric.take(10).mkString

  def randomInt: Int =
    Random.nextInt(100000)
} 
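The class above is abstract; a hedged sketch of a concrete suite exercising its helpers (the suite name is illustrative):

class RandomHelpersSpec extends ClickhouseClientSpec {

  "randomString" should "produce ten alphanumeric characters" in {
    randomString should have length 10
  }

  "randomInt" should "stay within the documented bound" in {
    randomInt should (be >= 0 and be < 100000)
  }
}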
Example 141
Source File: TestSparkContext.scala    From gihyo-spark-book-example   with Apache License 2.0 5 votes vote down vote up
package jp.gihyo.spark

import org.scalatest.{BeforeAndAfterAll, Suite}

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

private[spark]
trait TestSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _
  @transient var sqlContext: SQLContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("SparkUnitTest")
      .set("spark.sql.shuffle.partitions", "2")
    sc = new SparkContext(conf)
    SQLContext.clearActive()
    sqlContext = new SQLContext(sc)
    SQLContext.setActive(sqlContext)
  }

  override def afterAll() {
    try {
      sqlContext = null
      SQLContext.clearActive()
      if (sc != null) {
        sc.stop()
      }
      sc = null
    } finally {
      super.afterAll()
    }
  }
} 
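A hedged usage sketch: since the trait is private[spark], a suite mixing it in lives in the same package (the word-count test itself is illustrative):

package jp.gihyo.spark

import org.scalatest.FunSuite

class WordCountSpec extends FunSuite with TestSparkContext {

  test("reduceByKey aggregates counts") {
    // `sc` is created in beforeAll and stopped in afterAll by the trait.
    val counts = sc.parallelize(Seq("a", "b", "a"))
      .map(word => (word, 1))
      .reduceByKey(_ + _)
      .collectAsMap()
    assert(counts("a") === 2)
  }
}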
Example 142
Source File: SqliteTestBase.scala    From smui   with Apache License 2.0 5 votes vote down vote up
package utils

import java.io.File

import org.scalatest.{BeforeAndAfterAll, Suite}
import play.api.db.evolutions.Evolutions
import play.api.db.{Database, Databases}

trait SqliteTestBase extends BeforeAndAfterAll { self: Suite =>

  private lazy val dbFile = File.createTempFile("sqlitetest", ".db")

  lazy val db: Database = {
    // Use a temp file for the database - in-memory DB cannot be used
    // since it would be a different DB for each connection in the connection pool
    // (see https://www.sqlite.org/inmemorydb.html)
    val d = Databases("org.sqlite.JDBC", s"jdbc:sqlite:${dbFile.getAbsolutePath}")
    Evolutions.applyEvolutions(d)
    d
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    db.shutdown()
    dbFile.delete()
  }

} 
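A hedged usage sketch for SqliteTestBase; the suite name is illustrative and only the Play Database API already used above is assumed:

import org.scalatest.{ FlatSpec, Matchers }

class EvolutionsSpec extends FlatSpec with Matchers with SqliteTestBase {

  "the evolved database" should "accept connections" in {
    // `db` is lazily created against the temp file and torn down in afterAll.
    db.withConnection { connection =>
      connection.isClosed shouldBe false
    }
  }
}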
Example 143
Source File: ApplicationTestBase.scala    From smui   with Apache License 2.0 5 votes vote down vote up
package models

import models.rules._
import org.scalatest.{BeforeAndAfterAll, Suite}
import play.api.{Application, Mode}
import play.api.db.{Database, Databases}
import play.api.inject.Injector
import play.api.inject.guice.GuiceApplicationBuilder


trait ApplicationTestBase extends BeforeAndAfterAll { self: Suite =>

  protected lazy val db: Database = Databases.inMemory()

  // Use logging settings from logback-test.xml for test application
  System.setProperty("logger.resource", "logback-test.xml")

  protected lazy val application: Application = new GuiceApplicationBuilder().
    in(Mode.Test).
    configure("db.default.url" -> db.url, "db.default.driver" -> "org.h2.Driver",
      "db.default.username" -> "", "db.default.password" -> "", "toggle.rule-deployment.log-rule-id" -> true).
    build()

  protected lazy val injector: Injector = application.injector

  protected lazy val repo: SearchManagementRepository = injector.instanceOf[SearchManagementRepository]

  protected val (core1Id, core2Id) = (SolrIndexId(), SolrIndexId())

  protected def createTestCores(): Unit = {
    repo.addNewSolrIndex(SolrIndex(core1Id, "core1", "First core"))
    repo.addNewSolrIndex(SolrIndex(core2Id, "core2", "Second core"))
  }

  protected def createTestRule(): Seq[SearchInputId] = {
    val synonymRules = List (SynonymRule(SynonymRuleId(), 0, "mercury", isActive = true))
    val upDownRules = List(
      UpDownRule(UpDownRuleId(), UpDownRule.TYPE_UP, 10, "notebook", isActive = true),
      UpDownRule(UpDownRuleId(), UpDownRule.TYPE_UP, 10, "lenovo", isActive = false),
      UpDownRule(UpDownRuleId(), UpDownRule.TYPE_DOWN, 10, "battery", isActive = true)
    )
    val deleteRules = List(DeleteRule(DeleteRuleId(), "freddy", isActive = true))
    val filterRules = List(FilterRule(FilterRuleId(), "zz top", isActive = true))

    val id = repo.addNewSearchInput(core1Id, "aerosmith", Seq.empty)
    val searchInput = SearchInputWithRules(id, "aerosmith", synonymRules, upDownRules, filterRules, isActive = true, comment = "")
    repo.updateSearchInput(searchInput)

    val shippingId = repo.addNewSearchInput(core1Id, "shipping", Seq.empty)
    val redirectRule = RedirectRule(RedirectRuleId(), "http://xyz.com/shipping", isActive = true)
    val searchInputForRedirect = SearchInputWithRules(shippingId, "shipping", redirectRules = List(redirectRule), isActive = true, comment = "")
    repo.updateSearchInput(searchInputForRedirect)

    Seq(id, shippingId)
  }

  override protected def afterAll(): Unit = {
    application.stop()
    db.shutdown()
  }

} 
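A hedged usage sketch building on ApplicationTestBase (suite name and assertions are illustrative):

import org.scalatest.{ FlatSpec, Matchers }

class SearchInputRepoSpec extends FlatSpec with Matchers with ApplicationTestBase {

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    createTestCores()
  }

  "createTestRule" should "persist two search inputs" in {
    // The helper above returns the ids of the "aerosmith" and "shipping" inputs.
    createTestRule() should have size 2
  }
}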
Example 144
Source File: XmlPartitioningSuite.scala    From spark-xml   with Apache License 2.0 5 votes vote down vote up
package com.databricks.spark.xml

import org.apache.spark.sql.SparkSession
import org.scalatest.BeforeAndAfterAll
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers


final class XmlPartitioningSuite extends AnyFunSuite with Matchers with BeforeAndAfterAll {

  private def doPartitionTest(suffix: String, blockSize: Long, large: Boolean): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("XmlPartitioningSuite")
      .config("spark.ui.enabled", false)
      .config("spark.hadoop.fs.local.block.size", blockSize)
      .getOrCreate()
    try {
      val fileName = s"fias_house${if (large) ".large" else ""}.xml$suffix"
      val xmlFile = getClass.getClassLoader.getResource(fileName).getFile
      val results = spark.read.option("rowTag", "House").option("mode", "FAILFAST").xml(xmlFile)
      // Test file has 37 records; large file is 20x the records
      assert(results.count() === (if (large) 740 else 37))
    } finally {
      spark.stop()
    }
  }

  test("Uncompressed small file with specially chosen block size") {
    doPartitionTest("", 8342, false)
  }

  test("Uncompressed small file with small block size") {
    doPartitionTest("", 500, false)
  }

  test("bzip2 small file with small block size") {
    doPartitionTest(".bz2", 500, false)
  }

  test("bzip2 large file with small block size") {
    // Note, the large bzip2 test file was compressed such that there are several blocks
    // in the compressed input (e.g. bzip2 -1 on a file with much more than 100k data)
    doPartitionTest(".bz2", 500, true)
  }

  test("gzip small file") {
    // Block size won't matter
    doPartitionTest(".gz", 500, false)
  }

  test("gzip large file") {
    // Block size won't matter
    doPartitionTest(".gz", 500, true)
  }

} 
Example 145
Source File: StaxXmlParserUtilsSuite.scala    From spark-xml   with Apache License 2.0 5 votes vote down vote up
package com.databricks.spark.xml.parsers

import java.io.StringReader
import javax.xml.stream.events.Attribute
import javax.xml.stream.{XMLInputFactory, XMLStreamConstants}

import scala.collection.JavaConverters._

import org.scalatest.BeforeAndAfterAll
import org.scalatest.funsuite.AnyFunSuite

import com.databricks.spark.xml.XmlOptions

final class StaxXmlParserUtilsSuite extends AnyFunSuite with BeforeAndAfterAll {

  private val factory = XMLInputFactory.newInstance()
  factory.setProperty(XMLInputFactory.IS_NAMESPACE_AWARE, false)
  factory.setProperty(XMLInputFactory.IS_COALESCING, true)

  test("Test if elements are skipped until the given event type") {
    val input = <ROW><id>2</id><name>Sam Mad Dog Smith</name><amount>93</amount></ROW>
    val parser = factory.createXMLEventReader(new StringReader(input.toString))
    val event = StaxXmlParserUtils.skipUntil(parser, XMLStreamConstants.END_DOCUMENT)
    assert(event.isEndDocument)
  }

  test("Check the end of element") {
    val input = <ROW><id>2</id></ROW>
    val parser = factory.createXMLEventReader(new StringReader(input.toString))
    // Skip until </id>
    StaxXmlParserUtils.skipUntil(parser, XMLStreamConstants.END_ELEMENT)
    assert(StaxXmlParserUtils.checkEndElement(parser))
  }

  test("Convert attributes to a map with keys and values") {
    val input = <ROW id="2"></ROW>
    val parser = factory.createXMLEventReader(new StringReader(input.toString))
    val event =
      StaxXmlParserUtils.skipUntil(parser, XMLStreamConstants.START_ELEMENT)
    val attributes =
      event.asStartElement().getAttributes.asScala.map(_.asInstanceOf[Attribute]).toArray
    val valuesMap =
      StaxXmlParserUtils.convertAttributesToValuesMap(attributes, new XmlOptions())
    assert(valuesMap === Map(s"${XmlOptions.DEFAULT_ATTRIBUTE_PREFIX}id" -> "2"))
  }

  test("Convert current structure to string") {
    val input = <ROW><id>2</id><info>
      <name>Sam Mad Dog Smith</name><amount><small>1</small><large>9</large></amount></info></ROW>
    val parser = factory.createXMLEventReader(new StringReader(input.toString))
    // Skip until </id>
    StaxXmlParserUtils.skipUntil(parser, XMLStreamConstants.END_ELEMENT)
    val xmlString = StaxXmlParserUtils.currentStructureAsString(parser)
    val expected = <info>
      <name>Sam Mad Dog Smith</name><amount><small>1</small><large>9</large></amount></info>
    assert(xmlString === expected.toString())
  }

  test("Skip XML children") {
    val input = <ROW><info>
      <name>Sam Mad Dog Smith</name><amount><small>1</small>
        <large>9</large></amount></info><abc>2</abc><test>2</test></ROW>
    val parser = factory.createXMLEventReader(new StringReader(input.toString))
    // We assume here it's reading the value within `id` field.
    StaxXmlParserUtils.skipUntil(parser, XMLStreamConstants.CHARACTERS)
    StaxXmlParserUtils.skipChildren(parser)
    assert(parser.nextEvent().asEndElement().getName.getLocalPart === "info")
    parser.next()
    StaxXmlParserUtils.skipChildren(parser)
    assert(parser.nextEvent().asEndElement().getName.getLocalPart === "abc")
    parser.next()
    StaxXmlParserUtils.skipChildren(parser)
    assert(parser.nextEvent().asEndElement().getName.getLocalPart === "test")
  }
} 
Example 146
Source File: XmlFileSuite.scala    From spark-xml   with Apache License 2.0 5 votes vote down vote up
package com.databricks.spark.xml.util

import java.nio.charset.{StandardCharsets, UnsupportedCharsetException}

import org.apache.spark.SparkContext
import org.scalatest.BeforeAndAfterAll
import org.scalatest.funsuite.AnyFunSuite

final class XmlFileSuite extends AnyFunSuite with BeforeAndAfterAll {

  private val booksFile = "src/test/resources/books.xml"
  private val booksUnicodeInTagNameFile = "src/test/resources/books-unicode-in-tag-name.xml"
  private val booksFileTag = "book"
  private val booksUnicodeFileTag = "\u66F8" // scalastyle:ignore
  private val numBooks = 12
  private val numBooksUnicodeInTagName = 3
  private val fiasHouse = "src/test/resources/fias_house.xml"
  private val fiasRowTag = "House"
  private val numHouses = 37
  private val utf8 = StandardCharsets.UTF_8.name

  private var sparkContext: SparkContext = _

  override def beforeAll(): Unit = {
    super.beforeAll()
    sparkContext = new SparkContext("local[2]", "TextFileSuite")
  }

  override def afterAll(): Unit = {
    try {
      sparkContext.stop()
      sparkContext = null
    } finally {
      super.afterAll()
    }
  }

  test("read utf-8 encoded file") {
    val baseRDD = XmlFile.withCharset(sparkContext, booksFile, utf8, rowTag = booksFileTag)
    assert(baseRDD.count() === numBooks)
  }

  test("read file with unicode chars in row tag name") {
    val baseRDD = XmlFile.withCharset(
      sparkContext, booksUnicodeInTagNameFile, utf8, rowTag = booksUnicodeFileTag)
    assert(baseRDD.count() === numBooksUnicodeInTagName)
  }

  test("read utf-8 encoded file with empty tag") {
    val baseRDD = XmlFile.withCharset(sparkContext, fiasHouse, utf8, rowTag = fiasRowTag)
    assert(baseRDD.count() == numHouses)
    baseRDD.collect().foreach(x => assert(x.contains("/>")))
  }

  test("unsupported charset") {
    val exception = intercept[UnsupportedCharsetException] {
      XmlFile.withCharset(sparkContext, booksFile, "frylock", rowTag = booksFileTag).count()
    }
    assert(exception.getMessage.contains("frylock"))
  }

} 
Example 147
Source File: HttpRequestRecorderSpec.scala    From rokku   with Apache License 2.0 5 votes vote down vote up
package com.ing.wbaa.rokku.proxy.persistence

import java.net.InetAddress

import akka.actor.{ ActorSystem, PoisonPill, Props }
import akka.http.scaladsl.model.HttpHeader.ParsingResult
import akka.http.scaladsl.model._
import akka.testkit.{ ImplicitSender, TestKit }
import com.ing.wbaa.rokku.proxy.data._
import com.ing.wbaa.rokku.proxy.persistence.HttpRequestRecorder.{ ExecutedRequestCmd, LatestRequests, LatestRequestsResult }
import org.scalatest.BeforeAndAfterAll
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AnyWordSpecLike

import scala.collection.immutable

class HttpRequestRecorderSpec extends TestKit(ActorSystem("RequestRecorderTest")) with ImplicitSender
  with AnyWordSpecLike with Diagrams with BeforeAndAfterAll {

  override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
  }

  private def convertStringsToAkkaHeaders(headers: List[String]): immutable.Seq[HttpHeader] = headers.map { p =>
    val kv = p.split("=")
    HttpHeader.parse(kv(0), kv(1)) match {
      case ParsingResult.Ok(header, _) => header
      case ParsingResult.Error(error)  => throw new Exception(s"Unable to convert to HttpHeader: ${error.summary}")
    }
  }

  val requestRecorder = system.actorOf(Props(classOf[HttpRequestRecorder]), "localhost-1")

  val headers = List("Remote-Address=0:0:0:0:0:0:0:1:58170", "Host=localhost:8987",
    "X-Amz-Content-SHA256=02502914aca52472205417e4c418ee499ba39ca1b283d99da26e295df2eccf32",
    "User-Agent=aws-cli/1.16.30 Python/2.7.5 Linux/3.10.0-862.14.4.el7.x86_64 botocore/1.12.20",
    "Content-MD5=Wf7l+rCPsVw8eqc34kVJ1g==",
    "Authorization=AWS4-HMAC-SHA256 Credential=6r24619bHVWvrxR5AMHNkGZ6vNRXoGCP/20190704/us-east-1/s3/aws4_request",
    "SignedHeaders=content-md5;host;x-amz-content-sha256;x-amz-date;x-amz-security-token",
    "Signature=271dda503da6fcf04cc058cb514b28a6d522a9b712ab553bfb88fb7814ab082f")

  val httpRequest = HttpRequest(
    HttpMethods.PUT,
    Uri("http://127.0.0.1:8010/home/testuser/file34"),
    convertStringsToAkkaHeaders(headers),
    HttpEntity.Empty.withContentType(ContentTypes.`application/octet-stream`).toString(),
    HttpProtocols.`HTTP/1.1`
  )
  val userSTS = User(UserName("okUser"), Set(UserGroup("okGroup")), AwsAccessKey("accesskey"), AwsSecretKey("secretkey"), UserAssumeRole(""))
  val clientIPAddress = RemoteAddress(InetAddress.getByName("localhost"), Some(1234))

  "RequestRecorder" should {
    "persist Http request event" in {
      requestRecorder ! ExecutedRequestCmd(httpRequest, userSTS, clientIPAddress)
      requestRecorder ! LatestRequests(1)
      expectMsg(LatestRequestsResult(List(ExecutedRequestEvt(httpRequest, userSTS, clientIPAddress))))
      requestRecorder ! PoisonPill

      val requestRecorder1 = system.actorOf(Props(classOf[HttpRequestRecorder]), "localhost-2")
      requestRecorder1 ! LatestRequests(1)
      expectMsg(LatestRequestsResult(List(ExecutedRequestEvt(httpRequest, userSTS, clientIPAddress))))
    }
  }

} 
Example 148
Source File: ConsulCoordinationSpec.scala    From constructr-consul   with Apache License 2.0 5 votes vote down vote up
package com.tecsisa.constructr.coordination.consul

import akka.Done
import akka.actor.{ ActorSystem, AddressFromURIString }
import akka.testkit.{ TestDuration, TestProbe }
import com.typesafe.config.ConfigFactory
import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpec }
import scala.concurrent.duration.{ Duration, DurationInt, FiniteDuration }
import scala.concurrent.{ Await, Awaitable }
import scala.util.Random

object ConsulCoordinationSpec {

  private val coordinationHost = {
    val dockerHostPattern = """tcp://(\S+):\d{1,5}""".r
    sys.env
      .get("DOCKER_HOST")
      .collect { case dockerHostPattern(address) => address }
      .getOrElse("127.0.0.1")
  }
}

class ConsulCoordinationSpec extends WordSpec with Matchers with BeforeAndAfterAll {
  import ConsulCoordinationSpec._

  private implicit val system = {
    val config =
      ConfigFactory
        .parseString(s"constructr.coordination.host = $coordinationHost")
        .withFallback(ConfigFactory.load())
    ActorSystem("default", config)
  }

  private val address1 = AddressFromURIString("akka.tcp://default@a:2552")
  private val address2 = AddressFromURIString("akka.tcp://default@b:2552")

  "ConsulCoordination" should {
    "correctly interact with consul" in {
      val coordination = new ConsulCoordination(randomString(), system)

      // Getting nodes
      resultOf(coordination.getNodes()) shouldBe 'empty

      // Lock (ttl >= 10s)
      resultOf(coordination.lock(address1, 10.seconds)) shouldBe true
      resultOf(coordination.lock(address1, 10.seconds)) shouldBe true
      resultOf(coordination.lock(address2, 10.seconds)) shouldBe false

      // Add self
      resultOf(coordination.addSelf(address1, 10.seconds)) shouldBe Done
      resultOf(coordination.getNodes()) shouldBe Set(address1)

      // Refresh
      resultOf(coordination.refresh(address1, 10.seconds)) shouldBe Done
      resultOf(coordination.getNodes()) shouldBe Set(address1)

      val probe = TestProbe()
      import probe._
      awaitAssert(
        resultOf(coordination.getNodes()) shouldBe 'empty,
        25.seconds // Wait until open sessions expire
      )
    }
  }

  override protected def afterAll() = {
    Await.ready(system.terminate(), Duration.Inf)
    super.afterAll()
  }

  private def resultOf[A](awaitable: Awaitable[A], max: FiniteDuration = 3.seconds.dilated) =
    Await.result(awaitable, max)

  // nextInt(bound) is always non-negative; math.abs(Random.nextInt) can stay
  // negative when nextInt returns Int.MinValue.
  private def randomString() = Random.nextInt(Int.MaxValue).toString
} 
Example 149
Source File: TestUtils.scala    From odsc-east-realish-predictions   with Apache License 2.0 5 votes vote down vote up
package com.twilio.open.odsc.realish

import com.holdenkarau.spark.testing.{LocalSparkContext, SparkContextProvider}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterAll, Suite}

object TestUtils {

}

@SerialVersionUID(1L)
case class UserPersonality(uuid: String, name: String, tags: Array[String])
  extends Serializable

@SerialVersionUID(1L)
case class Author(uuid: String, name: String, age: Int) extends Serializable

@SerialVersionUID(1L)
case class LibraryBook(uuid: String, name: String, author: Author) extends Serializable

case class MockKafkaDataFrame(key: Array[Byte], value: Array[Byte])

trait SharedSparkSql extends BeforeAndAfterAll with SparkContextProvider {
  self: Suite =>

  @transient var _sparkSql: SparkSession = _
  @transient private var _sc: SparkContext = _

  override def sc: SparkContext = _sc

  def conf: SparkConf

  def sparkSql: SparkSession = _sparkSql

  override def beforeAll() {
    _sparkSql = SparkSession.builder().config(conf).getOrCreate()

    _sc = _sparkSql.sparkContext
    setup(_sc)
    super.beforeAll()
  }

  override def afterAll() {
    try {
      _sparkSql.close()
      _sparkSql = null
      LocalSparkContext.stop(_sc)
      _sc = null
    } finally {
      super.afterAll()
    }
  }

} 
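A hedged usage sketch for SharedSparkSql (the suite name is illustrative; `conf` is the member a concrete suite must supply):

import org.apache.spark.SparkConf
import org.scalatest.FunSuite

class RealishPredictionsSpec extends FunSuite with SharedSparkSql {

  // Supply the SparkConf the trait uses when building the session.
  override def conf: SparkConf =
    new SparkConf().setMaster("local[*]").setAppName("shared-spark-sql-test")

  test("the shared session can run a trivial query") {
    assert(sparkSql.range(10).count() === 10L)
  }
}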
Example 151
Source File: StreamingConnectorSpec.scala    From scalanda   with MIT License 5 votes vote down vote up
package com.msilb.scalanda.streamapi

import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestKit}
import com.msilb.scalanda.common.model.Side.Buy
import com.msilb.scalanda.common.model.Transaction.MarketOrderCreate
import com.msilb.scalanda.restapi.Request.{ClosePositionRequest, CreateOrderRequest}
import com.msilb.scalanda.restapi.RestConnector
import com.msilb.scalanda.restapi.model.OrderType.Market
import com.msilb.scalanda.streamapi.StreamingConnector._
import com.msilb.scalanda.streamapi.model.Tick
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}

import scala.concurrent.duration._

class StreamingConnectorSpec(_system: ActorSystem) extends TestKit(_system) with ImplicitSender with FlatSpecLike with Matchers with BeforeAndAfterAll {

  def this() = this(ActorSystem("test"))

  override def afterAll() {
    TestKit.shutdownActorSystem(system)
  }

  val testAccountId = 6535195

  val restConnector = system.actorOf(RestConnector.props(accountId = testAccountId))
  val streamingConnector = system.actorOf(StreamingConnector.props)

  "StreamingConnector" should "successfully connect to the streaming end-point" in {
    within(5.seconds) {
      streamingConnector ! Connect()
      expectMsg(ConnectionEstablished)
    }
  }

  it should "register listeners" in {
    within(5.seconds) {
      streamingConnector ! AddListeners(Set(testActor))
      expectMsg(Set(testActor))
    }
  }

  it should "subscribe for price stream and receive price ticks" in {
    within(5.seconds) {
      streamingConnector ! StartRatesStreaming(testAccountId, Set("EUR_USD"))
      expectMsgType[Tick]
    }
  }

  it should "subscribe for events stream and receive account events" in {
    within(5.seconds) {
      streamingConnector ! StartEventsStreaming(Some(Set(testAccountId)))
      restConnector ! CreateOrderRequest("EUR_USD", 10000, Buy, Market)
      restConnector ! ClosePositionRequest("EUR_USD")
      fishForMessage() {
        case t: MarketOrderCreate if t.instrument == "EUR_USD" && t.side == Buy && t.units == 10000 => true
        case _ => false
      }
    }
  }

  it should "de-register listeners" in {
    within(5.seconds) {
      streamingConnector ! RemoveListeners(Set(testActor))
      fishForMessage() {
        case s: Set[_] if s.isEmpty => true
        case _ => false
      }
    }
  }
} 
Example 152
Source File: HBaseConnectorSuite.scala    From darwin   with Apache License 2.0 5 votes vote down vote up
package it.agilelab.darwin.connector.hbase

import java.nio.file.Files

import com.typesafe.config.{ConfigFactory, ConfigValueFactory}
import it.agilelab.darwin.common.Connector
import org.apache.avro.reflect.ReflectData
import org.apache.avro.{Schema, SchemaNormalization}
import org.apache.hadoop.hbase.HBaseTestingUtility
import org.scalatest.BeforeAndAfterAll
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

class HBaseConnectorSuite extends AnyFlatSpec with Matchers with BeforeAndAfterAll {

  var connector: Connector = _

  "HBaseConnector" should "load all existing schemas" in {
    connector.fullLoad()
  }

  it should "insert and retrieve" in {
    val schemas = Seq(ReflectData.get().getSchema(classOf[HBaseMock]), ReflectData.get().getSchema(classOf[HBase2Mock]))
      .map(s => SchemaNormalization.parsingFingerprint64(s) -> s)
    connector.insert(schemas)
    val loaded: Seq[(Long, Schema)] = connector.fullLoad()
    assert(loaded.size == schemas.size)
    assert(loaded.forall(schemas.contains))
    val schema = connector.findSchema(loaded.head._1)
    assert(schema.isDefined)
    assert(schema.get == loaded.head._2)
    val noSchema = connector.findSchema(-1L)
    assert(noSchema.isEmpty)
  }

  "connector.tableCreationHint" should "print the correct hint for table creation" in {
    connector.tableCreationHint() should be(
      """To create namespace and table from an HBase shell issue:
        |  create_namespace 'AVRO'
        |  create 'AVRO:SCHEMA_REPOSITORY', '0'""".stripMargin)
  }

  "connector.tableExists" should "return true with existent table" in {
    connector.tableExists() should be(true)
  }

  override def beforeAll(): Unit = {

    connector = new HBaseConnectorCreator().create(HBaseConnectorSuite.config)

    connector.createTable()
  }


}

object HBaseConnectorSuite {
  private lazy val config = {
    val util = new HBaseTestingUtility()
    val minicluster = util.startMiniCluster()

    //Hbase connector can only load configurations from a file path so we need to render the hadoop conf
    val confFile = Files.createTempFile("prefix", "suffix")
    val stream = Files.newOutputStream(confFile)
    minicluster.getConfiguration.writeXml(stream)
    stream.flush()
    stream.close()
    val hbaseConfigPath = ConfigValueFactory.fromAnyRef(confFile.toAbsolutePath.toString)

    //HbaseConnector will only load conf if hbase-site and core-site are given,
    //we give the same file to each.
    sys.addShutdownHook(minicluster.shutdown())
    ConfigFactory.load()
      .withValue(ConfigurationKeys.HBASE_SITE, hbaseConfigPath)
      .withValue(ConfigurationKeys.CORE_SITE, hbaseConfigPath)
  }

} 
Example 153
Source File: PostgresConnectorSuite.scala    From darwin   with Apache License 2.0 5 votes vote down vote up
package it.agilelab.darwin.connector.postgres

import com.typesafe.config.{Config, ConfigFactory}
import it.agilelab.darwin.common.Connector
import org.apache.avro.{Schema, SchemaNormalization}
import org.scalatest.BeforeAndAfterAll
import ru.yandex.qatools.embed.postgresql.EmbeddedPostgres
import ru.yandex.qatools.embed.postgresql.distribution.Version
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

class PostgresConnectorSuite extends AnyFlatSpec with Matchers with BeforeAndAfterAll {
  val config: Config = ConfigFactory.load("postgres.properties")
  val connector: Connector = new PostgresConnectorCreator().create(config)
  val embeddedPostgres: EmbeddedPostgres = new EmbeddedPostgres(Version.V9_6_11)

  override protected def beforeAll(): Unit = {
    super.beforeAll()

    val port = 5432
    val host = "localhost"
    val dbname = "postgres"
    val username = "postgres"
    val password = "mysecretpassword"

    embeddedPostgres.start(host, port, dbname, username, password)

    connector.createTable()
  }

  override protected def afterAll(): Unit = {
    super.afterAll()

    embeddedPostgres.stop()
  }



  "PostgresConnector" should "load all existing schemas" in {
    connector.fullLoad()
  }

  ignore should "insert and retrieve" in {
    val outerSchema = new Schema.Parser().parse(getClass.getClassLoader.getResourceAsStream("postgresmock.avsc"))
    val innerSchema = outerSchema.getField("four").schema()

    val schemas = Seq(innerSchema, outerSchema)
      .map(s => SchemaNormalization.parsingFingerprint64(s) -> s)
    connector.insert(schemas)
    val loaded: Seq[(Long, Schema)] = connector.fullLoad()
    assert(loaded.size == schemas.size)
    assert(loaded.forall(schemas.contains))
  }

} 
Example 154
Source File: TestZooKeeper.scala    From mango   with Apache License 2.0 5 votes vote down vote up
package com.kakao.mango.zk

import java.io.{File, IOException}
import java.net.{ServerSocket, Socket}
import java.util.concurrent.TimeUnit

import com.kakao.mango.concurrent.NamedExecutors
import com.kakao.mango.logging.{LogLevelOverrider, Logging}
import com.kakao.shaded.guava.io.Files
import org.apache.zookeeper.server.persistence.FileTxnSnapLog
import org.apache.zookeeper.server.{ServerCnxnFactory, ServerConfig, ZooKeeperServer}
import org.scalatest.{BeforeAndAfterAll, Suite}

trait TestZooKeeper extends BeforeAndAfterAll with Logging { this: Suite =>

  
  val zkServerPort = 2181
  val zkServerExecutor = NamedExecutors.single("zookeeper-server")
  var zk: ZooKeeperConnection = _

  override protected def beforeAll(): Unit = {
    logger.info("Launching a standalone ZooKeeper server for testing...")

    try {
      val socket = new ServerSocket(zkServerPort)
      socket.close()
    } catch {
      case e: IOException =>
        throw new RuntimeException(s"TCP port $zkServerPort is required for tests but not available")
    }

    zkServerExecutor.submit {
      LogLevelOverrider.error("org.apache.zookeeper")

      val datadir = Files.createTempDir().getAbsolutePath
      val config = new ServerConfig
      config.parse(Array(zkServerPort.toString, datadir))

      val zkServer = new ZooKeeperServer
      zkServer.setTxnLogFactory(new FileTxnSnapLog(new File(datadir), new File(datadir)))
      zkServer.setTickTime(6000)
      zkServer.setMinSessionTimeout(6000)
      zkServer.setMaxSessionTimeout(6000)

      val cnxnFactory = ServerCnxnFactory.createFactory

      try {
        cnxnFactory.configure(config.getClientPortAddress, 60)
        cnxnFactory.startup(zkServer)
        cnxnFactory.join()
      } catch {
        case _: InterruptedException =>
          logger.info("ZooKeeper server interrupted; shutting down...")
          cnxnFactory.shutdown()
          cnxnFactory.join()
          if (zkServer.isRunning) {
            zkServer.shutdown()
          }
          logger.info("ZooKeeper server stopped")
      }
    }

    var connected = false
    while (!connected) {
      logger.info("Waiting for ZooKeeper server to launch...")
      try {
        val socket = new Socket("localhost", zkServerPort)
        logger.info("ZooKeeper server is available")
        socket.close()

        zk = ZooKeeperConnection(s"localhost:$zkServerPort")
        connected = true
      } catch {
        case _: IOException => Thread.sleep(1000) // retry
      }
    }

    super.beforeAll()
  }

  override protected def afterAll(): Unit = {
    try super.afterAll()
    finally {
      zk.close()
      logger.info("Interrupting ZooKeeper server...")
      zkServerExecutor.shutdownNow()
      while (!zkServerExecutor.awaitTermination(1, TimeUnit.SECONDS)) {
        logger.info("awaiting ZooKeeper server termination...")
      }
      logger.info("ZooKeeper server terminated")
    }
  }
} 
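A hedged usage sketch for the trait above, using only the JDK socket API so nothing project-specific is assumed beyond zkServerPort:

import java.net.Socket

import org.scalatest.{ FlatSpec, Matchers }

class ZooKeeperSmokeSpec extends FlatSpec with Matchers with TestZooKeeper {

  "the embedded ZooKeeper" should "accept TCP connections on the configured port" in {
    // beforeAll has already blocked until the server answered once.
    val socket = new Socket("localhost", zkServerPort)
    try socket.isConnected shouldBe true
    finally socket.close()
  }
}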
Example 155
Source File: ConcurrentHiveSuite.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hive.execution

import org.scalatest.BeforeAndAfterAll

import org.apache.spark.{SparkConf, SparkContext, SparkFunSuite}
import org.apache.spark.sql.hive.test.TestHiveContext

class ConcurrentHiveSuite extends SparkFunSuite with BeforeAndAfterAll {
  ignore("multiple instances not supported") {
    test("Multiple Hive Instances") {
      (1 to 10).map { i =>
        val conf = new SparkConf()
        conf.set("spark.ui.enabled", "false")
        val ts =
          new TestHiveContext(new SparkContext("local", s"TestSQLContext$i", conf))
        ts.sparkSession.sql("SHOW TABLES").collect()
        ts.sparkSession.sql("SELECT * FROM src").collect()
        ts.sparkSession.sql("SHOW TABLES").collect()
      }
    }
  }
} 
Example 156
Source File: ListTablesSuite.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hive

import org.scalatest.BeforeAndAfterAll

import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.hive.test.TestHiveSingleton

class ListTablesSuite extends QueryTest with TestHiveSingleton with BeforeAndAfterAll {
  import hiveContext._
  import hiveContext.implicits._

  val df = sparkContext.parallelize((1 to 10).map(i => (i, s"str$i"))).toDF("key", "value")

  override def beforeAll(): Unit = {
    super.beforeAll()
    // The catalog in HiveContext is a case insensitive one.
    sessionState.catalog.createTempView(
      "ListTablesSuiteTable", df.logicalPlan, overrideIfExists = true)
    sql("CREATE TABLE HiveListTablesSuiteTable (key int, value string)")
    sql("CREATE DATABASE IF NOT EXISTS ListTablesSuiteDB")
    sql("CREATE TABLE ListTablesSuiteDB.HiveInDBListTablesSuiteTable (key int, value string)")
  }

  override def afterAll(): Unit = {
    try {
      sessionState.catalog.dropTable(
        TableIdentifier("ListTablesSuiteTable"), ignoreIfNotExists = true, purge = false)
      sql("DROP TABLE IF EXISTS HiveListTablesSuiteTable")
      sql("DROP TABLE IF EXISTS ListTablesSuiteDB.HiveInDBListTablesSuiteTable")
      sql("DROP DATABASE IF EXISTS ListTablesSuiteDB")
    } finally {
      super.afterAll()
    }
  }

  test("get all tables of current database") {
    Seq(tables(), sql("SHOW TABLes")).foreach {
      case allTables =>
        // We are using default DB.
        checkAnswer(
          allTables.filter("tableName = 'listtablessuitetable'"),
          Row("", "listtablessuitetable", true))
        checkAnswer(
          allTables.filter("tableName = 'hivelisttablessuitetable'"),
          Row("default", "hivelisttablessuitetable", false))
        assert(allTables.filter("tableName = 'hiveindblisttablessuitetable'").count() === 0)
    }
  }

  test("getting all tables with a database name") {
    Seq(tables("listtablessuiteDb"), sql("SHOW TABLes in listTablesSuitedb")).foreach {
      case allTables =>
        checkAnswer(
          allTables.filter("tableName = 'listtablessuitetable'"),
          Row("", "listtablessuitetable", true))
        assert(allTables.filter("tableName = 'hivelisttablessuitetable'").count() === 0)
        checkAnswer(
          allTables.filter("tableName = 'hiveindblisttablessuitetable'"),
          Row("listtablessuitedb", "hiveindblisttablessuitetable", false))
    }
  }
} 
Example 157
Source File: TestHiveSingleton.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hive.test

import org.scalatest.BeforeAndAfterAll

import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.hive.HiveExternalCatalog
import org.apache.spark.sql.hive.client.HiveClient


trait TestHiveSingleton extends SparkFunSuite with BeforeAndAfterAll {
  override protected val enableAutoThreadAudit = false
  protected val spark: SparkSession = TestHive.sparkSession
  protected val hiveContext: TestHiveContext = TestHive
  protected val hiveClient: HiveClient =
    spark.sharedState.externalCatalog.unwrapped.asInstanceOf[HiveExternalCatalog].client

  protected override def afterAll(): Unit = {
    try {
      hiveContext.reset()
    } finally {
      super.afterAll()
    }
  }

} 
Example 158
Source File: UISeleniumSuite.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hive.thriftserver

import scala.util.Random

import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.openqa.selenium.WebDriver
import org.openqa.selenium.htmlunit.HtmlUnitDriver
import org.scalatest.{BeforeAndAfterAll, Matchers}
import org.scalatest.concurrent.Eventually._
import org.scalatest.selenium.WebBrowser
import org.scalatest.time.SpanSugar._

import org.apache.spark.ui.SparkUICssErrorHandler

class UISeleniumSuite
  extends HiveThriftJdbcTest
  with WebBrowser with Matchers with BeforeAndAfterAll {

  implicit var webDriver: WebDriver = _
  var server: HiveThriftServer2 = _
  val uiPort = 20000 + Random.nextInt(10000)
  override def mode: ServerMode.Value = ServerMode.binary

  override def beforeAll(): Unit = {
    webDriver = new HtmlUnitDriver {
      getWebClient.setCssErrorHandler(new SparkUICssErrorHandler)
    }
    super.beforeAll()
  }

  override def afterAll(): Unit = {
    if (webDriver != null) {
      webDriver.quit()
    }
    super.afterAll()
  }

  override protected def serverStartCommand(port: Int) = {
    val portConf = if (mode == ServerMode.binary) {
      ConfVars.HIVE_SERVER2_THRIFT_PORT
    } else {
      ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT
    }

    s"""$startScript
        |  --master local
        |  --hiveconf hive.root.logger=INFO,console
        |  --hiveconf ${ConfVars.METASTORECONNECTURLKEY}=$metastoreJdbcUri
        |  --hiveconf ${ConfVars.METASTOREWAREHOUSE}=$warehousePath
        |  --hiveconf ${ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST}=localhost
        |  --hiveconf ${ConfVars.HIVE_SERVER2_TRANSPORT_MODE}=$mode
        |  --hiveconf $portConf=$port
        |  --driver-class-path ${sys.props("java.class.path")}
        |  --conf spark.ui.enabled=true
        |  --conf spark.ui.port=$uiPort
     """.stripMargin.split("\\s+").toSeq
  }

  ignore("thrift server ui test") {
    withJdbcStatement("test_map") { statement =>
      val baseURL = s"http://localhost:$uiPort"

      val queries = Seq(
        "CREATE TABLE test_map(key INT, value STRING)",
        s"LOAD DATA LOCAL INPATH '${TestData.smallKv}' OVERWRITE INTO TABLE test_map")

      queries.foreach(statement.execute)

      eventually(timeout(10 seconds), interval(50 milliseconds)) {
        go to baseURL
        find(cssSelector("""ul li a[href*="sql"]""")) should not be None
      }

      eventually(timeout(10 seconds), interval(50 milliseconds)) {
        go to (baseURL + "/sql")
        find(id("sessionstat")) should not be None
        find(id("sqlstat")) should not be None

        // check whether statements exists
        queries.foreach { line =>
          findAll(cssSelector("""ul table tbody tr td""")).map(_.text).toList should contain (line)
        }
      }
    }
  }
} 
Example 159
Source File: SchemaPruningTest.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.catalyst

import org.scalatest.BeforeAndAfterAll

import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.internal.SQLConf.NESTED_SCHEMA_PRUNING_ENABLED


private[sql] trait SchemaPruningTest extends PlanTest with BeforeAndAfterAll {
  private var originalConfSchemaPruningEnabled = false

  override protected def beforeAll(): Unit = {
    originalConfSchemaPruningEnabled = conf.nestedSchemaPruningEnabled
    conf.setConf(NESTED_SCHEMA_PRUNING_ENABLED, true)
    super.beforeAll()
  }

  override protected def afterAll(): Unit = {
    try {
      super.afterAll()
    } finally {
      conf.setConf(NESTED_SCHEMA_PRUNING_ENABLED, originalConfSchemaPruningEnabled)
    }
  }
} 
Example 160
Source File: BenchmarkQueryTest.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql

import org.scalatest.BeforeAndAfterAll

import org.apache.spark.sql.catalyst.expressions.codegen.{CodeFormatter, CodeGenerator}
import org.apache.spark.sql.catalyst.rules.RuleExecutor
import org.apache.spark.sql.execution.{SparkPlan, WholeStageCodegenExec}
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.util.Utils

abstract class BenchmarkQueryTest extends QueryTest with SharedSQLContext with BeforeAndAfterAll {

  // When Utils.isTesting is true, the RuleExecutor will issue an exception when hitting
  // the max iteration of analyzer/optimizer batches.
  assert(Utils.isTesting, "spark.testing is not set to true")

  
  protected override def afterAll(): Unit = {
    try {
      // For debugging dump some statistics about how much time was spent in various optimizer rules
      logWarning(RuleExecutor.dumpTimeSpent())
      spark.sessionState.catalog.reset()
    } finally {
      super.afterAll()
    }
  }

  override def beforeAll() {
    super.beforeAll()
    RuleExecutor.resetMetrics()
  }

  protected def checkGeneratedCode(plan: SparkPlan): Unit = {
    val codegenSubtrees = new collection.mutable.HashSet[WholeStageCodegenExec]()
    plan foreach {
      case s: WholeStageCodegenExec =>
        codegenSubtrees += s
      case _ =>
    }
    codegenSubtrees.toSeq.foreach { subtree =>
      val code = subtree.doCodeGen()._2
      try {
        // Just check the generated code can be properly compiled
        CodeGenerator.compile(code)
      } catch {
        case e: Exception =>
          val msg =
            s"""
               |failed to compile:
               |Subtree:
               |$subtree
               |Generated code:
               |${CodeFormatter.format(code)}
             """.stripMargin
          throw new Exception(msg, e)
      }
    }
  }
} 
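BenchmarkQueryTest leaves only the queries to its subclasses. A hedged usage sketch (the suite, view, and query are illustrative, and the file is assumed to sit in the same org.apache.spark.sql test package so SharedSQLContext resolves):

class TrivialBenchmarkQuerySuite extends BenchmarkQueryTest {

  test("a filtered range produces compilable generated code") {
    spark.range(100).createOrReplaceTempView("bench_t")
    val plan = spark.sql("SELECT id FROM bench_t WHERE id > 10").queryExecution.executedPlan
    // Fails the test if any whole-stage-codegen subtree does not compile.
    checkGeneratedCode(plan)
  }
}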
Example 161
Source File: OrcTest.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources.orc

import java.io.File

import scala.reflect.ClassTag
import scala.reflect.runtime.universe.TypeTag

import org.scalatest.BeforeAndAfterAll

import org.apache.spark.sql._
import org.apache.spark.sql.internal.SQLConf.ORC_IMPLEMENTATION
import org.apache.spark.sql.test.SQLTestUtils


// The class declaration and its first helper were dropped in extraction; they
// are reconstructed below (modelled on the upstream Spark sources) so that the
// remaining methods compile in context.
abstract class OrcTest extends QueryTest with SQLTestUtils with BeforeAndAfterAll {
  import testImplicits._

  protected def withOrcDataFrame[T <: Product: ClassTag: TypeTag]
      (data: Seq[T])
      (f: DataFrame => Unit): Unit = {
    withTempPath { file =>
      makeOrcFile(data, file)
      f(spark.read.orc(file.getCanonicalPath))
    }
  }

  protected def withOrcTable[T <: Product: ClassTag: TypeTag]
      (data: Seq[T], tableName: String)
      (f: => Unit): Unit = {
    withOrcDataFrame(data) { df =>
      df.createOrReplaceTempView(tableName)
      withTempView(tableName)(f)
    }
  }

  protected def makeOrcFile[T <: Product: ClassTag: TypeTag](
      data: Seq[T], path: File): Unit = {
    data.toDF().write.mode(SaveMode.Overwrite).orc(path.getCanonicalPath)
  }

  protected def makeOrcFile[T <: Product: ClassTag: TypeTag](
      df: DataFrame, path: File): Unit = {
    df.write.mode(SaveMode.Overwrite).orc(path.getCanonicalPath)
  }

  protected def checkPredicatePushDown(df: DataFrame, numRows: Int, predicate: String): Unit = {
    withTempPath { file =>
      // It needs to repartition data so that we can have several ORC files
      // in order to skip stripes in ORC.
      df.repartition(numRows).write.orc(file.getCanonicalPath)
      val actual = stripSparkFilter(spark.read.orc(file.getCanonicalPath).where(predicate)).count()
      assert(actual < numRows)
    }
  }
} 
Example 162
Source File: WithSparkContext.scala    From HANAVora-Extensions   with Apache License 2.0 5 votes vote down vote up
package com.sap.spark

import com.sap.spark.util.TestUtils._
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfterAll, Suite}

trait WithSparkContext extends BeforeAndAfterAll {
  self: Suite =>

  override def beforeAll(): Unit = {
    try {
      super.beforeAll()
      setUpSparkContext()
    } catch {
      case ex: Throwable =>
        tearDownSparkContext()
        throw ex
    }
  }

  override def afterAll(): Unit = {
    try {
      super.afterAll()
    } finally {
      tearDownSparkContext()
    }
  }

  
    conf.set("spark.sql.autoBroadcastJoinThreshold", "-1")
    conf.set("spark.broadcast.factory", "org.apache.spark.broadcast.HttpBroadcastFactory")
    conf.set("spark.shuffle.spill", "false")
    conf.set("spark.shuffle.compress", "false")
    conf.set("spark.ui.enabled", "false")
    conf.set("spark.ui.showConsoleProgress", "false")
  }

  def sc: SparkContext

  protected def setUpSparkContext(): Unit

  protected def tearDownSparkContext(): Unit

} 
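Since the concrete setup and teardown are left abstract above, a hedged sketch of a local-mode mix-in (the trait name is illustrative; it reuses the reconstructed setUpSparkConf hook):

import org.apache.spark.{ SparkConf, SparkContext }
import org.scalatest.Suite

trait WithLocalSparkContext extends WithSparkContext { self: Suite =>

  @transient private var _sc: SparkContext = _

  override def sc: SparkContext = _sc

  override protected def setUpSparkContext(): Unit = {
    val conf = new SparkConf().setMaster("local[2]").setAppName("test")
    setUpSparkConf(conf)
    _sc = new SparkContext(conf)
  }

  override protected def tearDownSparkContext(): Unit = {
    if (_sc != null) {
      _sc.stop()
      _sc = null
    }
  }
}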
Example 163
Source File: GlobalSparkContext.scala    From HANAVora-Extensions   with Apache License 2.0 5 votes vote down vote up
package com.sap.spark

import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfterAll, Suite}


// The trait body was lost in extraction; a plausible reconstruction is given
// below as a thin wrapper over the companion object (the exact lifecycle calls
// are assumptions).
trait GlobalSparkContext extends BeforeAndAfterAll { self: Suite =>

  def sc: SparkContext = GlobalSparkContext._sc

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    GlobalSparkContext.init("local[2]", new SparkConf())
  }

  override protected def afterAll(): Unit = {
    try {
      GlobalSparkContext.reset()
    } finally {
      super.afterAll()
    }
  }
}

object GlobalSparkContext {
  @transient private var _sc: SparkContext = _

  def init(sparkMaster: String, sparkConf: SparkConf): Unit = {
    if (_sc == null) {
      this.synchronized {
        if (_sc == null) {
          _sc = new SparkContext(sparkMaster, "test", sparkConf)
        }
      }
    }
  }

  def reset(): Unit = {
    if (_sc != null) {
      _sc.cancelAllJobs()
    }
  }

  def close(): Unit = {
    if (_sc != null) {
      _sc.stop()
      _sc = null
    }
  }

} 
Example 164
Source File: SapThriftJdbcTest.scala    From HANAVora-Extensions   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hive.sap.thriftserver

import java.sql.{DriverManager, Statement}

import org.apache.hive.jdbc.HiveDriver
import org.scalatest.{BeforeAndAfterAll, FunSuite}


abstract class SapThriftJdbcTest(val thriftServer: SapThriftServer2Test) {

  def jdbcUri: String

  def withMultipleConnectionJdbcStatement(fs: (Statement => Unit)*) {
    val user = System.getProperty("user.name")
    val connections = fs.map { _ => DriverManager.getConnection(jdbcUri, user, "") }
    val statements = connections.map(_.createStatement())

    try {
      statements.zip(fs).foreach { case (s, f) => f(s) }
    } finally {
      statements.foreach(_.close())
      connections.foreach(_.close())
    }
  }

  def withJdbcStatement(f: Statement => Unit): Unit = {
    withMultipleConnectionJdbcStatement(f)
  }

}

class SapThriftJdbcHiveDriverTest(override val thriftServer: SapThriftServer2Test)
  extends SapThriftJdbcTest(thriftServer) {
  Class.forName(classOf[HiveDriver].getCanonicalName)

  override def jdbcUri: String = if (thriftServer.mode == ServerMode.http) {
    s"""jdbc:hive2://${thriftServer.getServerAdressAndPort()}/
        |default?
        |hive.server2.transport.mode=http;
        |hive.server2.thrift.http.path=cliservice
     """.stripMargin.split("\n").mkString.trim
  } else {
    s"jdbc:hive2://${thriftServer.getServerAdressAndPort()}/"
  }

} 
Example 165
Source File: LevelDBAccessorTest.scala    From maha   with Apache License 2.0 5 votes vote down vote up
// Copyright 2017, Yahoo Holdings Inc.
// Licensed under the terms of the Apache License 2.0. Please see LICENSE file in project root for terms.
package com.yahoo.maha.leveldb

import com.yahoo.maha.serde.StringSerDe
import org.junit.Assert._
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}

import scala.collection.mutable


class LevelDBAccessorTest extends FunSuite with Matchers with BeforeAndAfterAll {

  private val levelDBAccessor: LevelDBAccessor[String, String] = new LevelDBAccessorBuilder("testdb", Some("/tmp")).addKeySerDe(StringSerDe).addValSerDe(StringSerDe).toLevelDBAccessor

  override protected def afterAll(): Unit = {
    levelDBAccessor.close()
    levelDBAccessor.destroy()
  }

  test("basic put and get operations should succeed") {
    val testKey = "test-key"
    val testVal = "test-val"

    assertTrue(levelDBAccessor.put(testKey, testVal))
    assertTrue(levelDBAccessor.get(testKey).isDefined)
    assertEquals(testVal, levelDBAccessor.get(testKey).get)
  }

  test("Failure put") {
    val testKey = null
    val testVal = null
    assertFalse(levelDBAccessor.put(testKey, testVal))
    assertFalse(levelDBAccessor.putBatch(mutable.Map.empty))
    assertFalse(levelDBAccessor.putBatch(null))
    assertEquals(None, levelDBAccessor.get(null))
  }
  
  test("successfully perform put batch and should be able to retrieve") {
    val kv = new collection.mutable.HashMap[String, String]
    kv += "one" -> "1"
    kv += "two" -> "2"
    kv += "three" -> "3"
    levelDBAccessor.putBatch(kv)
    
    assert(levelDBAccessor.get("one").get === "1")
    assert(levelDBAccessor.get("two").get === "2")
    assert(levelDBAccessor.get("three").get === "3")
  }

  test("Set LevelDBAccessorBuilder parameters") {
    val _1MB : Int = 1024 * 1024
    val builder = new LevelDBAccessorBuilder("mutable", Some("/tmp")).addBlockSize(_1MB).addCacheSize(500 * _1MB).addMaxOpenFiles(1000).addWriteBufferSize(10 * _1MB).setCreateIfMissing(true)
    assertTrue(builder.createIfMissing)
    assertEquals(builder.blockSize, _1MB)
    assertEquals(builder.cacheSize, 500 * _1MB)
    assertEquals(builder.maxOpenFiles, 1000)
    assertEquals(builder.writeBufferSize, 10 * _1MB)
  }

  test("DB closed, error cases") {
    val _1MB : Int = 1024 * 1024
    val key : String = "key-val"
    val value : String = "value-val"
    val builder : LevelDBAccessor[String, String] = new LevelDBAccessorBuilder("mutable", Some("/tmp"))
      .addBlockSize(_1MB)
      .addCacheSize(500 * _1MB)
      .addMaxOpenFiles(1000)
      .addWriteBufferSize(10 * _1MB)
      .setCreateIfMissing(true)
      .toLevelDBAccessor
    builder.close
    builder.destroy()
    assertFalse(builder.put("key", "value"))
    assertFalse(builder.putBatch(mutable.Map(key->value)))
    assertEquals(None, builder.get(key))
  }
} 
Example 166
Source File: BaseWorkerTest.scala    From maha   with Apache License 2.0 5 votes vote down vote up
// Copyright 2018, Yahoo Inc.
// Licensed under the terms of the Apache License 2.0. Please see LICENSE file in project root for terms.

package com.yahoo.maha.worker

import com.yahoo.maha.service.BaseMahaServiceTest
import org.joda.time.DateTime
import org.scalatest.{BeforeAndAfterAll, Matchers}


trait BaseWorkerTest extends BaseMahaServiceTest with Matchers with BeforeAndAfterAll {
  val mahaJobWorkerTable =
    s"""
       | create table maha_worker_job(
       | jobId NUMBER(10) PRIMARY KEY,
       | jobType VARCHAR(100),
       | jobStatus VARCHAR(100),
       | jobResponse VARCHAR(100),
       | numAcquired NUMBER(2),
       | createdTimestamp TIMESTAMP,
       | acquiredTimestamp TIMESTAMP,
       | endedTimestamp TIMESTAMP,
       | jobParentId NUMBER(10),
       | jobRequest VARCHAR(100),
       | hostname VARCHAR(100),
       | cubeName VARCHAR(100),
       | isDeleted NUMBER(1)
       | );
     """.stripMargin
  val now = new DateTime()

  override def beforeAll(): Unit = {
    initJdbcToH2()
    val result = jdbcConnection.get.execute(mahaJobWorkerTable)
    assert(result.isSuccess, s"Failed to create job table $result")

  }

} 
Example 167
Source File: AccessTokenSpec.scala    From akka-http-oauth2-client   with Apache License 2.0 5 votes vote down vote up
package com.github.dakatsuka.akka.http.oauth2.client

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{ HttpEntity, HttpResponse, StatusCodes }
import akka.http.scaladsl.model.ContentTypes.`application/json`
import akka.stream.{ ActorMaterializer, Materializer }
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ BeforeAndAfterAll, DiagrammedAssertions, FlatSpec }

import scala.concurrent.{ Await, ExecutionContext }
import scala.concurrent.duration.Duration

class AccessTokenSpec extends FlatSpec with DiagrammedAssertions with ScalaFutures with BeforeAndAfterAll {
  implicit val system: ActorSystem        = ActorSystem()
  implicit val ec: ExecutionContext       = system.dispatcher
  implicit val materializer: Materializer = ActorMaterializer()
  implicit val defaultPatience: PatienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(700, Millis))

  override def afterAll(): Unit = {
    Await.ready(system.terminate(), Duration.Inf)
  }

  behavior of "AccessToken"

  it should "apply from HttpResponse" in {
    val accessToken  = "xxx"
    val tokenType    = "bearer"
    val expiresIn    = 86400
    val refreshToken = "yyy"

    val httpResponse = HttpResponse(
      status = StatusCodes.OK,
      headers = Nil,
      entity = HttpEntity(
        `application/json`,
        s"""
           |{
           |  "access_token": "$accessToken",
           |  "token_type": "$tokenType",
           |  "expires_in": $expiresIn,
           |  "refresh_token": "$refreshToken"
           |}
         """.stripMargin
      )
    )

    val result = AccessToken(httpResponse)

    whenReady(result) { token =>
      assert(token.accessToken == accessToken)
      assert(token.tokenType == tokenType)
      assert(token.expiresIn == expiresIn)
      assert(token.refreshToken.contains(refreshToken))
    }
  }
} 
Example 168
Source File: StorageNodeActorTest.scala    From JustinDB   with Apache License 2.0 5 votes vote down vote up
package justin.db.actors

import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestActorRef, TestKit}
import com.typesafe.config.ConfigFactory
import justin.db.actors.protocol.RegisterNode
import justin.db.cluster.datacenter.Datacenter
import justin.db.consistenthashing.{NodeId, Ring}
import justin.db.replica.N
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}

class StorageNodeActorTest extends TestKit(StorageNodeActorTest.actorSystem)
  with FlatSpecLike
  with ImplicitSender
  with Matchers
  with ScalaFutures
  with BeforeAndAfterAll {

  behavior of "Storage Node Actor"

  it should "send msg back with targeted NodeId when registration of other node has correctly happened" in {
    // given
    val nodeId = NodeId(1)
    val testActor = TestActorRef(new TestActor(nodeId, Ring.apply(3, 1)))

    // when
    testActor ! RegisterNode(NodeId(2))

    // then
    expectMsg(RegisterNode(nodeId))
  }

  it should "has defined role \"storagenode\"" in {
    StorageNodeActor.role shouldBe "storagenode"
  }

  it should "compose its name based on datacenter it belongs to and given id" in {
    StorageNodeActor.name(NodeId(0), Datacenter("dc1"))   shouldBe "dc1-id-0"
    StorageNodeActor.name(NodeId(10), Datacenter("dc2"))  shouldBe "dc2-id-10"
    StorageNodeActor.name(NodeId(20), Datacenter("dc1"))  shouldBe "dc1-id-20"
    StorageNodeActor.name(NodeId(999), Datacenter("dc1")) shouldBe "dc1-id-999"
  }

  override def afterAll: Unit = {
    TestKit.shutdownActorSystem(system)
  }

  class TestActor(nodeId: NodeId, ring: Ring) extends StorageNodeActor(nodeId, Datacenter("default"), null, ring, N(1))
}

object StorageNodeActorTest {
  def actorSystem: ActorSystem = {
    val config = ConfigFactory.parseString(
      """
        |akka.loglevel = off
        |akka.actor.provider = cluster
        |akka.cluster.auto-join = off
        |akka.cluster.metrics.enabled = off
      """.stripMargin).withFallback(ConfigFactory.load())

    ActorSystem("test-system", config)
  }
} 
Example 169
Source File: DropwizardMarshallersSpec.scala    From akka-http-metrics   with Apache License 2.0 5 votes vote down vote up
package fr.davit.akka.http.metrics.dropwizard.marshalling

import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.testkit.ScalatestRouteTest
import fr.davit.akka.http.metrics.core.HttpMetricsRegistry.StatusGroupDimension
import fr.davit.akka.http.metrics.core.scaladsl.server.HttpMetricsDirectives._
import fr.davit.akka.http.metrics.dropwizard.DropwizardRegistry
import org.scalatest.BeforeAndAfterAll
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import spray.json.{DefaultJsonProtocol, JsValue}

import scala.concurrent.duration._

class DropwizardMarshallersSpec extends AnyFlatSpec with Matchers with ScalatestRouteTest with BeforeAndAfterAll {

  private case class JsonResponse(metrics: Map[String, JsValue])

  private trait Fixture extends SprayJsonSupport with DefaultJsonProtocol with DropwizardMarshallers {
    implicit val metricsFormat = jsonFormat1(JsonResponse)

    val registry = DropwizardRegistry()
    registry.underlying.counter("other.metric")
  }

  override def afterAll(): Unit = {
    cleanUp()
    super.afterAll()
  }

  "DropwizardMarshallers" should "expose metrics as json format" in new Fixture {
    // use metrics so they appear in the report
    val dimensions = Seq(StatusGroupDimension(StatusCodes.OK))
    registry.requests.inc()
    registry.receivedBytes.update(10)
    registry.active.inc()
    registry.responses.inc(dimensions)
    registry.errors.inc()
    registry.duration.observe(1.second, dimensions)
    registry.sentBytes.update(10)

    Get() ~> metrics(registry) ~> check {
      val json = responseAs[JsonResponse]
      // println(json)
      json.metrics.keys should contain theSameElementsAs Seq(
        "akka.http.requests.active",
        "akka.http.requests",
        "akka.http.requests.bytes",
        "akka.http.responses{status=2xx}",
        "akka.http.responses.errors",
        "akka.http.responses.duration{status=2xx}",
        "akka.http.responses.bytes",
        "other.metric"
      ).toSet
    }
  }

} 
Example 170
Source File: PrometheusMarshallersSpec.scala    From akka-http-metrics   with Apache License 2.0 5 votes vote down vote up
package fr.davit.akka.http.metrics.prometheus.marshalling

import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.testkit.ScalatestRouteTest
import fr.davit.akka.http.metrics.core.HttpMetricsRegistry.StatusGroupDimension
import fr.davit.akka.http.metrics.core.scaladsl.server.HttpMetricsDirectives.metrics
import fr.davit.akka.http.metrics.prometheus.{PrometheusRegistry, PrometheusSettings}
import io.prometheus.client.CollectorRegistry
import org.scalatest.BeforeAndAfterAll
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

import scala.concurrent.duration._

class PrometheusMarshallersSpec extends AnyFlatSpec with Matchers with ScalatestRouteTest with BeforeAndAfterAll {

  trait Fixture extends PrometheusMarshallers {

    val registry = PrometheusRegistry(
      new CollectorRegistry(),
      PrometheusSettings.default.withIncludeStatusDimension(true)
    )

    io.prometheus.client.Counter
      .build("other_metric", "An other metric")
      .register(registry.underlying)
  }

  override def afterAll(): Unit = {
    cleanUp()
    super.afterAll()
  }

  "PrometheusMarshallers" should "expose metrics as prometheus format" in new Fixture {
    // register labeled metrics so they appear at least once
    // use metrics so they appear in the report
    val dimensions = Seq(StatusGroupDimension(StatusCodes.OK))
    registry.requests.inc()
    registry.receivedBytes.update(10)
    registry.active.inc()
    registry.responses.inc(dimensions)
    registry.errors.inc(dimensions)
    registry.duration.observe(1.second, dimensions)
    registry.sentBytes.update(10, dimensions)

    Get() ~> metrics(registry) ~> check {
      response.entity.contentType shouldBe PrometheusMarshallers.PrometheusContentType
      val text = responseAs[String]
      // println(text)
      val metrics = text
        .split('\n')
        .filterNot(_.startsWith("#"))
        .map(_.takeWhile(c => c != ' ' && c != '{'))
        .distinct
      metrics should contain theSameElementsAs Seq(
        "akka_http_requests_active",
        "akka_http_requests_total",
        "akka_http_requests_size_bytes_bucket",
        "akka_http_requests_size_bytes_count",
        "akka_http_requests_size_bytes_sum",
        "akka_http_responses_total",
        "akka_http_responses_errors_total",
        "akka_http_responses_duration_seconds_bucket",
        "akka_http_responses_duration_seconds_count",
        "akka_http_responses_duration_seconds_sum",
        "akka_http_responses_size_bytes_bucket",
        "akka_http_responses_size_bytes_count",
        "akka_http_responses_size_bytes_sum",
        "akka_http_connections_active",
        "akka_http_connections_total",
        "other_metric"
      )
    }
  }
} 
Example 171
Source File: DatadogRegistrySpec.scala    From akka-http-metrics   with Apache License 2.0 5 votes vote down vote up
package fr.davit.akka.http.metrics.datadog

import java.net.InetSocketAddress

import akka.actor.ActorSystem
import akka.http.scaladsl.model.StatusCodes
import akka.io.{IO, Udp}
import akka.testkit.{TestKit, TestProbe}
import com.timgroup.statsd.NonBlockingStatsDClient
import fr.davit.akka.http.metrics.core.HttpMetricsRegistry.{PathDimension, StatusGroupDimension}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers

import scala.concurrent.duration._

class DatadogRegistrySpec extends TestKit(ActorSystem("DatadogRegistrySpec")) with AnyFlatSpecLike with Matchers with BeforeAndAfterAll {

  val dimensions = Seq(StatusGroupDimension(StatusCodes.OK), PathDimension("/api"))

  def withFixture(test: (TestProbe, DatadogRegistry) => Any) = {
    val statsd = TestProbe()
    statsd.send(IO(Udp), Udp.Bind(statsd.ref, new InetSocketAddress(0)))
    val port = statsd.expectMsgType[Udp.Bound].localAddress.getPort
    val socket = statsd.sender()
    val client = new NonBlockingStatsDClient("", "localhost", port)
    val registry = DatadogRegistry(client)
    try {
      test(statsd, registry)
    } finally {
      client.close()
      socket ! Udp.Unbind
    }
  }

  override def afterAll(): Unit = {
    shutdown()
    super.afterAll()
  }

  "DatadogRegistry" should "send active datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.active.inc()
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.requests_active:1|c"
  }

  it should "send requests datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.requests.inc()
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.requests_count:1|c"
  }

  it should "send receivedBytes datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.receivedBytes.update(3)
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.requests_bytes:3|d"

    registry.receivedBytes.update(3, dimensions)
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.requests_bytes:3|d|#path:/api,status:2xx"
  }

  it should "send responses datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.responses.inc()
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.responses_count:1|c"

    registry.responses.inc(dimensions)
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.responses_count:1|c|#path:/api,status:2xx"
  }

  it should "send errors datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.errors.inc()
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.responses_errors_count:1|c"

    registry.errors.inc(dimensions)
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.responses_errors_count:1|c|#path:/api,status:2xx"
  }

  it should "send duration datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.duration.observe(3.seconds)
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.responses_duration:3000|d"

    registry.duration.observe(3.seconds, dimensions)
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.responses_duration:3000|d|#path:/api,status:2xx"
  }

  it should "send sentBytes datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.sentBytes.update(3)
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.responses_bytes:3|d"

    registry.sentBytes.update(3, dimensions)
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.responses_bytes:3|d|#path:/api,status:2xx"
  }

  it should "send connected datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.connected.inc()
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.connections_active:1|c"
  }
  it should "send connections datagrams to the statsd server" in withFixture { (statsd, registry) =>
    registry.connections.inc()
    statsd.expectMsgType[Udp.Received].data.utf8String shouldBe "akka.http.connections_count:1|c"
  }
} 
Example 172
Source File: DockerContainers.scala    From reactive-consul   with MIT License 5 votes vote down vote up
package stormlantern.dockertestkit

import com.spotify.docker.client.messages.{ ContainerConfig, HostConfig }
import org.scalatest.{ BeforeAndAfterAll, Suite }
import stormlantern.dockertestkit.client.Container

trait DockerContainers extends BeforeAndAfterAll { this: Suite =>

  def containerConfigs: Set[ContainerConfig]
  val hostConfig = HostConfig.builder()
    .publishAllPorts(true)
    .networkMode("bridge")
    .build()
  val containers = containerConfigs.map(new Container(_))

  def withDockerHosts[T](ports: Set[String])(f: Map[String, (String, Int)] => T): T = {
    // Find the mapped available ports in the network settings
    f(ports.zip(ports.flatMap(p => containers.map(c => c.mappedPort(p).headOption))).map {
      case (port, Some(binding)) => port -> (DockerClientProvider.hostname, binding.hostPort().toInt)
      case (port, None) => throw new IndexOutOfBoundsException(s"Cannot find mapped port $port")
    }.toMap)
  }

  override def beforeAll(): Unit = containers.foreach(_.start())

  override def afterAll(): Unit = containers.foreach { container =>
    container.stop()
    container.remove()
  }
} 
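For context, a hedged sketch of how a suite might use this trait; the image name, exposed port, and suite name are assumptions, not part of reactive-consul.

import com.spotify.docker.client.messages.ContainerConfig
import org.scalatest.{FlatSpec, Matchers}

class RedisContainerSpec extends FlatSpec with Matchers with DockerContainers {

  // A single throwaway Redis container; image and port are illustrative
  override def containerConfigs: Set[ContainerConfig] = Set(
    ContainerConfig.builder()
      .image("redis:3.2")
      .exposedPorts("6379/tcp")
      .build()
  )

  "withDockerHosts" should "resolve the mapped Redis port" in {
    withDockerHosts(Set("6379/tcp")) { hosts =>
      val (_, port) = hosts("6379/tcp")
      port should be > 0
    }
  }
}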
Example 173
Source File: ServiceBrokerSpec.scala    From reactive-consul   with MIT License 5 votes vote down vote up
package stormlantern.consul.client

import akka.actor.{ ActorRef, ActorSystem }
import akka.actor.Status.Failure
import akka.testkit.{ ImplicitSender, TestKit }
import org.scalamock.scalatest.MockFactory
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{ BeforeAndAfterAll, FlatSpecLike, Matchers }
import stormlantern.consul.client.dao.ConsulHttpClient
import stormlantern.consul.client.discovery.ConnectionHolder
import stormlantern.consul.client.helpers.CallingThreadExecutionContext
import stormlantern.consul.client.loadbalancers.LoadBalancerActor
import stormlantern.consul.client.util.Logging

import scala.concurrent.Future

class ServiceBrokerSpec(_system: ActorSystem) extends TestKit(_system) with ImplicitSender with FlatSpecLike
    with Matchers with ScalaFutures with BeforeAndAfterAll with MockFactory with Logging {

  implicit val ec = CallingThreadExecutionContext()
  def this() = this(ActorSystem("ServiceBrokerSpec"))

  override def afterAll() {
    TestKit.shutdownActorSystem(system)
  }

  trait TestScope {
    val connectionHolder: ConnectionHolder = mock[ConnectionHolder]
    val httpClient: ConsulHttpClient = mock[ConsulHttpClient]
    val loadBalancer: ActorRef = self
  }

  "The ServiceBroker" should "return a service connection when requested" in new TestScope {
    (connectionHolder.connection _).expects().returns(Future.successful(true))
    (connectionHolder.loadBalancer _).expects().returns(loadBalancer)
    val sut = new ServiceBroker(self, httpClient)
    val result: Future[Boolean] = sut.withService("service1") { service: Boolean ⇒
      Future.successful(service)
    }
    expectMsgPF() {
      case ServiceBrokerActor.GetServiceConnection("service1") ⇒
        lastSender ! connectionHolder
        result.map(_ shouldEqual true).futureValue
    }
    expectMsg(LoadBalancerActor.ReturnConnection(connectionHolder))
  }

  it should "return the connection when an error occurs" in new TestScope {
    (connectionHolder.connection _).expects().returns(Future.successful(true))
    (connectionHolder.loadBalancer _).expects().returns(loadBalancer)
    val sut = new ServiceBroker(self, httpClient)
    val result: Future[Boolean] = sut.withService[Boolean, Boolean]("service1") { service: Boolean ⇒
      throw new RuntimeException()
    }
    expectMsgPF() {
      case ServiceBrokerActor.GetServiceConnection("service1") ⇒
        lastSender ! connectionHolder
        an[RuntimeException] should be thrownBy result.futureValue
    }
    expectMsg(LoadBalancerActor.ReturnConnection(connectionHolder))
  }

  it should "throw an error when an excpetion is returned" in new TestScope {
    val sut = new ServiceBroker(self, httpClient)
    val result: Future[Boolean] = sut.withService("service1") { service: Boolean ⇒
      Future.successful(service)
    }
    expectMsgPF() {
      case ServiceBrokerActor.GetServiceConnection("service1") ⇒
        lastSender ! Failure(new RuntimeException())
        an[RuntimeException] should be thrownBy result.futureValue
    }
  }
} 
Example 174
Source File: ServiceAvailabilityActorSpec.scala    From reactive-consul   with MIT License 5 votes vote down vote up
package stormlantern.consul.client.discovery

import akka.actor.ActorSystem
import akka.testkit.{ ImplicitSender, TestActorRef, TestKit }
import org.scalamock.scalatest.MockFactory
import org.scalatest.{ BeforeAndAfterAll, FlatSpecLike, Matchers }
import stormlantern.consul.client.dao.{ ConsulHttpClient, IndexedServiceInstances }
import stormlantern.consul.client.discovery.ServiceAvailabilityActor.Start
import stormlantern.consul.client.helpers.ModelHelpers
import stormlantern.consul.client.util.Logging

import scala.concurrent.Future
import scala.concurrent.duration._

class ServiceAvailabilityActorSpec(_system: ActorSystem) extends TestKit(_system) with ImplicitSender with FlatSpecLike
    with Matchers with BeforeAndAfterAll with MockFactory with Logging {

  def this() = this(ActorSystem("ServiceAvailabilityActorSpec"))

  override def afterAll() {
    TestKit.shutdownActorSystem(system)
  }

  "The ServiceAvailabilityActor" should "receive one service update when there are no changes" in {
    val httpClient: ConsulHttpClient = mock[ConsulHttpClient]
    val sut = TestActorRef(ServiceAvailabilityActor.props(httpClient, ServiceDefinition("bogus123", "bogus"), self))
    (httpClient.getService _).expects("bogus", None, Some(0L), Some("1s"), None).returns(Future.successful(IndexedServiceInstances(1, Set.empty)))
    (httpClient.getService _).expects("bogus", None, Some(1L), Some("1s"), None).onCall { p ⇒
      sut.stop()
      Future.successful(IndexedServiceInstances(1, Set.empty))
    }
    sut ! Start
    expectMsg(1.second, ServiceAvailabilityActor.ServiceAvailabilityUpdate("bogus123"))
    expectMsg(1.second, ServiceAvailabilityActor.Started)
    expectNoMsg(1.second)
  }

  it should "receive two service updates when there is a change" in {
    val httpClient: ConsulHttpClient = mock[ConsulHttpClient]
    lazy val sut = TestActorRef(ServiceAvailabilityActor.props(httpClient, ServiceDefinition("bogus123", "bogus"), self))
    val service = ModelHelpers.createService("bogus123", "bogus")
    (httpClient.getService _).expects("bogus", None, Some(0L), Some("1s"), None).returns(Future.successful(IndexedServiceInstances(1, Set.empty)))
    (httpClient.getService _).expects("bogus", None, Some(1L), Some("1s"), None).returns(Future.successful(IndexedServiceInstances(2, Set(service))))
    (httpClient.getService _).expects("bogus", None, Some(2L), Some("1s"), None).onCall { p ⇒
      sut.stop()
      Future.successful(IndexedServiceInstances(2, Set(service)))
    }
    sut ! Start
    expectMsg(1.second, ServiceAvailabilityActor.ServiceAvailabilityUpdate("bogus123"))
    expectMsg(1.second, ServiceAvailabilityActor.Started)
    expectMsg(1.second, ServiceAvailabilityActor.ServiceAvailabilityUpdate("bogus123", Set(service), Set.empty))
    expectNoMsg(1.second)
  }

  it should "receive one service update when there are two with different tags" in {
    val httpClient: ConsulHttpClient = mock[ConsulHttpClient]
    lazy val sut = TestActorRef(ServiceAvailabilityActor.props(httpClient, ServiceDefinition("bogus123", "bogus", Set("one", "two")), self))
    val nonMatchingservice = ModelHelpers.createService("bogus123", "bogus", tags = Set("one"))
    val matchingService = nonMatchingservice.copy(serviceTags = Set("one", "two"))
    (httpClient.getService _).expects("bogus", Some("one"), Some(0L), Some("1s"), None).returns(Future.successful(IndexedServiceInstances(1, Set.empty)))
    (httpClient.getService _).expects("bogus", Some("one"), Some(1L), Some("1s"), None).returns(Future.successful(IndexedServiceInstances(2, Set(nonMatchingservice, matchingService))))
    (httpClient.getService _).expects("bogus", Some("one"), Some(2L), Some("1s"), None).onCall { p ⇒
      sut.stop()
      Future.successful(IndexedServiceInstances(2, Set(nonMatchingservice, matchingService)))
    }
    sut ! Start
    expectMsg(1.second, ServiceAvailabilityActor.ServiceAvailabilityUpdate("bogus123"))
    expectMsg(1.second, ServiceAvailabilityActor.Started)
    expectMsg(1.second, ServiceAvailabilityActor.ServiceAvailabilityUpdate("bogus123", Set(matchingService), Set.empty))
    expectNoMsg(1.second)
  }
} 
Example 175
Source File: LeaderFollowerActorSpec.scala    From reactive-consul   with MIT License 5 votes vote down vote up
package stormlantern.consul.client.election

import java.util
import java.util.UUID

import akka.actor.ActorSystem
import akka.testkit.{ TestActorRef, ImplicitSender, TestKit }
import org.scalamock.scalatest.MockFactory
import org.scalatest.{ BeforeAndAfterAll, Matchers, FlatSpecLike }
import stormlantern.consul.client.dao.{ BinaryData, KeyData, AcquireSession, ConsulHttpClient }
import stormlantern.consul.client.election.LeaderFollowerActor.Participate

import scala.concurrent.Future

class LeaderFollowerActorSpec(_system: ActorSystem) extends TestKit(_system) with ImplicitSender with FlatSpecLike
    with Matchers with BeforeAndAfterAll with MockFactory {

  def this() = this(ActorSystem("LeaderFollowerActorSpec"))

  override def afterAll() {
    TestKit.shutdownActorSystem(system)
  }

  trait TestScope {
    val sessionId: UUID = UUID.fromString("9A3BB9C-E2E7-43DF-BFD5-845417146552")
    val key = "path/to/our/key"
    val host = "myhost.mynetwork.net"
    val port = 1337
    val consulHttpClient: ConsulHttpClient = mock[ConsulHttpClient]
    val leaderInfoBytes: Array[Byte] = s"""{"host":"$host","port":$port}""".getBytes("UTF-8")
  }

  "The LeaderFollowerActor" should "participate in an election, win, watch for changes and participate again when session is lost" in new TestScope {
    val sut = TestActorRef(LeaderFollowerActor.props(consulHttpClient, sessionId, key, host, port))
    (consulHttpClient.putKeyValuePair _).expects(where { (k, lib, op) ⇒
      k == key && util.Arrays.equals(lib, leaderInfoBytes) && op.contains(AcquireSession(sessionId))
    }).returns(Future.successful(true))
    (consulHttpClient.getKeyValuePair _).expects(key, Some(0L), Some("1s"), false, false).returns {
      Future.successful(Seq(KeyData(key, 1, 1, 1, 0, BinaryData(leaderInfoBytes), Some(sessionId))))
    }
    (consulHttpClient.getKeyValuePair _).expects(key, Some(1L), Some("1s"), false, false).returns {
      Future.successful(Seq(KeyData(key, 1, 2, 1, 0, BinaryData(leaderInfoBytes), None)))
    }
    (consulHttpClient.putKeyValuePair _).expects(where { (k, lib, op) ⇒
      k == key && util.Arrays.equals(lib, leaderInfoBytes) && op.contains(AcquireSession(sessionId))
    }).onCall { p ⇒
      sut.stop()
      Future.successful(false)
    }
    sut ! Participate
  }

  it should "participate in an election, lose, watch for changes and participate again when session is lost" in new TestScope {
    val otherSessionId: UUID = UUID.fromString("9A3BB9C-E2E7-43DF-BFD5-845417146553")
    val sut = TestActorRef(LeaderFollowerActor.props(consulHttpClient, sessionId, key, host, port))
    (consulHttpClient.putKeyValuePair _).expects(where { (k, lib, op) ⇒
      k == key && util.Arrays.equals(lib, leaderInfoBytes) && op.contains(AcquireSession(sessionId))
    }).returns(Future.successful(false))
    (consulHttpClient.getKeyValuePair _).expects(key, Some(0L), Some("1s"), false, false).returns {
      Future.successful(Seq(KeyData(key, 1, 1, 1, 0, BinaryData(leaderInfoBytes), Some(otherSessionId))))
    }
    (consulHttpClient.getKeyValuePair _).expects(key, Some(1L), Some("1s"), false, false).returns {
      Future.successful(Seq(KeyData(key, 1, 2, 1, 0, BinaryData(leaderInfoBytes), None)))
    }
    (consulHttpClient.putKeyValuePair _).expects(where { (k, lib, op) ⇒
      k == key && util.Arrays.equals(lib, leaderInfoBytes) && op.contains(AcquireSession(sessionId))
    }).onCall { p ⇒
      sut.stop()
      Future.successful(true)
    }
    sut ! Participate
  }
} 
Example 176
Source File: TempDirectory.scala    From spark-tda   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.ml.feature

import java.io.File
import org.scalatest.{BeforeAndAfterAll, Suite}
import com.holdenkarau.spark.testing.Utils

trait TempDirectory extends BeforeAndAfterAll { self: Suite =>

  private var _tempDir: File = _

  // Returns the shared temporary directory as a File instance
  protected def tempDir: File = _tempDir

  override def beforeAll(): Unit = {
    super.beforeAll()
    _tempDir = Utils.createTempDir()
  }

  override def afterAll(): Unit = {
    try {
      Utils.deleteRecursively(_tempDir)
    } finally {
      super.afterAll()
    }
  }
} 
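A minimal sketch of a suite using this trait; the suite name and file name are illustrative, not from spark-tda.

import java.io.File

import org.scalatest.FunSuite

class TempDirectoryUsageSuite extends FunSuite with TempDirectory {
  test("tempDir is available to every test in the suite") {
    // The directory is created in beforeAll() and deleted recursively in afterAll()
    val out = new File(tempDir, "scratch.txt")
    assert(tempDir.exists())
    assert(out.getParentFile == tempDir)
  }
}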
Example 177
Source File: IntegrationBaseSpec.scala    From vat-api   with Apache License 2.0 5 votes vote down vote up
package support

import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatestplus.play.guice.GuiceOneServerPerSuite
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.json.{JsValue, Json}
import play.api.libs.ws.{WSClient, WSRequest, WSResponse}
import play.api.{Application, Environment, Mode}

trait IntegrationBaseSpec extends UnitSpec with WireMockHelper with GuiceOneServerPerSuite
  with BeforeAndAfterEach with BeforeAndAfterAll {

  val mockHost: String = WireMockHelper.host
  val mockPort: String = WireMockHelper.wireMockPort.toString

  lazy val client: WSClient = app.injector.instanceOf[WSClient]

  def servicesConfig: Map[String, Any] = Map(
    "microservice.services.des.host" -> mockHost,
    "microservice.services.des.port" -> mockPort,
    "microservice.services.auth.host" -> mockHost,
    "microservice.services.auth.port" -> mockPort,
    "auditing.consumer.baseUri.port" -> mockPort,
    "microservice.services.non-repudiation.host" -> mockHost,
    "microservice.services.non-repudiation.port" -> mockPort,
    "feature-switch.refactor.enabled" -> true,
    "feature-switch.refactor.prod.enabled" -> false,
    "microservice.services.non-repudiation.maxTimeout" -> 5000
  )

  override implicit lazy val app: Application = new GuiceApplicationBuilder()
    .in(Environment.simple(mode = Mode.Dev))
    .configure(servicesConfig)
    .build()

  override def beforeAll(): Unit = {
    super.beforeAll()
    startWireMock()
  }

  override def afterAll(): Unit = {
    stopWireMock()
    super.afterAll()
  }

  def buildRequest(path: String): WSRequest = client.url(s"http://localhost:$port$path").withFollowRedirects(false)

  def document(response: WSResponse): JsValue = Json.parse(response.body)
} 
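A hedged sketch of a concrete spec built on this trait; the endpoint path is illustrative, and the await helper plus WordSpec-style syntax are assumed to come from this project's UnitSpec.

import play.api.libs.ws.WSResponse

class PingIntegrationSpec extends IntegrationBaseSpec {

  "calling an endpoint on the app under test" should {
    "complete against the running server" in {
      // "/ping" is an assumed path; buildRequest targets the app started by GuiceOneServerPerSuite
      val response: WSResponse = await(buildRequest("/ping").get())
      response.status shouldBe 200
    }
  }
}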
Example 178
Source File: TestApplication.scala    From vat-api   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.vatapi

import com.github.tomakehurst.wiremock.WireMockServer
import com.github.tomakehurst.wiremock.client.WireMock
import com.github.tomakehurst.wiremock.client.WireMock._
import com.github.tomakehurst.wiremock.core.WireMockConfiguration._
import com.github.tomakehurst.wiremock.stubbing.StubMapping
import org.scalamock.scalatest.MockFactory
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import play.api.http.Status._

import scala.concurrent.duration._
import scala.language.postfixOps

trait TestApplication
  extends UnitSpec
    with BeforeAndAfterEach
    with BeforeAndAfterAll
    with MockFactory {

  override implicit val timeout: FiniteDuration = 100 seconds

  val mockPort = 22222
  val mockHost = "localhost"

  protected val wiremockBaseUrl: String = s"http://$mockHost:$mockPort"
  private val wireMockServer = new WireMockServer(wireMockConfig().port(mockPort))

  protected def baseBeforeAll(): StubMapping = {
    wireMockServer.stop()
    wireMockServer.start()
    WireMock.configureFor(mockHost, mockPort)
    // the below stub is here so that the application finds the registration endpoint which is called on startup
    stubFor(post(urlPathEqualTo("/registration")).willReturn(aResponse().withStatus(OK)))
  }

  override def beforeAll(): Unit = {
    super.beforeAll()
    baseBeforeAll()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    wireMockServer.stop()
  }

  override def beforeEach(): Unit = {
    super.beforeEach()
    WireMock.reset()
  }

} 
Example 179
Source File: SeleniumTest.scala    From udash-core   with Apache License 2.0 5 votes vote down vote up
package io.udash.web

import java.util.concurrent.TimeUnit

import org.openqa.selenium.firefox.{FirefoxDriver, FirefoxOptions}
import org.openqa.selenium.remote.RemoteWebDriver
import org.openqa.selenium.{Dimension, WebElement}
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

private trait ServerConfig {
  def init(): Unit
  def createUrl(part: String): String
  def destroy(): Unit
}

// Doesn't launch embedded guide app server
private final class ExternalServerConfig(urlPrefix: String) extends ServerConfig {
  require(!urlPrefix.endsWith("/"))

  override def createUrl(part: String): String = {
    require(part.startsWith("/"))
    urlPrefix + part
  }

  override def init(): Unit = {}
  override def destroy(): Unit = {}
}

// Launches embedded guide server
private final class InternalServerConfig extends ServerConfig {
  private val server = Launcher.createApplicationServer()

  override def init(): Unit = server.start()

  override def destroy(): Unit = server.stop()

  override def createUrl(part: String): String = {
    require(part.startsWith("/"))
    s"http://127.0.0.2:${server.port}$part"
  }
}

abstract class SeleniumTest extends AnyWordSpec with Matchers with BeforeAndAfterAll with BeforeAndAfterEach with Eventually {
  override implicit val patienceConfig: PatienceConfig = PatienceConfig(scaled(Span(10, Seconds)), scaled(Span(50, Millis)))

  protected final val driver: RemoteWebDriver = new FirefoxDriver(new FirefoxOptions().setHeadless(true))
  driver.manage().timeouts().implicitlyWait(200, TimeUnit.MILLISECONDS)
  driver.manage().window().setSize(new Dimension(1440, 800))

  protected final def findElementById(id: String): WebElement = eventually {
    driver.findElementById(id)
  }

  protected def url: String

  private val server: ServerConfig = new InternalServerConfig

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    server.init()
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    driver.get(server.createUrl(url))
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    server.destroy()
    driver.close()
  }
} 
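A minimal sketch of a concrete suite; the path and element id are assumptions for illustration only.

class IndexPageTest extends SeleniumTest {
  // beforeEach() opens server.createUrl(url) in the headless Firefox driver
  override protected def url: String = "/"

  "The index page" should {
    "render its root element" in {
      findElementById("application") should not be null // assumed element id
    }
  }
}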
Example 180
Source File: UsesHttpServer.scala    From udash-core   with Apache License 2.0 5 votes vote down vote up
package io.udash
package rest

import org.eclipse.jetty.server.Server
import org.scalatest.{BeforeAndAfterAll, Suite}

trait UsesHttpServer extends BeforeAndAfterAll { this: Suite =>
  def port: Int
  val server: Server = new Server(port)
  def baseUrl = s"http://localhost:$port"

  protected def setupServer(server: Server): Unit

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    setupServer(server)
    server.start()
  }

  override protected def afterAll(): Unit = {
    server.stop()
    super.afterAll()
  }
} 
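A hedged usage sketch: a suite supplies the port and a Jetty handler via setupServer, then talks to baseUrl. The port, path, and suite name are assumptions; the handler uses the plain Jetty 9 AbstractHandler API.

import javax.servlet.http.{HttpServletRequest, HttpServletResponse}

import org.eclipse.jetty.server.{Request, Server}
import org.eclipse.jetty.server.handler.AbstractHandler
import org.scalatest.funsuite.AnyFunSuite

class PingServerTest extends AnyFunSuite with UsesHttpServer {
  override def port: Int = 9090 // assumed free port

  // Registers a handler that answers every request with 200 OK
  override protected def setupServer(server: Server): Unit =
    server.setHandler(new AbstractHandler {
      override def handle(target: String, baseRequest: Request,
                          request: HttpServletRequest, response: HttpServletResponse): Unit = {
        response.setStatus(HttpServletResponse.SC_OK)
        baseRequest.setHandled(true)
      }
    })

  test("the embedded server answers on baseUrl") {
    val conn = new java.net.URL(s"$baseUrl/ping")
      .openConnection().asInstanceOf[java.net.HttpURLConnection]
    assert(conn.getResponseCode == 200)
  }
}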
Example 181
Source File: ClientSuite.scala    From spark-power-bi   with Apache License 2.0 5 votes vote down vote up
package com.granturing.spark.powerbi

import org.apache.spark.SparkConf
import org.scalatest.{BeforeAndAfterAll, Matchers, FunSuite}
import scala.concurrent.Await

class ClientSuite extends FunSuite with Matchers with BeforeAndAfterAll {

  val clientConf = ClientConf.fromSparkConf(new SparkConf())
  val client = new Client(clientConf)

  val dataset = "PowerBI Spark Test"
  var datasetId: String = _
  val group = sys.env.get("POWERBI_GROUP")
  var groupId: Option[String] = None
  val table = "People"
  val tableSchema = Table(
    table, Seq(
      Column("name", "string"),
      Column("age", "Int64"),
      Column("birthday", "Datetime"),
      Column("timestamp", "Datetime")
    ))

  override def beforeAll = {
    groupId = group match {
      case Some(grp) => {
        val grpOpt = Await.result(client.getGroups, clientConf.timeout).filter(g => grp.equals(g.name)).map(_.id).headOption

        grpOpt match {
          case Some(g) => Some(g)
          case None => sys.error(s"group $grp not found")
        }
      }
      case None => None
    }
  }

  test("client can list groups") {
    val groups = Await.result(client.getGroups, clientConf.timeout)

    groups should not be null
  }

  test("client can list datasets") {
    val ds = Await.result(client.getDatasets(groupId), clientConf.timeout)

    ds should not be null
  }

} 
Example 182
Source File: ProducerStreamSpec.scala    From reactive-kafka-microservice-template   with Apache License 2.0 5 votes vote down vote up
package akka.kafka

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.testkit.{DefaultTimeout, ImplicitSender, TestKit, TestProbe}
import com.omearac.consumers.ConsumerStream
import com.omearac.producers.ProducerStream
import com.omearac.settings.Settings
import com.omearac.shared.JsonMessageConversion.Conversion
import com.omearac.shared.KafkaMessages.{ExampleAppEvent, KafkaMessage}
import org.apache.kafka.clients.producer.ProducerRecord
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}


class ProducerStreamSpec extends TestKit(ActorSystem("ProducerStreamSpec"))
    with DefaultTimeout with ImplicitSender
    with WordSpecLike with Matchers with BeforeAndAfterAll
    with ConsumerStream with ProducerStream {

    val settings = Settings(system).KafkaProducers
    val probe = TestProbe()

    override def afterAll: Unit = {
        shutdown()
    }

    "Sending KafkaMessages to the KafkaMessage producerStream" should {
        "be converted to JSON and obtained by the Stream Sink " in {

            //Creating Producer Stream Components for publishing KafkaMessages
            val producerProps = settings.KafkaProducerInfo("KafkaMessage")
            val numOfMessages = 50
            val kafkaMsgs = for { i <- 0 to numOfMessages} yield KafkaMessage("sometime", "somestuff", i)
            val producerSource= Source(kafkaMsgs)
            val producerFlow = createStreamFlow[KafkaMessage](producerProps)
            val producerSink = Sink.actorRef(probe.ref, "complete")

            val jsonKafkaMsgs = for { msg <- kafkaMsgs} yield Conversion[KafkaMessage].convertToJson(msg)

            producerSource.via(producerFlow).runWith(producerSink)
            for (i <- 0 to jsonKafkaMsgs.length) {
                probe.expectMsgPF(){
                    case m: ProducerRecord[_,_] => if (jsonKafkaMsgs.contains(m.value())) () else fail()
                    case "complete" => ()
                }
            }
        }
    }

    "Sending ExampleAppEvent messages to the EventMessage producerStream" should {
        "be converted to JSON and obtained by the Stream Sink " in {

            //Creating Producer Stream Components for publishing ExampleAppEvent messages
            val producerProps = settings.KafkaProducerInfo("ExampleAppEvent")
            val numOfMessages = 50
            val eventMsgs = for { i <- 0 to numOfMessages} yield ExampleAppEvent("sometime", "senderID", s"Event number $i occurred")

            val producerSource= Source(eventMsgs)
            val producerFlow = createStreamFlow[ExampleAppEvent](producerProps)
            val producerSink = Sink.actorRef(probe.ref, "complete")

            val jsonAppEventMsgs = for{ msg <- eventMsgs} yield Conversion[ExampleAppEvent].convertToJson(msg)
            producerSource.via(producerFlow).runWith(producerSink)
            for (i <- 0 to jsonAppEventMsgs.length){
                probe.expectMsgPF(){
                    case m: ProducerRecord[_,_] => if (jsonAppEventMsgs.contains(m.value())) () else fail()
                    case "complete" => ()
                }
            }
        }
    }
} 
Example 183
Source File: EventConsumerSpec.scala    From reactive-kafka-microservice-template   with Apache License 2.0 5 votes vote down vote up
package akka.kafka

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.{DefaultTimeout, ImplicitSender, TestActorRef, TestKit}
import com.omearac.consumers.ConsumerStreamManager.{InitializeConsumerStream, TerminateConsumerStream}
import com.omearac.consumers.DataConsumer.{ConsumerActorReply, ManuallyInitializeStream, ManuallyTerminateStream}
import com.omearac.consumers.EventConsumer
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.collection.mutable.ArrayBuffer


class EventConsumerSpec extends TestKit(ActorSystem("EventConsumerSpec"))
  with DefaultTimeout with ImplicitSender
  with WordSpecLike with Matchers with BeforeAndAfterAll {

  //Creating the Actors
  val testConsumer = TestActorRef(new EventConsumer)
  val mockStreamAndManager = system.actorOf(Props(new MockStreamAndManager), "mockStreamAndManager")

  override def afterAll: Unit = {
    shutdown()
  }

  class MockStreamAndManager extends Actor {
    val receive: Receive = {
      case InitializeConsumerStream(_, _) => testConsumer ! "STREAM_INIT"
      case TerminateConsumerStream(_) => testConsumer ! "STREAM_DONE"
    }
  }


  "Sending ManuallyTerminateStream to EventConsumer in receive state" should {
    "return a Stream Already Stopped reply " in {
      testConsumer ! ManuallyTerminateStream
      expectMsg(ConsumerActorReply("Event Consumer Stream Already Stopped"))
    }
  }

  "Sending ManuallyInitializeStream to EventConsumer in receive state" should {
    "forward the message to the ConsumerStreamManager and change state to consuming" in {
      testConsumer.underlyingActor.consumerStreamManager = mockStreamAndManager
      testConsumer ! ManuallyInitializeStream
      expectMsg(ConsumerActorReply("Event Consumer Stream Started"))
      //Now check for state change
      Thread.sleep(750)
      testConsumer ! ManuallyInitializeStream
      expectMsg(ConsumerActorReply("Event Consumer Already Started"))
    }
  }

  "Sending STREAM_DONE to EventConsumer while in consuming state" should {
    "change state to idle state" in {
      val consuming = testConsumer.underlyingActor.consumingEvents
      testConsumer.underlyingActor.context.become(consuming)
      testConsumer ! "STREAM_DONE"
      //Now check for state change
      Thread.sleep(750)
      testConsumer ! ManuallyTerminateStream
      expectMsg(ConsumerActorReply("Event Consumer Stream Already Stopped"))
    }
  }
  "Sending ManuallyTerminateStream to EventConsumer while in consuming state" should {
    "forward the message to the ConsumerStreamManager and then upon reply, change state to idle" in {
      val consuming = testConsumer.underlyingActor.consumingEvents
      testConsumer.underlyingActor.context.become(consuming)
      testConsumer ! ManuallyTerminateStream
      expectMsg(ConsumerActorReply("Event Consumer Stream Stopped"))
      //Now check for state change
      Thread.sleep(750)
      testConsumer ! ManuallyTerminateStream
      expectMsg(ConsumerActorReply("Event Consumer Stream Already Stopped"))
    }
  }

  "Sending ConsumerMessageBatch message" should {
    "reply OK" in {
      val msgBatch: ArrayBuffer[String] = ArrayBuffer("test1")
      val consuming = testConsumer.underlyingActor.consumingEvents
      testConsumer.underlyingActor.context.become(consuming)
      testConsumer.underlyingActor.consumerStreamManager = mockStreamAndManager
      testConsumer ! msgBatch
      expectMsg("OK")
    }
  }
} 
Example 184
Source File: EventProducerSpec.scala    From reactive-kafka-microservice-template   with Apache License 2.0 5 votes vote down vote up
package akka.kafka

import java.util.Date

import akka.Done
import akka.actor.ActorSystem
import akka.serialization.Serialization
import akka.stream.QueueOfferResult
import akka.stream.QueueOfferResult.Enqueued
import akka.stream.scaladsl.SourceQueueWithComplete
import akka.testkit.{DefaultTimeout, EventFilter, ImplicitSender, TestActorRef, TestKit, TestProbe}
import com.omearac.producers.EventProducer
import com.omearac.shared.AkkaStreams
import com.omearac.shared.EventMessages.{ActivatedProducerStream, MessagesPublished}
import com.omearac.shared.KafkaMessages.ExampleAppEvent
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.Future


class EventProducerSpec extends TestKit(ActorSystem("EventProducerSpec",ConfigFactory.parseString("""
    akka.loggers = ["akka.testkit.TestEventListener"] """)))
    with DefaultTimeout with ImplicitSender
    with WordSpecLike with Matchers with BeforeAndAfterAll
    with AkkaStreams {

    val testProducer = TestActorRef(new EventProducer)
    val producerActor = testProducer.underlyingActor
    val mockProducerStream: SourceQueueWithComplete[Any] = new SourceQueueWithComplete[Any] {
        override def complete(): Unit = println("complete")

        override def fail(ex: Throwable): Unit = println("fail")

        override def offer(elem: Any): Future[QueueOfferResult] = Future{Enqueued}

        override def watchCompletion(): Future[Done] = Future{Done}
    }

    override def afterAll: Unit = {
        shutdown()
    }

    //Create a test event listener for the local message bus
    val testEventListener = TestProbe()
    system.eventStream.subscribe(testEventListener.ref, classOf[ExampleAppEvent])


    "Sending ActivatedProducerStream to EventProducer in receive state" should {
        "save the stream ref and change state to producing " in {
            testProducer ! ActivatedProducerStream(mockProducerStream, "TestTopic")
            Thread.sleep(500)
            producerActor.producerStream should be(mockProducerStream)
            EventFilter.error(message = "EventProducer got the unknown message while producing: testMessage", occurrences = 1) intercept {
                testProducer ! "testMessage"
            }
        }
    }

    "Sending ExampleAppEvent to system bus while EventProducer is in publishEvent state" should {
        "offer the ExampleAppEvent to the stream " in {
            val producingState = producerActor.publishEvent
            producerActor.context.become(producingState)
            producerActor.producerStream = mockProducerStream
            val dateFormat = new java.text.SimpleDateFormat("dd:MM:yy:HH:mm:ss.SSS")
            lazy val timetag = dateFormat.format(new Date(System.currentTimeMillis()))
            val eventMsg = MessagesPublished(5)
            val testMessage = ExampleAppEvent(timetag,Serialization.serializedActorPath(self),eventMsg.toString)
            system.eventStream.publish(testMessage)
            testEventListener.expectMsgPF(){
                case ExampleAppEvent(_,_,m) => if (m == eventMsg.toString) () else fail()
            }
        }
    }
 } 
Example 185
Source File: DataProducerSpec.scala    From reactive-kafka-microservice-template   with Apache License 2.0 5 votes vote down vote up
package akka.kafka

import akka.Done
import akka.actor.ActorSystem
import akka.stream.QueueOfferResult
import akka.stream.QueueOfferResult.Enqueued
import akka.stream.scaladsl.SourceQueueWithComplete
import akka.testkit.{DefaultTimeout, EventFilter, ImplicitSender, TestActorRef, TestKit, TestProbe}
import com.omearac.producers.DataProducer
import com.omearac.producers.DataProducer.PublishMessages
import com.omearac.shared.AkkaStreams
import com.omearac.shared.EventMessages.{ActivatedProducerStream, MessagesPublished}
import com.omearac.shared.KafkaMessages.ExampleAppEvent
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.Future


class DataProducerSpec extends TestKit(ActorSystem("DataProducerSpec", ConfigFactory.parseString(
  """
    akka.loggers = ["akka.testkit.TestEventListener"] """)))
  with DefaultTimeout with ImplicitSender
  with WordSpecLike with Matchers with BeforeAndAfterAll
  with AkkaStreams {

  val testProducer = TestActorRef(new DataProducer)
  val producerActor = testProducer.underlyingActor

  val mockProducerStream: SourceQueueWithComplete[Any] = new SourceQueueWithComplete[Any] {
    override def complete(): Unit = println("complete")

    override def fail(ex: Throwable): Unit = println("fail")

    override def offer(elem: Any): Future[QueueOfferResult] = Future {
      Enqueued
    }

    override def watchCompletion(): Future[Done] = Future {
      Done
    }
  }

  override def afterAll: Unit = {
    shutdown()
  }

  //Create a test event listener for the local message bus
  val testEventListener = TestProbe()
  system.eventStream.subscribe(testEventListener.ref, classOf[ExampleAppEvent])


  "Sending ActivatedProducerStream to DataProducer in receive state" should {
    "save the stream ref and change state to producing " in {
      testProducer ! ActivatedProducerStream(mockProducerStream, "TestTopic")
      Thread.sleep(500)
      producerActor.producerStream should be(mockProducerStream)
      EventFilter.error(message = "DataProducer got the unknown message while producing: testMessage", occurrences = 1) intercept {
        testProducer ! "testMessage"
      }
    }
  }

  "Sending PublishMessages(number: Int) to DataProducer in publishData state" should {
    "return MessagesPublished(number: Int) and publish the local event " in {
      val producing = producerActor.publishData
      producerActor.context.become(producing)
      producerActor.producerStream = mockProducerStream
      val resultMessage = MessagesPublished(5)
      testProducer ! PublishMessages(5)
      expectMsg(resultMessage)
      testEventListener.expectMsgPF() {
        case ExampleAppEvent(_, _, m) => if (m == resultMessage.toString) () else fail()
      }
    }
  }
} 
Example 186
Source File: DataConsumerSpec.scala    From reactive-kafka-microservice-template   with Apache License 2.0 5 votes vote down vote up
package akka.kafka

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.{DefaultTimeout, ImplicitSender, TestActorRef, TestKit}
import com.omearac.consumers.ConsumerStreamManager.{InitializeConsumerStream, TerminateConsumerStream}
import com.omearac.consumers.DataConsumer
import com.omearac.consumers.DataConsumer.{ConsumerActorReply, ManuallyInitializeStream, ManuallyTerminateStream}
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.collection.mutable.ArrayBuffer


class DataConsumerSpec extends TestKit(ActorSystem("DataConsumerSpec"))
  with DefaultTimeout with ImplicitSender
  with WordSpecLike with Matchers with BeforeAndAfterAll {

  //Creating the Actors
  val testConsumer = TestActorRef(new DataConsumer)
  val mockStreamAndManager = system.actorOf(Props(new MockStreamAndManager), "mockStreamAndManager")

  override def afterAll: Unit = {
    shutdown()
  }

  class MockStreamAndManager extends Actor {
    val receive: Receive = {
      case InitializeConsumerStream(_, _) => testConsumer ! "STREAM_INIT"
      case TerminateConsumerStream(_) => testConsumer ! "STREAM_DONE"
    }
  }


  "Sending ManuallyTerminateStream to DataConsumer in receive state" should {
    "return a Stream Already Stopped reply " in {
      testConsumer ! ManuallyTerminateStream
      expectMsg(ConsumerActorReply("Data Consumer Stream Already Stopped"))
    }
  }

  "Sending ManuallyInitializeStream to DataConsumer in receive state" should {
    "forward the message to the ConsumerStreamManager and change state to consuming" in {
      testConsumer.underlyingActor.consumerStreamManager = mockStreamAndManager
      testConsumer ! ManuallyInitializeStream
      expectMsg(ConsumerActorReply("Data Consumer Stream Started"))
      //Now check for state change
      Thread.sleep(750)
      testConsumer ! ManuallyInitializeStream
      expectMsg(ConsumerActorReply("Data Consumer Already Started"))
    }
  }

  "Sending STREAM_DONE to DataConsumer while in consuming state" should {
    "change state to idle state" in {
      val consuming = testConsumer.underlyingActor.consumingData
      testConsumer.underlyingActor.context.become(consuming)
      testConsumer ! "STREAM_DONE"
      //Now check for state change
      Thread.sleep(750)
      testConsumer ! ManuallyTerminateStream
      expectMsg(ConsumerActorReply("Data Consumer Stream Already Stopped"))
    }
  }
  "Sending ManuallyTerminateStream to DataConsumer while in consuming state" should {
    "forward the message to the ConsumerStreamManager and then upon reply, change state to idle" in {
      val consuming = testConsumer.underlyingActor.consumingData
      testConsumer.underlyingActor.context.become(consuming)
      testConsumer ! ManuallyTerminateStream
      expectMsg(ConsumerActorReply("Data Consumer Stream Stopped"))
      //Now check for state change
      Thread.sleep(750)
      testConsumer ! ManuallyTerminateStream
      expectMsg(ConsumerActorReply("Data Consumer Stream Already Stopped"))
    }
  }

  "Sending ConsumerMessageBatch message" should {
    "reply OK" in {
      val msgBatch: ArrayBuffer[String] = ArrayBuffer("test1")
      val consuming = testConsumer.underlyingActor.consumingData
      testConsumer.underlyingActor.context.become(consuming)
      testConsumer.underlyingActor.consumerStreamManager = mockStreamAndManager
      testConsumer ! msgBatch
      expectMsg("OK")
    }
  }
} 
Example 187
Source File: ProducerStreamManagerSpec.scala    From reactive-kafka-microservice-template   with Apache License 2.0 5 votes vote down vote up
package akka.kafka

import akka.actor.ActorSystem
import akka.stream.scaladsl.SourceQueueWithComplete
import akka.testkit.{DefaultTimeout, ImplicitSender, TestActorRef, TestKit, TestProbe}
import com.omearac.producers.ProducerStreamManager
import com.omearac.producers.ProducerStreamManager.InitializeProducerStream
import com.omearac.shared.AkkaStreams
import com.omearac.shared.EventMessages.ActivatedProducerStream
import com.omearac.shared.KafkaMessages.{ExampleAppEvent, KafkaMessage}
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}


class ProducerStreamManagerSpec extends TestKit(ActorSystem("ProducerStreamManagerSpec"))
  with DefaultTimeout with ImplicitSender
  with WordSpecLike with Matchers with BeforeAndAfterAll
  with AkkaStreams {

  val testProducerStreamManager = TestActorRef(new ProducerStreamManager)
  val producerStreamManagerActor = testProducerStreamManager.underlyingActor

  //Create an test event listener for the local message bus
  val testEventListener = TestProbe()
  system.eventStream.subscribe(testEventListener.ref, classOf[ExampleAppEvent])

  override def afterAll: Unit = {
    shutdown()
  }


  "Sending InitializeProducerStream(self, KafkaMessage) to ProducerStreamManager" should {
    "initialize the stream for that particular message type, return ActivatedProducerStream(streaRef, \"TempChannel1\") and produce local event " in {
      testProducerStreamManager ! InitializeProducerStream(self, KafkaMessage)
      Thread.sleep(500)
      var streamRef: SourceQueueWithComplete[Any] = null
      expectMsgPF() {
        case ActivatedProducerStream(sr, kt) => if (kt == "TempChannel1") {
          streamRef = sr; ()
        } else fail()
      }

      Thread.sleep(500)
      val resultMessage = ActivatedProducerStream(streamRef, "TempChannel1")
      testEventListener.expectMsgPF() {
        case ExampleAppEvent(_, _, m) => if (m == resultMessage.toString) () else fail()
      }
    }
  }

  "Sending InitializeProducerStream(self, ExampleAppEvent) to ProducerStreamManager" should {
    "initialize the stream for that particular message type, return ActivatedProducerStream(streaRef, \"TempChannel2\") and produce local event " in {
      testProducerStreamManager ! InitializeProducerStream(self, ExampleAppEvent)
      Thread.sleep(500)
      var streamRef: SourceQueueWithComplete[Any] = null
      expectMsgPF() {
        case ActivatedProducerStream(sr, kt) => if (kt == "TempChannel2") {
          streamRef = sr; ()
        } else fail()
      }

      Thread.sleep(500)
      val resultMessage = ActivatedProducerStream(streamRef, "TempChannel2")
      testEventListener.expectMsgPF() {
        case ExampleAppEvent(_, _, m) => if (m == resultMessage.toString) () else fail()
      }
    }
  }
} 
Example 188
Source File: DockerTestKit.scala    From docker-it-scala   with MIT License 5 votes vote down vote up
package com.whisk.docker.scalatest

import com.whisk.docker.DockerKit
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time._
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.slf4j.LoggerFactory

trait DockerTestKit extends BeforeAndAfterAll with ScalaFutures with DockerKit { self: Suite =>

  private lazy val log = LoggerFactory.getLogger(this.getClass)

  def dockerInitPatienceInterval =
    PatienceConfig(scaled(Span(20, Seconds)), scaled(Span(10, Millis)))

  def dockerPullImagesPatienceInterval =
    PatienceConfig(scaled(Span(1200, Seconds)), scaled(Span(250, Millis)))

  override def beforeAll(): Unit = {
    super.beforeAll()
    startAllOrFail()
  }

  override def afterAll(): Unit = {
    stopAllQuietly()
    super.afterAll()
  }
} 
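A hedged usage sketch built on the library's DockerKit API; the Spotify-based kit, image name, and port mapping are assumptions about a typical setup, not part of this trait.

import com.whisk.docker.DockerContainer
import com.whisk.docker.impl.spotify.DockerKitSpotify
import org.scalatest.{FlatSpec, Matchers}

class NginxServiceSpec extends FlatSpec with Matchers with DockerTestKit with DockerKitSpotify {

  // One throwaway container; image name is illustrative
  val nginxContainer: DockerContainer = DockerContainer("nginx:1.17").withPorts(80 -> None)

  override def dockerContainers: List[DockerContainer] =
    nginxContainer :: super.dockerContainers

  "the nginx container" should "report ready before tests run" in {
    isContainerReady(nginxContainer).futureValue shouldBe true
  }
}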
Example 189
Source File: PrometheusTests.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.common
import akka.http.scaladsl.coding.Gzip
import akka.http.scaladsl.model.{HttpCharsets, HttpResponse}
import akka.http.scaladsl.model.headers.HttpEncodings.gzip
import akka.http.scaladsl.model.headers.{`Accept-Encoding`, `Content-Encoding`, HttpEncoding, HttpEncodings}
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.http.scaladsl.unmarshalling.Unmarshal
import com.typesafe.config.ConfigFactory
import kamon.Kamon
import org.junit.runner.RunWith
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.junit.JUnitRunner
import org.scalatest.matchers.Matcher
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import scala.concurrent.duration._

@RunWith(classOf[JUnitRunner])
class PrometheusTests extends FlatSpec with Matchers with ScalatestRouteTest with BeforeAndAfterAll with ScalaFutures {
  behavior of "Prometheus"

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    //Modify Kamon to have a very small tick interval
    val newConfig = ConfigFactory.parseString("""kamon {
      |  metric {
      |    tick-interval = 50 ms
      |    optimistic-tick-alignment = no
      |  }
      |}""".stripMargin).withFallback(ConfigFactory.load())
    Kamon.reconfigure(newConfig)
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    Kamon.reconfigure(ConfigFactory.load())
  }

  it should "respond to /metrics" in {
    val api = new KamonPrometheus
    Kamon.counter("foo_bar").withoutTags().increment(42)

    //Sleep to ensure that Kamon metrics are pushed to reporters
    Thread.sleep(2.seconds.toMillis)
    Get("/metrics") ~> `Accept-Encoding`(gzip) ~> api.route ~> check {
      // Check that the response conforms to what the Prometheus scraper accepts
      contentType.charsetOption shouldBe Some(HttpCharsets.`UTF-8`)
      contentType.mediaType.params("version") shouldBe "0.0.4"
      response should haveContentEncoding(gzip)

      val responseText = Unmarshal(Gzip.decodeMessage(response)).to[String].futureValue
      withClue(responseText) {
        responseText should include("foo_bar")
      }
    }
    api.close()
  }

  it should "not be enabled by default" in {
    Get("/metrics") ~> MetricsRoute() ~> check {
      handled shouldBe false
    }
  }

  private def haveContentEncoding(encoding: HttpEncoding): Matcher[HttpResponse] =
    be(encoding) compose {
      (_: HttpResponse).header[`Content-Encoding`].map(_.encodings.head).getOrElse(HttpEncodings.identity)
    }
} 
Example 190
Source File: UserEventTests.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.common

import java.nio.charset.StandardCharsets

import akka.actor.ActorSystem
import common._
import common.rest.WskRestOperations
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
import org.apache.openwhisk.connector.kafka.KafkaConsumerConnector
import org.apache.openwhisk.core.WhiskConfig
import org.apache.openwhisk.core.connector.{Activation, EventMessage, Metric}

import scala.concurrent.duration._

@RunWith(classOf[JUnitRunner])
class UserEventTests extends FlatSpec with Matchers with WskTestHelpers with StreamLogging with BeforeAndAfterAll {

  implicit val wskprops = WskProps()
  implicit val system = ActorSystem("UserEventTestSystem")

  val wsk = new WskRestOperations

  val groupid = "kafkatest"
  val topic = "events"
  val maxPollInterval = 60.seconds

  lazy val consumer = new KafkaConsumerConnector(kafkaHosts, groupid, topic)
  val testActionsDir = WhiskProperties.getFileRelativeToWhiskHome("tests/dat/actions")
  behavior of "UserEvents"

  override def afterAll(): Unit = {
    consumer.close()
  }

  def kafkaHosts: String = new WhiskConfig(WhiskConfig.kafkaHosts).kafkaHosts

  def userEventsEnabled: Boolean = UserEvents.enabled

  if (userEventsEnabled) {
    it should "invoke an action and produce user events" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
      val file = Some(TestUtils.getTestActionFilename("hello.js"))
      val name = "testUserEvents"

      assetHelper.withCleaner(wsk.action, name, confirmDelete = true) { (action, _) =>
        action.create(name, file)
      }

      val run = wsk.action.invoke(name, blocking = true)

      withActivation(wsk.activation, run) { result =>
        withClue("invoking an action was unsuccessful") {
          result.response.status shouldBe "success"
        }
      }
      // wait for any metrics to arrive and parse them
      val received =
        consumer.peek(maxPollInterval).map {
          case (_, _, _, msg) => EventMessage.parse(new String(msg, StandardCharsets.UTF_8))
        }
      received.foreach { event =>
        event.get.body match {
          case a: Activation =>
            Seq(a.statusCode) should contain oneOf (0, 1, 2, 3)
            event.get.source should fullyMatch regex "(invoker|controller)\\w+".r
          case m: Metric =>
            Seq(m.metricName) should contain oneOf ("ConcurrentInvocations", "ConcurrentRateLimit", "TimedRateLimit")
            event.get.source should fullyMatch regex "controller\\w+".r
        }
      }
      // expect at least 2 events: an Activation and a 'ConcurrentInvocations' Metric;
      // more may be present because other events can occur in between
      received.size should be >= 2
      consumer.commit()
    }

  }

} 
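
Because ScalaTest registers tests when the suite is constructed, the `if (userEventsEnabled)` guard above simply skips registration when user events are disabled. A minimal sketch of that guard pattern, with a hypothetical `featureEnabled` flag:

import org.scalatest.{FlatSpec, Matchers}

class GuardedRegistrationSketch extends FlatSpec with Matchers {
  // Hypothetical flag; UserEventTests reads UserEvents.enabled instead.
  def featureEnabled: Boolean = sys.props.get("feature.enabled").contains("true")

  behavior of "a guarded feature"

  if (featureEnabled) {
    // This test only exists in the suite when the flag is set.
    it should "run the feature-specific test" in {
      featureEnabled shouldBe true
    }
  }
}
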
Example 191
Source File: KubeClientSupport.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.containerpool.kubernetes.test

import common.StreamLogging
import io.fabric8.kubernetes.client.server.mock.KubernetesMockServer
import io.fabric8.kubernetes.client.utils.HttpClientUtils.createHttpClientForMockServer
import io.fabric8.kubernetes.client.{ConfigBuilder, DefaultKubernetesClient}
import okhttp3.TlsVersion.TLS_1_0
import org.scalatest.{BeforeAndAfterAll, Suite, TestSuite}

import scala.concurrent.duration._

trait KubeClientSupport extends TestSuite with BeforeAndAfterAll with StreamLogging {
  self: Suite =>

  protected def useMockServer = true

  val server = new KubernetesMockServer(false)

  protected lazy val (kubeClient, closeable) = {
    if (useMockServer) {
      server.init()
      def defaultClient = {
        val config = new ConfigBuilder()
          .withMasterUrl(server.url("/"))
          .withTrustCerts(true)
          .withTlsVersions(TLS_1_0)
          .withNamespace("test")
          .build
        new DefaultKubernetesClient(createHttpClientForMockServer(config), config)
      }
      (defaultClient, () => server.destroy())
    } else {
      val client = new DefaultKubernetesClient(
        new ConfigBuilder()
          .withConnectionTimeout(1.minute.toMillis.toInt)
          .withRequestTimeout(1.minute.toMillis.toInt)
          .build())
      (client, () => client.close())
    }
  }

  override def beforeAll(): Unit = {
    if (!useMockServer) {
      val kubeconfig = sys.env.get("KUBECONFIG")
      assume(kubeconfig.isDefined, "KUBECONFIG env must be defined")
      println(s"Using kubeconfig from ${kubeconfig.get}")
    }
    super.beforeAll()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    closeable.apply()
  }
} 
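
A consuming suite mixes the trait in and touches the lazy `kubeClient`, which starts the mock server on first access. A minimal sketch with a hypothetical suite name, assuming it sits in the same test package:

package org.apache.openwhisk.core.containerpool.kubernetes.test

import org.scalatest.{FlatSpec, Matchers}

class KubeClientSupportSketch extends FlatSpec with Matchers with KubeClientSupport {
  behavior of "the kube client from KubeClientSupport"

  it should "be configured against the mock server's namespace" in {
    // First access to kubeClient initializes the lazy val, which also
    // starts the mock server via server.init().
    kubeClient.getNamespace shouldBe "test"
  }
}
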
Example 192
Source File: LogDriverLogStoreTests.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.containerpool.logging

import akka.actor.ActorSystem
import akka.testkit.TestKit
import org.junit.runner.RunWith
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import org.scalatest.junit.JUnitRunner
import org.apache.openwhisk.core.containerpool.ContainerArgsConfig

@RunWith(classOf[JUnitRunner])
class LogDriverLogStoreTests
    extends TestKit(ActorSystem("LogDriverLogStore"))
    with FlatSpecLike
    with Matchers
    with BeforeAndAfterAll {

  val testConfig = ContainerArgsConfig(
    network = "network",
    extraArgs =
      Map("log-driver" -> Set("fluentd"), "log-opt" -> Set("fluentd-address=localhost:24225", "tag=OW_CONTAINER")))
  behavior of "LogDriver LogStore"

  override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }

  it should "set the container parameters from the config" in {
    val logDriverLogStore = new LogDriverLogStore(system)
    logDriverLogStore.containerParameters shouldBe Map.empty
  }
} 
Example 193
Source File: WhiskAdminCliTestBase.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.database

import akka.stream.ActorMaterializer
import common.{StreamLogging, WskActorSystem}
import org.rogach.scallop.throwError
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FlatSpec, Matchers}
import org.apache.openwhisk.core.cli.{Conf, WhiskAdmin}
import org.apache.openwhisk.core.database.test.DbUtils
import org.apache.openwhisk.core.entity.WhiskAuthStore

import scala.util.Random

trait WhiskAdminCliTestBase
    extends FlatSpec
    with WskActorSystem
    with DbUtils
    with StreamLogging
    with BeforeAndAfterEach
    with BeforeAndAfterAll
    with ScalaFutures
    with Matchers {

  implicit val materializer = ActorMaterializer()
  // Keep the timeout used by ScalaFutures in sync with the DbUtils operation timeout
  implicit override val patienceConfig: PatienceConfig = PatienceConfig(timeout = dbOpTimeout)
  protected val authStore = WhiskAuthStore.datastore()

  // Ensure scallop does not exit upon validation failure
  throwError.value = true

  override def afterEach(): Unit = {
    cleanup()
  }

  override def afterAll(): Unit = {
    println("Shutting down store connections")
    authStore.shutdown()
    super.afterAll()
  }

  protected def randomString(len: Int = 5): String = Random.alphanumeric.take(len).mkString

  protected def resultOk(args: String*): String =
    WhiskAdmin(new Conf(args.toSeq))
      .executeCommand()
      .futureValue
      .right
      .get

  protected def resultNotOk(args: String*): String =
    WhiskAdmin(new Conf(args.toSeq))
      .executeCommand()
      .futureValue
      .left
      .get
      .message
} 
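
A concrete suite then only supplies the commands under test via `resultOk`/`resultNotOk`. A minimal sketch, assuming a `user create` subcommand of the admin CLI (hypothetical suite name):

package org.apache.openwhisk.core.database

class WhiskAdminCliSketch extends WhiskAdminCliTestBase {
  behavior of "wskadmin"

  it should "create a subject and print a result" in {
    val subject = "sketch-" + randomString()
    // resultOk unwraps the right side of the command result; for a
    // successful `user create` that is assumed to be the generated auth key.
    resultOk("user", "create", subject) should not be empty
  }
}
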
Example 194
Source File: CosmosDBTestSupport.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.database.cosmosdb

import com.microsoft.azure.cosmosdb.{Database, SqlParameter, SqlParameterCollection, SqlQuerySpec}
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike}
import pureconfig._
import pureconfig.generic.auto._
import org.apache.openwhisk.core.ConfigKeys
import org.apache.openwhisk.core.database.test.behavior.ArtifactStoreTestUtil.storeAvailable

import scala.collection.mutable.ListBuffer
import scala.util.{Random, Try}

trait CosmosDBTestSupport extends FlatSpecLike with BeforeAndAfterAll with RxObservableImplicits {
  private val dbsToDelete = ListBuffer[Database]()

  lazy val storeConfigTry = Try { loadConfigOrThrow[CosmosDBConfig](ConfigKeys.cosmosdb) }
  lazy val client = storeConfig.createClient()
  val useExistingDB = java.lang.Boolean.getBoolean("whisk.cosmosdb.useExistingDB")

  def storeConfig = storeConfigTry.get

  override protected def withFixture(test: NoArgTest) = {
    assume(storeAvailable(storeConfigTry), "CosmosDB not configured or available")
    super.withFixture(test)
  }

  protected def generateDBName() = {
    s"travis-${getClass.getSimpleName}-${Random.alphanumeric.take(5).mkString}"
  }

  protected def createTestDB() = {
    if (useExistingDB) {
      val db = getOrCreateDatabase()
      println(s"Using existing database ${db.getId}")
      db
    } else {
      val databaseDefinition = new Database
      databaseDefinition.setId(generateDBName())
      val db = client.createDatabase(databaseDefinition, null).blockingResult()
      dbsToDelete += db
      println(s"Created database ${db.getId}")
      db
    }
  }

  private def getOrCreateDatabase(): Database = {
    client
      .queryDatabases(querySpec(storeConfig.db), null)
      .blockingOnlyResult()
      .getOrElse {
        client.createDatabase(newDatabase, null).blockingResult()
      }
  }

  protected def querySpec(id: String) =
    new SqlQuerySpec("SELECT * FROM root r WHERE r.id=@id", new SqlParameterCollection(new SqlParameter("@id", id)))

  private def newDatabase = {
    val databaseDefinition = new Database
    databaseDefinition.setId(storeConfig.db)
    databaseDefinition
  }

  override def afterAll(): Unit = {
    super.afterAll()
    if (!useExistingDB) {
      dbsToDelete.foreach(db => client.deleteDatabase(db.getSelfLink, null).blockingResult())
    }
    client.close()
  }
} 
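
A consuming suite only needs `createTestDB()`; databases it creates are tracked and deleted in `afterAll`, and `withFixture` turns the whole suite into a no-op when CosmosDB is not configured. A minimal sketch with a hypothetical suite name:

package org.apache.openwhisk.core.database.cosmosdb

import org.scalatest.Matchers

class CosmosDBTestSupportSketch extends CosmosDBTestSupport with Matchers {
  behavior of "test database management"

  it should "create a tracked database" in {
    // Deleted in afterAll unless whisk.cosmosdb.useExistingDB is set.
    val db = createTestDB()
    db.getId should not be empty
  }
}
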
Example 195
Source File: S3Minio.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.database.s3

import java.net.ServerSocket

import actionContainers.ActionContainer
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.typesafe.config.ConfigFactory
import common.{SimpleExec, StreamLogging}
import org.scalatest.{BeforeAndAfterAll, FlatSpec}
import org.apache.openwhisk.common.{Logging, TransactionId}
import org.apache.openwhisk.core.database.{AttachmentStore, DocumentSerializer}

import scala.concurrent.duration._
import scala.reflect.ClassTag

trait S3Minio extends FlatSpec with BeforeAndAfterAll with StreamLogging {
  def makeS3Store[D <: DocumentSerializer: ClassTag]()(implicit actorSystem: ActorSystem,
                                                       logging: Logging,
                                                       materializer: ActorMaterializer): AttachmentStore = {
    val config = ConfigFactory.parseString(s"""
      |whisk {
      |     s3 {
      |      alpakka {
      |         aws {
      |           credentials {
      |             provider = static
      |             access-key-id = "$accessKey"
      |             secret-access-key = "$secretAccessKey"
      |           }
      |           region {
      |             provider = static
      |             default-region = us-west-2
      |           }
      |         }
      |         endpoint-url = "http://localhost:$port"
      |      }
      |      bucket = "$bucket"
      |      $prefixConfig
      |     }
      |}
      """.stripMargin).withFallback(ConfigFactory.load())
    S3AttachmentStoreProvider.makeStore[D](config)
  }

  private val accessKey = "TESTKEY"
  private val secretAccessKey = "TESTSECRET"
  private val port = freePort()
  private val bucket = "test-ow-travis"

  private def prefixConfig = {
    if (bucketPrefix.nonEmpty) s"prefix = $bucketPrefix" else ""
  }

  protected def bucketPrefix: String = ""

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    dockerExec(
      s"run -d -e MINIO_ACCESS_KEY=$accessKey -e MINIO_SECRET_KEY=$secretAccessKey -p $port:9000 minio/minio server /data")
    println(s"Started minio on $port")
    createTestBucket()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    val containerId = dockerExec("ps -q --filter ancestor=minio/minio")
    containerId.split("\n").map(_.trim).foreach(id => dockerExec(s"stop $id"))
    println(s"Stopped minio container")
  }

  def createTestBucket(): Unit = {
    val endpoint = new EndpointConfiguration(s"http://localhost:$port", "us-west-2")
    val client = AmazonS3ClientBuilder.standard
      .withPathStyleAccessEnabled(true)
      .withEndpointConfiguration(endpoint)
      .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretAccessKey)))
      .build

    org.apache.openwhisk.utils.retry(client.createBucket(bucket), 6, Some(1.minute))
    println(s"Created bucket $bucket")
  }

  private def dockerExec(cmd: String): String = {
    implicit val tid: TransactionId = TransactionId.testing
    val command = s"${ActionContainer.dockerCmd} $cmd"
    val cmdSeq = command.split(" ").map(_.trim).filter(_.nonEmpty)
    val (out, err, code) = SimpleExec.syncRunCmd(cmdSeq)
    assert(code == 0, s"Error occurred for command '$command'. Exit code: $code, Error: $err")
    out
  }

  private def freePort(): Int = {
    val socket = new ServerSocket(0)
    try socket.getLocalPort
    finally socket.close() // the bind either succeeded or threw, so the socket is never null here
  }
} 
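
Concrete suites mix the trait in and, where needed, override `bucketPrefix` to exercise the optional `prefix = ...` fragment of the generated config. A minimal sketch with a hypothetical suite name, assuming docker is available as in the trait itself:

package org.apache.openwhisk.core.database.s3

class S3MinioWithPrefixSketch extends S3Minio {
  override protected def bucketPrefix: String = "sketch"

  behavior of "minio-backed attachment store setup"

  it should "start minio and create the test bucket" in {
    // beforeAll already ran the minio container and created the bucket,
    // so reaching this point means the docker-based setup succeeded.
    succeed
  }
}
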
Example 196
Source File: WskActorSystem.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package common

import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.DurationInt

import akka.actor.ActorSystem
import akka.http.scaladsl.Http

import org.scalatest.BeforeAndAfterAll
import org.scalatest.Suite


trait WskActorSystem extends BeforeAndAfterAll {
  self: Suite =>

  implicit val actorSystem: ActorSystem = ActorSystem()

  implicit def executionContext: ExecutionContext = actorSystem.dispatcher

  override def afterAll() = {
    try {
      Await.result(Http().shutdownAllConnectionPools(), 30.seconds)
    } finally {
      actorSystem.terminate()
      Await.result(actorSystem.whenTerminated, 30.seconds)
    }
    super.afterAll()
  }
} 
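
Suites that need futures but no further fixtures just mix the trait in. A minimal sketch with a hypothetical suite name:

package common

import scala.concurrent.Future

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{FlatSpec, Matchers}

class WskActorSystemSketch extends FlatSpec with Matchers with ScalaFutures with WskActorSystem {
  "WskActorSystem" should "provide an execution context for futures" in {
    // executionContext from the trait is the actor system's dispatcher.
    Future(21 * 2).futureValue shouldBe 42
  }
}
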
Example 197
Source File: ApiTests.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.monitoring.metrics

import akka.http.scaladsl.model.headers.HttpEncodings._
import akka.http.scaladsl.model.headers.{`Accept-Encoding`, `Content-Encoding`, HttpEncoding, HttpEncodings}
import akka.http.scaladsl.model.{HttpCharsets, HttpEntity, HttpResponse}
import akka.http.scaladsl.testkit.ScalatestRouteTest
import kamon.prometheus.PrometheusReporter
import org.apache.openwhisk.core.monitoring.metrics.OpenWhiskEvents.MetricConfig
import org.junit.runner.RunWith
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.junit.JUnitRunner
import org.scalatest.matchers.Matcher
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
import pureconfig.loadConfigOrThrow
import io.prometheus.client.CollectorRegistry
import pureconfig.generic.auto._

import scala.concurrent.duration.DurationInt

@RunWith(classOf[JUnitRunner])
class ApiTests
    extends FlatSpec
    with Matchers
    with ScalatestRouteTest
    with EventsTestHelper
    with ScalaFutures
    with BeforeAndAfterAll {
  implicit val timeoutConfig = PatienceConfig(1.minute)

  private var api: PrometheusEventsApi = _
  private var consumer: EventConsumer = _

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    CollectorRegistry.defaultRegistry.clear()
    val metricConfig = loadConfigOrThrow[MetricConfig](system.settings.config, "user-events")
    val metricRecorder = PrometheusRecorder(new PrometheusReporter, metricConfig)
    consumer = createConsumer(56754, system.settings.config, metricRecorder)
    api = new PrometheusEventsApi(consumer, createExporter())
  }

  protected override def afterAll(): Unit = {
    consumer.shutdown().futureValue
    super.afterAll()
  }

  behavior of "EventsApi"

  it should "respond ping request" in {
    Get("/ping") ~> api.routes ~> check {
      // Due to retries, using a random port does not immediately result in failure
      handled shouldBe true
    }
  }

  it should "respond metrics request" in {
    Get("/metrics") ~> `Accept-Encoding`(gzip) ~> api.routes ~> check {
      contentType.charsetOption shouldBe Some(HttpCharsets.`UTF-8`)
      contentType.mediaType.params("version") shouldBe "0.0.4"
      response should haveContentEncoding(gzip)
    }
  }

  private def haveContentEncoding(encoding: HttpEncoding): Matcher[HttpResponse] =
    be(encoding) compose {
      (_: HttpResponse).header[`Content-Encoding`].map(_.encodings.head).getOrElse(HttpEncodings.identity)
    }

  private def createExporter(): PrometheusExporter = () => HttpEntity(PrometheusExporter.textV4, "foo".getBytes)
} 
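
The super calls in `beforeAll`/`afterAll` above follow the usual stacking discipline: set-up delegates to `super` first, tear-down delegates last, so resources are released in reverse order of acquisition. A minimal sketch of that ordering:

import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

class LifecycleOrderSketch extends FlatSpec with Matchers with BeforeAndAfterAll {
  private val events = scala.collection.mutable.ArrayBuffer[String]()

  override protected def beforeAll(): Unit = {
    super.beforeAll()   // let stacked traits initialize first
    events += "acquire" // then acquire this suite's resources
  }

  override protected def afterAll(): Unit = {
    events += "release" // release this suite's resources first
    super.afterAll()    // then let stacked traits clean up
  }

  behavior of "the suite lifecycle"

  it should "see resources acquired before any test runs" in {
    events should contain("acquire")
  }
}
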
Example 198
Source File: GrpcSpec.scala    From mleap   with Apache License 2.0 5 votes vote down vote up
package ml.combust.mleap.grpc.server

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKit
import io.grpc.{ManagedChannel, Server}
import ml.combust.mleap.executor.service.TransformService
import ml.combust.mleap.executor.testkit.TransformServiceSpec
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatest.concurrent.ScalaFutures

import scala.concurrent.duration._
import ml.combust.mleap.grpc.server.TestUtil._

class GrpcSpec extends TestKit(ActorSystem("grpc-server-test"))
  with TransformServiceSpec
  with BeforeAndAfterEach
  with BeforeAndAfterAll
  with ScalaFutures {

  private lazy val server = createServer(system)
  private lazy val channel = inProcessChannel
  private lazy val client = createClient(channel)

  override lazy val transformService: TransformService = {
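    // touching the lazy vals in order: start the in-process server first,
    // then create the client connected to it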
    server
    client
  }

  override implicit def materializer: Materializer = ActorMaterializer()(system)

  override protected def afterAll(): Unit = {
    server.shutdown()
    channel.shutdown()
    TestKit.shutdownActorSystem(system, 5.seconds, verifySystemShutdown = true)
    super.afterAll()
  }
} 
Example 199
Source File: MleapExecutorSpec.scala    From mleap   with Apache License 2.0 5 votes vote down vote up
package ml.combust.mleap.executor

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKit
import ml.combust.mleap.executor.testkit.{TestUtil, TransformServiceSpec}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._

class MleapExecutorSpec extends TestKit(ActorSystem("MleapExecutorSpec"))
  with TransformServiceSpec
  with BeforeAndAfterAll
  with ScalaFutures {

  override lazy val transformService: MleapExecutor = MleapExecutor(system)
  private val frame = TestUtil.frame
  override implicit val materializer: Materializer = ActorMaterializer()(system)

  override protected def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system, 5.seconds, verifySystemShutdown = true)
    super.afterAll()
  }
} 
Example 200
Source File: FileRepositorySpec.scala    From mleap   with Apache License 2.0 5 votes vote down vote up
package ml.combust.mleap.executor.repository

import java.io.File
import java.net.URI
import java.nio.file.Files

import ml.combust.mleap.executor.error.BundleException
import ml.combust.mleap.executor.testkit.TestUtil
import org.scalatest.{BeforeAndAfterAll, FunSpec, Matchers}
import org.scalatest.concurrent.ScalaFutures

class FileRepositorySpec extends FunSpec
  with ScalaFutures
  with Matchers
  with BeforeAndAfterAll {
  val repository = new FileRepository()

  override protected def afterAll(): Unit = {
    repository.shutdown()
    super.afterAll()
  }

  describe("downloading a local bundle") {
    it("returns the local file path") {
      val path = repository.downloadBundle(TestUtil.lrUri)

      whenReady(path) {
        p => assert(Files.readAllBytes(new File(TestUtil.lrUri.getPath).toPath).sameElements(Files.readAllBytes(p)))
      }
    }

    it("throws an exception when local file doesn't exist") {
      whenReady(repository.downloadBundle(URI.create("does-not-exist")).failed) {
        ex => ex shouldBe a [BundleException]
      }
    }

    it("throws an exception with empty file path") {
      whenReady(repository.downloadBundle(URI.create("")).failed) {
        ex => ex shouldBe a [BundleException]
      }
    }
  }
}
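
FileRepositorySpec leans on ScalaFutures' default patience; when an operation can legitimately take longer, `whenReady` also accepts an explicit timeout. A minimal generic sketch (no project classes assumed):

import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{FunSpec, Matchers}

class WhenReadyTimeoutSketch extends FunSpec with Matchers with ScalaFutures {
  describe("whenReady with an explicit timeout") {
    it("waits up to the given span for the future to complete") {
      whenReady(Future(1 + 1), timeout(Span(5, Seconds))) { result =>
        result shouldBe 2
      }
    }
  }
}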