scala.concurrent.duration.Duration Scala Examples

The following examples show how to use scala.concurrent.duration.Duration. Each example is taken from an open-source project; the source file, project, and license are noted in the header above it.
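Before the project examples, here is a minimal, self-contained sketch of the Duration API itself: the equivalent construction forms, the duration DSL, and the special infinite values (the object name DurationBasics is illustrative only).

import java.util.concurrent.TimeUnit
import scala.concurrent.duration._

object DurationBasics extends App {
  // Equivalent construction forms: (length, unit), string parsing, and the DSL.
  val a: FiniteDuration = Duration(5, TimeUnit.SECONDS)
  val b: Duration       = Duration(5, "seconds")
  val c: Duration       = Duration("5 seconds")
  val d: FiniteDuration = 5.seconds
  assert(a == b && b == c && c == d)

  // Arithmetic and conversions are unit-aware.
  assert((a + 500.millis).toMillis == 5500L)

  // Duration.Inf models an unbounded wait and is not a FiniteDuration.
  assert(!Duration.Inf.isFinite)
}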
Example 1
Source File: TimeLimitedFutureSpec.scala    From gfc-concurrent with Apache License 2.0
package com.gilt.gfc.concurrent

import java.util.concurrent.TimeoutException
import scala.concurrent.{ Future, Await }
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import org.scalatest.{WordSpec, Matchers}

class TimeLimitedFutureSpec extends WordSpec with Matchers {
  import TimeLimitedFutureSpec._

  "RichFuture" when {
    import ScalaFutures._

    "waiting for a result to happen" should {
      "return the completed original Future if it completes before the given timeout" in {
        val now = System.currentTimeMillis
        val future: Future[String] = (Future { Thread.sleep(1000); "Here I am" }).withTimeout(Duration(5, "seconds"))
        val msg: String = Await.result(future, Duration(10, "seconds"))
        val elapsed = (System.currentTimeMillis - now)
        msg should equal ("Here I am")
        elapsed should be (2000L +- 1000L)
      }

      "return the failure of the original Future if it fails before the given timeout" in {
        val now = System.currentTimeMillis
        val future = (Future { Thread.sleep(1000); throw new NullPointerException("That hurts!") }).withTimeout(Duration(5, "seconds"))
        a [NullPointerException] should be thrownBy { Await.result(future, Duration(10, "seconds")) }
        val elapsed = (System.currentTimeMillis - now)
        elapsed should be (2000L +- 1000L)
      }

      "return the timeout of the original Future if it had one and it went off and was shorter than the given one" in {
        val now = System.currentTimeMillis
        val timingOutEarlier = Timeouts.timeout(Duration(1, "seconds"))
        val future = timingOutEarlier.withTimeout(Duration(5, "seconds"))
        a [TimeoutException] should be thrownBy { Await.result(future, Duration(10, "seconds")) }
        val elapsed: Long = (System.currentTimeMillis - now)
        elapsed should be >= 500L
        elapsed should be <= 4000L
      }

      "return the timeout if the original Future does not timeout of its own" in {
        val now = System.currentTimeMillis
        val timingOutLater = Timeouts.timeout(Duration(3, "seconds"))
        val future = timingOutLater.withTimeout(Duration(1, "seconds"))
        a [TimeoutException] should be thrownBy  { Await.result(future, Duration(10, "seconds")) }
        val elapsed: Long = (System.currentTimeMillis - now)
        elapsed should be >= 1000L
        elapsed should be <= 2500L
      }
    }

    // an example of how it could be used
    "used in our most common use case" should {
      "fit nicely" in {
        val call: Future[String] = svcCall(1000).withTimeout(Duration(5000, "milliseconds")).recover {
          case _: TimeoutException => "recover.timeout"
          case other => s"recover.${other.getMessage}"
        }
        Await.result(call, Duration(10, "seconds")) should be ("data-1000")

        val call2: Future[String] = svcCall(5000).withTimeout(Duration(1000, "milliseconds")).recover {
          case _: TimeoutException => "recover.timeout"
          case other => s"recover.${other.getMessage}"
        }
        Await.result(call2, Duration(10, "seconds")) should be ("recover.timeout")
      }
    }
  }
}

object TimeLimitedFutureSpec {
  def svcCall(latency: Long): Future[String] = Future { Thread.sleep(latency); s"data-${latency}" }
} 
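The withTimeout combinator above comes from gfc-concurrent, and its implementation is not shown here. As a rough sketch of the underlying idea (the FutureTimeout object and its withTimeout method below are hypothetical names, not the library's code), the original future can be raced against a scheduled TimeoutException via a Promise:

import java.util.concurrent.{Executors, TimeUnit, TimeoutException}
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.duration.FiniteDuration

object FutureTimeout {
  private val scheduler = Executors.newSingleThreadScheduledExecutor()

  // Completes with the original future's outcome, or fails with a
  // TimeoutException if `timeout` elapses first, whichever wins the race.
  def withTimeout[T](f: Future[T], timeout: FiniteDuration)
                    (implicit ec: ExecutionContext): Future[T] = {
    val p = Promise[T]()
    val timeoutTask = scheduler.schedule(new Runnable {
      def run(): Unit = p.tryFailure(new TimeoutException(s"Timed out after $timeout"))
    }, timeout.toNanos, TimeUnit.NANOSECONDS)
    f.onComplete { result =>
      timeoutTask.cancel(false) // the future won; drop the pending timeout
      p.tryComplete(result)
    }
    p.future
  }
}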
Example 2
Source File: DsmReceiver.scala    From Linkis with Apache License 2.0
package com.webank.wedatasphere.linkis.datasourcemanager.core.receivers

import com.webank.wedatasphere.linkis.common.utils.{Logging, Utils}
import com.webank.wedatasphere.linkis.datasourcemanager.common.domain.DataSource
import com.webank.wedatasphere.linkis.datasourcemanager.common.protocol.{DsInfoQueryRequest, DsInfoResponse}
import com.webank.wedatasphere.linkis.datasourcemanager.core.restful.RestfulApiHelper
import com.webank.wedatasphere.linkis.datasourcemanager.core.service.{DataSourceInfoService, DataSourceRelateService}
import com.webank.wedatasphere.linkis.rpc.{Receiver, Sender}
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.stereotype.Component

import java.util
import scala.concurrent.duration.Duration

@Component
class DsmReceiver extends Receiver with Logging {

  @Autowired
  private var dataSourceInfoService: DataSourceInfoService = _

  @Autowired
  private var dataSourceRelateService: DataSourceRelateService = _

  override def receive(message: Any, sender: Sender): Unit = ???

  override def receiveAndReply(message: Any, sender: Sender): Any = message match {
    case DsInfoQueryRequest(id, system) =>
      if (id.toLong > 0 && Option(system).isDefined) {
        Utils.tryCatch {
          val dataSource: DataSource = dataSourceInfoService.getDataSourceInfo(id.toLong, system)
          if ( null != dataSource ) {
            RestfulApiHelper.decryptPasswordKey(dataSourceRelateService.getKeyDefinitionsByType(dataSource.getDataSourceTypeId),
              dataSource.getConnectParams)
            DsInfoResponse(status = true, dataSource.getDataSourceType.getName,
              dataSource.getConnectParams, dataSource.getCreateUser)
          } else DsInfoResponse(status = true, "", new util.HashMap[String, Object](), "")
        }{
          case e: Exception => logger.error(s"Fail to get data source information, id:$id system:$system", e)
            DsInfoResponse(status = false, "", new util.HashMap[String, Object](), "")
          case t: Throwable => logger.error(s"Fail to get data source information, id:$id system:$system", t)
            DsInfoResponse(status = false, "", new util.HashMap[String, Object](), "")
        }
      } else {
        DsInfoResponse(status = true, "", new util.HashMap[String, Object](), "")
      }
    case _ => new Object()
  }

  override def receiveAndReply(message: Any, duration: Duration, sender: Sender): Any = message match {
    case DsInfoQueryRequest(id, system) =>
      if (id.toLong > 0 && Option(system).isDefined) {
        Utils.tryCatch {
          val dataSource: DataSource = dataSourceInfoService.getDataSourceInfo(id.toLong, system)
          if ( null != dataSource ) {
            RestfulApiHelper.decryptPasswordKey(dataSourceRelateService.getKeyDefinitionsByType(dataSource.getDataSourceTypeId),
              dataSource.getConnectParams)
            DsInfoResponse(status = true, dataSource.getDataSourceType.getName,
              dataSource.getConnectParams, dataSource.getCreateUser)
          } else DsInfoResponse(status = true, "", new util.HashMap[String, Object](), "")
        }{
          case e: Exception => logger.error(s"Fail to get data source information, id:$id system:$system", e)
            DsInfoResponse(status = false, "", new util.HashMap[String, Object](), "")
          case t: Throwable => logger.error(s"Fail to get data source information, id:$id system:$system", t)
            DsInfoResponse(status = false, "", new util.HashMap[String, Object](), "")
        }
      } else {
        DsInfoResponse(status = true, "", new util.HashMap[String, Object](), "")
      }
    case _ => new Object()
  }
} 
Example 3
Source File: AbstractWebServer.scala    From ohara with Apache License 2.0
package oharastream.ohara.shabondi.common

import akka.Done
import akka.actor.ActorSystem
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.{Directives, Route}
import akka.http.scaladsl.settings.ServerSettings
import oharastream.ohara.common.util.Releasable

import scala.concurrent._
import scala.concurrent.duration.Duration
import scala.io.StdIn
import scala.util.{Failure, Success}


private[shabondi] abstract class AbstractWebServer extends Directives with Releasable {
  implicit protected val actorSystem: ActorSystem = ActorSystem(Logging.simpleName(this).replaceAll("\\$", ""))

  protected def routes: Route

  protected def postBinding(binding: ServerBinding): Unit = {
    val hostname = binding.localAddress.getHostName
    val port     = binding.localAddress.getPort
    actorSystem.log.info(s"Server online at http://$hostname:$port/")
  }

  protected def postBindingFailure(cause: Throwable): Unit = {
    actorSystem.log.error(cause, s"Error starting the server ${cause.getMessage}")
  }

  protected def waitForShutdownSignal()(implicit ec: ExecutionContext): Future[Done] = {
    val promise = Promise[Done]()
    sys.addShutdownHook {
      promise.trySuccess(Done)
    }
    Future {
      blocking {
        if (StdIn.readLine("Press <RETURN> to stop Shabondi WebServer...\n") != null)
          promise.trySuccess(Done)
      }
    }
    promise.future
  }

  protected def postServerShutdown(): Unit = actorSystem.log.info("Shutting down the server")

  def start(bindInterface: String, port: Int): Unit = {
    start(bindInterface, port, ServerSettings(actorSystem))
  }

  def start(bindInterface: String, port: Int, settings: ServerSettings): Unit = {
    implicit val executionContext: ExecutionContextExecutor = actorSystem.dispatcher

    val bindingFuture: Future[Http.ServerBinding] = Http().bindAndHandle(
      handler = routes,
      interface = bindInterface,
      port = port,
      settings = settings
    )

    bindingFuture.onComplete {
      case Success(binding) =>
        postBinding(binding)
      case Failure(cause) =>
        postBindingFailure(cause)
    }

    Await.ready(
      bindingFuture.flatMap(_ => waitForShutdownSignal()),
      Duration.Inf
    )

    bindingFuture
      .flatMap(_.unbind())
      .onComplete { _ =>
        postServerShutdown()
        actorSystem.terminate()
      }
  }

  override def close(): Unit = actorSystem.terminate()
} 
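The server blocks its main thread with Await.ready(..., Duration.Inf) until the shutdown signal arrives. A short sketch of how Await.result and Await.ready differ, and how a Duration bounds (or unbounds) the wait:

import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

object AwaitForms extends App {
  val f = Future { 21 * 2 }

  // Await.result returns the value (or rethrows the future's failure),
  // throwing java.util.concurrent.TimeoutException after 5 seconds.
  val n: Int = Await.result(f, 5.seconds)
  println(n)

  // Await.ready only blocks until completion and returns the future itself;
  // Duration.Inf removes the bound entirely.
  Await.ready(f, Duration.Inf)
}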
Example 4
Source File: BasicTestPerformance4Ftp.scala    From ohara with Apache License 2.0
package oharastream.ohara.it.performance

import java.io.{BufferedWriter, OutputStreamWriter}
import java.util.concurrent.atomic.LongAdder

import oharastream.ohara.common.data.Row
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import org.junit.AssumptionViolatedException
import spray.json.{JsNumber, JsString, JsValue}

import scala.jdk.CollectionConverters._
import oharastream.ohara.client.filesystem.FileSystem

import scala.concurrent.duration.Duration

abstract class BasicTestPerformance4Ftp extends BasicTestPerformance {
  private[this] val ftpHostname = value(PerformanceTestingUtils.FTP_HOSTNAME_KEY)
    .getOrElse(throw new AssumptionViolatedException(s"${PerformanceTestingUtils.FTP_HOSTNAME_KEY} is required"))

  private[this] val ftpPort = value(PerformanceTestingUtils.FTP_PORT_KEY)
    .getOrElse(throw new AssumptionViolatedException(s"${PerformanceTestingUtils.FTP_PORT_KEY} is required"))
    .toInt

  private[this] val ftpUser = value(PerformanceTestingUtils.FTP_USER_KEY)
    .getOrElse(throw new AssumptionViolatedException(s"${PerformanceTestingUtils.FTP_USER_KEY} is required"))

  private[this] val ftpPassword = value(PerformanceTestingUtils.FTP_PASSWORD_KEY)
    .getOrElse(throw new AssumptionViolatedException(s"${PerformanceTestingUtils.FTP_PASSWORD_KEY} is required"))

  
  protected val ftpSettings: Map[String, JsValue] = Map(
    // convert the hostname to IP address
    oharastream.ohara.connector.ftp.FTP_HOSTNAME_KEY  -> JsString(ftpHostname),
    oharastream.ohara.connector.ftp.FTP_PORT_KEY      -> JsNumber(ftpPort),
    oharastream.ohara.connector.ftp.FTP_USER_NAME_KEY -> JsString(ftpUser),
    oharastream.ohara.connector.ftp.FTP_PASSWORD_KEY  -> JsString(ftpPassword)
  )

  private[this] val csvInputFolderKey       = PerformanceTestingUtils.CSV_INPUT_KEY
  private[this] val csvOutputFolder: String = value(csvInputFolderKey).getOrElse("/input")

  private[this] val cleanupTestDataKey   = PerformanceTestingUtils.DATA_CLEANUP_KEY
  protected val cleanupTestData: Boolean = value(cleanupTestDataKey).forall(_.toBoolean)

  protected def setupInputData(timeout: Duration): (String, Long, Long) = {
    val client = ftpClient()
    try {
      if (!PerformanceTestingUtils.exists(client, csvOutputFolder))
        PerformanceTestingUtils.createFolder(client, csvOutputFolder)

      val result = generateData(
        numberOfRowsToFlush,
        timeout,
        (rows: Seq[Row]) => {
          val file        = s"$csvOutputFolder/${CommonUtils.randomString()}"
          val writer      = new BufferedWriter(new OutputStreamWriter(client.create(file)))
          val count       = new LongAdder()
          val sizeInBytes = new LongAdder()

          try {
            val cellNames: Set[String] = rows.head.cells().asScala.map(_.name).toSet
            writer
              .append(cellNames.mkString(","))
              .append("\n")
            rows.foreach(row => {
              val content = row.cells().asScala.map(_.value).mkString(",")
              count.increment()
              sizeInBytes.add(content.length)
              writer.append(content).append("\n")
            })
            (count.longValue(), sizeInBytes.longValue())
          } finally Releasable.close(writer)
        }
      )
      (csvOutputFolder, result._1, result._2)
    } finally Releasable.close(client)
  }

  protected[this] def ftpClient() =
    FileSystem.ftpBuilder
      .hostname(ftpHostname)
      .port(ftpPort)
      .user(ftpUser)
      .password(ftpPassword)
      .build
} 
Example 5
Source File: KVStore.scala    From Freasy-Monad with MIT License
package examples.cats

import cats.Id
import cats.free.Free
import freasymonad.cats.free

import scala.collection.mutable
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

@free trait KVStore {
  type KVStoreF[A] = Free[GrammarADT, A]
  sealed trait GrammarADT[A]

  def put[T](key: String, value: T): KVStoreF[Unit]
  def get[T](key: String): KVStoreF[Option[T]]
  def delete(key: String): KVStoreF[Unit]

  def update[T](key: String, f: T => T): KVStoreF[Unit] =
    for {
      vMaybe <- get[T](key)
      _      <- vMaybe.map(v => put[T](key, f(v))).getOrElse(Free.pure(()))
    } yield ()
}

object Main extends App {
  import KVStore.ops._

  def program: KVStoreF[Option[Int]] =
    for {
      _ <- put("wild-cats", 2)
      _ <- update[Int]("wild-cats", _ + 12)
      _ <- put("tame-cats", 5)
      n <- get[Int]("wild-cats")
      _ <- delete("tame-cats")
    } yield n

  val idInterpreter = new KVStore.Interp[Id] {
    val kvs = mutable.Map.empty[String, Any]
    def get[T](key: String): Id[Option[T]] = {
      println(s"get($key)")
      kvs.get(key).map(_.asInstanceOf[T])
    }
    def put[T](key: String, value: T): Id[Unit] = {
      println(s"put($key, $value)")
      kvs(key) = value
    }
    def delete(key: String): Id[Unit] = {
      println(s"delete($key)")
      kvs.remove(key)
    }
  }
  val resId: Id[Option[Int]] = idInterpreter.run(program)

  import cats.implicits.catsStdInstancesForFuture
  import scala.concurrent.ExecutionContext.Implicits.global

  val futureInterpreter = new KVStore.Interp[Future] {
    val kvs = mutable.Map.empty[String, Any]
    def get[T](key: String): Future[Option[T]] = Future {
      println(s"get($key)")
      kvs.get(key).map(_.asInstanceOf[T])
    }
    def put[T](key: String, value: T): Future[Unit] = Future {
      println(s"put($key, $value)")
      kvs(key) = value
    }
    def delete(key: String): Future[Unit] = Future {
      println(s"delete($key)")
      kvs.remove(key)
    }
  }
  val resFuture: Future[Option[Int]] = futureInterpreter.run(program)
  Await.ready(resFuture, Duration.Inf)
} 
Example 6
Source File: GitHubSpec.scala    From akka-api-gateway-example with MIT License
package jp.co.dzl.example.akka.api.service

import akka.actor.ActorSystem
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.model.{ HttpMethods, HttpRequest, HttpResponse }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Source }
import akka.stream.testkit.scaladsl.TestSink
import org.scalamock.scalatest.MockFactory
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, Matchers }

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class GitHubSpec extends FlatSpec with Matchers with ScalaFutures with BeforeAndAfterAll with MockFactory {
  implicit val system = ActorSystem("github-spec")
  implicit val executor = system.dispatcher
  implicit val materializer = ActorMaterializer()

  override protected def afterAll: Unit = {
    Await.result(system.terminate(), Duration.Inf)
  }

  "#from" should "merge original headers to github request" in {
    val github = new GitHubImpl("127.0.0.1", 8000, 5, mock[HttpClient])
    val request = HttpRequest(HttpMethods.GET, "/")
      .addHeader(RawHeader("host", "dummy"))
      .addHeader(RawHeader("timeout-access", "dummy"))

    val result = Source.single(HttpRequest(HttpMethods.GET, "/v1/github/users/xxxxxx"))
      .via(github.from(request))
      .runWith(TestSink.probe[HttpRequest])
      .request(1)
      .expectNext()

    result.headers.filter(_.lowercaseName() == "host") shouldBe empty
    result.headers.filter(_.lowercaseName() == "timeout-access") shouldBe empty
    result.headers.filter(_.lowercaseName() == "x-forwarded-host") shouldNot be(empty)
  }

  "#send" should "connect using http client" in {
    val httpResponse = HttpResponse()
    val httpClient = mock[HttpClient]
    (httpClient.connectionHttps _).expects(*, *, *).returning(Flow[HttpRequest].map(_ => httpResponse))

    val github = new GitHubImpl("127.0.0.1", 8000, 5, httpClient)
    val result = Source.single(HttpRequest(HttpMethods.GET, "/"))
      .via(github.send)
      .runWith(TestSink.probe[HttpResponse])
      .request(1)
      .expectNext()

    result shouldBe httpResponse
  }
} 
Example 7
Source File: CustomAutoDownBase.scala    From akka-cluster-custom-downing with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.{Cancellable, Scheduler, Address, Actor}
import akka.cluster.ClusterEvent._
import akka.cluster.MemberStatus.{Exiting, Down}
import akka.cluster._
import scala.concurrent.duration.{Duration, FiniteDuration}

object CustomDowning {
  case class UnreachableTimeout(member: Member)
}

abstract class CustomAutoDownBase(autoDownUnreachableAfter: FiniteDuration) extends Actor {

  import CustomDowning._

  def selfAddress: Address

  def down(node: Address): Unit

  def downOrAddPending(member: Member): Unit

  def downOrAddPendingAll(members: Set[Member]): Unit

  def scheduler: Scheduler

  import context.dispatcher

  val skipMemberStatus = Set[MemberStatus](Down, Exiting)

  private var scheduledUnreachable: Map[Member, Cancellable] = Map.empty
  private var pendingUnreachable: Set[Member] = Set.empty
  private var unstableUnreachable: Set[Member] = Set.empty

  override def postStop(): Unit = {
    scheduledUnreachable.values foreach { _.cancel }
    super.postStop()
  }

  def receiveEvent: Receive

  def receive: Receive = receiveEvent orElse predefinedReceiveEvent

  def predefinedReceiveEvent: Receive = {
    case state: CurrentClusterState =>
      initialize(state)
      state.unreachable foreach unreachableMember

    case UnreachableTimeout(member) =>
      if (scheduledUnreachable contains member) {
        scheduledUnreachable -= member
        if (scheduledUnreachable.isEmpty) {
          unstableUnreachable += member
          downOrAddPendingAll(unstableUnreachable)
          unstableUnreachable = Set.empty
        } else {
          unstableUnreachable += member
        }
      }

    case _: ClusterDomainEvent =>
  }

  def initialize(state: CurrentClusterState) = {}

  def unreachableMember(m: Member): Unit =
    if (!skipMemberStatus(m.status) && !scheduledUnreachable.contains(m))
      scheduleUnreachable(m)

  def scheduleUnreachable(m: Member): Unit = {
    if (autoDownUnreachableAfter == Duration.Zero) {
      downOrAddPending(m)
    } else {
      val task = scheduler.scheduleOnce(autoDownUnreachableAfter, self, UnreachableTimeout(m))
      scheduledUnreachable += (m -> task)
    }
  }

  def remove(member: Member): Unit = {
    scheduledUnreachable.get(member) foreach { _.cancel }
    scheduledUnreachable -= member
    pendingUnreachable -= member
    unstableUnreachable -= member
  }

  def scheduledUnreachableMembers: Map[Member, Cancellable] = scheduledUnreachable

  def pendingUnreachableMembers: Set[Member] = pendingUnreachable

  def pendingAsUnreachable(member: Member): Unit = pendingUnreachable += member

  def downPendingUnreachableMembers(): Unit = {
    pendingUnreachable.foreach(member => down(member.address))
    pendingUnreachable = Set.empty
  }

  def unstableUnreachableMembers: Set[Member] = unstableUnreachable
} 
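scheduleUnreachable special-cases Duration.Zero so a zero grace period downs the member immediately rather than scheduling a no-delay timer. Duration.Zero is an ordinary zero-length FiniteDuration and compares structurally, as this small check illustrates:

import scala.concurrent.duration._

object ZeroCheck extends App {
  // Any zero-length duration equals Duration.Zero regardless of unit.
  assert(Duration.Zero == 0.seconds)
  assert(Duration.Zero.isFinite && Duration.Zero.length == 0L)
}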
Example 8
Source File: DemoApp.scala    From constructr-consul with Apache License 2.0
package com.tecsisa.constructr.coordination
package demo

import akka.actor.{ ActorRef, ActorSystem, Address }
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration.{ Duration, MILLISECONDS }

object DemoApp {

  val conf     = ConfigFactory.load()
  val hostname = conf.getString("demo.hostname")
  val httpPort = conf.getInt("demo.port")

  def main(args: Array[String]): Unit = {
    // Create an Akka system
    implicit val system = ActorSystem("ConstructR-Consul")
    import system.dispatcher
    implicit val mat = ActorMaterializer()

    // Create an actor that handles cluster domain events
    val cluster =
      system.actorOf(SimpleClusterListener.props, SimpleClusterListener.Name)
    Http().bindAndHandle(route(cluster), hostname, httpPort)
  }

  private def route(cluster: ActorRef) = {
    import Directives._
    implicit val timeout = Timeout(
      Duration(
        conf.getDuration("demo.cluster-view-timeout").toMillis,
        MILLISECONDS
      )
    )
    path("member-nodes") { // List cluster nodes
      get {
        onSuccess(
          (cluster ? SimpleClusterListener.GetMemberNodes).mapTo[Set[Address]]
        )(addresses => complete(addresses.mkString("\n")))
      }
    }
  }

} 
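config.getDuration returns a java.time.Duration, which DemoApp converts to a Scala Duration by going through milliseconds. A sketch of that conversion, plus the lossless Duration.fromNanos alternative (the demo.cluster-view-timeout key mirrors the config above):

import com.typesafe.config.ConfigFactory
import scala.concurrent.duration.{Duration, FiniteDuration, MILLISECONDS}

object ConfigDurations extends App {
  val conf = ConfigFactory.parseString("demo.cluster-view-timeout = 5s")

  // Both conversions yield an equivalent scala FiniteDuration.
  val viaMillis = Duration(conf.getDuration("demo.cluster-view-timeout").toMillis, MILLISECONDS)
  val viaNanos: FiniteDuration = Duration.fromNanos(conf.getDuration("demo.cluster-view-timeout").toNanos)
  assert(viaMillis == viaNanos)
}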
Example 9
Source File: ConsulCoordinationSpec.scala    From constructr-consul with Apache License 2.0
package com.tecsisa.constructr.coordination.consul

import akka.Done
import akka.actor.{ ActorSystem, AddressFromURIString }
import akka.testkit.{ TestDuration, TestProbe }
import com.typesafe.config.ConfigFactory
import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpec }
import scala.concurrent.duration.{ Duration, DurationInt, FiniteDuration }
import scala.concurrent.{ Await, Awaitable }
import scala.util.Random

object ConsulCoordinationSpec {

  private val coordinationHost = {
    val dockerHostPattern = """tcp://(\S+):\d{1,5}""".r
    sys.env
      .get("DOCKER_HOST")
      .collect { case dockerHostPattern(address) => address }
      .getOrElse("127.0.0.1")
  }
}

class ConsulCoordinationSpec extends WordSpec with Matchers with BeforeAndAfterAll {
  import ConsulCoordinationSpec._

  private implicit val system = {
    val config =
      ConfigFactory
        .parseString(s"constructr.coordination.host = $coordinationHost")
        .withFallback(ConfigFactory.load())
    ActorSystem("default", config)
  }

  private val address1 = AddressFromURIString("akka.tcp://default@a:2552")
  private val address2 = AddressFromURIString("akka.tcp://default@b:2552")

  "ConsulCoordination" should {
    "correctly interact with consul" in {
      val coordination = new ConsulCoordination(randomString(), system)

      // Getting nodes
      resultOf(coordination.getNodes()) shouldBe 'empty

      // Lock (ttl >= 10s)
      resultOf(coordination.lock(address1, 10.seconds)) shouldBe true
      resultOf(coordination.lock(address1, 10.seconds)) shouldBe true
      resultOf(coordination.lock(address2, 10.seconds)) shouldBe false

      // Add self
      resultOf(coordination.addSelf(address1, 10.seconds)) shouldBe Done
      resultOf(coordination.getNodes()) shouldBe Set(address1)

      // Refresh
      resultOf(coordination.refresh(address1, 10.seconds)) shouldBe Done
      resultOf(coordination.getNodes()) shouldBe Set(address1)

      val probe = TestProbe()
      import probe._
      awaitAssert(
        resultOf(coordination.getNodes()) shouldBe 'empty,
        25.seconds // Wait until open sessions expire
      )
    }
  }

  override protected def afterAll() = {
    Await.ready(system.terminate(), Duration.Inf)
    super.afterAll()
  }

  private def resultOf[A](awaitable: Awaitable[A], max: FiniteDuration = 3.seconds.dilated) =
    Await.result(awaitable, max)

  private def randomString() = math.abs(Random.nextInt).toString
} 
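resultOf defaults its bound to 3.seconds.dilated. The dilated extension comes from akka-testkit's TestDuration and multiplies a FiniteDuration by the akka.test.timefactor setting, so waits stretch uniformly on slow machines; a minimal sketch:

import akka.actor.ActorSystem
import akka.testkit.TestDuration
import scala.concurrent.duration._

object DilatedDemo extends App {
  // dilated needs an implicit ActorSystem to read akka.test.timefactor.
  implicit val system: ActorSystem = ActorSystem("dilated-demo")

  // With the default timefactor of 1.0 this is still 3 seconds.
  val patience: FiniteDuration = 3.seconds.dilated
  println(patience)
  system.terminate()
}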
Example 10
Source File: MongoConnectorCreator.scala    From darwin with Apache License 2.0
package it.agilelab.darwin.connector.mongo

import com.mongodb.Block
import com.typesafe.config.Config
import it.agilelab.darwin.common.{Connector, ConnectorCreator}
import it.agilelab.darwin.connector.mongo.ConfigurationMongoModels.MongoConnectorConfig
import org.mongodb.scala.connection.ClusterSettings
import org.mongodb.scala.{MongoClient, MongoClientSettings, MongoCredential, ServerAddress}
import it.agilelab.darwin.common.compat._
import scala.concurrent.duration.Duration

class MongoConnectorCreator extends ConnectorCreator {

  override def create(config: Config): Connector = {

    val mongoConfig: MongoConnectorConfig = createConfig(config)
    new MongoConnector(createConnection(mongoConfig), mongoConfig)
  }

  
  def createConfig(config: Config): MongoConnectorConfig = {
    require(config.hasPath(ConfigurationKeys.USERNAME))
    require(config.hasPath(ConfigurationKeys.PASSWORD))
    require(config.hasPath(ConfigurationKeys.HOST))
    require(config.hasPath(ConfigurationKeys.DATABASE))
    require(config.hasPath(ConfigurationKeys.COLLECTION))

    MongoConnectorConfig(
      config.getString(ConfigurationKeys.USERNAME),
      config.getString(ConfigurationKeys.PASSWORD),
      config.getString(ConfigurationKeys.DATABASE),
      config.getString(ConfigurationKeys.COLLECTION),
      config.getStringList(ConfigurationKeys.HOST).toScala().toSeq,
      if (config.hasPath(ConfigurationKeys.TIMEOUT)) {
        Duration.create(config.getInt(ConfigurationKeys.TIMEOUT), "millis")
      } else {
        Duration.create(ConfigurationMongoModels.DEFAULT_DURATION, "millis")
      }
    )
  }

} 
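Duration.create is simply an alias for Duration.apply, so the connector's timeout could equally be written with apply or with the string parser; all three forms below produce the same FiniteDuration:

import scala.concurrent.duration.Duration

object CreateForms extends App {
  val a = Duration.create(5000, "millis")
  val b = Duration(5000, "millis")
  val c = Duration("5000 millis")
  assert(a == b && b == c)
}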
Example 11
Source File: ConfigurationMongoModels.scala    From darwin with Apache License 2.0
package it.agilelab.darwin.connector.mongo

import scala.concurrent.duration.Duration

object ConfigurationMongoModels {

  sealed trait BaseMongoConfig {
    def database: String
    def collection: String
    def timeout: Duration
  }

  case class MongoConfig(
                          database: String,
                          collection: String,
                          timeout: Duration
                        ) extends BaseMongoConfig

  case class MongoConnectorConfig(
                                   username: String,
                                   password: String,
                                   database: String,
                                   collection: String,
                                   hosts: Seq[String],
                                   timeout: Duration
                                 ) extends BaseMongoConfig

  val DEFAULT_DURATION = 5000

} 
Example 12
Source File: HttpApp.scala    From darwin with Apache License 2.0
package it.agilelab.darwin.server.rest

import java.util.concurrent.Executor

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.RouteConcatenation
import akka.stream.ActorMaterializer
import com.typesafe.config.Config
import it.agilelab.darwin.common.Logging

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor}

class HttpApp(config: Config, services: Service*)
             (implicit system: ActorSystem, materializer: ActorMaterializer) extends Logging {
  def run(): Unit = {
    val interface = config.getString("interface")
    val port = config.getInt("port")


    val route = RouteConcatenation.concat(services.map(_.route): _*)

    log.info("Starting http server on {}:{}", interface, port)
    val eventuallyBinding = Http().bindAndHandle(route, interface, port)
    val binding = Await.result(eventuallyBinding, Duration.Inf)
    log.info("Started http server on {}:{}", interface, port)

    val shutdownThread = new Thread(new Runnable {
      override def run(): Unit = {
        implicit val ec: ExecutionContext = newSameThreadExecutor
        log.info("Received shutdown hook")

        val termination = for {
          _ <- binding.unbind()
          terminated <- system.terminate()
        } yield terminated

        Await.ready(termination, Duration.Inf)
        log.info("Shutdown")
      }
    })

    shutdownThread.setName("shutdown")

    Runtime.getRuntime.addShutdownHook(shutdownThread)

    log.info("registered shutdown hook")
  }


  private def newSameThreadExecutor: ExecutionContextExecutor = ExecutionContext.fromExecutor(new Executor {
    override def execute(command: Runnable): Unit = command.run()
  })
}

object HttpApp {
  def apply(config:Config, services: Service*)(implicit system: ActorSystem, materializer: ActorMaterializer): HttpApp =
    new HttpApp(config, services: _*)
} 
Example 13
Source File: WSConfigParser.scala    From play-ws with Apache License 2.0
package play.api.libs.ws

import javax.inject.Inject
import javax.inject.Provider
import javax.inject.Singleton

import com.typesafe.config.Config
import com.typesafe.config.ConfigException
import com.typesafe.sslconfig.ssl.SSLConfigParser
import com.typesafe.sslconfig.util.EnrichedConfig

import scala.concurrent.duration.Duration


@Singleton
class WSConfigParser @Inject() (config: Config, classLoader: ClassLoader) extends Provider[WSClientConfig] {

  def parse(): WSClientConfig = {
    val wsConfig = config.getConfig("play.ws")

    val connectionTimeout = Duration(wsConfig.getString("timeout.connection"))
    val idleTimeout       = Duration(wsConfig.getString("timeout.idle"))
    val requestTimeout    = Duration(wsConfig.getString("timeout.request"))

    val followRedirects    = wsConfig.getBoolean("followRedirects")
    val useProxyProperties = wsConfig.getBoolean("useProxyProperties")

    val userAgent = {
      try {
        Some(wsConfig.getString("useragent"))
      } catch {
        case e: ConfigException.Null =>
          None
      }
    }

    val compressionEnabled = wsConfig.getBoolean("compressionEnabled")

    val sslConfig = new SSLConfigParser(EnrichedConfig(wsConfig.getConfig("ssl")), classLoader).parse()

    WSClientConfig(
      connectionTimeout = connectionTimeout,
      idleTimeout = idleTimeout,
      requestTimeout = requestTimeout,
      followRedirects = followRedirects,
      useProxyProperties = useProxyProperties,
      userAgent = userAgent,
      compressionEnabled = compressionEnabled,
      ssl = sslConfig
    )
  }

  override lazy val get: WSClientConfig = parse()
} 
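Each timeout above is parsed with Duration's string factory, which accepts "<length> <unit>" as well as the special infinite spellings; this is what lets human-readable config strings flow straight into WSClientConfig. A small sketch:

import scala.concurrent.duration.Duration

object ParsedDurations extends App {
  val connection = Duration("120 seconds")
  val idle       = Duration("2 minutes")
  val unbounded  = Duration("Inf") // also accepts "PlusInf" and "MinusInf"
  assert(connection.isFinite && idle.isFinite && !unbounded.isFinite)
}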
Example 14
Source File: RichScheduledExecutorService.scala    From mango with Apache License 2.0
package com.kakao.mango.concurrent

import java.util.concurrent.{Callable, ScheduledFuture, TimeUnit, ScheduledExecutorService}

import scala.concurrent.duration.Duration
import scala.concurrent.duration._
import scala.language.postfixOps


class RichScheduledExecutorService(underlying: ScheduledExecutorService) extends RichExecutorService(underlying) with ScheduledExecutorService {

  def scheduleIn[T](delay: Duration)(command: => T): ScheduledFuture[T] = schedule(new Callable[T] {
    override def call(): T = command
  }, delay.toMillis, TimeUnit.MILLISECONDS)

  def withFixedRate[T](rate: Duration, initialDelay: Duration = 0.second)(command: => Unit) = scheduleAtFixedRate(new Runnable {
    override def run(): Unit = command
  }, initialDelay.toMillis, rate.toMillis, TimeUnit.MILLISECONDS)

  def withFixedDelay[T](delay: Duration, initialDelay: Duration = 0.second)(command: => Unit) = scheduleWithFixedDelay(new Runnable {
    override def run(): Unit = command
  }, initialDelay.toMillis, delay.toMillis, TimeUnit.MILLISECONDS)

  // delegating to underlying
  override def schedule(command: Runnable, delay: Long, unit: TimeUnit): ScheduledFuture[_] = underlying.schedule(wrap(command), delay, unit)
  override def scheduleAtFixedRate(command: Runnable, initialDelay: Long, period: Long, unit: TimeUnit): ScheduledFuture[_] = underlying.scheduleAtFixedRate(wrap(command), initialDelay, period, unit)
  override def schedule[V](callable: Callable[V], delay: Long, unit: TimeUnit): ScheduledFuture[V] = underlying.schedule(wrap(callable), delay, unit)
  override def scheduleWithFixedDelay(command: Runnable, initialDelay: Long, delay: Long, unit: TimeUnit): ScheduledFuture[_] = underlying.scheduleWithFixedDelay(wrap(command), initialDelay, delay, unit)
} 
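The delegating overloads convert each Duration into a (length, TimeUnit) pair, and conversions through a coarser unit truncate, which is why the fixed-delay variant must pass toMillis together with TimeUnit.MILLISECONDS (matched units). A quick illustration of the truncation:

import java.util.concurrent.TimeUnit
import scala.concurrent.duration._

object UnitSafety extends App {
  val delay: FiniteDuration = 1500.micros

  assert(delay.toMillis == 1L)      // truncates sub-millisecond precision
  assert(delay.length == 1500L && delay.unit == TimeUnit.MICROSECONDS)
  assert(delay.toNanos == 1500000L) // lossless for finite durations
}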
Example 15
Source File: RPCContinuousShuffleWriter.scala    From XSQL with Apache License 2.0
package org.apache.spark.sql.execution.streaming.continuous.shuffle

import scala.concurrent.Future
import scala.concurrent.duration.Duration

import org.apache.spark.Partitioner
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.util.ThreadUtils


class RPCContinuousShuffleWriter(
    writerId: Int,
    outputPartitioner: Partitioner,
    endpoints: Array[RpcEndpointRef]) extends ContinuousShuffleWriter {

  if (outputPartitioner.numPartitions != 1) {
    throw new IllegalArgumentException("multiple readers not yet supported")
  }

  if (outputPartitioner.numPartitions != endpoints.length) {
    throw new IllegalArgumentException(s"partitioner size ${outputPartitioner.numPartitions} did " +
      s"not match endpoint count ${endpoints.length}")
  }

  def write(epoch: Iterator[UnsafeRow]): Unit = {
    while (epoch.hasNext) {
      val row = epoch.next()
      endpoints(outputPartitioner.getPartition(row)).askSync[Unit](ReceiverRow(writerId, row))
    }

    val futures = endpoints.map(_.ask[Unit](ReceiverEpochMarker(writerId))).toSeq
    implicit val ec = ThreadUtils.sameThread
    ThreadUtils.awaitResult(Future.sequence(futures), Duration.Inf)
  }
} 
Example 16
Source File: ContinuousTrigger.scala    From XSQL with Apache License 2.0
package org.apache.spark.sql.execution.streaming.continuous

import java.util.concurrent.TimeUnit

import scala.concurrent.duration.Duration

import org.apache.commons.lang3.StringUtils

import org.apache.spark.annotation.{Experimental, InterfaceStability}
import org.apache.spark.sql.streaming.{ProcessingTime, Trigger}
import org.apache.spark.unsafe.types.CalendarInterval


@InterfaceStability.Evolving
case class ContinuousTrigger(intervalMs: Long) extends Trigger {
  require(intervalMs >= 0, "the interval of trigger should not be negative")
}

private[sql] object ContinuousTrigger {
  def apply(interval: String): ContinuousTrigger = {
    if (StringUtils.isBlank(interval)) {
      throw new IllegalArgumentException(
        "interval cannot be null or blank.")
    }
    val cal = if (interval.startsWith("interval")) {
      CalendarInterval.fromString(interval)
    } else {
      CalendarInterval.fromString("interval " + interval)
    }
    if (cal == null) {
      throw new IllegalArgumentException(s"Invalid interval: $interval")
    }
    if (cal.months > 0) {
      throw new IllegalArgumentException(s"Doesn't support month or year interval: $interval")
    }
    new ContinuousTrigger(cal.microseconds / 1000)
  }

  def apply(interval: Duration): ContinuousTrigger = {
    ContinuousTrigger(interval.toMillis)
  }

  def create(interval: String): ContinuousTrigger = {
    apply(interval)
  }

  def create(interval: Long, unit: TimeUnit): ContinuousTrigger = {
    ContinuousTrigger(unit.toMillis(interval))
  }
} 
Example 17
Source File: ApplicationMonitor.scala    From XSQL with Apache License 2.0
package org.apache.spark.monitor.application

import java.sql.{Connection, Timestamp}
import java.text.SimpleDateFormat
import java.util.Date
import java.util.concurrent.TimeUnit

import scala.concurrent.duration.Duration

import org.apache.spark.alarm.AlertMessage
import org.apache.spark.alarm.AlertType._
import org.apache.spark.monitor.Monitor
import org.apache.spark.monitor.MonitorItem.MonitorItem

abstract class ApplicationMonitor extends Monitor {
  override val alertType = Seq(Application)
}

class ApplicationInfo(
    title: MonitorItem,
    appName: String,
    appId: String,
    md5: String,
    startTime: Date,
    duration: Long,
    appUiUrl: String,
    historyUrl: String,
    eventLogDir: String,
    minExecutor: Int,
    maxExecutor: Int,
    executorCore: Int,
    executorMemoryMB: Long,
    executorAccu: Double,
    user: String)
  extends AlertMessage(title) {
  override def toCsv(): String = {
    s"${user},${appId}," +
      s"${new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(startTime)}," +
      s"${Duration(duration, TimeUnit.MILLISECONDS).toSeconds}," +
      s"${executorMemoryMB},${executorCore},${executorAccu.formatted("%.2f")},${appName}"
  }
  // scalastyle:off
  override def toHtml(): String = {
    val html = <h1>Job finished!</h1>
        <h2>Job information</h2>
        <ul>
          <li>Job name: {appName}</li>
          <li>Job ID: {appId}</li>
          <li>Start time: {new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(startTime)}</li>
          <li>Elapsed time: {Duration(duration, TimeUnit.MILLISECONDS).toSeconds} s</li>
        </ul>
        <h2>Resource usage</h2>
        <ul>
          <li>Executor count: {minExecutor}~{maxExecutor}</li>
          <li>Executor memory: {executorMemoryMB} MB</li>
          <li>Executor cores: {executorCore}</li>
          <li>Cumulative executor usage: {executorAccu.formatted("%.2f")} executor*min</li>
        </ul>
        <h2>Debug information</h2>
        <ul>
          <li>UI link 1: <a href={appUiUrl.split(",").head}>{appUiUrl.split(",").head}</a></li>
          <li>UI link 2: <a href={historyUrl}>{historyUrl}</a></li>
          <li>Event log directory: {eventLogDir}</li>
        </ul>
    html.mkString
  }

  override def toJdbc(conn: Connection, appId: String): Unit = {
    val query = "INSERT INTO `xsql_monitor`.`spark_history`(" +
      "`user`, `md5`, `appId`, `startTime`, `duration`, " +
      "`yarnURL`, `sparkHistoryURL`, `eventLogDir`, `coresPerExecutor`, `memoryPerExecutorMB`," +
      " `executorAcc`, `appName`, `minExecutors`, `maxExecutors`)" +
      " SELECT ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? FROM DUAL" +
      " WHERE NOT EXISTS (SELECT * FROM `xsql_monitor`.`spark_history` WHERE `appId` = ?);"

    val preparedStmt = conn.prepareStatement(query)
    preparedStmt.setString(1, user)
    preparedStmt.setString(2, md5)
    preparedStmt.setString(3, appId)
    preparedStmt.setTimestamp(4, new Timestamp(startTime.getTime))
    preparedStmt.setLong(5, Duration(duration, TimeUnit.MILLISECONDS).toSeconds)
    preparedStmt.setString(6, appUiUrl)
    preparedStmt.setString(7, historyUrl)
    preparedStmt.setString(8, eventLogDir)
    preparedStmt.setInt(9, executorCore)
    preparedStmt.setLong(10, executorMemoryMB)
    preparedStmt.setDouble(11, executorAccu)
    preparedStmt.setString(12, appName)
    preparedStmt.setInt(13, minExecutor)
    preparedStmt.setInt(14, maxExecutor)
    preparedStmt.setString(15, appId)
    preparedStmt.execute
  }
} 
Example 18
Source File: SQLMonitor.scala    From XSQL with Apache License 2.0
package org.apache.spark.monitor.sql

import java.text.SimpleDateFormat
import java.util.Date
import java.util.concurrent.TimeUnit

import scala.concurrent.duration.Duration

import org.apache.spark.alarm.AlertMessage
import org.apache.spark.alarm.AlertType._
import org.apache.spark.monitor.Monitor
import org.apache.spark.monitor.MonitorItem.MonitorItem

abstract class SQLMonitor extends Monitor {
  override val alertType = Seq(SQL)

}

class SQLInfo(
    title: MonitorItem,
    sqlId: String,
    aeFlag: Boolean,
    appId: String,
    executionId: Long,
    submissionTime: Date,
    duration: Long)
  extends AlertMessage(title) {
  override def toCsv(): String = {
    s"${sqlId},${aeFlag},${appId},${executionId}," +
      s"${new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(submissionTime)}," +
      s"${Duration(duration, TimeUnit.MILLISECONDS).toSeconds}"
  }

} 
Example 19
Source File: AccessTokenSpec.scala    From akka-http-oauth2-client with Apache License 2.0
package com.github.dakatsuka.akka.http.oauth2.client

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{ HttpEntity, HttpResponse, StatusCodes }
import akka.http.scaladsl.model.ContentTypes.`application/json`
import akka.stream.{ ActorMaterializer, Materializer }
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ BeforeAndAfterAll, DiagrammedAssertions, FlatSpec }

import scala.concurrent.{ Await, ExecutionContext }
import scala.concurrent.duration.Duration

class AccessTokenSpec extends FlatSpec with DiagrammedAssertions with ScalaFutures with BeforeAndAfterAll {
  implicit val system: ActorSystem        = ActorSystem()
  implicit val ec: ExecutionContext       = system.dispatcher
  implicit val materializer: Materializer = ActorMaterializer()
  implicit val defaultPatience: PatienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(700, Millis))

  override def afterAll(): Unit = {
    Await.ready(system.terminate(), Duration.Inf)
  }

  behavior of "AccessToken"

  it should "apply from HttpResponse" in {
    val accessToken  = "xxx"
    val tokenType    = "bearer"
    val expiresIn    = 86400
    val refreshToken = "yyy"

    val httpResponse = HttpResponse(
      status = StatusCodes.OK,
      headers = Nil,
      entity = HttpEntity(
        `application/json`,
        s"""
           |{
           |  "access_token": "$accessToken",
           |  "token_type": "$tokenType",
           |  "expires_in": $expiresIn,
           |  "refresh_token": "$refreshToken"
           |}
         """.stripMargin
      )
    )

    val result = AccessToken(httpResponse)

    whenReady(result) { token =>
      assert(token.accessToken == accessToken)
      assert(token.tokenType == tokenType)
      assert(token.expiresIn == expiresIn)
      assert(token.refreshToken.contains(refreshToken))
    }
  }
} 
Example 20
Source File: DurationGeneratorsSpec.scala    From play-json-ops with MIT License
package play.api.libs.json.scalacheck

import org.scalatest.WordSpec
import org.scalatest.prop.GeneratorDrivenPropertyChecks._
import play.api.libs.json.scalacheck.DurationGenerators._

import scala.concurrent.duration.{Duration, FiniteDuration}

class DurationGeneratorsSpec extends WordSpec {

  "Arbitrary[FiniteDuration]" should {
    "always produce a valid finite value" in {
      forAll() { (duration: FiniteDuration) =>
        assert(duration.isFinite())
      }
    }
  }

  "Arbitrary[Duration]" should {
    "always produce a valid value" in {
      forAll() { (duration: Duration) =>
        assert(duration ne null)
      }
    }
  }
} 
Example 21
Source File: DurationGeneratorsSpec.scala    From play-json-ops with MIT License
package play.api.libs.json.scalacheck

import org.scalatest.wordspec.AnyWordSpec
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks._
import play.api.libs.json.scalacheck.DurationGenerators._

import scala.concurrent.duration.{Duration, FiniteDuration}

class DurationGeneratorsSpec extends AnyWordSpec {

  "Arbitrary[FiniteDuration]" should {
    "always produce a valid finite value" in {
      forAll() { (duration: FiniteDuration) =>
        assert(duration.isFinite())
      }
    }
  }

  "Arbitrary[Duration]" should {
    "always produce a valid value" in {
      forAll() { (duration: Duration) =>
        assert(duration ne null)
      }
    }
  }
} 
Example 22
Source File: DurationGeneratorsSpec.scala    From play-json-ops with MIT License
package play.api.libs.json.scalacheck

import org.scalatest.wordspec.AnyWordSpec
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
import play.api.libs.json.scalacheck.DurationGenerators._

import scala.concurrent.duration.{Duration, FiniteDuration}

class DurationGeneratorsSpec extends AnyWordSpec
  with ScalaCheckDrivenPropertyChecks {

  "Arbitrary[FiniteDuration]" should {
    "always produce a valid finite value" in {
      forAll() { (duration: FiniteDuration) =>
        assert(duration.isFinite)
      }
    }
  }

  "Arbitrary[Duration]" should {
    "always produce a valid value" in {
      forAll() { (duration: Duration) =>
        assert(duration ne null)
      }
    }
  }
} 
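All three variants of this spec assert the same two properties: an arbitrary FiniteDuration is always finite, while a plain Duration may be one of the special non-finite values. In short:

import scala.concurrent.duration._

object Finiteness extends App {
  assert(5.seconds.isFinite)
  // Only Duration.Inf, Duration.MinusInf and Duration.Undefined are non-finite.
  assert(!Duration.Inf.isFinite)
}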
Example 23
Source File: CarbonClient.scala    From akka-http-metrics with Apache License 2.0
package fr.davit.akka.http.metrics.graphite

import java.time.{Clock, Instant}

import akka.NotUsed
import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.scaladsl.{Flow, Keep, RestartFlow, Sink, Source, Tcp}
import akka.stream.{OverflowStrategy, QueueOfferResult}
import akka.util.ByteString
import fr.davit.akka.http.metrics.core.Dimension

import scala.concurrent.Await
import scala.concurrent.duration.{Duration, _}

object CarbonClient {

  def apply(host: String, port: Int)(implicit system: ActorSystem): CarbonClient = new CarbonClient(host, port)
}

class CarbonClient(host: String, port: Int)(implicit system: ActorSystem) extends AutoCloseable {

  private val logger         = Logging(system.eventStream, classOf[CarbonClient])
  protected val clock: Clock = Clock.systemUTC()

  private def serialize[T](name: String, value: T, dimensions: Seq[Dimension], ts: Instant): ByteString = {
    val tags         = dimensions.map(d => d.key + "=" + d.value).toList
    val taggedMetric = (name :: tags).mkString(";")
    ByteString(s"$taggedMetric $value ${ts.getEpochSecond}\n")
  }

  // TODO read backoff from config
  private def connection: Flow[ByteString, ByteString, NotUsed] =
    RestartFlow.withBackoff(
      minBackoff = 3.seconds,
      maxBackoff = 30.seconds,
      randomFactor = 0.2, // adds 20% "noise" to vary the intervals slightly
      maxRestarts = -1 // keep retrying forever
    )(() => Tcp().outgoingConnection(host, port))

  private val queue = Source
    .queue[ByteString](19, OverflowStrategy.dropHead)
    .via(connection)
    .toMat(Sink.ignore)(Keep.left)
    .run()

  def publish[T](
      name: String,
      value: T,
      dimensions: Seq[Dimension] = Seq.empty,
      ts: Instant = Instant.now(clock)
  ): Unit = {
    // it's reasonable to block until the message is enqueued
    Await.result(queue.offer(serialize(name, value, dimensions, ts)), Duration.Inf) match {
      case QueueOfferResult.Enqueued    => logger.debug("Metric {} enqueued", name)
      case QueueOfferResult.Dropped     => logger.debug("Metric {} dropped", name)
      case QueueOfferResult.Failure(e)  => logger.error(e, s"Failed publishing metric $name")
      case QueueOfferResult.QueueClosed => throw new Exception("Failed publishing metric to closed carbon client")
    }
  }

  override def close(): Unit = {
    queue.complete()
    Await.result(queue.watchCompletion(), Duration.Inf)
  }
} 
Example 24
Source File: MockWsClient.scala    From vat-api with Apache License 2.0
package v1.mocks
import org.scalamock.handlers.CallHandler
import org.scalamock.scalatest.MockFactory
import play.api.libs.ws.{BodyWritable, WSClient, WSRequest, WSResponse}

import scala.concurrent.Future
import scala.concurrent.duration.Duration

trait MockWsClient extends MockFactory {

  val mockWsClient: WSClient = mock[WSClient]
  val mockWsRequest: WSRequest = mock[WSRequest]

  object MockWsClient {

    def url(url: String): CallHandler[WSRequest] = {
      (mockWsClient.url(_: String))
        .expects(url)
    }
  }

  object MockWsRequest {

    def withHttpHeaders(headers: Seq[(String, String)]): CallHandler[WSRequest] = {
      (mockWsRequest.withHttpHeaders _ ).expects(*)
    }

    def withRequestTimeout(timeout: Duration): CallHandler[WSRequest] = {
      (mockWsRequest.withRequestTimeout(_: Duration))
        .expects(timeout)
    }

    def post[I: BodyWritable](body: I): CallHandler[Future[WSResponse]] = {
      (mockWsRequest.post(_: I)(_: BodyWritable[I]))
        .expects(body, *)
    }
  }

} 
Example 25
Source File: MockAppConfig.scala    From vat-api with Apache License 2.0
package mocks

import config.AppConfig
import org.scalamock.handlers.CallHandler
import org.scalamock.scalatest.MockFactory
import play.api.Configuration

import scala.concurrent.duration.Duration

trait MockAppConfig extends MockFactory {

  val mockAppConfig: AppConfig = mock[AppConfig]

  object MockedAppConfig {
    def desBaseUrl: CallHandler[String] = (mockAppConfig.desBaseUrl _: () => String).expects()
    def desToken: CallHandler[String] = (mockAppConfig.desToken _).expects()
    def desEnvironment: CallHandler[String] = (mockAppConfig.desEnv _).expects()
    def featureSwitch: CallHandler[Option[Configuration]] = (mockAppConfig.featureSwitch _: () => Option[Configuration]).expects()
    def apiGatewayContext: CallHandler[String]            = (mockAppConfig.apiGatewayContext _: () => String).expects()
    def apiStatus: CallHandler[String] = (mockAppConfig.apiStatus: String => String).expects("1.0")
    def endpointsEnabled: CallHandler[Boolean] = (mockAppConfig.endpointsEnabled: String => Boolean).expects("1.0")

    // NRS config items
    def nrsApiKey: CallHandler[String] = (mockAppConfig.nrsApiKey _).expects()
    def nrsMaxTimeout: CallHandler[Duration] = (mockAppConfig.nrsMaxTimeout _).expects()
    def appName: CallHandler[String] = (mockAppConfig.appName _).expects()
    def nrsBaseUrl: CallHandler[String] = (mockAppConfig.nrsBaseUrl _).expects()
  }
} 
Example 26
Source File: ArrowsStdlib.scala    From arrows with Apache License 2.0
package benchmarks

import scala.concurrent.ExecutionContext.Implicits.global
import arrows.stdlib.Arrow
import arrows.stdlib.Task
import org.openjdk.jmh.annotations.Benchmark
import scala.util.Try
import scala.concurrent.Promise
import scala.concurrent.Await
import scala.concurrent.duration.Duration

trait ArrowsStdlib {
  this: Benchmarks =>

  private[this] final val arrowGen = ArrowsStdlibArrowGen(dist)
  private[this] final val taskGen = ArrowsStdlibTaskGen(dist)

  @Benchmark
  def arrowsStdlibArrow = {
    Try(Await.result(arrowGen.run(1), Duration.Inf))
  }

  @Benchmark
  def arrowsStdlibTask = {
    Try(Await.result(taskGen(1).run, Duration.Inf))
  }
}

object ArrowsStdlibTaskGen extends Gen[Int => Task[Int]] {

  def sync = Task.successful _

  def async(schedule: Runnable => Unit) = {
    v =>
      val p = Promise[Int]()
      schedule(() => p.success(v))
      Task.async(p.future)
  }

  def failure(ex: Throwable) = v => Task.failed(ex)

  def map(t: Int => Task[Int], f: Int => Int) =
    t.andThen(_.map(f))

  def flatMap(t: Int => Task[Int], f: Int => Task[Int]) =
    t.andThen(_.flatMap(f))

  def handle(t: Int => Task[Int], i: Int) =
    t.andThen(_.recover { case _ => i })
}

object ArrowsStdlibArrowGen extends Gen[Arrow[Int, Int]] {

  def sync = Arrow[Int]

  def async(schedule: Runnable => Unit) =
    Arrow[Int].flatMap { v =>
      val p = Promise[Int]()
      schedule(() => p.success(v))
      Task.async(p.future)
    }

  def failure(ex: Throwable) = Arrow.failed(ex)

  def map(t: Arrow[Int, Int], f: Int => Int) =
    t.map(f)

  def flatMap(t: Arrow[Int, Int], f: Arrow[Int, Int]) =
    t.flatMap(f)

  def handle(t: Arrow[Int, Int], i: Int) =
    t.recover { case _ => i }
} 
Example 27
Source File: UsesServerRPC.scala    From udash-core with Apache License 2.0
package io.udash.rpc.internals

import com.avsystem.commons.SharedExtensions._
import io.udash.rpc._
import io.udash.utils.{CallbacksHandler, Registration}
import org.scalajs.dom

import scala.concurrent.duration.{Duration, DurationInt}
import scala.concurrent.{Future, Promise}
import scala.scalajs.js
import scala.scalajs.js.Dictionary

// NOTE: excerpt from the body of UsesServerRPC; the class declaration and its
// members (connector, callTimeout, pendingCalls, exceptionCallbacks, newCallId)
// are not shown in this listing.
  def onCallFailure(callback: exceptionCallbacks.CallbackType): Registration =
    exceptionCallbacks.register(callback)

  private def handleException(ex: Throwable): Unit =
    exceptionCallbacks.fire(ex)

  def handleResponse(response: RpcResponse): Unit = {
    pendingCalls.remove(response.callId)
      .foreach { promise =>
        response match {
          case RpcResponseSuccess(r, _) =>
            promise.success(r)
          case RpcResponseException(_, exception, _) =>
            handleException(exception)
            promise.failure(exception)
          case RpcResponseFailure(cause, error, _) =>
            val exception = RpcFailure(cause, error)
            handleException(exception)
            promise.failure(exception)
        }
      }
  }

  override protected[rpc] def fireRemote(getterChain: List[RpcInvocation], invocation: RpcInvocation): Unit =
    sendRpcRequest(RpcFire(invocation, getterChain))

  protected[rpc] def callRemote(callId: String, getterChain: List[RpcInvocation], invocation: RpcInvocation): Unit =
    sendRpcRequest(RpcCall(invocation, getterChain, callId))

  private def sendRpcRequest(request: RpcRequest): Unit =
    connector.sendRpcRequest(request)

  protected class RawRemoteRPC(getterChain: List[RpcInvocation]) extends ServerRawRpc {
    def fire(invocation: RpcInvocation): Unit =
      fireRemote(getterChain, invocation)

    def call(invocation: RpcInvocation): Future[JsonStr] =
      Promise[JsonStr]().setup { promise =>
        val callId = newCallId()
        callRemote(callId, getterChain, invocation)
        pendingCalls.put(callId, promise)
        dom.window.setTimeout(
          () => handleResponse(RpcResponseException("Request timeout", UsesServerRPC.CallTimeout(callTimeout), callId)),
          callTimeout.toMillis.toDouble
        )
      }.future

    def get(invocation: RpcInvocation): ServerRawRpc =
      new RawRemoteRPC(invocation :: getterChain)
  }
}

object UsesServerRPC {
  case class CallTimeout(callTimeout: Duration) extends RuntimeException(s"Response missing after $callTimeout.")
} 
Example 28
Source File: DefaultServerRPC.scala    From udash-core with Apache License 2.0
package io.udash.rpc

import com.avsystem.commons.rpc.AsReal
import io.udash.rpc.internals.UsesServerRPC
import io.udash.rpc.serialization.{DefaultExceptionCodecRegistry, ExceptionCodecRegistry}

import scala.concurrent.duration.{Duration, DurationDouble}

abstract class ServerRPC[ServerRPCType] extends UsesServerRPC[ServerRPCType]

// companion object of DefaultServerRPC (the class itself is elided in this listing)
object DefaultServerRPC {
  def apply[ClientRPCType: ClientRawRpc.AsRawRpc, ServerRPCType: ServerRawRpc.AsRealRpc](
    localRpc: ClientRPCType, serverUrl: String = "/atm/",
    exceptionsRegistry: ExceptionCodecRegistry = new DefaultExceptionCodecRegistry,
    rpcFailureInterceptors: Seq[PartialFunction[Throwable, Any]] = Seq.empty,
    callTimeout: Duration = 30 seconds
  ): ServerRPCType = {
    val clientRPC = new DefaultExposesClientRPC[ClientRPCType](localRpc)
    lazy val serverConnector = new DefaultAtmosphereServerConnector(
      clientRPC, resp => serverRPC.handleResponse(resp), serverUrl, exceptionsRegistry)
    lazy val serverRPC: DefaultServerRPC[ServerRPCType] =
      new DefaultServerRPC[ServerRPCType](serverConnector, callTimeout)
    rpcFailureInterceptors.foreach(serverRPC.onCallFailure)
    serverRPC.remoteRpc
  }
} 
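A minimal usage sketch, assuming hypothetical MyClientRpc/MyServerRpc RPC interfaces with the macro-generated AsRawRpc/AsRealRpc instances in scope (these names are illustrative, not part of the library):

import scala.concurrent.duration.DurationInt

// MyClientRpc, MyServerRpc and MyClientRpcImpl are hypothetical placeholders.
val serverRpc: MyServerRpc = DefaultServerRPC[MyClientRpc, MyServerRpc](
  localRpc = new MyClientRpcImpl,
  callTimeout = 10 seconds  // overrides the 30-second default above
)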
Example 29
Source File: TooltipUtils.scala    From udash-core   with Apache License 2.0 5 votes vote down vote up
package io.udash.bootstrap
package tooltip

import com.avsystem.commons.misc.{AbstractValueEnum, AbstractValueEnumCompanion, EnumCtx}
import io.udash.wrappers.jquery._
import org.scalajs.dom

import scala.collection.mutable
import scala.concurrent.duration.{Duration, DurationInt}
import scala.scalajs.js
import scala.scalajs.js.|

trait Tooltip extends Listenable {

  override final type EventType = TooltipEvent
  // ... remaining Tooltip members elided in this listing ...
}

// NOTE: the factory below belongs to the abstract class TooltipUtils in the
// original source; its declaration (and helper types such as Delay, Placement
// and Trigger) were stripped by the excerpting and are only sketched here so
// that the listing is well-formed.
abstract class TooltipUtils[TooltipType <: Tooltip] {
  def apply(
    animation: Boolean = true,
    boundary: String | dom.Node = "scrollParent",
    container: Option[String | dom.Node] = None,
    content: js.Function1[dom.Node, String] | dom.Node = io.udash.emptyStringNode(),
    delay: Delay | Long = Delay(0 millis, 0 millis),
    html: Boolean = false,
    offset: Int | String = "0",
    placement: Placement = defaultPlacement,
    template: Option[String] = None,
    title: String | js.Function1[dom.Node, String] | dom.Node = "",
    trigger: Seq[Trigger] = defaultTrigger
  )(el: dom.Node): TooltipType =
    initTooltip(
      js.Dictionary(
        "animation" -> animation,
        "boundary" -> boundary,
        "container" -> container.getOrElse(false),
        "content" -> content,
        "delay" -> delay,
        "html" -> html,
        "offset" -> offset,
        "placement" -> placement.jsValue,
        "template" -> template.getOrElse(defaultTemplate),
        "title" -> title,
        "trigger" -> trigger.map(_.jsValue).mkString(" ")
      )
    )(el)

  protected def initTooltip(options: js.Dictionary[Any])(el: dom.Node): TooltipType
  protected val defaultPlacement: Placement
  protected val defaultTemplate: String
  protected val defaultTrigger: Seq[Trigger]
} 
Example 30
Source File: TextInputsModifier.scala    From udash-core   with Apache License 2.0 5 votes vote down vote up
package io.udash.bindings.inputs

import io.udash.bindings.modifiers.Binding
import io.udash.properties.single.Property
import org.scalajs.dom._

import scala.concurrent.duration.Duration


private[bindings] abstract class TextInputsModifier(property: Property[String], debounce: Option[Duration]) extends Binding {
  def elementValue(t: Element): String
  def setElementValue(t: Element, v: String): Unit
  def setElementKeyUp(t: Element, callback: KeyboardEvent => Unit): Unit
  def setElementOnChange(t: Element, callback: Event => Unit): Unit
  def setElementOnInput(t: Element, callback: Event => Unit): Unit
  def setElementOnPaste(t: Element, callback: Event => Unit): Unit

  override def applyTo(t: Element): Unit = {
    if (property.get != null) setElementValue(t, property.get)

    propertyListeners += property.listen { value =>
      if (elementValue(t) != value) setElementValue(t, value)
    }

    var propertyUpdateHandler: Int = 0
    val callback = if (debounce.nonEmpty && debounce.get.toMillis > 0) {
      _: Event => {
        if (propertyUpdateHandler != 0) window.clearTimeout(propertyUpdateHandler)
        propertyUpdateHandler = window.setTimeout(() => {
          val value: String = elementValue(t)
          if (property.get != value) property.set(value)
        }, debounce.get.toMillis.toDouble)
      }
    } else {
      _: Event => {
        val value: String = elementValue(t)
        if (property.get != value) property.set(value)
      }
    }
    setElementKeyUp(t, callback)
    setElementOnChange(t, callback)
    setElementOnInput(t, callback)
    setElementOnPaste(t, callback)
  }
} 
Example 31
Source File: TextArea.scala    From udash-core   with Apache License 2.0 5 votes vote down vote up
package io.udash.bindings.inputs

import io.udash._
import org.scalajs.dom.html.TextArea
import org.scalajs.dom.{Element, Event, KeyboardEvent}
import scalatags.JsDom.all._

import scala.concurrent.duration.{Duration, DurationInt}

// NOTE: the enclosing `object TextArea` declaration is elided in this
// listing; reconstructed here so the factory below is well-formed.
object TextArea {
  def apply(value: Property[String], debounce: Duration = 20 millis)(textareaModifiers: Modifier*): InputBinding[TextArea] =
    new InputBinding[TextArea] {
      private val element = textarea(
        textareaModifiers, nestedInterceptor(new TextAreaModifier(value, Some(debounce)))
      ).render

      override def render: TextArea = element
    }

  private class TextAreaModifier(property: Property[String], debounce: Option[Duration])
    extends TextInputsModifier(property, debounce)  {

    override def elementValue(t: Element): String =
      t.asInstanceOf[TextArea].value

    override def setElementValue(t: Element, v: String): Unit =
      t.asInstanceOf[TextArea].value = v

    override def setElementKeyUp(t: Element, callback: KeyboardEvent => Unit): Unit =
      t.asInstanceOf[TextArea].onkeyup = callback

    override def setElementOnChange(t: Element, callback: Event => Unit): Unit =
      t.asInstanceOf[TextArea].onchange = callback

    override def setElementOnInput(t: Element, callback: Event => Unit): Unit =
      t.asInstanceOf[TextArea].oninput = callback

    override def setElementOnPaste(t: Element, callback: Event => Unit): Unit =
      t.asInstanceOf[TextArea].onpaste = callback
  }
} 
Example 32
Source File: RetryPolicies.scala    From cats-retry   with Apache License 2.0 5 votes vote down vote up
package retry

import java.util.concurrent.TimeUnit

import cats.Applicative
import cats.syntax.functor._
import cats.syntax.show._
import cats.instances.finiteDuration._
import cats.instances.int._
import retry.PolicyDecision._

import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.util.Random

object RetryPolicies {
  private val LongMax: BigInt = BigInt(Long.MaxValue)

  
  def limitRetriesByCumulativeDelay[M[_]: Applicative](
      threshold: FiniteDuration,
      policy: RetryPolicy[M]
  ): RetryPolicy[M] = {
    def decideNextRetry(status: RetryStatus): M[PolicyDecision] =
      policy.decideNextRetry(status).map {
        case r @ DelayAndRetry(delay) =>
          if (status.cumulativeDelay + delay >= threshold) GiveUp else r
        case GiveUp => GiveUp
      }

    RetryPolicy.withShow[M](
      decideNextRetry,
      show"limitRetriesByCumulativeDelay(threshold=$threshold, $policy)"
    )
  }
} 
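A hedged sketch of the combinator in use: capping a constant-delay policy (constantDelay is another combinator from the same module) once the cumulative delay would reach one second:

import cats.Id
import scala.concurrent.duration._

val capped: RetryPolicy[Id] = RetryPolicies.limitRetriesByCumulativeDelay[Id](
  threshold = 1.second,
  policy = RetryPolicies.constantDelay[Id](100.millis)  // gives up after ~10 retries
)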
Example 33
Source File: RetryStatus.scala    From cats-retry   with Apache License 2.0 5 votes vote down vote up
package retry

import scala.concurrent.duration.{Duration, FiniteDuration}

final case class RetryStatus(
    retriesSoFar: Int,
    cumulativeDelay: FiniteDuration,
    previousDelay: Option[FiniteDuration]
) {
  def addRetry(delay: FiniteDuration): RetryStatus = RetryStatus(
    retriesSoFar = this.retriesSoFar + 1,
    cumulativeDelay = this.cumulativeDelay + delay,
    previousDelay = Some(delay)
  )
}

object RetryStatus {
  val NoRetriesYet = RetryStatus(0, Duration.Zero, None)
} 
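For example, folding two retries into the initial status accumulates the delay and remembers the last one:

import scala.concurrent.duration._

val status = RetryStatus.NoRetriesYet.addRetry(100.millis).addRetry(200.millis)
// status.retriesSoFar    == 2
// status.cumulativeDelay == 300.millis
// status.previousDelay   == Some(200.millis)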
Example 34
Source File: DefaultSource.scala    From spark-power-bi   with Apache License 2.0 5 votes vote down vote up
package com.granturing.spark.powerbi

import org.apache.spark.sql.{DataFrame, SaveMode, SQLContext}
import org.apache.spark.sql.sources.{BaseRelation, CreatableRelationProvider}
import scala.concurrent._
import scala.concurrent.ExecutionContext.Implicits._
import scala.concurrent.duration.Duration

class DefaultSource extends CreatableRelationProvider with PowerBISink {

  override def createRelation(
      sqlContext: SQLContext,
      mode: SaveMode,
      parameters: Map[String, String],
      data: DataFrame): BaseRelation = {

    val conf = ClientConf.fromSparkConf(sqlContext.sparkContext.getConf)
    implicit val client = new Client(conf)

    val dataset = parameters.getOrElse("dataset", sys.error("'dataset' must be specified"))
    val table = parameters.getOrElse("table", sys.error("'table' must be specified"))
    val batchSize = parameters.getOrElse("batchSize", conf.batchSize.toString).toInt
    val group = parameters.get("group")

    val step = for {
      groupId <- getGroupId(group)
      ds <- getOrCreateDataset(mode, groupId, dataset, table, data.schema)
    } yield (groupId, ds)

    val result = step map { case (groupId, ds) =>
      val fields = data.schema.fieldNames.zipWithIndex
      val _conf = conf
      val _token = Some(client.currentToken)
      val _table = table
      val _batchSize = batchSize

      val coalesced =
        if (data.rdd.partitions.size > _conf.maxPartitions) data.coalesce(_conf.maxPartitions)
        else data

      coalesced foreachPartition { p =>
        val rows = p map { r =>
          fields map { case(name, index) => (name -> r(index)) } toMap
        } toSeq

        val _client = new Client(_conf, _token)

        val submit = rows.
          sliding(_batchSize, _batchSize).
          foldLeft(future()) { (fAccum, batch) =>
          fAccum flatMap { _ => _client.addRows(ds.id, _table, batch, groupId) } }

        submit.onComplete { _ => _client.shutdown() }

        Await.result(submit, _conf.timeout)
      }
    }

    result.onComplete { _ => client.shutdown() }

    Await.result(result, Duration.Inf)

    new BaseRelation {
      val sqlContext = data.sqlContext

      val schema = data.schema
    }
  }

} 
Example 35
Source File: OpenTsdbWSMock.scala    From prometheus-opentsdb-exporter   with Apache License 2.0 5 votes vote down vote up
package tools

import scala.concurrent.duration.Duration
import scala.concurrent.Future

import play.api.http.{HeaderNames, Writeable}
import play.api.libs.ws._
import play.api.libs.json._
import play.api.http.Status._

import org.specs2.mock.Mockito


abstract class OpenTsdbWSMock extends Mockito with WSClient {
  private val request = mock[WSRequest]
  private val response = mock[WSResponse]

  private var metrics: List[String] = List.empty

  private val urls:collection.mutable.Buffer[String] = new collection.mutable.ArrayBuffer[String]()

  request.withRequestTimeout(any[Duration]) returns request
  request.withFollowRedirects(any[Boolean]) returns request

  response.status returns OK
  response.header(HeaderNames.CONTENT_TYPE) returns Some("application/json;charset=UTF-8")
  response.json answers { _ => this.synchronized {
    val payload = responsePayload(metrics.head)
    metrics = metrics.tail
    payload
  }}

  request.post(anyString)(any[Writeable[String]]) answers { args => this.synchronized {
    val payload = args.asInstanceOf[Array[Object]](0).asInstanceOf[JsValue]
    val metric = (payload \ "queries") (0) \ "metric" match {
      case JsDefined(m) => m.toString.replace("\"", "")
      case _ => ""
    }

    metrics = metrics ++ List(metric)

    Future.successful(response)
  }}

  def url(url: String): WSRequest = {
    urls += url
    request
  }

  def underlying[T]: T = this.asInstanceOf[T]

  protected def responsePayload: Map[String, JsValue]

  override def close(): Unit = ()
} 
Example 36
Source File: ActivationStoreBehaviorBase.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.database.test.behavior

import java.time.Instant

import akka.stream.ActorMaterializer
import common.{StreamLogging, WskActorSystem}
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.database.{ActivationStore, CacheChangeNotification, UserContext}
import org.apache.openwhisk.core.database.test.behavior.ArtifactStoreTestUtil.storeAvailable
import org.apache.openwhisk.core.entity._
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatest.{BeforeAndAfterEach, FlatSpec, Matchers, Outcome}

import scala.collection.mutable.ListBuffer
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.concurrent.duration.DurationInt
import scala.language.postfixOps
import scala.util.{Random, Try}

trait ActivationStoreBehaviorBase
    extends FlatSpec
    with ScalaFutures
    with Matchers
    with StreamLogging
    with WskActorSystem
    with IntegrationPatience
    with BeforeAndAfterEach {

  protected implicit val materializer: ActorMaterializer = ActorMaterializer()
  protected implicit val notifier: Option[CacheChangeNotification] = None

  def context: UserContext
  def activationStore: ActivationStore
  private val docsToDelete = ListBuffer[(UserContext, ActivationId)]()

  def storeType: String

  protected def transId() = TransactionId(Random.alphanumeric.take(32).mkString)

  override def afterEach(): Unit = {
    cleanup()
    stream.reset()
  }

  override protected def withFixture(test: NoArgTest): Outcome = {
    assume(storeAvailable(storeAvailableCheck), s"$storeType not configured or available")
    val outcome = super.withFixture(test)
    if (outcome.isFailed) {
      println(logLines.mkString("\n"))
    }
    outcome
  }

  protected def storeAvailableCheck: Try[Any] = Try(true)
  //~----------------------------------------< utility methods >

  protected def store(activation: WhiskActivation, context: UserContext)(
    implicit transid: TransactionId,
    notifier: Option[CacheChangeNotification]): DocInfo = {
    val doc = activationStore.store(activation, context).futureValue
    docsToDelete.append((context, ActivationId(activation.docid.asString)))
    doc
  }

  protected def newActivation(ns: String, actionName: String, start: Long): WhiskActivation = {
    WhiskActivation(
      EntityPath(ns),
      EntityName(actionName),
      Subject(),
      ActivationId.generate(),
      Instant.ofEpochMilli(start),
      Instant.ofEpochMilli(start + 1000))
  }

  
  def cleanup()(implicit timeout: Duration = 10 seconds): Unit = {
    implicit val tid: TransactionId = transId()
    docsToDelete.map { e =>
      Try {
        Await.result(activationStore.delete(e._2, e._1), timeout)
      }
    }
    docsToDelete.clear()
  }

} 
Example 37
Source File: Analyser.scala    From codacy-analysis-cli   with GNU Affero General Public License v3.0 5 votes vote down vote up
package com.codacy.analysis.core.analysis

import java.nio.file.Path

import better.files.File
import com.codacy.analysis.core.model.{Configuration, DuplicationClone, FileMetrics, ToolResult}
import com.codacy.analysis.core.tools.{DuplicationTool, MetricsTool, Tool}
import org.log4s.{Logger, getLogger}

import scala.concurrent.duration.Duration
import scala.util.Try

trait AnalyserCompanion[T[_]] {
  def name: String
  def apply(): Analyser[T]
}

trait Analyser[T[_]] {

  def analyse(tool: Tool,
              directory: File,
              files: Set[Path],
              config: Configuration,
              timeout: Option[Duration] = Option.empty[Duration]): T[Set[ToolResult]]

  def metrics(metricsTool: MetricsTool,
              directory: File,
              files: Option[Set[Path]],
              timeout: Option[Duration] = Option.empty[Duration]): T[Set[FileMetrics]]

  def duplication(duplicationTool: DuplicationTool,
                  directory: File,
                  files: Set[Path],
                  timeout: Option[Duration] = Option.empty[Duration]): T[Set[DuplicationClone]]

}

object Analyser {

  private val logger: Logger = getLogger

  val defaultAnalyser: AnalyserCompanion[Try] = CodacyPluginsAnalyser

  val allAnalysers: Set[AnalyserCompanion[Try]] = Set(defaultAnalyser)

  def apply(name: String): Analyser[Try] = {
    val builder = allAnalysers.find(_.name.equalsIgnoreCase(name)).getOrElse {
      logger.warn(s"Could not find analyser for name $name. Using ${defaultAnalyser.name} as fallback.")
      defaultAnalyser
    }

    builder()
  }

  sealed trait Error {
    val message: String
  }

  object Error {

    final case class ToolExecutionFailure(toolType: String, toolName: String) extends Error {
      override val message: String = s"Failed $toolType for $toolName"
    }

    final case class ToolNeedsNetwork(toolName: String) extends Error {
      override val message: String = s"The tool $toolName needs network access to execute."
    }

    final case class NonExistingToolInput(toolName: String, availableTools: Set[String]) extends Error {

      override val message: String =
        s"""The selected tool "$toolName" is not supported or does not exist.
                                        |The tool should be one of (${availableTools.mkString(", ")})""".stripMargin
    }

    case object NoActiveToolInConfiguration extends Error {
      override val message: String = "No active tool found on the remote configuration"
    }

    case object NoToolsFoundForFiles extends Error {
      override val message: String = "No tools found for files provided"
    }

  }
} 
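A small sketch of the fallback behaviour of apply: an unknown name logs a warning and returns the default analyser:

import scala.util.Try

val analyser: Analyser[Try] = Analyser("no-such-analyser")  // falls back to defaultAnalyser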
Example 38
Source File: MetricsTool.scala    From codacy-analysis-cli   with GNU Affero General Public License v3.0 5 votes vote down vote up
package com.codacy.analysis.core.tools

import java.nio.file.Paths

import better.files.File
import com.codacy.analysis.core.model.FileMetrics
import com.codacy.plugins.api
import com.codacy.plugins.api.Source
import com.codacy.plugins.api.languages.Language
import com.codacy.plugins.api.metrics.MetricsTool.CodacyConfiguration
import com.codacy.plugins.metrics.traits
import com.codacy.plugins.metrics.traits.{MetricsRequest, MetricsRunner}
import com.codacy.plugins.runners.{BinaryDockerRunner, DockerRunner}
import com.codacy.plugins.utils.PluginHelper
import org.log4s.getLogger

import scala.concurrent.duration.Duration
import scala.util.Try

class MetricsTool(private val metricsTool: traits.MetricsTool, val languageToRun: Language) extends ITool {
  override def name: String = "metrics"

  override def supportedLanguages: Set[Language] = metricsTool.languages.to[Set]

  def run(directory: File,
          files: Option[Set[Source.File]],
          timeout: Option[Duration] = Option.empty[Duration]): Try[List[FileMetrics]] = {
    val request = MetricsRequest(directory.pathAsString)

    val dockerRunner = new BinaryDockerRunner[api.metrics.FileMetrics](metricsTool)
    val runner = new MetricsRunner(metricsTool, dockerRunner)

    val configuration = CodacyConfiguration(files, Some(languageToRun), None)

    val toolFileMetrics =
      runner.run(request, configuration, timeout.getOrElse(DockerRunner.defaultRunTimeout), None)

    toolFileMetrics.map {
      _.collect {
        case fileMetrics if unignoredFile(fileMetrics, files) =>
          FileMetrics(
            filename = Paths.get(fileMetrics.filename),
            complexity = fileMetrics.complexity,
            loc = fileMetrics.loc,
            cloc = fileMetrics.cloc,
            nrMethods = fileMetrics.nrMethods,
            nrClasses = fileMetrics.nrClasses,
            lineComplexities = fileMetrics.lineComplexities)
      }
    }
  }

  def unignoredFile(metrics: api.metrics.FileMetrics, files: Option[Set[Source.File]]): Boolean = {
    files.forall(_.exists(_.path == metrics.filename))
  }
}

object MetricsToolCollector {

  private val logger: org.log4s.Logger = getLogger

  private val availableTools = PluginHelper.dockerMetricsPlugins

  def fromLanguages(languages: Set[Language]): Set[MetricsTool] = {
    languages.flatMap { lang =>
      val collectedTools = availableTools.collect {
        case tool if tool.languages.contains(lang) =>
          new MetricsTool(tool, lang)
      }
      if (collectedTools.isEmpty) {
        logger.info(s"No metrics tools found for language ${lang.name}")
      }
      collectedTools
    }
  }

} 
Example 39
Source File: JavaKSYParser.scala    From kaitai_struct_compiler   with GNU General Public License v3.0 5 votes vote down vote up
package io.kaitai.struct.formats

import java.io._
import java.nio.charset.Charset
import java.util.{List => JList, Map => JMap}

import io.kaitai.struct.JavaMain.CLIConfig
import io.kaitai.struct.format.{ClassSpec, ClassSpecs}
import io.kaitai.struct.precompile.YAMLParserError
import io.kaitai.struct.{Log, Main}
import org.yaml.snakeyaml.constructor.SafeConstructor
import org.yaml.snakeyaml.error.MarkedYAMLException
import org.yaml.snakeyaml.representer.Representer
import org.yaml.snakeyaml.{DumperOptions, LoaderOptions, Yaml}

import scala.collection.JavaConversions._
import scala.concurrent.Await
import scala.concurrent.duration.Duration

object JavaKSYParser {
  def localFileToSpecs(yamlFilename: String, config: CLIConfig): ClassSpecs = {
    val firstSpec = fileNameToSpec(yamlFilename)
    val yamlDir = Option(new File(yamlFilename).getParent).getOrElse(".")
    val specs = new JavaClassSpecs(yamlDir, config.importPaths, firstSpec)

    Await.result(Main.importAndPrecompile(specs, config.runtime), Duration.Inf)
    specs
  }

  def fileNameToSpec(yamlFilename: String): ClassSpec = {
    Log.fileOps.info(() => s"reading $yamlFilename...")

    // This complex string of classes is due to the fact that Java's
    // default "FileReader" implementation always uses system locale,
    // which screws up encoding on some systems and screws up reading
    // UTF-8 files with BOM
    val fis = new FileInputStream(yamlFilename)
    val isr = new InputStreamReader(fis, Charset.forName("UTF-8"))
    val br = new BufferedReader(isr)
    try {
      val scalaSrc = readerToYaml(br)
      ClassSpec.fromYaml(scalaSrc)
    } catch {
      case marked: MarkedYAMLException =>
        val mark = marked.getProblemMark
        throw YAMLParserError(
          marked.getProblem,
          Some(yamlFilename),
          Some(mark.getLine + 1),
          Some(mark.getColumn + 1)
        )
    }
  }

  def getYamlLoader: Yaml = {
    val loaderOptions = new LoaderOptions
    loaderOptions.setAllowDuplicateKeys(false)
    new Yaml(
      new SafeConstructor,
      new Representer,
      new DumperOptions,
      loaderOptions
    )
  }

  def readerToYaml(reader: Reader): Any = {
    yamlJavaToScala(getYamlLoader.load(reader))
  }

  def stringToYaml(data: String): Any = {
    yamlJavaToScala(getYamlLoader.load(data))
  }

  def yamlJavaToScala(src: Any): Any = {
    src match {
      case jlist: JList[AnyRef] =>
        jlist.toList.map(yamlJavaToScala)
      case jmap: JMap[String, AnyRef] =>
        jmap.toMap.mapValues(yamlJavaToScala)
      case _: String =>
        src
      case _: Double =>
        src
      case _: Boolean =>
        src
      case javaInt: java.lang.Integer =>
        javaInt.intValue
      case javaLong: java.lang.Long =>
        javaLong.longValue
      case _: java.math.BigInteger =>
        src.toString
      case null =>
        // may be not the very best idea, but these nulls
        // should be handled by real parsing code, i.e. where
        // it tracks tree depth, etc.
        null
    }
  }
} 
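As a small illustration of the conversion above, stringToYaml turns SnakeYAML's Java collections into Scala ones (a sketch; the nested Map shape follows from yamlJavaToScala):

val parsed = JavaKSYParser.stringToYaml("meta:\n  id: example\n  version: 2")
// => Map("meta" -> Map("id" -> "example", "version" -> 2))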
Example 40
Source File: ServiceApp.scala    From BusFloatingData   with Apache License 2.0 5 votes vote down vote up
package de.nierbeck.floating.data.server

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpMethods._
import akka.http.scaladsl.model.ws.UpgradeToWebSocket
import akka.http.scaladsl.model.{HttpRequest, HttpResponse, Uri}
import akka.stream.ActorMaterializer
import de.nierbeck.floating.data.server.actors.websocket.{FLINK, RouterActor, SPARK, TiledVehiclesFromKafkaActor}

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success}

object ServiceApp extends RestService {

  import ServiceConfig._
  import system.dispatcher

  implicit val system = ActorSystem("service-api-http")
  implicit val mat = ActorMaterializer()

  override val logger = Logging(system, getClass.getName)
  override val session = CassandraConnector.connect()

  def main(args: Array[String]): Unit = {

    val router: ActorRef = system.actorOf(Props[RouterActor], "router")
    val sparkKafkaConsumer: ActorRef = system.actorOf(TiledVehiclesFromKafkaActor.props(router, "tiledVehicles", SPARK), "Kafka-Consumer-Spark")
    val flinkKafkaConsumer: ActorRef = system.actorOf(TiledVehiclesFromKafkaActor.props(router, "flinkTiledVehicles", FLINK), "Kafka-Consumer-Flink")


    val requestHandler: HttpRequest => HttpResponse = {
      case req@HttpRequest(GET, Uri.Path("/ws/vehicles"), _, _, _) =>
        req.header[UpgradeToWebSocket] match {
          case Some(upgrade) => upgrade.handleMessages(Flows.graphFlowWithStats(router))
          case None => HttpResponse(400, entity = "Not a valid websocket request!")
        }
      case _: HttpRequest => HttpResponse(404, entity = "Unknown resource!")
    }

    Http()
      .bindAndHandle(route(), serviceInterface, servicePort)
      .onComplete {
        case Success(_) => logger.info(s"Successfully bound to $serviceInterface:$servicePort")
        case Failure(e) => logger.error(s"Failed !!!! ${e.getMessage}")
      }

    Http()
      .bindAndHandleSync(requestHandler, serviceInterface, 8001)
      .onComplete {
        case Success(_) => logger.info(s"Successfully started Server to $serviceInterface:8001")
        case Failure(e) => logger.error(s"Failed !!!! ${e.getMessage}")
      }

    Await.ready(system.whenTerminated, Duration.Inf)
    CassandraConnector.close(session)
  }

} 
Example 41
Source File: ComputerChecker.scala    From Aton   with GNU General Public License v3.0 5 votes vote down vote up
package config

import javax.inject._

import akka.actor._
import dao.{ComputerStateDAO, ConnectedUserDAO}
import model.{ComputerState, ConnectedUser}
import services.{ComputerService, SSHOrderService, Timer}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}


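  // NOTE: this listing is an excerpt. The enclosing actor class, its injected
  // collaborators (computerService, sSHOrderService, computerStateDAO,
  // connectedUserDAO), the isExecuting flag and the time { ... } helper are
  // elided in the original.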
  @scala.throws[Exception](classOf[Exception])
  override def onReceive(message: Any): Unit = {
    // Only execute when it's not executing.
    if (!isExecuting) {

      // Set flag. It starts to execute now.
      isExecuting = true

      play.Logger.info("Executing computer checker.")

      // Create a checker task for every computer on the database.
      val task = computerService.listAllSimple.map { computers =>
        time {
          computers.map { computer =>
            play.Logger.debug("Checking: " + computer)
            sSHOrderService.check(computer)("Scheduled Checker")
          }
        }
      }

      // Execute all the task collected in the last step.
      val results: Seq[(ComputerState, Seq[ConnectedUser])] = Await.result(task, Duration.Inf)
      play.Logger.debug(s"""Computers checked, proceeding to save: $results""")

      // Save every result on the database.
      for (result <- results) {
        val computerState = result._1
        val addComputerStateTask = computerStateDAO.add(computerState)
        Await.result(addComputerStateTask, Duration.Inf)
        val connectedUsers = result._2
        val addConnectedUsersTasks = connectedUsers.map {
          connectedUserDAO.add
        }
        val f = Future.sequence(addConnectedUsersTasks.toList)
        Await.result(f, Duration.Inf)
      }
      // Reset the execution flag.
      isExecuting = false
    } else {
      // Already executing; skip this run.
      play.Logger.debug("Already executing computer checker. Omitting")
    }
  }


} 
Example 42
Source File: Shutdown.scala    From typed-actors   with Apache License 2.0 5 votes vote down vote up
package de.knutwalker.akka.typed

import akka.actor.ActorSystem
import akka.util.Timeout

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag

object Shutdown {
  def apply(system: ActorSystem): Unit = {
    Await.result(system.terminate(), Duration.Inf)
    ()
  }
}

object TimeoutMessage {
  def apply[A](ref: ActorRef[A])(implicit ct: ClassTag[A], timeout: Timeout): String = {
    s"""Ask timed out on [$ref] after [${timeout.duration.toMillis} ms]. Sender[null] sent message of type "${ct.runtimeClass.getName}"."""
  }
} 
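Usage is a one-liner; the call blocks until termination completes:

import akka.actor.ActorSystem

val system = ActorSystem("demo")
Shutdown(system)  // blocks via Await.result(system.terminate(), Duration.Inf)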
Example 43
Source File: BuildInFutureTest.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe

import java.util.concurrent.Executors

import wvlet.airspec.AirSpec

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

case class Config1(port: Int = 8080)
case class Config2()

class BuildInFutureTest extends AirSpec {

  // We need to use an executor which can load application classes #918.
  //
  // https://github.com/sbt/sbt/issues/5410
  private val threadPool              = Executors.newCachedThreadPool()
  private implicit val futureExecutor = ExecutionContext.fromExecutor(threadPool)

  override protected def afterAll: Unit = {
    threadPool.shutdownNow()
  }

  def `Building in Future causes MISSING_DEPENDENCY` = {
    val f = Future {
      newSilentDesign.build[Config1] { config => debug(config) }
    }
    Await.result(f, Duration.Inf)
  }

  def `Building in Future causes java.lang.ClassCastException` = {
    val f = Future {
      newSilentDesign
        .bind[Config2].toInstance(Config2())
        .build[Config1] { config => debug(config) }
    }
    Await.result(f, Duration.Inf)
  }
} 
Example 44
Source File: AirSpecTask.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airspec.runner

import sbt.testing._
import wvlet.airframe.Design
import wvlet.airspec.runner.AirSpecSbtRunner.AirSpecConfig
import wvlet.log.LogSupport

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Promise}


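  // NOTE: this listing is an excerpt. The enclosing sbt.testing.Task
  // implementation (holding taskDef, config, taskLogger and classLoader)
  // is elided in the original.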
  def execute(
      eventHandler: EventHandler,
      loggers: Array[sbt.testing.Logger],
      continuation: Array[sbt.testing.Task] => Unit
  ): Unit = {
    try {
      new AirSpecTaskRunner(taskDef, config, taskLogger, eventHandler, classLoader).runTask
    } finally {
      continuation(Array.empty)
    }
  }
} 
Example 45
Source File: LogLevelScannerTest.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.log

import java.util.concurrent.TimeUnit

import scala.concurrent.duration.Duration


class LogLevelScannerTest extends Spec {
  override protected def before: Unit = {
    // Ensure stopping log level scanner
    Logger.stopScheduledLogLevelScan
  }

  override protected def after: Unit = {
    Logger.stopScheduledLogLevelScan
  }

  protected def withScanner[U](config: LogLevelScannerConfig)(f: => U): U = {
    val scanner = new LogLevelScanner
    try {
      val lastScanCount = scanner.scanCount.get
      scanner.setConfig(config)
      scanner.start
      // Wait the first scan
      while (scanner.scanCount.get == lastScanCount) {
        Thread.sleep(15)
      }
      f
    } finally {
      scanner.stop
    }
  }

  def `scan log levels only once`: Unit = {
    val l = Logger("wvlet.log.test")
    l.setLogLevel(LogLevel.WARN)
    assert(l.getLogLevel == LogLevel.WARN)
    // Load log-test.properties
    LogLevelScanner.scanLogLevels
    assert(l.getLogLevel == LogLevel.DEBUG)
  }

  def `scan loglevels`: Unit = {
    val l = Logger("wvlet.log.test")
    l.setLogLevel(LogLevel.WARN)
    assert(l.getLogLevel == LogLevel.WARN)

    // Load log-test.properties
    withScanner(
      LogLevelScannerConfig(LogLevelScanner.DEFAULT_LOGLEVEL_FILE_CANDIDATES, Duration(10, TimeUnit.MILLISECONDS))
    ) {
      assert(l.getLogLevel == LogLevel.DEBUG)
    }
  }

  def `load another loglevel file`: Unit = {
    val l = Logger("wvlet.log.test")
    l.setLogLevel(LogLevel.WARN)
    assert(l.getLogLevel == LogLevel.WARN)

    withScanner(LogLevelScannerConfig(List("wvlet/log/custom-log.properties"), Duration(10, TimeUnit.MILLISECONDS))) {
      assert(l.getLogLevel == LogLevel.ERROR)
    }
  }

  def `load invalid loglevel file safely`: Unit = {
    val l = Logger("wvlet.log.test")
    l.setLogLevel(LogLevel.TRACE)

    withScanner(
      LogLevelScannerConfig(List("wvlet/log/invalid-loglevel.properties"), Duration(10, TimeUnit.MILLISECONDS))
    ) {
      // Should ignore unknown log level string
      assert(l.getLogLevel == LogLevel.TRACE)
    }
  }
} 
Example 46
Source File: BaseConfig.scala    From Cortex   with GNU Affero General Public License v3.0 5 votes vote down vote up
package org.thp.cortex.models

import scala.concurrent.duration.Duration

import play.api.Configuration
import play.api.libs.json._

import org.elastic4play.utils.Collection.distinctBy

case class BaseConfig(name: String, workerNames: Seq[String], items: Seq[ConfigurationDefinitionItem], config: Option[WorkerConfig]) {
  def +(other: BaseConfig) = BaseConfig(name, workerNames ++ other.workerNames, distinctBy(items ++ other.items)(_.name), config.orElse(other.config))
}

object BaseConfig {
  implicit val writes: Writes[BaseConfig] = Writes[BaseConfig] { baseConfig ⇒
    Json.obj(
      "name"               → baseConfig.name,
      "workers"            → baseConfig.workerNames,
      "configurationItems" → baseConfig.items,
      "config"             → baseConfig.config.fold(JsObject.empty)(_.jsonConfig)
    )
  }

  def global(tpe: WorkerType.Type, configuration: Configuration): BaseConfig = {
    val typedItems = tpe match {
      case WorkerType.responder ⇒ Nil
      case WorkerType.analyzer ⇒
        Seq(
          ConfigurationDefinitionItem(
            "auto_extract_artifacts",
            "extract artifacts from full report automatically",
            WorkerConfigItemType.boolean,
            multi = false,
            required = false,
            Some(JsFalse)
          ),
          ConfigurationDefinitionItem(
            "jobCache",
            "maximum time, in minutes, previous result is used if similar job is requested",
            WorkerConfigItemType.number,
            multi = false,
            required = false,
            configuration.getOptional[Duration]("cache.job").map(d ⇒ JsNumber(d.toMinutes))
          )
        )
    }
    BaseConfig(
      "global",
      Nil,
      typedItems ++ Seq(
        ConfigurationDefinitionItem("proxy_http", "url of http proxy", WorkerConfigItemType.string, multi = false, required = false, None),
        ConfigurationDefinitionItem("proxy_https", "url of https proxy", WorkerConfigItemType.string, multi = false, required = false, None),
        ConfigurationDefinitionItem("cacerts", "certificate authorities", WorkerConfigItemType.text, multi = false, required = false, None),
        ConfigurationDefinitionItem(
          "jobTimeout",
          "maximum allowed job execution time (in minutes)",
          WorkerConfigItemType.number,
          multi = false,
          required = false,
          configuration.getOptional[Duration]("job.timeout").map(d ⇒ JsNumber(d.toMinutes))
        )
      ),
      None
    )
  }

  val tlp = BaseConfig(
    "tlp",
    Nil,
    Seq(
      ConfigurationDefinitionItem("check_tlp", "", WorkerConfigItemType.boolean, multi = false, required = false, None),
      ConfigurationDefinitionItem("max_tlp", "", WorkerConfigItemType.number, multi = false, required = false, None)
    ),
    None
  )

  val pap = BaseConfig(
    "pap",
    Nil,
    Seq(
      ConfigurationDefinitionItem("check_pap", "", WorkerConfigItemType.boolean, multi = false, required = false, None),
      ConfigurationDefinitionItem("max_pap", "", WorkerConfigItemType.number, multi = false, required = false, None)
    ),
    None
  )
} 
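The + defined on BaseConfig merges two configs, keeping the left-hand name and de-duplicating items by name, so for instance:

val merged = BaseConfig.tlp + BaseConfig.pap
// merged.name == "tlp"
// merged.items now carries check_tlp, max_tlp, check_pap and max_pap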
Example 47
Source File: OrganizationSrv.scala    From Cortex   with GNU Affero General Public License v3.0 5 votes vote down vote up
package org.thp.cortex.services

import javax.inject.{Inject, Singleton}

import scala.concurrent.Future
import scala.concurrent.duration.Duration

import play.api.Configuration
import play.api.cache.AsyncCacheApi
import play.api.libs.json.JsObject

import akka.NotUsed
import akka.stream.scaladsl.Source
import org.thp.cortex.models.{Organization, OrganizationModel}

import org.elastic4play.controllers.Fields
import org.elastic4play.database.ModifyConfig
import org.elastic4play.services._

@Singleton
class OrganizationSrv(
    cacheExpiration: Duration,
    organizationModel: OrganizationModel,
    getSrv: GetSrv,
    updateSrv: UpdateSrv,
    findSrv: FindSrv,
    deleteSrv: DeleteSrv,
    createSrv: CreateSrv,
    cache: AsyncCacheApi
) {

  @Inject() def this(
      config: Configuration,
      organizationModel: OrganizationModel,
      getSrv: GetSrv,
      updateSrv: UpdateSrv,
      findSrv: FindSrv,
      deleteSrv: DeleteSrv,
      createSrv: CreateSrv,
      cache: AsyncCacheApi
  ) = this(config.get[Duration]("cache.organization"), organizationModel, getSrv, updateSrv, findSrv, deleteSrv, createSrv, cache)

  def create(fields: Fields)(implicit authContext: AuthContext): Future[Organization] =
    createSrv[OrganizationModel, Organization](organizationModel, fields)

  def get(orgId: String): Future[Organization] = cache.getOrElseUpdate(s"org-$orgId", cacheExpiration) {
    getSrv[OrganizationModel, Organization](organizationModel, orgId)
  }

  def update(orgId: String, fields: Fields)(implicit Context: AuthContext): Future[Organization] =
    update(orgId, fields, ModifyConfig.default)

  def update(orgId: String, fields: Fields, modifyConfig: ModifyConfig)(implicit Context: AuthContext): Future[Organization] = {
    cache.remove(s"org-$orgId")
    updateSrv[OrganizationModel, Organization](organizationModel, orgId, fields, modifyConfig)
  }

  def update(organization: Organization, fields: Fields)(implicit Context: AuthContext): Future[Organization] =
    update(organization, fields, ModifyConfig.default)

  def update(organization: Organization, fields: Fields, modifyConfig: ModifyConfig)(implicit Context: AuthContext): Future[Organization] = {
    cache.remove(s"org-${organization.id}")
    updateSrv(organization, fields, modifyConfig)
  }

  def delete(orgId: String)(implicit Context: AuthContext): Future[Organization] = {
    cache.remove(s"org-$orgId")
    deleteSrv[OrganizationModel, Organization](organizationModel, orgId)
  }

  def find(queryDef: QueryDef, range: Option[String], sortBy: Seq[String]): (Source[Organization, NotUsed], Future[Long]) =
    findSrv[OrganizationModel, Organization](organizationModel, queryDef, range, sortBy)

  def stats(queryDef: QueryDef, aggs: Seq[Agg]): Future[JsObject] = findSrv(organizationModel, queryDef, aggs: _*)
} 
Example 48
Source File: TestExecutorHelper.scala    From naptime   with Apache License 2.0 5 votes vote down vote up
package org.coursera.naptime.ari.graphql.schema

import com.linkedin.data.DataMap
import org.coursera.naptime.ari.graphql.Models
import org.coursera.naptime.ari.graphql.SangriaGraphQlContext
import org.coursera.naptime.ari.graphql.SangriaGraphQlSchemaBuilder
import org.coursera.naptime.ari.graphql.marshaller.NaptimeMarshaller._
import org.coursera.naptime.ari.graphql.models.RecordWithUnionTypes
import org.coursera.naptime.ari.graphql.models.MergedCourse
import org.coursera.naptime.ari.graphql.models.MergedInstructor
import org.coursera.naptime.ari.graphql.models.MergedPartner
import org.coursera.naptime.ari.graphql.resolvers.NaptimeResolver
import play.api.libs.json.JsObject
import sangria.execution.Executor
import sangria.parser.QueryParser
import sangria.schema.Schema

import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration

class TestExecutorHelper {

  def executeQuery(
      queryString: String,
      resourceData: Map[String, Map[String, List[DataMap]]]): JsObject = {
    val schemaTypes = Map(
      "org.coursera.naptime.ari.graphql.models.MergedCourse" -> MergedCourse.SCHEMA,
      "org.coursera.naptime.ari.graphql.models.FakeModel" -> RecordWithUnionTypes.SCHEMA,
      "org.coursera.naptime.ari.graphql.models.MergedPartner" -> MergedPartner.SCHEMA,
      "org.coursera.naptime.ari.graphql.models.MergedInstructor" -> MergedInstructor.SCHEMA)
    val allResources =
      Set(
        Models.courseResource,
        Models.instructorResource,
        Models.partnersResource,
        Models.fakeModelResource)
    val builder = new SangriaGraphQlSchemaBuilder(allResources, schemaTypes)
    val schema = builder.generateSchema().data.asInstanceOf[Schema[SangriaGraphQlContext, Any]]

    val queryAst = QueryParser.parse(queryString).get

    val context = SangriaGraphQlContext(
      FakeFetcherApi(resourceData),
      null,
      ExecutionContext.global,
      debugMode = true)

    Await
      .result(
        Executor
          .execute(
            schema,
            queryAst,
            context,
            variables = JsObject(Map.empty[String, JsObject]),
            deferredResolver = new NaptimeResolver()),
        Duration.Inf)
      .asInstanceOf[JsObject]
  }

} 
Example 49
Source File: PollingSpec.scala    From canoe   with MIT License 5 votes vote down vote up
package canoe.api.sources

import canoe.TestIO._
import canoe.api.TelegramClient
import canoe.methods.Method
import canoe.methods.updates.GetUpdates
import canoe.models.messages.TextMessage
import canoe.models.{MessageReceived, PrivateChat, Update}
import cats.effect.IO
import org.scalatest.freespec.AnyFreeSpec

import scala.concurrent.duration.Duration

class PollingSpec extends AnyFreeSpec {
  implicit val updatesClient: TelegramClient[IO] = new TelegramClient[IO] {
    def execute[Req, Res](request: Req)(implicit M: Method[Req, Res]): IO[Res] =
      if (M.name != GetUpdates.method.name) throw new UnsupportedOperationException
      else {
        val getUpdates: GetUpdates = request.asInstanceOf[GetUpdates]
        val update: Update =
          MessageReceived(getUpdates.offset.get, TextMessage(-1, PrivateChat(-1, None, None, None), -1, ""))
        IO.pure(List(update).asInstanceOf[Res])
      }
  }

  val polling = new Polling(Duration.Zero)
  "polling" - {
    "starts with given offset" in {
      assert(polling.pollUpdates(0).take(1).value().head.updateId == 0)
    }

    "uses last offset increased by 1 for each new call" in {
      val updates = polling
        .pollUpdates(0)
        .zipWithNext
        .collect { case (u1, Some(u2)) => u1.last -> u2.head }
        .take(5)
        .toList()

      assert(updates.forall { case (u1, u2) => u2.updateId == u1.updateId + 1 })
    }
  }
} 
Example 50
Source File: Socket.scala    From temperature-machine   with Apache License 2.0 5 votes vote down vote up
package bad.robot.temperature.server

import java.net.{DatagramPacket, DatagramSocket, SocketTimeoutException}

import bad.robot.temperature.server.DiscoveryServer._
import bad.robot.temperature.{Error, Timeout, UnexpectedError}

import scala.concurrent.duration.Duration
import scalaz.\/

object Socket {

  implicit class SocketOps(socket: DatagramSocket) {
    def await(timeout: Duration = Duration(0, "seconds")): Error \/ DatagramPacket = {
      val packet = new DatagramPacket(new Array[Byte](BufferSize), BufferSize)
      socket.setSoTimeout(timeout.toMillis.toInt)
      \/.fromTryCatchNonFatal {
        socket.receive(packet)
        packet
      }.leftMap {
        case _: SocketTimeoutException => Timeout(s"socket timed out after $timeout")
        case e: Throwable              => UnexpectedError(e.getMessage)
      }
    }
  }

} 
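A hedged usage sketch (the port number is illustrative): await a single UDP packet with a five-second timeout and fold over the scalaz disjunction:

import java.net.DatagramSocket
import scala.concurrent.duration.Duration
import bad.robot.temperature.server.Socket._

val socket = new DatagramSocket(8888)
socket.await(Duration(5, "seconds")).fold(
  error  => println(s"receive failed: $error"),
  packet => println(s"received ${packet.getLength} bytes")
)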
Example 51
Source File: Scheduler.scala    From temperature-machine   with Apache License 2.0 5 votes vote down vote up
package bad.robot.temperature.task

import java.util.concurrent.{ScheduledExecutorService, ScheduledFuture}

import bad.robot.logging._

import scala.concurrent.duration.Duration

object Scheduler {

  implicit class ScheduledExecutorServiceOps(executor: ScheduledExecutorService) {
    def schedule(frequency: Duration, tasks: Runnable*): List[ScheduledFuture[_]] = {
      this.schedule(frequency, printError(_), tasks:_*)
    }

    def schedule(frequency: Duration, errorHandler: Throwable => Runnable => Unit, tasks: Runnable*): List[ScheduledFuture[_]] = {
      tasks.map(task => {
        executor.scheduleAtFixedRate(wrapWithErrorHandler(task, errorHandler), 0, frequency.length, frequency.unit)
      }).toList
    }
  }

  def wrapWithErrorHandler(task: Runnable, errorHandler: Throwable => Runnable => Unit): Runnable = {
    () => try {
      task.run()
    } catch {
      case e: Throwable => errorHandler(e)(task)
    }
  }

  private def printError(e: Throwable): Runnable => Unit = {
    task => Log.error(s"An error occurred executing a scheduled task ($task): ${e.getMessage}")
  }
} 
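For example (a sketch), scheduling a periodic task with the default error handler:

import java.util.concurrent.Executors
import scala.concurrent.duration.Duration
import bad.robot.temperature.task.Scheduler._

val executor = Executors.newScheduledThreadPool(1)
val tick: Runnable = () => println("tick")
executor.schedule(Duration(10, "seconds"), tick)  // runs every 10 seconds; failures are logged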
Example 52
Source File: Seconds.scala    From temperature-machine   with Apache License 2.0 5 votes vote down vote up
package bad.robot.temperature.rrd

import java.time.Instant

import scala.concurrent.duration.Duration

object Seconds {

  def now() = Seconds(millisToSeconds(System.currentTimeMillis()))
  def millisToSeconds(millis: Long) = (millis + 500) / 1000L

  implicit def secondsToLong(seconds: Seconds): Long = seconds.value
  implicit def longToSeconds(seconds: Long): Seconds = Seconds(seconds)
  implicit def durationToSeconds(duration: Duration): Seconds = Seconds(duration.toSeconds)
  implicit def secondsToDuration(seconds: Seconds): Duration = Duration(seconds.value, "seconds")
}

case class Seconds(value: Long) {
  def +(other: Seconds) = Seconds(value + other.value)
  def -(other: Seconds) = Seconds(value - other.value)
  def toInstant: Instant = Instant.ofEpochSecond(value)
  def isAfter(instant: Instant) = toInstant.isAfter(instant)
  def isBefore(instant: Instant) = toInstant.isBefore(instant)

  override def toString: String = value + " seconds"
} 
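The implicit conversions let Seconds interoperate with both Long and Duration, e.g.:

import scala.concurrent.duration.Duration

val start: Seconds = Seconds.now()
val later: Seconds = start + Duration(90, "seconds")  // durationToSeconds
val delta: Long    = later - start                    // secondsToLong, == 90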
Example 53
Source File: package.scala    From temperature-machine   with Apache License 2.0 5 votes vote down vote up
package bad.robot.temperature

import org.rrd4j.core.{RrdDb, Sample}

import scala.Double._
import scala.concurrent.duration.Duration

package object rrd {

  val anHour = Duration(1, "hour")
  val aDay = Duration(24, "hours")
  val aWeek = Duration(7, "days")
  val aMonth = Duration(30, "days")
  
  implicit class RrdDbOps(database: RrdDb) {
    def hasValuesFor(datasource: String): Boolean = {
      DataSources.updated.contains(database.getDatasource(database.getDsIndex(datasource)).getName)
    }
  }

  implicit class RrdSampleOps(sample: Sample) {
    def setValues(database: RrdDb, time: Seconds, values: Double*) = {
      val matched = (0 until values.size)
        .dropRight(values.size - database.getDsCount)
        .filter(index => !(values(index) equals NaN))
        .map(index => database.getDatasource(index).getName)

      sample.setTime(time)
      sample.setValues(values: _*)
      sample.update()

      DataSources.updated = DataSources.updated ++ matched.toSet
    }
  }

  object DataSources {
    var updated = Set[String]()
  }
} 
Example 54
Source File: ArchiveTest.scala    From temperature-machine   with Apache License 2.0 5 votes vote down vote up
package bad.robot.temperature.rrd

import org.specs2.mutable.Specification

import scala.concurrent.duration.Duration

class ArchiveTest extends Specification {

  "Some typical RRA values" >> {
    val frequency = Duration(30, "seconds")

    val daily = Archive.apply(aDay, frequency, frequency)
    val weekHourlyAvg = Archive.apply(aWeek, frequency, anHour)
    val monthTwoHourlyAvg = Archive.apply(aMonth, frequency, anHour * 2)

    daily             must_== Archive(1, 2880)
    weekHourlyAvg     must_== Archive(120, 168)
    monthTwoHourlyAvg must_== Archive(240, 360)
  }

  "Some typical RRA values (1 min frequency)" >> {
    val frequency = Duration(1, "minute")

    val daily = Archive.apply(aDay, frequency, frequency)
    val weekHourlyAvg = Archive.apply(aWeek, frequency, anHour)
    val monthTwoHourlyAvg = Archive.apply(aMonth, frequency, anHour * 2)

    daily             must_== Archive(1, 1440)
    weekHourlyAvg     must_== Archive(60, 168)
    monthTwoHourlyAvg must_== Archive(120, 360)
  }
} 
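The expected values follow from simple arithmetic; spelled out for the 30-second case:

// steps per archived row = consolidation interval / sample frequency
//   daily:  30 s / 30 s    = 1 step          weekly: 3600 s / 30 s  = 120 steps
// rows = archive length / consolidation interval
//   daily:  86400 s / 30 s = 2880 rows       weekly: 7 * 24 h / 1 h = 168 rows
// hence Archive(1, 2880) and Archive(120, 168), matching the assertions above.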
Example 55
Source File: Example.scala    From temperature-machine   with Apache License 2.0 5 votes vote down vote up
package bad.robot.temperature.rrd

import bad.robot.temperature.rrd.Seconds.{now, secondsToLong}
import bad.robot.temperature.server.JsonFile
import bad.robot.temperature.task.FixedTimeMeasurement
import bad.robot.temperature.{Error, Measurement, SensorReading, Temperature}

import scala.concurrent.duration.Duration
import scala.util.Random
import scalaz.{-\/, \/}

object Example extends App {

  sys.props += ("org.slf4j.simpleLogger.defaultLogLevel" -> "info")

  val random = new Random()

  val duration = Duration(1, "days")

  val start = now() - duration.toSeconds
  val end = now()

  val frequency = Duration(30, "seconds")

  val hosts = List(Host("bedroom"), Host("lounge"))

  RrdFile(hosts, frequency).create(start - 5)

  populateRrd(hosts)

  val xml = Xml(start, start + aDay, hosts)
  xml.exportJson(JsonFile.filename)
  xml.exportXml("temperature.xml")

  Graph.create(start, start + aDay, hosts, "A day")
  Graph.create(start, start + aDay * 2, hosts, "2 days")
  Graph.create(start, start + aWeek, hosts, "A week")
  Graph.create(start, start + aMonth, hosts, "A month")

  println("Done generating " + duration)


  def populateRrd(hosts: List[Host]) = {
    def seed = random.nextInt(30) + random.nextDouble()

    def smooth = (value: Double) => if (random.nextDouble() > 0.5) value + random.nextDouble() else value - random.nextDouble()

    val temperatures = Stream.iterate(seed)(smooth).zip(Stream.iterate(seed)(smooth))
    val times = Stream.iterate(start)(_ + frequency.toSeconds).takeWhile(_ < end)

    times.zip(temperatures).foreach({
      case (time, (temperature1, temperature2)) => {
        handleError(RrdUpdate(hosts).apply(FixedTimeMeasurement(time, List(
          Measurement(hosts(0), time, List(
            SensorReading("?", Temperature(temperature1)),
            SensorReading("?", Temperature(temperature1 + 6.3)))
          ))
        )))
        handleError(RrdUpdate(hosts).apply(FixedTimeMeasurement(time + 1, List(
          Measurement(hosts(1), time + 1, List(
            SensorReading("?", Temperature(temperature2)),
            SensorReading("?", Temperature(temperature2 + 1.3)))
          ))
        )))
      }
    })

    def handleError(f: => Error \/ Any): Unit = {
      f match {
        case -\/(error) => println(error)
        case _          => ()
      }
    }
  }

} 
Example 56
Source File: ExponentialBackOff.scala    From schedoscope   with Apache License 2.0 5 votes vote down vote up
package org.schedoscope.scheduler.utils

import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.util.Random


case class ExponentialBackOff(backOffSlotTime: FiniteDuration,
                              backOffSlot: Int = 1,
                              backOffWaitTime: FiniteDuration = Duration.Zero,
                              constantDelay: FiniteDuration = Duration.Zero,
                              ceiling: Int = 10,
                              resetOnCeiling: Boolean = false,
                              retries: Int = 0,
                              resets: Int = 0,
                              totalRetries: Long = 0) {

  private def updateTime = backOffSlotTime * expectedBackOff(backOffSlot) + constantDelay

  private def expectedBackOff(backOffSlot: Int) = {
    val rand = new Random().nextInt(backOffSlot + 1)
    math.round(math.pow(2, rand) - 1)
  }

  def nextBackOff: ExponentialBackOff = {
    if (backOffSlot >= ceiling && resetOnCeiling)
    // reset
      copy(backOffSlot = 1,
        backOffWaitTime = Duration.Zero,
        resets = resets + 1,
        retries = 0,
        totalRetries = totalRetries + 1)
    else {
      val newBackOffSlot = if (backOffSlot >= ceiling) ceiling else backOffSlot + 1
      // increase 1 collision
      copy(backOffSlot = newBackOffSlot,
        backOffWaitTime = updateTime,
        retries = retries + 1,
        totalRetries = totalRetries + 1)
    }
  }
} 
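A sketch of how the case class is threaded through a retry loop: each failure replaces the value with nextBackOff and waits for the computed time:

import scala.concurrent.duration._

var backOff = ExponentialBackOff(backOffSlotTime = 1.second, ceiling = 5, resetOnCeiling = true)
for (attempt <- 1 to 3) {
  backOff = backOff.nextBackOff
  println(s"attempt $attempt: wait ${backOff.backOffWaitTime} (slot ${backOff.backOffSlot})")
}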
Example 57
Source File: TFuturesTest.scala    From Scala-for-Machine-Learning-Second-Edition   with MIT License 5 votes vote down vote up
package org.scalaml.scalability.akka

import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import org.scalaml.Logging
import org.scalaml.Predef.DblVec
import org.scalaml.filtering.dft.DFT
import org.scalaml.scalability.akka.message._
import org.scalaml.util.FormatUtils._
import org.scalatest.{FlatSpec, Matchers}
import scala.concurrent.duration.Duration
import scala.util.Random

// NOTE: the test class declaration is elided in this listing (as are the
// Duration and Random imports the body relies on); reconstructed here.
class TFuturesTest extends FlatSpec with Matchers with Logging {
  protected[this] val name: String = "Scala futures"

  private val NUM_WORKERS = 8
  private val NUM_DATA_POINTS = 1000000
  private val h = (x: Double) => 2.0 * Math.cos(Math.PI * 0.005 * x) + // simulated first harmonic
    Math.cos(Math.PI * 0.05 * x) + // simulated second harmonic
    0.5 * Math.cos(Math.PI * 0.2 * x) + // simulated third harmonic
    0.2 * Random.nextDouble

  private val TimeOut = 5000L
  private val duration = Duration(TimeOut, "millis")
  implicit val timeout = new Timeout(duration)


  it should s"$name Data transformation futures using Akka actors" in {
    show("$name Data transformation futures using Akka actors")

    val actorSystem = ActorSystem("System")
    val xt = Vector.tabulate(NUM_DATA_POINTS)(h(_))

    val master = actorSystem.actorOf(
      Props(new DFTFutures(xt, NUM_WORKERS)),
      "DFTTransform"
    )

    val future = master ? Start()
    Thread.sleep(TimeOut)

    actorSystem.shutdown()
  }
}

// -----------------------------------------------  EOF --------------------------- 
Example 58
Source File: package.scala    From zio-akka-cluster   with Apache License 2.0 5 votes vote down vote up
package zio.akka.cluster

import akka.actor.ActorContext
import zio.{ Has, Ref, Tag, Task, UIO, URIO, ZIO }

import scala.concurrent.duration.Duration

package object sharding {
  type Entity[State] = Has[Entity.Service[State]]

  object Entity {

    trait Service[State] {
      def context: ActorContext
      def replyToSender[R](msg: R): Task[Unit]
      def id: String
      def state: Ref[Option[State]]
      def stop: UIO[Unit]
      def passivate: UIO[Unit]
      def passivateAfter(duration: Duration): UIO[Unit]
    }

    def replyToSender[State: Tag, R](msg: R): ZIO[Entity[State], Throwable, Unit]         =
      ZIO.accessM[Entity[State]](_.get.replyToSender(msg))
    def context[State: Tag]: URIO[Entity[State], ActorContext]                            =
      ZIO.access[Entity[State]](_.get.context)
    def id[State: Tag]: URIO[Entity[State], String]                                       =
      ZIO.access[Entity[State]](_.get.id)
    def state[State: Tag]: URIO[Entity[State], Ref[Option[State]]]                        =
      ZIO.access[Entity[State]](_.get.state)
    def stop[State: Tag]: ZIO[Entity[State], Nothing, Unit]                               =
      ZIO.accessM[Entity[State]](_.get.stop)
    def passivate[State: Tag]: ZIO[Entity[State], Nothing, Unit]                          =
      ZIO.accessM[Entity[State]](_.get.passivate)
    def passivateAfter[State: Tag](duration: Duration): ZIO[Entity[State], Nothing, Unit] =
      ZIO.accessM[Entity[State]](_.get.passivateAfter(duration))

  }
} 
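A sketch of the accessors in use inside an entity's message handler (the surrounding Sharding setup is assumed and not shown):

import java.util.concurrent.TimeUnit
import scala.concurrent.duration.Duration
import zio.ZIO

def onMessage: ZIO[Entity[Int], Throwable, Unit] =
  for {
    entityId <- Entity.id[Int]
    _        <- Entity.replyToSender[Int, String](s"hello from $entityId")
    _        <- Entity.passivateAfter[Int](Duration.create(30, TimeUnit.SECONDS))
  } yield ()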
Example 59
Source File: ReceiveTimeout.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package akka.actor.dungeon

import ReceiveTimeout.emptyReceiveTimeoutData
import akka.actor.ActorCell
import akka.actor.ActorCell.emptyCancellable
import akka.actor.Cancellable
import scala.concurrent.duration.Duration
import scala.concurrent.duration.FiniteDuration

private[akka] object ReceiveTimeout {
  final val emptyReceiveTimeoutData: (Duration, Cancellable) = (Duration.Undefined, ActorCell.emptyCancellable)
}

private[akka] trait ReceiveTimeout { this: ActorCell ⇒

  import ReceiveTimeout._
  import ActorCell._

  private var receiveTimeoutData: (Duration, Cancellable) = emptyReceiveTimeoutData

  final def receiveTimeout: Duration = receiveTimeoutData._1

  final def setReceiveTimeout(timeout: Duration): Unit = receiveTimeoutData = receiveTimeoutData.copy(_1 = timeout)

  final def checkReceiveTimeout() {
    val recvtimeout = receiveTimeoutData
    //Only reschedule if desired and there are currently no more messages to be processed
    if (!mailbox.hasMessages) recvtimeout._1 match {
      case f: FiniteDuration ⇒
        recvtimeout._2.cancel() //Cancel any ongoing future
        val task = system.scheduler.scheduleOnce(f, self, akka.actor.ReceiveTimeout)(this.dispatcher)
        receiveTimeoutData = (f, task)
      case _ ⇒ cancelReceiveTimeout()
    }
    else cancelReceiveTimeout()

  }

  final def cancelReceiveTimeout(): Unit =
    if (receiveTimeoutData._2 ne emptyCancellable) {
      receiveTimeoutData._2.cancel()
      receiveTimeoutData = (receiveTimeoutData._1, emptyCancellable)
    }

} 
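The user-facing counterpart of this bookkeeping is context.setReceiveTimeout; a minimal sketch:

import akka.actor.{Actor, ReceiveTimeout}
import scala.concurrent.duration._

class IdleAware extends Actor {
  context.setReceiveTimeout(30.seconds)  // recorded in receiveTimeoutData above

  def receive = {
    case ReceiveTimeout => context.stop(self)  // no message arrived for 30 seconds
    case _              => ()                  // any other message defers the timeout
  }
}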
Example 60
Source File: PinnedDispatcher.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package akka.dispatch

import akka.actor.ActorCell
import scala.concurrent.duration.Duration
import scala.concurrent.duration.FiniteDuration


class PinnedDispatcher(
  _configurator: MessageDispatcherConfigurator,
  _actor: ActorCell,
  _id: String,
  _shutdownTimeout: FiniteDuration,
  _threadPoolConfig: ThreadPoolConfig)
  extends Dispatcher(
    _configurator,
    _id,
    Int.MaxValue,
    Duration.Zero,
    _threadPoolConfig.copy(corePoolSize = 1, maxPoolSize = 1),
    _shutdownTimeout) {

  @volatile
  private var owner: ActorCell = _actor

  //Relies on an external lock provided by MessageDispatcher.attach
  protected[akka] override def register(actorCell: ActorCell) = {
    val actor = owner
    if ((actor ne null) && actorCell != actor) throw new IllegalArgumentException("Cannot register to anyone but " + actor)
    owner = actorCell
    super.register(actorCell)
  }
  //Relies on an external lock provided by MessageDispatcher.detach
  protected[akka] override def unregister(actor: ActorCell) = {
    super.unregister(actor)
    owner = null
  }
} 
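The Duration.Zero handed to the Dispatcher constructor above (as the throughput deadline) is an ordinary FiniteDuration of length zero, as this standard-library-only sketch shows:

import scala.concurrent.duration._

object ZeroDuration extends App {
  assert(Duration.Zero == 0.millis)    // equal to any zero-length duration
  assert(Duration.Zero < 1.nanosecond) // ordered below every positive duration
  println(Duration.Zero.isFinite)      // true
}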
Example 61
Source File: SimpleDnsManager.scala    From perf_tester   with Apache License 2.0
package akka.io

import java.util.concurrent.TimeUnit

import akka.actor.{ ActorLogging, Actor, Deploy, Props }
import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics }
import akka.routing.FromConfig

import scala.concurrent.duration.Duration

class SimpleDnsManager(val ext: DnsExt) extends Actor with RequiresMessageQueue[UnboundedMessageQueueSemantics] with ActorLogging {

  import context._

  private val resolver = actorOf(FromConfig.props(Props(ext.provider.actorClass, ext.cache, ext.Settings.ResolverConfig).withDeploy(Deploy.local).withDispatcher(ext.Settings.Dispatcher)), ext.Settings.Resolver)
  private val cacheCleanup = ext.cache match {
    case cleanup: PeriodicCacheCleanup ⇒ Some(cleanup)
    case _ ⇒ None
  }

  private val cleanupTimer = cacheCleanup map { _ ⇒
    val interval = Duration(ext.Settings.ResolverConfig.getDuration("cache-cleanup-interval", TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS)
    system.scheduler.schedule(interval, interval, self, SimpleDnsManager.CacheCleanup)
  }

  override def receive = {
    case r @ Dns.Resolve(name) ⇒
      log.debug("Resolution request for {} from {}", name, sender())
      resolver.forward(r)
    case SimpleDnsManager.CacheCleanup ⇒
      for (c ← cacheCleanup)
        c.cleanup()
  }

  override def postStop(): Unit = {
    for (t ← cleanupTimer) t.cancel()
  }
}

object SimpleDnsManager {
  private case object CacheCleanup
} 
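The cleanup interval above is built by reading a millisecond count from config and wrapping it with the two-argument Duration factory, which returns a FiniteDuration. A sketch with a literal standing in for the config lookup (120000L is a hypothetical value):

import java.util.concurrent.TimeUnit
import scala.concurrent.duration.{Duration, FiniteDuration}

object ConfigInterval extends App {
  val millis = 120000L // stands in for ResolverConfig.getDuration(..., MILLISECONDS)
  val interval: FiniteDuration = Duration(millis, TimeUnit.MILLISECONDS)
  println(interval.toMinutes) // 2
}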
Example 62
Source File: LanguageClient.scala    From lsp4s   with Apache License 2.0
package scala.meta.jsonrpc

import cats.syntax.either._
import io.circe.Decoder
import io.circe.Encoder
import io.circe.syntax._
import java.io.OutputStream
import java.nio.ByteBuffer
import monix.eval.Callback
import monix.eval.Task
import monix.execution.Ack
import monix.execution.Cancelable
import monix.execution.atomic.Atomic
import monix.execution.atomic.AtomicInt
import monix.reactive.Observer
import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
import scala.concurrent.duration.Duration
import MonixEnrichments._
import scribe.LoggerSupport

class LanguageClient(out: Observer[ByteBuffer], logger: LoggerSupport)
    extends JsonRpcClient {
  def this(out: OutputStream, logger: LoggerSupport) =
    this(Observer.fromOutputStream(out, logger), logger)
  private val writer = new MessageWriter(out, logger)
  private val counter: AtomicInt = Atomic(1)
  private val activeServerRequests =
    TrieMap.empty[RequestId, Callback[Response]]
  def notify[A: Encoder](method: String, notification: A): Future[Ack] =
    writer.write(Notification(method, Some(notification.asJson)))
  def serverRespond(response: Response): Future[Ack] = response match {
    case Response.Empty => Ack.Continue
    case x: Response.Success => writer.write(x)
    case x: Response.Error =>
      logger.error(s"Response error: $x")
      writer.write(x)
  }
  def clientRespond(response: Response): Unit =
    for {
      id <- response match {
        case Response.Empty => None
        case Response.Success(_, requestId) => Some(requestId)
        case Response.Error(_, requestId) => Some(requestId)
      }
      callback <- activeServerRequests.get(id).orElse {
        logger.error(s"Response to unknown request: $response")
        None
      }
    } {
      activeServerRequests.remove(id)
      callback.onSuccess(response)
    }

  def request[A: Encoder, B: Decoder](
      method: String,
      request: A
  ): Task[Either[Response.Error, B]] = {
    val nextId = RequestId(counter.incrementAndGet())
    val response = Task.create[Response] { (out, cb) =>
      val scheduled = out.scheduleOnce(Duration(0, "s")) {
        val json = Request(method, Some(request.asJson), nextId)
        activeServerRequests.put(nextId, cb)
        writer.write(json)
      }
      Cancelable { () =>
        scheduled.cancel()
        this.notify("$/cancelRequest", CancelParams(nextId.value))
      }
    }
    response.map {
      case Response.Empty =>
        Left(
          Response.invalidParams(
            s"Got empty response for request $request",
            nextId
          )
        )
      case err: Response.Error =>
        Left(err)
      case Response.Success(result, _) =>
        result.as[B].leftMap { err =>
          Response.invalidParams(err.toString, nextId)
        }
    }
  }
}

object LanguageClient {
  def fromOutputStream(out: OutputStream, logger: LoggerSupport) =
    new LanguageClient(Observer.fromOutputStream(out, logger), logger)
} 
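Duration(0, "s") above uses the string-unit factory to schedule the request with zero delay. A quick sketch of the equivalent parse forms:

import scala.concurrent.duration._

object StringUnits extends App {
  val a = Duration(0, "s")       // abbreviation
  val b = Duration(0, "seconds") // full unit name
  println(a == b && b == Duration.Zero) // true
  println(Duration("10 ms"))            // the one-string form also parses: 10 milliseconds
}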
Example 63
Source File: Server.scala    From scalachain   with MIT License
package com.elleflorio.scalachain

import akka.actor.{ActorRef, ActorSystem}
import akka.cluster.pubsub.DistributedPubSub
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import com.elleflorio.scalachain.actor.Node
import com.elleflorio.scalachain.api.NodeRoutes
import com.elleflorio.scalachain.cluster.ClusterManager
import com.typesafe.config.{Config, ConfigFactory}

import scala.concurrent.Await
import scala.concurrent.duration.Duration

object Server extends App with NodeRoutes {

  implicit val system: ActorSystem = ActorSystem("scalachain")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  val config: Config = ConfigFactory.load()
  val address = config.getString("http.ip")
  val port = config.getInt("http.port")
  val nodeId = config.getString("scalachain.node.id")

  lazy val routes: Route = statusRoutes ~ transactionRoutes ~ mineRoutes

  val clusterManager: ActorRef = system.actorOf(ClusterManager.props(nodeId), "clusterManager")
  val mediator: ActorRef = DistributedPubSub(system).mediator
  val node: ActorRef = system.actorOf(Node.props(nodeId, mediator), "node")

  Http().bindAndHandle(routes, address, port)
  println(s"Server online at http://$address:$port/")

  Await.result(system.whenTerminated, Duration.Inf)

} 
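Await.result(system.whenTerminated, Duration.Inf) above blocks the main thread until the actor system terminates. Duration.Inf is the infinite duration, larger than any finite value; a minimal sketch:

import scala.concurrent.duration._

object InfiniteDuration extends App {
  assert(Duration.Inf > 365.days) // greater than every FiniteDuration
  assert(!Duration.Inf.isFinite)  // not a FiniteDuration
  println(Duration.Inf)           // Duration.Inf
}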
Example 64
Source File: LongInputTests.scala    From boson   with Apache License 2.0
package io.zink.boson

import bsonLib.BsonObject
import io.netty.util.ResourceLeakDetector
import io.vertx.core.json.JsonObject
import io.zink.boson.bson.bsonImpl.BosonImpl
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.junit.Assert._

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.io.Source


@RunWith(classOf[JUnitRunner])
class LongInputTests extends FunSuite {
  ResourceLeakDetector.setLevel(ResourceLeakDetector.Level.ADVANCED)

  val bufferedSource: Source = Source.fromURL(getClass.getResource("/jsonOutput.txt"))
  val finale: String = bufferedSource.getLines.toSeq.head
  bufferedSource.close

  val json: JsonObject = new JsonObject(finale)
  val bson: BsonObject = new BsonObject(json)

  test("extract top field") {
    val expression: String = ".Epoch"
    val boson: Boson = Boson.extractor(expression, (out: Int) => {
      assertTrue(3 == out)
    })
    val res = boson.go(bson.encode.getBytes)
    Await.result(res, Duration.Inf)
  }

  test("extract bottom field") {
    val expression: String = "SSLNLastName"
    val expected: String = "de Huanuco"
    val boson: Boson = Boson.extractor(expression, (out: String) => {
      assertTrue(expected.zip(out).forall(e => e._1.equals(e._2)))
    })
    val res = boson.go(bson.encode.getBytes)
    Await.result(res, Duration.Inf)
  }

  test("extract positions of an Array") {
    val expression: String = "Markets[3 to 5]"
    val mutableBuffer: ArrayBuffer[Array[Byte]] = ArrayBuffer()
    val boson: Boson = Boson.extractor(expression, (out: Array[Byte]) => {
      mutableBuffer += out
    })
    val res = boson.go(bson.encode.getBytes)
    Await.result(res, Duration.Inf)
    assertEquals(3, mutableBuffer.size)
  }

  test("extract further positions of an Array") {
    val expression: String = "Markets[50 to 55]"
    val mutableBuffer: ArrayBuffer[Array[Byte]] = ArrayBuffer()
    val boson: Boson = Boson.extractor(expression, (out: Array[Byte]) => {
      mutableBuffer += out
    })
    val res = boson.go(bson.encode.getBytes)
    Await.result(res, Duration.Inf)
    assertEquals(6, mutableBuffer.size)
  }

  test("size of all occurrences of Key") {
    val expression: String = "Price"
    val mutableBuffer: ArrayBuffer[Float] = ArrayBuffer()
    val boson: Boson = Boson.extractor(expression, (out: Float) => {
      mutableBuffer += out
    })
    val res = boson.go(bson.encode.getBytes)
    Await.result(res, Duration.Inf)
    assertEquals(195, mutableBuffer.size)
  }

} 
Example 65
Source File: MonixParallelTests.scala    From freestyle   with Apache License 2.0
package freestyle.free.tests

import freestyle.free.implicits._
import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import org.scalatest.{Matchers, WordSpec}
import scala.concurrent.Await
import scala.concurrent.duration.Duration

class MonixParallelTests extends WordSpec with Matchers {
  "Applicative Parallel Support" should {
    "allow non deterministic execution when interpreting to monix.eval.Task" ignore {

      val test = new freestyle.NonDeterminismTestShared
      import test._

      implicit val interpreter = new freestyle.MixedFreeS.Handler[Task] {
        override def x: Task[Int] = Task(blocker(1, 1000L))
        override def y: Task[Int] = Task(blocker(2, 0L))
        override def z: Task[Int] = Task(blocker(3, 2000L))
      }

      Await.result(program.interpret[Task].runAsync, Duration.Inf) shouldBe List(3, 1, 2, 3)
      buf.toArray shouldBe Array(3, 2, 1, 3)
    }
  }
} 
Example 66
Source File: AsyncGuavaTests.scala    From freestyle   with Apache License 2.0
package freestyle.async
package guava

import java.util.concurrent.{Callable, Executors}

import com.google.common.util.concurrent.{ListenableFuture, ListeningExecutorService, MoreExecutors}
import org.scalatest._

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext}

class AsyncGuavaTests extends WordSpec with Matchers with Implicits {

  import ExecutionContext.Implicits.global
  import implicits._

  val exception: Throwable = new RuntimeException("Test exception")

  val service: ListeningExecutorService =
    MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(10))

  def failedFuture[T]: ListenableFuture[T] =
    service.submit(new Callable[T] {
      override def call(): T = throw exception
    })

  def successfulFuture[T](value: T): ListenableFuture[T] =
    service.submit(new Callable[T] {
      override def call(): T = value
    })

  val foo = "Bar"

  "Guava ListenableFuture Freestyle integration" should {

    "transform guava ListenableFutures into scala.concurrent.Future successfully" in {
      Await.result(listenableFuture2Async(successfulFuture(foo)), Duration.Inf) shouldBe foo
    }

    "recover from failed guava ListenableFutures wrapping them into scala.concurrent.Future" in {
      Await.result(listenableFuture2Async(failedFuture[String]).failed, Duration.Inf) shouldBe exception
    }

    "transform guava ListenableFuture[Void] into scala.concurrent.Future successfully through an implicit conversion" in {
      Await.result(
        listenableFuture2Async(listenableVoidToListenableUnit(successfulFuture[Void](None.orNull))),
        Duration.Inf) shouldBe ((): Unit)
    }

    "recover from failed guava ListenableFuture[Void] wrapping them into scala.concurrent.Future through an implicit conversion" in {
      Await.result(
        listenableFuture2Async(listenableVoidToListenableUnit(failedFuture[Void])).failed,
        Duration.Inf) shouldBe exception
    }

  }

} 
Example 67
Source File: TradeAggregation.scala    From scala-stellar-sdk   with Apache License 2.0
package stellar.sdk.model

import java.time.Instant
import java.util.concurrent.TimeUnit

import org.json4s.JsonAST.JObject
import org.json4s.{DefaultFormats, JValue}
import stellar.sdk.model.response.ResponseParser

import scala.concurrent.duration.Duration

case class TradeAggregation(instant: Instant, tradeCount: Int, baseVolume: Double, counterVolume: Double,
                            average: Double, open: Price, high: Price, low: Price, close: Price)

object TradeAggregationDeserializer extends ResponseParser[TradeAggregation]({ o: JObject =>
  implicit val formats = DefaultFormats

  def price(p: JValue): Price = Price((p \ "N").extract[Int], (p \ "D").extract[Int])

  TradeAggregation(
    instant = Instant.ofEpochMilli((o \ "timestamp").extract[String].toLong),
    tradeCount = (o \ "trade_count").extract[String].toInt,
    baseVolume = (o \ "base_volume").extract[String].toDouble,
    counterVolume = (o \ "counter_volume").extract[String].toDouble,
    average = (o \ "avg").extract[String].toDouble,
    open = price(o \ "open_r"),
    high = price(o \ "high_r"),
    low = price(o \ "low_r"),
    close = price(o \ "close_r"))
})

object TradeAggregation {

  sealed class Resolution(val duration: Duration)

  val OneMinute = new Resolution(Duration.create(1, TimeUnit.MINUTES))
  val FiveMinutes = new Resolution(OneMinute.duration * 5.0)
  val FifteenMinutes = new Resolution(FiveMinutes.duration * 3.0)
  val OneHour = new Resolution(FifteenMinutes.duration * 4.0)
  val OneDay = new Resolution(OneHour.duration * 24.0)
  val OneWeek = new Resolution(OneDay.duration * 7.0)

} 
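The resolutions above derive longer durations by multiplying shorter ones; Duration supports this arithmetic directly. A sketch reproducing the chain with standard-library calls only:

import scala.concurrent.duration._

object ResolutionArithmetic extends App {
  val oneMinute = Duration(1, MINUTES)
  val fiveMinutes = oneMinute * 5.0           // Duration * Double => Duration
  val oneDay = fiveMinutes * 3.0 * 4.0 * 24.0
  println(fiveMinutes.toSeconds) // 300
  println(oneDay.toHours)        // 24
}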
Example 68
Source File: HorizonServerError.scala    From scala-stellar-sdk   with Apache License 2.0
package stellar.sdk.inet

import okhttp3.HttpUrl
import org.json4s.native.JsonMethods
import org.json4s.{DefaultFormats, Formats, JObject, JValue}

import scala.concurrent.duration.Duration
import scala.util.Try

case class HorizonServerError(uri: HttpUrl, body: JObject)(implicit val formats: Formats) extends Exception(
  s"Server error when communicating with Horizon. $uri -> ${
    implicit val formats: Formats = DefaultFormats
    Try((body \ "detail").extract[String]).getOrElse(JsonMethods.compact(JsonMethods.render(body)))
  }"
)

case class HorizonEntityNotFound(uri: HttpUrl, body: JValue)(implicit val formats: Formats) extends Exception(
  s"Requested entity was not found in Horizon. $uri -> ${
    implicit val formats: Formats = DefaultFormats
    Try((body \ "detail").extract[String]).getOrElse(JsonMethods.compact(JsonMethods.render(body)))
  }"
)

case class HorizonRateLimitExceeded(uri: HttpUrl, retryAfter: Duration)(implicit val formats: Formats) extends Exception(
  s"Horizon request rate limit was exceeded. Try again in $retryAfter"
)

case class HorizonBadRequest(uri: HttpUrl, body: String) extends Exception(
  s"Bad request. $uri -> ${
    implicit val formats: Formats = DefaultFormats
    Try(
      (JsonMethods.parse(body) \ "extras" \ "reason").extract[String]
    ).getOrElse(body)
  }")

case class FailedResponse(cause: String) extends Exception(cause) 
Example 69
Source File: DistributedShellClient.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.examples.distributedshell

import java.util.concurrent.TimeUnit
import scala.concurrent.Await
import scala.concurrent.duration.Duration

import akka.pattern.ask
import org.slf4j.{Logger, LoggerFactory}

import org.apache.gearpump.cluster.client.ClientContext
import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption}
import org.apache.gearpump.examples.distributedshell.DistShellAppMaster.ShellCommand
import org.apache.gearpump.util.{AkkaApp, Constants}


object DistributedShellClient extends AkkaApp with ArgumentsParser {
  implicit val timeout = Constants.FUTURE_TIMEOUT
  private val LOG: Logger = LoggerFactory.getLogger(getClass)

  override val options: Array[(String, CLIOption[Any])] = Array(
    "appid" -> CLIOption[Int]("<the distributed shell appid>", required = true),
    "command" -> CLIOption[String]("<shell command>", required = true)
  )

  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val config = parse(args)
    val context = ClientContext(akkaConf)
    implicit val system = context.system
    implicit val dispatcher = system.dispatcher
    val appid = config.getInt("appid")
    val command = config.getString("command")
    val appMaster = context.resolveAppID(appid)
    LOG.info(s"Resolved appMaster $appid address $appMaster, sending command $command")
    val future = (appMaster ? ShellCommand(command)).map { result =>
      LOG.info(s"Result: \n$result")
      context.close()
    }
    Await.ready(future, Duration(60, TimeUnit.SECONDS))
  }
} 
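Await.ready above (with a finite 60-second Duration) blocks until the future completes but, unlike Await.result, returns the future itself rather than extracting its value. A sketch of the difference:

import java.util.concurrent.TimeUnit
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

object ReadyVsResult extends App {
  val f = Future { 21 * 2 }
  val completed = Await.ready(f, Duration(60, TimeUnit.SECONDS))
  println(completed.value)                   // Some(Success(42))
  println(Await.result(f, Duration(1, "s"))) // 42, or rethrows the failure
}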
Example 70
Source File: ShellExecutorSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.examples.distributedshell

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.sys.process._
import scala.util.{Failure, Success, Try}

import akka.actor.{ActorSystem, Props}
import akka.testkit.TestProbe
import org.scalatest.{Matchers, WordSpec}

import org.apache.gearpump.cluster.appmaster.WorkerInfo
import org.apache.gearpump.cluster.scheduler.Resource
import org.apache.gearpump.cluster.worker.WorkerId
import org.apache.gearpump.cluster.{ExecutorContext, TestUtil, UserConfig}
import org.apache.gearpump.examples.distributedshell.DistShellAppMaster.{ShellCommand, ShellCommandResult}

class ShellExecutorSpec extends WordSpec with Matchers {

  "ShellExecutor" should {
    "execute the shell command and return the result" in {
      val executorId = 1
      val workerId = WorkerId(2, 0L)
      val appId = 0
      val appName = "app"
      val resource = Resource(1)
      implicit val system = ActorSystem("ShellExecutor", TestUtil.DEFAULT_CONFIG)
      val mockMaster = TestProbe()(system)
      val worker = TestProbe()
      val workerInfo = WorkerInfo(workerId, worker.ref)
      val executorContext = ExecutorContext(executorId, workerInfo, appId, appName,
        mockMaster.ref, resource)
      val executor = system.actorOf(Props(classOf[ShellExecutor], executorContext,
        UserConfig.empty))

      val process = Try(s"ls /".!!)
      val result = process match {
        case Success(msg) => msg
        case Failure(ex) => ex.getMessage
      }
      executor.tell(ShellCommand("ls /"), mockMaster.ref)
      assert(mockMaster.receiveN(1).head.asInstanceOf[ShellCommandResult].equals(
        ShellCommandResult(executorId, result)))

      system.terminate()
      Await.result(system.whenTerminated, Duration.Inf)
    }
  }
} 
Example 71
Source File: DistShellAppMasterSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.examples.distributedshell

import scala.concurrent.Await
import scala.concurrent.duration.Duration

import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestProbe}
import org.scalatest.{BeforeAndAfter, Matchers, WordSpec}

import org.apache.gearpump.cluster.AppMasterToMaster.{GetAllWorkers, RegisterAppMaster, RequestResource}
import org.apache.gearpump.cluster.AppMasterToWorker.LaunchExecutor
import org.apache.gearpump.cluster.MasterToAppMaster.{AppMasterRegistered, ResourceAllocated, WorkerList}
import org.apache.gearpump.cluster._
import org.apache.gearpump.cluster.appmaster.{AppMasterRuntimeEnvironment, ApplicationRuntimeInfo}
import org.apache.gearpump.cluster.scheduler.{Relaxation, Resource, ResourceAllocation, ResourceRequest}
import org.apache.gearpump.cluster.worker.WorkerId
import org.apache.gearpump.util.ActorSystemBooter.RegisterActorSystem
import org.apache.gearpump.util.ActorUtil

class DistShellAppMasterSpec extends WordSpec with Matchers with BeforeAndAfter {
  implicit val system = ActorSystem("AppMasterSpec", TestUtil.DEFAULT_CONFIG)
  val mockMaster = TestProbe()(system)
  val mockWorker1 = TestProbe()(system)
  val masterProxy = mockMaster.ref
  val appId = 0
  val userName = "test"
  val masterExecutorId = 0
  val workerList = List(WorkerId(1, 0L), WorkerId(2, 0L), WorkerId(3, 0L))
  val resource = Resource(1)
  val appJar = None
  val appDescription = AppDescription("app0", classOf[DistShellAppMaster].getName, UserConfig.empty)

  "DistributedShell AppMaster" should {
    "launch one ShellTask on each worker" in {
      val appMasterInfo = ApplicationRuntimeInfo(appId, appName = appId.toString)
      val appMasterContext = AppMasterContext(appId, userName, resource, null, appJar, masterProxy)
      TestActorRef[DistShellAppMaster](
        AppMasterRuntimeEnvironment.props(List(masterProxy.path), appDescription,
          appMasterContext))
      mockMaster.expectMsgType[RegisterAppMaster]
      mockMaster.reply(AppMasterRegistered(appId))
      // The DistributedShell AppMaster asks for worker list from Master.
      mockMaster.expectMsg(GetAllWorkers)
      mockMaster.reply(WorkerList(workerList))
      // After worker list is ready, DistributedShell AppMaster requests resource on each worker
      workerList.foreach { workerId =>
        mockMaster.expectMsg(RequestResource(appId, ResourceRequest(Resource(1), workerId,
          relaxation = Relaxation.SPECIFICWORKER)))
      }
      mockMaster.reply(ResourceAllocated(
        Array(ResourceAllocation(resource, mockWorker1.ref, WorkerId(1, 0L)))))
      mockWorker1.expectMsgClass(classOf[LaunchExecutor])
      mockWorker1.reply(RegisterActorSystem(ActorUtil.getSystemAddress(system).toString))
    }
  }

  after {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
} 
Example 72
Source File: NumberGeneratorProcessorSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.examples.state.processor

import java.time.Instant

import org.apache.gearpump.streaming.source.Watermark

import scala.concurrent.Await
import scala.concurrent.duration.Duration

import akka.actor.ActorSystem
import akka.testkit.TestProbe
import org.mockito.Mockito._
import org.mockito.{Matchers => MockitoMatchers}
import org.scalatest.{Matchers, WordSpec}

import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.MockUtil

class NumberGeneratorProcessorSpec extends WordSpec with Matchers {
  "NumberGeneratorProcessor" should {
    "send random numbers" in {

      val taskContext = MockUtil.mockTaskContext

      implicit val system = ActorSystem("test")

      val mockTaskActor = TestProbe()

      // Mock self ActorRef
      when(taskContext.self).thenReturn(mockTaskActor.ref)

      val conf = UserConfig.empty
      val genNum = new NumberGeneratorProcessor(taskContext, conf)
      genNum.onStart(Instant.EPOCH)
      mockTaskActor.expectMsgType[Watermark]

      genNum.onNext(Message("next"))
      verify(taskContext).output(MockitoMatchers.any[Message])

      system.terminate()
      Await.result(system.whenTerminated, Duration.Inf)
    }
  }
} 
Example 73
Source File: SplitSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.examples.wordcount

import java.time.Instant

import akka.actor.ActorSystem
import org.apache.gearpump.Message
import akka.testkit.TestProbe
import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.streaming.MockUtil
import org.mockito.Mockito._
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class SplitSpec extends WordSpec with Matchers {

  "Split" should {
    "split the text and deliver to next task" in {

      val taskContext = MockUtil.mockTaskContext

      implicit val system: ActorSystem = ActorSystem("test", TestUtil.DEFAULT_CONFIG)

      val mockTaskActor = TestProbe()

      when(taskContext.self).thenReturn(mockTaskActor.ref)

      val split = new Split
      split.open(taskContext, Instant.now())
      split.read() shouldBe a[Message]
      split.close()
      split.getWatermark
      system.terminate()
      Await.result(system.whenTerminated, Duration.Inf)
    }
  }
} 
Example 74
Source File: StormProducer.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.experiments.storm.producer

import java.time.Instant
import java.util.concurrent.TimeUnit

import akka.actor.Actor.Receive
import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.experiments.storm.topology.GearpumpStormComponent.GearpumpSpout
import org.apache.gearpump.experiments.storm.util._
import org.apache.gearpump.streaming.source.Watermark
import org.apache.gearpump.streaming.task._

import scala.concurrent.duration.Duration

object StormProducer {
  private[storm] val TIMEOUT = Message("timeout")
}


private[storm] class StormProducer(gearpumpSpout: GearpumpSpout,
    taskContext: TaskContext, conf: UserConfig)
  extends Task(taskContext, conf) {
  import org.apache.gearpump.experiments.storm.producer.StormProducer._

  def this(taskContext: TaskContext, conf: UserConfig) = {
    this(StormUtil.getGearpumpStormComponent(taskContext, conf)(taskContext.system)
      .asInstanceOf[GearpumpSpout], taskContext, conf)
  }

  private val timeoutMillis = gearpumpSpout.getMessageTimeout

  override def onStart(startTime: Instant): Unit = {
    gearpumpSpout.start(startTime)
    if (gearpumpSpout.ackEnabled) {
      getCheckpointClock
    }
    timeoutMillis.foreach(scheduleTimeout)
    self ! Watermark(Instant.now)
  }

  override def onNext(msg: Message): Unit = {
    msg match {
      case TIMEOUT =>
        timeoutMillis.foreach { timeout =>
          gearpumpSpout.timeout(timeout)
          scheduleTimeout(timeout)
        }
      case _ =>
        gearpumpSpout.next(msg)
    }
    self ! Watermark(Instant.now)
  }

  override def receiveUnManagedMessage: Receive = {
    case CheckpointClock(optClock) =>
      optClock.foreach { clock =>
        gearpumpSpout.checkpoint(clock)
      }
      getCheckpointClock()
  }

  def getCheckpointClock(): Unit = {
    taskContext.scheduleOnce(Duration(StormConstants.CHECKPOINT_INTERVAL_MILLIS,
      TimeUnit.MILLISECONDS))(taskContext.appMaster ! GetCheckpointClock)
  }

  private def scheduleTimeout(timeout: Long): Unit = {
    taskContext.scheduleOnce(Duration(timeout, TimeUnit.MILLISECONDS)) {
      self ! TIMEOUT
    }
  }
} 
Example 75
Source File: StormProcessor.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.experiments.storm.processor

import java.time.Instant
import java.util.concurrent.TimeUnit

import scala.concurrent.duration.Duration
import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.experiments.storm.topology.GearpumpStormComponent.GearpumpBolt
import org.apache.gearpump.experiments.storm.util._
import org.apache.gearpump.streaming.task._

object StormProcessor {
  private[storm] val TICK = Message("tick")
}


private[storm] class StormProcessor(gearpumpBolt: GearpumpBolt,
    taskContext: TaskContext, conf: UserConfig)
  extends Task(taskContext, conf) {
  import org.apache.gearpump.experiments.storm.processor.StormProcessor._

  def this(taskContext: TaskContext, conf: UserConfig) = {
    this(StormUtil.getGearpumpStormComponent(taskContext, conf)(taskContext.system)
      .asInstanceOf[GearpumpBolt], taskContext, conf)
  }

  private val freqOpt = gearpumpBolt.getTickFrequency

  override def onStart(startTime: Instant): Unit = {
    gearpumpBolt.start(startTime)
    freqOpt.foreach(scheduleTick)
  }

  override def onNext(message: Message): Unit = {
    message match {
      case TICK =>
        freqOpt.foreach { freq =>
          gearpumpBolt.tick(freq)
          scheduleTick(freq)
        }
      case _ =>
        gearpumpBolt.next(message)
    }
  }

  private def scheduleTick(freq: Int): Unit = {
    taskContext.scheduleOnce(Duration(freq, TimeUnit.SECONDS)) {
      self ! TICK
    }
  }
} 
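Both Storm examples schedule follow-up messages with taskContext.scheduleOnce and a Duration built from a TimeUnit. Akka's own scheduler has the same shape and requires a FiniteDuration, which Duration(Long, TimeUnit) provides. A stand-alone sketch (the actor system name is arbitrary):

import akka.actor.ActorSystem
import scala.concurrent.duration._

object SchedulerSketch extends App {
  val system = ActorSystem("sketch")
  import system.dispatcher // ExecutionContext for the scheduled block
  system.scheduler.scheduleOnce(Duration(1, SECONDS)) {
    println("tick")
    system.terminate()
  }
}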
Example 76
Source File: UIServiceSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.experiments.yarn.appmaster

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.TestProbe
import com.typesafe.config.ConfigFactory
import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.experiments.yarn.appmaster.UIServiceSpec.{Info, MockUI}
import org.apache.gearpump.transport.HostPort
import org.apache.gearpump.util.Constants
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class UIServiceSpec extends FlatSpec with Matchers with BeforeAndAfterAll {
  implicit var system: ActorSystem = null

  override def beforeAll(): Unit = {
    system = ActorSystem(getClass.getSimpleName, TestUtil.DEFAULT_CONFIG)
  }

  override def afterAll(): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }

  it should "start UI server correctly" in {
    val probe = TestProbe()
    val masters = List(
      HostPort("127.0.0.1", 3000),
      HostPort("127.0.0.1", 3001),
      HostPort("127.0.0.1", 3002)
    )
    val host = "local"
    val port = 8091

    val ui = system.actorOf(Props(new MockUI(masters, host, port, probe.ref)))

    probe.expectMsgPF() {
      case info: Info => {
        assert(info.masterHost == "127.0.0.1")
        assert(info.masterPort == 3000)
        val conf = ConfigFactory.parseFile(new java.io.File(info.configFile))
        assert(conf.getString(Constants.GEARPUMP_SERVICE_HOST) == host)
        assert(conf.getString(Constants.GEARPUMP_SERVICE_HTTP) == "8091")
        assert(conf.getString(Constants.NETTY_TCP_HOSTNAME) == host)
      }
    }

    system.stop(ui)
  }
}

object UIServiceSpec {

  case class Info(supervisor: String, masterHost: String, masterPort: Int, configFile: String)

  class MockUI(masters: List[HostPort], host: String, port: Int, probe: ActorRef)
    extends UIService(masters, host, port) {

    override def launch(
        supervisor: String, masterHost: String, masterPort: Int, configFile: String): Unit = {
      probe ! Info(supervisor, masterHost, masterPort, configFile)
    }
  }
} 
Example 77
Source File: StreamAppSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.dsl.scalaapi

import akka.actor.ActorSystem
import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.cluster.client.ClientContext
import org.apache.gearpump.streaming.dsl.scalaapi
import org.apache.gearpump.streaming.partitioner.PartitionerDescription
import org.apache.gearpump.streaming.source.DataSourceTask
import org.apache.gearpump.streaming.{ProcessorDescription, StreamApplication}
import org.apache.gearpump.util.Graph
import org.mockito.Mockito.when
import org.scalatest._
import org.scalatest.mock.MockitoSugar

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class StreamAppSpec extends FlatSpec with Matchers with BeforeAndAfterAll with MockitoSugar {

  implicit var system: ActorSystem = _

  override def beforeAll(): Unit = {
    system = ActorSystem("test", TestUtil.DEFAULT_CONFIG)
  }

  override def afterAll(): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }

  it should "be able to generate multiple new streams" in {
    val context: ClientContext = mock[ClientContext]
    when(context.system).thenReturn(system)

    val dsl = StreamApp("dsl", context)
    dsl.source(List("A"), 2, "A") shouldBe a [scalaapi.Stream[_]]
    dsl.source(List("B"), 3, "B") shouldBe a [scalaapi.Stream[_]]

    val application = dsl.plan()
    application shouldBe a [StreamApplication]
    application.name shouldBe "dsl"
    val dag = application.userConfig
      .getValue[Graph[ProcessorDescription, PartitionerDescription]](StreamApplication.DAG).get
    dag.getVertices.size shouldBe 2
    dag.getVertices.foreach { processor =>
      processor.taskClass shouldBe classOf[DataSourceTask[_, _]].getName
      if (processor.description == "A") {
        processor.parallelism shouldBe 2
      } else if (processor.description == "B") {
        processor.parallelism shouldBe 3
      } else {
        fail(s"undefined source ${processor.description}")
      }
    }
  }
} 
Example 78
Source File: DagManagerSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.appmaster

import akka.actor.{ActorSystem, Props}
import akka.testkit.TestProbe
import org.apache.gearpump.cluster.{TestUtil, UserConfig}
import org.apache.gearpump.streaming.partitioner.{HashPartitioner, Partitioner}
import org.apache.gearpump.streaming.appmaster.DagManager.{DAGOperationFailed, DAGOperationSuccess, GetLatestDAG, GetTaskLaunchData, LatestDAG, NewDAGDeployed, ReplaceProcessor, TaskLaunchData, WatchChange}
import org.apache.gearpump.streaming.task.{Subscriber, TaskActor}
import org.apache.gearpump.streaming._
import org.apache.gearpump.util.Graph
import org.apache.gearpump.util.Graph._
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class DagManagerSpec extends WordSpecLike with Matchers with BeforeAndAfterAll {

  val hash = Partitioner[HashPartitioner]
  val task1 = ProcessorDescription(id = 1, taskClass = classOf[TaskActor].getName, parallelism = 1)
  val task2 = ProcessorDescription(id = 2, taskClass = classOf[TaskActor].getName, parallelism = 1)
  val graph = Graph(task1 ~ hash ~> task2)
  val dag = DAG(graph)
  implicit var system: ActorSystem = null
  val appId = 0
  lazy val userConfig = UserConfig.empty.withValue(StreamApplication.DAG, graph)

  "DagManager" should {
    import org.apache.gearpump.streaming.appmaster.ClockServiceSpec.Store
    "maintain the dags properly" in {
      val store = new Store

      val dagManager = system.actorOf(Props(new DagManager(appId, userConfig, store, Some(dag))))
      val client = TestProbe()
      client.send(dagManager, GetLatestDAG)
      client.expectMsg(LatestDAG(dag))

      client.send(dagManager, GetTaskLaunchData(dag.version, task1.id, null))
      val task1LaunchData = TaskLaunchData(task1, Subscriber.of(task1.id, dag))
      client.expectMsg(task1LaunchData)

      val task2LaunchData = TaskLaunchData(task2, Subscriber.of(task2.id, dag))
      client.send(dagManager, GetTaskLaunchData(dag.version, task2.id, null))
      client.expectMsg(task2LaunchData)

      val watcher = TestProbe()
      client.send(dagManager, WatchChange(watcher.ref))
      val task3 = task2.copy(id = 3, life = LifeTime(100, Long.MaxValue))

      client.send(dagManager, ReplaceProcessor(task2.id, task3, inheritConf = false))
      client.expectMsg(DAGOperationSuccess)

      client.send(dagManager, GetLatestDAG)
      val newDag = client.expectMsgPF() {
        case LatestDAG(latestDag) => latestDag
      }
      assert(newDag.processors.contains(task3.id))
      watcher.expectMsgType[LatestDAG]

      val task4 = task3.copy(id = 4)
      client.send(dagManager, ReplaceProcessor(task3.id, task4, inheritConf = false))
      client.expectMsgType[DAGOperationFailed]

      client.send(dagManager, NewDAGDeployed(newDag.version))
      client.send(dagManager, ReplaceProcessor(task3.id, task4, inheritConf = false))
      client.expectMsg(DAGOperationSuccess)
    }

    "retrieve last stored dag properly" in {
      val store = new Store
      val newGraph = Graph(task1 ~ hash ~> task2)
      val newDag = DAG(newGraph)
      val dagManager = system.actorOf(Props(new DagManager(appId, userConfig, store, Some(newDag))))
      dagManager ! NewDAGDeployed(0)
      val client = TestProbe()
      client.send(dagManager, GetLatestDAG)
      client.expectMsgType[LatestDAG].dag shouldBe newDag
    }
  }

  override def afterAll(): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }

  override def beforeAll(): Unit = {
    this.system = ActorSystem("DagManagerSpec", TestUtil.DEFAULT_CONFIG)
  }
} 
Example 79
Source File: ExecutorSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.executor

import scala.concurrent.Await
import scala.concurrent.duration.Duration

import akka.actor.{ActorSystem, Props}
import akka.testkit.TestProbe
import org.mockito.Matchers._
import org.mockito.Mockito.{times, _}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import org.apache.gearpump.cluster.appmaster.WorkerInfo
import org.apache.gearpump.cluster.scheduler.Resource
import org.apache.gearpump.cluster.worker.WorkerId
import org.apache.gearpump.cluster.{ExecutorContext, TestUtil, UserConfig}
import org.apache.gearpump.streaming.AppMasterToExecutor._
import org.apache.gearpump.streaming.ExecutorToAppMaster.RegisterTask
import org.apache.gearpump.streaming.appmaster.TaskRegistry.TaskLocations
import org.apache.gearpump.streaming.executor.TaskLauncherSpec.MockTask
import org.apache.gearpump.streaming.task.{Subscriber, TaskId}
import org.apache.gearpump.streaming.{LifeTime, ProcessorDescription}
import org.apache.gearpump.transport.HostPort

class ExecutorSpec extends FlatSpec with Matchers with BeforeAndAfterAll {
  val appId = 0
  val executorId = 0
  val workerId = WorkerId(0, 0L)
  var appMaster: TestProbe = null
  implicit var system: ActorSystem = null
  val userConf = UserConfig.empty

  override def beforeAll(): Unit = {
    system = ActorSystem("TaskLauncherSpec", TestUtil.DEFAULT_CONFIG)
    appMaster = TestProbe()
  }

  override def afterAll(): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }

  it should "call launcher to launch task" in {
    val worker = TestProbe()
    val workerInfo = WorkerInfo(workerId, worker.ref)
    val executorContext = ExecutorContext(executorId, workerInfo, appId, "app",
      appMaster.ref, Resource(2))
    val taskLauncher = mock(classOf[ITaskLauncher])
    val executor = system.actorOf(Props(new Executor(executorContext, userConf, taskLauncher)))
    val processor = ProcessorDescription(id = 0, taskClass = classOf[MockTask].getName,
      parallelism = 2)
    val taskIds = List(TaskId(0, 0), TaskId(0, 1))
    val launchTasks = LaunchTasks(taskIds, dagVersion = 0, processor, List.empty[Subscriber])

    val task = TestProbe()
    when(taskLauncher.launch(any(), any(), any(), any(), any()))
      .thenReturn(taskIds.map((_, task.ref)).toMap)

    val client = TestProbe()
    client.send(executor, launchTasks)
    client.expectMsg(TasksLaunched)

    verify(taskLauncher, times(1)).launch(any(), any(), any(), any(), any())

    executor ! RegisterTask(TaskId(0, 0), executorId, HostPort("localhost:80"))
    executor ! RegisterTask(TaskId(0, 1), executorId, HostPort("localhost:80"))

    executor ! TaskRegistered(TaskId(0, 0), 0, 0)

    task.expectMsgType[TaskRegistered]

    executor ! TaskRegistered(TaskId(0, 1), 0, 0)

    task.expectMsgType[TaskRegistered]

    executor ! TaskLocationsReady(TaskLocations(Map.empty), dagVersion = 0)
    executor ! StartAllTasks(dagVersion = 0)

    task.expectMsgType[StartTask]
    task.expectMsgType[StartTask]

    val changeTasks = ChangeTasks(taskIds, dagVersion = 1, life = LifeTime(0, Long.MaxValue),
      List.empty[Subscriber])

    client.send(executor, changeTasks)
    client.expectMsgType[TasksChanged]

    executor ! TaskLocationsReady(TaskLocations(Map.empty), 1)
    executor ! StartAllTasks(dagVersion = 1)

    task.expectMsgType[ChangeTask]
    task.expectMsgType[ChangeTask]
  }
} 
Example 80
Source File: TaskLauncherSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.executor

import scala.concurrent.Await
import scala.concurrent.duration.Duration

import akka.actor.{Actor, ActorSystem}
import akka.testkit.TestProbe
import org.scalatest._

import org.apache.gearpump.cluster.{TestUtil, UserConfig}
import org.apache.gearpump.serializer.SerializationFramework
import org.apache.gearpump.streaming.ProcessorDescription
import org.apache.gearpump.streaming.executor.TaskLauncher.TaskArgument
import org.apache.gearpump.streaming.executor.TaskLauncherSpec.{MockTask, MockTaskActor}
import org.apache.gearpump.streaming.task.{Task, TaskContext, TaskContextData, TaskId, TaskWrapper}

class TaskLauncherSpec extends FlatSpec with Matchers with BeforeAndAfterAll {
  val appId = 0
  val executorId = 0
  var appMaster: TestProbe = null
  implicit var system: ActorSystem = null
  val userConf = UserConfig.empty

  override def beforeAll(): Unit = {
    system = ActorSystem("TaskLauncherSpec", TestUtil.DEFAULT_CONFIG)
    appMaster = TestProbe()
  }

  override def afterAll(): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }

  it should "able to launch tasks" in {
    val launcher = new TaskLauncher(appId, "app", executorId, appMaster.ref,
      userConf, classOf[MockTaskActor])
    val taskIds = List(TaskId(0, 0), TaskId(0, 1))
    val processor = ProcessorDescription(id = 0, taskClass = classOf[MockTask].getName,
      parallelism = 2)
    val argument = TaskArgument(0, processor, null)

    val tasks = launcher.launch(taskIds, argument, system, null,
      "gearpump.shared-thread-pool-dispatcher")
    tasks.keys.toSet shouldBe taskIds.toSet
  }
}

object TaskLauncherSpec {
  class MockTaskActor(
      val taskId: TaskId,
      val taskContextData: TaskContextData,
      userConf: UserConfig,
      val task: TaskWrapper,
      serializer: SerializationFramework) extends Actor {
    def receive: Receive = null
  }

  class MockTask(taskContext: TaskContext, userConf: UserConfig)
    extends Task(taskContext, userConf) {
  }
} 
Example 81
Source File: Worker.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.cluster.main

import akka.actor.{ActorSystem, Props}
import org.apache.gearpump.cluster.ClusterConfig
import org.apache.gearpump.cluster.master.MasterProxy
import org.apache.gearpump.cluster.worker.{Worker => WorkerActor}
import org.apache.gearpump.transport.HostPort
import org.apache.gearpump.util.Constants._
import org.apache.gearpump.util.LogUtil.ProcessType
import org.apache.gearpump.util.{AkkaApp, LogUtil}
import org.slf4j.Logger

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration.Duration


object Worker extends AkkaApp with ArgumentsParser {
  protected override def akkaConfig = ClusterConfig.worker()

  override val description = "Start a worker daemon"

  var LOG: Logger = LogUtil.getLogger(getClass)

  private def uuid = java.util.UUID.randomUUID.toString

  def main(akkaConf: Config, args: Array[String]): Unit = {
    val id = uuid

    this.LOG = {
      LogUtil.loadConfiguration(akkaConf, ProcessType.WORKER)
      // Delay creation of LOG instance to avoid creating an empty log file as we
      // reset the log file name here
      LogUtil.getLogger(getClass)
    }

    val system = ActorSystem(id, akkaConf)

    val masterAddress = akkaConf.getStringList(GEARPUMP_CLUSTER_MASTERS).asScala.map { address =>
      val hostAndPort = address.split(":")
      HostPort(hostAndPort(0), hostAndPort(1).toInt)
    }

    LOG.info(s"Trying to connect to masters " + masterAddress.mkString(",") + "...")
    val masterProxy = system.actorOf(MasterProxy.props(masterAddress), s"masterproxy${system.name}")

    system.actorOf(Props(classOf[WorkerActor], masterProxy),
      classOf[WorkerActor].getSimpleName + id)

    Await.result(system.whenTerminated, Duration.Inf)
  }
} 
Example 82
Source File: Local.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.cluster.main

import akka.actor.{ActorSystem, Props}
import com.typesafe.config.ConfigValueFactory
import org.apache.gearpump.cluster.ClusterConfig
import org.apache.gearpump.cluster.master.{Master => MasterActor}
import org.apache.gearpump.cluster.worker.{Worker => WorkerActor}
import org.apache.gearpump.util.Constants._
import org.apache.gearpump.util.LogUtil.ProcessType
import org.apache.gearpump.util.{ActorUtil, Constants, LogUtil, MasterClientCommand, Util}
import org.slf4j.Logger

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration.Duration

object Local extends MasterClientCommand with ArgumentsParser {
  override def akkaConfig: Config = ClusterConfig.master()

  var LOG: Logger = LogUtil.getLogger(getClass)

  override val options: Array[(String, CLIOption[Any])] =
    Array("sameprocess" -> CLIOption[Boolean]("", required = false, defaultValue = Some(false)),
      "workernum" -> CLIOption[Int]("<how many workers to start>", required = false,
        defaultValue = Some(2)))

  override val description = "Start a local cluster"

  def main(akkaConf: Config, args: Array[String]): Unit = {

    this.LOG = {
      LogUtil.loadConfiguration(akkaConf, ProcessType.LOCAL)
      LogUtil.getLogger(getClass)
    }

    val config = parse(args)
    if (null != config) {
      local(config.getInt("workernum"), config.getBoolean("sameprocess"), akkaConf)
    }
  }

  def local(workerCount: Int, sameProcess: Boolean, akkaConf: Config): Unit = {
    if (sameProcess) {
      LOG.info("Starting local in same process")
      System.setProperty("LOCAL", "true")
    }
    val masters = akkaConf.getStringList(Constants.GEARPUMP_CLUSTER_MASTERS)
      .asScala.flatMap(Util.parseHostList)
    val local = akkaConf.getString(Constants.GEARPUMP_HOSTNAME)

    if (masters.size != 1 && masters.head.host != local) {
      LOG.error(s"The ${Constants.GEARPUMP_CLUSTER_MASTERS} is not match " +
        s"with ${Constants.GEARPUMP_HOSTNAME}")
    } else {

      val hostPort = masters.head
      implicit val system = ActorSystem(MASTER, akkaConf.
        withValue("akka.remote.netty.tcp.port", ConfigValueFactory.fromAnyRef(hostPort.port))
      )

      val master = system.actorOf(Props[MasterActor], MASTER)
      val masterPath = ActorUtil.getSystemAddress(system).toString + s"/user/$MASTER"

      0.until(workerCount).foreach { id =>
        system.actorOf(Props(classOf[WorkerActor], master), classOf[WorkerActor].getSimpleName + id)
      }

      Await.result(system.whenTerminated, Duration.Inf)
    }
  }
} 
Example 83
Source File: EmbeddedCluster.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.cluster.embedded

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import akka.actor.{ActorRef, ActorSystem, Props}
import com.typesafe.config.{Config, ConfigValueFactory}
import org.apache.gearpump.cluster.ClusterConfig
import org.apache.gearpump.cluster.master.{Master => MasterActor}
import org.apache.gearpump.cluster.worker.{Worker => WorkerActor}
import org.apache.gearpump.util.Constants.{GEARPUMP_CLUSTER_EXECUTOR_WORKER_SHARE_SAME_PROCESS, GEARPUMP_CLUSTER_MASTERS, GEARPUMP_METRIC_ENABLED, MASTER}
import org.apache.gearpump.util.{LogUtil, Util}


class EmbeddedCluster(inputConfig: Config) {
  private val LOG = LogUtil.getLogger(getClass)
  private val workerCount: Int = 1
  private val port = Util.findFreePort().get
  private[embedded] val config: Config = getConfig(inputConfig, port)
  private[embedded] val system: ActorSystem = ActorSystem(MASTER, config)
  private[embedded] val master: ActorRef = system.actorOf(Props[MasterActor], MASTER)

  0.until(workerCount).foreach { id =>
    system.actorOf(Props(classOf[WorkerActor], master), classOf[WorkerActor].getSimpleName + id)
  }

  LOG.info("=================================")
  LOG.info("Local Cluster is started at: ")
  LOG.info(s"                 127.0.0.1:$port")
  LOG.info(s"To see UI, run command: services -master 127.0.0.1:$port")

  private def getConfig(inputConfig: Config, port: Int): Config = {
    val config = inputConfig.
      withValue("akka.remote.netty.tcp.port", ConfigValueFactory.fromAnyRef(port)).
      withValue(GEARPUMP_CLUSTER_MASTERS,
        ConfigValueFactory.fromIterable(List(s"127.0.0.1:$port").asJava)).
      withValue(GEARPUMP_CLUSTER_EXECUTOR_WORKER_SHARE_SAME_PROCESS,
        ConfigValueFactory.fromAnyRef(true)).
      withValue(GEARPUMP_METRIC_ENABLED, ConfigValueFactory.fromAnyRef(true)).
      withValue("akka.actor.provider",
        ConfigValueFactory.fromAnyRef("akka.cluster.ClusterActorRefProvider"))
    config
  }

  def stop(): Unit = {
    system.stop(master)
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
}

object EmbeddedCluster {
  def apply(): EmbeddedCluster = {
    new EmbeddedCluster(ClusterConfig.master())
  }
} 
Example 84
Source File: JarStoreClient.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.jarstore

import java.io.File
import java.util.concurrent.TimeUnit
import scala.collection.JavaConverters._
import scala.concurrent.duration.Duration
import scala.concurrent.Await

import akka.pattern.ask
import akka.actor.{ActorSystem, ActorRef}
import com.typesafe.config.Config
import org.apache.gearpump.cluster.master.MasterProxy
import org.apache.gearpump.util.{Util, Constants, LogUtil}
import org.slf4j.Logger

import org.apache.gearpump.cluster.ClientToMaster.{GetJarStoreServer, JarStoreServerAddress}
import scala.concurrent.{Future, ExecutionContext}

class JarStoreClient(config: Config, system: ActorSystem) {
  private def LOG: Logger = LogUtil.getLogger(getClass)
  private implicit val timeout = Constants.FUTURE_TIMEOUT
  private implicit def dispatcher: ExecutionContext = system.dispatcher

  private val master: ActorRef = {
    val masters = config.getStringList(Constants.GEARPUMP_CLUSTER_MASTERS)
      .asScala.flatMap(Util.parseHostList)
    system.actorOf(MasterProxy.props(masters), s"masterproxy${Util.randInt()}")
  }

  private lazy val client = (master ? GetJarStoreServer).asInstanceOf[Future[JarStoreServerAddress]]
    .map { address =>
      val client = new FileServer.Client(system, address.url)
      client
    }

  
  def copyFromLocal(localFile: File): FilePath = {
    val future = client.flatMap(_.upload(localFile))
    Await.result(future, Duration(60, TimeUnit.SECONDS))
  }
} 
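copyFromLocal above bounds the upload with a 60-second Duration; if the future never completed, Await.result would throw a TimeoutException once that window elapsed. A sketch of the failure mode using a never-completing promise and a short timeout:

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Promise, TimeoutException}

object AwaitTimeout extends App {
  val never = Promise[Int]().future // never completed
  try Await.result(never, Duration(100, "ms"))
  catch { case _: TimeoutException => println("timed out as expected") }
}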
Example 85
Source File: SerializerSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.serializer

import akka.actor.{ActorSystem, ExtendedActorSystem}

import com.esotericsoftware.kryo.io.{Input, Output}
import com.esotericsoftware.kryo.{Kryo, Serializer => KryoSerializer}
import com.typesafe.config.{ConfigFactory, ConfigValueFactory}

import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.serializer.SerializerSpec._

import org.scalatest.mock.MockitoSugar
import org.scalatest.{FlatSpec, Matchers}

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration.Duration


class SerializerSpec extends FlatSpec with Matchers with MockitoSugar {
  val config = ConfigFactory.empty.withValue("gearpump.serializers",
    ConfigValueFactory.fromAnyRef(Map(classOf[ClassA].getName -> classOf[ClassASerializer].getName,
      classOf[ClassB].getName -> classOf[ClassBSerializer].getName).asJava))

  "GearpumpSerialization" should "register custom serializers" in {
    val serialization = new GearpumpSerialization(config)
    val kryo = new Kryo
    serialization.customize(kryo)

    val forB = kryo.getRegistration(classOf[ClassB])
    assert(forB.getSerializer.isInstanceOf[ClassBSerializer])

    val forA = kryo.getRegistration(classOf[ClassA])
    assert(forA.getSerializer.isInstanceOf[ClassASerializer])
  }

  "FastKryoSerializer" should "serialize correctly" in {
    val myConfig = config.withFallback(TestUtil.DEFAULT_CONFIG.withoutPath("gearpump.serializers"))
    val system = ActorSystem("my", myConfig)

    val serializer = new FastKryoSerializer(system.asInstanceOf[ExtendedActorSystem])

    val bytes = serializer.serialize(new ClassA)
    val anotherA = serializer.deserialize(bytes)

    assert(anotherA.isInstanceOf[ClassA])
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
}

object SerializerSpec {

  class ClassA {}

  class ClassASerializer extends KryoSerializer[ClassA] {
    override def write(kryo: Kryo, output: Output, `object`: ClassA): Unit = {
      output.writeString(classOf[ClassA].getName)
    }

    override def read(kryo: Kryo, input: Input, `type`: Class[ClassA]): ClassA = {
      val className = input.readString()
      Class.forName(className).newInstance().asInstanceOf[ClassA]
    }
  }

  class ClassB {}

  class ClassBSerializer extends KryoSerializer[ClassA] {
    override def write(kryo: Kryo, output: Output, `object`: ClassA): Unit = {}

    override def read(kryo: Kryo, input: Input, `type`: Class[ClassA]): ClassA = {
      null
    }
  }
} 
Example 86
Source File: RunningApplicationSpec.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.cluster.client

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.testkit.TestProbe
import akka.util.Timeout
import org.apache.gearpump.cluster.ClientToMaster.{ResolveAppId, ShutdownApplication}
import org.apache.gearpump.cluster.MasterToClient.{ResolveAppIdResult, ShutdownApplicationResult}
import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.cluster.client.RunningApplicationSpec.{MockAskAppMasterRequest, MockAskAppMasterResponse}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success}
import scala.concurrent.ExecutionContext.Implicits.global

class RunningApplicationSpec extends FlatSpec with Matchers with BeforeAndAfterAll {
  implicit var system: ActorSystem = _

  override def beforeAll(): Unit = {
    system = ActorSystem("test", TestUtil.DEFAULT_CONFIG)
  }

  override def afterAll(): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }

  "RunningApplication" should "be able to shutdown application" in {
    val errorMsg = "mock exception"
    val master = TestProbe()
    val timeout = Timeout(90, TimeUnit.SECONDS)
    val application = new RunningApplication(1, master.ref, timeout)
    Future {
      application.shutDown()
    }
    master.expectMsg(ShutdownApplication(1))
    master.reply(ShutdownApplicationResult(Success(1)))

    val result = Future {
      intercept[Exception] {
        application.shutDown()
      }
    }
    master.expectMsg(ShutdownApplication(1))
    master.reply(ShutdownApplicationResult(Failure(new Exception(errorMsg))))
    val exception = Await.result(result, Duration.Inf)
    assert(exception.getMessage.equals(errorMsg))
  }

  "RunningApplication" should "be able to ask appmaster" in {
    val master = TestProbe()
    val appMaster = TestProbe()
    val appId = 1
    val timeout = Timeout(90, TimeUnit.SECONDS)
    val request = MockAskAppMasterRequest("request")
    val application = new RunningApplication(appId, master.ref, timeout)
    val future = application.askAppMaster[MockAskAppMasterResponse](request)
    master.expectMsg(ResolveAppId(appId))
    master.reply(ResolveAppIdResult(Success(appMaster.ref)))
    appMaster.expectMsg(MockAskAppMasterRequest("request"))
    appMaster.reply(MockAskAppMasterResponse("response"))
    val result = Await.result(future, Duration.Inf)
    assert(result.res.equals("response"))

    // ResolveAppId should not be called multiple times
    val future2 = application.askAppMaster[MockAskAppMasterResponse](request)
    appMaster.expectMsg(MockAskAppMasterRequest("request"))
    appMaster.reply(MockAskAppMasterResponse("response"))
    val result2 = Await.result(future2, Duration.Inf)
    assert(result2.res.equals("response"))
  }
}

object RunningApplicationSpec {
  case class MockAskAppMasterRequest(req: String)

  case class MockAskAppMasterResponse(res: String)
} 
Example 87
Source File: MiniCluster.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.cluster

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.pattern.ask
import akka.testkit.TestActorRef
import com.typesafe.config.ConfigValueFactory
import org.apache.gearpump.cluster.AppMasterToMaster.GetAllWorkers
import org.apache.gearpump.cluster.MasterToAppMaster.WorkerList
import org.apache.gearpump.cluster.master.Master
import org.apache.gearpump.cluster.worker.Worker
import org.apache.gearpump.util.Constants

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

class MiniCluster {
  private val mockMasterIP = "127.0.0.1"

  implicit val system = ActorSystem("system", TestUtil.MASTER_CONFIG.
    withValue(Constants.NETTY_TCP_HOSTNAME, ConfigValueFactory.fromAnyRef(mockMasterIP)))

  val (mockMaster, worker) = {
    val master = system.actorOf(Props(classOf[Master]), "master")
    val worker = system.actorOf(Props(classOf[Worker], master), "worker")

    // Wait until the worker registers itself with the master
    waitUntilWorkerIsRegistered(master)
    (master, worker)
  }

  def launchActor(props: Props): TestActorRef[Actor] = {
    TestActorRef(props)
  }

  private def waitUntilWorkerIsRegistered(master: ActorRef): Unit = {
    // Busy-waits; each probe blocks for up to 15 seconds inside isWorkerRegistered.
    while (!isWorkerRegistered(master)) {}
  }

  private def isWorkerRegistered(master: ActorRef): Boolean = {
    import scala.concurrent.duration._
    implicit val dispatcher = system.dispatcher

    implicit val futureTimeout = Constants.FUTURE_TIMEOUT

    val workerListFuture = (master ? GetAllWorkers).asInstanceOf[Future[WorkerList]]

    // Waits until the worker is registered.
    val workers = Await.result[WorkerList](workerListFuture, 15.seconds)
    workers.workers.size > 0
  }

  def shutDown(): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
} 
Example 88
Source File: ConfigsSpec.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.util

import java.io.File
import scala.concurrent.Await
import scala.concurrent.duration.Duration

import akka.actor.ActorSystem
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FlatSpec, Matchers}

import org.apache.gearpump.cluster.{ClusterConfig, ClusterConfigSource, UserConfig}

class ConfigsSpec extends FlatSpec with Matchers with MockitoSugar {
  "Typesafe Cluster Configs" should "follow the override rules" in {

    val conf =
      """
      gearpump {
        gear = "gearpump"
      }

      gearpump-master {
        conf = "master"
      }
      gearpump-worker {
        conf = "worker"
      }
      conf = "base"
      """

    val file = File.createTempFile("test", ".conf")
    FileUtils.write(file, conf)

    val raw = ClusterConfig.load(ClusterConfigSource(file.toString))

    assert(raw.master.getString("conf") == "master", "master > base")
    assert(raw.worker.getString("conf") == "worker", "worker > base")
    assert(raw.default.getString("conf") == "base", "application > base")

    file.delete()
  }

  "ClusterConfigSource" should "return empty for non-exist files" in {
    val source = ClusterConfigSource("non-exist")
    var config = source.getConfig
    assert(config.isEmpty)

    val nullCheck = ClusterConfigSource(null)
    config = nullCheck.getConfig
    assert(config.isEmpty)
  }

  "User Config" should "work" in {

    implicit val system = ActorSystem("forSerialization")

    val map = Map[String, String]("key1" -> "1", "key2" -> "value2")

    val user = new UserConfig(map)
      .withLong("key3", 2L)
      .withBoolean("key4", value = true)
      .withFloat("key5", 3.14F)
      .withDouble("key6", 2.718)

    assert(user.getInt("key1").get == 1)
    assert(user.getString("key1").get == "1")
    assert(user.getLong("key3").get == 2L)
    assert(user.getBoolean("key4").get == true)
    assert(user.getFloat("key5").get == 3.14F)
    assert(user.getDouble("key6").get == 2.718)

    val data = new ConfigsSpec.Data(3)
    assert(data == user.withValue("data", data).getValue[ConfigsSpec.Data]("data").get)
    system.terminate()
    Await.result(system.whenTerminated, Duration.Inf)
  }
}

object ConfigsSpec {
  case class Data(value: Int)
} 
Example 89
Source File: ActorSystemBooterSpec.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.util

import scala.concurrent.Await
import scala.concurrent.duration.Duration

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.TestProbe
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FlatSpec, Matchers}

import org.apache.gearpump.cluster.TestUtil
import org.apache.gearpump.util.ActorSystemBooter.{ActorCreated, RegisterActorSystem, _}
import org.apache.gearpump.util.ActorSystemBooterSpec._

class ActorSystemBooterSpec extends FlatSpec with Matchers with MockitoSugar {

  "ActorSystemBooter" should "report its address back" in {
    val boot = bootSystem()
    boot.prob.expectMsgType[RegisterActorSystem]
    boot.shutdown()
  }

  "ActorSystemBooter" should "terminate itself when parent actor dies" in {
    val boot = bootSystem()
    boot.prob.expectMsgType[RegisterActorSystem]

    val dummy = boot.host.actorOf(Props(classOf[Dummy]), "dummy")
    boot.prob.reply(ActorSystemRegistered(boot.prob.ref))
    boot.prob.reply(BindLifeCycle(dummy))
    boot.host.stop(dummy)
    val terminated = retry(5)(boot.bootedSystem.whenTerminated.isCompleted)
    assert(terminated)
    boot.shutdown()
  }

  "ActorSystemBooter" should "create new actor" in {
    val boot = bootSystem()
    boot.prob.expectMsgType[RegisterActorSystem]
    boot.prob.reply(ActorSystemRegistered(boot.prob.ref))
    boot.prob.reply(CreateActor(Props(classOf[AcceptThreeArguments], 1, 2, 3), "three"))
    boot.prob.expectMsgType[ActorCreated]

    boot.prob.reply(CreateActor(Props(classOf[AcceptZeroArguments]), "zero"))
    boot.prob.expectMsgType[ActorCreated]

    boot.shutdown()
  }

  private def bootSystem(): Boot = {
    val booter = ActorSystemBooter(TestUtil.DEFAULT_CONFIG)

    val system = ActorSystem("reportback", TestUtil.DEFAULT_CONFIG)

    val receiver = TestProbe()(system)
    val address = ActorUtil.getFullPath(system, receiver.ref.path)

    val bootSystem = booter.boot("booter", address)

    Boot(system, receiver, bootSystem)
  }

  case class Boot(host: ActorSystem, prob: TestProbe, bootedSystem: ActorSystem) {
    def shutdown(): Unit = {
      host.terminate()
      bootedSystem.terminate()
      Await.result(host.whenTerminated, Duration.Inf)
      Await.result(bootedSystem.whenTerminated, Duration.Inf)
    }
  }

  def retry(seconds: Int)(fn: => Boolean): Boolean = {
    val result = fn
    if (result || seconds <= 1) {
      // Give up after the allotted number of attempts instead of recursing forever.
      result
    } else {
      Thread.sleep(1000)
      retry(seconds - 1)(fn)
    }
  }
}

object ActorSystemBooterSpec {
  class Dummy extends Actor {
    def receive: Receive = {
      case _ =>
    }
  }

  class AcceptZeroArguments extends Actor {
    def receive: Receive = {
      case _ =>
    }
  }

  class AcceptThreeArguments(a: Int, b: Int, c: Int) extends Actor {
    def receive: Receive = {
      case _ =>
    }
  }
} 
Example 90
Source File: FutureTracingSpec.scala    From scribe   with MIT License 5 votes vote down vote up
package spec

import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

import scala.concurrent.duration.Duration
import scala.concurrent.Await

class FutureTracingSpec extends AnyWordSpec with Matchers {
  "Future tracing" when {
    "using scribe implicits" should {
      "future trace back" in {
        val exception = intercept[RuntimeException](Await.result(FutureTesting.position(), Duration.Inf))
        val trace = exception.getStackTrace
        trace(0).getFileName should be("FutureTesting.scala")
        trace(0).getLineNumber should be(27)

        trace(1).getFileName should be("FutureTesting.scala")
        trace(1).getMethodName should be("three")
        trace(1).getLineNumber should be(26)

        trace(2).getFileName should be("FutureTesting.scala")
        trace(2).getMethodName should be("two")
        trace(2).getLineNumber should be(20)

        trace(3).getFileName should be("FutureTesting.scala")
        trace(3).getMethodName should be("one")
        trace(3).getLineNumber should be(14)

        trace(4).getFileName should be("FutureTesting.scala")
        trace(4).getMethodName should be("position")
        trace(4).getLineNumber should be(9)
      }
      "async trace back" in {
        val exception = intercept[RuntimeException](Await.result(AsyncTesting.position(), Duration.Inf))
        val trace = exception.getStackTrace

        var i = 0

        trace(i).getFileName should be("AsyncTesting.scala")
        trace(i).getLineNumber should be(34)
        i += 1

        trace(i).getFileName should be("AsyncTesting.scala")
        trace(i).getMethodName should be("three")
        trace(i).getLineNumber should be(32)
        i += 1

        if (trace(i).getMethodName == "three") {
          trace(i).getFileName should be("AsyncTesting.scala")
          trace(i).getMethodName should be("three")
          trace(i).getLineNumber should be(33)
          i += 1
        }

        trace(i).getFileName should be("AsyncTesting.scala")
        trace(i).getMethodName should be("two")
        trace(i).getLineNumber should be(25)
        i += 1

        trace(i).getFileName should be("AsyncTesting.scala")
        trace(i).getMethodName should be("one")
        trace(i).getLineNumber should be(17)
        i += 1

        trace(i).getFileName should be("AsyncTesting.scala")
        trace(i).getMethodName should be("position")
        trace(i).getLineNumber should be(10)
      }
    }
  }
} 
Example 91
Source File: package.scala    From incubator-livy   with Apache License 2.0 5 votes vote down vote up
package org.apache.livy

import java.util.concurrent.{ExecutionException, Future => JFuture, TimeUnit}

import scala.concurrent.duration.Duration

package object scalaapi {

  
  implicit class ScalaWrapper(livyJavaClient: LivyClient) {
    def asScalaClient: LivyScalaClient = new LivyScalaClient(livyJavaClient)
  }

  private[livy] def getJavaFutureResult[T](jFuture: JFuture[T],
                                           atMost: Duration = Duration.Undefined): T = {
    try {
      if (!atMost.isFinite()) jFuture.get else jFuture.get(atMost.toMillis, TimeUnit.MILLISECONDS)
    } catch {
      case executionException: ExecutionException => throw executionException.getCause
    }
  }
} 
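Note how the helper dispatches on finiteness: `Duration.Undefined` is not finite, so the default falls through to the indefinitely blocking `jFuture.get`, while any finite duration becomes a bounded `get(timeout, unit)` call. A minimal standalone sketch of the same pattern (the `CompletableFuture` value is illustrative only):

import java.util.concurrent.{CompletableFuture, TimeUnit, Future => JFuture}
import scala.concurrent.duration.{Duration, DurationInt}

def awaitJava[T](jFuture: JFuture[T], atMost: Duration = Duration.Undefined): T =
  if (!atMost.isFinite()) jFuture.get                      // wait indefinitely
  else jFuture.get(atMost.toMillis, TimeUnit.MILLISECONDS) // bounded wait

val done: JFuture[Int] = CompletableFuture.completedFuture(42)
awaitJava(done)            // 42, no timeout applied
awaitJava(done, 5.seconds) // 42, waits at most five seconds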
Example 92
Source File: SessionHeartbeat.scala    From incubator-livy   with Apache License 2.0 5 votes vote down vote up
package org.apache.livy.server.interactive

import java.util.Date

import scala.concurrent.duration.{Deadline, Duration, FiniteDuration}

import org.apache.livy.sessions.Session.RecoveryMetadata
import org.apache.livy.LivyConf
import org.apache.livy.server.SessionServlet
import org.apache.livy.sessions.{Session, SessionManager}


trait SessionHeartbeatWatchdog[S <: Session with SessionHeartbeat, R <: RecoveryMetadata] {
  self: SessionManager[S, R] =>

  private val watchdogThread = new Thread(s"HeartbeatWatchdog-${self.getClass.getName}") {
    override def run(): Unit = {
      val interval = livyConf.getTimeAsMs(LivyConf.HEARTBEAT_WATCHDOG_INTERVAL)
      info("Heartbeat watchdog thread started.")
      while (true) {
        deleteExpiredSessions()
        Thread.sleep(interval)
      }
    }
  }

  protected def start(): Unit = {
    assert(!watchdogThread.isAlive())

    watchdogThread.setDaemon(true)
    watchdogThread.start()
  }

  private[interactive] def deleteExpiredSessions(): Unit = {
    // Deletion takes time. If we used .filter().foreach() here, the gap between the time we
    // check expiration and the time we delete the session could be large. To avoid that, the
    // expiration check happens inside the foreach block.
    sessions.values.foreach { s =>
      if (s.heartbeatExpired) {
        info(s"Session ${s.id} expired. Last heartbeat is at ${s.lastHeartbeat}.")
        try { delete(s) } catch {
          case t: Throwable =>
            warn(s"Exception was thrown when deleting expired session ${s.id}", t)
        }
      }
    }
  }
} 
Example 93
Source File: Utils.scala    From incubator-livy   with Apache License 2.0 5 votes vote down vote up
package org.apache.livy

import java.io.{Closeable, File, InputStreamReader}
import java.net.URL
import java.nio.charset.StandardCharsets.UTF_8
import java.security.SecureRandom
import java.util.Properties

import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.concurrent.TimeoutException
import scala.concurrent.duration.Duration

import org.apache.commons.codec.binary.Base64

object Utils {
  def getPropertiesFromFile(file: File): Map[String, String] = {
    loadProperties(file.toURI().toURL())
  }

  def loadProperties(url: URL): Map[String, String] = {
    val inReader = new InputStreamReader(url.openStream(), UTF_8)
    try {
      val properties = new Properties()
      properties.load(inReader)
      properties.stringPropertyNames().asScala.map { k =>
        (k, properties.getProperty(k).trim())
      }.toMap
    } finally {
      inReader.close()
    }
  }

  
  def isProcessAlive(process: Process): Boolean = {
    try {
      process.exitValue()
      false
    } catch {
      case _: IllegalThreadStateException =>
        true
    }
  }

  def startDaemonThread(name: String)(f: => Unit): Thread = {
    val thread = new Thread(name) {
      override def run(): Unit = f
    }
    thread.setDaemon(true)
    thread.start()
    thread
  }

  def usingResource[A <: Closeable, B](resource: A)(f: A => B): B = {
    try {
      f(resource)
    } finally {
      resource.close()
    }
  }

  def createSecret(secretBitLength: Int): String = {
    val rnd = new SecureRandom()
    val secretBytes = new Array[Byte](secretBitLength / java.lang.Byte.SIZE)
    rnd.nextBytes(secretBytes)

    Base64.encodeBase64String(secretBytes)
  }
} 
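`usingResource` is a plain loan pattern: the resource is closed even when the body throws. A small usage sketch (the file path is hypothetical):

import java.io.FileInputStream

val firstByte = Utils.usingResource(new FileInputStream("conf/livy.conf")) { in =>
  in.read() // the stream is closed in the finally block regardless of outcome
}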
Example 94
Source File: Implicits.scala    From openlaw-core   with Apache License 2.0 5 votes vote down vote up
package org.adridadou.openlaw.result

import cats.implicits._
import cats.data.NonEmptyList

import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration
import scala.language.implicitConversions
import scala.util.{Try, Failure => TFailure, Success => TSuccess}

object Implicits {

  implicit class RichNonEmptyList[T](val nel: NonEmptyList[T]) extends AnyVal {
    def mkString: String = mkString(", ")
    def mkString(sep: String): String = nel.toList.mkString(sep)
  }

  implicit class RichTry[T](val t: Try[T]) extends AnyVal {
    def toResult: Result[T] = t match {
      case TSuccess(v)            => Success(v)
      case TFailure(e: Exception) => Failure(e)

      // don't try to handle Error instances
      case TFailure(t) => throw t
    }
  }

  implicit class RichEither[T](val either: Either[String, T]) extends AnyVal {
    def toResult: Result[T] = either.left.map(FailureMessage(_))
  }

  implicit class RichFuture[T](val future: Future[T]) extends AnyVal {
    def getResult(timeout: Duration): Result[T] =
      attempt(Await.result(future, timeout))
  }

  implicit class RichResult[T](val result: Result[T]) extends AnyVal {
    def addCause(cause: Failure[T]): ResultNel[T] = result match {
      case Success(_)     => cause.toResultNel
      case Left(original) => FailureNel(original, cause.value)
    }
    def addFailure[U >: T](cause: FailureCause): ResultNel[U] = result match {
      case s @ Success(_) => s.toResultNel
      case Left(original) => FailureNel(cause, original)
    }
    def addMessageToFailure[U >: T](message: String): ResultNel[U] =
      result match {
        case s @ Success(_) => s.toResultNel
        case Left(original) => FailureNel(FailureMessage(message), original)
      }
    def convert(pf: PartialFunction[Exception, Exception]): Result[T] =
      result.left.map {
        case FailureException(e, _) if pf.isDefinedAt(e) =>
          FailureException(pf(e))
        case f => f
      }
    def recoverMerge(f: FailureCause => T): T =
      result.fold(failure => f(failure), success => success)
    def recoverWith(pf: PartialFunction[FailureCause, Result[T]]): Result[T] =
      result.leftFlatMap { error =>
        if (pf.isDefinedAt(error)) {
          pf(error)
        } else {
          result
        }
      }
    def toResultNel: ResultNel[T] = result.toValidatedNel
    def toFuture: Future[T] = result match {
      case Success(value) => Future.successful(value)
      case Failure(e, _)  => Future.failed(e)
    }
    def getOrThrow(): T = result.valueOr(_.throwException())
  }

  implicit class RichOption[T](val option: Option[T]) extends AnyVal {
    def toResult(message: String): Result[T] =
      option.map(x => Success(x)).getOrElse(Failure(message))
  }

  implicit class RichResultNel[T](val result: ResultNel[T]) extends AnyVal {
    def toUnit: ResultNel[Unit] = result.map(_ => ())
    def toResult: Result[T] = result.toEither.leftMap {
      case NonEmptyList(x, Seq()) => x
      case nel                    => FailureException(MultipleCauseException(nel))
    }
  }

  implicit def exception2Result[A](e: Exception): Result[A] = Failure[A](e)
  implicit def unitResultConversion[T](wrapped: Result[T]): Result[Unit] =
    wrapped.map(_ => ())
  implicit def failureCause2Exception[T](wrapped: FailureCause): Exception =
    wrapped.e
} 
Example 95
Source File: GrpcAkkaStreamsServerCalls.scala    From grpcakkastream   with MIT License 5 votes vote down vote up
package grpc.akkastreams

import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Sink, Source}
import io.grpc.ServerCallHandler
import io.grpc.stub.{CallStreamObserver, ServerCalls, StreamObserver}

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success}

object GrpcAkkaStreamsServerCalls {

  def unaryCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] = ServerCalls.asyncUnaryCall(
    new ServerCalls.UnaryMethod[I, O] {
      override def invoke(request: I, responseObserver: StreamObserver[O]) =
        Source
          .single(request)
          .via(service)
          .runForeach(responseObserver.onNext)
          .onComplete {
            case Success(_) => responseObserver.onCompleted()
            case Failure(t) => responseObserver.onError(t)
          }(mat.executionContext)
    }
  )

  def serverStreamingCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] =
    ServerCalls.asyncServerStreamingCall(
      new ServerCalls.ServerStreamingMethod[I, O] {
        override def invoke(request: I, responseObserver: StreamObserver[O]) =
          Source
            .single(request)
            .via(service)
            .runWith(Sink.fromGraph(new GrpcSinkStage[O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            )))
      }
    )

  def clientStreamingCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] = ServerCalls.asyncClientStreamingCall(
    new ServerCalls.ClientStreamingMethod[I, O] {
      override def invoke(responseObserver: StreamObserver[O]): StreamObserver[I] =
      // blocks until the GraphStage is fully initialized
        Await.result(
          Source
            .fromGraph(new GrpcSourceStage[I, O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))
            .via(service)
            .to(Sink.fromGraph(new GrpcSinkStage[O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))).run(),
          Duration.Inf
        )
    }
  )

  def bidiStreamingCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] = ServerCalls.asyncBidiStreamingCall(
    new ServerCalls.BidiStreamingMethod[I, O] {
      override def invoke(responseObserver: StreamObserver[O]): StreamObserver[I] =
      // blocks until the GraphStage is fully initialized
        Await.result(
          Source
            .fromGraph(new GrpcSourceStage[I, O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))
            .via(service)
            .to(Sink.fromGraph(new GrpcSinkStage[O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))).run(),
          Duration.Inf
        )
    }
  )
} 
Example 96
Source File: search.scala    From fs2-elastic   with MIT License 5 votes vote down vote up
package com.alessandromarrella.fs2_elastic.syntax

import cats.effect.Async
import fs2.Stream
import org.elasticsearch.action.search.{SearchRequest, SearchResponse, SearchScrollRequest}
import org.elasticsearch.client.RestHighLevelClient
import org.elasticsearch.search.{SearchHit, SearchHits}

import scala.concurrent.duration.Duration
import com.alessandromarrella.fs2_elastic.io
import org.apache.http.Header

private[syntax] trait search {

  type SearchResultMaybe[A] =
    Option[(A, (RestHighLevelClient, SearchScrollRequest, SearchResponse))]

  implicit class ElasticClientSearchOps[F[_]](
      val client: Stream[F, RestHighLevelClient]) {

    def search(searchRequest: SearchRequest, headers:Header*): Stream[F, SearchResponse] =
      client.through(io.search.search(searchRequest))

    def searchScroll(searchRequest: SearchRequest, duration: Duration, headers:Header*)(
        implicit F: Async[F])
      : Stream[F, (RestHighLevelClient, SearchResponse)] =
      client.through(io.search.searchScroll(searchRequest, duration, headers:_*))
  }

  implicit class SearchResponseOps[F[_]](
      val searchResponse: Stream[F, SearchResponse]) {
    def hits: Stream[F, SearchHits] =
      searchResponse.through(io.search.hits[F])
  }

  implicit class SearchScrollResponseOps[F[_]](
      val searchResponse: Stream[F, (RestHighLevelClient, SearchResponse)]) {
    def hitsScroll(implicit F: Async[F]): Stream[F, SearchHits] =
      searchResponse.through(io.search.hitsScroll[F])
  }

  implicit class SearchHitOps[F[_]](val hitsStream: Stream[F, SearchHits]) {
    def stream(implicit F: Async[F]): Stream[F, SearchHit] =
      streamFromJavaIterable(hitsStream)
  }

}

object search extends search 
Example 97
Source File: package.scala    From fs2-elastic   with MIT License 5 votes vote down vote up
package com.alessandromarrella.fs2_elastic

import cats.effect.Async
import fs2.Stream

import org.elasticsearch.common.unit.TimeValue

import scala.concurrent.duration.Duration
import scala.collection.JavaConverters._

package object syntax {

  private[syntax] type IteratorResultMaybe[A] = Option[(A, Iterator[A])]

  private[syntax] def streamFromJavaIterable[F[_], A](
      inputStream: Stream[F, java.lang.Iterable[A]])(
      implicit F: Async[F]): Stream[F, A] =
    streamFromIterable(inputStream.map(_.asScala))

  private[syntax] def streamFromIterable[F[_], A](
      inputStream: Stream[F, Iterable[A]])(implicit F: Async[F]): Stream[F, A] =
    inputStream.flatMap(a =>
      Stream.unfoldEval(a.iterator) { i =>
        if (i.hasNext) F.delay[IteratorResultMaybe[A]](Some((i.next(), i)))
        else F.delay[IteratorResultMaybe[A]](Option.empty)
    })

  private[syntax] def durationToTimeValue(duration: Duration): TimeValue =
    TimeValue.timeValueNanos(duration.toNanos)

} 
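The iterator-to-stream helper above is built on `Stream.unfoldEval`: each step effectfully pulls one element and threads the iterator through as state. A self-contained sketch of the same pattern, specialized to `IO` rather than the abstract `Async[F]`:

import cats.effect.IO
import fs2.Stream

def fromIterable[A](as: Iterable[A]): Stream[IO, A] =
  Stream.unfoldEval(as.iterator) { i =>
    IO(if (i.hasNext) Some((i.next(), i)) else None)
  }

// fromIterable(List(1, 2, 3)) emits 1, 2, 3 and then terminates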
Example 98
Source File: search.scala    From fs2-elastic   with MIT License 5 votes vote down vote up
package com.alessandromarrella.fs2_elastic.io

import cats.effect.Async
import fs2.{Pipe, Stream}
import org.apache.http.Header
import org.elasticsearch.action.search.{SearchRequest, SearchResponse, SearchScrollRequest}
import org.elasticsearch.client.RestHighLevelClient
import org.elasticsearch.search.SearchHits

import scala.collection.JavaConverters._
import scala.concurrent.duration.Duration

private[io] trait search {

  type SearchResultMaybe[A] = Option[(A, SearchResponse)]

  def search[F[_]](searchRequest: SearchRequest, headers: Header*)
    : Pipe[F, RestHighLevelClient, SearchResponse] =
    client => client.map(_.search(searchRequest, headers:_*))

  def searchScroll[F[_]](searchRequest: SearchRequest, duration: Duration, headers: Header*)(
      implicit F: Async[F])
    : Pipe[F, RestHighLevelClient, (RestHighLevelClient, SearchResponse)] =
    client =>
      client.map(c =>
        (c, c.search(searchRequest.scroll(durationToTimeValue(duration)), headers:_*)))

  def hits[F[_]]: Pipe[F, SearchResponse, SearchHits] =
    response => response.map(_.getHits)

  def hitsScroll[F[_]](implicit F: Async[F])
    : Pipe[F, (RestHighLevelClient, SearchResponse), SearchHits] =
    input =>
      input.flatMap {
        case (client, response) =>
          Stream.unfoldEval(response) { res =>
            F.delay[SearchResultMaybe[SearchHits]](
              Option[SearchHits](res.getHits).flatMap {
                case hits if hits.asScala.nonEmpty =>
                  val newRequest = new SearchScrollRequest(res.getScrollId)
                  val newResponse = client.searchScroll(newRequest)
                  Some((hits, newResponse))
                case _ =>
                  None
              })
          }
    }

}

object search extends search 
Example 99
Source File: ProjectionConfig.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.projection

import scala.concurrent.duration.Duration
import akka.annotation.InternalApi
import com.lightbend.lagom.projection.Started
import com.lightbend.lagom.projection.Status
import com.lightbend.lagom.projection.Stopped
import com.typesafe.config.Config
import java.util.concurrent.TimeUnit
import scala.concurrent.duration._

@InternalApi
sealed trait ProjectionConfig {
  def writeMajorityTimeout: FiniteDuration
  def defaultRequestedStatus: Status
}
@InternalApi
object ProjectionConfig {
  def apply(config: Config): ProjectionConfig = {
    new ProjectionConfigImpl(config.getConfig("lagom.projection"))
  }

  private final class ProjectionConfigImpl(config: Config) extends ProjectionConfig {
    val writeMajorityTimeout: FiniteDuration =
      config.getDuration("write.majority.timeout", TimeUnit.MILLISECONDS).millis

    val defaultRequestedStatus: Status = {
      val autoStartEnabled = config.getBoolean("auto-start.enabled")
      if (autoStartEnabled) Started
      else Stopped
    }
  }
} 
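A hedged sketch of driving this reader from an inline HOCON snippet, using the two keys read above (`write.majority.timeout` and `auto-start.enabled`):

import com.typesafe.config.ConfigFactory

val cfg = ProjectionConfig(ConfigFactory.parseString(
  """lagom.projection {
    |  write.majority.timeout = 5s
    |  auto-start.enabled = true
    |}""".stripMargin))

// cfg.writeMajorityTimeout is 5.seconds; cfg.defaultRequestedStatus is Started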
Example 100
Source File: WorkerConfig.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.projection

import scala.concurrent.duration.Duration
import akka.annotation.InternalApi
import com.typesafe.config.Config
import java.util.concurrent.TimeUnit
import scala.concurrent.duration._

@InternalApi
sealed trait WorkerConfig {
  def minBackoff: FiniteDuration
  def maxBackoff: FiniteDuration
  def randomFactor: Double
}

object WorkerConfig {
  def apply(config: Config): WorkerConfig = new WorkerConfigImpl(config.getConfig("lagom.projection.worker"))

  private final class WorkerConfigImpl(config: Config) extends WorkerConfig {
    val minBackoff: FiniteDuration = config.getDuration("backoff.supervisor.minBackoff", TimeUnit.MILLISECONDS).millis
    val maxBackoff: FiniteDuration = config.getDuration("backoff.supervisor.maxBackoff", TimeUnit.MILLISECONDS).millis
    val randomFactor: Double       = config.getDouble("backoff.supervisor.randomFactor")
  }
} 
Example 101
Source File: RefreshTokenStorage.scala    From akka-http-session   with Apache License 2.0 5 votes vote down vote up
package com.softwaremill.session

import scala.collection.mutable
import scala.concurrent.Future
import scala.concurrent.duration.Duration

trait RefreshTokenStorage[T] {
  def lookup(selector: String): Future[Option[RefreshTokenLookupResult[T]]]
  def store(data: RefreshTokenData[T]): Future[Unit]
  def remove(selector: String): Future[Unit]
  def schedule[S](after: Duration)(op: => Future[S]): Unit
}

case class RefreshTokenData[T](forSession: T,
                               selector: String,
                               tokenHash: String,
                               expires: Long)

trait InMemoryRefreshTokenStorage[T] extends RefreshTokenStorage[T] {
  case class Store(session: T, tokenHash: String, expires: Long)
  private val _store = mutable.Map[String, Store]()

  def store: Map[String, Store] = _store.toMap

  override def lookup(selector: String) = {
    Future.successful {
      val r = _store.get(selector).map(s => RefreshTokenLookupResult[T](s.tokenHash, s.expires, () => s.session))
      log(s"Looking up token for selector: $selector, found: ${r.isDefined}")
      r
    }
  }

  override def store(data: RefreshTokenData[T]) = {
    log(
      s"Storing token for selector: ${data.selector}, user: ${data.forSession}, " +
        s"expires: ${data.expires}, now: ${System.currentTimeMillis()}")
    Future.successful(_store.put(data.selector, Store(data.forSession, data.tokenHash, data.expires)))
  }

  override def remove(selector: String) = {
    log(s"Removing token for selector: $selector")
    Future.successful(_store.remove(selector))
  }

  override def schedule[S](after: Duration)(op: => Future[S]) = {
    log("Running scheduled operation immediately")
    op
    Future.successful(())
  }

  def log(msg: String): Unit
} 
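Since only `log` is left abstract in the in-memory variant, a usable test instance is a one-liner; a minimal sketch:

class PrintlnTokenStorage[T] extends InMemoryRefreshTokenStorage[T] {
  override def log(msg: String): Unit = println(msg)
}

val storage = new PrintlnTokenStorage[String]
storage.store(RefreshTokenData("session-1", "selector-1", "hash-1",
  expires = System.currentTimeMillis() + 60000L))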
Example 102
Source File: BlockTransferService.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.network

import java.io.Closeable
import java.nio.ByteBuffer

import scala.concurrent.{Future, Promise}
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag

import org.apache.spark.internal.Logging
import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer}
import org.apache.spark.network.shuffle.{BlockFetchingListener, ShuffleClient}
import org.apache.spark.storage.{BlockId, StorageLevel}
import org.apache.spark.util.ThreadUtils

private[spark]
abstract class BlockTransferService extends ShuffleClient with Closeable with Logging {

  
  def uploadBlockSync(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Unit = {
    val future = uploadBlock(hostname, port, execId, blockId, blockData, level, classTag)
    ThreadUtils.awaitResult(future, Duration.Inf)
  }
} 
Example 103
Source File: FutureActionSuite.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark

import scala.concurrent.duration.Duration

import org.scalatest.{BeforeAndAfter, Matchers}

import org.apache.spark.util.ThreadUtils


class FutureActionSuite
  extends SparkFunSuite
  with BeforeAndAfter
  with Matchers
  with LocalSparkContext {

  before {
    sc = new SparkContext("local", "FutureActionSuite")
  }

  test("simple async action") {
    val rdd = sc.parallelize(1 to 10, 2)
    val job = rdd.countAsync()
    val res = ThreadUtils.awaitResult(job, Duration.Inf)
    res should be (10)
    job.jobIds.size should be (1)
  }

  test("complex async action") {
    val rdd = sc.parallelize(1 to 15, 3)
    val job = rdd.takeAsync(10)
    val res = ThreadUtils.awaitResult(job, Duration.Inf)
    res should be (1 to 10)
    job.jobIds.size should be (2)
  }

} 
Example 104
Source File: ExecutorServiceWrapper.scala    From monix-nio   with Apache License 2.0 5 votes vote down vote up
package monix.nio.internal

import java.util
import java.util.concurrent.{ AbstractExecutorService, ExecutorService, TimeUnit }

import monix.execution.schedulers.{ ReferenceScheduler, SchedulerService }
import monix.execution.{ Cancelable, ExecutionModel, Scheduler }

import scala.collection.JavaConverters._
import scala.concurrent.duration.Duration
import scala.concurrent.{ Await, ExecutionContextExecutorService }


  private val currentThread: Scheduler =
    new ReferenceScheduler {
      import monix.execution.Scheduler.global
      def execute(r: Runnable): Unit = r.run()
      def reportFailure(t: Throwable): Unit = throw t
      def scheduleOnce(initialDelay: Long, unit: TimeUnit, r: Runnable): Cancelable =
        global.scheduleOnce(initialDelay, unit, r)
      def executionModel: ExecutionModel =
        ExecutionModel.Default
    }
} 
Example 105
Source File: Utils.scala    From telegram   with Apache License 2.0 5 votes vote down vote up
import java.util.{Timer, TimerTask}

import scala.concurrent.duration.Duration
import scala.concurrent.{Future, Promise}
import scala.util.Try

object Utils {
  def after[T](duration: Duration)(block: => T): Future[T] = {
    val promise = Promise[T]()
    val t = new Timer()
    t.schedule(new TimerTask {
      override def run(): Unit = {
        promise.complete(Try(block))
      }
    }, duration.toMillis)
    promise.future
  }
} 
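A quick usage sketch: the returned future completes with the block's value once the timer fires.

import scala.concurrent.Await
import scala.concurrent.duration.DurationInt

val delayed = Utils.after(500.millis)("done")
Await.result(delayed, 2.seconds) // "done", roughly half a second later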
Example 106
Source File: TestUtils.scala    From scala-play-realworld-example-app   with MIT License 5 votes vote down vote up
package commons_test.test_helpers

import commons.services.ActionRunner
import slick.dbio.DBIO

import scala.concurrent.duration.{Duration, DurationInt}
import scala.concurrent.{Await, Future}

object TestUtils {

  val config: Map[String, String] = Map(
    "play.evolutions.enabled" -> "true",
    "play.evolutions.autoApply" -> "true",
    "slick.dbs.default.profile" -> "slick.jdbc.H2Profile$",
    "slick.dbs.default.db.driver" -> "org.h2.Driver",
    "slick.dbs.default.db.url" -> "jdbc:h2:mem:play;DATABASE_TO_UPPER=false",
    "slick.dbs.default.db.user" -> "user",
    "slick.dbs.default.db.password" -> ""
  )

  def runAndAwaitResult[T](action: DBIO[T])(implicit actionRunner: ActionRunner,
                                            duration: Duration = new DurationInt(1).minute): T = {
    val future: Future[T] = actionRunner.runTransactionally(action)
    Await.result(future, duration)
  }
} 
Example 107
Source File: BlockTransferService.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.network

import java.io.Closeable
import java.nio.ByteBuffer

import scala.concurrent.{Promise, Await, Future}
import scala.concurrent.duration.Duration

import org.apache.spark.Logging
import org.apache.spark.network.buffer.{NioManagedBuffer, ManagedBuffer}
import org.apache.spark.network.shuffle.{ShuffleClient, BlockFetchingListener}
import org.apache.spark.storage.{BlockManagerId, BlockId, StorageLevel}

private[spark]
abstract class BlockTransferService extends ShuffleClient with Closeable with Logging {

  
  def uploadBlockSync(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel): Unit = {
    Await.result(uploadBlock(hostname, port, execId, blockId, blockData, level), Duration.Inf)
  }
} 
Example 108
Source File: FutureActionSuite.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark

import scala.concurrent.Await
import scala.concurrent.duration.Duration

import org.scalatest.{BeforeAndAfter, FunSuite, Matchers}


class FutureActionSuite extends FunSuite with BeforeAndAfter with Matchers with LocalSparkContext {

  before {
    sc = new SparkContext("local", "FutureActionSuite")
  }

  test("simple async action") {
    val rdd = sc.parallelize(1 to 10, 2)
    val job = rdd.countAsync()
    val res = Await.result(job, Duration.Inf)
    res should be (10)
    job.jobIds.size should be (1)
  }

  test("complex async action") {
    val rdd = sc.parallelize(1 to 15, 3)
    val job = rdd.takeAsync(10)
    val res = Await.result(job, Duration.Inf)
    res should be (1 to 10)
    job.jobIds.size should be (2)
  }

} 
Example 109
Source File: Redis4CatsFunSuite.scala    From redis4cats   with Apache License 2.0 5 votes vote down vote up
package dev.profunktor.redis4cats

import cats.effect._
import cats.implicits._
import dev.profunktor.redis4cats.connection._
import dev.profunktor.redis4cats.data.RedisCodec
import dev.profunktor.redis4cats.effect.Log.NoOp._
import munit.FunSuite
import scala.concurrent.{ Await, ExecutionContext, Future }
import scala.concurrent.duration.Duration

abstract class Redis4CatsFunSuite(isCluster: Boolean) extends FunSuite {

  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val timer: Timer[IO]     = IO.timer(ExecutionContext.global)
  implicit val clock: Clock[IO]     = timer.clock

  val flushAllFixture = new Fixture[Unit]("FLUSHALL") {
    def apply(): Unit = ()

    override def afterEach(context: AfterEach): Unit =
      Await.result(flushAll(), Duration.Inf)
  }

  override def munitFixtures = List(flushAllFixture)

  override def munitFlakyOK: Boolean = true

  private val stringCodec = RedisCodec.Utf8

  def withAbstractRedis[A, K, V](f: RedisCommands[IO, K, V] => IO[A])(codec: RedisCodec[K, V]): Future[Unit] =
    Redis[IO].simple("redis://localhost", codec).use(f).as(assert(true)).unsafeToFuture()

  def withRedis[A](f: RedisCommands[IO, String, String] => IO[A]): Future[Unit] =
    withAbstractRedis[A, String, String](f)(stringCodec)

  private def flushAll(): Future[Unit] =
    if (isCluster) withRedisCluster(_.flushAll)
    else withRedis(_.flushAll)

  // --- Cluster ---

  lazy val redisUri = List(
    "redis://localhost:30001",
    "redis://localhost:30002",
    "redis://localhost:30003"
  ).traverse(RedisURI.make[IO](_))

  private def mkRedisCluster[K, V](codec: RedisCodec[K, V]): Resource[IO, RedisCommands[IO, K, V]] =
    for {
      uris <- Resource.liftF(redisUri)
      client <- RedisClusterClient[IO](uris: _*)
      cluster <- Redis[IO].fromClusterClient(client, codec)
    } yield cluster

  def withAbstractRedisCluster[A, K, V](
      f: RedisCommands[IO, K, V] => IO[A]
  )(codec: RedisCodec[K, V]): Future[Unit] =
    mkRedisCluster(codec).use(f).as(assert(true)).unsafeToFuture()

  def withRedisCluster[A](f: RedisCommands[IO, String, String] => IO[A]): Future[Unit] =
    withAbstractRedisCluster[A, String, String](f)(stringCodec)

} 
Example 110
Source File: lists.scala    From redis4cats   with Apache License 2.0 5 votes vote down vote up
package dev.profunktor.redis4cats.algebra

import scala.concurrent.duration.Duration

trait ListCommands[F[_], K, V]
    extends ListBlocking[F, K, V]
    with ListGetter[F, K, V]
    with ListSetter[F, K, V]
    with ListPushPop[F, K, V]

trait ListBlocking[F[_], K, V] {
  def blPop(timeout: Duration, keys: K*): F[(K, V)]
  def brPop(timeout: Duration, keys: K*): F[(K, V)]
  def brPopLPush(timeout: Duration, source: K, destination: K): F[Option[V]]
}

trait ListGetter[F[_], K, V] {
  def lIndex(key: K, index: Long): F[Option[V]]
  def lLen(key: K): F[Option[Long]]
  def lRange(key: K, start: Long, stop: Long): F[List[V]]
}

trait ListSetter[F[_], K, V] {
  def lInsertAfter(key: K, pivot: V, value: V): F[Unit]
  def lInsertBefore(key: K, pivot: V, value: V): F[Unit]
  def lRem(key: K, count: Long, value: V): F[Unit]
  def lSet(key: K, index: Long, value: V): F[Unit]
  def lTrim(key: K, start: Long, stop: Long): F[Unit]
}

trait ListPushPop[F[_], K, V] {
  def lPop(key: K): F[Option[V]]
  def lPush(key: K, values: V*): F[Unit]
  def lPushX(key: K, values: V*): F[Unit]
  def rPop(key: K): F[Option[V]]
  def rPopLPush(source: K, destination: K): F[Option[V]]
  def rPush(key: K, values: V*): F[Unit]
  def rPushX(key: K, values: V*): F[Unit]
} 
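As a usage sketch written against the blocking algebra alone (the key name and one-second timeout are illustrative):

import cats.effect.IO
import scala.concurrent.duration.DurationInt

// Waits up to one second for an element to arrive on the "jobs" list.
def nextJob(redis: ListBlocking[IO, String, String]): IO[(String, String)] =
  redis.blPop(1.second, "jobs")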
Example 111
Source File: ParallelIteratorExecutor.scala    From codepropertygraph   with Apache License 2.0 5 votes vote down vote up
package io.shiftleft.passes

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

class ParallelIteratorExecutor[T](iterator: Iterator[T]) {
  def map[D](func: T => D): Iterator[D] = {
    val futures = Future.traverse(iterator) { element =>
      Future {
        func(element)
      }
    }
    Await.result(futures, Duration.Inf)
  }
} 
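Usage reads like an ordinary `iterator.map`, except each element is processed in its own `Future` on the global execution context; a tiny sketch:

val squares = new ParallelIteratorExecutor(Iterator(1, 2, 3, 4)).map(n => n * n)
println(squares.toList) // List(1, 4, 9, 16)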
Example 112
Source File: DistributedCountRDD.scala    From carbondata   with Apache License 2.0 5 votes vote down vote up
package org.apache.carbondata.indexserver

import java.util.concurrent.Executors

import scala.collection.JavaConverters._
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor, Future}
import scala.concurrent.duration.Duration

import org.apache.hadoop.mapred.TaskAttemptID
import org.apache.hadoop.mapreduce.{InputSplit, TaskType}
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.spark.{Partition, SparkEnv, TaskContext}
import org.apache.spark.sql.SparkSession

import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.cache.CacheProvider
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.index.{IndexInputFormat, IndexStoreManager}
import org.apache.carbondata.core.index.dev.expr.IndexInputSplitWrapper
import org.apache.carbondata.core.util.{CarbonProperties, CarbonThreadFactory}
import org.apache.carbondata.spark.rdd.CarbonRDD


class DistributedCountRDD(@transient ss: SparkSession, indexInputFormat: IndexInputFormat)
  extends CarbonRDD[(String, String)](ss, Nil) {

  @transient private val LOGGER = LogServiceFactory.getLogService(classOf[DistributedPruneRDD]
    .getName)

  override protected def getPreferredLocations(split: Partition): Seq[String] = {
    if (split.asInstanceOf[IndexRDDPartition].getLocations != null) {
      split.asInstanceOf[IndexRDDPartition].getLocations.toSeq
    } else {
      Seq()
    }
  }

  override def internalCompute(split: Partition,
      context: TaskContext): Iterator[(String, String)] = {
    val attemptId = new TaskAttemptID(DistributedRDDUtils.generateTrackerId,
      id, TaskType.MAP, split.index, 0)
    val attemptContext = new TaskAttemptContextImpl(FileFactory.getConfiguration, attemptId)
    val inputSplits = split.asInstanceOf[IndexRDDPartition].inputSplit
    val numOfThreads = CarbonProperties.getInstance().getNumOfThreadsForExecutorPruning
    val service = Executors
      .newFixedThreadPool(numOfThreads, new CarbonThreadFactory("IndexPruningPool", true))
    implicit val ec: ExecutionContextExecutor = ExecutionContext
      .fromExecutor(service)
    if (indexInputFormat.ifAsyncCall()) {
      // to clear cache of invalid segments during pre-priming in index server
      IndexStoreManager.getInstance().clearInvalidSegments(indexInputFormat.getCarbonTable,
        indexInputFormat.getInvalidSegments)
    }
    val futures = if (inputSplits.length <= numOfThreads) {
      inputSplits.map {
        split => generateFuture(Seq(split))
      }
    } else {
      DistributedRDDUtils.groupSplits(inputSplits, numOfThreads).map {
        splits => generateFuture(splits)
      }
    }
    // scalastyle:off awaitresult
    val results = Await.result(Future.sequence(futures), Duration.Inf).flatten
    // scalastyle:on awaitresult
    val executorIP = s"${ SparkEnv.get.blockManager.blockManagerId.host }_${
      SparkEnv.get.blockManager.blockManagerId.executorId
    }"
    val cacheSize = if (CacheProvider.getInstance().getCarbonCache != null) {
      CacheProvider.getInstance().getCarbonCache.getCurrentSize
    } else {
      0L
    }
    Iterator((executorIP + "_" + cacheSize.toString, results.map(_._2.toLong).sum.toString))
  }

  override protected def internalGetPartitions: Array[Partition] = {
    new DistributedPruneRDD(ss, indexInputFormat).partitions
  }

  private def generateFuture(split: Seq[InputSplit])
    (implicit executionContext: ExecutionContext) = {
    Future {
      val segments = split.map { inputSplit =>
        val distributable = inputSplit.asInstanceOf[IndexInputSplitWrapper]
        distributable.getDistributable.getSegment
          .setReadCommittedScope(indexInputFormat.getReadCommittedScope)
        distributable.getDistributable.getSegment
      }
      val defaultIndex = IndexStoreManager.getInstance
        .getIndex(indexInputFormat.getCarbonTable, split.head
          .asInstanceOf[IndexInputSplitWrapper].getDistributable.getIndexSchema)
      defaultIndex.getBlockRowCount(defaultIndex, segments.toList.asJava, indexInputFormat
        .getPartitions).asScala
    }
  }

} 
Example 113
Source File: TestSegmentReadingForMultiThreading.scala    From carbondata   with Apache License 2.0 5 votes vote down vote up
package org.apache.carbondata.spark.testsuite.segmentreading

import java.util.concurrent.TimeUnit

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

import org.apache.spark.sql.{CarbonUtils, Row}
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll



class TestSegmentReadingForMultiThreading extends QueryTest with BeforeAndAfterAll {

  override def beforeAll: Unit = {
    sql("DROP TABLE IF EXISTS carbon_table_MulTI_THread")
    sql(
      "CREATE TABLE carbon_table_MulTI_THread (empno int, empname String, designation String, doj " +
      "Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname " +
      "String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance " +
      "int,utilization int,salary int) STORED AS carbondata")
    sql(
      s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO TABLE carbon_table_MulTI_THread " +
      "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
    sql(
      s"LOAD DATA LOCAL INPATH '$resourcesPath/data1.csv' INTO TABLE carbon_table_MulTI_THread " +
      "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
    sql(
      s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO TABLE carbon_table_MulTI_THread " +
      "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
    sql(
      s"LOAD DATA LOCAL INPATH '$resourcesPath/data1.csv' INTO TABLE carbon_table_MulTI_THread " +
      "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
  }

  test("test multithreading for segment reading") {


    CarbonUtils.threadSet("carbon.input.segments.default.carbon_table_MulTI_THread", "1,2,3")
    val df = sql("select count(empno) from carbon_table_MulTI_THread")
    checkAnswer(df, Seq(Row(30)))

    val four = Future {
      CarbonUtils.threadSet("carbon.input.segments.default.carbon_table_MulTI_THread", "1,3")
      val df = sql("select count(empno) from carbon_table_MulTI_THread")
      checkAnswer(df, Seq(Row(20)))
    }

    val three = Future {
      CarbonUtils.threadSet("carbon.input.segments.default.carbon_table_MulTI_THread", "0,1,2")
      val df = sql("select count(empno) from carbon_table_MulTI_THread")
      checkAnswer(df, Seq(Row(30)))
    }


    val one = Future {
      CarbonUtils.threadSet("carbon.input.segments.default.carbon_table_MulTI_THread", "0,2")
      val df = sql("select count(empno) from carbon_table_MulTI_THread")
      checkAnswer(df, Seq(Row(20)))
    }

    val two = Future {
      CarbonUtils.threadSet("carbon.input.segments.default.carbon_table_MulTI_THread", "1")
      val df = sql("select count(empno) from carbon_table_MulTI_THread")
      checkAnswer(df, Seq(Row(10)))
    }
    Await.result(Future.sequence(Seq(one, two, three, four)), Duration(300, TimeUnit.SECONDS))
  }

  override def afterAll: Unit = {
    sql("DROP TABLE IF EXISTS carbon_table_MulTI_THread")
    CarbonUtils.threadUnset("carbon.input.segments.default.carbon_table_MulTI_THread")
  }
} 
Example 114
Source File: InfluxAkkaClient.scala    From chronicler   with Apache License 2.0 5 votes vote down vote up
package com.github.fsanaulla.chronicler.akka.shared

import akka.actor.ActorSystem
import akka.http.scaladsl.{Http, HttpExt, HttpsConnectionContext}

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

abstract class InfluxAkkaClient(
    terminateActorSystem: Boolean,
    httpsContext: Option[HttpsConnectionContext]
  )(implicit system: ActorSystem,
    ec: ExecutionContext) { self: AutoCloseable =>

  private[akka] implicit val http: HttpExt = Http()

  private[akka] val (ctx, schema) =
    httpsContext
      .map(_ -> "https")
      .getOrElse(http.defaultClientHttpsContext -> "http")

  def close(): Unit =
    Await.ready(closeAsync(), Duration.Inf)

  def closeAsync(): Future[Unit] = {
    for {
      _ <- http.shutdownAllConnectionPools()
      _ <- if (terminateActorSystem) system.terminate().map(_ => {}) else Future.successful({})
    } yield {}
  }
} 
Example 115
Source File: PlotServer.scala    From DynaML   with Apache License 2.0 5 votes vote down vote up
package io.github.mandar2812.dynaml.graphics.charts.repl

import unfiltered.request._
import unfiltered.response._

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Promise}


class PlotServer extends UnfilteredWebApp[UnfilteredWebApp.Arguments]  {
  // this is fulfilled by the plot command, allowing a browser to wait for the plot to reload
  var p = Promise[Unit]()

  private class WebApp extends unfiltered.filter.Plan {
    def intent = {
      // handle jsonp
      case req @ GET(Path(Seg("check" :: Nil)) & Params(params)) =>
        implicit val responder = req
        val str = """[]"""
        val response = params.get("callback") match {
          case Some(v) =>
            val callbackName = v.head
            s"$callbackName($str)"
          case _ => str
        }
        // block for plot command to fulfill promise, and release this result to trigger browser reload
        Await.result(p.future, Duration.Inf)
        JsonContent ~> ResponseString(response)
      case _ => Pass
    }
  }

  def parseArgs(args: Array[String]) = {
    val parsed = new UnfilteredWebApp.Arguments{}
    parsed.parse(args)
    parsed
  }

  def setup(parsed: UnfilteredWebApp.Arguments): unfiltered.filter.Plan = {
    new WebApp
  }

  def htmlRoot: String = "/"
} 
Example 116
Source File: KVStore.scala    From Freasy-Monad   with MIT License 5 votes vote down vote up
package examples.scalaz

import scalaz._
import scalaz.Id.Id
import freasymonad.scalaz._

import scala.collection.mutable
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

@free trait KVStore {
  type KVStoreF[A] = Free[GrammarADT, A]
  sealed trait GrammarADT[A]

  def put[T](key: String, value: T): KVStoreF[Unit]
  def get[T](key: String): KVStoreF[Option[T]]
  def delete(key: String): KVStoreF[Unit]

  def update[T](key: String, f: T => T): KVStoreF[Unit] =
    for {
      vMaybe <- get[T](key)
      _      <- vMaybe.map(v => put[T](key, f(v))).getOrElse(Free.pure(()))
    } yield ()
}

object Main extends App {
  import KVStore.ops._

  def program: KVStoreF[Option[Int]] =
    for {
      _ <- put("wild-cats", 2)
      _ <- update[Int]("wild-cats", _ + 12)
      _ <- put("tame-cats", 5)
      n <- get[Int]("wild-cats")
      _ <- delete("tame-cats")
    } yield n

  val idInterpreter = new KVStore.Interp[Id] {
    val kvs = mutable.Map.empty[String, Any]
    def get[T](key: String): Id[Option[T]] = {
      println(s"get($key)")
      kvs.get(key).map(_.asInstanceOf[T])
    }
    def put[T](key: String, value: T): Id[Unit] = {
      println(s"put($key, $value)")
      kvs(key) = value
    }
    def delete(key: String): Id[Unit] = {
      println(s"delete($key)")
      kvs.remove(key)
    }
  }
  val resId: Id[Option[Int]] = idInterpreter.run(program)

  import scalaz.std.scalaFuture.futureInstance
  import scala.concurrent.ExecutionContext.Implicits.global

  val futureInterpreter = new KVStore.Interp[Future] {
    val kvs = mutable.Map.empty[String, Any]
    def get[T](key: String): Future[Option[T]] = Future {
      println(s"get($key)")
      kvs.get(key).map(_.asInstanceOf[T])
    }
    def put[T](key: String, value: T): Future[Unit] = Future {
      println(s"put($key, $value)")
      kvs(key) = value
    }
    def delete(key: String): Future[Unit] = Future {
      println(s"delete($key)")
      kvs.remove(key)
    }
  }
  val resFuture: Future[Option[Int]] = futureInterpreter.run(program)
  Await.ready(resFuture, Duration.Inf)
} 
Example 117
Source File: Master.scala    From Scala-Design-Patterns-Second-Edition   with MIT License 5 votes vote down vote up
package com.ivan.nikolov.scheduler.actors

import java.time.LocalDateTime
import java.util.concurrent.TimeUnit

import akka.actor.{Props, Cancellable, Actor}
import akka.routing.RoundRobinPool
import com.ivan.nikolov.scheduler.actors.messages.{Work, Schedule, Done}
import com.ivan.nikolov.scheduler.config.job.{Daily, Hourly}
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration.Duration
import scala.collection.mutable.ListBuffer
import scala.concurrent.ExecutionContext.Implicits.global

class Master(numWorkers: Int, actorFactory: ActorFactory) extends Actor with LazyLogging {
  val cancelables = ListBuffer[Cancellable]()
  
  val router = context.actorOf(
    Props(actorFactory.createWorkerActor()).withRouter(RoundRobinPool(numWorkers)),
    "scheduler-master-worker-router"
  )
  
  override def receive: Receive = {
    case Done(name, command, jobType, success) =>
      if (success) {
        logger.info("Successfully completed {} ({}).", name, command)
      } else {
        logger.error("Failure! Command {} ({}) returned a non-zero result code.", name, command)
      }
    case Schedule(configs) => 
      configs.foreach {
        case config =>
          val cancellable = this.context.system.scheduler.schedule(
            config.timeOptions.getInitialDelay(LocalDateTime.now(), config.frequency),
            config.frequency match {
              case Hourly => Duration.create(1, TimeUnit.HOURS)
              case Daily => Duration.create(1, TimeUnit.DAYS)
            },
            router,
            Work(config.name, config.command, config.jobType)
          )
          cancelables += cancellable // track it so postStop can cancel it
          logger.info("Scheduled: {}", config)
      }
  }
  
  override def postStop(): Unit = {
    cancelables.foreach(_.cancel())
  }
} 
Example 118
Source File: Scheduler.scala    From Scala-Design-Patterns-Second-Edition   with MIT License 5 votes vote down vote up
package com.ivan.nikolov.scheduler

import akka.actor.{Props, ActorSystem}
import com.ivan.nikolov.scheduler.actors.messages.Schedule
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.Await
import scala.concurrent.duration.Duration

object Scheduler extends LazyLogging {
  import com.ivan.nikolov.scheduler.registry.ComponentRegistry._
  def main(args: Array[String]): Unit = {
    logger.info("Running migrations before doing anything else.")
    migrationService.runMigrations()
    logger.info("Migrations done!")
    
    val system = ActorSystem("scheduler")
    
    val master = system.actorOf(
      Props(actorFactory.createMasterActor()),
      "scheduler-master"
    )
    
    sys.addShutdownHook({
      logger.info("Awaiting actor system termination.")
      // not great...
      Await.result(system.terminate(), Duration.Inf)
      logger.info("Actor system terminated. Bye!")
    })
    
    master ! Schedule(jobConfigReaderService.readJobConfigs())
    logger.info("Started! Use CTRL+C to exit.")
  }
} 
Example 119
Source File: TimeOptions.scala    From Scala-Design-Patterns-Second-Edition   with MIT License 5 votes vote down vote up
package com.ivan.nikolov.scheduler.config.job

import java.time.LocalDateTime
import java.time.temporal.ChronoUnit
import java.util.concurrent.TimeUnit

import scala.concurrent.duration.{FiniteDuration, Duration}

case class TimeOptions(hours: Int, minutes: Int) {
  if (hours < 0 || hours > 23) {
    throw new IllegalArgumentException("Hours must be between 0 and 23: " + hours)
  } else if (minutes < 0 || minutes > 59) {
    throw new IllegalArgumentException("Minutes must be between 0 and 59: " + minutes)
  }
  
  def getInitialDelay(now: LocalDateTime, frequency: JobFrequency): FiniteDuration = {
    val firstRun = now.withHour(hours).withMinute(minutes)
    val isBefore = firstRun.isBefore(now)
    val actualFirstRun = frequency match {
      case Hourly =>
        var tmp = firstRun
        Iterator.continually { tmp = tmp.plusHours(1); tmp }
          .takeWhile(_.isBefore(now))
          .toList.lastOption
          .getOrElse(if (isBefore) firstRun else firstRun.minusHours(1))
          .plusHours(1)
      case Daily =>
        var tmp = firstRun
        Iterator.continually { tmp = tmp.plusDays(1); tmp }
          .takeWhile(_.isBefore(now))
          .toList.lastOption
          .getOrElse(if (isBefore) firstRun else firstRun.minusDays(1))
          .plusDays(1)
    }
    val secondsUntilRun = now.until(actualFirstRun, ChronoUnit.SECONDS)
    Duration.create(secondsUntilRun, TimeUnit.SECONDS)
  }
} 
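A quick sanity check of getInitialDelay, with made-up times: a Daily job configured for 10:30, queried at 09:00 on the same day, should be 90 minutes away.

// Sketch (assumed values): Daily job at 10:30, queried at 09:00.
import java.time.LocalDateTime

val opts = TimeOptions(10, 30)
val now = LocalDateTime.of(2020, 1, 1, 9, 0)
val delay = opts.getInitialDelay(now, Daily)
println(delay.toMinutes) // 90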
Example 123
Source File: Animation.scala    From threejs-facade   with Mozilla Public License 2.0 5 votes vote down vote up
package org.denigma.threejs.extensions.animations

import java.util.Date

import org.scalajs.dom
import Animation.{Started, AniState}
import scala.concurrent.duration.Duration
import scala.scalajs.js


class Scheduler {
  def current: Double = js.Date.now
  var animations = List.empty[Animation]

  def add(ani: Animation): Unit = {
    this.animations = ani :: animations
    ani.state = Animation.Running(current)
  }

  def tick(): Unit = {
    val now = current
    animations.foreach { ani =>
      ani.state match {
        case Animation.Running(_) => ani(now)
        case _ => // not running: nothing to do
      }
    }
  }

  protected def onEnterFrameFunction(double: Double): Unit = {
    this.tick()
    start()
  }

  def start(): Scheduler = {
    dom.window.requestAnimationFrame(onEnterFrameFunction _ )
    this
  }


}

object Easings {

  val linear: Double => Double = i => i

}


object Animation{
  trait AniState
  trait Started extends AniState{
    def start: Double

  }
  case object Stopped extends AniState
  case class Finished(start: Double) extends Started
  case class Paused(start: Double) extends Started
  case class Running(start: Double) extends Started
  case class Backward(start: Double, finished: Double) extends Started

}


class Animation(val length: Duration, easing: Double => Double = Easings.linear)(fun: Double => Unit) {

  lazy val lengthMillis: Long = length.toMillis

  var state: AniState = Animation.Stopped

  def apply(current: Double): Unit = state match {
    case st: Started =>
      val finish: Double = st.start + this.lengthMillis
      easing(1.0 - (finish - current) / lengthMillis) match {
        case p if p >= 1.0 =>
          fun(1.0)
          this.state = Animation.Finished(current)
        case p if p < 0.0 =>
          dom.console.error(s"animation percent $p is below zero!\n " +
            s"Current time is $current, start is ${st.start} and length is $lengthMillis")
          this.state = Animation.Finished(current)
        case p =>
          fun(p)
          // dom.console.info(s"Current time is $current, start is ${st.start} and length is $lengthMillis and percent is $p")
      }

    case _ => dom.console.error("trying to run an animation that has not been started")
  }

  def go(implicit scheduler: Scheduler): Unit = {
    scheduler.add(this)
  }

} 
Example 124
Source File: JumpCameraControls.scala    From threejs-facade   with Mozilla Public License 2.0 5 votes vote down vote up
package org.denigma.threejs.extensions.controls

import org.denigma.threejs.extensions.animations.{Animation, Scheduler}
import org.denigma.threejs.{Camera, Scene, Vector3}
import org.scalajs.dom
import org.scalajs.dom.raw.{Element, HTMLElement}
import org.scalajs.dom.MouseEvent

import scala.concurrent.duration
import scala.concurrent.duration.Duration
import scala.concurrent.duration.MILLISECONDS
import scala.language.postfixOps


class JumpCameraControls(val camera: Camera,
  val element: Element, //scalastyle:ignore
  val scene: Scene,
  val width: Double, val height: Double,
  center: Vector3 = new Vector3())
  extends HoverControls(camera, element, center) with IntersectionControls {

  implicit val scheduler = new Scheduler().start()

  override def onMouseMove(event: MouseEvent)= {
    this.onCursorMove(event.clientX, event.clientY, width, height)
    rotateOnMove(event)
  }

  def moveTo(position: Vector3): Unit = {
    val start = center.clone()
    val dp = new Vector3().subVectors(position, center)
    dom.console.info(dp)

    new Animation(Duration(1, duration.SECONDS))(p => {

      val m = dp.clone().multiplyScalar(p)
      val cur = start.clone().add(m)
      // dom.console.info(cur)
      center.copy(cur)
    }).go(scheduler)
    // center.copy(position)
  }

  override def onMouseDown(event: MouseEvent): Unit = {
    this.intersections.headOption match {
      case Some(obj) => obj.`object`.position match {
        case p: Vector3 if p.equals(center) => super.onMouseDown(event)
        case p: Vector3 => moveTo(p)
      }
      case None => super.onMouseDown(event)
    }

  }

} 
Example 125
Source File: Main.scala    From scalaz-nio   with Apache License 2.0 5 votes vote down vote up
package zio.nio

import testz._
import runner.TestOutput

import scala.concurrent.{ Await, ExecutionContext, Future }
import ExecutionContext.global
import scala.concurrent.duration.Duration
import scala.concurrent.duration._

object Main {

  def main(args: Array[String]): Unit = {
    val printer: (Result, List[String]) => Unit =
      (tr, ls) => runner.printStrs(runner.printTest(tr, ls), Console.print)

    val ec = global

    val pureHarness   = PureHarness.makeFromPrinter(printer)
    val effectHarness = FutureHarness.makeFromPrinterEffR(printer)(ec)

    def unitTests =
      TestOutput.combineAll1(
        BufferSuite.tests(pureHarness)((), List("Buffer tests")),
        ChannelSuite.tests(pureHarness)((), List("Channel tests")),
        FileChannelSuite.tests(pureHarness)((), List("FileChannel tests")),
        ScatterGatherChannelSuite
          .tests(pureHarness)((), List("Scattering and Gathering Channel tests")),
        SelectorSuite.tests(pureHarness)((), List("Selector tests"))
      )

    def asyncChannelGroupSuite =
      AsynchronousChannelGroupSuite
        .tests(effectHarness)((), List("Asynchronous Channel Group tests"))

    // Evaluate tests before the runner expects,
    // for parallelism.
    val testOutputs: List[() => Future[TestOutput]] = List(
      Future(unitTests)(ec),
      asyncChannelGroupSuite
    ).map(s => () => s)

    val runSuites = runner(testOutputs, Console.print, global)
    val result    = Await.result(runSuites, Duration.Inf)

    if (result.failed) throw new Exception("some tests failed")
  }
} 
Example 126
Source File: MultiLoginCacheIdContainer.scala    From cluster-broccoli   with Apache License 2.0 5 votes vote down vote up
package jp.t2v.lab.play2.auth

import java.security.SecureRandom
import java.util.concurrent.TimeUnit

import play.api.cache.CacheApi

import scala.annotation.tailrec
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag
import scala.util.Random

class MultiLoginCacheIdContainer[Id: ClassTag](cache: CacheApi) extends IdContainer[Id] {

  private val log = play.api.Logger(getClass)

  private[auth] val tokenSuffix = ":multitoken"
  private[auth] val random = new Random(new SecureRandom())

  override def startNewSession(userId: Id, timeoutInSeconds: Int): AuthenticityToken = {
    log.info(s"Starting new session for user '$userId'.")
    val token = generate
    store(token, userId, Duration(timeoutInSeconds.toLong, TimeUnit.SECONDS))
    token
  }

  @tailrec
  private[auth] final def generate: AuthenticityToken = {
    val table = "abcdefghijklmnopqrstuvwxyz1234567890_.~*'()"
    val token = Iterator.continually(random.nextInt(table.size)).map(table).take(64).mkString
    if (get(token).isDefined) generate else token
  }

  def remove(token: AuthenticityToken): Unit = {
    log.info(s"Deleting session of user '${get(token)}'")
    cache.remove(token + tokenSuffix)
  }

  def get(token: AuthenticityToken): Option[Id] =
    cache.get(token + tokenSuffix).map(_.asInstanceOf[Id])

  private[auth] def store(token: AuthenticityToken, userId: Id, duration: Duration): Unit = {
    cache.set(token + tokenSuffix, userId, duration)
  }

  override def prolongTimeout(token: AuthenticityToken, timeoutInSeconds: Int): Unit = {
    get(token).foreach(store(token, _, Duration(timeoutInSeconds.toLong, TimeUnit.SECONDS)))
  }

} 
Example 127
Source File: NomadServiceSpec.scala    From cluster-broccoli   with Apache License 2.0 5 votes vote down vote up
package de.frosner.broccoli.services

import java.util.concurrent.TimeUnit

import de.frosner.broccoli.controllers.ServiceMocks
import de.frosner.broccoli.nomad.NomadConfiguration
import de.frosner.broccoli.nomad.models.Job.jobFormat
import de.frosner.broccoli.nomad.models._
import org.mockito.Mockito._
import org.specs2.mutable.Specification
import play.api.libs.json._
import play.api.mvc._
import play.api.routing.sird._
import play.api.test._
import play.core.server.Server
import squants.information.InformationConversions._
import squants.time.FrequencyConversions._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration

class NomadServiceSpec extends Specification with ServiceMocks {

  sequential

  "Requesting services for specific job" should {

    "ask consul for the services that nomad returns" in {
      val service = Service("my-service")
      val resources = Resources(
        shapeless.tag[Resources.CPU](20.megahertz),
        shapeless.tag[Resources.Memory](1.gigabytes)
      )
      val task = Task(shapeless.tag[Task.Name]("foo"), resources, Some(Seq(service)))
      val taskGroup = TaskGroup(Seq(task))
      val job = Job(Seq(taskGroup))
      val jobId = "my-job"
      Server.withRouter() {
        case GET(p"/v1/job/my-job") =>
          Action {
            Results.Ok(Json.toJson(job))
          }
      } { implicit port =>
        WsTestClient.withClient { client =>
          val configuration = NomadConfiguration(url = s"http://localhost:$port")
          val nomadService = new NomadService(configuration, client)
          val result = Await.result(nomadService.requestServices(jobId), Duration(5, TimeUnit.SECONDS))
          result === Seq(service.name)
        }
      }
    }

    "not explode when receiving tasks without services" in {
      val service1 = Service("my-service")
      val resources = Resources(
        shapeless.tag[Resources.CPU](100.megahertz),
        shapeless.tag[Resources.Memory](100.megabytes)
      )
      val task1 = Task(shapeless.tag[Task.Name]("foo1"), resources, Some(Seq(service1)))
      val task2 = Task(shapeless.tag[Task.Name]("foo2"), resources, None)
      val taskGroup1 = TaskGroup(Seq(task1))
      val taskGroup2 = TaskGroup(Seq(task2))
      val job = Job(Seq(taskGroup1, taskGroup2))
      val jobId = "my-job"
      Server.withRouter() {
        case GET(p"/v1/job/my-job") =>
          Action {
            Results.Ok(Json.toJson(job))
          }
      } { implicit port =>
        WsTestClient.withClient { client =>
          val configuration = NomadConfiguration(url = s"http://localhost:$port")
          val nomadService = new NomadService(configuration, client)
          val result = Await.result(nomadService.requestServices(jobId), Duration(5, TimeUnit.SECONDS))
          result === Seq(service1.name)
        }
      }
    }

  }

} 
Example 128
Source File: SecurityServiceSpec.scala    From cluster-broccoli   with Apache License 2.0 5 votes vote down vote up
package de.frosner.broccoli.services

import cats.data.OptionT
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.api.services.IdentityService
import com.mohiva.play.silhouette.api.util.Credentials
import com.mohiva.play.silhouette.impl.exceptions.InvalidPasswordException
import com.mohiva.play.silhouette.impl.providers.CredentialsProvider
import de.frosner.broccoli.auth.{Account, AuthConfiguration, AuthMode, Role}
import org.mockito.Mock
import org.specs2.concurrent.ExecutionEnv
import org.specs2.mock.Mockito
import org.specs2.mutable.Specification
import org.specs2.specification.mutable.ExecutionEnvironment

import scala.concurrent.Future
import scala.concurrent.duration.Duration

class SecurityServiceSpec extends Specification with Mockito with ExecutionEnvironment {

  def configWithAccounts(accounts: Seq[Account]): AuthConfiguration =
    AuthConfiguration(
      mode = AuthMode.Conf,
      session = AuthConfiguration.Session(timeout = Duration(1, "hour"), allowMultiLogin = true),
      cookie = AuthConfiguration.Cookie(secure = true),
      conf = AuthConfiguration.Conf(
        accounts = accounts
          .map(
            a =>
              AuthConfiguration.ConfAccount(
                a.name,
                "",
                a.instanceRegex,
                a.role
            ))
          .toList),
      allowedFailedLogins = 3
    )

  val identityService = mock[IdentityService[Account]]

  val account = Account("frank", "^test.*", Role.Administrator)

  override def is(implicit executionEnv: ExecutionEnv): Any =
    "An authentication check" should {

      "succeed if the credentials provider authenticates" in {
        val login = LoginInfo(CredentialsProvider.ID, account.name)
        val credentials = Credentials(account.name, "pass")

        val credentialsProvider = mock[CredentialsProvider]
        credentialsProvider.authenticate(credentials) returns Future.successful(login)

        SecurityService(configWithAccounts(List(account)), credentialsProvider, identityService)
          .authenticate(credentials) must beSome(login).await
      }

      "fail if the credentials provider fails to authenticate" in {
        val credentials = Credentials(account.name, "pass")

        val credentialsProvider = mock[CredentialsProvider]
        credentialsProvider.authenticate(credentials) returns Future.failed(new InvalidPasswordException("foo"))

        SecurityService(configWithAccounts(List(account)), credentialsProvider, identityService)
          .authenticate(credentials) must beNone.await
      }

      "succeed if the number of failed logins is equal to the allowed ones" in {
        val credentials = Credentials(account.name, "pass")
        val failedCredentials = credentials.copy(password = "foo")
        val login = LoginInfo(CredentialsProvider.ID, credentials.identifier)

        val credentialsProvider = mock[CredentialsProvider]
        credentialsProvider.authenticate(failedCredentials) returns Future.failed(new InvalidPasswordException("foo"))
        credentialsProvider.authenticate(credentials) returns Future.successful(login)

        val service = SecurityService(configWithAccounts(List(account)), credentialsProvider, identityService)
        for (attemptNo <- 1 to service.allowedFailedLogins) {
          service.authenticate(failedCredentials) must beNone.await
        }
        service.authenticate(credentials) must beSome(login).await
      }

      "fail if the number of failed logins is greater than the allowed number" in {
        val credentials = Credentials(account.name, "password")
        val failedCredentials = credentials.copy(password = "foo")
        val login = LoginInfo(CredentialsProvider.ID, credentials.identifier)

        val credentialsProvider = mock[CredentialsProvider]
        credentialsProvider.authenticate(failedCredentials) returns Future.failed(new InvalidPasswordException("foo"))
        credentialsProvider.authenticate(credentials) returns Future.successful(login)

        val service = SecurityService(configWithAccounts(List(account)), credentialsProvider, identityService)
        for (attemptNo <- 0 to service.allowedFailedLogins) {
          service.authenticate(failedCredentials) must beNone.await
        }
        service.authenticate(credentials) must beNone.await
      }

    }
} 
Example 129
Source File: MongoObservableReactivePublisherTest.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package mongo.async

import com.avsystem.commons.concurrent.RunNowEC
import com.github.ghik.silencer.silent
import com.mongodb.async.{client => mongo}
import monix.execution.{Cancelable, Scheduler}
import org.mockito.ArgumentMatchers.{eq => eqTo, _}
import org.mockito.Mockito
import org.mockito.Mockito._
import org.mongodb.scala.{Completed, Document, FindObservable, MongoCollection, SingleObservable}
import org.scalactic.source.Position
import org.scalatest.freespec.AnyFreeSpec

import scala.concurrent.duration.Duration

@silent("deprecated")
class MongoObservableReactivePublisherTest extends AnyFreeSpec {

  abstract class MockedObservableTests(implicit position: Position) extends MongoObservableExtensions {

    def subscribe[T](obs: mongo.Observable[T], testSubscriber: TestSubscriber[T]): Unit

    "should drop test collection" in {
      val collection = Mockito.mock(classOf[MongoCollection[Document]])
      when(collection.drop()).thenReturn(SingleObservable(Completed()))
      val dropSubscriber = TestSubscriber[Completed]()

      subscribe(collection.drop(), dropSubscriber)

      dropSubscriber.assertNoTerminalEvent()
      dropSubscriber.requestMore(1)
      dropSubscriber.awaitTerminalEvent(Duration(100, "ms"))
      dropSubscriber.assertNoErrors()
      dropSubscriber.assertReceivedOnNext(Seq(Completed()))

      verify(collection).drop()
      verifyNoMoreInteractions(collection)
    }

    "should insert documents" in {
      val collection = Mockito.mock(classOf[MongoCollection[Document]])
      val insertSubscriber = TestSubscriber[Completed]()
      when(collection.insertMany(any())).thenReturn(SingleObservable(Completed()))

      val documents: IndexedSeq[Document] = (1 to 100) map { i: Int => Document("_id" -> i) }
      subscribe(collection.insertMany(documents), insertSubscriber)
      insertSubscriber.requestMore(1)
      insertSubscriber.awaitTerminalEvent(Duration(100, "ms"))
      insertSubscriber.assertNoErrors()
      insertSubscriber.assertReceivedOnNext(Seq(Completed()))

      verify(collection).insertMany(eqTo(documents))
      verifyNoMoreInteractions(collection)
    }

    "should find documents" in {
      val documents: IndexedSeq[Document] = (1 to 100) map { i: Int => Document("_id" -> i) }
      val original = Mockito.mock(classOf[FindObservable[Document]])
      val findSubscriber = TestSubscriber[Document]()
      doNothing().when(original).subscribe(any())

      subscribe(original, findSubscriber)
      findSubscriber.assertNoTerminalEvent()
      findSubscriber.requestMore(101)
      documents.foreach(findSubscriber.onNext)
      findSubscriber.onComplete()
      findSubscriber.awaitTerminalEvent(Duration(100, "ms"))
      findSubscriber.assertNoErrors()
      findSubscriber.assertReceivedOnNext(documents)


      verify(original).subscribe(any(classOf[mongo.Observer[_ >: Document]]))
      verifyNoMoreInteractions(original)
    }
  }

  "A Mongo-Reactive observable" - new MockedObservableTests {
    override def subscribe[T](obs: mongo.Observable[T], testSubscriber: TestSubscriber[T]): Unit =
      obs.asReactive.subscribe(testSubscriber)
  }
  "A Mongo-Monix observable" - new MockedObservableTests {
    override def subscribe[T](obs: mongo.Observable[T], testSubscriber: TestSubscriber[T]): Unit =
      obs.asMonix.subscribe(
        monix.reactive.observers.Subscriber.fromReactiveSubscriber(testSubscriber, Cancelable.empty)(Scheduler(RunNowEC))
      )
  }
} 
Example 130
Source File: RetryStrategy.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package redis.config

import scala.concurrent.duration.{Duration, FiniteDuration}


trait RetryStrategy { self =>

  // Abstract member restored so this snippet compiles: the delay before the next
  // retry and the strategy to continue with afterwards, or Opt.Empty to give up.
  def nextRetry: Opt[(FiniteDuration, RetryStrategy)]

  def randomized(minFactor: Double, maxFactor: Double): RetryStrategy =
    RetryStrategy(self.nextRetry.flatMap { case (delay, nextStrat) =>
      val factor = minFactor + (maxFactor - minFactor) * math.random
      delay * factor match {
        case fd: FiniteDuration => Opt((fd, nextStrat.randomized(minFactor, maxFactor)))
        case _ => Opt.Empty
      }
    })

  def next: RetryStrategy =
    nextRetry.fold(RetryStrategy.never) { case (_, n) => n }
}
object RetryStrategy {
  def apply(nextRetryThunk: => Opt[(FiniteDuration, RetryStrategy)]): RetryStrategy =
    new RetryStrategy {
      def nextRetry: Opt[(FiniteDuration, RetryStrategy)] = nextRetryThunk
    }

  def never: RetryStrategy =
    apply(Opt.Empty)

  def immediately: RetryStrategy =
    once(Duration.Zero)

  def times(count: Int, duration: FiniteDuration = Duration.Zero): RetryStrategy =
    if (count <= 0) never else apply(Opt(duration, times(count - 1, duration)))

  def once(delay: FiniteDuration): RetryStrategy =
    apply(Opt((delay, never)))

  def continually(delay: FiniteDuration): RetryStrategy =
    apply(Opt((delay, continually(delay))))

  def exponentially(firstDelay: FiniteDuration, factor: Double = 2): RetryStrategy = apply {
    val nextStrat = firstDelay * factor match {
      case fd: FiniteDuration => exponentially(fd, factor)
      case _ => never
    }
    Opt((firstDelay, nextStrat))
  }
}

object ConfigDefaults {
  val Dispatcher = "redis.pinned-dispatcher"
} 
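A small usage sketch for the combinators above; the delays are arbitrary, and Opt (from AVSystem commons) mirrors Option's map/foreach API.

// Sketch: peel the first two delays off an exponential strategy.
import scala.concurrent.duration._

val strategy = RetryStrategy.exponentially(100.millis)
strategy.nextRetry.foreach { case (delay, rest) =>
  println(delay)                    // 100 milliseconds
  println(rest.nextRetry.map(_._1)) // Opt(200 milliseconds)
}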
Example 131
Source File: DelayedFuture.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package redis.util

import akka.actor.ActorSystem
import com.avsystem.commons.concurrent.RunNowEC

import scala.concurrent.duration.{Duration, FiniteDuration}

object DelayedFuture {
  def apply(delay: FiniteDuration)(implicit system: ActorSystem): Future[Unit] =
    if (delay <= Duration.Zero) Future.unit
    else {
      val promise = Promise[Unit]()
      system.scheduler.scheduleOnce(delay)(promise.success(()))(RunNowEC)
      promise.future
    }
} 
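A minimal usage sketch; the system name and the delay are arbitrary.

// Sketch: complete a Future roughly half a second from now.
import akka.actor.ActorSystem
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

implicit val system: ActorSystem = ActorSystem("demo")
DelayedFuture(500.millis).foreach(_ => println("fired after ~500 ms"))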
Example 132
Source File: ClusterApiTest.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package redis.commands

import com.avsystem.commons.redis.{ClusterUtils, NodeAddress, RedisApi, RedisClusterCommandsSuite, RedisNodeClient}

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class ClusterApiTest extends RedisClusterCommandsSuite {
  override def executor: RedisNodeClient = Await.result(redisClient.initializedCurrentState, Duration.Inf).mapping.head._2

  import RedisApi.Batches.StringTyped._

  test("CLUSTER DELSLOTS/ADDSLOTS") {
    clusterDelslots(0 to 16).get
    clusterAddslots(0 to 16).get
    clusterDelslots(5, 8, 10).get
    clusterAddslots(5, 8, 10).get
  }

  test("CLUSTER COUNT-FAILURE-REPORTS") {
    val id = clusterMyid.get
    clusterCountFailureReports(id).assert(_ >= 0)
  }

  test("CLUSTER COUNTKEYSINSLOT") {
    val tag = ClusterUtils.SlotKeys(0)
    setup(mset((0 until 10).map(i => (s"{$tag}$i", ""))))
    clusterCountkeysinslot(0).assertEquals(10)
    clusterCountkeysinslot(1).assertEquals(0)
  }

  test("CLUSTER GETKEYSINSLOT") {
    val tag = ClusterUtils.SlotKeys(0)
    setup(mset((0 until 10).map(i => (s"{$tag}$i", ""))))
    clusterGetkeysinslot(0, 10).map(_.sorted).assertEquals((0 until 10).map(i => s"{$tag}$i"))
  }

  test("CLUSTER INFO") {
    clusterInfo.assert(_.stateOk)
  }

  test("CLUSTER MYID") {
    clusterMyid.get
  }

  test("CLUSTER NODES") {
    clusterNodes.assert(_.size == ports.size)
  }

  test("CLUSTER SLOTS") {
    clusterSlots.map(_.sortBy(_.master.port)).assertEquals(Seq(
      SlotRangeMapping(SlotRange(0, 5460), NodeAddress(port = 9000), NodeId("b714a8032b9c1d74a7adc7da75fdbde0517bdf1b").opt,
        Seq((NodeAddress(port = 9001), NodeId("cc8228f6e849ba1ee5abfc8f1ebde238e08c1d27").opt))),
      SlotRangeMapping(SlotRange(5461, 10921), NodeAddress(port = 9002), NodeId("5f4c1e93370f3a60e9106cff3a613216abb1c8dc").opt,
        Seq((NodeAddress(port = 9003), NodeId("18346204561dbae251912d8ae93fa4c78eeb3e16").opt))),
      SlotRangeMapping(SlotRange(10922, 16383), NodeAddress(port = 9004), NodeId("6a724c321662027e9c1c58684ea82a1315a294fb").opt,
        Seq((NodeAddress(port = 9005), NodeId("9a90efc8f9cf52de6aa60be3da3071798e0e365f").opt)))
    ))
  }

  test("CLUSTER SLAVES") {
    val id = clusterMyid.get
    clusterSlaves(id).assert(_.size == 1)
  }
} 
Example 133
Source File: PingPong.scala    From effpi   with MIT License 5 votes vote down vote up
// Effpi - verified message-passing programs in Dotty
// Copyright 2019 Alceste Scalas and Elias Benussi
// Released under the MIT License: https://opensource.org/licenses/MIT
package effpi.examples.demo.pingpong

import scala.concurrent.duration.Duration

import effpi.channel.{Channel => Chan, InChannel => IChan, OutChannel => OChan}
import effpi.process._
import effpi.process.dsl._

import effpi.verifier.verify

package object types {
  type Pinger = [P <: Chan[String], C <: OChan[P]] =>> (
    Out[C, P] >>: In[P, String, (x: String) => PNil]
  )

  type Ponger = [C <: IChan[OChan[String]]] =>> (
    In[C, OChan[String], (x: OChan[String]) => Out[x.type, String]]
  )

  type PingPong = [P1 <: Chan[OChan[String]], P2 <: Chan[String]] =>> (
    Par[Ponger[P1], Pinger[P2, P1]]
  )
}

package object implementation {
  import types._

  implicit val timeout: Duration = Duration("30 seconds")

  def pinger(self: Chan[String],
             pongc: OChan[OChan[String]]): Pinger[self.type, pongc.type] = {
    println("Pinger: sending reply channel...")
    send(pongc, self) >> {
      println("Waiting for answer...")
      receive(self) { reply =>
        println(s"Pinger: ...received: ${reply}")
        nil
      }
    }
  }

  // This annotation checks whether the ponger process will always answer back
  // when it receives a message.
  @verify(property = "responsive(self)")
  def ponger(self: IChan[OChan[String]]): Ponger[self.type] = {
    println("Ponger: waiting for channel...")
    receive(self) { reqc =>
      println(s"Ponger: ...received: ${reqc}. Sending reply.")
      send(reqc, "Hello!")
    }
  }

  // This annotation checks whether p2 is eventually used for output. Note that
  // p2 is sent from pinger to ponger before being used, and the analysis can
  // track it.
  @verify(property = "eventual_output_use(p2)")
  def pingpong(p1: Chan[OChan[String]],
               p2: Chan[String]): PingPong[p1.type, p2.type] = {
    par(ponger(p1), pinger(p2, p1))
  }
}

// To run this example, try:
// sbt "examples/runMain effpi.examples.demo.pingpong.Main"
object Main {
  import implementation.pingpong
  def main(): Unit = main(Array())

  def main(args: Array[String]) = {
    val (c1, c2) = (Chan[OChan[String]](), Chan[String]())

    eval(
      pingpong(c1, c2)
    )
  }
} 
Example 134
Source File: BlockTransferService.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.network

import java.io.Closeable
import java.nio.ByteBuffer

import scala.concurrent.{Promise, Await, Future}
import scala.concurrent.duration.Duration

import org.apache.spark.Logging
import org.apache.spark.network.buffer.{NioManagedBuffer, ManagedBuffer}
import org.apache.spark.network.shuffle.{ShuffleClient, BlockFetchingListener}
import org.apache.spark.storage.{BlockManagerId, BlockId, StorageLevel}

private[spark]
abstract class BlockTransferService extends ShuffleClient with Closeable with Logging {

  
  // Abstract async upload, restored so the synchronous wrapper below compiles;
  // it matches the call made inside uploadBlockSync.
  def uploadBlock(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel): Future[Unit]

  def uploadBlockSync(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel): Unit = {
    Await.result(uploadBlock(hostname, port, execId, blockId, blockData, level), Duration.Inf)
  }
} 
Example 135
Source File: FutureActionSuite.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark

import scala.concurrent.Await
import scala.concurrent.duration.Duration

import org.scalatest.{BeforeAndAfter, Matchers}


class FutureActionSuite
  extends SparkFunSuite
  with BeforeAndAfter
  with Matchers
  with LocalSparkContext {

  before {
    sc = new SparkContext("local", "FutureActionSuite")
  }

  test("simple async action") {
    val rdd = sc.parallelize(1 to 10, 2)
    val job = rdd.countAsync()
    val res = Await.result(job, Duration.Inf)
    res should be (10)
    job.jobIds.size should be (1)
  }

  test("complex async action") {
    val rdd = sc.parallelize(1 to 15, 3)
    val job = rdd.takeAsync(10)
    val res = Await.result(job, Duration.Inf)
    res should be (1 to 10)
    job.jobIds.size should be (2)
  }

} 
Example 136
Source File: FutureTest.scala    From scalaprops   with MIT License 5 votes vote down vote up
package scalaprops
package std

import _root_.java.util.concurrent.TimeUnit
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}
import scalaz._
import scalaz.std.anyVal._
import scalaz.std.scalaFuture._

object FutureTest extends Scalaprops {
  private[this] implicit def genFuture[A](implicit A: Gen[A]): Gen[Future[A]] =
    Gen.oneOf(
      A.map(Future.successful),
      genThrowable.map(Future.failed(_))
    )

  private[this] final case class SomeFailure(n: Byte) extends Exception {
    override def toString = s"SomeFailure($n)"
  }

  private[this] implicit val singleThreadExecutionContext: ExecutionContext = new ExecutionContext {
    def execute(runnable: Runnable): Unit = runnable.run
    def reportFailure(cause: Throwable): Unit = cause.printStackTrace
  }

  private[this] implicit val genThrowable: Gen[Throwable] =
    Gen[Byte].map(SomeFailure)

  private[this] implicit val cogenThrowable: Cogen[Throwable] =
    Cogen[Byte].contramap {
      case SomeFailure(n) => n
    }

  private[this] implicit def futureEqual[A](implicit A: Equal[A]): Equal[Future[A]] =
    Equal.equal { (f1, f2) =>
      def f(future: Future[A]) =
        Await.result(future.map(\/.right).recover { case e => -\/(e) }, Duration(5, TimeUnit.SECONDS))

      (f(f1), f(f2)) match {
        case (\/-(a1), \/-(a2)) => A.equal(a1, a2)
        case (-\/(e1), -\/(e2)) => e1 == e2
        case _ => false
      }
    }

  val laws = Properties.list(
    scalazlaws.cobind.all[Future],
    scalazlaws.monadError.all[Future, Throwable],
    scalazlaws.equal.all[Future[Int]]
  )
} 
Example 137
Source File: TestExecutorImpl.scala    From scalaprops   with MIT License 5 votes vote down vote up
package scalaprops

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.Duration
import java.lang.Thread.UncaughtExceptionHandler
import sbt.testing.Logger
import java.util.concurrent.ForkJoinPool

object TestExecutorImpl {
  private[this] def newInstance(log: Logger): TestExecutor =
    new TestExecutor {
      private[this] val executionContext = {
        lazy val executorService: ForkJoinPool = new ForkJoinPool(
          sys.runtime.availableProcessors(),
          ForkJoinPool.defaultForkJoinWorkerThreadFactory,
          new UncaughtExceptionHandler {
            def uncaughtException(t: Thread, e: Throwable): Unit = {
              log.error("uncaughtException Thread = " + t)
              log.trace(e)
              e.printStackTrace()
              executorService.shutdown()
            }
          },
          false
        )
        ExecutionContext.fromExecutorService(executorService)
      }

      override def execute[A](timeout: Duration)(f: => A): A =
        Await.result(Future(f)(executionContext), timeout)

      override def shutdown(): Unit =
        executionContext.shutdown()
    }
  def withExecutor[A](logger: Logger)(f: TestExecutor => A): A = {
    val executor = newInstance(logger)
    try f(executor)
    finally executor.shutdown()
  }
} 
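A usage sketch for withExecutor, assuming nothing beyond the sbt test-interface Logger; the 10-second budget is arbitrary.

// Sketch: run a block on the pooled executor with a timeout.
import java.util.concurrent.TimeUnit
import scala.concurrent.duration.Duration

val stdoutLogger = new sbt.testing.Logger {
  def ansiCodesSupported(): Boolean = false
  def error(msg: String): Unit = Console.err.println(msg)
  def warn(msg: String): Unit = println(msg)
  def info(msg: String): Unit = println(msg)
  def debug(msg: String): Unit = ()
  def trace(t: Throwable): Unit = t.printStackTrace()
}

TestExecutorImpl.withExecutor(stdoutLogger) { executor =>
  executor.execute(Duration(10, TimeUnit.SECONDS))(1 + 1)
}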
Example 138
Source File: ParamOpt.scala    From scalaprops   with MIT License 5 votes vote down vote up
package scalaprops

import java.util.concurrent.TimeUnit
import scala.concurrent.duration.Duration

final case class ParamOpt(
  seed: Option[Seed],
  minSuccessful: Option[Int],
  maxDiscarded: Option[Int],
  minSize: Option[Int],
  maxSize: Option[Int],
  timeoutSeconds: Option[Int]
) {
  def merge(param: Param): Param = {
    Param(
      seed = seed.getOrElse(param.seed),
      minSuccessful = minSuccessful.getOrElse(param.minSuccessful),
      maxDiscarded = maxDiscarded.getOrElse(param.maxDiscarded),
      minSize = minSize.getOrElse(param.minSize),
      maxSize = maxSize.getOrElse(param.maxSize),
      timeout = timeoutSeconds match {
        case Some(n) => Duration(n, TimeUnit.SECONDS)
        case None => param.timeout
      }
    )
  }
} 
Example 139
Source File: Param.scala    From scalaprops   with MIT License 5 votes vote down vote up
package scalaprops

import java.util.concurrent.TimeUnit
import scalaprops.internal._
import scala.concurrent.duration.Duration

final case class Param(
  seed: Seed,
  minSuccessful: Int = Platform.minSuccessful,
  maxDiscarded: Int = 500,
  minSize: Int = 0,
  maxSize: Int = Gen.defaultSize,
  timeout: Duration = Duration(30, TimeUnit.SECONDS)
) {
  def rand: Rand = seed.createRand
}

object Param {
  def withCurrentTimeSeed(): Param =
    Param(
      seed = Seed.LongSeed(System.nanoTime())
    )

  def rand(rand: Rand): Endo[Param] =
    Endo(_.copy(seed = Seed.RandSeed(rand)))

  def constantSeed(value: Int): Endo[Param] =
    Endo(_.copy(seed = Seed.IntSeed(value)))

  def minSuccessful(n: Int): Endo[Param] =
    Endo(_.copy(minSuccessful = n))

  def maxSize(n: Int): Endo[Param] =
    Endo(_.copy(maxSize = n))

  def timeout(n: Int, timeunit: TimeUnit): Endo[Param] =
    Endo(_.copy(timeout = Duration(n, timeunit)))

  val id: Endo[Param] =
    Endo.idEndo[Param]
} 
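Because Param is a plain case class, a customized configuration can also be built directly with copy; the numbers here are arbitrary.

// Sketch: time-based seed, 200 successful runs required, 60-second timeout.
val param = Param.withCurrentTimeSeed().copy(
  minSuccessful = 200,
  timeout = Duration(60, TimeUnit.SECONDS)
)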
Example 140
Source File: TestingUtil.scala    From infinispan-spark   with Apache License 2.0 5 votes vote down vote up
package org.infinispan.spark.test

import java.util.function.BooleanSupplier

import scala.annotation.tailrec
import scala.concurrent.duration.{Duration, _}
import scala.language.postfixOps
import scala.util.{Failure, Success, Try}

object TestingUtil {

   val DefaultDuration = 60 seconds
   val waitBetweenRetries = 500

   def waitForCondition(command: () => Boolean, duration: Duration): Unit = {
      val NumTimes = duration.toMillis.toInt / waitBetweenRetries
      @tailrec
      def waitForCondition(numTimes: Int, sleep: Boolean): Unit = {
         if (sleep) Thread.sleep(waitBetweenRetries)
         Try(command.apply()) match {
            case Success(true) =>
            case Success(false) if numTimes == 0 => throw new Exception("Timeout waiting for condition.")
            case Failure(e) if numTimes == 0 => throw new Exception("Given up trying to execute command.", e)
            case _ => waitForCondition(numTimes - 1, sleep = true)
         }
      }
      waitForCondition(NumTimes, sleep = false)
   }

   def waitForCondition(command: () => Boolean): Unit = waitForCondition(command, DefaultDuration)

   def waitForCondition(command: BooleanSupplier): Unit = waitForCondition(toScala(command), DefaultDuration)

   private def toScala(f: BooleanSupplier) = new (() => Boolean) {
      override def apply() = f.getAsBoolean
   }
} 
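A usage sketch with a made-up condition: block until another thread flips a flag, or fail after the default 60 seconds.

// Sketch: poll a flag that a background thread sets after about a second.
@volatile var ready = false
new Thread(new Runnable {
  def run(): Unit = { Thread.sleep(1000); ready = true }
}).start()
TestingUtil.waitForCondition(() => ready)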
Example 141
Source File: StreamingUtils.scala    From infinispan-spark   with Apache License 2.0 5 votes vote down vote up
package org.infinispan.spark.test

import java.time.{Duration => JDuration}
import java.util.concurrent.TimeUnit
import java.util.{List => JList}

import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.api.java.{JavaReceiverInputDStream, JavaStreamingContext}
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.receiver.Receiver

import scala.annotation.meta.param
import scala.collection.JavaConverters._
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag


object StreamingUtils {

   class TestReceiver[T](of: Seq[T], streamItemEvery: Duration) extends Receiver[T](StorageLevel.MEMORY_ONLY) {
      override def onStart(): Unit = {
         of.foreach { item =>
            Thread.sleep(streamItemEvery.toMillis)
            store(item)
         }
      }

      override def onStop(): Unit = {}
   }

   class TestInputDStream[T: ClassTag](@(transient@param) ssc_ : StreamingContext, of: Seq[T], streamItemEvery: Duration) extends ReceiverInputDStream[T](ssc_) {
      override def getReceiver(): Receiver[T] = new TestReceiver[T](of, streamItemEvery)
   }

   def createJavaReceiverDInputStream[T](jssc: JavaStreamingContext, of: JList[T], streamItemEvery: JDuration): JavaReceiverInputDStream[T] = {
      implicit val cmt: ClassTag[T] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[T]]
      // Note: java.time.Duration.getNano would return only the nanos-of-second part
      // and silently drop whole seconds; toNanos converts the entire duration.
      JavaReceiverInputDStream.fromReceiverInputDStream(new TestInputDStream[T](jssc.ssc, of.asScala, Duration(streamItemEvery.toNanos, TimeUnit.NANOSECONDS)))
   }

} 
Example 142
Source File: MigrationImpl.scala    From metronome   with Apache License 2.0 5 votes vote down vote up
package dcos.metronome
package migration.impl

import dcos.metronome.migration.Migration
import dcos.metronome.repository.impl.kv.{JobHistoryPathResolver, JobRunPathResolver, JobSpecPathResolver}
import dcos.metronome.utils.state.{PersistentStore, PersistentStoreManagement, PersistentStoreWithNestedPathsSupport}
import org.slf4j.LoggerFactory

import scala.async.Async.{async, await}
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

class MigrationImpl(store: PersistentStore) extends Migration {
  import MigrationImpl._

  override def migrate()(implicit ec: ExecutionContext): Unit = {
    Await.result(initializeStore(), Duration.Inf)
    log.info("Migration successfully applied for version")
  }

  private[this] def initializeStore()(implicit ec: ExecutionContext): Future[Unit] =
    async {
      store match {
        case store: PersistentStoreManagement with PersistentStoreWithNestedPathsSupport =>
          await(store.initialize())
          await(store.createPath(JobSpecPathResolver.basePath))
          await(store.createPath(JobRunPathResolver.basePath))
          await(store.createPath(JobHistoryPathResolver.basePath))
        case _: PersistentStore =>
          log.info("Unsupported type of persistent store. Not running any migrations.")
          Future.successful(())
      }
    }

}

object MigrationImpl {
  private[migration] val log = LoggerFactory.getLogger(getClass)
} 
Example 143
Source File: JobRun.scala    From metronome   with Apache License 2.0 5 votes vote down vote up
package dcos.metronome
package model

import java.time.Instant

import dcos.metronome.scheduler.TaskState
import mesosphere.marathon.core.task.Task

import scala.concurrent.duration.Duration

case class JobRun(
    id: JobRunId,
    jobSpec: JobSpec,
    status: JobRunStatus,
    createdAt: Instant,
    completedAt: Option[Instant],
    startingDeadline: Option[Duration],
    tasks: Map[Task.Id, JobRunTask]
)

case class JobRunTask(id: Task.Id, startedAt: Instant, completedAt: Option[Instant], status: TaskState)

object JobRunTask {
  def apply(task: Task): JobRunTask = {
    // Note: Terminal LaunchedEphemeral tasks are expunged from the repo
    // so it is somewhat safe to derive that completedAt for these tasks is always None!
    JobRunTask(
      id = task.taskId,
      startedAt = Instant.ofEpochMilli(task.status.stagedAt.millis),
      completedAt = None,
      status = TaskState(task)
    )
  }
}

sealed trait JobRunStatus
object JobRunStatus {

  
  // Initial/Starting/Active/Success were dropped by the example extractor;
  // restored so the `names` map below resolves.
  case object Initial extends JobRunStatus
  case object Starting extends JobRunStatus
  case object Active extends JobRunStatus
  case object Success extends JobRunStatus
  case object Failed extends JobRunStatus

  val names: Map[String, JobRunStatus] =
    Map("INITIAL" -> Initial, "STARTING" -> Starting, "ACTIVE" -> Active, "SUCCESS" -> Success, "FAILED" -> Failed)
  val statusNames: Map[JobRunStatus, String] = names.map { case (a, b) => (b, a) }

  def name(status: JobRunStatus): String = statusNames(status)
  def unapply(name: String): Option[JobRunStatus] = names.get(name)
  def isDefined(name: String): Boolean = names.contains(name)
} 
Example 144
Source File: JobRunServiceFixture.scala    From metronome   with Apache License 2.0 5 votes vote down vote up
package dcos.metronome
package jobrun

import java.time.Clock

import dcos.metronome.model._
import mesosphere.marathon.core.task.Task

import scala.collection.concurrent.TrieMap
import scala.concurrent.duration.Duration
import scala.concurrent.{Future, Promise}

object JobRunServiceFixture {

  def simpleJobRunService(): JobRunService =
    new JobRunService {
      val specs = TrieMap.empty[JobRunId, StartedJobRun]

      override def getJobRun(jobRunId: JobRunId): Future[Option[StartedJobRun]] = {
        Future.successful(specs.get(jobRunId))
      }

      override def killJobRun(jobRunId: JobRunId): Future[StartedJobRun] = {
        specs.get(jobRunId) match {
          case Some(value) => Future.successful(value)
          case None => Future.failed(JobRunDoesNotExist(jobRunId))
        }
      }

      override def activeRuns(jobSpecId: JobId): Future[Iterable[StartedJobRun]] = {
        Future.successful(specs.values.filter(_.jobRun.jobSpec.id == jobSpecId))
      }
      override def listRuns(filter: JobRun => Boolean): Future[Iterable[StartedJobRun]] = {
        Future.successful(specs.values.filter(r => filter(r.jobRun)))
      }
      override def startJobRun(jobSpec: JobSpec, schedule: Option[ScheduleSpec] = None): Future[StartedJobRun] = {
        val startingDeadline: Option[Duration] = schedule.map(_.startingDeadline)
        val run = JobRun(
          JobRunId(jobSpec),
          jobSpec,
          JobRunStatus.Active,
          Clock.systemUTC().instant(),
          None,
          startingDeadline,
          Map.empty[Task.Id, JobRunTask]
        )
        val startedRun = StartedJobRun(run, Promise[JobResult].future)
        specs += run.id -> startedRun
        Future.successful(startedRun)
      }
    }
} 
Example 145
Source File: TaglessFinal.scala    From Mastering-Functional-Programming   with MIT License 5 votes vote down vote up
package jvm

import scala.concurrent.{ Future, Await }
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration

import cats._, cats.implicits._

trait Capabilities[F[_]] {
  def resource(name: String): F[String]
  def notify(target: String, text: String): F[Unit]
}

object TaglessFinalExample extends App {
  implicit val capabilities: Capabilities[Future] = new Capabilities[Future] {
    import java.io.File
    import org.apache.commons.io.FileUtils

    def resource(name: String): Future[String] =
      Future { FileUtils.readFileToString(new File(name), "utf8") }

    def notify(target: String, text: String): Future[Unit] =
      Future { println(s"Notifying $target: $text") }
  }

  implicit val anotherEnvironmentCapabilities: Capabilities[Future] = new Capabilities[Future] {
    def resource(name: String): Future[String] = ???
    def notify(target: String, text: String): Future[Unit] = ???
  }

  implicit val logMonad: Monad[Future] = new Monad[Future] {
    def flatMap[A, B](fa: Future[A])(f: (A) ⇒ Future[B]): Future[B] =
      fa.flatMap { x =>
        println(s"Trace of the Future's result: $x")
        f(x)
      }
    
    def pure[A](x: A): Future[A] = Future(x)

    def tailRecM[A, B](a: A)(f: (A) ⇒ Future[Either[A, B]]): Future[B] = ???
  }

  def income[F[_]](implicit M: Monad[F], C: Capabilities[F]): F[Unit] =
    for {
      contents <- C.resource("sales.csv")
      total = contents
        .split("\n").toList.tail  // Collection of lines, drop the CSV header
        .map { _.split(",").toList match  // List[Double] - prices of each of the entries
          { case name :: price :: Nil => price.toDouble }
        }
        .sum
      _ <- C.notify("boss@example.com", s"Total income made today: $total")
    } yield ()

  Await.result(income[Future](logMonad, capabilities), Duration.Inf)  // Block so that the application does not exit prematurely
}

object FacadeExample {
  trait Capabilities {
    def resource(name: String): String
    def notify(target: String, text: String): Unit
  }

  def income(c: Capabilities): Unit = {
    val contents = c.resource("sales.csv")
    val total = contents
      .split("\n").toList.tail  // Collection of lines, drop the CSV header
      .map { _.split(",").toList match  // List[Double] - prices of each of the entries
        { case name :: price :: Nil => price.toDouble }
      }
      .sum
    c.notify("boss@example.com", s"Total income made today: $total")
  }
} 
Example 146
Source File: BenchmarkUtil.scala    From sigmastate-interpreter   with MIT License 5 votes vote down vote up
package scalan.util

import scala.concurrent.duration.Duration
import scala.concurrent.{Future, Await}
import scala.concurrent.ExecutionContext.Implicits.global


  def measureTime[T](action: => T): (T, Long) = {
    val t0 = System.currentTimeMillis()
    val res = action
    val t = System.currentTimeMillis()
    (res, t - t0)
  }

  def runTasks(nTasks: Int)(block: Int => Unit) = {
    val (_, total) = measureTime {
      val tasks = (1 to nTasks).map(iTask => Future(block(iTask)))
      val res = Await.result(Future.sequence(tasks), Duration.Inf)
    }
    println(s"Completed $nTasks tasks in $total msec")
  }

} 
Example 147
Source File: Main.scala    From play-zipkin-tracing   with Apache License 2.0 5 votes vote down vote up
import java.util.concurrent.TimeUnit

import actors.{HelloWorldActor, HelloWorldMessage}
import akka.actor._
import akka.util.Timeout
import brave.play.actor.ActorTraceSupport._
import brave.play.actor.ZipkinTraceService

import scala.concurrent._
import scala.concurrent.duration.Duration

object Main extends App {

  val system = ActorSystem("mySystem")
  implicit val tracer = new ZipkinTraceService(system, "zipkin-akka-actor")
  implicit val timeout = Timeout(5000, TimeUnit.MILLISECONDS)

  val actor = system.actorOf(Props(classOf[HelloWorldActor], tracer), "parent-actor")

  val f = TraceableActorRef(actor) ? HelloWorldMessage("Test")(ActorTraceData())
  val result = Await.result(f, Duration.Inf)
  println(result)

  Thread.sleep(1000)
  tracer.close()
  system.terminate()

} 
Example 148
Source File: ProcessRunner.scala    From stryker4s   with Apache License 2.0 5 votes vote down vote up
package stryker4s.run.process

import better.files.File
import grizzled.slf4j.Logging

import scala.concurrent.duration.{Duration, MINUTES}
import scala.sys.process.{Process, ProcessLogger}
import scala.util.Try
import cats.effect.IO

trait ProcessRunner extends Logging {
  def apply(command: Command, workingDir: File): Try[Seq[String]] = {
    Try {
      Process(s"${command.command} ${command.args}", workingDir.toJava)
        .!!<(ProcessLogger(debug(_)))
        .linesIterator
        .toSeq
    }
  }

  def apply(command: Command, workingDir: File, envVar: (String, String)): Try[Int] = {
    val mutantProcess = Process(s"${command.command} ${command.args}", workingDir.toJava, envVar)
      .run(ProcessLogger(debug(_)))

    val exitCodeFuture = IO(mutantProcess.exitValue())
    // TODO: Maybe don't use unsafeRunTimed
    // TODO: Use timeout decided by initial test-run duration
    Try(exitCodeFuture.unsafeRunTimed(Duration(2, MINUTES)).get)
  }
}

object ProcessRunner {
  private def isWindows: Boolean = sys.props("os.name").toLowerCase.contains("windows")

  def apply(): ProcessRunner = {
    if (isWindows) new WindowsProcessRunner
    else new UnixProcessRunner
  }
} 
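A hypothetical invocation, assuming Command is a simple case class with command and args fields, as its use above suggests.

// Sketch: run a shell command in the current directory, print its output.
import better.files.File

val runner = ProcessRunner()
runner(Command("echo", "hello"), File(".")).foreach(_.foreach(println))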
Example 149
Source File: Scheduler.scala    From swave   with Mozilla Public License 2.0 5 votes vote down vote up
package swave.core

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.{Duration, FiniteDuration}
import com.typesafe.config.Config
import swave.core.impl.util.SettingsCompanion
import swave.core.macros._
import swave.core.util._

trait Scheduler {

  def settings: Scheduler.Settings

  final def schedule(interval: FiniteDuration)(body: ⇒ Unit)(implicit ec: ExecutionContext): Cancellable =
    schedule(Duration.Zero, interval)(body)

  final def schedule(initialDelay: FiniteDuration, interval: FiniteDuration)(body: ⇒ Unit)(
      implicit ec: ExecutionContext): Cancellable =
    schedule(initialDelay, interval, Runnable(body))

  final def schedule(interval: FiniteDuration, r: Runnable)(implicit ec: ExecutionContext): Cancellable =
    schedule(Duration.Zero, interval, r)

  def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, r: Runnable)(
      implicit ec: ExecutionContext): Cancellable

  final def scheduleOnce(delay: FiniteDuration)(body: ⇒ Unit)(implicit ec: ExecutionContext): Cancellable =
    scheduleOnce(delay, Runnable(body))

  def scheduleOnce(delay: FiniteDuration, r: Runnable)(implicit ec: ExecutionContext): Cancellable
}

object Scheduler {

  final case class Settings(tickDuration: FiniteDuration, ticksPerWheel: Int) {
    requireArg(tickDuration > Duration.Zero, "`tickDuration` must be > 0")
    requireArg(ticksPerWheel > 0, "`ticksPerWheel` must be > 0")
    requireArg(isPowerOf2(ticksPerWheel), "`ticksPerWheel` must be a power of 2")

    def withTickDuration(tickDuration: FiniteDuration) = copy(tickDuration = tickDuration)
    def withTicksPerWheel(ticksPerWheel: Int)          = copy(ticksPerWheel = ticksPerWheel)
  }

  object Settings extends SettingsCompanion[Settings]("swave.core.scheduler") {
    def fromSubConfig(c: Config): Settings =
      Settings(tickDuration = c getFiniteDuration "tick-duration", ticksPerWheel = c getInt "ticks-per-wheel")
  }
} 
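A hypothetical usage sketch, given an already-obtained Scheduler (swave supplies one through its StreamEnv); the delays are arbitrary.

// Sketch: tick once per second after a 3-second initial delay.
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

def startTicking(scheduler: Scheduler): Cancellable =
  scheduler.schedule(3.seconds, 1.second)(println("tick"))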
Example 150
Source File: CleaningIntegrationSpec.scala    From neotypes   with MIT License 5 votes vote down vote up
package neotypes

import neotypes.implicits.mappers.executions._
import neotypes.implicits.syntax.string._
import org.scalatest.FutureOutcome
import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration


abstract class CleaningIntegrationSpec[F[_]](testkit: EffectTestkit[F]) extends BaseIntegrationSpec(testkit) {
  override final def withFixture(test: NoArgAsyncTest): FutureOutcome = {
    complete {
      super.withFixture(test)
    } lastly {
      this.cleanDb()
    }
  }

  override final val initQuery: String = BaseIntegrationSpec.EMPTY_INIT_QUERY
} 
Example 151
Source File: PlanWriter.scala    From piglet   with Apache License 2.0 5 votes vote down vote up
package dbis.piglet.tools

import java.nio.file.{Files, Path, StandardOpenOption}

import dbis.piglet.op.{PigOperator, TimingOp}
import dbis.piglet.plan.DataflowPlan
import dbis.piglet.tools.logging.PigletLogging
//import guru.nidi.graphviz.engine.{Format, Graphviz}
//import guru.nidi.graphviz.parse.Parser

import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.concurrent.duration.Duration


case class Node(id: String, var time: Option[Duration] = None, var label: String = "") {
  
  private def mkLabel = {
    val t = if(time.isDefined) s"\n${time.get.toMillis}ms (${BigDecimal(time.get.toMillis / 1000.0).setScale(2,BigDecimal.RoundingMode.HALF_UP).toDouble}s)" else ""
    val l = s"$label\n$id\n$t" 
    PlanWriter.quote(l)
  }
  
  override def toString = s"op$id ${if(label.trim().nonEmpty) s"[label=$mkLabel]" else ""}"
}

case class Edge(from: String, to: String, var label: String = "") {
  override def toString = s"op$from -> op$to ${if(label.trim().nonEmpty) s"[label=$label]" else "" }"
}


object PlanWriter extends PigletLogging {

  // Quoting helper used by Node.mkLabel above; a minimal reconstruction, as the
  // original implementation is not included in this snippet.
  def quote(s: String): String =
    "\"" + s.replace("\"", "\\\"").replace("\n", "\\n") + "\""

  private def writeDotFile(file: Path, graph: String): Unit = {
    logger.debug(s"writing dot file to $file")
    if(Files.notExists(file.getParent)) {
      Files.createDirectories(file.getParent)
    }
    Files.write(file, List(graph).asJava, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)
  }
  
  
} 
Example 152
Source File: Keys.scala    From sbt-coursier   with Apache License 2.0 5 votes vote down vote up
package coursier.sbtcoursier

import java.io.File

import coursier.cache.CachePolicy
import coursier.ProjectCache
import coursier.core._
import coursier.util.Artifact
import sbt.librarymanagement.{GetClassifiersModule, Resolver}
import sbt.{InputKey, SettingKey, TaskKey}

import scala.concurrent.duration.Duration

object Keys {
  val coursierParallelDownloads = SettingKey[Int]("coursier-parallel-downloads")
  val coursierMaxIterations = SettingKey[Int]("coursier-max-iterations")
  val coursierChecksums = SettingKey[Seq[Option[String]]]("coursier-checksums")
  val coursierArtifactsChecksums = SettingKey[Seq[Option[String]]]("coursier-artifacts-checksums")
  val coursierCachePolicies = SettingKey[Seq[CachePolicy]]("coursier-cache-policies")
  val coursierTtl = SettingKey[Option[Duration]]("coursier-ttl")

  val coursierVerbosity = SettingKey[Int]("coursier-verbosity")

  val coursierConfigGraphs = TaskKey[Seq[(Configuration, Seq[Configuration])]]("coursier-config-graphs")

  val coursierSbtClassifiersModule = TaskKey[GetClassifiersModule]("coursier-sbt-classifiers-module")

  val coursierConfigurations = TaskKey[Map[Configuration, Set[Configuration]]]("coursier-configurations")


  val coursierParentProjectCache = TaskKey[Map[Seq[Resolver], Seq[ProjectCache]]]("coursier-parent-project-cache")
  val coursierResolutions = TaskKey[Map[Configuration, Resolution]]("coursier-resolutions")

  private[coursier] val actualCoursierResolution = TaskKey[Resolution]("coursier-resolution")

  val coursierSbtClassifiersResolutions = TaskKey[Map[Configuration, Resolution]]("coursier-sbt-classifiers-resolution")

  val coursierDependencyTree = TaskKey[Unit](
    "coursier-dependency-tree",
    "Prints dependencies and transitive dependencies as a tree"
  )
  val coursierDependencyInverseTree = TaskKey[Unit](
    "coursier-dependency-inverse-tree",
    "Prints dependencies and transitive dependencies as an inverted tree (dependees as children)"
  )

  val coursierWhatDependsOn = InputKey[String](
    "coursier-what-depends-on",
    "Prints dependencies and transitive dependencies as an inverted tree for a specific module (dependees as children)"
  )
  val coursierArtifacts = TaskKey[Map[Artifact, File]]("coursier-artifacts")
  val coursierSignedArtifacts = TaskKey[Map[Artifact, File]]("coursier-signed-artifacts")
  val coursierClassifiersArtifacts = TaskKey[Map[Artifact, File]]("coursier-classifiers-artifacts")
  val coursierSbtClassifiersArtifacts = TaskKey[Map[Artifact, File]]("coursier-sbt-classifiers-artifacts")
} 
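coursierTtl above is an Option[Duration] setting, so a build can supply any duration accepted by Duration's string factory. A hedged build.sbt sketch (the 24-hour value is illustrative):

import scala.concurrent.duration.Duration

coursierTtl := Some(Duration("24 hours")) // keep cached artifacts for a day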
Example 153
Source File: SystemShutdown.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.core

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import com.github.vonnagy.service.container.log.LoggingAdapter

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.sys.ShutdownHookThread



trait SystemShutdown extends LoggingAdapter {

  // Assumed members: the concrete container supplies the actor system and the
  // registered JVM shutdown hook; their definitions were elided from this excerpt.
  def system: ActorSystem
  private[container] var shutdownHook: Option[ShutdownHookThread] = None

  private[container] def shutdownActorSystem(fromHook: Boolean = false)(f: => Unit): Unit = {

    try {
      // Remove the hook
      if (shutdownHook.isDefined && !fromHook) {
        shutdownHook.get.remove

      }
      shutdownHook = None

      log.info("Shutting down the actor system")
      system.terminate()

      // Wait for termination if it is not already complete
      Await.result(system.whenTerminated, Duration.apply(30, TimeUnit.SECONDS))
      log.info("The actor system has terminated")
    }
    catch {
      case t: Throwable =>
        log.error(s"The actor system could not be shutdown: ${t.getMessage}", t)
    }

    // Call the passed function
    f
  }
} 
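The 30-second termination timeout above is built with Duration.apply(Long, TimeUnit); it is the same value the duration DSL produces:

import java.util.concurrent.TimeUnit
import scala.concurrent.duration._

val timeout = Duration(30, TimeUnit.SECONDS)
assert(timeout == 30.seconds) // both forms construct an equal FiniteDuration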
Example 154
Source File: TransmittablePrimitives.scala    From scala-loci   with Apache License 2.0 5 votes vote down vote up
package loci
package transmitter

import scala.concurrent.duration.{Duration, FiniteDuration}

trait TransmittablePrimitives extends TransmittableDummy {
  this: Transmittable.type =>

  final implicit def unit: IdenticallyTransmittable[Unit] =
    IdenticallyTransmittable()
  final implicit def boolean: IdenticallyTransmittable[Boolean] =
    IdenticallyTransmittable()
  final implicit def char: IdenticallyTransmittable[Char] =
    IdenticallyTransmittable()
  final implicit def byte: IdenticallyTransmittable[Byte] =
    IdenticallyTransmittable()
  final implicit def short: IdenticallyTransmittable[Short] =
    IdenticallyTransmittable()
  final implicit def int: IdenticallyTransmittable[Int] =
    IdenticallyTransmittable()
  final implicit def long: IdenticallyTransmittable[Long] =
    IdenticallyTransmittable()
  final implicit def float: IdenticallyTransmittable[Float] =
    IdenticallyTransmittable()
  final implicit def double: IdenticallyTransmittable[Double] =
    IdenticallyTransmittable()
  final implicit def string: IdenticallyTransmittable[String] =
    IdenticallyTransmittable()
  final implicit def symbol: IdenticallyTransmittable[Symbol] =
    IdenticallyTransmittable()
  final implicit def bigInt: IdenticallyTransmittable[BigInt] =
    IdenticallyTransmittable()
  final implicit def bigDecimal: IdenticallyTransmittable[BigDecimal] =
    IdenticallyTransmittable()
  final implicit def duration: IdenticallyTransmittable[Duration] =
    IdenticallyTransmittable()
  final implicit def finiteDuration: IdenticallyTransmittable[FiniteDuration] =
    IdenticallyTransmittable()
  final implicit def infiniteDuration: IdenticallyTransmittable[Duration.Infinite] =
    IdenticallyTransmittable()

  final implicit def javaBoolean: IdenticallyTransmittable[java.lang.Boolean] =
    IdenticallyTransmittable()
  final implicit def javaChar: IdenticallyTransmittable[java.lang.Character] =
    IdenticallyTransmittable()
  final implicit def javaByte: IdenticallyTransmittable[java.lang.Byte] =
    IdenticallyTransmittable()
  final implicit def javaShort: IdenticallyTransmittable[java.lang.Short] =
    IdenticallyTransmittable()
  final implicit def javaInt: IdenticallyTransmittable[java.lang.Integer] =
    IdenticallyTransmittable()
  final implicit def javaLong: IdenticallyTransmittable[java.lang.Long] =
    IdenticallyTransmittable()
  final implicit def javaFloat: IdenticallyTransmittable[java.lang.Float] =
    IdenticallyTransmittable()
  final implicit def javaDouble: IdenticallyTransmittable[java.lang.Double] =
    IdenticallyTransmittable()
  final implicit def javaBigInteger: IdenticallyTransmittable[java.math.BigInteger] =
    IdenticallyTransmittable()
  final implicit def javaBigDecimal: IdenticallyTransmittable[java.math.BigDecimal] =
    IdenticallyTransmittable()
  final implicit def javaUuid: IdenticallyTransmittable[java.util.UUID] =
    IdenticallyTransmittable()
  final implicit def javaDate: IdenticallyTransmittable[java.util.Date] =
    IdenticallyTransmittable()
} 
Example 155
Source File: ConnectionSetupParser.scala    From scala-loci   with Apache License 2.0 5 votes vote down vote up
package loci
package communicator

import scala.concurrent.duration.Duration
import scala.util.Try

trait ConnectionSetupParser {
  type Properties

  protected abstract class PropertyParser[T](
    val parse: List[String] => Option[T])

  protected abstract class PropertyParserTry[T](parse: List[String] => T)
    extends PropertyParser[T](value => Try { parse(value) }.toOption)

  protected implicit class PropertiesParsingOp(properties: Properties) {
    def set[T: PropertyParser]
        (key: String)(transform: T => Properties => Properties)
        (implicit props: ConnectionSetupFactory.Properties) =
      (props get key
        flatMap { implicitly[PropertyParser[T]] parse _ }
        map { transform(_)(properties) }
        getOrElse properties)
  }
}

trait SimpleConnectionSetupProperties { this: ConnectionSetupParser =>
  protected implicit object booleanParser
    extends PropertyParserTry(_.head.toBoolean)
  protected implicit object byteParser
    extends PropertyParserTry(_.head.toByte)
  protected implicit object shortParser
    extends PropertyParserTry(_.head.toShort)
  protected implicit object intParser
    extends PropertyParserTry(_.head.toInt)
  protected implicit object longParser
    extends PropertyParserTry(_.head.toLong)
  protected implicit object floatParser
    extends PropertyParserTry(_.head.toFloat)
  protected implicit object doubleParser
    extends PropertyParserTry(_.head.toDouble)
  protected implicit object stringParser
    extends PropertyParserTry(_.head)
  protected implicit object durationParser
    extends PropertyParserTry(value => Duration(value.head))
  protected implicit object finiteDurationParser
    extends PropertyParserTry(value =>
      Duration fromNanos Duration(value.head).toNanos)
  protected implicit object byteListParser
    extends PropertyParserTry(_ map { _.toByte })
  protected implicit object shortListParser
    extends PropertyParserTry(_ map { _.toShort })
  protected implicit object intListParser
    extends PropertyParserTry(_ map { _.toInt })
  protected implicit object longListParser
    extends PropertyParserTry(_ map { _.toLong })
  protected implicit object floatListParser
    extends PropertyParserTry(_ map { _.toFloat })
  protected implicit object doubleListParser
    extends PropertyParserTry(_ map { _.toDouble })
  protected implicit object stringListParser
    extends PropertyParserTry(identity)
  protected implicit object durationListParser
    extends PropertyParserTry(_ map { Duration(_) })
  protected implicit object finiteDurationListParser
    extends PropertyParserTry(_ map { Duration fromNanos Duration(_).toNanos })
} 
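durationParser and finiteDurationParser above lean on Duration's string factory; the Try wrapper in PropertyParserTry matters because toNanos throws for infinite durations. A runnable sketch of both conversions:

import scala.concurrent.duration.{Duration, FiniteDuration}

val d: Duration = Duration("5 seconds")               // what durationParser produces
val f: FiniteDuration = Duration.fromNanos(d.toNanos) // what finiteDurationParser produces
// Duration("Inf").toNanos would throw, which the surrounding Try turns into None.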
Example 156
Source File: RemoteAccessor.scala    From scala-loci   with Apache License 2.0 5 votes vote down vote up
package loci
package transmitter

import contexts.Immediate.Implicits.global

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

object RemoteAccessor {
  trait Default { this: language.PlacedValue.type =>
    implicit class BasicMultipleAccessor[V, R, T, L](value: V from R)(
        implicit ev: Transmission[V, R, T, L, Multiple])
      extends RemoteAccessor {

      def asLocalFromAll: Seq[(Remote[R], T)] = value.remotes zip value.retrieveValues
    }

    implicit class BasicBlockingMultipleAccessor[V, R, T, L](value: V from R)(
        implicit ev: Transmission[V, R, Future[T], L, Multiple])
      extends RemoteAccessor {

      def asLocalFromAll_?(timeout: Duration): Seq[(Remote[R], T)] =
        value.remotes zip (Await result (Future sequence value.retrieveValues, timeout))

      def asLocalFromAll_! : Seq[(Remote[R], T)] = asLocalFromAll_?(Duration.Inf)
    }


    implicit class BasicOptionalAccessor[V, R, T, L](value: V from R)(
        implicit ev: Transmission[V, R, T, L, Optional])
      extends RemoteAccessor {

      def asLocal: Option[T] = value.retrieveValue
    }

    implicit class BasicBlockingOptionalAccessor[V, R, T, L](value: V from R)(
        implicit ev: Transmission[V, R, Future[T], L, Optional])
      extends RemoteAccessor {

      def asLocal_?(timeout: Duration): Option[T] =
        value.retrieveValue map { Await result (_, timeout) }

      def asLocal_! : Option[T] = asLocal_?(Duration.Inf)
    }


    implicit class BasicSingleAccessor[V, R, T, L](value: V from R)(
        implicit ev: Transmission[V, R, T, L, Single])
      extends RemoteAccessor {

      def asLocal: T = value.retrieveValue
    }

    implicit class BasicBlockingSingleAccessor[V, R, T, L](value: V from R)(
        implicit ev: Transmission[V, R, Future[T], L, Single])
      extends RemoteAccessor {

      def asLocal_?(timeout: Duration): T =
        Await result (value.retrieveValue, timeout)

      def asLocal_! : T = asLocal_?(Duration.Inf)
    }
  }

  sealed trait Access {
    implicit class MultipleValueAccess[V, T, R, L](value: V from R)(implicit
        ev: Transmission[V, R, T, L, _]) {

      def cache[B <: AnyRef](id: Any)(body: => B): B = ev.cache(id, body)
      val remoteJoined: Notice.Stream[Remote[R]] = ev.remoteJoined
      val remoteLeft: Notice.Stream[Remote[R]] = ev.remoteLeft
      def remotes: Seq[Remote[R]] = ev.remotesReferences
      def retrieveValues: Seq[T] = ev.retrieveValues
    }
  }
}

trait RemoteAccessor extends RemoteAccessor.Access {
  implicit class OptionalValueAccess[V, T, R, L](value: V from R)(implicit
      ev: Transmission[V, R, T, L, Optional])
    extends MultipleValueAccess(value)(ev) {

    def remote: Option[Remote[R]] = ev.remotesReferences.headOption
    def retrieveValue: Option[T] = ev.retrieveValues.headOption
  }

  implicit class SingleValueAccess[V, T, R, L](value: V from R)(implicit
      ev: Transmission[V, R, T, L, Single])
    extends MultipleValueAccess(value)(ev) {

    def remote: Remote[R] = ev.remotesReferences.head
    def retrieveValue: T = ev.retrieveValues.head
  }
} 
Example 157
Source File: Main.scala    From troy   with Apache License 2.0 5 votes vote down vote up
package demo6

import java.util.UUID
import com.datastax.driver.core.{Row, Cluster, Session}
import troy.dsl._
import troy.driver.DSL._

import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, title: String)

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val create = withSchema {
    (authorId: String, title: String) =>
      cql"""
         INSERT INTO test.posts (author_id , post_id , post_title )
         VALUES ( $authorId, now(), $title);
       """.prepared.executeAsync
  }

  val listByAuthor = withSchema {
    (authorId: String) =>
      cql"""
         SELECT post_id, post_title
         FROM test.posts
         WHERE author_id = $authorId
       """
        .prepared
        .executeAsync
        .as(Post)
  }

  println(Await.result(create("test", "title"), Duration(1, "second")))
  println(Await.result(listByAuthor("test"), Duration(1, "second")))

  session.close()
  cluster.close()
} 
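The Await calls in these demos build their timeouts with Duration's (length, unit-name) factory; the unit may be spelled out as a string:

import scala.concurrent.duration._

assert(Duration(1, "second") == 1.second)
assert(Duration(5000, "milliseconds") == 5.seconds) // equality compares the underlying length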
Example 158
Source File: Main.scala    From troy   with Apache License 2.0 5 votes vote down vote up
package demo5

import java.util.UUID
import com.datastax.driver.core.{Cluster, Session}
import troy.dsl._
import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, comments: Map[Int, String])

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val getCommentsByLine = withSchema {
    (authorId: String, postId: UUID, line: Int) =>
      cql"""
         SELECT post_id, comments
         FROM test.posts
         WHERE author_id = $authorId
           AND post_id = $postId
           AND comments CONTAINS KEY $line
       """.prepared.as(Post)
  }

  val postId = UUID.fromString("a4a70900-24e1-11df-8924-001ff3591711")
  println(Await.result(getCommentsByLine("test", postId, 5), Duration(1, "second")))

  session.close()
  cluster.close()
} 
Example 159
Source File: Main.scala    From troy   with Apache License 2.0 5 votes vote down vote up
package demo2

import java.util.UUID
import com.datastax.driver.core.{Row, Cluster, Session}
import troy.dsl._
import troy.driver.DSL._

import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, title: String)

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val listByAuthor = withSchema {
    (authorId: String) =>
      cql"""
         SELECT post_id, post_title
         FROM test.posts
         WHERE author_id = $authorId
       """
        .prepared
        .executeAsync
        .as(Post)
  }

  val result = listByAuthor("test")
  println(Await.result(result, Duration(1, "second")))

  session.close()
  cluster.close()
} 
Example 160
Source File: Main.scala    From troy   with Apache License 2.0 5 votes vote down vote up
package demo4

import java.util.UUID
import com.datastax.driver.core.{Cluster, Session}
import troy.dsl._
import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, title: String)

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val listByTag = withSchema {
    (tag: String) =>
      cql"""
         SELECT post_id, post_title
         FROM test.posts
         WHERE post_tags CONTAINS $tag
       """.prepared.as(Post)
  }

  val titlesF = listByTag("test").map(_.map(_.title))
  val titles = Await.result(titlesF, Duration(1, "second"))
  println("Matching titles:")
  titles.foreach(println)

  session.close()
  cluster.close()
} 
Example 161
Source File: Main.scala    From troy   with Apache License 2.0 5 votes vote down vote up
package demo99

import java.util.UUID

import com.datastax.driver.core._
import troy.dsl._
import troy.driver.DSL._

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext}
import scala.concurrent.ExecutionContext.Implicits.global


case class Post(
                 id: UUID,
                 authorName: String,
                 reviewerName: Option[String],
                 title: String,
                 rating: Option[Int],
                 tags: Seq[String]
               )

class PostService(implicit session: Session, ec: ExecutionContext) {
  val create = withSchema { (authorId: String, title: String) =>
    cql"""
       INSERT INTO test.posts (author_id , post_id , post_title )
       VALUES ( $authorId, now(), $title);
     """.prepared.executeAsync
  }

  val get = withSchema { (authorId: String, postId: UUID) =>
    cql"""
      SELECT post_id, author_name, reviewer_name, post_title, post_rating, post_tags
      FROM test.posts
      WHERE author_id = $authorId AND post_id = $postId;
    """.prepared.as(Post)
  }

  val listByAuthor = withSchema { (authorId: String) =>
      cql"""
         SELECT post_id, author_name, reviewer_name, post_title, post_rating, post_tags
         FROM test.posts
         WHERE author_id = $authorId
       """
        .prepared
        .executeAsync
        .as(Post)
  }

  val update = withSchema { (authorId: String, postId: UUID, newTitle: String) =>
    cql"""
       UPDATE test.posts SET post_title = $newTitle WHERE author_id = $authorId AND post_id = $postId;
     """.prepared.executeAsync
  }

  val delete = withSchema { (authorId: String, postId: UUID) =>
    cql"""
       DELETE FROM test.posts WHERE author_id = $authorId AND post_id = $postId;
     """.prepared.executeAsync
  }

}

object Main extends App {
  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val postService = new PostService()


  val result = for {
    _ <- postService.create("test", "title")
    posts <- postService.listByAuthor("test")
    postId = posts.head.id
    _ <- postService.update("test", postId, "new title")
    updatedPost <- postService.get("test", postId)
    _ <- postService.delete("test", postId)
    deletedPosts <- postService.listByAuthor("test")
  } yield (posts, updatedPost, deletedPosts)

  val (posts, updatedPost, deletedPosts) = Await.result(result, Duration(1, "second"))

  println(posts)
  println(updatedPost)
  println(deletedPosts)

  session.close()
  cluster.close()
} 
Example 162
Source File: Main.scala    From troy   with Apache License 2.0 5 votes vote down vote up
package demo3

import java.util.UUID
import com.datastax.driver.core.{Row, Cluster, Session}
import troy.dsl._
import troy.driver.DSL._

import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, title: String)

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val get = withSchema {
    (authorId: String, postId: UUID) =>
      cql"""
         SELECT post_id, post_title
         FROM test.posts
         WHERE author_id = $authorId AND post_id = $postId
       """
        .prepared
        .executeAsync
        .oneOption
        .as(Post)
  }

  val result = get("test", UUID.fromString("a4a70900-24e1-11df-8924-001ff3591711"))
  val maybePost: Option[Post] = Await.result(result, Duration(1, "second"))
  println(maybePost.map(_.title).getOrElse("Post not found"))

  session.close()
  cluster.close()
} 
Example 163
Source File: Main.scala    From troy   with Apache License 2.0 5 votes vote down vote up
package demo1

import java.util.UUID
import com.datastax.driver.core.{Cluster, Session}
import troy.dsl._
import scala.concurrent.Await
import scala.concurrent.duration.Duration

case class Post(id: UUID, img: String) // Map doesn't work with Primitives yet. See: https://github.com/cassandra-scala/troy/issues/18

object Main extends App {
  import scala.concurrent.ExecutionContext.Implicits.global

  val port: Int = 9042
  val host: String = "127.0.0.1"

  private val cluster =
    new Cluster.Builder().addContactPoints(host).withPort(port).build()

  implicit val session: Session = cluster.connect()

  val getCommentsByLine = withSchema.minVersion(2).maxVersion(2) {
    (authorId: String, postId: UUID) =>
      cql"""
         SELECT post_id, post_img
         FROM test.posts
         WHERE author_id = $authorId
           AND post_id = $postId
       """.prepared.as(Post)
  }

  val postId = UUID.fromString("a4a70900-24e1-11df-8924-001ff3591711")
  println(Await.result(getCommentsByLine("test", postId), Duration(1, "second")))

  session.close()
  cluster.close()
} 
Example 164
Source File: Playground.scala    From troy   with Apache License 2.0 5 votes vote down vote up
package troy
package meta

import java.util.UUID

import com.datastax.driver.core.{ Cluster, Session }
import com.datastax.driver.core.utils.UUIDs

import scala.concurrent.duration.Duration
import scala.concurrent.{ Await, Future }
import scala.util.Try

@schema object Schema

object Playground extends App {
  def query[I, O](x: String): I => Future[Seq[O]] = ???
  case class Post(id: UUID, title: String)

  import Schema._

  withSession { implicit session =>
    import scala.concurrent.ExecutionContext.Implicits.global

    @schemasafe val getAuthorPosts =
      query[(UUID, Int), Post]("select post_id, post_title from test.posts where author_id = ? AND post_rating >= ? ALLOW FILTERING;")

    val authorId = UUID.fromString("6287c470-e298-11e6-9b3d-ffeaf4ddcb54")
    println(Await.result(getAuthorPosts((authorId, 4)): Future[Iterable[Post]], Duration.Inf))
  }

  def withSession[T](f: Session => T) = {
    val cluster = new Cluster.Builder().addContactPoints("127.0.0.1").withPort(9042).build()
    val session: Session = cluster.connect()
    Try(f(session)).failed.foreach(println)
    session.close()
    cluster.close()
  }
} 
Example 165
Source File: Usage.scala    From troy   with Apache License 2.0 5 votes vote down vote up
package troy
package driver

import java.util.UUID

import com.datastax.driver.core._
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext.Implicits.global
import DSL._


object Usage extends App {
  val cluster = Cluster.builder().addContactPoint("127.0.0.1").build()
  implicit val session: Session = cluster.connect()
  case class Post(id: UUID, authorName: String, title: String)

  val getByTitle = {
    import InternalDsl._
    val prepared = session.prepare("SELECT post_id, author_name, post_title FROM test.posts where post_title = ?;")

    (title: String) => {
      def parser(row: _root_.com.datastax.driver.core.Row) =
        Post(
          column[UUID](0)(row).as[CDT.Uuid],
          column[String](1)(row).as[CDT.Text],
          column[String](2)(row).as[CDT.Text]
        )

      bind(prepared, param(title).as[CDT.Text])
        .executeAsync
        .oneOption
        .parseAs(parser)
    }
  }

  println(Await.result(getByTitle("Title"), Duration(1, "second")))
  session.close()
  cluster.close()
} 
Example 166
Source File: MockPingPongServer.scala    From Dsl.scala   with Apache License 2.0 5 votes vote down vote up
package com.thoughtworks.dsl
import com.thoughtworks.enableMembersIf
import org.scalatest.{AsyncTestSuite, BeforeAndAfterAll, Suite}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.Duration


@enableMembersIf(scala.util.Properties.versionNumberString.matches("""^2\.1(1|2)\..*$"""))
trait MockPingPongServer extends BeforeAndAfterAll { this: Suite =>

  implicit def executionContext: ExecutionContext

  protected implicit val system = akka.actor.ActorSystem()

  protected implicit val materializer = akka.stream.ActorMaterializer()

  protected val mockServer = {
    import akka.http.scaladsl.server.Directives._
    val route =
      get {
        path("ping") {
          complete("PING!")
        } ~ path("pong") {
          complete("PONG!")
        }
      }
    concurrent.Await.result(akka.http.scaladsl.Http().bindAndHandle(route, "localhost", 8085), Duration.Inf)
  }

  override protected def afterAll(): Unit = {
    mockServer
      .unbind()
      .onComplete(_ => system.terminate())
  }

} 
Example 167
Source File: IOPlatform.scala    From cats-effect   with Apache License 2.0 5 votes vote down vote up
package cats.effect.internals

import cats.effect.IO

import scala.concurrent.duration.Duration

private[effect] object IOPlatform {

  
  final private class ResyncCallback[A] extends (Either[Throwable, A] => Unit) {
    var isActive = true
    var value: Either[Throwable, A] = _

    def apply(value: Either[Throwable, A]): Unit =
      if (isActive) {
        isActive = false
        this.value = value
      } else
        value match {
          case Left(e) => Logger.reportFailure(e)
          case _       => ()
        }
  }
} 
Example 168
Source File: TAutoSpeedActor.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.experiment

import akka.actor.{ActorSystem, Props}
import com.gabry.job.core.actor.AutoSpeedActor
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration.Duration


  override def isAutoDriveMessage(msg: Any): Boolean = true
}
object TAutoSpeedActor {
  def main(args: Array[String]): Unit = {
    import scala.concurrent.duration._
    val system = ActorSystem("testAutoSpeed", ConfigFactory.load())
    val batchNum = 10
    val batchInterval = 30.milliseconds
    val autoSpeedActor = system.actorOf(AutoSpeed.props(batchNum,batchInterval,1526367346362L),"TestAutoSpeedActor")

    val messageTimes = Array(
      1526367346362L,1526367346362L,1526367346362L,1526367346362L,1526367346362L
      ,1526367346362L,1526367346362L,1526367346362L,1526367346362L,1526367346363L

      ,1526367346363L,1526367346363L,1526367346363L,1526367346363L,1526367346363L
      ,1526367346363L,1526367346363L,1526367346363L,1526367346363L,1526367346363L

      ,1526367346364L,1526367346365L,1526367346366L,1526367346367L,1526367346368L
      ,1526367346368L,1526367346368L,1526367346368L,1526367346369L,1526367346370L

      ,1526367346370L,1526367346371L,1526367346372L,1526367346372L,1526367346372L
      ,1526367346372L,1526367346372L,1526367346372L)

    messageTimes.foreach{ tim =>
      Thread.sleep(3)
      autoSpeedActor ! AutoSpeedMessage(tim,s"time=$tim")
    }
    Thread.sleep(300)
    messageTimes.foreach{ tim =>
      Thread.sleep(3)
      autoSpeedActor ! AutoSpeedMessage(tim+6,s"time=$tim")
    }
  }
} 
Example 169
Source File: AbstractTrainer.scala    From Clustering4Ever   with Apache License 2.0 5 votes vote down vote up
package org.clustering4ever.spark.clustering.mtm

import org.apache.spark.mllib.linalg.DenseVector
import java.util.concurrent.TimeUnit._
import org.apache.spark.rdd.RDD
import scala.concurrent.duration.{FiniteDuration, Duration}


trait AbstractTrainer extends Serializable
{
  private var iter = 0
  def getLastIt = iter

  private var converge = 1D
  def getLastConvergence() = converge

  private var trainingDuration = Duration.Zero
  def getLastTrainingDuration = trainingDuration

  protected def initModel(dataset: RDD[Array[Double]], modelOptions: Option[Map[String, String]])

  protected def trainingIteration(dataset: RDD[Array[Double]], currentIteration: Int, maxIteration: Int): Double

  protected def getModel: AbstractModel

  final def training(
    dataset: RDD[Array[Double]],
    modelOptions: Option[Map[String, String]] = None,
    maxIteration: Int = 100,
    endConvergeDistance: Double = 0.001
  ): AbstractModel =
  {
    val datasetSize = dataset.count()

    val startLearningTime = System.currentTimeMillis()

    val model = initModel(dataset, modelOptions)
    iter = 0
    converge = 1D

    while (converge > endConvergeDistance && iter < maxIteration)
    {
      // Training iteration
      val sumConvergence = trainingIteration(dataset, iter, maxIteration)
      // process convergence
      converge = sumConvergence / datasetSize
      iter += 1
    }

    trainingDuration = Duration.create(System.currentTimeMillis() - startLearningTime, MILLISECONDS)
    println("le model apres training est : "+getModel)

    // return the model
    getModel
  }
} 
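Duration.create used above is an alias for Duration.apply(Long, TimeUnit), handy for wrapping a measured wall-clock interval:

import java.util.concurrent.TimeUnit.MILLISECONDS
import scala.concurrent.duration.Duration

val trainingDuration = Duration.create(1500L, MILLISECONDS) // e.g. elapsed millis
println(trainingDuration) // 1500 milliseconds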
Example 170
Source File: EtlWorkflowTest.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package mass.workflow.etl

import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
import mass.rdp.RdpSystem
import mass.rdp.etl.EtlWorkflow
import org.scalatest.wordspec.AnyWordSpecLike

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class EtlWorkflowTest extends ScalaTestWithActorTestKit with AnyWordSpecLike {
  var rdpSystem: RdpSystem = _
  var etlWorkflow: EtlWorkflow = _

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    rdpSystem = RdpSystem(system)
    etlWorkflow = EtlWorkflow.fromXML(TestStub.graphXmlConfig, rdpSystem).get
  }

  override protected def afterAll(): Unit = {
    etlWorkflow.close()
    super.afterAll()
  }

  "EtlWorkflow" should {
    "show" in {
      etlWorkflow.connectors should not be empty
      etlWorkflow.connectors.foreach(c => println(s"connector: $c"))
      println(etlWorkflow.graph)

      etlWorkflow.connectors.foreach(println)
      println(etlWorkflow.graph.name)
      println(etlWorkflow.graph.graphSource)
      etlWorkflow.graph.graphFlows.foreach(println)
      println(etlWorkflow.graph.graphSink)
    }

    "run" in {
      val execution = etlWorkflow.run()
      val result = Await.result(execution.future, Duration.Inf)
      println(result)
    }
  }
} 
Example 171
Source File: AsyncFunctionLoop.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.datamountaineer.streamreactor.connect.hbase.kerberos.utils

import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.{Executors, TimeUnit}

import com.typesafe.scalalogging.StrictLogging

import scala.concurrent.duration.Duration

class AsyncFunctionLoop(interval: Duration, description: String)(thunk: => Unit)
  extends AutoCloseable
    with StrictLogging {

  private val running = new AtomicBoolean(false)
  private val executorService = Executors.newSingleThreadExecutor

  def start(): Unit = {
    if (!running.compareAndSet(false, true)) {
      throw new IllegalStateException(s"$description already running.")
    }
    logger.info(s"Starting $description loop with an interval of ${interval.toMillis}ms.")
    executorService.submit(new Runnable {
      override def run(): Unit = {
        while (running.get()) {
          try {
            Thread.sleep(interval.toMillis)
            thunk
          }
          catch {
            case _: InterruptedException =>
            case t: Throwable =>
              logger.warn("Failed to renew the Kerberos ticket", t)
          }
        }
      }
    })
  }

  override def close(): Unit = {
    if (running.compareAndSet(true, false)) {
      executorService.shutdownNow()
      executorService.awaitTermination(10000, TimeUnit.MILLISECONDS)
    }
  }
} 
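A minimal usage sketch for the loop above; the one-hour interval and the body are illustrative:

import scala.concurrent.duration._

val ticketRenewer = new AsyncFunctionLoop(1.hour, "Kerberos ticket renewal")(
  println("renewing ticket") // real renewal logic would go here
)
ticketRenewer.start()
// ... later, on shutdown:
ticketRenewer.close()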
Example 172
Source File: AsyncFunctionLoop.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.landoop.streamreactor.connect.hive

import java.util.concurrent.{Executors, TimeUnit}
import java.util.concurrent.atomic.AtomicBoolean

import com.typesafe.scalalogging.StrictLogging

import scala.concurrent.duration.Duration

class AsyncFunctionLoop(interval: Duration, description: String)(thunk: => Unit)
  extends AutoCloseable
    with StrictLogging {

  private val running = new AtomicBoolean(false)
  private val executorService = Executors.newFixedThreadPool(1)

  def start(): Unit = {
    if (!running.compareAndSet(false, true)) {
      throw new IllegalStateException(s"$description already running.")
    }
    logger.info(s"Starting $description loop with an interval of ${interval.toMillis}ms.")
    executorService.submit(new Runnable {
      override def run(): Unit = {
        while (running.get()) {
          try {
            Thread.sleep(interval.toMillis)
            thunk
          }
          catch {
            case _: InterruptedException =>
            case t: Throwable =>
              logger.warn("Failed to renew the Kerberos ticket", t)
          }
        }
      }
    })
  }

  override def close(): Unit = {
    if (running.compareAndSet(true, false)) {
      executorService.shutdownNow()
      executorService.awaitTermination(10000, TimeUnit.MILLISECONDS)
    }
  }
} 
Example 174
Source File: Http4sUtils.scala    From core   with Apache License 2.0 5 votes vote down vote up
package com.smartbackpackerapp.http

import cats.{Applicative, Monad}
import cats.data.{Kleisli, OptionT}
import cats.effect.IO
import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import org.http4s.server.AuthMiddleware
import org.http4s.{EntityBody, Request}

import scala.concurrent.Await
import scala.concurrent.duration.Duration

object Http4sUtils {

  private def authUser[F[_]](implicit F: Applicative[F]): Kleisli[OptionT[F, ?], Request[F], String] =
    Kleisli(_ => OptionT.liftF(F.pure("access_token")))

  def middleware[F[_]: Monad]: AuthMiddleware[F, String] = AuthMiddleware.apply[F, String](authUser)

  val taskMiddleware: AuthMiddleware[Task, String] = middleware[Task]
  val ioMiddleware: AuthMiddleware[IO, String] = middleware[IO]

  implicit class ByteVector2String(body: EntityBody[IO]) {
    def asString: String = {
      val array = body.compile.toVector.unsafeRunSync().toArray
      new String(array.map(_.toChar))
    }
  }

  implicit class ByteVector2StringTask(body: EntityBody[Task]) {
    def asString: String = {
      val array = Await.result(body.compile.toVector.runAsync, Duration.Inf).toArray
      new String(array.map(_.toChar))
    }
  }

} 
Example 175
Source File: StartClusterAction.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.aws

import com.criteo.dev.cluster.aws.AwsUtilities.NodeRole
import com.criteo.dev.cluster.config.AWSConfig
import com.criteo.dev.cluster.{GeneralUtilities, NodeFactory, StartServiceAction}
import org.slf4j.LoggerFactory

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global


object StartClusterAction {

  private val logger = LoggerFactory.getLogger(StartClusterAction.getClass)

  def apply(conf: AWSConfig, clusters: Iterable[JcloudCluster]) = {

    clusters.foreach(c => {
      logger.info(s"Starting services on master ${c.master.getId}")
      val masterNode = NodeFactory.getAwsNode(conf, c.master)
      StartServiceAction(masterNode, NodeRole.Master)
      logger.info(s"Successfully started services on master ${c.master.getId}")

      logger.info(s"Starting services on ${c.slaves.size} in parallel.")
      val setupSlaves = c.slaves.map(s => GeneralUtilities.getFuture {
        logger.info(s"Starting service on slave ${s.getId}")
        val slaveNode = NodeFactory.getAwsNode(conf, s)
        StartServiceAction(slaveNode, NodeRole.Slave)
        logger.info(s"Successfully started service on slave ${s.getId}")
      })

      val aggSetupSlaveFutures = Future.sequence(setupSlaves)
      Await.result(aggSetupSlaveFutures, Duration.Inf)
    })
  }
} 
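The slave setup above uses the usual fan-out pattern: wrap each unit of work in a Future, combine them with Future.sequence, and block with Duration.Inf until all complete. Reduced to a self-contained sketch:

import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext.Implicits.global

val tasks = (1 to 3).map(i => Future { s"slave-$i started" })
Await.result(Future.sequence(tasks), Duration.Inf).foreach(println)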
Example 176
Source File: DestroyAwsCliAction.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.aws

import com.criteo.dev.cluster.config.GlobalConfig
import com.criteo.dev.cluster.{CliAction, GeneralUtilities, Public}
import org.jclouds.compute.ComputeService
import org.jclouds.compute.domain.NodeMetadata.Status
import org.slf4j.LoggerFactory

import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext.Implicits.global


@Public object DestroyAwsCliAction extends CliAction[Unit] {

  override def command : String = "destroy-aws"

  override def usageArgs =
    List("cluster.id", Option("force"))

  override def help : String =
    "Destroys AWS cluster with given cluster.id." +
      "  Second option must be force if trying to destroy a cluster for another user."

  private val logger = LoggerFactory.getLogger(DestroyAwsCliAction.getClass)

  def applyInternal(args: List[String], config: GlobalConfig): Unit = {
    logger.info("Connecting to AWS to fetch nodes to destroy.")
    val conf = config.backCompat
    val instanceId = args(0)
    val result = {
      if (args.length == 2 && (args(1)).toLowerCase().equals("force")) {
        AwsUtilities.getCluster(conf, instanceId)
      } else {
        AwsUtilities.getUserCluster(conf, instanceId)
      }
    }
    if (result == null || result.master.getStatus().equals(Status.TERMINATED)) {
      logger.info("No clusters found matching criteria, or force not specified for deleting cluster of other users.")
      return
    }
    destroy(conf, List(result))
  }

  def destroy(conf: Map[String, String], clusters: Iterable[JcloudCluster]) = {
    val nodesToDestroy = clusters.flatMap(_.slaves) ++ clusters.map(_.master)

    val computeService = AwsUtilities.getComputeService(conf)
    logger.info(s"Destroying ${nodesToDestroy.size} nodes in parallel.")
    val futures = nodesToDestroy.filter(n => !(n.getStatus().equals(Status.TERMINATED))).map(n => {
      GeneralUtilities.getFuture {
        logger.info(s"Destroying instance ${n.getId()}")
        AwsUtilities.retryAwsAction(new RetryableDestroy(computeService, n.getId()))
        logger.info(s"Destroyed.")
      }
    })
    val result = Future.sequence(futures)
    Await.result(result, Duration.Inf)
  }
}


class RetryableDestroy(computeService : ComputeService, nodeid : String) extends Retryable[Any] {
  def action() : Unit = {
    computeService.destroyNode(nodeid)
  }
} 
Example 177
Source File: ConfigureHostsAction.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.aws

import com.criteo.dev.cluster.command.SshMultiAction
import com.criteo.dev.cluster.config.AWSConfig
import com.criteo.dev.cluster.{GeneralConstants, GeneralUtilities, NodeFactory}
import org.jclouds.compute.domain.NodeMetadata
import org.slf4j.LoggerFactory

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global


object ConfigureHostsAction {

  // Assumed wrapper: the object's apply method, which fans editEtcHosts out over
  // the cluster, was elided from this excerpt.
  def editEtcHosts(conf: AWSConfig, target: NodeMetadata, cluster: JcloudCluster) : Unit = {
    val node = NodeFactory.getAwsNode(conf, target)
    val sshAction = new SshMultiAction(node)
    sshAction.add("echo \"127.0.0.1   localhost localhost.localdomain\" | sudo tee --append /etc/hosts")
    sshAction.add("echo \"127.0.0.1 $(hostname)\" | sudo tee --append /etc/hosts")
    sshAction.add("echo \"" + AwsUtilities.privateIp(cluster.master) + " " +
      GeneralConstants.masterHostName + "\" | sudo tee --append /etc/hosts")
    cluster.slaves.foreach(sm => {

      val slaveName = sm.getUserMetadata.get(AwsConstants.hostName)
      val ip = AwsUtilities.privateIp(sm)
      sshAction.add("echo \"" + s"${ip} $slaveName" + "\" | sudo tee --append /etc/hosts")
    })
    sshAction.run()
  }
} 
Example 178
Source File: ConfigureDiskAction.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.aws

import com.criteo.dev.cluster.command.SshAction
import com.criteo.dev.cluster.{command, _}
import com.criteo.dev.cluster.config.AWSConfig
import org.slf4j.LoggerFactory

import scala.collection.mutable
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global


object ConfigureDiskAction {

  private val logger = LoggerFactory.getLogger(ConfigureDiskAction.getClass)

  // Assumed wrapper: the object's apply method was elided from this excerpt.
  def configureDisk(node: Node) : List[String] = {
    val result = SshAction(node, "lsblk", returnResult = true).stripLineEnd
    logger.info(s"Block information on ${node.ip}:")
    val lines = result.split("\n").map(_.trim)
    require(lines(0).trim.split("\\s+")(6).equalsIgnoreCase("MOUNTPOINT"),
      s"Mount point not in expected position in lsblk output: ${lines(0)}")

    //this is a bit delicate, but assuming the unmounted ones are at the end,
    //then we will take the ones up to the first one that has a mount entry.
    val toMount = lines.reverse.takeWhile(l => l.split("\\s+").length <= 6).map(l => l.split("\\s+")(0))
    val mountCommands = toMount.zipWithIndex.flatMap { case (tm, i) =>
      List(
        s"sudo echo -e 'o\\nn\\np\\n1\\n\\n\\nw' | sudo fdisk /dev/$tm", // create one partition (n, p, 1, default start, default end of sector)
        s"sudo /sbin/mkfs.ext4 /dev/${tm}1", // make fs
        s"sudo mkdir -p /${GeneralConstants.data}/$i",
        s"sudo mount /dev/${tm}1 /${GeneralConstants.data}/$i" // mount
      )
    }.toList
    command.SshMultiAction(node, mountCommands)

    0.to(toMount.length - 1).map(i => s"/${GeneralConstants.data}/$i").toList
  }
} 
Example 179
Source File: StartAwsCliAction.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.aws


import com.criteo.dev.cluster._
import com.criteo.dev.cluster.aws.AwsUtilities.NodeRole
import com.criteo.dev.cluster.config.GlobalConfig
import com.google.common.util.concurrent.Futures
import org.jclouds.compute.ComputeService
import org.jclouds.compute.domain.NodeMetadata.Status
import org.slf4j.LoggerFactory

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global


@Public object StartAwsCliAction extends CliAction[Unit] {

  override def command : String = "start-aws"

  override def usageArgs = List(Option("cluster.id"))

  override def help() = "Starting a stopped cluster with given cluster.id.  " +
    "If no cluster.id is given, start all stopped clusters owned by this user. " +
    "Note that AWS assigns new public ips for restarted nodes in the cluster.  " +
    "Expiration countdown is extended for restarted clusters."

  private val logger = LoggerFactory.getLogger(StartAwsCliAction.getClass)

  def applyInternal(args: List[String], config: GlobalConfig): Unit = {
    logger.info("Connecting to AWS to fetch nodes to start.")
    val conf = config.backCompat
    var clusters = getClusters(args, conf)
    clusters = clusters.filter(u => u.master.getStatus().equals(Status.SUSPENDED))
    if (clusters.size == 0) {
      logger.info("No clusters found matching criteria.")
    }

    //start nodes in parallel
    val nodesToStart = clusters.flatMap(_.slaves) ++ clusters.map(_.master)
    logger.info(s"Starting ${nodesToStart.size} nodes in parallel.")
    val computeService = AwsUtilities.getComputeService(conf)
    val startFutures = nodesToStart.filter(u => u.getStatus().equals(Status.SUSPENDED))
      .map(u => GeneralUtilities.getFuture {
        val shortId = AwsUtilities.stripRegion(conf, u.getId)
        logger.info(s"Starting instance $shortId")
        AwsUtilities.retryAwsAction(new RetryableStart(computeService, u.getId()))
      }
    )

    val aggStartFuture = Future.sequence(startFutures)
    Await.result(aggStartFuture, Duration.Inf)

    //lookup nodes and reconfigure.
    //Sometimes /etc/hosts gets regenerated on new instances, sometimes they do not.
    val startedClusters = clusters.map(_.master.getId).toSet
    val newClusters = getClusters(args, conf).filter(c => startedClusters.contains(c.master.getId))
    ConfigureHostsAction(config.target.aws, newClusters)

    newClusters.foreach(c => ExtendAwsCliAction.extend(conf, c, reset=true))

    logger.info("Restarting services in parallel.")
    StartClusterAction(config.target.aws, newClusters)

    //print out all the infos.
    newClusters.foreach(c => AwsUtilities.printClusterInfo(conf, c))
  }

  def getClusters(args: List[String], conf: Map[String, String]): Iterable[JcloudCluster] = {
    if (args.length == 1) {
      //instance id is optional
      val instanceId = args(0)
      Set(AwsUtilities.getUserCluster(conf, instanceId))
    } else {
      AwsUtilities.getUserClusters(conf)
    }
  }
}

class RetryableStart(computeService: ComputeService, nodeid: String) extends Retryable[Any] {
  def action: Unit = {
    computeService.resumeNode(nodeid)
  }
} 
Example 180
Source File: CopyJarAwsCliAction.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.aws

import java.io.File
import java.net.{URI, URL}

import com.criteo.dev.cluster.config.GlobalConfig
import com.criteo.dev.cluster._
import com.criteo.dev.cluster.command.RsyncAction
import org.jclouds.compute.domain.NodeMetadata.Status
import org.slf4j.LoggerFactory

import scala.collection.mutable
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
import sys.process._


@Public object CopyJarAwsCliAction extends CliAction[Unit] {
  override def command: String = "copy-jar-aws"

  override def usageArgs: List[Any] = List("instance.id", "source", "destination")

  override def help: String = "Copies a file from source to destination path to all nodes of a given cluster (if target directory exists)."

  private val logger = LoggerFactory.getLogger(CopyJarAwsCliAction.getClass)

  override def applyInternal(args: List[String], conf: GlobalConfig): Unit = {
    val instanceId = args(0)
    val cluster = AwsUtilities.getCluster(conf.backCompat, instanceId)

    if (!cluster.master.getStatus().equals(Status.RUNNING)) {
      logger.info("No running clusters found matching criteria.")
    }

    val source = args(1)
    val target = args(2)
    val sourceUri = new URI(source)
    val targetFile = new File(target)

    GeneralUtilities.prepareTempDir
    val sourceFile = sourceUri.getScheme().toLowerCase() match {
      case "http" => {
        val path = s"${GeneralUtilities.getTempDir()}/${targetFile.getName}"
        DevClusterProcess.process(s"curl -o $path $source").!!
        path
      }
      //only localhost supported
      case "file" => sourceUri.getPath()
      case _ => throw new IllegalArgumentException("Only http and file supported for sources for now.")
    }

    //copy over files in parallel
    val nodesToCopy = cluster.slaves ++ Set(cluster.master)
    logger.info(s"Copying to ${nodesToCopy.size} nodes in parallel.")
    val copyFutures = nodesToCopy.map(u => GeneralUtilities.getFuture {
      val targetN = NodeFactory.getAwsNode(conf.target.aws, u)
      val role = if (AwsUtilities.isSlave(u)) "Slave" else "Master"
      try {
        RsyncAction(
          srcPath = sourceFile,
          targetN = targetN,
          targetPath = target,
          sudo = true)
        s"$role Node ${u.getId()} with ${targetN.ip}: Copy successful."
      } catch {
        case e : Exception => s"$role Node ${u.getId()} with ${targetN.ip}: Copy Failed.  This is normal if the given directory does not exist on the node." +
          s"  If not expected, check the directory location and try again."
      }
    })

    val aggCopyFuture = Future.sequence(copyFutures)
    val result = Await.result(aggCopyFuture, Duration.Inf)
    result.foreach(r => logger.info(r))
    GeneralUtilities.cleanupTempDir
  }
} 
Example 181
Source File: InstallHadoopAction.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.aws

import com.criteo.dev.cluster.aws.AwsUtilities.NodeRole
import com.criteo.dev.cluster._
import com.criteo.dev.cluster.command.{ScpAction, SshAction}
import com.criteo.dev.cluster.config.{AWSConfig, TargetConfig}
import org.slf4j.LoggerFactory

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext.Implicits.global


object InstallHadoopAction {

  private val logger = LoggerFactory.getLogger(InstallHadoopAction.getClass)

  def apply(config: TargetConfig, cluster: JcloudCluster) = {
    logger.info(s"Installing CDH on ${cluster.size} nodes in parallel.")
    val hadoopVersion = config.common.hadoopVersion
    val masterNode = NodeFactory.getAwsNode(config.aws, cluster.master)

    val installMaster = GeneralUtilities.getFuture {
      val setupMaster = AwsUtilities.getOsSetupScript(config.common, NodeRole.Master)
      logger.info(s"Running $setupMaster on master ${masterNode.ip}")
      ScpAction(
        sourceN = None,
        srcPath = s"${GeneralConstants.setupScriptDir}/$setupMaster",
        targetN = Some(masterNode),
        targetPath = "~/setup.sh")

      //script will check if the specified hadoop version is valid.
      SshAction(masterNode, s"source setup.sh $hadoopVersion")
      SshAction(masterNode, "rm setup.sh")
      CopyHiveJarAction(config, cluster.master, NodeRole.Master)
      "" //otherwise there is NPE
    }

    val installSlaves = cluster.slaves.map(slaveMeta => {
      GeneralUtilities.getFuture {
        val slaveNode = NodeFactory.getAwsNode(config.aws, slaveMeta)
        val slaveSetup = AwsUtilities.getOsSetupScript(config.common, NodeRole.Slave)
        logger.info(s"Running $slaveSetup on slave: ${slaveNode.ip}")
        ScpAction(
          sourceN = None,
          srcPath = s"${GeneralConstants.setupScriptDir}/$slaveSetup",
          targetN = Some(slaveNode),
          targetPath = "~/setup.sh")

        //script will check if the specified hadoop version is valid.
        SshAction(slaveNode, s"source setup.sh $hadoopVersion")
        SshAction(slaveNode, "rm setup.sh")
        ""  //otherwise there is NPE
      }
    })

    Await.result(installMaster, Duration.Inf)
    installSlaves.map(sf => Await.result(sf, Duration.Inf))
  }
} 
Example 182
Source File: StopAwsCliAction.scala    From berilia   with Apache License 2.0 5 votes vote down vote up
package com.criteo.dev.cluster.aws


import com.criteo.dev.cluster.config.GlobalConfig
import com.criteo.dev.cluster.{CliAction, GeneralUtilities, Public}
import org.jclouds.compute.ComputeService
import org.jclouds.compute.domain.NodeMetadata.Status
import org.slf4j.LoggerFactory

import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext.Implicits.global


@Public object StopAwsCliAction extends CliAction[Unit] {

  override def command : String = "stop-aws"

  override def usageArgs = List(Option("cluster.id"))

  override def help: String = "Stopping a running cluster with given cluster.id.  " +
    "If no cluster.id is given, stop all running clusters owned by this user.  " +
    "Stopping a cluster prevents it from being purged due to expiration."

  private val logger = LoggerFactory.getLogger(StopAwsCliAction.getClass)

  def applyInternal(args: List[String], config: GlobalConfig): Unit = {
    val conf = config.backCompat
    var clusters = {
      if (args.length == 1) {
        //instance id is optional
        val instanceId = args(0)
        Set(AwsUtilities.getUserCluster(conf, instanceId))
      } else {
        AwsUtilities.getUserClusters(conf)
      }
    }
    clusters = clusters.filter(u => u.master.getStatus().equals(Status.RUNNING))
    if (clusters.size == 0) {
      logger.info("No clusters found matching criteria.")
      return
    }
    val nodesToStop = clusters.flatMap(_.slaves) ++ clusters.map(_.master)

    val computeService = AwsUtilities.getComputeService(conf)
    logger.info(s"Stopping ${nodesToStop.size} nodes in parallel")
    val futures = nodesToStop.filter(n => (n.getStatus().equals(Status.RUNNING))).map(u => GeneralUtilities.getFuture{
      logger.info(s"Stopping instance ${AwsUtilities.stripRegion(conf, u.getId())}")
      AwsUtilities.retryAwsAction(new RetryableStop(computeService, u.getId()))
      logger.info(s"Stopped.")
    })
    val aggFutures = Future.sequence(futures)
    Await.result(aggFutures, Duration.Inf)
  }
}


class RetryableStop(computeService: ComputeService, nodeid: String) extends Retryable[Any] {
  def action: Unit = {
    computeService.suspendNode(nodeid)
  }
} 
Example 183
Source File: FicusImplicits.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.generator.config

import com.google.common.base.CaseFormat
import com.typesafe.config.{Config, ConfigRenderOptions}
import com.wavesplatform.generator.Worker
import com.wavesplatform.state.DataEntry
import com.wavesplatform.transaction.{TransactionParser, TransactionParsers}
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.{CollectionReaders, ValueReader}
import play.api.libs.json._

import scala.concurrent.duration.{Duration, FiniteDuration}

trait FicusImplicits {

  private[this] val byName: Map[String, TransactionParser] = TransactionParsers.all.map {
    case (_, builder) => builder.getClass.getSimpleName.replaceAll("\\$$", "") -> builder
  }

  private def by(name: String): Option[TransactionParser] = byName.get(name)

  implicit val distributionsReader: ValueReader[Map[TransactionParser, Double]] = {
    val converter                                = CaseFormat.LOWER_HYPHEN.converterTo(CaseFormat.UPPER_CAMEL)
    def toTxType(key: String): TransactionParser = by(converter.convert(key)).get

    CollectionReaders.mapValueReader[Double].map { xs =>
      xs.map {
        case (k, v) =>
          toTxType(k) -> v
      }
    }
  }

  implicit val dataEntryReader: ValueReader[DataEntry[_]] = (config: Config, path: String) =>
    Json.parse(config.getConfig(path).root().render(ConfigRenderOptions.concise())).as[DataEntry[_]]

  implicit val workerSettingsReader: ValueReader[Worker.Settings] = (config: Config, path: String) => {
    def readWaitUtxOrDelay(path: String, default: FiniteDuration): Either[FiniteDuration, FiniteDuration] =
      if (config.hasPath(path)) {
        val value = config.as[String](path)
        if (value == "empty-utx") Right(default)
        else {
          val duration: Duration = Duration(value)
          Left(FiniteDuration(duration.length, duration.unit))
        }
      } else Right(default)

    val utxLimit         = config.as[Int](s"$path.utx-limit")
    val delay            = config.as[FiniteDuration](s"$path.delay")
    val tailInitialDelay = readWaitUtxOrDelay(s"$path.tail-initial-delay", delay)
    val initialDelay     = readWaitUtxOrDelay(s"$path.initial-delay", delay)
    val workingTime      = config.as[FiniteDuration](s"$path.working-time")
    val autoReconnect    = config.as[Boolean](s"$path.auto-reconnect")
    val reconnectDelay   = config.as[FiniteDuration](s"$path.reconnect-delay")

    def readWarmUp(warmUpConfig: Config): Worker.WarmUp = {
      val warmUpStart    = warmUpConfig.as[Int]("start")
      val warmUpEnd      = warmUpConfig.as[Option[Int]]("end").getOrElse(utxLimit)
      val warmUpStep     = warmUpConfig.as[Int]("step")
      val warmUpDuration = warmUpConfig.as[Option[FiniteDuration]]("duration")
      val warmUpOnce     = warmUpConfig.as[Option[Boolean]]("once").getOrElse(true)
      Worker.WarmUp(warmUpStart, warmUpEnd, warmUpStep, warmUpDuration, warmUpOnce)
    }

    val warmUp     = readWarmUp(config.getConfig(s"$path.warm-up"))
    val initWarmUp =
      if (config.hasPath(s"$path.initial-warm-up")) Some(readWarmUp(config.getConfig(s"$path.initial-warm-up")))
      else None

    Worker.Settings(utxLimit, delay, tailInitialDelay, initialDelay, workingTime, autoReconnect, reconnectDelay, warmUp, initWarmUp)
  }
} 
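
readWaitUtxOrDelay above leans on Duration's string parser and then narrows the result to a FiniteDuration. A REPL-style sketch of just that conversion (the literal values are only illustrations):

import scala.concurrent.duration.{Duration, FiniteDuration}

val parsed: Duration = Duration("30s") // also accepts forms like "5 minutes" or "100ms"
val finite: FiniteDuration = FiniteDuration(parsed.length, parsed.unit)
// Note: length and unit throw for infinite values such as Duration("Inf"),
// so this narrowing is only safe once the input is known to be finite.
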
Example 184
Source File: RootActorSystem.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.actor

import akka.actor.{ActorSystem, AllForOneStrategy, SupervisorStrategy, SupervisorStrategyConfigurator}
import com.typesafe.config.Config
import com.wavesplatform.utils.ScorexLogging

import scala.concurrent.Await
import scala.concurrent.duration.Duration

object RootActorSystem extends ScorexLogging {
  @volatile private var failed = false

  final class EscalatingStrategy extends SupervisorStrategyConfigurator {
    override def create(): SupervisorStrategy = AllForOneStrategy(loggingEnabled = false) {
      case t: Throwable =>
        failed = true
        log.error("Root actor got exception, escalate", t)
        SupervisorStrategy.Escalate
    }
  }

  def start(id: String, config: Config)(init: ActorSystem => Unit): Unit = {
    val system = ActorSystem(id, config)
    try {
      init(system)
    } catch {
      case t: Throwable =>
        log.error(s"Error while initializing actor system $id", t)
        sys.exit(1)
    }

    Await.result(system.whenTerminated, Duration.Inf)
    if (failed) {
      sys.exit(1)
    } else {
      sys.exit(0)
    }
  }
} 
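
The only Duration use here is Duration.Inf as the await timeout: the main thread parks on system.whenTerminated until the actor system shuts down, however long that takes. A stripped-down sketch, assuming a recent Akka where whenTerminated returns a Future[Terminated]:

import akka.actor.ActorSystem
import scala.concurrent.Await
import scala.concurrent.duration.Duration

object BlockUntilTerminated extends App {
  val system = ActorSystem("sketch")
  system.terminate() // in a real application, termination is triggered elsewhere
  // Park the main thread until shutdown completes; Duration.Inf means
  // "no timeout", which is the right choice for process lifetime.
  Await.result(system.whenTerminated, Duration.Inf)
}
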
Example 185
Source File: Signed.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.transaction

import com.wavesplatform.transaction.TxValidationError.InvalidSignature
import monix.eval.{Coeval, Task}
import monix.execution.Scheduler
import monix.execution.schedulers.SchedulerService

import scala.concurrent.Await
import scala.concurrent.duration.Duration

trait Signed extends Authorized {
  protected val signatureValid: Coeval[Boolean]

  protected val signedDescendants: Coeval[Seq[Signed]] =
    Coeval(Nil)

  protected val signaturesValidMemoized: Task[Either[InvalidSignature, this.type]] =
    Signed.validateTask[this.type](this).memoize

  val signaturesValid: Coeval[Either[InvalidSignature, this.type]] =
    Coeval.evalOnce(Await.result(signaturesValidMemoized.runToFuture(Signed.scheduler), Duration.Inf))
}

object Signed {
  type E[A] = Either[InvalidSignature, A]

  private implicit lazy val scheduler: SchedulerService = {
    val parallelism = (Runtime.getRuntime.availableProcessors() / 2).max(1).min(4)
    Scheduler.computation(parallelism, "sig-validator")
  }

  def validateOrdered[S <: Signed](ss: Seq[S]): E[Seq[S]] =
    Await.result(
      Task
        .parTraverse(ss)(s => s.signaturesValidMemoized)
        .map(
          _.collectFirst { case Left(v) => Left(v) }.getOrElse(Right(ss))
        )
        .runAsyncLogErr,
      Duration.Inf
    )

  private def validateTask[S <: Signed](signedEntity: S): Task[E[S]] =
    Task {
      import cats.instances.either._
      import cats.instances.list._
      import cats.syntax.traverse._

      if (!signedEntity.signatureValid()) {
        Task.now(Left(InvalidSignature(signedEntity, None)))
      } else if (signedEntity.signedDescendants().isEmpty) {
        Task.now(Right(signedEntity))
      } else {
        Task
          .parTraverseUnordered(signedEntity.signedDescendants())(s => s.signaturesValidMemoized)
          .map(_.sequence.map(_ => signedEntity))
      }
    }.flatten
} 
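
The bridge in signaturesValid (memoize a Task, run it to a Future, block with Duration.Inf) makes the validation work happen at most once per transaction while still exposing a synchronous Coeval. A reduced sketch of that bridge, assuming Monix 3.x, where runToFuture and Scheduler.Implicits.global exist:

import monix.eval.Task
import monix.execution.Scheduler.Implicits.global

import scala.concurrent.Await
import scala.concurrent.duration.Duration

// memoize caches the first result, so repeated awaits pay for the
// (stand-in) expensive check only once.
val memoized: Task[Boolean] = Task { true }.memoize
val first  = Await.result(memoized.runToFuture, Duration.Inf)
val second = Await.result(memoized.runToFuture, Duration.Inf) // served from the cache
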
Example 186
Source File: InputManager.scala    From almond   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package almond.interpreter.input

import java.io.InputStream
import java.nio.charset.StandardCharsets.UTF_8
import java.nio.ByteBuffer

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.util.{Failure, Success}

trait InputManager {
  def done(): Unit
  def readInput(prompt: String = "", password: Boolean = false): Future[String]

  final def password(prompt: String = ""): Future[String] =
    readInput(prompt, password = true)

  final def inputStream(ec: ExecutionContext): InputStream =
    new InputManager.InputManagerInputStream(this, ec)
}

object InputManager {

  class NoMoreInputException extends Exception

  class InputManagerInputStream(
    manager: InputManager,
    ec: ExecutionContext
  ) extends InputStream {

    private var bufferOpt = Option.empty[ByteBuffer]
    private var done = false

    private def maybeFetchNewBuffer(): Option[ByteBuffer] =
      if (done)
        None
      else {
        if (bufferOpt.forall(!_.hasRemaining)) {

          val res = {
            implicit val ec0 = ec
            Await.result(
              manager.readInput()
                .map(Success(_))
                .recover { case t => Failure(t) },
              Duration.Inf
            )
          }

          res match {
            case Success(value) =>
              val b0 = ByteBuffer.wrap((value + "\n").getBytes(UTF_8)).asReadOnlyBuffer()
              bufferOpt = Some(b0)
            case Failure(_: NoMoreInputException) =>
              done = true
              bufferOpt = None
            case Failure(ex) =>
              throw new Exception("Error getting more input", ex)
          }
        }

        bufferOpt
      }


    def read(): Int =
      maybeFetchNewBuffer()
        .fold(-1)(_.get())

    override def read(b: Array[Byte], off: Int, len: Int): Int =
      // InputStream.read does these 3 checks upfront too
      if (b == null)
        throw new NullPointerException
      else if (off < 0 || len < 0 || len > b.length - off)
        throw new IndexOutOfBoundsException
      else if (len == 0)
        0
      else
        maybeFetchNewBuffer().fold(-1) { b0 =>
          val toRead = math.min(b0.remaining(), len)
          b0.get(b, off, toRead)
          toRead
        }
  }

} 
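
The Await.result(..., Duration.Inf) inside maybeFetchNewBuffer is wrapped so that a failed read surfaces as a value instead of an exception thrown out of Await. That wrapping is reusable on its own; a small helper distilling it (the name awaitOutcome is ours, not almond's):

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success, Try}

// Block for a Future's outcome without throwing: failures come back
// as Failure(t) instead of propagating out of Await.result.
def awaitOutcome[A](fa: Future[A])(implicit ec: ExecutionContext): Try[A] = {
  val wrapped: Future[Try[A]] = fa.map(a => Success(a): Try[A]).recover { case t => Failure(t) }
  Await.result(wrapped, Duration.Inf)
}
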
Example 187
Source File: JupyterApiImpl.scala    From almond   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package almond

import java.io.ByteArrayOutputStream
import java.nio.charset.StandardCharsets

import almond.api.{FullJupyterApi, JupyterApi}
import almond.internals.HtmlAnsiOutputStream
import almond.interpreter.api.CommHandler
import ammonite.util.Ref
import pprint.{TPrint, TPrintColors}

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag


final class JupyterApiImpl(
  execute: Execute,
  commHandlerOpt: => Option[CommHandler],
  replApi: ReplApiImpl,
  silent0: Ref[Boolean]
) extends FullJupyterApi {

  protected def printOnChange[T](
    value: => T,
    ident: String,
    custom: Option[String],
    onChange: Option[(T => Unit) => Unit],
    onChangeOrError: Option[(Either[Throwable, T] => Unit) => Unit]
  )(implicit
    tprint: TPrint[T],
    tcolors: TPrintColors,
    classTagT: ClassTag[T]
  ): Iterator[String] =
    replApi.printSpecial(value, ident, custom, onChange, onChangeOrError, replApi.pprinter, Some(updatableResults))(tprint, tcolors, classTagT).getOrElse {
      replApi.Internal.print(value, ident, custom)(tprint, tcolors, classTagT)
    }

  override def silent(s: Boolean): Unit = silent0.update(s)
  override def silent: Boolean = silent0.apply()

  protected def ansiTextToHtml(text: String): String = {
    val baos = new ByteArrayOutputStream
    val haos = new HtmlAnsiOutputStream(baos)
    haos.write(text.getBytes(StandardCharsets.UTF_8))
    haos.close()
    baos.toString("UTF-8")
  }

  def stdinOpt(prompt: String, password: Boolean): Option[String] =
    for (m <- execute.currentInputManagerOpt)
      yield Await.result(m.readInput(prompt, password), Duration.Inf)

  override def changingPublish =
    execute.currentPublishOpt.getOrElse(super.changingPublish)
  override def commHandler =
    commHandlerOpt.getOrElse(super.commHandler)

  protected def updatableResults0: JupyterApi.UpdatableResults =
    execute.updatableResults
} 
Example 188
Source File: SparkContextFunctions.scala    From couchbase-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.couchbase.spark

import com.couchbase.client.java.analytics.AnalyticsQuery
import com.couchbase.client.java.document.Document
import com.couchbase.client.java.query.N1qlQuery
import com.couchbase.client.java.view.{SpatialViewQuery, ViewQuery}
import com.couchbase.spark.connection.{SubdocLookupResult, SubdocLookupSpec, SubdocMutationResult, SubdocMutationSpec}
import com.couchbase.spark.rdd._
import org.apache.spark.SparkContext

import scala.reflect.ClassTag
import org.apache.spark.rdd.RDD

import scala.concurrent.duration.Duration

class SparkContextFunctions(@transient val sc: SparkContext) extends Serializable {

  def couchbaseGet[D <: Document[_]: ClassTag](ids: Seq[String], bucketName: String = null,
    numSlices: Int = sc.defaultParallelism, timeout: Option[Duration] = None): RDD[D] = {
    new KeyValueRDD[D](sc, ids, bucketName, timeout)
  }

  def couchbaseSubdocLookup(ids: Seq[String], get: Seq[String], timeout: Option[Duration])
    : RDD[SubdocLookupResult] = couchbaseSubdocLookup(ids, get, Seq(), null, timeout)

  def couchbaseSubdocLookup(ids: Seq[String], get: Seq[String])
  : RDD[SubdocLookupResult] = couchbaseSubdocLookup(ids, get, Seq(), null, None)

  def couchbaseSubdocLookup(ids: Seq[String], get: Seq[String], exists: Seq[String])
  : RDD[SubdocLookupResult] = couchbaseSubdocLookup(ids, get, exists, null, None)

  def couchbaseSubdocLookup(ids: Seq[String], get: Seq[String], exists: Seq[String],
                            timeout: Option[Duration])
  : RDD[SubdocLookupResult] = couchbaseSubdocLookup(ids, get, exists, null, timeout)

  def couchbaseSubdocLookup(ids: Seq[String], get: Seq[String], exists: Seq[String],
    bucketName: String, timeout: Option[Duration] = None): RDD[SubdocLookupResult] = {
    new SubdocLookupRDD(sc, ids.map(SubdocLookupSpec(_, get, exists)), bucketName, timeout)
  }

  def couchbaseSubdocMutate(specs: Seq[SubdocMutationSpec], bucketName: String,
                            timeout: Option[Duration]):
    RDD[SubdocMutationResult] = {
    new SubdocMutateRDD(sc, specs, bucketName, timeout)
  }

  def couchbaseSubdocMutate(specs: Seq[SubdocMutationSpec], bucketName: String):
  RDD[SubdocMutationResult] = {
    new SubdocMutateRDD(sc, specs, bucketName, None)
  }

  def couchbaseSubdocMutate(specs: Seq[SubdocMutationSpec], timeout: Option[Duration] = None):
  RDD[SubdocMutationResult] = {
    couchbaseSubdocMutate(specs, null, timeout)
  }

  def couchbaseView(query: ViewQuery, bucketName: String = null,
                    timeout: Option[Duration] = None) = ViewRDD(sc, bucketName, query, timeout)

  def couchbaseSpatialView(query: SpatialViewQuery,
                           bucketName: String = null, timeout: Option[Duration] = None) =
    SpatialViewRDD(sc, bucketName, query, timeout)

  def couchbaseQuery(query: N1qlQuery, bucketName: String = null,
                     timeout: Option[Duration] = None) = QueryRDD(sc, bucketName, query, timeout)

  def couchbaseAnalytics(query: AnalyticsQuery, bucketName: String = null,
                         timeout: Option[Duration] = None): AnalyticsRDD = {
    new AnalyticsRDD(sc, query, bucketName, timeout)
  }
} 
Example 189
Source File: ViewAccessor.scala    From couchbase-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.couchbase.spark.connection

import java.util.concurrent.TimeUnit

import com.couchbase.client.core.BackpressureException
import com.couchbase.client.core.time.Delay
import com.couchbase.client.java.util.retry.RetryBuilder
import com.couchbase.client.java.view.ViewQuery
import com.couchbase.spark.Logging
import com.couchbase.spark.internal.LazyIterator
import com.couchbase.spark.rdd.CouchbaseViewRow
import rx.lang.scala.JavaConversions._
import rx.lang.scala.Observable

import scala.concurrent.duration.Duration


class ViewAccessor(cbConfig: CouchbaseConfig, viewQuery: Seq[ViewQuery], bucketName: String = null,
                   timeout: Option[Duration])
  extends Logging {

  def compute(): Iterator[CouchbaseViewRow] = {
    if (viewQuery.isEmpty) {
      return Iterator[CouchbaseViewRow]()
    }

    val bucket = CouchbaseConnection().bucket(cbConfig, bucketName).async()

    val maxDelay = cbConfig.retryOpts.maxDelay
    val minDelay = cbConfig.retryOpts.minDelay
    val maxRetries = cbConfig.retryOpts.maxTries

    val viewTimeout = timeout
      .map(_.toMillis)
      .orElse(cbConfig.timeouts.view)
      .getOrElse(bucket.environment().viewTimeout())

    LazyIterator {
      Observable.from(viewQuery)
        .flatMap(vq => toScalaObservable(bucket.query(vq)
          .timeout(viewTimeout, TimeUnit.MILLISECONDS)
          .retryWhen(
          RetryBuilder
            .anyOf(classOf[BackpressureException])
            .delay(Delay.exponential(TimeUnit.MILLISECONDS, maxDelay, minDelay))
            .max(maxRetries)
            .build()
        )))
        .doOnNext(result => {
          toScalaObservable(result.error()).subscribe(err => {
            logError(s"Couchbase View Query $viewQuery failed with $err")
          })
        })
        .flatMap(result => toScalaObservable(result.rows()))
        .map(row => CouchbaseViewRow(row.id(), row.key(), row.value()))
        .toBlocking
        .toIterable
        .iterator
    }

  }

} 
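
The timeout resolution in compute is the connector's standard three-level fallback, repeated in every accessor that follows: an explicit per-call Option[Duration] wins, then the configured override, then the SDK environment default. Isolated as a plain function (the names are ours; values are in milliseconds):

import scala.concurrent.duration._

def effectiveTimeoutMs(explicit: Option[Duration],
                       configured: Option[Long],
                       environmentDefault: Long): Long =
  explicit.map(_.toMillis).orElse(configured).getOrElse(environmentDefault)

// An explicit timeout wins over everything else:
// effectiveTimeoutMs(Some(2.seconds), Some(75000L), 10000L) == 2000L
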
Example 190
Source File: SpatialViewAccessor.scala    From couchbase-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.couchbase.spark.connection

import java.util.concurrent.TimeUnit

import com.couchbase.client.core.BackpressureException
import com.couchbase.client.core.time.Delay
import com.couchbase.client.java.util.retry.RetryBuilder
import com.couchbase.client.java.view.SpatialViewQuery
import com.couchbase.spark.Logging
import com.couchbase.spark.internal.LazyIterator
import com.couchbase.spark.rdd.CouchbaseSpatialViewRow
import rx.lang.scala.JavaConversions._
import rx.lang.scala.Observable

import scala.concurrent.duration.Duration


class SpatialViewAccessor(cbConfig: CouchbaseConfig, spatialQuery: Seq[SpatialViewQuery],
  bucketName: String = null, timeout: Option[Duration])
  extends Logging {

  def compute(): Iterator[CouchbaseSpatialViewRow] = {
    if (spatialQuery.isEmpty) {
      return Iterator[CouchbaseSpatialViewRow]()
    }

    val bucket = CouchbaseConnection().bucket(cbConfig, bucketName).async()

    val maxDelay = cbConfig.retryOpts.maxDelay
    val minDelay = cbConfig.retryOpts.minDelay
    val maxRetries = cbConfig.retryOpts.maxTries

    val viewTimeout = timeout
      .map(_.toMillis)
      .orElse(cbConfig.timeouts.view)
      .getOrElse(bucket.environment().viewTimeout())


    LazyIterator {
      Observable.from(spatialQuery)
        .flatMap(vq => toScalaObservable(bucket.query(vq)
          .timeout(viewTimeout, TimeUnit.MILLISECONDS)
          .retryWhen(
          RetryBuilder
            .anyOf(classOf[BackpressureException])
            .delay(Delay.exponential(TimeUnit.MILLISECONDS, maxDelay, minDelay))
            .max(maxRetries)
            .build()
        )))
        .doOnNext(result => {
          toScalaObservable(result.error()).subscribe(err => {
            logError(s"Couchbase View Query $spatialQuery failed with $err")
          })
        })
        .flatMap(result => toScalaObservable(result.rows()))
        .map(row => CouchbaseSpatialViewRow(row.id(), row.key(), row.value(), row.geometry()))
        .toBlocking
        .toIterable
        .iterator
    }

  }

} 
Example 191
Source File: SubdocLookupAccessor.scala    From couchbase-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.couchbase.spark.connection

import java.util.concurrent.TimeUnit

import com.couchbase.client.core.BackpressureException
import com.couchbase.client.core.time.Delay
import com.couchbase.client.java.error.{CouchbaseOutOfMemoryException, TemporaryFailureException}
import com.couchbase.client.java.util.retry.RetryBuilder
import com.couchbase.spark.internal.LazyIterator
import rx.lang.scala.JavaConversions._
import rx.lang.scala.Observable

import scala.collection.mutable
import scala.concurrent.duration.Duration

case class SubdocLookupSpec(id: String, get: Seq[String], exists: Seq[String])

case class SubdocLookupResult(id: String, cas: Long, content: Map[String, Any],
                        exists: Map[String, Boolean])

class SubdocLookupAccessor(cbConfig: CouchbaseConfig, specs: Seq[SubdocLookupSpec],
                          bucketName: String = null, timeout: Option[Duration]) {

  def compute(): Iterator[SubdocLookupResult] = {
    if (specs.isEmpty) {
      return Iterator[SubdocLookupResult]()
    }

    val bucket = CouchbaseConnection().bucket(cbConfig, bucketName).async()
    val maxDelay = cbConfig.retryOpts.maxDelay
    val minDelay = cbConfig.retryOpts.minDelay
    val maxRetries = cbConfig.retryOpts.maxTries

    val kvTimeout = timeout
      .map(_.toMillis)
      .orElse(cbConfig.timeouts.kv)
      .getOrElse(bucket.environment().kvTimeout())


    LazyIterator {
      Observable
        .from(specs)
        .flatMap(spec => {
            val builder = bucket.lookupIn(spec.id)
            spec.exists.foreach(builder.exists(_))
            spec.get.foreach(builder.get(_))
            toScalaObservable(builder.execute().timeout(kvTimeout, TimeUnit.MILLISECONDS)
            ).map(fragment => {
              val content = mutable.Map[String, Any]()
              spec.get.foreach(path => content.put(path, fragment.content(path)))
              val exists = mutable.Map[String, Boolean]()
              spec.exists.foreach(path =>  exists.put(path, fragment.status(path).isSuccess))
              SubdocLookupResult(spec.id, fragment.cas(), content.toMap, exists.toMap)
            }).retryWhen(
            RetryBuilder
              .anyOf(classOf[TemporaryFailureException], classOf[BackpressureException],
                classOf[CouchbaseOutOfMemoryException])
              .delay(Delay.exponential(TimeUnit.MILLISECONDS, maxDelay, minDelay))
              .max(maxRetries)
              .build())
        })
        .toBlocking
        .toIterable
        .iterator
    }
  }

} 
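
All of these accessors share one retry policy: retry only the listed transient failure types, with exponential delay, up to a maximum attempt count (RetryBuilder expresses this reactively). A plain-Scala analogue of the policy, for intuition only, with Thread.sleep standing in for the Rx scheduler:

import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

@annotation.tailrec
def retryOn[A](retryable: Class[_ <: Throwable]*)(maxRetries: Int, delay: FiniteDuration)(op: => A): A =
  Try(op) match {
    case Success(a) => a
    case Failure(t) if maxRetries > 0 && retryable.exists(_.isInstance(t)) =>
      Thread.sleep(delay.toMillis) // back off, then double the delay
      retryOn(retryable: _*)(maxRetries - 1, delay * 2)(op)
    case Failure(t) => throw t
  }
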
Example 192
Source File: QueryAccessor.scala    From couchbase-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.couchbase.spark.connection

import java.util.concurrent.TimeUnit

import com.couchbase.client.core.{BackpressureException, CouchbaseException}
import com.couchbase.client.core.time.Delay
import com.couchbase.client.java.document.json.JsonObject
import com.couchbase.client.java.error.QueryExecutionException
import com.couchbase.client.java.query.N1qlQuery
import com.couchbase.client.java.util.retry.RetryBuilder
import com.couchbase.spark.Logging
import com.couchbase.spark.internal.LazyIterator
import com.couchbase.spark.rdd.CouchbaseQueryRow
import rx.lang.scala.JavaConversions._
import rx.lang.scala.Observable

import scala.concurrent.duration.Duration


class QueryAccessor(cbConfig: CouchbaseConfig, query: Seq[N1qlQuery], bucketName: String = null,
                    timeout: Option[Duration])
  extends Logging {

  def compute(): Iterator[CouchbaseQueryRow] = {
    if (query.isEmpty) {
      return Iterator[CouchbaseQueryRow]()
    }

    val bucket = CouchbaseConnection().bucket(cbConfig, bucketName).async()

    val maxDelay = cbConfig.retryOpts.maxDelay
    val minDelay = cbConfig.retryOpts.minDelay
    val maxRetries = cbConfig.retryOpts.maxTries

    val queryTimeout = timeout
      .map(_.toMillis)
      .orElse(cbConfig.timeouts.query)
      .getOrElse(bucket.environment().queryTimeout())

    LazyIterator {
      Observable.from(query)
        .flatMap(vq => toScalaObservable(bucket.query(vq)
          .timeout(queryTimeout, TimeUnit.MILLISECONDS)
          .retryWhen(
          RetryBuilder
            .anyOf(classOf[BackpressureException])
            .delay(Delay.exponential(TimeUnit.MILLISECONDS, maxDelay, minDelay))
            .max(maxRetries)
            .build()
        )))
        .flatMap(v => {
          val errors = toScalaObservable(v.errors())
            .map(e => {
              val msgRaw = e.getString("msg")
              val msg = if (msgRaw == null) "Query failure" else msgRaw
              throw new QueryExecutionException(msg, e)
            })

          v.rows().mergeWith(errors)
        })
        .map(row => CouchbaseQueryRow(row.value()))
        .toBlocking
        .toIterable
        .iterator
    }

  }

} 
Example 193
Source File: AnalyticsAccessor.scala    From couchbase-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.couchbase.spark.connection

import java.util.concurrent.TimeUnit

import com.couchbase.client.core.BackpressureException
import com.couchbase.client.core.time.Delay
import com.couchbase.client.java.analytics.AnalyticsQuery
import com.couchbase.client.java.document.json.JsonObject
import com.couchbase.client.java.query.N1qlQuery
import com.couchbase.client.java.util.retry.RetryBuilder
import com.couchbase.spark.Logging
import com.couchbase.spark.internal.LazyIterator
import com.couchbase.spark.rdd.CouchbaseQueryRow
import rx.lang.scala.JavaConversions._
import rx.lang.scala.Observable

import scala.concurrent.duration.Duration

case class CouchbaseAnalyticsRow(value: JsonObject)

class AnalyticsAccessor(cbConfig: CouchbaseConfig,
                        queries: Seq[AnalyticsQuery],
                        bucketName: String = null,
                        timeout: Option[Duration])
  extends Logging {

  def compute(): Iterator[CouchbaseAnalyticsRow] = {
    if (queries.isEmpty) {
      return Iterator[CouchbaseAnalyticsRow]()
    }

    val bucket = CouchbaseConnection().bucket(cbConfig, bucketName).async()

    val maxDelay = cbConfig.retryOpts.maxDelay
    val minDelay = cbConfig.retryOpts.minDelay
    val maxRetries = cbConfig.retryOpts.maxTries

    val queryTimeout = timeout
      .map(_.toMillis)
      .orElse(cbConfig.timeouts.query)
      .getOrElse(bucket.environment().queryTimeout())

    LazyIterator {
      Observable.from(queries)
        .flatMap(vq => toScalaObservable(bucket.query(vq)
          .timeout(queryTimeout, TimeUnit.MILLISECONDS)
          .retryWhen(
          RetryBuilder
            .anyOf(classOf[BackpressureException])
            .delay(Delay.exponential(TimeUnit.MILLISECONDS, maxDelay, minDelay))
            .max(maxRetries)
            .build()
        )))
        .doOnNext(result => {
          toScalaObservable(result.errors()).subscribe(err => {
            logError(s"Couchbase Analytics Queries $queries failed with $err")
          })
        })
        .flatMap(_.rows())
        .map(row => CouchbaseAnalyticsRow(row.value()))
        .toBlocking
        .toIterable
        .iterator
    }

  }

} 
Example 194
Source File: KeyValueAccessor.scala    From couchbase-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.couchbase.spark.connection

import java.util.concurrent.TimeUnit

import com.couchbase.client.core.BackpressureException
import com.couchbase.client.core.time.Delay
import com.couchbase.client.java.document.Document
import com.couchbase.client.java.error.{CouchbaseOutOfMemoryException, TemporaryFailureException}
import com.couchbase.client.java.util.retry.RetryBuilder
import com.couchbase.spark.internal.LazyIterator
import rx.lang.scala.JavaConversions._
import rx.lang.scala.Observable

import scala.concurrent.duration.Duration
import scala.reflect.ClassTag

class KeyValueAccessor[D <: Document[_]]
  (cbConfig: CouchbaseConfig, ids: Seq[String], bucketName: String = null,
   timeout: Option[Duration])
  (implicit ct: ClassTag[D]) {

  def compute(): Iterator[D] = {
    if (ids.isEmpty) {
      return Iterator[D]()
    }

    val bucket = CouchbaseConnection().bucket(cbConfig, bucketName).async()
    val castTo = ct.runtimeClass.asInstanceOf[Class[D]]

    val maxDelay = cbConfig.retryOpts.maxDelay
    val minDelay = cbConfig.retryOpts.minDelay
    val maxRetries = cbConfig.retryOpts.maxTries

    val kvTimeout = timeout
      .map(_.toMillis)
      .orElse(cbConfig.timeouts.kv)
      .getOrElse(bucket.environment().kvTimeout())

    LazyIterator {
      Observable
        .from(ids)
        .flatMap(id => toScalaObservable(bucket.get(id, castTo)
          .timeout(kvTimeout, TimeUnit.MILLISECONDS)
          .retryWhen(
          RetryBuilder
            .anyOf(classOf[TemporaryFailureException], classOf[BackpressureException],
              classOf[CouchbaseOutOfMemoryException])
            .delay(Delay.exponential(TimeUnit.MILLISECONDS, maxDelay, minDelay))
            .max(maxRetries)
            .build())
        ))
        .toBlocking
        .toIterable
        .iterator
    }
  }

} 
Example 195
Source File: SubdocMutateRDD.scala    From couchbase-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.couchbase.spark.rdd

import java.net.InetAddress
import java.util.zip.CRC32

import com.couchbase.client.core.config.CouchbaseBucketConfig
import com.couchbase.client.core.message.cluster.{GetClusterConfigRequest, GetClusterConfigResponse}
import com.couchbase.spark.connection._
import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import rx.lang.scala.JavaConversions._

import scala.concurrent.duration.Duration

class SubdocMutationPartition(id: Int, specs: Seq[SubdocMutationSpec], loc: Option[InetAddress])
  extends Partition {
  override def index: Int = id
  def ids: Seq[SubdocMutationSpec] = specs
  def location: Option[InetAddress] = loc
  override def toString = s"SubdocMutatePartition($id, $ids, $loc)"
}

class SubdocMutateRDD(@transient private val sc: SparkContext, specs: Seq[SubdocMutationSpec],
                      bname: String = null, timeout: Option[Duration] = None)
  extends RDD[SubdocMutationResult](sc, Nil) {

  private val cbConfig = CouchbaseConfig(sc.getConf)
  private val bucketName = Option(bname).getOrElse(cbConfig.buckets.head.name)


  override def compute(split: Partition, context: TaskContext): Iterator[SubdocMutationResult] = {
    val p = split.asInstanceOf[SubdocMutationPartition]
    new SubdocMutationAccessor(cbConfig, p.ids, bucketName, timeout).compute()
  }

  override protected def getPartitions: Array[Partition] = {
    val core = CouchbaseConnection().bucket(cbConfig, bucketName).core()

    val req = new GetClusterConfigRequest()
    val config = toScalaObservable(core.send[GetClusterConfigResponse](req))
      .map(c => {
        logWarning(c.config().bucketConfigs().toString)
        logWarning(bucketName)
        c.config().bucketConfig(bucketName)
      })
      .toBlocking
      .single

    val parts = config match {
      case bucketConfig: CouchbaseBucketConfig =>
        val numPartitions = bucketConfig.numberOfPartitions()
        var partitionIndex = 0
        specs.groupBy(spec => {
          val crc32 = new CRC32()
          crc32.update(spec.id.getBytes("UTF-8"))
          val rv = (crc32.getValue >> 16) & 0x7fff
          rv.toInt & numPartitions - 1
        }).map(grouped => {
          val hostname =
            bucketConfig.nodeAtIndex(bucketConfig.nodeIndexForMaster(grouped._1, false)).hostname()
          val currentIdx = partitionIndex
          partitionIndex += 1
          new SubdocMutationPartition(currentIdx, grouped._2,
            Some(InetAddress.getByName(hostname)))
        }).toArray
      case _ =>
        logWarning("Memcached preferred locations currently not supported.")
        Array(new SubdocMutationPartition(0, specs, None))
    }

    parts.asInstanceOf[Array[Partition]]
  }

  override protected def getPreferredLocations(split: Partition): Seq[String] = {
    val p = split.asInstanceOf[SubdocMutationPartition]
    if (p.location.isDefined) {
      Seq(p.location.get.getHostName, p.location.get.getHostAddress)
    } else {
      Nil
    }
  }

} 
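
The groupBy in getPartitions (and its twins in SubdocLookupRDD and KeyValueRDD below) reimplements Couchbase's client-side vBucket hash so that each Spark partition is co-located with the node that owns its keys. The hash on its own (partition counts are powers of two, typically 1024):

import java.util.zip.CRC32

def vBucketOf(id: String, numPartitions: Int): Int = {
  val crc32 = new CRC32()
  crc32.update(id.getBytes("UTF-8"))
  // CRC32 the key bytes, keep 15 bits of the upper half, then mask
  // down to the bucket's partition count.
  ((crc32.getValue >> 16) & 0x7fff).toInt & (numPartitions - 1)
}

// e.g. vBucketOf("user::42", 1024) gives a stable index in [0, 1023]
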
Example 196
Source File: AnalyticsRDD.scala    From couchbase-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.couchbase.spark.rdd

import com.couchbase.client.core.message.cluster.{GetClusterConfigRequest, GetClusterConfigResponse}
import com.couchbase.client.core.service.ServiceType
import com.couchbase.client.java.analytics.AnalyticsQuery
import com.couchbase.client.java.query.N1qlQuery
import com.couchbase.spark.connection._
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partition, SparkContext, TaskContext}
import rx.lang.scala.JavaConversions.toScalaObservable

import scala.concurrent.duration.Duration


class AnalyticsRDD(@transient private val sc: SparkContext, query: AnalyticsQuery,
                   bucketName: String = null,
                   timeout: Option[Duration] = None)
  extends RDD[CouchbaseAnalyticsRow](sc, Nil) {

  private val cbConfig = CouchbaseConfig(sc.getConf)

  override def compute(split: Partition, context: TaskContext): Iterator[CouchbaseAnalyticsRow] =
    new AnalyticsAccessor(cbConfig, Seq(query), bucketName, timeout).compute()

  override protected def getPartitions: Array[Partition] = {
    // Try to run the query on a Spark worker co-located on a Couchbase analytics node
    val addressesWithAnalyticsService = RDDSupport.couchbaseNodesWithService(cbConfig,
      bucketName,
      ServiceType.ANALYTICS)

    // A single query can only run on one node, so return one partition
    Array(new QueryPartition(0, addressesWithAnalyticsService))
  }

  override protected def getPreferredLocations(split: Partition): Seq[String] = {
    RDDSupport.getPreferredLocations(split)
  }
} 
Example 197
Source File: SubdocLookupRDD.scala    From couchbase-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.couchbase.spark.rdd

import java.net.InetAddress
import java.util.zip.CRC32

import com.couchbase.client.core.config.CouchbaseBucketConfig
import com.couchbase.client.core.message.cluster.{GetClusterConfigRequest, GetClusterConfigResponse}
import com.couchbase.spark.Logging
import com.couchbase.spark.connection._
import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import rx.lang.scala.JavaConversions._

import scala.concurrent.duration.Duration


class SubdocLookupPartition(id: Int, specs: Seq[SubdocLookupSpec], loc: Option[InetAddress])
  extends Partition {
  override def index: Int = id
  def ids: Seq[SubdocLookupSpec] = specs
  def location: Option[InetAddress] = loc
  override def toString = s"SubdocLookupPartition($id, $ids, $loc)"
}


class SubdocLookupRDD(@transient private val sc: SparkContext, specs: Seq[SubdocLookupSpec],
                      bname: String = null, timeout: Option[Duration] = None)
  extends RDD[SubdocLookupResult](sc, Nil) {

  private val cbConfig = CouchbaseConfig(sc.getConf)
  private val bucketName = Option(bname).getOrElse(cbConfig.buckets.head.name)


  override def compute(split: Partition, context: TaskContext): Iterator[SubdocLookupResult] = {
    val p = split.asInstanceOf[SubdocLookupPartition]
    new SubdocLookupAccessor(cbConfig, p.ids, bucketName, timeout).compute()
  }

  override protected def getPartitions: Array[Partition] = {
    val core = CouchbaseConnection().bucket(cbConfig, bucketName).core()

    val req = new GetClusterConfigRequest()
    val config = toScalaObservable(core.send[GetClusterConfigResponse](req))
      .map(c => {
        logWarning(c.config().bucketConfigs().toString)
        logWarning(bucketName)
        c.config().bucketConfig(bucketName)
      })
      .toBlocking
      .single

    val parts = config match {
      case bucketConfig: CouchbaseBucketConfig =>
        val numPartitions = bucketConfig.numberOfPartitions()
        var partitionIndex = 0
        specs.groupBy(spec => {
          val crc32 = new CRC32()
          crc32.update(spec.id.getBytes("UTF-8"))
          val rv = (crc32.getValue >> 16) & 0x7fff
          rv.toInt & numPartitions - 1
        }).map(grouped => {
          val hostname =
            bucketConfig.nodeAtIndex(bucketConfig.nodeIndexForMaster(grouped._1, false)).hostname()
          val currentIdx = partitionIndex
          partitionIndex += 1
          new SubdocLookupPartition(currentIdx, grouped._2,
            Some(InetAddress.getByName(hostname)))
        }).toArray
      case _ =>
        logWarning("Memcached preferred locations currently not supported.")
        Array(new SubdocLookupPartition(0, specs, None))
    }

    parts.asInstanceOf[Array[Partition]]
  }

  override protected def getPreferredLocations(split: Partition): Seq[String] = {
    val p = split.asInstanceOf[SubdocLookupPartition]
    if (p.location.isDefined) {
      Seq(p.location.get.getHostName, p.location.get.getHostAddress)
    } else {
      Nil
    }
  }

} 
Example 198
Source File: QueryRDD.scala    From couchbase-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.couchbase.spark.rdd

import com.couchbase.client.core.message.cluster.{GetClusterConfigRequest, GetClusterConfigResponse}
import com.couchbase.client.core.service.ServiceType
import com.couchbase.client.java.document.json.JsonObject
import com.couchbase.client.java.query.N1qlQuery
import com.couchbase.spark.connection.{CouchbaseConfig, CouchbaseConnection, QueryAccessor}
import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import rx.lang.scala.JavaConversions.toScalaObservable

import scala.concurrent.duration.Duration

case class CouchbaseQueryRow(value: JsonObject)

class QueryPartition(val index: Int, val hostnames: Seq[String]) extends Partition {
  override def toString = s"QueryPartition($index, $hostnames)"
}

class QueryRDD(@transient private val sc: SparkContext, query: N1qlQuery,
               bucketName: String = null,
               timeout: Option[Duration] = None)
  extends RDD[CouchbaseQueryRow](sc, Nil) {

  private val cbConfig = CouchbaseConfig(sc.getConf)

  override def compute(split: Partition, context: TaskContext): Iterator[CouchbaseQueryRow] =
    new QueryAccessor(cbConfig, Seq(query), bucketName, timeout).compute()

  override protected def getPartitions: Array[Partition] = {
    // Try to run the query on a Spark worker co-located on a Couchbase query node
    val addressesWithQueryService = RDDSupport.couchbaseNodesWithService(cbConfig,
      bucketName,
      ServiceType.QUERY)

    // A single query can only run on one node, so return one partition
    Array(new QueryPartition(0, addressesWithQueryService))
  }

  override protected def getPreferredLocations(split: Partition): Seq[String] = {
    RDDSupport.getPreferredLocations(split)
  }
}

object QueryRDD {

  def apply(sc: SparkContext, bucketName: String, query: N1qlQuery,
            timeout: Option[Duration] = None) =
    new QueryRDD(sc, query, bucketName, timeout)

} 
Example 199
Source File: SpatialViewRDD.scala    From couchbase-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.couchbase.spark.rdd

import com.couchbase.client.java.document.json.JsonObject
import com.couchbase.client.java.view.SpatialViewQuery
import com.couchbase.spark.connection.{CouchbaseConfig, SpatialViewAccessor}
import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD

import scala.concurrent.duration.Duration

case class CouchbaseSpatialViewRow(id: String, key: Any, value: Any, geometry: JsonObject)

class SpatialViewRDD
  (@transient private val sc: SparkContext, viewQuery: SpatialViewQuery, bucketName: String = null,
   timeout: Option[Duration] = None)
  extends RDD[CouchbaseSpatialViewRow](sc, Nil) {

  private val cbConfig = CouchbaseConfig(sc.getConf)

  override def compute(split: Partition, context: TaskContext):
    Iterator[CouchbaseSpatialViewRow] =
    new SpatialViewAccessor(cbConfig, Seq(viewQuery), bucketName, timeout).compute()

  override protected def getPartitions: Array[Partition] = Array(new CouchbasePartition(0))

}

object SpatialViewRDD {
  def apply(sc: SparkContext, bucketName: String, viewQuery: SpatialViewQuery,
            timeout: Option[Duration] = None) =
    new SpatialViewRDD(sc, viewQuery, bucketName, timeout)
} 
Example 200
Source File: KeyValueRDD.scala    From couchbase-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.couchbase.spark.rdd


import java.net.InetAddress
import java.util.zip.CRC32

import com.couchbase.client.core.config.CouchbaseBucketConfig
import com.couchbase.client.core.message.cluster.{GetClusterConfigRequest, GetClusterConfigResponse}
import com.couchbase.client.java.document.Document
import com.couchbase.spark.Logging
import com.couchbase.spark.connection.{CouchbaseConfig, CouchbaseConnection, KeyValueAccessor}
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partition, SparkContext, TaskContext}

import scala.reflect.ClassTag
import rx.lang.scala.JavaConversions._

import scala.concurrent.duration.Duration

class KeyValuePartition(id: Int, docIds: Seq[String], loc: Option[InetAddress]) extends Partition {
  override def index: Int = id
  def ids: Seq[String] = docIds
  def location: Option[InetAddress] = loc
  override def toString = s"KeyValuePartition($id, $docIds, $loc)"
}

class KeyValueRDD[D <: Document[_]]
  (@transient private val sc: SparkContext, ids: Seq[String], bname: String = null,
   timeout: Option[Duration] = None)
  (implicit ct: ClassTag[D])
  extends RDD[D](sc, Nil) {

  private val cbConfig = CouchbaseConfig(sc.getConf)
  private val bucketName = Option(bname).getOrElse(cbConfig.buckets.head.name)

  override def compute(split: Partition, context: TaskContext): Iterator[D] = {
    val p = split.asInstanceOf[KeyValuePartition]
    new KeyValueAccessor[D](cbConfig, p.ids, bucketName, timeout).compute()
  }

  override protected def getPartitions: Array[Partition] = {
    val core = CouchbaseConnection().bucket(cbConfig, bucketName).core()

    val req = new GetClusterConfigRequest()
    val config = toScalaObservable(core.send[GetClusterConfigResponse](req))
      .map(c => {
        logWarning(c.config().bucketConfigs().toString)
        logWarning(bucketName)
        c.config().bucketConfig(bucketName)
      })
      .toBlocking
      .single

    val parts = config match {
      case bucketConfig: CouchbaseBucketConfig =>
        val numPartitions = bucketConfig.numberOfPartitions()
        var partitionIndex = 0
        ids.groupBy(id => {
          val crc32 = new CRC32()
          crc32.update(id.getBytes("UTF-8"))
          val rv = (crc32.getValue >> 16) & 0x7fff
          rv.toInt & numPartitions - 1
        }).map(grouped => {
          val hostname =
            bucketConfig.nodeAtIndex(bucketConfig.nodeIndexForMaster(grouped._1, false)).hostname()
          val currentIdx = partitionIndex
          partitionIndex += 1
          new KeyValuePartition(currentIdx, grouped._2,
            Some(InetAddress.getByName(hostname)))
        }).toArray
      case _ =>
        logWarning("Memcached preferred locations currently not supported.")
        Array(new KeyValuePartition(0, ids, None))
    }

    parts.asInstanceOf[Array[Partition]]
  }

  override protected def getPreferredLocations(split: Partition): Seq[String] = {
    val p = split.asInstanceOf[KeyValuePartition]
    if (p.location.isDefined) {
      Seq(p.location.get.getHostName, p.location.get.getHostAddress)
    } else {
      Nil
    }
  }

}
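
Tying the last few examples together: through the implicit conversion the connector's package object provides (SparkContext to SparkContextFunctions), a per-call Option[Duration] flows from user code down into KeyValueAccessor. A hedged usage sketch; the bucket name and document ids are hypothetical, and the "com.couchbase.bucket.<name>" config key is the convention this connector generation used:

import com.couchbase.client.java.document.JsonDocument
import com.couchbase.spark._
import org.apache.spark.{SparkConf, SparkContext}

import scala.concurrent.duration._

object KeyValueReadSketch extends App {
  val conf = new SparkConf()
    .setAppName("sketch")
    .setMaster("local[*]")
    .set("com.couchbase.bucket.travel-sample", "") // hypothetical bucket, empty password
  val sc = new SparkContext(conf)

  // couchbaseGet builds a KeyValueRDD; the explicit timeout overrides
  // the configured and environment defaults resolved in KeyValueAccessor.
  val docs = sc.couchbaseGet[JsonDocument](Seq("airline_10", "airline_10123"),
    timeout = Some(10.seconds))
  docs.collect().foreach(d => println(d.id()))
}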