java.util.concurrent.TimeUnit Scala Examples

The following examples show how to use java.util.concurrent.TimeUnit. Each example is taken from an open source project; the source file, originating project, and license are listed above the code.
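Before the project examples, here is a minimal, self-contained sketch of the TimeUnit idioms that recur throughout this page: converting between units, sleeping for a fixed interval, and building a scala.concurrent.duration.Duration for Await.result timeouts. It is not drawn from any of the projects below, and the object name TimeUnitBasics is made up purely for illustration.

import java.util.concurrent.TimeUnit

import scala.concurrent.duration.Duration

object TimeUnitBasics extends App {
  // Convert between units without manual arithmetic.
  val millis: Long  = TimeUnit.SECONDS.toMillis(5)           // 5000
  val seconds: Long = TimeUnit.MILLISECONDS.toSeconds(2500)  // 2 (truncates)

  // Sleep for a fixed interval; reads better than Thread.sleep(2000).
  TimeUnit.SECONDS.sleep(2)

  // Build a scala.concurrent.duration.Duration from a length and a TimeUnit,
  // as many of the examples below do for Await.result(...) timeouts.
  val timeout: Duration = Duration(30, TimeUnit.SECONDS)

  println(s"$millis ms, $seconds s, timeout = $timeout")
}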
Example 1
Source File: TsStreamingTest.scala    From spark-riak-connector   with Apache License 2.0
package com.basho.riak.spark.streaming

import java.nio.ByteBuffer
import java.util.concurrent.{Callable, Executors, TimeUnit}

import com.basho.riak.spark._
import com.basho.riak.spark.rdd.RiakTSTests
import com.basho.riak.spark.rdd.timeseries.{AbstractTimeSeriesTest, TimeSeriesData}
import com.fasterxml.jackson.core.JsonParser
import com.fasterxml.jackson.databind.{DeserializationFeature, ObjectMapper, SerializationFeature}
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import org.apache.spark.sql.Row
import org.junit.Assert._
import org.junit.experimental.categories.Category
import org.junit.{After, Before, Test}

@Category(Array(classOf[RiakTSTests]))
class TsStreamingTest extends AbstractTimeSeriesTest(false) with SparkStreamingFixture {

  protected final val executorService = Executors.newCachedThreadPool()
  private val dataSource = new SocketStreamingDataSource
  private var port = -1

  @Before
  def setUp(): Unit = {
    port = dataSource.start(client => {
      testData
        .map(tolerantMapper.writeValueAsString)
        .foreach(x => client.write(ByteBuffer.wrap(s"$x\n".getBytes)))
      logInfo(s"${testData.length} values were send to client")
    })
  }

  @After
  def tearDown(): Unit = {
    dataSource.stop()
  }

  @Test(timeout = 10 * 1000) // 10 seconds timeout
  def saveToRiak(): Unit = {
    executorService.submit(new Runnable {
      override def run(): Unit = {
        ssc.socketTextStream("localhost", port)
          .map(string => {
            val tsdata = new ObjectMapper()
              .configure(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES, true)
              .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, true)
              .configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true)
              .configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true)
              .configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false)
              .registerModule(DefaultScalaModule)
              .readValue(string, classOf[TimeSeriesData])
            Row(1, "f", tsdata.time, tsdata.user_id, tsdata.temperature_k)
          })
          .saveToRiakTS(bucketName)

        ssc.start()
        ssc.awaitTerminationOrTimeout(5 * 1000)
      }
    })

    val result = executorService.submit(new Callable[Array[Seq[Any]]] {
      override def call(): Array[Seq[Any]] = {
        var rdd = sc.riakTSTable[Row](bucketName)
          .sql(s"SELECT user_id, temperature_k FROM $bucketName $sqlWhereClause")
        var count = rdd.count()
        while (count < testData.length) {
          TimeUnit.SECONDS.sleep(2)

          rdd = sc.riakTSTable[Row](bucketName)
            .sql(s"SELECT user_id, temperature_k FROM $bucketName $sqlWhereClause")
          count = rdd.count()
        }
        rdd.collect().map(_.toSeq)
      }
    }).get()

    assertEquals(testData.length, result.length)
    assertEqualsUsingJSONIgnoreOrder(
      """
        |[
        |   ['bryce',305.37],
        |   ['bryce',300.12],
        |   ['bryce',295.95],
        |   ['ratman',362.121],
        |   ['ratman',3502.212]
        |]
      """.stripMargin, result)
  }
} 
Example 2
Source File: ShadowsocksApplication.scala    From shadowsocksr-android   with GNU General Public License v3.0
package com.github.shadowsocks

import java.util
import java.util.concurrent.TimeUnit

import android.app.Application
import android.content.pm.PackageManager
import android.preference.PreferenceManager
import android.support.v7.app.AppCompatDelegate
import com.github.shadowsocks.database.{DBHelper, ProfileManager}
import com.github.shadowsocks.utils.{Key, Utils}
import com.google.android.gms.analytics.{GoogleAnalytics, HitBuilders}
import com.google.android.gms.common.api.ResultCallback
import com.google.android.gms.tagmanager.{ContainerHolder, TagManager}
import com.j256.ormlite.logger.LocalLog

object ShadowsocksApplication {
  var app: ShadowsocksApplication = _
}

class ShadowsocksApplication extends Application {
  import ShadowsocksApplication._

  final val SIG_FUNC = "getSignature"
  var containerHolder: ContainerHolder = _
  lazy val tracker = GoogleAnalytics.getInstance(this).newTracker(R.xml.tracker)
  lazy val settings = PreferenceManager.getDefaultSharedPreferences(this)
  lazy val editor = settings.edit
  lazy val profileManager = new ProfileManager(new DBHelper(this))

  def isNatEnabled = settings.getBoolean(Key.isNAT, false)
  def isVpnEnabled = !isNatEnabled

  def getVersionName = try {
    getPackageManager.getPackageInfo(getPackageName, 0).versionName
  } catch {
    case _: PackageManager.NameNotFoundException => "Package name not found"
    case _: Throwable => null
  }

  // send event
  def track(category: String, action: String) = tracker.send(new HitBuilders.EventBuilder()
    .setCategory(category)
    .setAction(action)
    .setLabel(getVersionName)
    .build())

  def profileId = settings.getInt(Key.id, -1)
  def profileId(i: Int) = editor.putInt(Key.id, i).apply
  def currentProfile = profileManager.getProfile(profileId)

  def switchProfile(id: Int) = {
    profileId(id)
    profileManager.getProfile(id) getOrElse profileManager.createProfile()
  }

  override def onCreate() {
    java.lang.System.setProperty(LocalLog.LOCAL_LOG_LEVEL_PROPERTY, "ERROR")
    app = this
    AppCompatDelegate.setCompatVectorFromResourcesEnabled(true)
    val tm = TagManager.getInstance(this)
    val pending = tm.loadContainerPreferNonDefault("GTM-NT8WS8", R.raw.gtm_default_container)
    val callback = new ResultCallback[ContainerHolder] {
      override def onResult(holder: ContainerHolder) {
        if (!holder.getStatus.isSuccess) {
          return
        }
        containerHolder = holder
        val container = holder.getContainer
        container.registerFunctionCallMacroCallback(SIG_FUNC,
          (functionName: String, parameters: util.Map[String, AnyRef]) => {
            if (functionName == SIG_FUNC) {
              Utils.getSignature(getApplicationContext)
            }
            null
          })
      }
    }
    pending.setResultCallback(callback, 2, TimeUnit.SECONDS)
  }

  def refreshContainerHolder {
    val holder = app.containerHolder
    if (holder != null) holder.refresh()
  }
} 
Example 3
Source File: TimeoutsSpec.scala    From gfc-concurrent   with Apache License 2.0
package com.gilt.gfc.concurrent

import java.util.concurrent.{ TimeoutException, TimeUnit }
import scala.concurrent.duration._
import scala.concurrent.{ Future, Await }
import org.scalatest.{WordSpec, Matchers}

class TimeoutsSpec extends WordSpec with Matchers {
  import TimeoutsSpec._

  "Timeouts" when {
    "generating timing out futures" should {
      "create a Future that times out after the given finite duration" in {
        val now = System.currentTimeMillis
        val after = FiniteDuration(1, "second")
        val timingOut = Timeouts.timeout(after)
        an [TimeoutException] should be thrownBy { Await.result(timingOut, Duration(10, "seconds")) }
        val elapsed = (System.currentTimeMillis - now)
        elapsed should be (after.toMillis +- 500L)
      }

      "create timing out Futures that will fail predictably even under load" in {
        import scala.util.Random._

        val MaxTimeout = Duration(10, "seconds").toMillis.toInt
        val MaxDelta = Duration(50, "milliseconds").toMillis
        val Load = 10000

        val timingOuts: List[(Future[Nothing], Duration)] = (1 to Load).map { i =>
          val after = Duration(nextInt(MaxTimeout), "milliseconds")
          val timingOut = Timeouts.timeout(after)
          (timingOut, after)
        }.toList

        val timedOuts: List[(Future[Nothing], Duration, Duration, Duration)] = timingOuts.map { case (timingOut, after) =>
          val thrown = the [TimeoutException] thrownBy { Await.result(timingOut, Duration.Inf) }
          // println(thrown)
          val real = Duration(extractReal(thrown.getCause.getMessage), TimeUnit.MILLISECONDS)
          val delta = Duration(real.toMillis - after.toMillis, TimeUnit.MILLISECONDS)
          (timingOut, after, real, delta)
        }

        timedOuts.filter { case (timedOut, after, real, delta) => delta.toMillis > MaxDelta }.size shouldBe 0
      }

      "include the origin of the future" in {
        val here = new Exception()
        val timingOut = Timeouts.timeout(1.millis)
        val thrown = the [TimeoutException] thrownBy { Await.result(timingOut, Duration(10, "seconds")) }
        thrown.getStackTrace.size shouldBe > (50)
        val thrownFrames = thrown.getStackTrace.map(f => f: AnyRef).drop(7)
        val expectedFrames = here.getStackTrace.map(f => f: AnyRef)
        // Scala 2.12 stack frames differ slightly to stack frames in 2.10/2.11
        if (!java.util.Arrays.deepEquals(thrownFrames, expectedFrames.drop(2))) {
          thrownFrames shouldBe expectedFrames.drop(1)
        }
        thrown.getCause should not be null
        thrown.getCause.getStackTrace.size shouldBe <= (10)

      }
    }
  }
}

object TimeoutsSpec {
  private val pattern = """real: (\d+) ms.""".r

  def extractReal(s: String): Int = try {
    pattern.findFirstMatchIn(s).map { _.toString.split("real: ") }.get(1).split(" ms.").head.toInt
  } catch {
    case ex: Exception => throw new RuntimeException(s"Unable to parse real time from '${s}'", ex)
  }
} 
Example 4
Source File: SinkRouteHandler.scala    From ohara   with Apache License 2.0
package oharastream.ohara.shabondi.sink

import java.time.{Duration => JDuration}
import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, StatusCodes}
import akka.http.scaladsl.server.{ExceptionHandler, Route}
import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.data.Row
import oharastream.ohara.common.util.Releasable
import oharastream.ohara.shabondi.common.{JsonSupport, RouteHandler, ShabondiUtils}
import org.apache.commons.lang3.StringUtils

import scala.collection.mutable.ArrayBuffer
import scala.compat.java8.DurationConverters._
import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration.Duration
import spray.json.DefaultJsonProtocol._
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._

private[shabondi] object SinkRouteHandler {
  def apply(config: SinkConfig)(implicit actorSystem: ActorSystem) =
    new SinkRouteHandler(config)
}

private[shabondi] class SinkRouteHandler(config: SinkConfig)(implicit actorSystem: ActorSystem) extends RouteHandler {
  implicit private val contextExecutor: ExecutionContextExecutor = actorSystem.dispatcher

  private val log              = Logger(classOf[SinkRouteHandler])
  private[sink] val dataGroups = SinkDataGroups(config)

  def scheduleFreeIdleGroups(interval: JDuration, idleTime: JDuration): Unit =
    actorSystem.scheduler.scheduleWithFixedDelay(Duration(1, TimeUnit.SECONDS), interval.toScala) { () =>
      {
        log.trace("scheduled free group, total group: {} ", dataGroups.size)
        dataGroups.freeIdleGroup(idleTime)
      }
    }

  private val exceptionHandler = ExceptionHandler {
    case ex: Throwable =>
      log.error(ex.getMessage, ex)
      complete((StatusCodes.InternalServerError, ex.getMessage))
  }

  private def fullyPollQueue(queue: RowQueue): Seq[Row] = {
    val buffer    = ArrayBuffer.empty[Row]
    var item: Row = queue.poll()
    while (item != null) {
      buffer += item
      item = queue.poll()
    }
    buffer.toSeq
  }

  private def apiUrl = ShabondiUtils.apiUrl

  def route(): Route = handleExceptions(exceptionHandler) {
    path("groups" / Segment) { groupId =>
      get {
        if (StringUtils.isAlphanumeric(groupId)) {
          val group  = dataGroups.createIfAbsent(groupId)
          val result = fullyPollQueue(group.queue).map(row => JsonSupport.toRowData(row))
          complete(result)
        } else {
          val entity =
            HttpEntity(ContentTypes.`text/plain(UTF-8)`, "Illegal group name, only accept alpha and numeric.")
          complete(StatusCodes.NotAcceptable -> entity)
        }
      } ~ {
        complete(StatusCodes.MethodNotAllowed -> s"Unsupported method, please reference: $apiUrl")
      }
    } ~ {
      complete(StatusCodes.NotFound -> s"Please reference: $apiUrl")
    }
  }

  override def close(): Unit = {
    Releasable.close(dataGroups)
  }
} 
Example 5
Source File: QueueProducer.scala    From ohara   with Apache License 2.0
package oharastream.ohara.shabondi.sink

import java.time.{Duration => JDuration}
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean
import java.util.{Queue => JQueue}

import oharastream.ohara.common.data.{Row, Serializer}
import oharastream.ohara.common.util.Releasable
import oharastream.ohara.kafka.Consumer
import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.setting.TopicKey
import oharastream.ohara.metrics.basic.Counter

import scala.jdk.CollectionConverters._

private[sink] class QueueProducer(
  val groupName: String,
  val queue: JQueue[Row],
  val brokerProps: String,
  val topicKeys: Set[TopicKey],
  val pollTimeout: JDuration,
  val rowCounter: Counter
) extends Runnable
    with Releasable {
  private[this] val log                    = Logger(classOf[QueueProducer])
  private[this] val paused: AtomicBoolean  = new AtomicBoolean(false)
  private[this] val stopped: AtomicBoolean = new AtomicBoolean(false)

  private[this] val consumer: Consumer[Row, Array[Byte]] = Consumer
    .builder()
    .keySerializer(Serializer.ROW)
    .valueSerializer(Serializer.BYTES)
    .offsetFromBegin()
    .topicKeys(topicKeys.asJava)
    .connectionProps(brokerProps)
    .build()

  override def run(): Unit = {
    log.info(
      "{} group `{}` start.(topics={}, brokerProps={})",
      this.getClass.getSimpleName,
      groupName,
      topicKeys.mkString(","),
      brokerProps
    )
    try {
      while (!stopped.get) {
        if (!paused.get && queue.isEmpty) {
          val rows = consumer.poll(pollTimeout).asScala.map(_.key.get)
          rows.foreach { r =>
            queue.add(r)
            rowCounter.incrementAndGet()
          }
          log.trace("    group[{}], queue: {}, rows: {}", groupName, queue.size, rows.size)
        } else {
          TimeUnit.MILLISECONDS.sleep(10)
        }
      } // while
    } finally {
      consumer.close()
      log.info("stopped.")
    }
  }

  override def close(): Unit = {
    stop()
  }

  def stop(): Unit = {
    stopped.set(true)
  }

  def pause(): Unit = {
    if (paused.compareAndSet(false, true)) {
      log.info("{} paused.", this.getClass.getSimpleName)
    }
  }

  def resume(): Unit = {
    if (paused.compareAndSet(true, false)) {
      log.info("{} resumed.", this.getClass.getSimpleName)
    }
  }
} 
Example 6
Source File: TestSourceRoute.scala    From ohara   with Apache License 2.0
package oharastream.ohara.shabondi.source

import java.util.concurrent.TimeUnit

import akka.http.scaladsl.model._
import akka.http.scaladsl.testkit.RouteTestTimeout
import oharastream.ohara.common.data.Row
import oharastream.ohara.kafka.Consumer
import oharastream.ohara.metrics.BeanChannel
import oharastream.ohara.metrics.basic.CounterMBean
import oharastream.ohara.shabondi.{BasicShabondiTest, KafkaSupport}
import org.junit.Test
import org.scalatest.matchers.should.Matchers._
import spray.json.DefaultJsonProtocol._
import spray.json._

import scala.concurrent.duration.Duration
import scala.jdk.CollectionConverters._
final class TestSourceRoute extends BasicShabondiTest {
  import oharastream.ohara.shabondi.ShabondiRouteTestSupport._

  // Extend the timeout to avoid the exception:
  // org.scalatest.exceptions.TestFailedException: Request was neither completed nor rejected within 1 second
  implicit val routeTestTimeout = RouteTestTimeout(Duration(5, TimeUnit.SECONDS))

  private val columnCount  = 6
  private val requestCount = 200

  private def sourceData: Map[String, Int] =
    (1 to columnCount).foldLeft(Map.empty[String, Int]) { (m, v) =>
      m + ("col-" + v -> v)
    }

  @Test
  def testInvalidRequest(): Unit = {
    val topicKey1 = createTopicKey
    val config    = defaultSourceConfig(Seq(topicKey1))
    val webServer = new WebServer(config)

    val request = Get("/")
    request ~> webServer.routes ~> check {
      response.status should ===(StatusCodes.MethodNotAllowed)
      contentType should ===(ContentTypes.`text/plain(UTF-8)`)
    }

    val request2 = Post("/")
    request2 ~> webServer.routes ~> check {
      response.status should ===(StatusCodes.BadRequest)
      contentType should ===(ContentTypes.`text/plain(UTF-8)`)
    }

    val jsonRow  = sourceData.toJson.compactPrint
    val entity   = HttpEntity(ContentTypes.`application/json`, jsonRow)
    val request3 = Post("/", entity)
    request3 ~> webServer.routes ~> check {
      response.status should ===(StatusCodes.OK)
      contentType should ===(ContentTypes.`text/plain(UTF-8)`)
    }
  }

  @Test
  def testSourceRoute(): Unit = {
    val topicKey1 = createTopicKey
    val config    = defaultSourceConfig(Seq(topicKey1))
    val webServer = new WebServer(config)
    try {
      (1 to requestCount).foreach { _ =>
        val jsonRow = sourceData.toJson.compactPrint
        val entity  = HttpEntity(ContentTypes.`application/json`, jsonRow)
        val request = Post(uri = "/", entity)

        request ~> webServer.routes ~> check {
          entityAs[String] should ===("OK")
        }
      }

      // assertion
      val rowsTopic1: Seq[Consumer.Record[Row, Array[Byte]]] =
        KafkaSupport.pollTopicOnce(brokerProps, topicKey1, 60, requestCount)
      rowsTopic1.size should ===(requestCount)
      rowsTopic1(0).key.get.cells.size should ===(columnCount)

      // assert metrics
      val beans = counterMBeans()
      beans.size should ===(1)
      beans(0).getValue should ===(requestCount)
    } finally {
      webServer.close()
      topicAdmin.deleteTopic(topicKey1)
    }
  }

  private def counterMBeans(): Seq[CounterMBean] = BeanChannel.local().counterMBeans().asScala.toSeq
} 
Example 7
Source File: TestKafkaClient.scala    From ohara   with Apache License 2.0
package oharastream.ohara.shabondi.common

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import oharastream.ohara.common.data.{Cell, Row}
import oharastream.ohara.common.setting.TopicKey
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import oharastream.ohara.shabondi.{BasicShabondiTest, KafkaSupport}
import org.junit.{After, Before, Test}
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.ExecutionContext.Implicits._
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

final class TestKafkaClient extends BasicShabondiTest {
  import oharastream.ohara.shabondi.common.ConvertSupport._

  implicit lazy val system: ActorSystem = ActorSystem("shabondi-test")

  private[this] val topicKey = TopicKey.of("group", CommonUtils.randomString(5))

  @Before
  def before(): Unit = createTestTopic(topicKey)

  @After
  override def tearDown(): Unit =
    topicAdmin.deleteTopic(topicKey)

  @Test
  def testSingleProducer(): Unit = {
    val producer = KafkaSupport.newProducer(brokerProps)
    try {
      val row = Row.of(Cell.of("col1", 100))
      val sender = producer
        .sender()
        .key(row)
        .value(Array[Byte]())
        .topicKey(topicKey)

      val future = sender.send.toScala

      val metadata = Await.result(future, Duration(10, TimeUnit.SECONDS))

      metadata.topicKey should ===(topicKey)
      metadata.offset should ===(0)
      metadata.partition should ===(0)
    } finally {
      Releasable.close(producer)
    }
  }

  @Test
  def testConsumer(): Unit = {
    val producer = KafkaSupport.newProducer(brokerProps)
    try {
      Future.sequence {
        (1 to 9)
          .map(i => Row.of(Cell.of(s"col-$i", i * 10)))
          .map(row => producer.sender().key(row).value(Array[Byte]()).topicKey(topicKey))
          .map { sender =>
            sender.send.toScala
          }
      }

      val records = KafkaSupport.pollTopicOnce(brokerProps, topicKey, 10, 10)

      records.size should ===(9)
      records(0).topicKey shouldBe topicKey
      records(0).key.isPresent shouldBe true
      records(0).key.get shouldBe Row.of(Cell.of("col-1", 10))

      records(8).topicKey shouldBe topicKey
      records(8).key.isPresent shouldBe true
      records(8).key.get shouldBe Row.of(Cell.of("col-9", 90))
    } finally {
      Releasable.close(producer)
    }
  }
} 
Example 8
Source File: ServiceKeyHolder.scala    From ohara   with Apache License 2.0
package oharastream.ohara.it

import java.util.concurrent.TimeUnit

import oharastream.ohara.agent.container.ContainerClient
import oharastream.ohara.common.setting.ObjectKey
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import com.typesafe.scalalogging.Logger

import scala.collection.mutable
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}


      // Excerpt: cleanup logic that removes test containers. The enclosing class and method,
      // and the fields used below (client, clusterKey, excludedNodes, finalClose, KEEP_CONTAINERS,
      // LOG, result), are omitted from this excerpt.
      if (!finalClose || !KEEP_CONTAINERS)
        result(client.containers())
          .filter(
            container =>
              clusterKey.exists(key => container.name.contains(key.group()) && container.name.contains(key.name()))
          )
          .filterNot(container => excludedNodes.contains(container.nodeName))
          .foreach { container =>
            try {
              println(s"[-----------------------------------${container.name}-----------------------------------]")
              // only fetch the last 10 minutes (600 seconds) of the container log to avoid OutOfMemory on the Java heap
              val containerLogs = try result(client.log(container.name, Option(600)))
              catch {
                case e: Throwable =>
                  s"failed to fetch the logs for container:${container.name}. caused by:${e.getMessage}"
              }
              println(containerLogs)
              println("[------------------------------------------------------------------------------------]")
              result(client.forceRemove(container.name))
            } catch {
              case e: Throwable =>
                LOG.error(s"failed to remove container ${container.name}", e)
            }
          } finally Releasable.close(client)
} 
Example 9
Source File: TestQueryConfiguratorLog.scala    From ohara   with Apache License 2.0
package oharastream.ohara.it.client

import java.util.concurrent.TimeUnit

import oharastream.ohara.client.configurator.LogApi
import oharastream.ohara.it.{ContainerPlatform, WithRemoteConfigurator}
import oharastream.ohara.it.category.ClientGroup
import org.junit.Test
import org.junit.experimental.categories.Category
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.ExecutionContext.Implicits.global

@Category(Array(classOf[ClientGroup]))
class TestQueryConfiguratorLog(platform: ContainerPlatform)
    extends WithRemoteConfigurator(platform: ContainerPlatform) {
  @Test
  def test(): Unit = {
    val log = result(LogApi.access.hostname(configuratorHostname).port(configuratorPort).log4Configurator())
    log.logs.size shouldBe 1
    log.logs.head.hostname.length should not be 0
    log.logs.head.value.length should not be 0

    val logOf1Second = result(LogApi.access.hostname(configuratorHostname).port(configuratorPort).log4Configurator(1)).logs.head.value
    TimeUnit.SECONDS.sleep(6)
    val logOf6Second = result(LogApi.access.hostname(configuratorHostname).port(configuratorPort).log4Configurator(6)).logs.head.value
    withClue(s"logOf1Second:$logOf1Second\nlogOf6Second:$logOf6Second") {
      // the log length depends on a timer, so "=" (equal lengths) is also legal :)
      logOf1Second.length should be <= logOf6Second.length
    }
  }
} 
Example 10
Source File: TestVerifyNode.scala    From ohara   with Apache License 2.0
package oharastream.ohara.agent.docker

import java.util
import java.util.concurrent.TimeUnit

import oharastream.ohara.agent.{DataCollie, ServiceCollie}
import oharastream.ohara.client.configurator.NodeApi.{Node, State}
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import oharastream.ohara.testing.service.SshdServer
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration


class TestVerifyNode extends OharaTest {
  private[this] var errorMessage: String = _
  private[this] val sshServer = SshdServer.local(
    0,
    java.util.Map.of(
      "docker info --format '{{json .}}'",
      (_: String) =>
        if (errorMessage != null)
          throw new IllegalArgumentException(errorMessage)
        else util.List.of("""
                        |  {
                        |    "NCPU": 1,
                        |    "MemTotal": 1024
                        |  }
                        |""".stripMargin)
    )
  )

  private[this] val node = Node(
    hostname = sshServer.hostname(),
    port = sshServer.port(),
    user = sshServer.user(),
    password = sshServer.password(),
    services = Seq.empty,
    state = State.AVAILABLE,
    error = None,
    lastModified = CommonUtils.current(),
    resources = Seq.empty,
    tags = Map.empty
  )

  private[this] val collie = ServiceCollie.dockerModeBuilder.dataCollie(DataCollie(Seq(node))).build

  @Test
  def happyCase(): Unit = Await.result(collie.verifyNode(node), Duration(30, TimeUnit.SECONDS))

  @Test
  def badCase(): Unit = {
    errorMessage = CommonUtils.randomString()
    intercept[Exception] {
      Await.result(collie.verifyNode(node), Duration(30, TimeUnit.SECONDS))
    }.getMessage should include("unavailable")
  }

  @After
  def tearDown(): Unit = {
    Releasable.close(collie)
    Releasable.close(sshServer)
  }
} 
Example 11
Source File: TestK8SClientVerify.scala    From ohara   with Apache License 2.0
package oharastream.ohara.agent.k8s

import java.util.concurrent.TimeUnit

import oharastream.ohara.agent.fake.FakeK8SClient
import oharastream.ohara.agent.{DataCollie, ServiceCollie}
import oharastream.ohara.client.configurator.NodeApi.Node
import oharastream.ohara.common.rule.OharaTest
import org.junit.Test
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration

class TestK8SClientVerify extends OharaTest {
  private[this] val dataCollie: DataCollie = DataCollie(Seq.empty)

  private[this] def node: Node = Node("ohara", "user", "password")

  @Test
  def testMockK8sClientVerifyNode1(): Unit = {
    val fakeK8SClient = new FakeK8SClient(true, Option(K8SStatusInfo(true, "")), "")
    val serviceCollie: ServiceCollie =
      ServiceCollie.k8sModeBuilder.dataCollie(dataCollie).k8sClient(fakeK8SClient).build()
    Await.result(
      serviceCollie.verifyNode(node),
      Duration(30, TimeUnit.SECONDS)
    ) shouldBe "ohara node is running."
  }

  @Test
  def testMockK8sClientVerifyNode2(): Unit = {
    val fakeK8SClient = new FakeK8SClient(true, Option(K8SStatusInfo(false, "node failed.")), "")
    val serviceCollie: ServiceCollie =
      ServiceCollie.k8sModeBuilder.dataCollie(dataCollie).k8sClient(fakeK8SClient).build()
    intercept[IllegalStateException] {
      Await.result(
        serviceCollie.verifyNode(node),
        Duration(30, TimeUnit.SECONDS)
      )
    }.getMessage shouldBe "ohara node doesn't running container. cause: node failed."
  }

  @Test
  def testMockK8sClientVerifyNode3(): Unit = {
    val fakeK8SClient = new FakeK8SClient(false, Option(K8SStatusInfo(false, "failed")), "")
    val serviceCollie: ServiceCollie =
      ServiceCollie.k8sModeBuilder.dataCollie(dataCollie).k8sClient(fakeK8SClient).build()
    intercept[IllegalStateException] {
      Await.result(
        serviceCollie.verifyNode(node),
        Duration(30, TimeUnit.SECONDS)
      )
    }.getMessage shouldBe "ohara node doesn't running container. cause: failed"
  }

  @Test
  def testMockK8SClientVerifyNode4(): Unit = {
    val fakeK8SClient = new FakeK8SClient(false, None, "")
    val serviceCollie: ServiceCollie =
      ServiceCollie.k8sModeBuilder.dataCollie(dataCollie).k8sClient(fakeK8SClient).build()
    intercept[IllegalStateException] {
      Await.result(
        serviceCollie.verifyNode(node),
        Duration(30, TimeUnit.SECONDS)
      )
    }.getMessage shouldBe "ohara node doesn't running container. cause: ohara node doesn't exists."
  }
} 
Example 12
Source File: TestK8SServiceCollieImpl.scala    From ohara   with Apache License 2.0
package oharastream.ohara.agent.k8s

import java.util.concurrent.TimeUnit

import oharastream.ohara.agent.DataCollie
import oharastream.ohara.agent.fake.FakeK8SClient
import oharastream.ohara.client.configurator.NodeApi
import oharastream.ohara.client.configurator.NodeApi.{Node, Resource}
import oharastream.ohara.common.rule.OharaTest
import org.junit.Test

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{Await, ExecutionContext, Future}
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.duration.Duration

class TestK8SServiceCollieImpl extends OharaTest {
  @Test
  def testResource(): Unit = {
    val nodeCache  = (1 to 3).map(x => Node(s"node$x", "user", "password"))
    val dataCollie = DataCollie(nodeCache)

    val k8sClient = new FakeK8SClient(false, None, "container1") {
      override def resources()(
        implicit executionContext: ExecutionContext
      ): Future[Map[String, Seq[NodeApi.Resource]]] =
        Future.successful {
          Map(
            // use a Long literal (1024L) so the 100 GiB value does not overflow Int arithmetic
            "node1" -> Seq(Resource.cpu(8, Option(2.0)), Resource.memory(1024L * 1024 * 1024 * 100, Option(5.0))),
            "node2" -> Seq(Resource.cpu(8, Option(1.0)), Resource.memory(1024L * 1024 * 1024 * 100, Option(5.0))),
            "node3" -> Seq(Resource.cpu(8, Option(3.0)), Resource.memory(1024L * 1024 * 1024 * 100, Option(5.0)))
          )
        }
    }

    val k8sServiceCollieImpl = new K8SServiceCollieImpl(dataCollie, k8sClient)
    val resource             = result(k8sServiceCollieImpl.resources())
    resource.size shouldBe 3
    val nodeNames = resource.keys.toSeq
    nodeNames(0) shouldBe "node1"
    nodeNames(1) shouldBe "node2"
    nodeNames(2) shouldBe "node3"

    val node1Resource: Seq[Resource] =
      resource.filter(x => x._1 == "node1").flatMap(x => x._2).toSeq

    node1Resource(0).name shouldBe "CPU"
    node1Resource(0).unit shouldBe "cores"
    node1Resource(0).used.get shouldBe 2.0
    node1Resource(0).value shouldBe 8

    node1Resource(1).name shouldBe "Memory"
    node1Resource(1).unit shouldBe "bytes"
    node1Resource(1).used.get shouldBe 5.0
    node1Resource(1).value shouldBe 1024L * 1024 * 1024 * 100
  }

  @Test
  def testEmptyResource(): Unit = {
    val nodeCache  = (1 to 3).map(x => Node(s"node$x", "user", "password"))
    val dataCollie = DataCollie(nodeCache)

    val k8sClient = new FakeK8SClient(false, None, "container1") {
      override def resources()(
        implicit executionContext: ExecutionContext
      ): Future[Map[String, Seq[NodeApi.Resource]]] =
        Future.successful(Map.empty)
    }

    val k8sServiceCollieImpl = new K8SServiceCollieImpl(dataCollie, k8sClient)
    val resource             = result(k8sServiceCollieImpl.resources())
    resource.size shouldBe 0
  }

  private[this] def result[T](future: Future[T]): T = Await.result(future, Duration(10, TimeUnit.SECONDS))
} 
Example 13
Source File: TestRemoteFolderHandler.scala    From ohara   with Apache License 2.0
package oharastream.ohara.agent

import java.util.concurrent.TimeUnit

import oharastream.ohara.client.configurator.NodeApi.Node
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import oharastream.ohara.testing.service.SshdServer
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

class TestRemoteFolderHandler extends OharaTest {
  private[this] val server   = SshdServer.local(0)
  private[this] val hostname = server.hostname()
  private[this] val dataCollie = DataCollie(
    Seq(
      Node(
        hostname = hostname,
        port = server.port(),
        user = server.user(),
        password = server.password()
      )
    )
  )
  private[this] val folderHandler = RemoteFolderHandler.builder().dataCollie(dataCollie).build()

  @Test
  def testFolderNotExists(): Unit =
    result(folderHandler.exist(server.hostname(), "/home/ohara100")) shouldBe false

  @Test
  def testCreateFolderAndDelete(): Unit = {
    val path = s"/tmp/${CommonUtils.randomString(5)}"
    result(folderHandler.create(hostname, path)) shouldBe true
    result(folderHandler.exist(hostname, path)) shouldBe true
    // the folder already exists, so create() does nothing and returns false
    result(folderHandler.create(hostname, path)) shouldBe false
    result(folderHandler.delete(hostname, path)) shouldBe true
    result(folderHandler.delete(hostname, path)) shouldBe false
  }

  @Test
  def testListFolder(): Unit = {
    result(folderHandler.list(hostname, "/tmp")).size should not be 0
    val path = s"/tmp/${CommonUtils.randomString(5)}"
    result(folderHandler.create(hostname, path)) shouldBe true
    result(folderHandler.list(hostname, "/tmp")) should contain(path)
  }

  @Test
  def testInspectFolder(): Unit = {
    val folderInfo = result(folderHandler.inspect(hostname, "/tmp"))
    folderInfo.name shouldBe "tmp"
    folderInfo.permission shouldBe FolderPermission.READWRITE
    folderInfo.size should be > 0L
    folderInfo.uid should be >= 0
  }

  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(120, TimeUnit.SECONDS))

  @After
  def tearDown(): Unit = Releasable.close(server)
} 
Example 14
Source File: MetricsCache.scala    From ohara   with Apache License 2.0
package oharastream.ohara.configurator.store

import java.util.Objects
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean

import oharastream.ohara.client.configurator.BrokerApi.BrokerClusterInfo
import oharastream.ohara.client.configurator.ClusterInfo
import oharastream.ohara.client.configurator.MetricsApi.Metrics
import oharastream.ohara.client.configurator.ShabondiApi.ShabondiClusterInfo
import oharastream.ohara.client.configurator.StreamApi.StreamClusterInfo
import oharastream.ohara.client.configurator.WorkerApi.WorkerClusterInfo
import oharastream.ohara.client.configurator.ZookeeperApi.ZookeeperClusterInfo
import oharastream.ohara.common.annotations.{Optional, VisibleForTesting}
import oharastream.ohara.common.cache.RefreshableCache
import oharastream.ohara.common.setting.ObjectKey
import oharastream.ohara.common.util.Releasable

import scala.concurrent.duration.Duration

trait MetricsCache extends Releasable {
  def meters(clusterInfo: ClusterInfo): Map[String, Map[ObjectKey, Metrics]]

  def meters(clusterInfo: ClusterInfo, key: ObjectKey): Map[String, Metrics] =
    meters(clusterInfo)
      .map {
        case (hostname, keyAndMeters) =>
          hostname -> keyAndMeters.getOrElse(key, Metrics.EMPTY)
      }
}

object MetricsCache {
  def builder: Builder = new Builder()

  // TODO: remove this workaround if google guava support the custom comparison ... by chia
  @VisibleForTesting
  private[store] case class RequestKey(key: ObjectKey, service: String) {
    override def equals(obj: Any): Boolean = obj match {
      case another: RequestKey => another.key == key && another.service == service
      case _                   => false
    }
    override def hashCode(): Int  = 31 * key.hashCode + service.hashCode
    override def toString: String = s"key:$key, service:$service"
  }

  class Builder private[MetricsCache] extends oharastream.ohara.common.pattern.Builder[MetricsCache] {
    private[this] var refresher: () => Map[ClusterInfo, Map[String, Map[ObjectKey, Metrics]]] = _
    private[this] var frequency: Duration                                                     = Duration(5, TimeUnit.SECONDS)

    def refresher(refresher: () => Map[ClusterInfo, Map[String, Map[ObjectKey, Metrics]]]): Builder = {
      this.refresher = Objects.requireNonNull(refresher)
      this
    }

    @Optional("default value is equal to timeout")
    def frequency(frequency: Duration): Builder = {
      this.frequency = Objects.requireNonNull(frequency)
      this
    }

    override def build: MetricsCache = new MetricsCache {
      import scala.jdk.CollectionConverters._
      private[this] val refresher = Objects.requireNonNull(Builder.this.refresher)
      private[this] val closed    = new AtomicBoolean(false)
      private[this] val cache = RefreshableCache
        .builder[RequestKey, Map[String, Map[ObjectKey, Metrics]]]()
        .supplier(
          () =>
            refresher().map {
              case (clusterInfo, meters) =>
                key(clusterInfo) -> meters
            }.asJava
        )
        .frequency(java.time.Duration.ofMillis(frequency.toMillis))
        .build()

      private[this] def key(clusterInfo: ClusterInfo): RequestKey = RequestKey(
        key = clusterInfo.key,
        service = clusterInfo match {
          case _: ZookeeperClusterInfo => "zk"
          case _: BrokerClusterInfo    => "bk"
          case _: WorkerClusterInfo    => "wk"
          case _: StreamClusterInfo    => "stream"
          case _: ShabondiClusterInfo  => "shabondi"
          case c: ClusterInfo          => c.getClass.getSimpleName // used by testing
        }
      )

      override def meters(clusterInfo: ClusterInfo): Map[String, Map[ObjectKey, Metrics]] =
        cache.get(key(clusterInfo)).orElse(Map.empty)

      override def close(): Unit = if (closed.compareAndSet(false, true)) Releasable.close(cache)
    }
  }
} 
Example 15
Source File: TestConfiguratorMain.scala    From ohara   with Apache License 2.0
package oharastream.ohara.configurator
import java.util.concurrent.{Executors, TimeUnit}
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import oharastream.ohara.configurator.Configurator.Mode
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._
import scala.concurrent.{ExecutionContext, Future}

class TestConfiguratorMain extends OharaTest {
  @Test
  def illegalK8sUrl(): Unit =
    intercept[IllegalArgumentException] {
      Configurator.main(Array[String](Configurator.K8S_KEY, s"http://localhost:${CommonUtils.availablePort()}"))
    }.getMessage should include("unable to access")

  @Test
  def emptyK8sArgument(): Unit =
    an[IllegalArgumentException] should be thrownBy Configurator.main(Array[String](Configurator.K8S_KEY, ""))

  @Test
  def nullK8sArgument(): Unit =
    an[IllegalArgumentException] should be thrownBy Configurator.main(Array[String](Configurator.K8S_KEY))

  @Test
  def fakeWithK8s(): Unit =
    an[IllegalArgumentException] should be thrownBy Configurator.main(
      Array[String](Configurator.K8S_KEY, "http://localhost", Configurator.FAKE_KEY, "true")
    )

  @Test
  def k8sWithFake(): Unit =
    an[IllegalArgumentException] should be thrownBy Configurator.main(
      Array[String](Configurator.FAKE_KEY, "true", Configurator.K8S_KEY, "http://localhost")
    )

  @Test
  def testFakeMode(): Unit =
    runMain(
      Array[String](Configurator.HOSTNAME_KEY, "localhost", Configurator.PORT_KEY, "0", Configurator.FAKE_KEY, "true"),
      configurator => configurator.mode shouldBe Mode.FAKE
    )

  @Test
  def testDockerMode(): Unit =
    runMain(
      Array[String](Configurator.HOSTNAME_KEY, "localhost", Configurator.PORT_KEY, "0"),
      configurator => configurator.mode shouldBe Mode.DOCKER
    )

  private[this] def runMain(args: Array[String], action: Configurator => Unit): Unit = {
    Configurator.GLOBAL_CONFIGURATOR_SHOULD_CLOSE = false
    val service = ExecutionContext.fromExecutorService(Executors.newSingleThreadExecutor())
    Future[Unit](Configurator.main(args))(service)
    import java.time.Duration
    try {
      CommonUtils.await(() => Configurator.GLOBAL_CONFIGURATOR_RUNNING, Duration.ofSeconds(30))
      action(Configurator.GLOBAL_CONFIGURATOR)
    } finally {
      Configurator.GLOBAL_CONFIGURATOR_SHOULD_CLOSE = true
      service.shutdownNow()
      service.awaitTermination(60, TimeUnit.SECONDS)
    }
  }

  @After
  def tearDown(): Unit = {
    Configurator.GLOBAL_CONFIGURATOR_SHOULD_CLOSE = false
    Releasable.close(Configurator.GLOBAL_CONFIGURATOR)
    Configurator.GLOBAL_CONFIGURATOR = null
  }
} 
Example 16
Source File: TestDefaultValuesAutoComplete.scala    From ohara   with Apache License 2.0
package oharastream.ohara.configurator

import java.util.concurrent.{ConcurrentHashMap, TimeUnit}

import oharastream.ohara.client.configurator.{ConnectorApi, WorkerApi}
import oharastream.ohara.testing.WithBrokerWorker
import org.junit.Test
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

class TestDefaultValuesAutoComplete extends WithBrokerWorker {
  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(20, TimeUnit.SECONDS))

  private[this] val configurator =
    Configurator.builder.fake(testUtil().brokersConnProps(), testUtil().workersConnProps()).build()

  private[this] val workerClusterInfo = result(
    WorkerApi.access.hostname(configurator.hostname).port(configurator.port).list()
  ).head

  private[this] val connectorApi = ConnectorApi.access.hostname(configurator.hostname).port(configurator.port)

  @Test
  def testDefaultValuesForPerfSource(): Unit = {
    val connector = result(
      connectorApi.request
        .workerClusterKey(workerClusterInfo.key)
        .className("oharastream.ohara.connector.perf.PerfSource")
        .create()
    )
    connector.settings.keySet should contain("perf.batch")
    connector.settings.keySet should contain("perf.frequency")
    connector.settings.keySet should contain("perf.cell.length")
  }

  @Test
  def testDefaultValuesForConsoleSink(): Unit = {
    val connector = result(
      connectorApi.request
        .workerClusterKey(workerClusterInfo.key)
        .className("oharastream.ohara.connector.console.ConsoleSink")
        .create()
    )
    connector.settings.keySet should contain("console.sink.frequence")
    connector.settings.keySet should contain("console.sink.row.divider")

    val a = new ConcurrentHashMap[String, String]()
    import scala.jdk.CollectionConverters._
    a.elements().asScala.toSeq
  }
} 
Example 17
Source File: TestFakeConnectorAdmin.scala    From ohara   with Apache License 2.0
package oharastream.ohara.configurator
import java.util.concurrent.TimeUnit

import oharastream.ohara.client.configurator.ConnectorApi.State
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.setting.{ConnectorKey, TopicKey}
import oharastream.ohara.common.util.CommonUtils
import oharastream.ohara.configurator.fake.FakeConnectorAdmin
import org.junit.Test
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
class TestFakeConnectorAdmin extends OharaTest {
  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(20, TimeUnit.SECONDS))
  @Test
  def testControlConnector(): Unit = {
    val connectorKey = ConnectorKey.of(CommonUtils.randomString(5), CommonUtils.randomString(5))
    val topicKey     = TopicKey.of(CommonUtils.randomString(5), CommonUtils.randomString(5))
    val className    = CommonUtils.randomString(10)
    val fake         = new FakeConnectorAdmin()
    result(
      fake
        .connectorCreator()
        .connectorKey(connectorKey)
        .topicKey(topicKey)
        .numberOfTasks(1)
        .className(className)
        .create()
    )

    result(fake.exist(connectorKey)) shouldBe true

    result(fake.status(connectorKey)).connector.state shouldBe State.RUNNING.name

    result(fake.pause(connectorKey))
    result(fake.status(connectorKey)).connector.state shouldBe State.PAUSED.name

    result(fake.resume(connectorKey))
    result(fake.status(connectorKey)).connector.state shouldBe State.RUNNING.name

    result(fake.delete(connectorKey))
    result(fake.exist(connectorKey)) shouldBe false
  }
} 
Example 18
Source File: TestInspectWorkerCluster.scala    From ohara   with Apache License 2.0
package oharastream.ohara.configurator

import java.util.concurrent.TimeUnit

import oharastream.ohara.client.configurator.{InspectApi, WorkerApi}
import oharastream.ohara.common.util.Releasable
import oharastream.ohara.testing.WithBrokerWorker
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

class TestInspectWorkerCluster extends WithBrokerWorker {
  private[this] val configurator =
    Configurator.builder.fake(testUtil().brokersConnProps(), testUtil().workersConnProps()).build()

  private[this] val workerClusterInfo = result(
    WorkerApi.access.hostname(configurator.hostname).port(configurator.port).list()
  ).head
  private[this] def inspectApi = InspectApi.access.hostname(configurator.hostname).port(configurator.port)

  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(20, TimeUnit.SECONDS))

  @Test
  def inspectWithoutKey(): Unit = {
    val info = result(inspectApi.workerInfo())
    info.imageName shouldBe WorkerApi.IMAGE_NAME_DEFAULT
    info.settingDefinitions.size shouldBe WorkerApi.DEFINITIONS.size
    info.settingDefinitions.foreach { definition =>
      definition shouldBe WorkerApi.DEFINITIONS.find(_.key() == definition.key()).get
    }
  }

  @Test
  def inspectWithKey(): Unit = {
    val info = result(inspectApi.workerInfo(workerClusterInfo.key))
    info.imageName shouldBe WorkerApi.IMAGE_NAME_DEFAULT
    info.settingDefinitions.size shouldBe WorkerApi.DEFINITIONS.size
    info.settingDefinitions.foreach { definition =>
      definition shouldBe WorkerApi.DEFINITIONS.find(_.key() == definition.key()).get
    }
  }

  @After
  def tearDown(): Unit = Releasable.close(configurator)
} 
Example 19
Source File: TestResponseFromUnsupportedApis.scala    From ohara   with Apache License 2.0
package oharastream.ohara.configurator

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.{HttpMethod, HttpMethods, HttpRequest}
import akka.http.scaladsl.unmarshalling.Unmarshal
import oharastream.ohara.client.configurator.ErrorApi
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

class TestResponseFromUnsupportedApis extends OharaTest {
  private[this] val configurator = Configurator.builder.fake().build()

  private[this] implicit val actorSystem: ActorSystem = ActorSystem("Executor-TestResponseFromUnsupportedApis")

  private[this] val expectedMessage = oharastream.ohara.configurator.route.apiUrl

  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(20, TimeUnit.SECONDS))

  @Test
  def testGet(): Unit = sendRequest(HttpMethods.GET, CommonUtils.randomString()).apiUrl.get shouldBe expectedMessage

  @Test
  def testPut(): Unit = sendRequest(HttpMethods.PUT, CommonUtils.randomString()).apiUrl.get shouldBe expectedMessage

  @Test
  def testDelete(): Unit =
    sendRequest(HttpMethods.DELETE, CommonUtils.randomString()).apiUrl.get shouldBe expectedMessage

  @Test
  def testPost(): Unit = sendRequest(HttpMethods.POST, CommonUtils.randomString()).apiUrl.get shouldBe expectedMessage

  private[this] def sendRequest(method: HttpMethod, postfix: String): ErrorApi.Error =
    result(
      Http()
        .singleRequest(HttpRequest(method, s"http://${configurator.hostname}:${configurator.port}/$postfix"))
        .flatMap { response =>
          if (response.status.isSuccess()) Future.failed(new AssertionError())
          else Unmarshal(response.entity).to[ErrorApi.Error]
        }
    )

  @After
  def tearDown(): Unit = {
    Releasable.close(configurator)
    result(actorSystem.terminate())
  }
} 
Example 20
Source File: TestListManyPipelines.scala    From ohara   with Apache License 2.0
package oharastream.ohara.configurator

import java.util.concurrent.TimeUnit

import oharastream.ohara.client.configurator.{BrokerApi, ConnectorApi, PipelineApi, TopicApi, WorkerApi}
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import oharastream.ohara.testing.WithBrokerWorker
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
class TestListManyPipelines extends WithBrokerWorker {
  private[this] val configurator =
    Configurator.builder.fake(testUtil().brokersConnProps(), testUtil().workersConnProps()).build()

  private[this] val workerClusterInfo = result(
    WorkerApi.access.hostname(configurator.hostname).port(configurator.port).list()
  ).head

  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(20, TimeUnit.SECONDS))

  private[this] val numberOfPipelines = 30
  @Test
  def test(): Unit = {
    val topic = result(
      TopicApi.access
        .hostname(configurator.hostname)
        .port(configurator.port)
        .request
        .name(CommonUtils.randomString(10))
        .brokerClusterKey(
          result(BrokerApi.access.hostname(configurator.hostname).port(configurator.port).list()).head.key
        )
        .create()
    )

    val connector = result(
      ConnectorApi.access
        .hostname(configurator.hostname)
        .port(configurator.port)
        .request
        .name(CommonUtils.randomString(10))
        .className("oharastream.ohara.connector.perf.PerfSource")
        .topicKey(topic.key)
        .numberOfTasks(1)
        .workerClusterKey(workerClusterInfo.key)
        .create()
    )

    val pipelines = (0 until numberOfPipelines).map { _ =>
      result(
        PipelineApi.access
          .hostname(configurator.hostname)
          .port(configurator.port)
          .request
          .name(CommonUtils.randomString(10))
          .endpoint(connector)
          .endpoint(topic)
          .create()
      )
    }

    val listPipeline =
      Await.result(
        PipelineApi.access.hostname(configurator.hostname).port(configurator.port).list(),
        Duration(20, TimeUnit.SECONDS)
      )
    pipelines.size shouldBe listPipeline.size
    pipelines.foreach(p => listPipeline.exists(_.name == p.name) shouldBe true)
  }

  @After
  def tearDown(): Unit = Releasable.close(configurator)
} 
Example 21
Source File: TestConcurrentAccess.scala    From ohara   with Apache License 2.0
package oharastream.ohara.configurator

import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{Executors, TimeUnit}

import oharastream.ohara.client.configurator.NodeApi
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor, Future}

class TestConcurrentAccess extends OharaTest {
  private[this] val configurator = Configurator.builder.fake().build()

  private[this] val nodeApi = NodeApi.access.hostname(configurator.hostname).port(configurator.port)

  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(10, TimeUnit.SECONDS))

  
  @Test
  def deletedObjectShouldDisappearFromGet(): Unit = {
    val threadCount                                         = 10
    val threadsPool                                         = Executors.newFixedThreadPool(threadCount)
    val unmatchedCount                                      = new AtomicInteger()
    implicit val executionContext: ExecutionContextExecutor = ExecutionContext.fromExecutor(threadsPool)
    (0 until threadCount).foreach { _ =>
      threadsPool.execute { () =>
        val nodeName = CommonUtils.randomString(10)
        val nodes = result(
          nodeApi.request
            .nodeName(nodeName)
            .user(CommonUtils.randomString(10))
            .password(CommonUtils.randomString(10))
            .create()
            .flatMap(node => nodeApi.delete(node.key))
            .flatMap(_ => nodeApi.list())
        )
        if (nodes.exists(_.hostname == nodeName)) unmatchedCount.incrementAndGet()
      }
    }
    threadsPool.shutdown()
    threadsPool.awaitTermination(60, TimeUnit.SECONDS) shouldBe true
    unmatchedCount.get() shouldBe 0
  }

  @After
  def tearDown(): Unit = Releasable.close(configurator)
} 
Example 22
Source File: TestNodeNameUpperCaseRoute.scala    From ohara   with Apache License 2.0
package oharastream.ohara.configurator.route

import java.util.concurrent.TimeUnit

import oharastream.ohara.client.configurator.NodeApi
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import oharastream.ohara.configurator.Configurator
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

class TestNodeNameUpperCaseRoute extends OharaTest {
  private[this] val numberOfCluster = 1
  private[this] val configurator =
    Configurator.builder.fake(numberOfCluster, numberOfCluster, "zookeepercluster").build()
  private[this] val nodeApi                    = NodeApi.access.hostname(configurator.hostname).port(configurator.port)
  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(20, TimeUnit.SECONDS))

  @Test
  def testAddNodeNameLowerCase(): Unit = {
    val name = CommonUtils.randomString(10).toLowerCase
    result(nodeApi.request.nodeName(name).port(22).user("b").password("c").create()).name shouldBe name
  }

  @Test
  def testAddNodeNameUpperCase1(): Unit = {
    val name = CommonUtils.randomString(10).toUpperCase
    result(nodeApi.request.nodeName(name).port(22).user("b").password("c").create())
  }

  @Test
  def testAddNodeNameUpperCase2(): Unit = {
    val name = "HOST1.test"
    result(nodeApi.request.nodeName(name).port(22).user("b").password("c").create())
  }

  @Test
  def testAddNodeNameUpperCase3(): Unit = {
    val name = "aaa-Node1.test"
    result(nodeApi.request.nodeName(name).port(22).user("b").password("c").create())
  }

  @After
  def tearDown(): Unit = Releasable.close(configurator)
} 
Example 23
Source File: TestPrivateRoute.scala    From ohara   with Apache License 2.0
package oharastream.ohara.configurator.route

import java.util.concurrent.TimeUnit

import oharastream.ohara.client.configurator.PrivateApi.Deletion
import oharastream.ohara.client.configurator.{PrivateApi, WorkerApi}
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.configurator.Configurator
import org.junit.Test
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

class TestPrivateRoute extends OharaTest {
  private[this] val workerCount  = 2
  private[this] val configurator = Configurator.builder.fake(1, workerCount).build()

  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(20, TimeUnit.SECONDS))

  private[this] val workerApi = WorkerApi.access.hostname(configurator.hostname).port(configurator.port)

  @Test
  def testDeletion(): Unit = {
    val workers = result(workerApi.list())
    val group   = workers.head.group
    val kind    = workers.head.kind
    workers.size shouldBe workerCount
    result(
      workerApi.request
        .group(group)
        .nodeNames(workers.head.nodeNames)
        .brokerClusterKey(workers.head.brokerClusterKey)
        .create()
    )

    result(workerApi.list()).size shouldBe workers.size + 1

    // we use the same group to create a new worker cluster
    result(workerApi.list()).groupBy(_.group).size shouldBe workerCount

    result(
      PrivateApi.delete(
        hostname = configurator.hostname,
        port = configurator.port,
        deletion = Deletion(groups = Set(group), kinds = Set(kind))
      )
    )

    val latestWorkers = result(workerApi.list())
    latestWorkers.size shouldBe workers.size - 1

    // delete again
    result(
      PrivateApi.delete(
        hostname = configurator.hostname,
        port = configurator.port,
        deletion = Deletion(groups = Set(group), kinds = Set(kind))
      )
    )
    result(workerApi.list()).size shouldBe latestWorkers.size

    // delete group without kind
    result(
      PrivateApi.delete(
        hostname = configurator.hostname,
        port = configurator.port,
        deletion = Deletion(groups = Set(latestWorkers.head.group), kinds = Set.empty)
      )
    )
    result(workerApi.list()).size shouldBe latestWorkers.size
  }
} 
Example 24
Source File: TestObjectRoute.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.configurator.route

import java.util.concurrent.TimeUnit

import oharastream.ohara.client.configurator.ObjectApi
import oharastream.ohara.client.configurator.ObjectApi.ObjectInfo
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.setting.ObjectKey
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import oharastream.ohara.configurator.Configurator
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._
import spray.json.JsString

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
class TestObjectRoute extends OharaTest {
  private[this] val configurator = Configurator.builder.fake(1, 1).build()

  private[this] val objectApi = ObjectApi.access.hostname(configurator.hostname).port(configurator.port)

  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(20, TimeUnit.SECONDS))

  private[this] def create(): ObjectInfo = {
    val key = ObjectKey.of("g", "n")
    val settings = Map(
      CommonUtils.randomString() -> JsString(CommonUtils.randomString()),
      CommonUtils.randomString() -> JsString(CommonUtils.randomString())
    )
    val objectInfo = result(objectApi.request.key(key).settings(settings).create())
    objectInfo.key shouldBe key
    settings.foreach {
      case (k, v) => objectInfo.settings(k) shouldBe v
    }
    objectInfo
  }

  @Test
  def testCreate(): Unit = create()

  @Test
  def testGet(): Unit = {
    val objectInfo = create()
    objectInfo shouldBe result(objectApi.get(objectInfo.key))
  }

  @Test
  def testGetNothing(): Unit =
    an[IllegalArgumentException] should be thrownBy result(objectApi.get(ObjectKey.of(CommonUtils.randomString(), "n")))

  @Test
  def testList(): Unit = {
    val objectInfo = create()
    objectInfo shouldBe result(objectApi.list()).head
  }

  @Test
  def testDelete(): Unit = {
    val objectInfo = create()
    result(objectApi.delete(objectInfo.key))
    result(objectApi.list()) shouldBe Seq.empty
  }

  @Test
  def testUpdate(): Unit = {
    val objectInfo = create()
    val settings = Map(
      CommonUtils.randomString() -> JsString(CommonUtils.randomString()),
      CommonUtils.randomString() -> JsString(CommonUtils.randomString())
    )
    val updated = result(objectApi.request.key(objectInfo.key).settings(settings).update())
    settings.foreach {
      case (k, v) => updated.settings(k) shouldBe v
    }
    objectInfo.settings.foreach {
      case (k, v) =>
        if (k == "lastModified") updated.settings(k) should not be v
        else updated.settings(k) shouldBe v
    }
  }

  @After
  def tearDown(): Unit = Releasable.close(configurator)
} 
Example 25
Source File: TestMetricsCache.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.configurator.store

import java.util.concurrent.TimeUnit

import oharastream.ohara.client.configurator.MetricsApi.{Meter, Metrics}
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.setting.ObjectKey
import oharastream.ohara.common.util.CommonUtils
import oharastream.ohara.configurator.store.MetricsCache.RequestKey
import org.junit.Test
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.duration.Duration

class TestMetricsCache extends OharaTest {
  @Test
  def testRequestKey(): Unit = {
    val key = RequestKey(
      key = ObjectKey.of(CommonUtils.randomString(), CommonUtils.randomString()),
      service = CommonUtils.randomString()
    )

    key shouldBe key
    key should not be key.copy(key = ObjectKey.of(CommonUtils.randomString(), CommonUtils.randomString()))
    key should not be key.copy(service = CommonUtils.randomString())
  }

  @Test
  def nullRefresher(): Unit =
    an[NullPointerException] should be thrownBy MetricsCache.builder.refresher(null)

  @Test
  def nullFrequency(): Unit =
    an[NullPointerException] should be thrownBy MetricsCache.builder.frequency(null)

  @Test
  def testRefresh(): Unit = {
    val data = Map(
      ObjectKey.of("a", "b") -> Metrics(
        Seq(
          Meter(
            name = "name",
            value = 1.1,
            unit = "unit",
            document = "document",
            queryTime = CommonUtils.current(),
            startTime = Some(CommonUtils.current()),
            lastModified = Some(CommonUtils.current()),
            valueInPerSec = None
          )
        )
      )
    )
    val clusterInfo = FakeClusterInfo(CommonUtils.randomString())
    val cache = MetricsCache.builder
      .refresher(() => Map(clusterInfo -> Map(CommonUtils.hostname() -> data)))
      .frequency(Duration(2, TimeUnit.SECONDS))
      .build
    try {
      cache.meters(clusterInfo) shouldBe Map.empty
      TimeUnit.SECONDS.sleep(3)
      cache.meters(clusterInfo)(CommonUtils.hostname()) shouldBe data
    } finally cache.close()
  }

  @Test
  def failToOperateAfterClose(): Unit = {
    val cache = MetricsCache.builder.refresher(() => Map.empty).frequency(Duration(2, TimeUnit.SECONDS)).build
    cache.close()

    an[IllegalStateException] should be thrownBy cache.meters(FakeClusterInfo(CommonUtils.randomString()))
  }
} 
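
TestMetricsCache sleeps with TimeUnit.SECONDS.sleep rather than a raw Thread.sleep; a small sketch of the same idiom, using only the JDK:

import java.util.concurrent.TimeUnit

object SleepSketch extends App {
  val start = System.nanoTime()
  // TimeUnit.X.sleep(n) behaves like Thread.sleep but reads in the caller's unit
  TimeUnit.SECONDS.sleep(1)
  TimeUnit.MILLISECONDS.sleep(200)
  println(s"slept ~${TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start)} ms")
}
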
Example 26
Source File: ValidatorTask.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.connector.validation

import java.util
import java.util.concurrent.TimeUnit

import oharastream.ohara.client.configurator.InspectApi.{RdbInfo, RdbQuery}
import oharastream.ohara.client.configurator.{ErrorApi, InspectApi}
import oharastream.ohara.client.database.DatabaseClient
import oharastream.ohara.common.data.Serializer
import oharastream.ohara.common.util.VersionUtils
import org.apache.kafka.connect.data.Schema
import org.apache.kafka.connect.source.{SourceRecord, SourceTask}
import spray.json.{JsObject, _}

import scala.jdk.CollectionConverters._
class ValidatorTask extends SourceTask {
  private[this] var done                       = false
  private[this] var props: Map[String, String] = _
  private[this] val topic: String              = InspectApi.INTERNAL_TOPIC_KEY.topicNameOnKafka
  private[this] var requestId: String          = _
  override def start(props: util.Map[String, String]): Unit = {
    this.props = props.asScala.toMap
    requestId = require(InspectApi.REQUEST_ID)
  }

  override def poll(): util.List[SourceRecord] =
    if (done) {
      // just wait for the configurator to close this connector
      TimeUnit.SECONDS.sleep(2)
      null
    } else
      try information match {
        case query: RdbQuery => toSourceRecord(validate(query))
      } catch {
        case e: Throwable => toSourceRecord(ErrorApi.of(e))
      } finally done = true

  override def stop(): Unit = {
    // do nothing
  }

  override def version(): String = VersionUtils.VERSION

  private[this] def validate(query: RdbQuery): RdbInfo = {
    val client = DatabaseClient.builder.url(query.url).user(query.user).password(query.password).build
    try RdbInfo(
      name = client.databaseType,
      tables = client.tableQuery
        .catalog(query.catalogPattern.orNull)
        .schema(query.schemaPattern.orNull)
        .tableName(query.tableName.orNull)
        .execute()
    )
    finally client.close()
  }

  private[this] def toJsObject: JsObject = props(InspectApi.SETTINGS_KEY).parseJson.asJsObject
  private[this] def information = require(InspectApi.TARGET_KEY) match {
    case InspectApi.RDB_KIND => InspectApi.RDB_QUERY_FORMAT.read(toJsObject)
    case other: String =>
      throw new IllegalArgumentException(
        s"valid targets are ${InspectApi.RDB_KIND}. current is $other"
      )
  }

  private[this] def toSourceRecord(data: Object): util.List[SourceRecord] =
    util.Arrays.asList(
      new SourceRecord(
        null,
        null,
        topic,
        Schema.BYTES_SCHEMA,
        Serializer.STRING.to(requestId),
        Schema.BYTES_SCHEMA,
        Serializer.OBJECT.to(data)
      )
    )

  private[this] def require(key: String): String =
    props.getOrElse(key, throw new IllegalArgumentException(s"the $key is required"))
} 
Example 27
Source File: PerfSourceProps.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.connector.perf
import java.util.concurrent.TimeUnit

import oharastream.ohara.kafka.connector.TaskSetting

import scala.compat.java8.OptionConverters._
import scala.concurrent.duration.Duration
case class PerfSourceProps(batch: Int, freq: Duration, cellSize: Int) {
  def toMap: Map[String, String] = Map(
    PERF_BATCH_KEY       -> batch.toString,
    PERF_FREQUENCY_KEY   -> freq.toString,
    PERF_CELL_LENGTH_KEY -> cellSize.toString
  )
}

object PerfSourceProps {
  def apply(settings: TaskSetting): PerfSourceProps = PerfSourceProps(
    batch = settings.intOption(PERF_BATCH_KEY).orElse(PERF_BATCH_DEFAULT),
    freq = settings
      .durationOption(PERF_FREQUENCY_KEY)
      .asScala
      .map(d => Duration(d.toMillis, TimeUnit.MILLISECONDS))
      .getOrElse(PERF_FREQUENCY_DEFAULT),
    cellSize = settings.intOption(PERF_CELL_LENGTH_KEY).orElse(PERF_CELL_LENGTH_DEFAULT)
  )
} 
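
PerfSourceProps bridges a java.time.Duration (from TaskSetting) into a scala.concurrent Duration by going through TimeUnit.MILLISECONDS; the conversion on its own looks roughly like this (values are illustrative):

import java.util.concurrent.TimeUnit

import scala.concurrent.duration.Duration

object JavaToScalaDurationSketch extends App {
  val javaDuration = java.time.Duration.ofSeconds(5)
  // convert by taking the millisecond count and tagging it with a TimeUnit
  val scalaDuration = Duration(javaDuration.toMillis, TimeUnit.MILLISECONDS)
  println(scalaDuration) // 5000 milliseconds
}
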
Example 28
Source File: ConsoleSinkTask.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.connector.console

import java.util
import java.util.concurrent.TimeUnit

import oharastream.ohara.common.annotations.VisibleForTesting
import oharastream.ohara.common.util.CommonUtils
import oharastream.ohara.kafka.connector.{RowSinkRecord, RowSinkTask, TaskSetting}
import com.typesafe.scalalogging.Logger

import scala.concurrent.duration.Duration
import scala.jdk.CollectionConverters._

class ConsoleSinkTask extends RowSinkTask {
  private[this] val LOG = Logger(classOf[ConsoleSinkTask])
  @VisibleForTesting
  private[console] var freq: Duration = CONSOLE_FREQUENCE_DEFAULT
  @VisibleForTesting
  private[console] var divider: String = CONSOLE_ROW_DIVIDER_DEFAULT
  @VisibleForTesting
  private[console] var lastLog: Long = -1
  override protected def run(config: TaskSetting): Unit = {
    divider = config.stringOption(CONSOLE_ROW_DIVIDER).orElse(CONSOLE_ROW_DIVIDER_DEFAULT)
    freq = Duration(
      config
        .durationOption(CONSOLE_FREQUENCE)
        .orElse(java.time.Duration.ofMillis(CONSOLE_FREQUENCE_DEFAULT.toMillis))
        .toMillis,
      TimeUnit.MILLISECONDS
    )
  }

  override protected def terminate(): Unit = {
    // do nothing
  }

  override protected def putRecords(records: util.List[RowSinkRecord]): Unit =
    if (!records.isEmpty && (lastLog == -1 || CommonUtils.current() - lastLog >= freq.toMillis)) {
      try {
        LOG.info(records.asScala.map(_.row()).mkString(divider))
      } finally lastLog = CommonUtils.current()
    }
} 
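
ConsoleSinkTask throttles its logging by comparing elapsed milliseconds against a TimeUnit-based frequency; a stripped-down sketch of that throttle with hypothetical names:

import java.util.concurrent.TimeUnit

import scala.concurrent.duration.Duration

object RateLimitedLogSketch extends App {
  val freq          = Duration(3, TimeUnit.SECONDS)
  var lastLog: Long = -1

  def maybeLog(msg: String): Unit = {
    val now = System.currentTimeMillis()
    // only log when the configured frequency has elapsed since the last message
    if (lastLog == -1 || now - lastLog >= freq.toMillis) {
      println(msg)
      lastLog = now
    }
  }

  maybeLog("first")  // printed
  maybeLog("second") // suppressed: still inside the 3-second window
}
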
Example 29
Source File: package.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.connector

import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger

import oharastream.ohara.common.setting.SettingDef

import scala.concurrent.duration.Duration

package object console {
  
  private[this] val COUNTER               = new AtomicInteger(0)
  val CONSOLE_FREQUENCE: String           = "console.sink.frequence"
  val CONSOLE_FREQUENCE_DOC: String       = "the frequence to print the row on log"
  val CONSOLE_FREQUENCE_DEFAULT: Duration = Duration(3, TimeUnit.SECONDS)
  val CONSOLE_FREQUENCE_DEFINITION: SettingDef = SettingDef
    .builder()
    .displayName("the frequence of printing data")
    .key(CONSOLE_FREQUENCE)
    .documentation(CONSOLE_FREQUENCE_DOC)
    .optional(java.time.Duration.ofMillis(CONSOLE_FREQUENCE_DEFAULT.toMillis))
    .orderInGroup(COUNTER.getAndIncrement())
    .build()

  val CONSOLE_ROW_DIVIDER: String = "console.sink.row.divider"
  val CONSOLE_ROW_DIVIDER_DOC: String =
    "the charset used to divide the rows. For example, the divider \"|\" makes the output string: row_0|row_1"
  val CONSOLE_ROW_DIVIDER_DEFAULT: String = "|"
  val CONSOLE_ROW_DIVIDER_DEFINITION: SettingDef = SettingDef
    .builder()
    .displayName("the divider charset to distinguish each row")
    .key(CONSOLE_ROW_DIVIDER)
    .documentation(CONSOLE_ROW_DIVIDER_DOC)
    .optional(CONSOLE_ROW_DIVIDER_DEFAULT)
    .orderInGroup(COUNTER.getAndIncrement())
    .build()
} 
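
The package object goes the other way for the SettingDef default, turning the scala Duration back into a java.time.Duration; in isolation:

import java.util.concurrent.TimeUnit

import scala.concurrent.duration.Duration

object ScalaToJavaDurationSketch extends App {
  val scalaDefault = Duration(3, TimeUnit.SECONDS)
  // the reverse bridge: scala Duration -> milliseconds -> java.time.Duration
  val javaDefault = java.time.Duration.ofMillis(scalaDefault.toMillis)
  println(javaDefault) // PT3S
}
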
Example 30
Source File: ConnectorTestUtils.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.connector

import java.util.concurrent.TimeUnit

import oharastream.ohara.client.configurator.ConnectorApi.State
import oharastream.ohara.client.filesystem.FileSystem
import oharastream.ohara.client.kafka.ConnectorAdmin
import oharastream.ohara.common.exception.NoSuchFileException
import oharastream.ohara.common.setting.ConnectorKey
import oharastream.ohara.common.util.CommonUtils
import oharastream.ohara.testing.OharaTestUtils
import org.apache.kafka.connect.connector.Connector
import org.scalatest.matchers.should.Matchers._

import scala.jdk.CollectionConverters._
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration

object ConnectorTestUtils {
  private[this] val TIMEOUT = java.time.Duration.ofSeconds(60)

  def assertFailedConnector(testingUtil: OharaTestUtils, connectorKey: ConnectorKey): Unit =
    assertFailedConnector(testingUtil.workersConnProps(), connectorKey)

  def assertFailedConnector(workersConnProps: String, connectorKey: ConnectorKey): Unit = CommonUtils.await(
    () => {
      val client = ConnectorAdmin(workersConnProps)
      try Await.result(client.status(connectorKey), Duration(20, TimeUnit.SECONDS)).connector.state == State.FAILED.name
      catch {
        case _: Throwable => false
      }
    },
    TIMEOUT
  )

  def checkConnector(testingUtil: OharaTestUtils, connectorKey: ConnectorKey): Unit =
    checkConnector(testingUtil.workersConnProps(), connectorKey)

  def checkConnector(workersConnProps: String, connectorKey: ConnectorKey): Unit =
    CommonUtils.await(
      () => {
        val connectorAdmin = ConnectorAdmin(workersConnProps)
        try {
          Await.result(connectorAdmin.activeConnectors(), Duration(10, TimeUnit.SECONDS)).contains(connectorKey)
          val status = Await.result(connectorAdmin.status(connectorKey), Duration(10, TimeUnit.SECONDS))
          status.connector.state == State.RUNNING.name && status.tasks.nonEmpty && status.tasks
            .forall(_.state == State.RUNNING.name)
        } catch {
          case _: Throwable => false
        }
      },
      TIMEOUT
    )

  def nonexistentFolderShouldFail(
    fileSystem: FileSystem,
    connectorClass: Class[_ <: Connector],
    props: Map[String, String],
    path: String
  ): Unit = {
    fileSystem.delete(path, true)
    intercept[NoSuchFileException] {
      val connector = connectorClass.getDeclaredConstructor().newInstance()
      try connector.start(props.asJava)
      finally connector.stop()
    }.getMessage should include("doesn't exist")
  }

  def fileShouldFail(
    fileSystem: FileSystem,
    connectorClass: Class[_ <: Connector],
    props: Map[String, String],
    path: String
  ): Unit = {
    fileSystem.delete(path, true)
    val output = fileSystem.create(path)
    try output.write("fileShouldFail".getBytes)
    finally output.close()
    intercept[IllegalArgumentException] {
      val connector = connectorClass.getDeclaredConstructor().newInstance()
      try connector.start(props.asJava)
      finally connector.stop()
    }.getMessage should include("NOT folder")
  }
} 
Example 31
Source File: TestPerfSourceMetrics.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.connector.perf

import java.util.concurrent.TimeUnit

import oharastream.ohara.client.kafka.ConnectorAdmin
import oharastream.ohara.common.setting.{ConnectorKey, TopicKey}
import oharastream.ohara.common.util.CommonUtils
import oharastream.ohara.metrics.BeanChannel
import oharastream.ohara.testing.WithBrokerWorker
import org.junit.Test
import org.scalatest.matchers.should.Matchers._

import scala.jdk.CollectionConverters._
import scala.concurrent.Await
import scala.concurrent.duration.Duration

class TestPerfSourceMetrics extends WithBrokerWorker {
  private[this] val connectorAdmin = ConnectorAdmin(testUtil.workersConnProps)

  private[this] val props = PerfSourceProps(
    batch = 5,
    freq = Duration(5, TimeUnit.SECONDS),
    cellSize = 10
  )

  @Test
  def test(): Unit = {
    val topicKey     = TopicKey.of(CommonUtils.randomString(5), CommonUtils.randomString(5))
    val connectorKey = ConnectorKey.of(CommonUtils.randomString(5), CommonUtils.randomString(5))
    Await.result(
      connectorAdmin
        .connectorCreator()
        .topicKey(topicKey)
        .connectorClass(classOf[PerfSource])
        .numberOfTasks(1)
        .connectorKey(connectorKey)
        .settings(props.toMap)
        .create(),
      Duration(20, TimeUnit.SECONDS)
    )
    CommonUtils.await(() => {
      !BeanChannel.local().counterMBeans().isEmpty
    }, java.time.Duration.ofSeconds(30))
    val counters = BeanChannel.local().counterMBeans()
    counters.size should not be 0
    counters.asScala.foreach { counter =>
      counter.getStartTime should not be 0
      CommonUtils.requireNonEmpty(counter.getUnit)
      CommonUtils.requireNonEmpty(counter.getDocument)
    }
  }
} 
Example 32
Source File: TestPerfDefinition.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.connector.perf

import java.util.concurrent.TimeUnit

import oharastream.ohara.client.kafka.ConnectorAdmin
import oharastream.ohara.common.setting.SettingDef.{Necessary, Permission, Reference}
import oharastream.ohara.common.setting.{ConnectorKey, SettingDef, TopicKey}
import oharastream.ohara.common.util.CommonUtils
import oharastream.ohara.kafka.connector.json.ConnectorDefUtils
import oharastream.ohara.testing.WithBrokerWorker
import org.junit.Test
import org.scalatest.matchers.should.Matchers._

import scala.jdk.CollectionConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
class TestPerfDefinition extends WithBrokerWorker {
  private[this] val perfSource                 = new PerfSource
  private[this] val connectorAdmin             = ConnectorAdmin(testUtil().workersConnProps())
  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(20, TimeUnit.SECONDS))

  @Test
  def checkBatch(): Unit = {
    val definition = perfSource.settingDefinitions().get(PERF_BATCH_KEY)
    definition.necessary() should not be Necessary.REQUIRED
    definition.defaultInt() shouldBe PERF_BATCH_DEFAULT
    definition.permission() shouldBe Permission.EDITABLE
    definition.internal() shouldBe false
    definition.reference() shouldBe Reference.NONE
    definition.valueType() shouldBe SettingDef.Type.INT
  }

  @Test
  def checkFrequence(): Unit = {
    val definition = perfSource.settingDefinitions().get(PERF_FREQUENCY_KEY)
    definition.necessary() should not be Necessary.REQUIRED
    definition.defaultDuration() shouldBe java.time.Duration.ofMillis(PERF_FREQUENCY_DEFAULT.toMillis)
    definition.permission() shouldBe Permission.EDITABLE
    definition.internal() shouldBe false
    definition.reference() shouldBe Reference.NONE
    definition.valueType() shouldBe SettingDef.Type.DURATION
  }

  @Test
  def testSource(): Unit = {
    val topicKey = TopicKey.of(CommonUtils.randomString(5), CommonUtils.randomString(5))
    val response = result(
      connectorAdmin
        .connectorValidator()
        .connectorKey(ConnectorKey.of(CommonUtils.randomString(5), CommonUtils.randomString(5)))
        .numberOfTasks(1)
        .topicKey(topicKey)
        .connectorClass(classOf[PerfSource])
        .run()
    )

    response.settings().size should not be 0
    response
      .settings()
      .asScala
      .filter(_.definition().key() == ConnectorDefUtils.TOPIC_NAMES_DEFINITION.key())
      .head
      .definition()
      .necessary() shouldBe Necessary.REQUIRED
    response
      .settings()
      .asScala
      .filter(_.definition().key() == ConnectorDefUtils.CONNECTOR_CLASS_DEFINITION.key())
      .head
      .definition()
      .necessary() shouldBe Necessary.REQUIRED
    response
      .settings()
      .asScala
      .filter(_.definition().key() == ConnectorDefUtils.NUMBER_OF_TASKS_DEFINITION.key())
      .head
      .definition()
      .necessary() shouldBe Necessary.OPTIONAL
    response
      .settings()
      .asScala
      .filter(_.definition().key() == ConnectorDefUtils.COLUMNS_DEFINITION.key())
      .head
      .definition()
      .necessary() should not be Necessary.REQUIRED
    response
      .settings()
      .asScala
      .filter(_.definition().key() == ConnectorDefUtils.WORKER_CLUSTER_KEY_DEFINITION.key())
      .head
      .definition()
      .necessary() shouldBe Necessary.REQUIRED
    response.errorCount() shouldBe 0
  }
} 
Example 33
Source File: TestConsoleSinkTask.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.connector.console

import java.util.concurrent.TimeUnit

import oharastream.ohara.common.data.Row
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.setting.{ConnectorKey, TopicKey}
import oharastream.ohara.common.util.CommonUtils
import oharastream.ohara.kafka.connector.json.ConnectorDefUtils
import org.apache.kafka.connect.sink.SinkRecord
import org.junit.Test
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.duration.Duration
import scala.jdk.CollectionConverters._

class TestConsoleSinkTask extends OharaTest {
  private[this] val connectorKey = ConnectorKey.of("group", "TestConsoleSinkTask")
  private[this] def configs(key: String, value: String): java.util.Map[String, String] =
    Map(
      ConnectorDefUtils.CONNECTOR_KEY_DEFINITION.key()  -> ConnectorKey.toJsonString(connectorKey),
      ConnectorDefUtils.CONNECTOR_NAME_DEFINITION.key() -> CommonUtils.randomString(),
      key                                               -> value
    ).asJava

  @Test
  def testEmptySetting(): Unit = {
    val task = new ConsoleSinkTask()
    task.start(
      Map(
        ConnectorDefUtils.CONNECTOR_KEY_DEFINITION.key()  -> ConnectorKey.toJsonString(connectorKey),
        ConnectorDefUtils.CONNECTOR_NAME_DEFINITION.key() -> CommonUtils.randomString()
      ).asJava
    )
    task.freq shouldBe CONSOLE_FREQUENCE_DEFAULT
    task.divider shouldBe CONSOLE_ROW_DIVIDER_DEFAULT
  }

  @Test
  def testFrequence(): Unit = {
    val task = new ConsoleSinkTask()
    task.start(configs(CONSOLE_FREQUENCE, "20 seconds"))
    task.freq shouldBe Duration(20, TimeUnit.SECONDS)
  }

  @Test
  def testDivider(): Unit = {
    val task    = new ConsoleSinkTask()
    val divider = CommonUtils.randomString()
    task.start(configs(CONSOLE_ROW_DIVIDER, divider))
    task.divider shouldBe divider
  }

  @Test
  def testPrint(): Unit = {
    val task = new ConsoleSinkTask()
    task.start(configs(CONSOLE_FREQUENCE, "2 seconds"))
    task.lastLog shouldBe -1

    task.put(java.util.List.of())
    task.lastLog shouldBe -1

    putRecord(task)
    val lastLogCopy1 = task.lastLog
    lastLogCopy1 should not be -1

    TimeUnit.SECONDS.sleep(1)

    putRecord(task)
    val lastLogCopy2 = task.lastLog
    lastLogCopy2 shouldBe lastLogCopy1

    TimeUnit.SECONDS.sleep(1)

    putRecord(task)
    val lastLogCopy3 = task.lastLog
    lastLogCopy3 should not be lastLogCopy2
    lastLogCopy3 should not be -1
  }

  private[this] def putRecord(task: ConsoleSinkTask): Unit =
    task.put(
      java.util.List.of(
        new SinkRecord(
          TopicKey.of("g", "n").topicNameOnKafka(),
          1,
          null,
          Row.EMPTY,
          null,
          null,
          1
        )
      )
    )
} 
Example 34
Source File: Test873.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.client.kafka

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.{Http, server}
import oharastream.ohara.client.kafka.WorkerJson.{ConnectorCreationResponse, KafkaConnectorTaskId, _}
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.setting.ConnectorKey
import oharastream.ohara.common.util.CommonUtils
import oharastream.ohara.kafka.connector.json.Creation
import org.junit.Test
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.jdk.CollectionConverters._


class Test873 extends OharaTest {
  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(60, TimeUnit.SECONDS))

  @Test
  def testCreateConnector(): Unit = {
    val className = CommonUtils.randomString()
    val settings = Map(
      CommonUtils.randomString() -> CommonUtils.randomString()
    )
    val tasks = Seq(
      KafkaConnectorTaskId(
        connector = CommonUtils.randomString(),
        task = 10
      )
    )
    val server = toServer {
      path("connectors") {
        post {
          entity(as[Creation]) { req =>
            complete(
              ConnectorCreationResponse(
                name = req.name(),
                config = req.configs().asScala.toMap,
                tasks = tasks
              )
            )
          }
        }
      }
    }

    try {
      val connectorKey = ConnectorKey.of(CommonUtils.randomString(5), CommonUtils.randomString(5))
      val client       = ConnectorAdmin(s"${server.hostname}:${server.port}")
      val response = result(
        client.connectorCreator().connectorKey(connectorKey).settings(settings).className(className).create()
      )
      response.name shouldBe connectorKey.connectorNameOnKafka()
      response.tasks shouldBe tasks
      settings.foreach {
        case (k, v) =>
          response.config(k) shouldBe v
      }
    } finally server.close()
  }

  private[this] def toServer(route: server.Route): SimpleServer = {
    implicit val system: ActorSystem = ActorSystem("my-system")
    val server                       = Await.result(Http().bindAndHandle(route, "localhost", 0), Duration(30, TimeUnit.SECONDS))
    new SimpleServer {
      override def hostname: String = server.localAddress.getHostString
      override def port: Int        = server.localAddress.getPort
      override def close(): Unit = {
        Await.result(server.unbind(), Duration(30, TimeUnit.SECONDS))
        Await.result(system.terminate(), Duration(30, TimeUnit.SECONDS))
      }
    }
  }
} 
Example 35
Source File: DropwizardStatsCounter.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.caching

import java.util.concurrent.TimeUnit

import com.daml.metrics.CacheMetrics
import com.github.benmanes.caffeine.cache.RemovalCause
import com.github.benmanes.caffeine.cache.stats.{CacheStats, StatsCounter}

private[caching] final class DropwizardStatsCounter(
    metrics: CacheMetrics,
) extends StatsCounter {

  override def recordHits(newHits: Int): Unit =
    metrics.hitCount.inc(newHits.toLong)

  override def recordMisses(newMisses: Int): Unit =
    metrics.missCount.inc(newMisses.toLong)

  override def recordLoadSuccess(loadTimeNanos: Long): Unit = {
    metrics.loadSuccessCount.inc()
    metrics.totalLoadTime.update(loadTimeNanos, TimeUnit.NANOSECONDS)
  }

  override def recordLoadFailure(loadTimeNanos: Long): Unit = {
    metrics.loadFailureCount.inc()
    metrics.totalLoadTime.update(loadTimeNanos, TimeUnit.NANOSECONDS)
  }

  override def recordEviction(weight: Int, cause: RemovalCause): Unit = {
    metrics.evictionCount.inc()
    metrics.evictionWeight.inc(weight.toLong)
  }

  override def recordEviction(): Unit = {
    metrics.evictionCount.inc()
    metrics.evictionWeight.inc()
  }

  override def snapshot(): CacheStats =
    new CacheStats(
      metrics.hitCount.getCount,
      metrics.missCount.getCount,
      metrics.loadSuccessCount.getCount,
      metrics.loadFailureCount.getCount,
      metrics.totalLoadTime.getCount,
      metrics.evictionCount.getCount,
      metrics.evictionWeight.getCount,
    )

} 
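
DropwizardStatsCounter records load times by handing TimeUnit.NANOSECONDS to a Dropwizard timer alongside the raw value; a minimal sketch against a plain MetricRegistry (metric name and value are made up):

import java.util.concurrent.TimeUnit

import com.codahale.metrics.MetricRegistry

object TimerUpdateSketch extends App {
  val registry = new MetricRegistry
  val timer    = registry.timer("load.time")
  // Timer.update takes the measured value together with the TimeUnit it was captured in
  timer.update(1500000L, TimeUnit.NANOSECONDS)
  println(timer.getCount) // 1
}
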
Example 36
Source File: LargeTransactionBench.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.perf

import java.io.File
import java.util.concurrent.TimeUnit

import com.daml.platform.sandbox.perf.TestHelper._
import org.openjdk.jmh.annotations._

import scala.concurrent.Await

@State(Scope.Benchmark)
abstract class CreatedStateBase extends PerfBenchState {

  override def darFile: File = TestHelper.darFile

  @Param(Array("10", "100", "1000", "100000"))
  var n: Int = _

  var workflowId: String = _

  @Setup(Level.Invocation)
  def init(): Unit = {
    workflowId = uniqueId()
    sendCreates()
  }

  def sendCreates(): Unit
}

class RangeOfIntsCreatedState extends CreatedStateBase {

  override def sendCreates(): Unit =
    Await.result(rangeOfIntsCreateCommand(this, workflowId, n), setupTimeout)
}

class ListOfNIntsCreatedState extends CreatedStateBase {

  override def sendCreates(): Unit =
    Await.result(listUtilCreateCommand(this, workflowId), setupTimeout)
}

class LargeTransactionBench {

  @Benchmark
  def singleHugeContract(state: RangeOfIntsCreatedState): Unit =
    Await.result(
      rangeOfIntsExerciseCommand(state, state.workflowId, "ToListContainer", None),
      perfTestTimeout)

  //note that when running this with Postgres the bottleneck seems to originate from the fact that we traverse the huge
  //Transaction and execute SQL queries one after another. We could potentially partition the transaction so we can have batch queries instead.
  @Timeout(time = 20, timeUnit = TimeUnit.MINUTES) // we have a rare issue where this test runs extremely long with 100k contracts, making the test fail due to JMH timeout
  @Benchmark
  def manySmallContracts(state: RangeOfIntsCreatedState): Unit = {
    Await.result(
      rangeOfIntsExerciseCommand(state, state.workflowId, "ToListOfIntContainers", None),
      perfTestTimeout)
  }

  @Benchmark
  def listOfNInts(state: ListOfNIntsCreatedState): Unit =
    Await.result(
      listUtilExerciseSizeCommand(state, listUtilTemplateId, state.workflowId, state.n),
      perfTestTimeout)

} 
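
The @Timeout annotation above expresses its limit as a value plus a TimeUnit; a tiny JMH sketch showing the same annotation style (the benchmark body is arbitrary):

import java.util.concurrent.TimeUnit

import org.openjdk.jmh.annotations._

// JMH leans on TimeUnit for timeouts and for the unit results are reported in
@State(Scope.Benchmark)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@Timeout(time = 5, timeUnit = TimeUnit.MINUTES)
class TimeUnitAnnotationsSketch {
  @Benchmark
  def sumSmallRange(): Int = (1 to 1000).sum
}
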
Example 37
Source File: MetricsReporting.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.metrics

import java.time.Duration
import java.util.concurrent.TimeUnit

import com.codahale.metrics.Slf4jReporter.LoggingLevel
import com.codahale.metrics.jmx.JmxReporter
import com.codahale.metrics.{MetricRegistry, Reporter, Slf4jReporter}
import com.daml.metrics.{JvmMetricSet, Metrics}
import com.daml.platform.configuration.MetricsReporter
import com.daml.resources.{Resource, ResourceOwner}

import scala.concurrent.{ExecutionContext, Future}


final class MetricsReporting(
    jmxDomain: String,
    extraMetricsReporter: Option[MetricsReporter],
    extraMetricsReportingInterval: Duration,
) extends ResourceOwner[Metrics] {
  def acquire()(implicit executionContext: ExecutionContext): Resource[Metrics] = {
    val registry = new MetricRegistry
    registry.registerAll(new JvmMetricSet)
    for {
      slf4JReporter <- acquire(newSlf4jReporter(registry))
      _ <- acquire(newJmxReporter(registry))
        .map(_.start())
      _ <- extraMetricsReporter.fold(Resource.unit) { reporter =>
        acquire(reporter.register(registry))
          .map(_.start(extraMetricsReportingInterval.getSeconds, TimeUnit.SECONDS))
      }
      // Trigger a report to the SLF4J logger on shutdown.
      _ <- Resource(Future.successful(slf4JReporter))(reporter =>
        Future.successful(reporter.report()))
    } yield new Metrics(registry)
  }

  private def newJmxReporter(registry: MetricRegistry): JmxReporter =
    JmxReporter
      .forRegistry(registry)
      .inDomain(jmxDomain)
      .build()

  private def newSlf4jReporter(registry: MetricRegistry): Slf4jReporter =
    Slf4jReporter
      .forRegistry(registry)
      .convertRatesTo(TimeUnit.SECONDS)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .withLoggingLevel(LoggingLevel.DEBUG)
      .build()

  private def acquire[T <: Reporter](reporter: => T)(
      implicit executionContext: ExecutionContext
  ): Resource[T] =
    ResourceOwner
      .forCloseable(() => reporter)
      .acquire()
} 
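
MetricsReporting converts rates and durations with TimeUnit when building its reporters; the same builder calls exist on Dropwizard's ConsoleReporter, sketched here with an illustrative reporting period:

import java.util.concurrent.TimeUnit

import com.codahale.metrics.{ConsoleReporter, MetricRegistry}

object ReporterSketch extends App {
  val registry = new MetricRegistry
  val reporter = ConsoleReporter
    .forRegistry(registry)
    .convertRatesTo(TimeUnit.SECONDS)          // rates reported as events per second
    .convertDurationsTo(TimeUnit.MILLISECONDS) // durations reported in milliseconds
    .build()
  // the reporting period is also a (value, TimeUnit) pair
  reporter.start(10, TimeUnit.SECONDS)
  reporter.stop()
}
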
Example 38
Source File: GrpcClientResource.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.services

import java.net.{InetAddress, InetSocketAddress}
import java.util.concurrent.TimeUnit

import com.daml.platform.apiserver.EventLoopGroupOwner
import com.daml.ports.Port
import com.daml.resources.{Resource, ResourceOwner}
import io.grpc.Channel
import io.grpc.netty.NettyChannelBuilder
import io.netty.channel.EventLoopGroup

import scala.concurrent.{ExecutionContext, Future}

object GrpcClientResource {
  def owner(port: Port): ResourceOwner[Channel] =
    for {
      eventLoopGroup <- new EventLoopGroupOwner("api-client", sys.runtime.availableProcessors())
      channel <- channelOwner(port, EventLoopGroupOwner.clientChannelType, eventLoopGroup)
    } yield channel

  private def channelOwner(
      port: Port,
      channelType: Class[_ <: io.netty.channel.Channel],
      eventLoopGroup: EventLoopGroup,
  ): ResourceOwner[Channel] =
    new ResourceOwner[Channel] {
      override def acquire()(implicit executionContext: ExecutionContext): Resource[Channel] = {
        Resource(Future {
          NettyChannelBuilder
            .forAddress(new InetSocketAddress(InetAddress.getLoopbackAddress, port.value))
            .channelType(channelType)
            .eventLoopGroup(eventLoopGroup)
            .usePlaintext()
            .directExecutor()
            .build()
        })(channel =>
          Future {
            channel.shutdownNow()
            if (!channel.awaitTermination(5, TimeUnit.SECONDS)) {
              sys.error(
                "Unable to shutdown channel to a remote API under tests. Unable to recover. Terminating.")
            }
        })
      }
    }
} 
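
The channel teardown above is the usual gRPC idiom: shutdownNow followed by awaitTermination with a TimeUnit. A minimal sketch, assuming a grpc-java transport on the classpath; the host and port are placeholders:

import java.util.concurrent.TimeUnit

import io.grpc.ManagedChannelBuilder

object ChannelShutdownSketch extends App {
  val channel = ManagedChannelBuilder.forAddress("localhost", 6865).usePlaintext().build()
  channel.shutdownNow()
  // awaitTermination blocks up to the timeout and reports whether shutdown completed
  val terminated = channel.awaitTermination(5, TimeUnit.SECONDS)
  println(s"terminated in time: $terminated")
}
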
Example 39
Source File: ValueConversions.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.participant.util

import java.time.Instant
import java.util.concurrent.TimeUnit

import com.daml.ledger.api.v1.commands.{
  Command,
  CreateCommand,
  ExerciseByKeyCommand,
  ExerciseCommand
}
import com.daml.ledger.api.v1.value.Value.Sum
import com.daml.ledger.api.v1.value.Value.Sum.{
  ContractId,
  Numeric,
  Int64,
  Party,
  Text,
  Timestamp,
  List => DamlListValue
}
import com.daml.ledger.api.v1.value.{Identifier, Record, RecordField, Value, List => DamlList}

import scala.language.implicitConversions

object ValueConversions {

  val unit = Value(Sum.Record(Record.defaultInstance))

  implicit class StringValues(val s: String) extends AnyVal {
    def asParty: Value = Value(Party(s))
    def asNumeric: Value = Value(Numeric(s))
    def asText: Value = Value(Text(s))
    def asContractId: Value = Value(ContractId(s))
  }

  implicit class InstantValues(val i: Instant) extends AnyVal {
    def asTime: Value = {
      val micros = TimeUnit.SECONDS.toMicros(i.getEpochSecond) + TimeUnit.NANOSECONDS.toMicros(
        i.getNano.toLong)
      Value(Timestamp(micros))
    }
  }

  implicit class BooleanValues(val b: Boolean) extends AnyVal {
    def asBoolean: Value = Value(Value.Sum.Bool(b))
  }

  implicit class LongValues(val i: Long) extends AnyVal {
    def asInt64: Value = Value(Int64(i))
  }

  implicit class LabeledValues(val labeledValues: Seq[(String, Value)]) extends AnyVal {
    def asRecord = Record(None, asRecordFields)

    def asRecordOf(identifier: Identifier) = Record(Some(identifier), asRecordFields)

    def asRecordValue = Value(Value.Sum.Record(asRecord))

    def asRecordValueOf(identifier: Identifier) = Value(Value.Sum.Record(asRecordOf(identifier)))

    def asRecordFields: Seq[RecordField] = {
      labeledValues.map {
        case (k, v) => RecordField(k, Some(v))
      }
    }
  }

  implicit class ValueSequences(val values: Seq[Value]) extends AnyVal {
    def asList = Value(DamlListValue(DamlList(values)))
  }

  implicit def value2Optional(value: Value): Option[Value] = Some(value)

  implicit class ExerciseCommands(val exercise: ExerciseCommand) extends AnyVal {
    def wrap = Command(Command.Command.Exercise(exercise))
  }

  implicit class ExerciseByKeyCommands(val exerciseByKey: ExerciseByKeyCommand) extends AnyVal {
    def wrap = Command(Command.Command.ExerciseByKey(exerciseByKey))
  }

  implicit class CreateCommands(val create: CreateCommand) extends AnyVal {
    def wrap = Command(Command.Command.Create(create))
  }

} 
Example 40
Source File: ValueConversions.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.extractor.helpers

import java.time.Instant
import java.util.concurrent.TimeUnit

import com.daml.ledger.api.v1.commands.{Command, CreateCommand, ExerciseCommand}
import com.daml.ledger.api.v1.value.Value.Sum.{
  Numeric,
  Int64,
  Party,
  Text,
  Timestamp,
  List => DamlListValue
}
import com.daml.ledger.api.v1.value.{Identifier, Record, RecordField, Value, List => DamlList}

import scala.language.implicitConversions

object ValueConversions {

  implicit class StringValues(val s: String) extends AnyVal {
    def asParty: Value = Value(Party(s))
    def asNumeric: Value = Value(Numeric(s))
    def asText: Value = Value(Text(s))
  }

  implicit class InstantValues(val i: Instant) extends AnyVal {
    def asTime: Value = {
      val micros = TimeUnit.SECONDS.toMicros(i.getEpochSecond) + TimeUnit.NANOSECONDS.toMicros(
        i.getNano.toLong)
      Value(Timestamp(micros))
    }
  }

  implicit class LongValues(val i: Long) extends AnyVal {
    def asInt64: Value = Value(Int64(i))
  }

  implicit class LabeledValues(val labeledValues: Seq[(String, Value)]) extends AnyVal {
    def asRecord = Record(None, recordFields)

    def asRecordOf(identifier: Identifier) = Record(Some(identifier), recordFields)

    def asRecordValue = Value(Value.Sum.Record(asRecord))

    def asRecordValueOf(identifier: Identifier) = Value(Value.Sum.Record(asRecordOf(identifier)))

    private def recordFields: Seq[RecordField] = {
      labeledValues.map {
        case (k, v) => RecordField(k, Some(v))
      }
    }
  }

  implicit class ValueSequences(val values: Seq[Value]) extends AnyVal {
    def asList = Value(DamlListValue(DamlList(values)))
  }

  implicit def value2Optional(value: Value): Option[Value] = Some(value)

  implicit class ExerciseCommands(val exercise: ExerciseCommand) extends AnyVal {
    def wrap = Command(Command.Command.Exercise(exercise))
  }

  implicit class CreateCommands(val create: CreateCommand) extends AnyVal {
    def wrap = Command(Command.Command.Create(create))
  }

} 
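
Both ValueConversions helpers compute epoch microseconds by combining TimeUnit.SECONDS.toMicros with TimeUnit.NANOSECONDS.toMicros; the arithmetic on its own:

import java.time.Instant
import java.util.concurrent.TimeUnit

object InstantToMicrosSketch extends App {
  val now = Instant.now()
  // fold the second and nanosecond components into a single epoch-microsecond value
  val micros =
    TimeUnit.SECONDS.toMicros(now.getEpochSecond) +
      TimeUnit.NANOSECONDS.toMicros(now.getNano.toLong)
  println(micros)
}
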
Example 41
Source File: JwksVerifier.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.jwt

import java.net.{URI, URL}
import java.security.interfaces.RSAPublicKey
import java.util.concurrent.TimeUnit

import com.auth0.jwk.UrlJwkProvider
import com.daml.jwt.JwtVerifier.Error
import com.google.common.cache.{Cache, CacheBuilder}
import scalaz.{-\/, Show, \/}
import scalaz.syntax.show._


  private[this] def getCachedVerifier(keyId: String): Error \/ JwtVerifier = {
    if (keyId == null)
      -\/(Error('getCachedVerifier, "No Key ID found"))
    else
      \/.fromTryCatchNonFatal(
        cache.get(keyId, () => getVerifier(keyId).fold(e => sys.error(e.shows), x => x))
      ).leftMap(e => Error('getCachedVerifier, e.getMessage))
  }

  def verify(jwt: domain.Jwt): Error \/ domain.DecodedJwt[String] = {
    for {
      keyId <- \/.fromTryCatchNonFatal(com.auth0.jwt.JWT.decode(jwt.value).getKeyId)
        .leftMap(e => Error('verify, e.getMessage))
      verifier <- getCachedVerifier(keyId)
      decoded <- verifier.verify(jwt)
    } yield decoded
  }
}

object JwksVerifier {
  def apply(url: String) = new JwksVerifier(new URI(url).toURL)

  final case class Error(what: Symbol, message: String)

  object Error {
    implicit val showInstance: Show[Error] =
      Show.shows(e => s"JwksVerifier.Error: ${e.what}, ${e.message}")
  }
} 
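
The JwksVerifier snippet above elides the constructor where its verifier cache is created; the imports suggest a Guava cache whose expiry is expressed with a TimeUnit, sketched below with illustrative key and value types and an assumed expiry:

import java.util.concurrent.TimeUnit

import com.google.common.cache.CacheBuilder

object GuavaCacheSketch extends App {
  val cache = CacheBuilder
    .newBuilder()
    .maximumSize(1000)
    // entries are dropped once they have been in the cache for 10 minutes
    .expireAfterWrite(10, TimeUnit.MINUTES)
    .build[String, String]()
  cache.put("k", "v")
  println(cache.getIfPresent("k")) // v
}
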
Example 42
Source File: AkkaStreamGrpcServerResource.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import java.net.SocketAddress
import java.util.concurrent.TimeUnit

import akka.stream.Materializer
import io.grpc.BindableService

class AkkaStreamGrpcServerResource(
    constructServices: Materializer => Iterable[BindableService],
    actorMaterializerResource: Resource[Materializer],
    address: Option[SocketAddress])
    extends DerivedResource[Materializer, ServerWithChannelProvider](actorMaterializerResource) {

  @volatile private var runningServices: Iterable[BindableService] = Nil

  def getRunningServices: Iterable[BindableService] = runningServices

  override protected def construct(source: Materializer): ServerWithChannelProvider = {

    runningServices = constructServices(actorMaterializerResource.value)
    ServerWithChannelProvider.fromServices(runningServices, address, "server")

  }

  override protected def destruct(resource: ServerWithChannelProvider): Unit = {
    val server = derivedValue.server

    server.shutdownNow()

    runningServices.foreach {
      case closeable: AutoCloseable => closeable.close()
      case _ => ()
    }
    runningServices = Nil

    server.awaitTermination(10, TimeUnit.SECONDS)
    ()
  }
}

object AkkaStreamGrpcServerResource {
  def apply(
      constructServices: Materializer => Iterable[BindableService],
      actorSystemName: String = "",
      address: Option[SocketAddress]) =
    new AkkaStreamGrpcServerResource(
      constructServices,
      new ActorMaterializerResource(actorSystemName),
      address)
} 
Example 43
Source File: MultiFixtureBase.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import java.util.concurrent.{Executors, ScheduledExecutorService, TimeUnit}

import com.daml.dec.DirectExecutionContext
import org.scalatest._
import org.scalatest.concurrent.{AsyncTimeLimitedTests, ScaledTimeSpans}
import org.scalatest.exceptions.TestCanceledException
import org.scalatest.time.Span

import scala.collection.immutable.Iterable
import scala.concurrent.duration.DurationInt
import scala.concurrent.{Future, Promise, TimeoutException}
import scala.util.control.{NoStackTrace, NonFatal}

trait MultiFixtureBase[FixtureId, TestContext]
    extends Assertions
    with BeforeAndAfterAll
    with ScaledTimeSpans
    with AsyncTimeLimitedTests {
  self: AsyncTestSuite =>

  private var es: ScheduledExecutorService = _

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    es = Executors.newScheduledThreadPool(1)
  }

  override protected def afterAll(): Unit = {
    es.shutdownNow()
    super.afterAll()
  }

  protected class TestFixture(val id: FixtureId, createContext: () => TestContext) {
    def context(): TestContext = createContext()
  }

  def timeLimit: Span = scaled(30.seconds)

  object TestFixture {
    def apply(id: FixtureId, createContext: () => TestContext): TestFixture =
      new TestFixture(id, createContext)

    def unapply(testFixture: TestFixture): Option[(FixtureId, TestContext)] =
      Some((testFixture.id, testFixture.context()))
  }

  protected def fixtures: Iterable[TestFixture]

  
  protected def allFixtures(runTest: TestContext => Future[Assertion]): Future[Assertion] =
    forAllFixtures(fixture => runTest(fixture.context))

  protected def forAllFixtures(runTest: TestFixture => Future[Assertion]): Future[Assertion] = {
    forAllMatchingFixtures { case f => runTest(f) }
  }

  protected def forAllMatchingFixtures(
      runTest: PartialFunction[TestFixture, Future[Assertion]]): Future[Assertion] = {
    if (parallelExecution) {
      val results = fixtures.map(
        fixture =>
          if (runTest.isDefinedAt(fixture))
            runTestAgainstFixture(fixture, runTest)
          else
            Future.successful(succeed))
      Future.sequence(results).map(foldAssertions)
    } else {
      fixtures.foldLeft(Future.successful(succeed)) {
        case (resultSoFar, thisFixture) =>
          resultSoFar.flatMap {
            case Succeeded => runTestAgainstFixture(thisFixture, runTest)
            case other => Future.successful(other)
          }
      }
    }
  }

} 
Example 44
Source File: GrpcServerResource.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import java.net.SocketAddress
import java.util.concurrent.TimeUnit

import io.grpc._

final class GrpcServerResource(
    services: () => Iterable[BindableService with AutoCloseable],
    port: Option[SocketAddress],
) extends ManagedResource[ServerWithChannelProvider] {

  @volatile private var boundServices: Iterable[BindableService with AutoCloseable] = Nil

  override protected def construct(): ServerWithChannelProvider = {
    boundServices = services()
    ServerWithChannelProvider.fromServices(boundServices, port, "server")
  }

  override protected def destruct(resource: ServerWithChannelProvider): Unit = {
    resource.server.shutdownNow().awaitTermination(5, TimeUnit.SECONDS)
    boundServices.foreach(_.close())
    boundServices = Nil
    ()
  }
} 
Example 45
Source File: ReferenceServiceFixture.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter.operation

import com.daml.grpc.sampleservice.implementations.ReferenceImplementation
import com.daml.ledger.api.testing.utils.{GrpcServiceFixture, SuiteResourceManagementAroundAll}
import com.daml.platform.hello.HelloServiceGrpc
import com.daml.platform.hello.HelloServiceGrpc.HelloServiceStub

import java.util.concurrent.TimeUnit

trait ReferenceServiceFixture
    extends GrpcServiceFixture[HelloServiceStub]
    with SuiteResourceManagementAroundAll {

  protected lazy val channel = suiteResource.value.channel()
  protected lazy val clientStub = HelloServiceGrpc.stub(channel)

  override protected def afterAll(): Unit = {
    channel.shutdownNow().awaitTermination(5, TimeUnit.SECONDS)
    super.afterAll()
  }

  override protected def services =
    List(new ReferenceImplementation())

} 
Example 46
Source File: ServerStreamingBenchmark.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter

import akka.Done
import akka.stream.scaladsl.Sink
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.grpc.adapter.operation.AkkaServiceFixture
import com.daml.ledger.api.perf.util.AkkaStreamPerformanceTest
import com.daml.ledger.api.testing.utils.Resource
import com.daml.platform.hello.{HelloRequest, HelloServiceGrpc}
import io.grpc.ManagedChannel
import org.scalameter.api.Gen
import org.scalameter.picklers.noPickler._

import scala.concurrent.Future
import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

object ServerStreamingBenchmark extends AkkaStreamPerformanceTest {

  override type ResourceType = () => ManagedChannel

  @transient override protected lazy val resource: Resource[() => ManagedChannel] =
    AkkaServiceFixture.getResource(Some(new InetSocketAddress(0))).map(_._2.channel)

  private val sizes = for {
    totalElements <- Gen.range("numResponses")(50000, 100000, 50000)
    clients <- Gen.enumeration("numClients")(1, 10)
    callsPerClient <- Gen.enumeration("numCals")(1, 10)
  } yield (totalElements, clients, callsPerClient)

  performance of "Akka-Stream server" config (daConfig: _*) in {
    measure method "server streaming" in {
      using(sizes).withLifecycleManagement() in {
        case (totalElements, clients, callsPerClient) =>
          val eventualDones = for {
            (channel, schedulerPool) <- 1
              .to(clients)
              .map(i => resource.value() -> new AkkaExecutionSequencerPool(s"client-$i")(system))
            _ <- 1.to(callsPerClient)
          } yield {
            serverStreamingCall(totalElements / clients / callsPerClient, channel)(schedulerPool)
              .map(_ => channel -> schedulerPool)
          }
          val eventualTuples = Future.sequence(eventualDones)
          await(eventualTuples).foreach {
            case (channel, pool) =>
              channel.shutdown()
              channel.awaitTermination(5, TimeUnit.SECONDS)
              pool.close()
          }

      }
    }
  }

  private def serverStreamingCall(streamedElements: Int, managedChannel: ManagedChannel)(
      implicit
      executionSequencerFactory: ExecutionSequencerFactory): Future[Done] = {
    ClientAdapter
      .serverStreaming(
        HelloRequest(streamedElements),
        HelloServiceGrpc.stub(managedChannel).serverStreaming)
      .runWith(Sink.ignore)(materializer)
  }
} 
Example 47
Source File: AkkaServiceFixture.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter.operation

import com.daml.grpc.adapter.{ExecutionSequencerFactory, TestExecutionSequencerFactory}
import com.daml.grpc.adapter.utils.implementations.AkkaImplementation
import com.daml.ledger.api.testing.utils._
import com.daml.platform.hello.HelloServiceGrpc
import com.daml.platform.hello.HelloServiceGrpc.HelloServiceStub
import java.net.SocketAddress
import java.util.concurrent.TimeUnit

trait AkkaServiceFixture
    extends GrpcServerFixture[HelloServiceStub]
    with SuiteResourceManagementAroundAll {

  protected lazy val channel = suiteResource.value.channel()
  protected lazy val clientStub = HelloServiceGrpc.stub(channel)

  override protected def afterAll(): Unit = {
    channel.shutdownNow().awaitTermination(5, TimeUnit.SECONDS)
    super.afterAll()
  }
  protected def socketAddress: Option[SocketAddress]

  override protected def suiteResource: Resource[ServerWithChannelProvider] = resources.map(_._2)

  lazy val resources = AkkaServiceFixture.getResource(socketAddress)

  protected def service: AkkaImplementation =
    resources.getRunningServices.head.asInstanceOf[AkkaImplementation]

}

object AkkaServiceFixture {

  implicit private val esf: ExecutionSequencerFactory = TestExecutionSequencerFactory.instance

  def getResource(address: Option[SocketAddress]): AkkaStreamGrpcServerResource = {
    AkkaStreamGrpcServerResource(implicit m => List(new AkkaImplementation()), "server", address)
  }
} 
Example 48
Source File: DecodeMain.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.archive

import java.io.File
import java.util.concurrent.TimeUnit

import com.daml.lf.data.Ref
import com.daml.lf.language.Ast


object DecodeMain extends App {
  if (args.length != 1) {
    println("usage: decode <dar file>")
    System.exit(1)
  }

  def toMillis(a: Long, b: Long) = TimeUnit.NANOSECONDS.toMillis(b - a)

  (1 to 3).foreach { _ =>
    val t0 = System.nanoTime()

    val archives =
      DarReader().readArchiveFromFile(new File(args(0))).get
    val t1 = System.nanoTime()

    val _: (Ref.PackageId, Ast.Package) =
      Decode.readArchivePayload(archives.main._1, archives.main._2)
    val t2 = System.nanoTime()

    println(s"parseFrom in ${toMillis(t0, t1)}ms, decoded in ${toMillis(t1, t2)}ms.")

    // Wait a while to allow for running e.g. jmap -heap etc.
    //val pid = Integer.parseInt(new File("/proc/self").getCanonicalFile.getName)
    //println(s"sleeping 5s, pid is $pid.")
    //Thread.sleep(5000)

    System.gc()
  }
} 
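
DecodeMain measures each phase with System.nanoTime and converts the deltas through TimeUnit.NANOSECONDS.toMillis; a reusable sketch of that timing idiom:

import java.util.concurrent.TimeUnit

object ElapsedMillisSketch extends App {
  def timed[A](body: => A): (A, Long) = {
    val t0 = System.nanoTime()
    val a  = body
    // convert elapsed nanoseconds to milliseconds without hand-written constants
    (a, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - t0))
  }

  val (sum, ms) = timed((1 to 1000000).map(_.toLong).sum)
  println(s"sum=$sum computed in ${ms}ms")
}
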
Example 49
Source File: LedgerConfigurationClientImplTest.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.rxjava.grpc

import java.util.concurrent.TimeUnit

import com.daml.ledger.rxjava._
import com.daml.ledger.rxjava.grpc.helpers.{LedgerServices, TestConfiguration}
import com.daml.ledger.api.v1.ledger_configuration_service.GetLedgerConfigurationResponse
import org.scalatest.{FlatSpec, Matchers, OptionValues}

final class LedgerConfigurationClientImplTest
    extends FlatSpec
    with Matchers
    with AuthMatchers
    with OptionValues {

  private val ledgerServices = new LedgerServices("ledger-configuration-service-ledger")

  behavior of "[5.1] LedgerConfigurationClientImpl.getLedgerConfiguration"

  it should "send the request to the Ledger" in {
    ledgerServices.withConfigurationClient(Seq(GetLedgerConfigurationResponse.defaultInstance)) {
      (client, _) =>
        // to test that we send a request to the Ledger, we check if there is a response
        client.getLedgerConfiguration
          .timeout(TestConfiguration.timeoutInSeconds, TimeUnit.SECONDS)
          .blockingFirst()
    }
  }

  behavior of "[5.2] LedgerConfigurationClientImpl.getLedgerConfiguration"

  it should "send the request with the correct ledger ID" in {

    ledgerServices.withConfigurationClient(Seq(GetLedgerConfigurationResponse.defaultInstance)) {
      (client, service) =>
        client.getLedgerConfiguration
          .timeout(TestConfiguration.timeoutInSeconds, TimeUnit.SECONDS)
          .blockingFirst()
        service.getLastRequest.value.ledgerId shouldEqual ledgerServices.ledgerId
    }
  }

  behavior of "Authorization"

  def toAuthenticatedServer(fn: LedgerConfigurationClient => Any): Any =
    ledgerServices.withConfigurationClient(
      Seq(GetLedgerConfigurationResponse.defaultInstance),
      mockedAuthService) { (client, _) =>
      fn(client)
    }

  it should "deny access without a token" in {
    expectUnauthenticated {
      toAuthenticatedServer(
        _.getLedgerConfiguration
          .timeout(TestConfiguration.timeoutInSeconds, TimeUnit.SECONDS)
          .blockingFirst())
    }
  }

  it should "deny access with insufficient authorization" in {
    expectUnauthenticated {
      toAuthenticatedServer(
        _.getLedgerConfiguration(emptyToken)
          .timeout(TestConfiguration.timeoutInSeconds, TimeUnit.SECONDS)
          .blockingFirst())
    }
  }

  it should "allow access with sufficient authorization" in {
    toAuthenticatedServer(
      _.getLedgerConfiguration(publicToken)
        .timeout(TestConfiguration.timeoutInSeconds, TimeUnit.SECONDS)
        .blockingFirst())
  }

} 
Example 50
Source File: LedgerIdentityClientTest.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.rxjava.grpc

import java.util.concurrent.TimeUnit

import com.daml.ledger.rxjava._
import com.daml.ledger.rxjava.grpc.helpers.{LedgerServices, TestConfiguration}
import org.scalatest.{FlatSpec, Matchers}

final class LedgerIdentityClientTest extends FlatSpec with Matchers with AuthMatchers {

  val ledgerServices = new LedgerServices("ledger-identity-service-ledger")

  behavior of "[6.1] LedgerIdentityClient.getLedgerIdentity"

  it should "return ledger-id when requested" in ledgerServices.withLedgerIdentityClient() {
    (binding, _) =>
      binding.getLedgerIdentity
        .timeout(TestConfiguration.timeoutInSeconds, TimeUnit.SECONDS)
        .blockingGet() shouldBe ledgerServices.ledgerId
  }

  it should "return ledger-id when requested with authorization" in ledgerServices
    .withLedgerIdentityClient(mockedAuthService) { (binding, _) =>
      binding
        .getLedgerIdentity(publicToken)
        .timeout(TestConfiguration.timeoutInSeconds, TimeUnit.SECONDS)
        .blockingGet() shouldBe ledgerServices.ledgerId
    }

  it should "deny ledger-id queries with insufficient authorization" in ledgerServices
    .withLedgerIdentityClient(mockedAuthService) { (binding, _) =>
      expectUnauthenticated {
        binding
          .getLedgerIdentity(emptyToken)
          .timeout(TestConfiguration.timeoutInSeconds, TimeUnit.SECONDS)
          .blockingGet()
      }
    }

} 
Example 51
Source File: LedgerViewFlowableSpec.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.rxjava.components

import java.util.concurrent.TimeUnit

import com.daml.ledger.javaapi.data.{Identifier, LedgerOffset, WorkflowEvent}
import com.daml.ledger.rxjava.components.helpers.{CommandsAndPendingSet, CreatedContract}
import io.reactivex.Flowable
import org.scalatest.{FlatSpec, Matchers}

class LedgerViewFlowableSpec extends FlatSpec with Matchers {

  behavior of "LedgerViewFlowable.of"

  it should "not emit the initial message if the ACS is empty" in {
    val ledgerViewFlowable = LedgerViewFlowable.of(
      LedgerViewFlowable.LedgerView.create[Unit](),
      Flowable.never[LedgerViewFlowable.SubmissionFailure](),
      Flowable.never[LedgerViewFlowable.CompletionFailure](),
      Flowable.never[WorkflowEvent](),
      Flowable.never[CommandsAndPendingSet](),
      _ => ()
    )

    intercept[RuntimeException] {
      // NOTE(mp): in theory this test is not perfect because the stream
      // could emit something after 10ms. Eventually the test would reveal
      // the emitted element with a proper failure.
      // The test should also never result in a false negative, which
      // means that even if the element is emitted after the timeout, the
      // test would still be green and not break any master builds.
      ledgerViewFlowable
        .timeout(10, TimeUnit.MILLISECONDS)
        .blockingFirst()
    }
  }

  it should "emit the initial message if the ACS is not empty" in {
    val identifier = new Identifier("packageId", "moduleName", "entityName")
    val initialLedgerView =
      LedgerViewFlowable.LedgerView.create[Unit]().addActiveContract(identifier, "contractId", ())
    val ledgerViewFlowable = LedgerViewFlowable.of(
      initialLedgerView,
      Flowable.never[LedgerViewFlowable.SubmissionFailure](),
      Flowable.never[LedgerViewFlowable.CompletionFailure](),
      Flowable.never[WorkflowEvent](),
      Flowable.never[CommandsAndPendingSet](),
      _ => ()
    )

    ledgerViewFlowable
      .timeout(1, TimeUnit.SECONDS)
      .blockingFirst() shouldBe initialLedgerView
  }

  it should "use ledger begin as offset if the active contracts service returns an empty stream" in {
    val pair = LedgerViewFlowable
      .ledgerViewAndOffsetFromACS(Flowable.empty(), identity[CreatedContract])
      .blockingGet()
    pair.getSecond() shouldBe LedgerOffset.LedgerBegin.getInstance()
  }

} 
Example 52
Source File: TimestampConversion.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.api.util

import java.time.Instant
import java.util.concurrent.TimeUnit

import com.daml.ledger.api.v1.value.Value
import com.google.protobuf.timestamp.Timestamp

object TimestampConversion {
  val MIN = Instant parse "0001-01-01T00:00:00Z"
  val MAX = Instant parse "9999-12-31T23:59:59.999999Z"

  def microsToInstant(micros: Value.Sum.Timestamp): Instant = {
    val seconds = TimeUnit.MICROSECONDS.toSeconds(micros.value)
    val deltaMicros = micros.value - TimeUnit.SECONDS.toMicros(seconds)
    Instant.ofEpochSecond(seconds, TimeUnit.MICROSECONDS.toNanos(deltaMicros))
  }

  def instantToMicros(t: Instant): Value.Sum.Timestamp = {
    if (t.getNano % 1000 != 0)
      throw new IllegalArgumentException(
        s"Conversion of Instant $t to microsecond granularity would result in loss of precision.")
    else
      Value.Sum.Timestamp(
        TimeUnit.SECONDS.toMicros(t.getEpochSecond) + TimeUnit.NANOSECONDS
          .toMicros(t.getNano.toLong))

  }

  def toInstant(protoTimestamp: Timestamp): Instant = {
    Instant.ofEpochSecond(protoTimestamp.seconds, protoTimestamp.nanos.toLong)
  }

  def fromInstant(instant: Instant): Timestamp = {
    new Timestamp().withSeconds(instant.getEpochSecond).withNanos(instant.getNano)
  }
} 
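The conversions above split a microsecond count into whole seconds plus a sub-second remainder. A minimal sketch of the same arithmetic on plain Longs, without the ledger-api Value wrapper (the object name and sample value are illustrative):

import java.time.Instant
import java.util.concurrent.TimeUnit

object MicrosRoundTripSketch extends App {
  def microsToInstant(micros: Long): Instant = {
    val seconds = TimeUnit.MICROSECONDS.toSeconds(micros)
    val deltaMicros = micros - TimeUnit.SECONDS.toMicros(seconds)
    Instant.ofEpochSecond(seconds, TimeUnit.MICROSECONDS.toNanos(deltaMicros))
  }

  def instantToMicros(t: Instant): Long =
    TimeUnit.SECONDS.toMicros(t.getEpochSecond) + TimeUnit.NANOSECONDS.toMicros(t.getNano.toLong)

  val micros = 1532365695123456L
  val instant = microsToInstant(micros)
  println(instant)                            // 2018-07-23T17:08:15.123456Z
  println(instantToMicros(instant) == micros) // true
}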
Example 53
Source File: ScalaUtil.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.binding.util

import java.util.concurrent.{ScheduledExecutorService, ScheduledFuture, TimeUnit}

import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future, Promise, TimeoutException}

object ScalaUtil {

  implicit class FutureOps[T](val future: Future[T]) extends LazyLogging {

    def timeout(
        name: String,
        failTimeout: FiniteDuration = 1.minute,
        warnTimeout: FiniteDuration = 30.seconds)(
        implicit ec: ExecutionContext,
        scheduler: ScheduledExecutorService): Future[T] = {

      val promise = Promise[T]

      @SuppressWarnings(Array("org.wartremover.warts.JavaSerializable"))
      val warningTask = schedule(warnTimeout) {
        logger.warn("Function {} takes more than {}", name, warnTimeout)
      }

      val errorTask = schedule(failTimeout) {
        val error = new TimeoutException(s"Function call $name took more than $failTimeout")
        promise.tryFailure(error)
        ()
      }

      future.onComplete { outcome =>
        warningTask.cancel(false)
        errorTask.cancel(false)
        promise.tryComplete(outcome)
      }

      promise.future
    }

    private def schedule(timeout: FiniteDuration)(f: => Unit)(
        implicit scheduler: ScheduledExecutorService): ScheduledFuture[_] = {

      val runnable = new Runnable {
        override def run(): Unit = f
      }

      scheduler.schedule(runnable, timeout.toMillis, TimeUnit.MILLISECONDS)
    }

    def timeoutWithDefaultWarn(name: String, failTimeout: FiniteDuration)(
        implicit ec: ExecutionContext,
        scheduler: ScheduledExecutorService): Future[T] = timeout(name, failTimeout, 10.seconds)

  }

} 
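A minimal usage sketch of the timeout extension above, assuming the daml ledger-client bindings are on the classpath; the executor, the slow future, and the object name are illustrative only:

import java.util.concurrent.{Executors, ScheduledExecutorService}
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import com.daml.ledger.client.binding.util.ScalaUtil.FutureOps

object TimeoutSketch extends App {
  implicit val ec: ExecutionContext = ExecutionContext.global
  implicit val scheduler: ScheduledExecutorService = Executors.newSingleThreadScheduledExecutor()

  // A future that completes after 2 seconds; warn after 500 ms, fail after 1 second.
  val slow = Future { Thread.sleep(2000); "done" }
  val guarded = slow.timeout("slow-call", failTimeout = 1.second, warnTimeout = 500.millis)

  // Await.result rethrows the TimeoutException installed by the failTimeout task.
  try Await.result(guarded, 5.seconds)
  catch { case e: java.util.concurrent.TimeoutException => println(e.getMessage) }

  scheduler.shutdown()
}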
Example 54
Source File: Create.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.console.commands

import java.util.concurrent.TimeUnit

import com.daml.ledger.api.refinements.ApiTypes
import com.daml.navigator.console._
import com.daml.lf.value.json.ApiCodecCompressed
import com.daml.navigator.model
import com.daml.navigator.store.Store.CreateContract
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import scala.util.Try

@SuppressWarnings(Array("org.wartremover.warts.Product", "org.wartremover.warts.Serializable"))
case object Create extends SimpleCommand {
  def name: String = "create"

  def description: String = "Create a contract"

  def params: List[Parameter] = List(
    ParameterTemplateId("template", "Template ID"),
    ParameterLiteral("with"),
    ParameterDamlValue("argument", "Contract argument")
  )

  def sendCommand(
      state: State,
      ps: model.PartyState,
      template: String,
      arg: model.ApiRecord): Future[ApiTypes.CommandId] = {
    implicit val actorTimeout: Timeout = Timeout(20, TimeUnit.SECONDS)
    implicit val executionContext: ExecutionContext = state.ec

    val command = CreateContract(
      ps,
      model.TemplateStringId(template),
      arg
    )
    (state.store ? command)
      .mapTo[Try[ApiTypes.CommandId]]
      .map(c => c.get)
  }

  def eval(
      state: State,
      args: List[String],
      set: CommandSet): Either[CommandError, (State, String)] = {
    args match {
      case templateName :: w :: damlA if w.equalsIgnoreCase("with") =>
        for {
          ps <- state.getPartyState ~> s"Unknown party ${state.party}"
          templateId <- model.parseOpaqueIdentifier(templateName) ~> s"Unknown template $templateName"
          apiValue <- Try(
            ApiCodecCompressed.stringToApiType(
              damlA.mkString(" "),
              templateId,
              ps.packageRegistry.damlLfDefDataType _)) ~> "Failed to parse DAML value"
          apiRecord <- Try(apiValue.asInstanceOf[model.ApiRecord]) ~> "Record argument required"
          future <- Try(sendCommand(state, ps, templateName, apiRecord)) ~> "Failed to create contract"
          commandId <- Try(Await.result(future, 30.seconds)) ~> "Failed to create contract"
        } yield {
          (state, Pretty.yaml(Pretty.commandResult(ps, commandId)))
        }
      case _ =>
        Left(CommandError("Invalid syntax", None))
    }
  }

} 
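The Timeout(20, TimeUnit.SECONDS) construction used here (and in the following Navigator commands) is equivalent to building the actor-ask timeout from the duration DSL. A minimal sketch, assuming akka-actor is on the classpath:

import java.util.concurrent.TimeUnit
import akka.util.Timeout
import scala.concurrent.duration._

object TimeoutEquivalenceSketch extends App {
  val fromTimeUnit: Timeout    = Timeout(20, TimeUnit.SECONDS)
  val fromDurationDsl: Timeout = Timeout(20.seconds)

  println(fromTimeUnit.duration == fromDurationDsl.duration) // true
}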
Example 55
Source File: SetTime.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.console.commands

import java.time.Instant
import java.time.format.DateTimeFormatter
import java.util.concurrent.TimeUnit

import com.daml.navigator.console._
import com.daml.navigator.store.Store.AdvanceTime
import com.daml.navigator.time.TimeProviderWithType
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import scala.util.Try

case object SetTime extends SimpleCommand {
  def name: String = "set_time"

  def description: String = "Set the (static) ledger effective time"

  def params: List[Parameter] = List(ParameterString("time", "New (static) ledger effective time"))

  private def advanceTime(state: State, newTime: Instant): Future[TimeProviderWithType] = {
    implicit val actorTimeout: Timeout = Timeout(20, TimeUnit.SECONDS)
    implicit val executionContext: ExecutionContext = state.ec

    (state.store ? AdvanceTime(newTime))
      .mapTo[Try[TimeProviderWithType]]
      .map(t => t.get)
  }

  private def formatTime(t: Instant): String = DateTimeFormatter.ISO_INSTANT.format(t)

  def eval(
      state: State,
      args: List[String],
      set: CommandSet): Either[CommandError, (State, String)] = {
    for {
      arg1 <- args.headOption ~> "Missing <time> argument"
      newTime <- Try(Instant.parse(arg1)) ~> "Failed to parse time"
      future <- Try(advanceTime(state, newTime)) ~> "Failed to advance time"
      confirmedTime <- Try(Await.result(future, 30.seconds)) ~> "Failed to advance time"
      result <- Try(formatTime(confirmedTime.time.getCurrentTime)) ~> "Failed to format time"
    } yield {
      (state, s"New ledger effective time: $result")
    }
  }

} 
Example 56
Source File: Exercise.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.console.commands

import java.util.concurrent.TimeUnit

import com.daml.ledger.api.refinements.ApiTypes
import com.daml.navigator.console._
import com.daml.lf.value.Value.ValueUnit
import com.daml.lf.value.json.ApiCodecCompressed
import com.daml.navigator.model
import com.daml.navigator.store.Store.ExerciseChoice
import akka.pattern.ask
import akka.util.Timeout
import com.daml.navigator.model.ApiValue

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import scala.util.Try

@SuppressWarnings(Array("org.wartremover.warts.Product", "org.wartremover.warts.Serializable"))
case object Exercise extends SimpleCommand {
  def name: String = "exercise"

  def description: String = "Exercises a choice"

  def params: List[Parameter] = List(
    ParameterContractId("contract", "Contract ID"),
    ParameterChoiceId("choice", "Name of the choice"),
    ParameterLiteral("with"),
    ParameterDamlValue("argument", "Choice argument")
  )

  def sendCommand(
      state: State,
      ps: model.PartyState,
      contract: String,
      choice: String,
      arg: model.ApiValue): Future[ApiTypes.CommandId] = {
    implicit val actorTimeout: Timeout = Timeout(20, TimeUnit.SECONDS)
    implicit val executionContext: ExecutionContext = state.ec

    val command = ExerciseChoice(
      ps,
      ApiTypes.ContractId(contract),
      ApiTypes.Choice(choice),
      arg
    )
    (state.store ? command)
      .mapTo[Try[ApiTypes.CommandId]]
      .map(c => c.get)
  }

  def exerciseChoice(
      state: State,
      cid: String,
      choice: String,
      damlA: Option[List[String]]): Either[CommandError, (State, String)] = {
    for {
      ps <- state.getPartyState ~> s"Unknown party ${state.party}"
      types = ps.packageRegistry
      contract <- ps.ledger.contract(ApiTypes.ContractId(cid), types) ~> s"Unknown contract $cid"
      choiceType <- contract.template.choices
        .find(c => ApiTypes.Choice.unwrap(c.name) == choice) ~> s"Unknown choice $choice"
      apiValue <- Try(
        // Use unit value if no argument is given
        damlA.fold[ApiValue](ValueUnit)(
          arg =>
            ApiCodecCompressed.stringToApiType(
              arg.mkString(" "),
              choiceType.parameter,
              ps.packageRegistry.damlLfDefDataType _))) ~> "Failed to parse choice argument"
      future <- Try(sendCommand(state, ps, cid, choice, apiValue)) ~> "Failed to exercise choice"
      commandId <- Try(Await.result(future, 30.seconds)) ~> "Failed to exercise choice"
    } yield {
      (state, Pretty.yaml(Pretty.commandResult(ps, commandId)))
    }
  }

  def eval(
      state: State,
      args: List[String],
      set: CommandSet): Either[CommandError, (State, String)] = {
    args match {
      case cid :: choice :: Nil =>
        exerciseChoice(state, cid, choice, None)
      case cid :: choice :: w :: damlA if w.equalsIgnoreCase("with") =>
        exerciseChoice(state, cid, choice, Some(damlA))
      case _ =>
        Left(CommandError("Invalid syntax", None))
    }
  }

} 
Example 57
Source File: Time.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.console.commands

import java.time.Instant
import java.time.format.DateTimeFormatter
import java.util.concurrent.TimeUnit

import com.daml.navigator.console._
import com.daml.navigator.store.Store.ReportCurrentTime
import com.daml.navigator.time.TimeProviderWithType
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import scala.util.Try

case object Time extends SimpleCommand {
  def name: String = "time"

  def description: String = "Print the ledger effective time"

  def params: List[Parameter] = List.empty

  def getTime(state: State): Future[TimeProviderWithType] = {
    implicit val actorTimeout: Timeout = Timeout(20, TimeUnit.SECONDS)
    implicit val executionContext: ExecutionContext = state.ec

    (state.store ? ReportCurrentTime)
      .mapTo[Try[TimeProviderWithType]]
      .map(t => t.get)
  }

  def formatTime(t: Instant): String = DateTimeFormatter.ISO_INSTANT.format(t)

  def eval(
      state: State,
      args: List[String],
      set: CommandSet): Either[CommandError, (State, String)] = {
    for {
      future <- Try(getTime(state)) ~> "Failed to get time"
      time <- Try(Await.result(future, 30.seconds)) ~> "Failed to get time"
      result <- Try(formatTime(time.time.getCurrentTime)) ~> "Failed to format time"
    } yield {
      (state, result)
    }
  }

} 
Example 58
Source File: ProgramResource.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.resources

import java.util.concurrent.{Executors, TimeUnit}

import com.daml.logging.ContextualizedLogger
import com.daml.logging.LoggingContext.newLoggingContext
import com.daml.resources.ProgramResource._

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{Await, ExecutionContext}
import scala.util.Try
import scala.util.control.{NoStackTrace, NonFatal}

class ProgramResource[T](
    owner: => ResourceOwner[T],
    tearDownTimeout: FiniteDuration = 10.seconds,
) {
  private val logger = ContextualizedLogger.get(getClass)

  private val executorService = Executors.newCachedThreadPool()

  def run(): Unit = {
    newLoggingContext { implicit logCtx =>
      val resource = {
        implicit val executionContext: ExecutionContext =
          ExecutionContext.fromExecutor(executorService)
        Try(owner.acquire()).fold(Resource.failed, identity)
      }

      def stop(): Unit = {
        Await.result(resource.release(), tearDownTimeout)
        executorService.shutdown()
        executorService.awaitTermination(tearDownTimeout.toMillis, TimeUnit.MILLISECONDS)
        ()
      }

      sys.runtime.addShutdownHook(new Thread(() => {
        try {
          stop()
        } catch {
          case NonFatal(exception) =>
            logger.error("Failed to stop successfully.", exception)
        }
      }))

      // On failure, shut down immediately.
      resource.asFuture.failed.foreach { exception =>
        exception match {
          // The error is suppressed; we don't need to print anything more.
          case _: SuppressedStartupException =>
          case _: StartupException =>
            logger.error(
              s"Shutting down because of an initialization error.\n${exception.getMessage}")
          case NonFatal(_) =>
            logger.error("Shutting down because of an initialization error.", exception)
        }
        sys.exit(1) // `stop` will be triggered by the shutdown hook.
      }(ExecutionContext.global) // Run on the global execution context to avoid deadlock.
    }
  }
}

object ProgramResource {

  trait StartupException extends NoStackTrace {
    self: Exception =>
  }

  trait SuppressedStartupException {
    self: Exception =>
  }
} 
Example 59
Source File: JbokBenchmark.scala    From iotchain   with MIT License 5 votes vote down vote up
package jbok.benchmark

import java.util.concurrent.TimeUnit

import cats.effect.IO
import org.openjdk.jmh.annotations._

import scala.concurrent.ExecutionContext

@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 1, jvmArgs = Array(
  "-server",
  "-Xms2g",
  "-Xmx2g",
  "-XX:NewSize=1g",
  "-XX:MaxNewSize=1g",
  "-XX:InitialCodeCacheSize=512m",
  "-XX:ReservedCodeCacheSize=512m",
  "-XX:+UseParallelGC",
  "-XX:-UseBiasedLocking",
  "-XX:+AlwaysPreTouch"
))
abstract class JbokBenchmark {
  implicit val cs = IO.contextShift(ExecutionContext.global)

  implicit val timer = IO.timer(ExecutionContext.global)
} 
Example 60
Source File: BasicCodecs.scala    From iotchain   with MIT License 5 votes vote down vote up
package jbok.codec.rlp

import java.net.URI
import java.util.concurrent.TimeUnit

import scodec._
import scodec.bits._
import spire.math.SafeLong

import scala.concurrent.duration.Duration

// custom `scodec.Codec`
// mainly for unsigned numbers
private[rlp] trait BasicCodecs {
  val uSafeLong: Codec[SafeLong] = codecs.bytes.xmap[SafeLong](
    bytes => if (bytes.isEmpty) SafeLong.zero else SafeLong(BigInt(1, bytes.toArray)),
    safeLong => {
      require(safeLong >= SafeLong.zero, "unsigned codec cannot encode negative values")
      val bytes = safeLong.toBigInt.toByteArray
      ByteVector(if (bytes.head == 0) bytes.tail else bytes)
    }
  )

  val ubigint: Codec[BigInt] = new Codec[BigInt] {
    val codec = codecs.bytes.xmap[BigInt](
      bytes => {
        if (bytes.isEmpty) 0 else BigInt(1, bytes.toArray)
      },
      bi => {
        require(bi >= 0, "unsigned codec cannot encode negative values")
        val bytes = bi.toByteArray
        ByteVector(if (bytes.head == 0) bytes.tail else bytes)
      }
    )

    override def encode(value: BigInt): Attempt[BitVector]              = codec.encode(value)
    override def decode(bits: BitVector): Attempt[DecodeResult[BigInt]] = codec.decode(bits)
    override def sizeBound: SizeBound                                   = SizeBound.atLeast(1L)
    override def toString: String                                       = "UBigInt"
  }

  val ulong: Codec[Long] = new Codec[Long] {
    val codec = ubigint.xmap[Long](_.toLong, BigInt.apply)

    override def encode(value: Long): Attempt[BitVector]              = codec.encode(value)
    override def decode(bits: BitVector): Attempt[DecodeResult[Long]] = codec.decode(bits)
    override def sizeBound: SizeBound                                 = SizeBound.bounded(1L, 8L)
    override def toString: String                                     = "ULong"
  }

  val uint: Codec[Int] = new Codec[Int] {
    val codec = ubigint.xmap[Int](_.toInt, BigInt.apply)

    override def encode(value: Int): Attempt[BitVector]              = codec.encode(value)
    override def decode(bits: BitVector): Attempt[DecodeResult[Int]] = codec.decode(bits)
    override def sizeBound: SizeBound                                = SizeBound.bounded(1L, 4L)
    override def toString: String                                    = "UInt"
  }

  val bool: Codec[Boolean] = new Codec[Boolean] {
    val fb    = hex"00"
    val tb    = hex"01"
    val codec = codecs.bytes(1).xmap[Boolean](bytes => if (bytes == fb) false else true, b => if (b) tb else fb)

    override def encode(value: Boolean): Attempt[BitVector]              = codec.encode(value)
    override def decode(bits: BitVector): Attempt[DecodeResult[Boolean]] = codec.decode(bits)
    override def sizeBound: SizeBound                                    = SizeBound.exact(1L)
    override def toString: String                                        = "Boolean"
  }

  val duration: Codec[Duration] =
    ulong.xmap[Duration](l => Duration.apply(l, TimeUnit.NANOSECONDS), d => d.toNanos)

  val uri: Codec[URI] =
    codecs.utf8.xmap(str => new URI(str), _.toString)
}

object BasicCodecs extends BasicCodecs 
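The duration codec above simply stores a Duration as its nanosecond count (an unsigned long on the wire). A minimal sketch of that mapping, without the scodec plumbing:

import java.util.concurrent.TimeUnit
import scala.concurrent.duration.Duration

object DurationNanosSketch extends App {
  val original = Duration(1500, TimeUnit.MILLISECONDS)

  // Encode side of the codec: Duration -> Long nanoseconds.
  val nanos: Long = original.toNanos

  // Decode side of the codec: Long nanoseconds -> Duration.
  val decoded = Duration(nanos, TimeUnit.NANOSECONDS)

  println(nanos)               // 1500000000
  println(decoded == original) // true: 1500000000 nanoseconds equals 1500 milliseconds
}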
Example 61
Source File: BatchProducerIT.scala    From Scala-Programming-Projects   with MIT License 5 votes vote down vote up
package coinyser

import java.sql.Timestamp
import java.time.Instant
import java.util.concurrent.TimeUnit

import cats.effect.{IO, Timer}
import org.apache.spark.sql.test.SharedSparkSession
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.duration.FiniteDuration


class BatchProducerIT extends WordSpec with Matchers with SharedSparkSession {

  import testImplicits._

  "BatchProducer.save" should {
    "save a Dataset[Transaction] to parquet" in withTempDir { tmpDir =>
      val transaction1 = Transaction(timestamp = new Timestamp(1532365695000L), tid = 70683282, price = 7740.00, sell = false, amount = 0.10041719)
      val transaction2 = Transaction(timestamp = new Timestamp(1532365693000L), tid = 70683281, price = 7739.99, sell = false, amount = 0.00148564)
      val sourceDS = Seq(transaction1, transaction2).toDS()

      val uri = tmpDir.toURI
      BatchProducer.save(sourceDS, uri).unsafeRunSync()
      tmpDir.list() should contain("date=2018-07-23")
      val readDS = spark.read.parquet(uri.toString).as[Transaction]
      spark.read.parquet(uri + "/date=2018-07-23").show()
      sourceDS.collect() should contain theSameElementsAs readDS.collect()
    }
  }

  "BatchProducer.processOneBatch" should {
    "filter and save a batch of transaction, wait 59 mn, fetch the next batch" in withTempDir { tmpDir =>
      implicit object FakeTimer extends Timer[IO] {
        private var clockRealTimeInMillis: Long = Instant.parse("2018-08-02T01:00:00Z").toEpochMilli

        def clockRealTime(unit: TimeUnit): IO[Long] =
          IO(unit.convert(clockRealTimeInMillis, TimeUnit.MILLISECONDS))

        def sleep(duration: FiniteDuration): IO[Unit] = IO {
          clockRealTimeInMillis = clockRealTimeInMillis + duration.toMillis
        }

        def shift: IO[Unit] = ???

        def clockMonotonic(unit: TimeUnit): IO[Long] = ???
      }
      implicit val appContext: AppContext = new AppContext(transactionStorePath = tmpDir.toURI)

      implicit def toTimestamp(str: String): Timestamp = Timestamp.from(Instant.parse(str))
      val tx1 = Transaction("2018-08-01T23:00:00Z", 1, 7657.58, true, 0.021762)
      val tx2 = Transaction("2018-08-02T01:00:00Z", 2, 7663.85, false, 0.01385517)
      val tx3 = Transaction("2018-08-02T01:58:30Z", 3, 7663.85, false, 0.03782426)
      val tx4 = Transaction("2018-08-02T01:58:59Z", 4, 7663.86, false, 0.15750809)
      val tx5 = Transaction("2018-08-02T02:30:00Z", 5, 7661.49, true, 0.1)

      // Start at 01:00, tx 2 ignored (too soon)
      val txs0 = Seq(tx1)
      // Fetch at 01:59, get nb 2 and 3, but will miss nb 4 because of Api lag
      val txs1 = Seq(tx2, tx3)
      // Fetch at 02:58, get nb 3, 4, 5
      val txs2 = Seq(tx3, tx4, tx5)
      // Fetch at 03:57, get nothing
      val txs3 = Seq.empty[Transaction]

      val start0 = Instant.parse("2018-08-02T00:00:00Z")
      val end0 = Instant.parse("2018-08-02T00:59:55Z")
      val threeBatchesIO =
        for {
          tuple1 <- BatchProducer.processOneBatch(IO(txs1.toDS()), txs0.toDS(), start0, end0) // end - Api lag
          (ds1, start1, end1) = tuple1

          tuple2 <- BatchProducer.processOneBatch(IO(txs2.toDS()), ds1, start1, end1)
          (ds2, start2, end2) = tuple2

          _ <- BatchProducer.processOneBatch(IO(txs3.toDS()), ds2, start2, end2)
        } yield (ds1, start1, end1, ds2, start2, end2)

      val (ds1, start1, end1, ds2, start2, end2) = threeBatchesIO.unsafeRunSync()
      ds1.collect() should contain theSameElementsAs txs1
      start1 should ===(end0)
      end1 should ===(Instant.parse("2018-08-02T01:58:55Z")) // initialClock + 1mn - 15s - 5s

      ds2.collect() should contain theSameElementsAs txs2
      start2 should ===(end1)
      end2 should ===(Instant.parse("2018-08-02T02:57:55Z")) // initialClock + 1mn -15s + 1mn -15s -5s = end1 + 45s

      val lastClock = Instant.ofEpochMilli(
        FakeTimer.clockRealTime(TimeUnit.MILLISECONDS).unsafeRunSync())
      lastClock should === (Instant.parse("2018-08-02T03:57:00Z"))

      val savedTransactions = spark.read.parquet(tmpDir.toString).as[Transaction].collect()
      val expectedTxs = Seq(tx2, tx3, tx4, tx5)
      savedTransactions should contain theSameElementsAs expectedTxs
    }
  }


} 
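The FakeTimer above leans on TimeUnit.convert, which converts the given value from the source unit into the receiver's unit. A minimal sketch of that direction, the one subtle part of the API (values are illustrative):

import java.util.concurrent.TimeUnit

object ClockConversionSketch extends App {
  val clockRealTimeInMillis = 1533171600000L // 2018-08-02T01:00:00Z in epoch milliseconds

  // unit.convert(value, sourceUnit): value is expressed in sourceUnit, the result in `unit`.
  println(TimeUnit.SECONDS.convert(clockRealTimeInMillis, TimeUnit.MILLISECONDS)) // 1533171600
  println(TimeUnit.MILLISECONDS.convert(90, TimeUnit.SECONDS))                    // 90000
}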
Example 62
Source File: TaskLimiter.scala    From spark-tools   with Apache License 2.0 5 votes vote down vote up
package io.univalence.centrifuge.util

//from https://gist.github.com/alexandru/623fe6c587d73e89a8f14de284ca1e2d

import monix.eval.Task
import java.util.concurrent.TimeUnit
import scala.concurrent.duration._


// The enclosing object and the Timestamp alias are restored from the referenced gist so the
// excerpt compiles; a Timestamp here is a Unix time in milliseconds.
object TaskLimiter {
  type Timestamp = Long

  final case class State(window: Long, period: TimeUnit, requested: Int, limit: Int) {
    private def periodMillis =
      TimeUnit.MILLISECONDS.convert(1, period)

    def request(now: Timestamp): (Option[FiniteDuration], State) = {
      val periodMillis  = this.periodMillis
      val currentWindow = now / periodMillis

      if (currentWindow != window)
        (None, copy(window = currentWindow, requested = 1))
      else if (requested < limit)
        (None, copy(requested = requested + 1))
      else {
        val nextTS = (currentWindow + 1) * periodMillis
        val sleep  = nextTS - now
        (Some(sleep.millis), this)
      }
    }
  }
} 
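A minimal usage sketch of the rate-limiter state above. The timestamps are hand-picked for illustration, and the import assumes the enclosing TaskLimiter object restored in the excerpt:

import java.util.concurrent.TimeUnit
import io.univalence.centrifuge.util.TaskLimiter.State

object TaskLimiterSketch extends App {
  val now = 1532365695000L // some wall-clock time in milliseconds
  val initial = State(window = 0L, period = TimeUnit.SECONDS, requested = 0, limit = 2)

  val (sleep1, s1) = initial.request(now)      // first call in a new window: no sleep
  val (sleep2, s2) = s1.request(now + 10)      // second call, still under the limit
  val (sleep3, _)  = s2.request(now + 20)      // limit of 2 reached: caller must back off

  println((sleep1, sleep2)) // (None, None)
  println(sleep3.isDefined) // true, with the delay until the next one-second window
}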
Example 63
Source File: GangliaSink.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.util.Properties
import java.util.concurrent.TimeUnit

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.ganglia.GangliaReporter
import info.ganglia.gmetric4j.gmetric.GMetric
import info.ganglia.gmetric4j.gmetric.GMetric.UDPAddressingMode

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

class GangliaSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val GANGLIA_KEY_PERIOD = "period"
  val GANGLIA_DEFAULT_PERIOD = 10

  val GANGLIA_KEY_UNIT = "unit"
  val GANGLIA_DEFAULT_UNIT: TimeUnit = TimeUnit.SECONDS

  val GANGLIA_KEY_MODE = "mode"
  val GANGLIA_DEFAULT_MODE: UDPAddressingMode = GMetric.UDPAddressingMode.MULTICAST

  // TTL for multicast messages. If listeners are X hops away in network, must be at least X.
  val GANGLIA_KEY_TTL = "ttl"
  val GANGLIA_DEFAULT_TTL = 1

  val GANGLIA_KEY_HOST = "host"
  val GANGLIA_KEY_PORT = "port"

  val GANGLIA_KEY_DMAX = "dmax"
  val GANGLIA_DEFAULT_DMAX = 0

  def propertyToOption(prop: String): Option[String] = Option(property.getProperty(prop))

  if (!propertyToOption(GANGLIA_KEY_HOST).isDefined) {
    throw new Exception("Ganglia sink requires 'host' property.")
  }

  if (!propertyToOption(GANGLIA_KEY_PORT).isDefined) {
    throw new Exception("Ganglia sink requires 'port' property.")
  }

  val host = propertyToOption(GANGLIA_KEY_HOST).get
  val port = propertyToOption(GANGLIA_KEY_PORT).get.toInt
  val ttl = propertyToOption(GANGLIA_KEY_TTL).map(_.toInt).getOrElse(GANGLIA_DEFAULT_TTL)
  val dmax = propertyToOption(GANGLIA_KEY_DMAX).map(_.toInt).getOrElse(GANGLIA_DEFAULT_DMAX)
  val mode: UDPAddressingMode = propertyToOption(GANGLIA_KEY_MODE)
    .map(u => GMetric.UDPAddressingMode.valueOf(u.toUpperCase)).getOrElse(GANGLIA_DEFAULT_MODE)
  val pollPeriod = propertyToOption(GANGLIA_KEY_PERIOD).map(_.toInt)
    .getOrElse(GANGLIA_DEFAULT_PERIOD)
  val pollUnit: TimeUnit = propertyToOption(GANGLIA_KEY_UNIT)
    .map(u => TimeUnit.valueOf(u.toUpperCase))
    .getOrElse(GANGLIA_DEFAULT_UNIT)

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val ganglia = new GMetric(host, port, mode, ttl)
  val reporter: GangliaReporter = GangliaReporter.forRegistry(registry)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .withDMax(dmax)
      .build(ganglia)

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
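All of the metric sinks in this and the following examples resolve their polling unit the same way: read the "unit" property and map it to a TimeUnit with TimeUnit.valueOf on the upper-cased name. A minimal sketch of just that step (property names mirror the sinks, the rest is illustrative):

import java.util.Properties
import java.util.concurrent.TimeUnit

object PollUnitSketch extends App {
  val props = new Properties()
  props.setProperty("unit", "seconds")

  // Same resolution logic as the sinks: use the configured value if present, else a default.
  val pollUnit: TimeUnit = Option(props.getProperty("unit"))
    .map(s => TimeUnit.valueOf(s.toUpperCase))
    .getOrElse(TimeUnit.SECONDS)

  println(pollUnit)                         // SECONDS
  println(TimeUnit.valueOf("MILLISECONDS")) // MILLISECONDS
  // TimeUnit.valueOf("millis") would throw IllegalArgumentException: the name must match the enum exactly.
}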
Example 64
Source File: ProcessingTimeExecutorSuite.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.streaming

import java.util.concurrent.{CountDownLatch, TimeUnit}

import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.streaming.ProcessingTime
import org.apache.spark.util.{Clock, ManualClock, SystemClock}

class ProcessingTimeExecutorSuite extends SparkFunSuite {

  test("nextBatchTime") {
    val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(100))
    assert(processingTimeExecutor.nextBatchTime(0) === 100)
    assert(processingTimeExecutor.nextBatchTime(1) === 100)
    assert(processingTimeExecutor.nextBatchTime(99) === 100)
    assert(processingTimeExecutor.nextBatchTime(100) === 200)
    assert(processingTimeExecutor.nextBatchTime(101) === 200)
    assert(processingTimeExecutor.nextBatchTime(150) === 200)
  }

  test("calling nextBatchTime with the result of a previous call should return the next interval") {
    val intervalMS = 100
    val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(intervalMS))

    val ITERATION = 10
    var nextBatchTime: Long = 0
    for (it <- 1 to ITERATION) {
      nextBatchTime = processingTimeExecutor.nextBatchTime(nextBatchTime)
    }

    // nextBatchTime should be 1000
    assert(nextBatchTime === intervalMS * ITERATION)
  }

  private def testBatchTermination(intervalMs: Long): Unit = {
    var batchCounts = 0
    val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(intervalMs))
    processingTimeExecutor.execute(() => {
      batchCounts += 1
      // If the batch termination works well, batchCounts should be 3 after `execute`
      batchCounts < 3
    })
    assert(batchCounts === 3)
  }

  test("batch termination") {
    testBatchTermination(0)
    testBatchTermination(10)
  }

  test("notifyBatchFallingBehind") {
    val clock = new ManualClock()
    @volatile var batchFallingBehindCalled = false
    val latch = new CountDownLatch(1)
    val t = new Thread() {
      override def run(): Unit = {
        val processingTimeExecutor = new ProcessingTimeExecutor(ProcessingTime(100), clock) {
          override def notifyBatchFallingBehind(realElapsedTimeMs: Long): Unit = {
            batchFallingBehindCalled = true
          }
        }
        processingTimeExecutor.execute(() => {
          latch.countDown()
          clock.waitTillTime(200)
          false
        })
      }
    }
    t.start()
    // Wait until the batch is running so that we don't call `advance` too early
    assert(latch.await(10, TimeUnit.SECONDS), "the batch has not yet started in 10 seconds")
    clock.advance(200)
    t.join()
    assert(batchFallingBehindCalled === true)
  }
} 
Example 65
Source File: ProcessingTimeSuite.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql

import java.util.concurrent.TimeUnit

import scala.concurrent.duration._

import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.streaming.ProcessingTime

class ProcessingTimeSuite extends SparkFunSuite {

  test("create") {
    assert(ProcessingTime(10.seconds).intervalMs === 10 * 1000)
    assert(ProcessingTime.create(10, TimeUnit.SECONDS).intervalMs === 10 * 1000)
    assert(ProcessingTime("1 minute").intervalMs === 60 * 1000)
    assert(ProcessingTime("interval 1 minute").intervalMs === 60 * 1000)

    intercept[IllegalArgumentException] { ProcessingTime(null: String) }
    intercept[IllegalArgumentException] { ProcessingTime("") }
    intercept[IllegalArgumentException] { ProcessingTime("invalid") }
    intercept[IllegalArgumentException] { ProcessingTime("1 month") }
    intercept[IllegalArgumentException] { ProcessingTime("1 year") }
  }
} 
Example 66
Source File: ContextWaiter.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming

import java.util.concurrent.TimeUnit
import java.util.concurrent.locks.ReentrantLock

private[streaming] class ContextWaiter {

  private val lock = new ReentrantLock()
  private val condition = lock.newCondition()

  // Guarded by "lock"
  private var error: Throwable = null

  // Guarded by "lock"
  private var stopped: Boolean = false

  def notifyError(e: Throwable): Unit = {
    lock.lock()
    try {
      error = e
      condition.signalAll()
    } finally {
      lock.unlock()
    }
  }

  def notifyStop(): Unit = {
    lock.lock()
    try {
      stopped = true
      condition.signalAll()
    } finally {
      lock.unlock()
    }
  }

  
  def waitForStopOrError(timeout: Long = -1): Boolean = {
    lock.lock()
    try {
      if (timeout < 0) {
        while (!stopped && error == null) {
          condition.await()
        }
      } else {
        var nanos = TimeUnit.MILLISECONDS.toNanos(timeout)
        while (!stopped && error == null && nanos > 0) {
          nanos = condition.awaitNanos(nanos)
        }
      }
      // If already had error, then throw it
      if (error != null) throw error
      // already stopped or timeout
      stopped
    } finally {
      lock.unlock()
    }
  }
} 
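A minimal usage sketch of the waiter above. ContextWaiter is private[streaming], so this assumes code living in the org.apache.spark.streaming package; the sketch object and the 200 ms delay are illustrative:

package org.apache.spark.streaming

object ContextWaiterSketch extends App {
  val waiter = new ContextWaiter

  // Simulate a stop signal arriving from another thread after 200 ms.
  new Thread() {
    override def run(): Unit = { Thread.sleep(200); waiter.notifyStop() }
  }.start()

  println(waiter.waitForStopOrError(timeout = 1000)) // true: stopped before the timeout
  println(waiter.waitForStopOrError(timeout = 50))   // true: already stopped, returns immediately
}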
Example 67
Source File: UIUtilsSuite.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.ui

import java.util.TimeZone
import java.util.concurrent.TimeUnit

import org.scalatest.Matchers

import org.apache.spark.SparkFunSuite

class UIUtilsSuite extends SparkFunSuite with Matchers{

  test("shortTimeUnitString") {
    assert("ns" === UIUtils.shortTimeUnitString(TimeUnit.NANOSECONDS))
    assert("us" === UIUtils.shortTimeUnitString(TimeUnit.MICROSECONDS))
    assert("ms" === UIUtils.shortTimeUnitString(TimeUnit.MILLISECONDS))
    assert("sec" === UIUtils.shortTimeUnitString(TimeUnit.SECONDS))
    assert("min" === UIUtils.shortTimeUnitString(TimeUnit.MINUTES))
    assert("hrs" === UIUtils.shortTimeUnitString(TimeUnit.HOURS))
    assert("days" === UIUtils.shortTimeUnitString(TimeUnit.DAYS))
  }

  test("normalizeDuration") {
    verifyNormalizedTime(900, TimeUnit.MILLISECONDS, 900)
    verifyNormalizedTime(1.0, TimeUnit.SECONDS, 1000)
    verifyNormalizedTime(1.0, TimeUnit.MINUTES, 60 * 1000)
    verifyNormalizedTime(1.0, TimeUnit.HOURS, 60 * 60 * 1000)
    verifyNormalizedTime(1.0, TimeUnit.DAYS, 24 * 60 * 60 * 1000)
  }

  private def verifyNormalizedTime(
      expectedTime: Double, expectedUnit: TimeUnit, input: Long): Unit = {
    val (time, unit) = UIUtils.normalizeDuration(input)
    time should be (expectedTime +- 1E-6)
    unit should be (expectedUnit)
  }

  test("convertToTimeUnit") {
    verifyConvertToTimeUnit(60.0 * 1000 * 1000 * 1000, 60 * 1000, TimeUnit.NANOSECONDS)
    verifyConvertToTimeUnit(60.0 * 1000 * 1000, 60 * 1000, TimeUnit.MICROSECONDS)
    verifyConvertToTimeUnit(60 * 1000, 60 * 1000, TimeUnit.MILLISECONDS)
    verifyConvertToTimeUnit(60, 60 * 1000, TimeUnit.SECONDS)
    verifyConvertToTimeUnit(1, 60 * 1000, TimeUnit.MINUTES)
    verifyConvertToTimeUnit(1.0 / 60, 60 * 1000, TimeUnit.HOURS)
    verifyConvertToTimeUnit(1.0 / 60 / 24, 60 * 1000, TimeUnit.DAYS)
  }

  private def verifyConvertToTimeUnit(
      expectedTime: Double, milliseconds: Long, unit: TimeUnit): Unit = {
    val convertedTime = UIUtils.convertToTimeUnit(milliseconds, unit)
    convertedTime should be (expectedTime +- 1E-6)
  }

  test("formatBatchTime") {
    val tzForTest = TimeZone.getTimeZone("America/Los_Angeles")
    val batchTime = 1431637480452L // Thu May 14 14:04:40 PDT 2015
    assert("2015/05/14 14:04:40" === UIUtils.formatBatchTime(batchTime, 1000, timezone = tzForTest))
    assert("2015/05/14 14:04:40.452" ===
      UIUtils.formatBatchTime(batchTime, 999, timezone = tzForTest))
    assert("14:04:40" === UIUtils.formatBatchTime(batchTime, 1000, false, timezone = tzForTest))
    assert("14:04:40.452" === UIUtils.formatBatchTime(batchTime, 999, false, timezone = tzForTest))
  }
} 
Example 68
Source File: CsvSink.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.io.File
import java.util.{Locale, Properties}
import java.util.concurrent.TimeUnit

import com.codahale.metrics.{CsvReporter, MetricRegistry}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class CsvSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val CSV_KEY_PERIOD = "period"
  val CSV_KEY_UNIT = "unit"
  val CSV_KEY_DIR = "directory"

  val CSV_DEFAULT_PERIOD = 10
  val CSV_DEFAULT_UNIT = "SECONDS"
  val CSV_DEFAULT_DIR = "/tmp/"

  val pollPeriod = Option(property.getProperty(CSV_KEY_PERIOD)) match {
    case Some(s) => s.toInt
    case None => CSV_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = Option(property.getProperty(CSV_KEY_UNIT)) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
    case None => TimeUnit.valueOf(CSV_DEFAULT_UNIT)
  }

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val pollDir = Option(property.getProperty(CSV_KEY_DIR)) match {
    case Some(s) => s
    case None => CSV_DEFAULT_DIR
  }

  val reporter: CsvReporter = CsvReporter.forRegistry(registry)
      .formatFor(Locale.US)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .build(new File(pollDir))

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
Example 69
Source File: MetricsServlet.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.util.Properties
import java.util.concurrent.TimeUnit
import javax.servlet.http.HttpServletRequest

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.json.MetricsModule
import com.fasterxml.jackson.databind.ObjectMapper
import org.eclipse.jetty.servlet.ServletContextHandler

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.ui.JettyUtils._

private[spark] class MetricsServlet(
    val property: Properties,
    val registry: MetricRegistry,
    securityMgr: SecurityManager)
  extends Sink {

  val SERVLET_KEY_PATH = "path"
  val SERVLET_KEY_SAMPLE = "sample"

  val SERVLET_DEFAULT_SAMPLE = false

  val servletPath = property.getProperty(SERVLET_KEY_PATH)

  val servletShowSample = Option(property.getProperty(SERVLET_KEY_SAMPLE)).map(_.toBoolean)
    .getOrElse(SERVLET_DEFAULT_SAMPLE)

  val mapper = new ObjectMapper().registerModule(
    new MetricsModule(TimeUnit.SECONDS, TimeUnit.MILLISECONDS, servletShowSample))

  def getHandlers(conf: SparkConf): Array[ServletContextHandler] = {
    Array[ServletContextHandler](
      createServletHandler(servletPath,
        new ServletParams(request => getMetricsSnapshot(request), "text/json"), securityMgr, conf)
    )
  }

  def getMetricsSnapshot(request: HttpServletRequest): String = {
    mapper.writeValueAsString(registry)
  }

  override def start() { }

  override def stop() { }

  override def report() { }
} 
Example 70
Source File: Slf4jSink.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.util.Properties
import java.util.concurrent.TimeUnit

import com.codahale.metrics.{MetricRegistry, Slf4jReporter}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class Slf4jSink(
    val property: Properties,
    val registry: MetricRegistry,
    securityMgr: SecurityManager)
  extends Sink {
  val SLF4J_DEFAULT_PERIOD = 10
  val SLF4J_DEFAULT_UNIT = "SECONDS"

  val SLF4J_KEY_PERIOD = "period"
  val SLF4J_KEY_UNIT = "unit"

  val pollPeriod = Option(property.getProperty(SLF4J_KEY_PERIOD)) match {
    case Some(s) => s.toInt
    case None => SLF4J_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = Option(property.getProperty(SLF4J_KEY_UNIT)) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
    case None => TimeUnit.valueOf(SLF4J_DEFAULT_UNIT)
  }

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val reporter: Slf4jReporter = Slf4jReporter.forRegistry(registry)
    .convertDurationsTo(TimeUnit.MILLISECONDS)
    .convertRatesTo(TimeUnit.SECONDS)
    .build()

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
Example 71
Source File: ConsoleSink.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.util.Properties
import java.util.concurrent.TimeUnit

import com.codahale.metrics.{ConsoleReporter, MetricRegistry}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class ConsoleSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val CONSOLE_DEFAULT_PERIOD = 10
  val CONSOLE_DEFAULT_UNIT = "SECONDS"

  val CONSOLE_KEY_PERIOD = "period"
  val CONSOLE_KEY_UNIT = "unit"

  val pollPeriod = Option(property.getProperty(CONSOLE_KEY_PERIOD)) match {
    case Some(s) => s.toInt
    case None => CONSOLE_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = Option(property.getProperty(CONSOLE_KEY_UNIT)) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
    case None => TimeUnit.valueOf(CONSOLE_DEFAULT_UNIT)
  }

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val reporter: ConsoleReporter = ConsoleReporter.forRegistry(registry)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .build()

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
Example 72
Source File: GraphiteSink.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.net.InetSocketAddress
import java.util.Properties
import java.util.concurrent.TimeUnit

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.graphite.{Graphite, GraphiteReporter, GraphiteUDP}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class GraphiteSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val GRAPHITE_DEFAULT_PERIOD = 10
  val GRAPHITE_DEFAULT_UNIT = "SECONDS"
  val GRAPHITE_DEFAULT_PREFIX = ""

  val GRAPHITE_KEY_HOST = "host"
  val GRAPHITE_KEY_PORT = "port"
  val GRAPHITE_KEY_PERIOD = "period"
  val GRAPHITE_KEY_UNIT = "unit"
  val GRAPHITE_KEY_PREFIX = "prefix"
  val GRAPHITE_KEY_PROTOCOL = "protocol"

  def propertyToOption(prop: String): Option[String] = Option(property.getProperty(prop))

  if (!propertyToOption(GRAPHITE_KEY_HOST).isDefined) {
    throw new Exception("Graphite sink requires 'host' property.")
  }

  if (!propertyToOption(GRAPHITE_KEY_PORT).isDefined) {
    throw new Exception("Graphite sink requires 'port' property.")
  }

  val host = propertyToOption(GRAPHITE_KEY_HOST).get
  val port = propertyToOption(GRAPHITE_KEY_PORT).get.toInt

  val pollPeriod = propertyToOption(GRAPHITE_KEY_PERIOD) match {
    case Some(s) => s.toInt
    case None => GRAPHITE_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = propertyToOption(GRAPHITE_KEY_UNIT) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
    case None => TimeUnit.valueOf(GRAPHITE_DEFAULT_UNIT)
  }

  val prefix = propertyToOption(GRAPHITE_KEY_PREFIX).getOrElse(GRAPHITE_DEFAULT_PREFIX)

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val graphite = propertyToOption(GRAPHITE_KEY_PROTOCOL).map(_.toLowerCase) match {
    case Some("udp") => new GraphiteUDP(new InetSocketAddress(host, port))
    case Some("tcp") | None => new Graphite(new InetSocketAddress(host, port))
    case Some(p) => throw new Exception(s"Invalid Graphite protocol: $p")
  }

  val reporter: GraphiteReporter = GraphiteReporter.forRegistry(registry)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .prefixedWith(prefix)
      .build(graphite)

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
Example 73
Source File: ConfigBuilder.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.internal.config

import java.util.concurrent.TimeUnit

import org.apache.spark.network.util.{ByteUnit, JavaUtils}

private object ConfigHelpers {

  def toNumber[T](s: String, converter: String => T, key: String, configType: String): T = {
    try {
      converter(s)
    } catch {
      case _: NumberFormatException =>
        throw new IllegalArgumentException(s"$key should be $configType, but was $s")
    }
  }

  def toBoolean(s: String, key: String): Boolean = {
    try {
      s.toBoolean
    } catch {
      case _: IllegalArgumentException =>
        throw new IllegalArgumentException(s"$key should be boolean, but was $s")
    }
  }

  def stringToSeq[T](str: String, converter: String => T): Seq[T] = {
    str.split(",").map(_.trim()).filter(_.nonEmpty).map(converter)
  }

  def seqToString[T](v: Seq[T], stringConverter: T => String): String = {
    v.map(stringConverter).mkString(",")
  }

  def timeFromString(str: String, unit: TimeUnit): Long = JavaUtils.timeStringAs(str, unit)

  def timeToString(v: Long, unit: TimeUnit): String = TimeUnit.MILLISECONDS.convert(v, unit) + "ms"

  def byteFromString(str: String, unit: ByteUnit): Long = {
    val (input, multiplier) =
      if (str.length() > 0 && str.charAt(0) == '-') {
        (str.substring(1), -1)
      } else {
        (str, 1)
      }
    multiplier * JavaUtils.byteStringAs(input, unit)
  }

  def byteToString(v: Long, unit: ByteUnit): String = unit.convertTo(v, ByteUnit.BYTE) + "b"

}


// The ConfigBuilder class declaration is elided in this excerpt; it is reconstructed here
// (the key plus the mutable builder fields used below) so the methods have an enclosing type.
private[spark] case class ConfigBuilder(key: String) {

  import ConfigHelpers._

  private[config] var _public = true
  private[config] var _doc = ""
  private[config] var _onCreate: Option[ConfigEntry[_] => Unit] = None

  def onCreate(callback: ConfigEntry[_] => Unit): ConfigBuilder = {
    _onCreate = Option(callback)
    this
  }

  def intConf: TypedConfigBuilder[Int] = {
    new TypedConfigBuilder(this, toNumber(_, _.toInt, key, "int"))
  }

  def longConf: TypedConfigBuilder[Long] = {
    new TypedConfigBuilder(this, toNumber(_, _.toLong, key, "long"))
  }

  def doubleConf: TypedConfigBuilder[Double] = {
    new TypedConfigBuilder(this, toNumber(_, _.toDouble, key, "double"))
  }

  def booleanConf: TypedConfigBuilder[Boolean] = {
    new TypedConfigBuilder(this, toBoolean(_, key))
  }

  def stringConf: TypedConfigBuilder[String] = {
    new TypedConfigBuilder(this, v => v)
  }

  def timeConf(unit: TimeUnit): TypedConfigBuilder[Long] = {
    new TypedConfigBuilder(this, timeFromString(_, unit), timeToString(_, unit))
  }

  def bytesConf(unit: ByteUnit): TypedConfigBuilder[Long] = {
    new TypedConfigBuilder(this, byteFromString(_, unit), byteToString(_, unit))
  }

  def fallbackConf[T](fallback: ConfigEntry[T]): ConfigEntry[T] = {
    new FallbackConfigEntry(key, _doc, _public, fallback)
  }

} 
Example 74
Source File: LauncherBackendSuite.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.launcher

import java.util.concurrent.TimeUnit

import scala.concurrent.duration._
import scala.language.postfixOps

import org.scalatest.Matchers
import org.scalatest.concurrent.Eventually._

import org.apache.spark._

class LauncherBackendSuite extends SparkFunSuite with Matchers {

  private val tests = Seq(
    "local" -> "local",
    "standalone/client" -> "local-cluster[1,1,1024]")

  tests.foreach { case (name, master) =>
    test(s"$name: launcher handle") {
      testWithMaster(master)
    }
  }

  private def testWithMaster(master: String): Unit = {
    val env = new java.util.HashMap[String, String]()
    env.put("SPARK_PRINT_LAUNCH_COMMAND", "1")
    val handle = new SparkLauncher(env)
      .setSparkHome(sys.props("spark.test.home"))
      .setConf(SparkLauncher.DRIVER_EXTRA_CLASSPATH, System.getProperty("java.class.path"))
      .setConf("spark.ui.enabled", "false")
      .setConf(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS, s"-Dtest.appender=console")
      .setMaster(master)
      .setAppResource(SparkLauncher.NO_RESOURCE)
      .setMainClass(TestApp.getClass.getName().stripSuffix("$"))
      .startApplication()

    try {
      eventually(timeout(30 seconds), interval(100 millis)) {
        handle.getAppId() should not be (null)
      }

      handle.stop()

      eventually(timeout(30 seconds), interval(100 millis)) {
        handle.getState() should be (SparkAppHandle.State.KILLED)
      }
    } finally {
      handle.kill()
    }
  }

}

object TestApp {

  def main(args: Array[String]): Unit = {
    new SparkContext(new SparkConf()).parallelize(Seq(1)).foreach { i =>
      Thread.sleep(TimeUnit.SECONDS.toMillis(20))
    }
  }

} 
Example 75
Source File: package.scala    From zio-metrics   with Apache License 2.0 5 votes vote down vote up
package zio.metrics.dropwizard

import zio.RIO
import zio.metrics.Label
import zio.metrics.dropwizard.reporters._
import com.codahale.metrics.{ Counter => DWCounter, Gauge => DWGauge }
import com.codahale.metrics.{ Histogram => DWHistogram, Meter => DWMeter }
import com.codahale.metrics.{ Timer => DWTimer }
import com.codahale.metrics.{ MetricRegistry, Reservoir, UniformReservoir }
import java.util.concurrent.TimeUnit

package object helpers {

  def getCurrentRegistry(): RIO[Registry, MetricRegistry] =
    RIO.accessM(_.get.getCurrent())

  def registerCounter(name: String, labels: Array[String]): RIO[Registry, DWCounter] =
    RIO.accessM(_.get.registerCounter(Label(name, labels)))

  def registerGauge[A](name: String, labels: Array[String], f: () => A): RIO[Registry, DWGauge[A]] =
    RIO.accessM(_.get.registerGauge[String, A](Label(name, labels), f))

  def registerTimer(name: String, labels: Array[String]): RIO[Registry, DWTimer] =
    RIO.accessM(_.get.registerTimer(Label(name, labels)))

  def registerMeter(name: String, labels: Array[String]): RIO[Registry, DWMeter] =
    RIO.accessM(_.get.registerMeter(Label(name, labels)))

  def registerHistogram(
    name: String,
    labels: Array[String],
    reservoir: Reservoir
  ): RIO[Registry, DWHistogram] =
    RIO.accessM(_.get.registerHistogram(Label(name, labels), reservoir))

  object counter {
    def register(name: String) = Counter(name, Array.empty[String])

    def register(name: String, labels: Array[String]) =
      Counter(name, labels)
  }

  object gauge {
    def register[A](name: String, f: () => A) =
      Gauge(name, Array.empty[String], f)

    def register[A](name: String, labels: Array[String], f: () => A) =
      Gauge(name, labels, f)
  }

  object timer {
    def register(name: String) = Timer(name, Array.empty[String])

    def register(name: String, labels: Array[String]) =
      Timer(name, labels)
  }

  object meter {
    def register(name: String) = Meter(name, Array.empty[String])

    def register(name: String, labels: Array[String]) =
      Meter(name, labels)
  }

  object histogram {
    def register(name: String) =
      Histogram(name, Array.empty[String], new UniformReservoir)

    def register(name: String, labels: Array[String]) =
      Histogram(name, labels, new UniformReservoir)

    def register(name: String, labels: Array[String], reservoir: Reservoir) =
      Histogram(name, labels, reservoir)
  }

  def jmx(r: MetricRegistry): RIO[Reporters, Unit] =
    RIO.accessM(
      dwr =>
        for {
          cr <- dwr.get.jmx(r)
        } yield cr.start()
    )

  def console(r: MetricRegistry, duration: Long, unit: TimeUnit): RIO[Reporters, Unit] =
    RIO.accessM(
      dwr =>
        for {
          cr <- dwr.get.console(r)
        } yield cr.start(duration, unit)
    )

} 
Example 76
Source File: package.scala    From zio-metrics   with Apache License 2.0 5 votes vote down vote up
package zio.metrics.dropwizard

import zio.{ Has, Layer, Task, ZLayer }
import java.util.concurrent.TimeUnit
import java.io.File
import java.util.Locale
import java.net.InetSocketAddress
import org.slf4j.LoggerFactory
import java.{ util => ju }

package object reporters {

  import com.codahale.metrics.MetricRegistry
  import com.codahale.metrics.MetricFilter
  import com.codahale.metrics.graphite.Graphite
  import com.codahale.metrics.graphite.GraphiteReporter
  import com.codahale.metrics.ConsoleReporter
  import com.codahale.metrics.Slf4jReporter
  import com.codahale.metrics.CsvReporter
  import com.codahale.metrics.jmx.JmxReporter
  import com.codahale.metrics.Reporter

  type Reporters = Has[Reporters.Service]

  object Reporters {
    trait Service {
      def jmx(r: MetricRegistry): Task[JmxReporter]

      def console(r: MetricRegistry): Task[ConsoleReporter]

      def slf4j(r: MetricRegistry, duration: Int, unit: TimeUnit, loggerName: String): Task[Slf4jReporter]

      def csv(r: MetricRegistry, file: File, locale: Locale): Task[Reporter]

      def graphite(r: MetricRegistry, host: String, port: Int, prefix: String): Task[GraphiteReporter]
    }

    val live: Layer[Nothing, Reporters] = ZLayer.succeed(new Service {

      def jmx(r: MetricRegistry): zio.Task[JmxReporter] = Task(JmxReporter.forRegistry(r).build())

      def console(r: MetricRegistry): Task[ConsoleReporter] = Task(
        ConsoleReporter
          .forRegistry(r)
          .convertRatesTo(TimeUnit.SECONDS)
          .convertDurationsTo(TimeUnit.MILLISECONDS)
          .build()
      )

      def slf4j(r: MetricRegistry, duration: Int, unit: TimeUnit, loggerName: String): Task[Slf4jReporter] =
        Task(
          Slf4jReporter
            .forRegistry(r)
            .outputTo(LoggerFactory.getLogger(loggerName))
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .build()
        )

      def csv(r: MetricRegistry, file: File, locale: ju.Locale): zio.Task[Reporter] = Task(
        CsvReporter
          .forRegistry(r)
          .formatFor(locale)
          .convertRatesTo(TimeUnit.SECONDS)
          .convertDurationsTo(TimeUnit.MILLISECONDS)
          .build(file)
      )

      def graphite(r: MetricRegistry, host: String, port: Int, prefix: String): zio.Task[GraphiteReporter] =
        Task {
          val graphite = new Graphite(new InetSocketAddress(host, port))
          GraphiteReporter
            .forRegistry(r)
            .prefixedWith(prefix)
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .filter(MetricFilter.ALL)
            .build(graphite)
        }
    })
  }
} 
Example 77
Source File: ReportersTest.scala    From zio-metrics   with Apache License 2.0 5 votes vote down vote up
package zio.metrics

import zio.metrics.dropwizard._
import zio.metrics.dropwizard.helpers._
import zio.metrics.dropwizard.reporters._
import zio.{ App, RIO, Runtime }
import java.util.concurrent.TimeUnit
import scala.concurrent.duration._
import zio.console._
import zio.duration.Duration
import com.codahale.metrics.MetricRegistry
import zio.ExitCode

object ReportersTest extends App {

  val rt = Runtime.unsafeFromLayer(Registry.live ++ Reporters.live)

  val tests: RIO[
    Registry with Reporters,
    MetricRegistry
  ] =
    for {
      r   <- getCurrentRegistry()
      _   <- jmx(r)
      _   <- console(r, 2, TimeUnit.SECONDS)
      c   <- counter.register(Show.fixClassName(DropwizardTest.getClass()), Array("test", "counter"))
      _   <- c.inc()
      _   <- c.inc(2.0)
      t   <- timer.register("DropwizardTimer", Array("test", "timer"))
      ctx <- t.start()
      _ <- RIO.foreach(
            List(
              Thread.sleep(1000L),
              Thread.sleep(1400L),
              Thread.sleep(1200L)
            )
          )(_ => t.stop(ctx))
    } yield r

  override def run(args: List[String]) = {
    println("Starting tests")
    val json = rt.unsafeRun(tests >>= (r => DropwizardExtractor.writeJson(r)(None)))
    RIO.sleep(Duration.fromScala(30.seconds))
    putStrLn(json.spaces2).map(_ => ExitCode.success)
  }
} 
Example 78
Source File: ServerTest.scala    From zio-metrics   with Apache License 2.0 5 votes vote down vote up
package zio.metrics

import zio.console.putStrLn
import zio.metrics.dropwizard._
import zio.metrics.dropwizard.Server._
import zio.metrics.dropwizard.helpers._
import zio.metrics.dropwizard.reporters._
import zio.{ App, RIO, Task }
import java.util.concurrent.TimeUnit
import scala.util.Properties.envOrNone
import zio.interop.catz._
import org.http4s.implicits._
import org.http4s.server.Router
import com.codahale.metrics.MetricRegistry
import zio.ExitCode

object ServerTest extends App {

  val port: Int = envOrNone("HTTP_PORT").fold(9090)(_.toInt)
  println(s"Starting server on port $port")

  val testServer: RIO[
    Registry with Reporters,
    MetricRegistry
  ] =
    for {
      r   <- getCurrentRegistry()
      _   <- jmx(r)
      _   <- helpers.console(r, 30, TimeUnit.SECONDS)
      c   <- counter.register(Show.fixClassName(DropwizardTest.getClass()), Array("test", "counter"))
      _   <- c.inc()
      _   <- c.inc(2.0)
      t   <- timer.register("DropwizardTimer", Array("test", "timer"))
      ctx <- t.start()
      _ <- RIO.foreach(
            List(
              Thread.sleep(1000L),
              Thread.sleep(1400L),
              Thread.sleep(1200L)
            )
          )(_ => t.stop(ctx))
    } yield r

  val httpApp = (registry: MetricRegistry) =>
    Router(
      "/metrics" -> Server.serveMetrics(registry)
    ).orNotFound

  override def run(args: List[String]) = {
    println("Starting tests")

    val kApp: Task[KleisliApp] = testServer
      .map(r => httpApp(r))
      .provideLayer(Registry.live ++ Reporters.live)

    val app: RIO[HttpEnvironment, Unit] = kApp >>= builder
    println(s"App: $app")

    app
      .catchAll(t => putStrLn(s"$t"))
      .run
      .map(r => { println(s"Exiting $r"); ExitCode.success })
  }
} 
Example 79
Source File: StatsDClientTest.scala    From zio-metrics   with Apache License 2.0 5 votes vote down vote up
package zio.metrics

import zio.{ Queue, RIO, Runtime, Schedule }
import zio.clock.Clock
import zio.console._
import zio.metrics.encoders._
import zio.metrics.statsd._
import zio.duration.Duration
import java.util.concurrent.TimeUnit

object StatsDClientTest {

  val rt = Runtime.unsafeFromLayer(Encoder.statsd ++ Console.live ++ Clock.live)

  val schd = Schedule.recurs(10)

  val client = StatsDClient()

  def program(r: Long)(implicit queue: Queue[Metric]) =
    for {
      clock <- RIO.environment[Clock]
      _     <- client.listen
      t1    <- clock.get.currentTime(TimeUnit.MILLISECONDS)
      _     <- client.increment("zmetrics.counter", 0.9)
      _     <- putStrLn(s"waiting for $r s") *> clock.get.sleep(Duration(r, TimeUnit.SECONDS))
      t2    <- clock.get.currentTime(TimeUnit.MILLISECONDS)
      _     <- client.timer("zmetrics.timer", (t2 - t1).toDouble, 0.9)
    } yield ()

  def main(args: Array[String]): Unit = {
    val timeouts = Seq(4L, 6L, 2L)
    rt.unsafeRun(
      client.queue >>= (
        q =>
          RIO
            .foreach(timeouts)(l => program(l)(q))
            .repeat(schd)
        )
    )
    Thread.sleep(10000)
  }

} 
Example 80
Source File: DogStatsDClientTest.scala    From zio-metrics   with Apache License 2.0 5 votes vote down vote up
package zio.metrics

import zio.{ Queue, RIO, Runtime, Schedule }
import zio.clock.Clock
import zio.console._
import java.util.concurrent.TimeUnit
import zio.duration.Duration
import zio.metrics.dogstatsd._
import zio.metrics.encoders._

object DogStatsDClientTest {

  val rt = Runtime.unsafeFromLayer(Encoder.dogstatsd ++ Console.live ++ Clock.live)

  val schd = Schedule.recurs(10)

  val client = DogStatsDClient()

  def program(r: Long)(implicit queue: Queue[Metric]) =
    for {
      clock <- RIO.environment[Clock]
      _     <- client.listen
      t1    <- clock.get.currentTime(TimeUnit.MILLISECONDS)
      _     <- client.increment("zmetrics.dog.counter", 0.9)
      _     <- putStrLn(s"waiting for $r ms") *> clock.get.sleep(Duration(r, TimeUnit.MILLISECONDS))
      t2    <- clock.get.currentTime(TimeUnit.MILLISECONDS)
      d     = (t2 - t1).toDouble
      _     <- client.timer("zmetrics.dog.timer", d, 0.9)
      _     <- client.histogram("zmetrics.dog.hist", d)
      _     <- client.serviceCheck("zmetrics.dog.check", ServiceCheckOk)
      _     <- client.event("zmetrics.dog.event", "something amazing happened")
    } yield ()

  def main(args: Array[String]): Unit = {
    val timeouts = Seq(34L, 76L, 52L)
    rt.unsafeRun(
      client.queue >>= (
        q =>
          RIO
            .foreach(timeouts)(l => program(l)(q))
            .repeat(schd)
        )
    )
    Thread.sleep(10000)
  }

} 
Example 81
Source File: IdentitiesArbitrary.scala    From crm-seed   with Apache License 2.0 5 votes vote down vote up
package com.dataengi.crm.identities.arbitraries

import java.util.UUID
import java.util.concurrent.TimeUnit

import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.impl.authenticators.JWTAuthenticator
import com.dataengi.crm.common.arbitraries.CommonArbitrary
import com.dataengi.crm.identities.models.Actions.Action
import com.dataengi.crm.identities.models.InviteStatuses.InviteStatus
import com.dataengi.crm.identities.models._
import com.dataengi.crm.identities.models.PermissionStates.PermissionState
import com.mohiva.play.silhouette.api.util.PasswordInfo
import org.joda.time.DateTime
import org.scalacheck.{Arbitrary, Gen}
import play.api.libs.json.Json

import scala.concurrent.duration.FiniteDuration

trait IdentitiesArbitrary extends CommonArbitrary {

  lazy val companyArbitrary: Arbitrary[Company] = Arbitrary(Gen.resultOf(Company))

  implicit val actionArbitrary: Arbitrary[Action]                   = Arbitrary(Gen.oneOf(Actions.values.toList))
  implicit val permissionStateArbitrary: Arbitrary[PermissionState] = Arbitrary(Gen.oneOf(PermissionStates.values.toList))
  implicit val permissionArbitrary: Arbitrary[Permission]           = Arbitrary(Gen.resultOf(Permission))
  implicit val roleArbitrary: Arbitrary[Role]                       = Arbitrary(Gen.resultOf(Role))
  implicit val inviteStatusArbitrary: Arbitrary[InviteStatus]       = Arbitrary(Gen.oneOf(InviteStatuses.values.toList))
  implicit val uuidArbitrary: Arbitrary[UUID]                       = Arbitrary(Gen.uuid)
  implicit val inviteArbitrary: Arbitrary[Invite]                   = Arbitrary(Gen.resultOf(Invite))

  val dateTimeGen = for {
    value <- Gen.Choose.chooseLong.choose(0, Long.MaxValue)
  } yield new DateTime(value)

  val finiteDurationGen = for {
    value <- Gen.Choose.chooseLong.choose(0, Long.MaxValue)
  } yield new FiniteDuration(value, TimeUnit.NANOSECONDS)

  val jsObject = Gen.oneOf(List(Some(Json.obj("a" -> "b")), None))

  implicit val jsObjectArbitrary       = Arbitrary(jsObject)
  implicit val dateTimeArbitrary       = Arbitrary(dateTimeGen)
  implicit val finiteDurationArbitrary = Arbitrary(finiteDurationGen)
  implicit val loginInfoArbitrary      = Arbitrary(Gen.resultOf(LoginInfo))
  implicit val authenticatorArbitrary  = Arbitrary(Gen.resultOf(JWTAuthenticator.apply _))
  implicit val passwordInfoArbitrary   = Arbitrary(Gen.resultOf(PasswordInfo))
} 
Example 82
Source File: RefineVBenchmark.scala    From refined   with MIT License 5 votes vote down vote up
package eu.timepit.refined.benchmark

import eu.timepit.refined.api.Refined
import eu.timepit.refined.numeric.Positive
import eu.timepit.refined.refineV
import eu.timepit.refined.string.Regex
import java.util.concurrent.TimeUnit
import org.openjdk.jmh.annotations.{Benchmark, BenchmarkMode, Mode, OutputTimeUnit}

@BenchmarkMode(Array(Mode.AverageTime))
class RefineVBenchmark {

  @Benchmark
  @OutputTimeUnit(TimeUnit.NANOSECONDS)
  def refineV_Positive: Either[String, Int Refined Positive] =
    refineV[Positive](1)

  @Benchmark
  @OutputTimeUnit(TimeUnit.NANOSECONDS)
  def refineV_Regex: Either[String, String Refined Regex] =
    refineV[Regex](".*")
} 
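These benchmarks are typically run through the sbt-jmh plugin (an assumption about the build setup, not stated in the source); a typical invocation limits iterations, warmups and forks, e.g.:

sbt "jmh:run -i 10 -wi 10 -f 1 .*RefineVBenchmark.*"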
Example 83
Source File: CollectionCache.scala    From incubator-s2graph   with Apache License 2.0 5 votes vote down vote up
package org.apache.s2graph.counter.util

import java.net.InetAddress
import java.util.concurrent.TimeUnit

import com.google.common.cache.{Cache, CacheBuilder}
import org.slf4j.LoggerFactory

import scala.concurrent.{ExecutionContext, Future}
import scala.language.{postfixOps, reflectiveCalls}

case class CollectionCacheConfig(maxSize: Int, ttl: Int, negativeCache: Boolean = false, negativeTTL: Int = 600)

class CollectionCache[C <: { def nonEmpty: Boolean; def isEmpty: Boolean } ](config: CollectionCacheConfig) {
  private val cache: Cache[String, C] = CacheBuilder.newBuilder()
    .expireAfterWrite(config.ttl, TimeUnit.SECONDS)
    .maximumSize(config.maxSize)
    .build[String, C]()

//  private lazy val cache = new SynchronizedLruMap[String, (C, Int)](config.maxSize)
  private lazy val className = this.getClass.getSimpleName

  private lazy val log = LoggerFactory.getLogger(this.getClass)
  val localHostname = InetAddress.getLocalHost.getHostName

  def size = cache.size
  val maxSize = config.maxSize

  // cache statistics
  def getStatsString: String = {
    s"$localHostname ${cache.stats().toString}"
  }

  def withCache(key: String)(op: => C): C = {
    Option(cache.getIfPresent(key)) match {
      case Some(r) => r
      case None =>
        val r = op
        if (r.nonEmpty || config.negativeCache) {
          cache.put(key, r)
        }
        r
    }
  }

  def withCacheAsync(key: String)(op: => Future[C])(implicit ec: ExecutionContext): Future[C] = {
    Option(cache.getIfPresent(key)) match {
      case Some(r) => Future.successful(r)
      case None =>
        op.map { r =>
          if (r.nonEmpty || config.negativeCache) {
            cache.put(key, r)
          }
          r
        }
    }
  }

  def purgeKey(key: String) = {
    cache.invalidate(key)
  }

  def contains(key: String): Boolean = {
    Option(cache.getIfPresent(key)).nonEmpty
  }
} 
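A minimal usage sketch for the cache above (the key and lookup function below are placeholders, not part of the source): withCache evaluates the expensive block only on a miss and caches non-empty results.

// Sketch: cache Seq[String] results for up to 60 seconds, at most 1000 entries
val cache = new CollectionCache[Seq[String]](CollectionCacheConfig(maxSize = 1000, ttl = 60))

def expensiveLookup(key: String): Seq[String] = Seq(key) // stand-in for a real lookup

val names = cache.withCache("user:42") {
  expensiveLookup("user:42") // evaluated only when "user:42" is not already cached
}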
Example 84
Source File: RankingCounter.scala    From incubator-s2graph   with Apache License 2.0 5 votes vote down vote up
package org.apache.s2graph.counter.core

import java.util.concurrent.TimeUnit

import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.typesafe.config.Config
import org.apache.s2graph.counter.core.RankingCounter.RankingValueMap
import org.apache.s2graph.counter.models.Counter
import org.apache.s2graph.counter.util.{CollectionCacheConfig, CollectionCache}
import org.slf4j.LoggerFactory
import scala.collection.JavaConversions._

case class RankingRow(key: RankingKey, value: Map[String, RankingValue])
case class RateRankingRow(key: RankingKey, value: Map[String, RateRankingValue])

class RankingCounter(config: Config, storage: RankingStorage) {
  private val log = LoggerFactory.getLogger(getClass)

  val storageStatusCache = new CollectionCache[Option[Boolean]](CollectionCacheConfig(1000, 60, negativeCache = false, 60))

  val cache: LoadingCache[RankingKey, RankingResult] = CacheBuilder.newBuilder()
    .maximumSize(1000000)
    .expireAfterWrite(10L, TimeUnit.MINUTES)
    .build(
      new CacheLoader[RankingKey, RankingResult]() {
        def load(rankingKey: RankingKey): RankingResult = {
//          log.warn(s"cache load: $rankingKey")
          storage.getTopK(rankingKey, Int.MaxValue).getOrElse(RankingResult(-1, Nil))
        }
      }
    )

  def getTopK(rankingKey: RankingKey, k: Int = Int.MaxValue): Option[RankingResult] = {
    val tq = rankingKey.eq.tq
    if (TimedQualifier.getQualifiers(Seq(tq.q), System.currentTimeMillis()).head == tq) {
      // do not use cache
      storage.getTopK(rankingKey, k)
    }
    else {
      val result = cache.get(rankingKey)
      if (result.values.nonEmpty) {
        Some(result.copy(values = result.values.take(k)))
      }
      else {
        None
      }
    }
  }

  def update(key: RankingKey, value: RankingValueMap, k: Int): Unit = {
    storage.update(key, value, k)
  }

  def update(values: Seq[(RankingKey, RankingValueMap)], k: Int): Unit = {
    storage.update(values, k)
  }

  def delete(key: RankingKey): Unit = {
    storage.delete(key)
  }

  def getAllItems(keys: Seq[RankingKey], k: Int = Int.MaxValue): Seq[String] = {
    val oldKeys = keys.filter(key => TimedQualifier.getQualifiers(Seq(key.eq.tq.q), System.currentTimeMillis()).head != key.eq.tq)
    val cached = cache.getAllPresent(oldKeys)
    val missed = keys.diff(cached.keys.toSeq)
    val found = storage.getTopK(missed, k)

//    log.warn(s"cached: ${cached.size()}, missed: ${missed.size}")

    for {
      (key, result) <- found
    } {
      cache.put(key, result)
    }

    for {
      (key, RankingResult(totalScore, values)) <- cached ++ found
      (item, score) <- values
    } yield {
      item
    }
  }.toSeq.distinct

  def prepare(policy: Counter): Unit = {
    storage.prepare(policy)
  }

  def destroy(policy: Counter): Unit = {
    storage.destroy(policy)
  }

  def ready(policy: Counter): Boolean = {
    storageStatusCache.withCache(s"${policy.id}") {
      Some(storage.ready(policy))
    }.getOrElse(false)
  }
}

object RankingCounter {
  type RankingValueMap = Map[String, RankingValue]
} 
Example 85
Source File: QueueActor.scala    From incubator-s2graph   with Apache License 2.0 5 votes vote down vote up
package org.apache.s2graph.rest.play.actors

import java.util.concurrent.TimeUnit

import akka.actor._
import org.apache.s2graph.core.ExceptionHandler._
import org.apache.s2graph.core.utils.logger
import org.apache.s2graph.core.{ExceptionHandler, S2Graph, GraphElement}
import org.apache.s2graph.rest.play.actors.Protocol.FlushAll
import org.apache.s2graph.rest.play.config.Config
import play.api.Play.current
import play.api.libs.concurrent.Akka

import scala.collection.mutable
import scala.concurrent.duration.Duration

object Protocol {

  case object Flush

  case object FlushAll

}

object QueueActor {
  
  var router: ActorRef = _

  //    Akka.system.actorOf(props(), name = "queueActor")
  def init(s2: S2Graph, walLogHandler: ExceptionHandler) = {
    router = Akka.system.actorOf(props(s2, walLogHandler))
  }

  def shutdown() = {
    router ! FlushAll
    Akka.system.shutdown()
    Thread.sleep(Config.ASYNC_HBASE_CLIENT_FLUSH_INTERVAL * 2)
  }

  def props(s2: S2Graph, walLogHandler: ExceptionHandler): Props = Props(classOf[QueueActor], s2, walLogHandler)
}

class QueueActor(s2: S2Graph, walLogHandler: ExceptionHandler) extends Actor with ActorLogging {

  import Protocol._

  implicit val ec = context.system.dispatcher
  //  logger.error(s"QueueActor: $self")
  val queue = mutable.Queue.empty[GraphElement]
  var queueSize = 0L
  val maxQueueSize = Config.LOCAL_QUEUE_ACTOR_MAX_QUEUE_SIZE
  val timeUnitInMillis = 10
  val rateLimitTimeStep = 1000 / timeUnitInMillis
  val rateLimit = Config.LOCAL_QUEUE_ACTOR_RATE_LIMIT / rateLimitTimeStep


  context.system.scheduler.schedule(Duration.Zero, Duration(timeUnitInMillis, TimeUnit.MILLISECONDS), self, Flush)

  override def receive: Receive = {
    case element: GraphElement =>

      if (queueSize > maxQueueSize) {
        walLogHandler.enqueue(toKafkaMessage(Config.KAFKA_FAIL_TOPIC, element, None))
      } else {
        queueSize += 1L
        queue.enqueue(element)
      }

    case Flush =>
      val elementsToFlush =
        if (queue.size < rateLimit) queue.dequeueAll(_ => true)
        else (0 until rateLimit).map(_ => queue.dequeue())

      val flushSize = elementsToFlush.size

      queueSize -= elementsToFlush.length
      s2.mutateElements(elementsToFlush)

      if (flushSize > 0) {
        logger.info(s"flush: $flushSize, $queueSize")
      }

    case FlushAll =>
      s2.mutateElements(queue)
      context.stop(self)

    case _ => logger.error("unknown protocol")
  }
} 
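A rough usage sketch (the S2Graph instance, wal-log handler and element below are assumptions, reusing the imports above): after init, elements are enqueued by sending them to the router, and shutdown flushes whatever remains.

// Sketch only: s2 and walLogHandler are assumed to have been built elsewhere
QueueActor.init(s2, walLogHandler)

// elements are buffered and mutated in rate-limited batches on each Flush tick
QueueActor.router ! graphElement

// FlushAll drains the queue before the actor system shuts down
QueueActor.shutdown()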
Example 86
Source File: DEXExtension.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.grpc.integration

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

import com.wavesplatform.dex.grpc.integration.services._
import com.wavesplatform.dex.grpc.integration.settings.DEXExtensionSettings
import com.wavesplatform.extensions.{Extension, Context => ExtensionContext}
import com.wavesplatform.utils.ScorexLogging
import io.grpc.Server
import io.grpc.netty.NettyServerBuilder
import monix.execution.{ExecutionModel, Scheduler}
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.ArbitraryTypeReader._
import net.ceedubs.ficus.readers.NameMapper

import scala.concurrent.Future

class DEXExtension(context: ExtensionContext) extends Extension with ScorexLogging {

  @volatile
  private var server: Server                            = _
  private var apiService: WavesBlockchainApiGrpcService = _

  implicit val chosenCase: NameMapper = net.ceedubs.ficus.readers.namemappers.implicits.hyphenCase
  implicit private val apiScheduler: Scheduler = Scheduler(
    ec = context.actorSystem.dispatchers.lookup("akka.actor.waves-dex-grpc-scheduler"),
    executionModel = ExecutionModel.AlwaysAsyncExecution
  )

  override def start(): Unit = {
    val settings    = context.settings.config.as[DEXExtensionSettings]("waves.dex.grpc.integration")
    val bindAddress = new InetSocketAddress(settings.host, settings.port)
    apiService = new WavesBlockchainApiGrpcService(context, settings.balanceChangesBatchLinger)
    server = NettyServerBuilder
      .forAddress(bindAddress)
      .permitKeepAliveWithoutCalls(true)
      .permitKeepAliveTime(500, TimeUnit.MILLISECONDS)
      .addService(WavesBlockchainApiGrpc.bindService(apiService, apiScheduler))
      .build()
      .start()

    log.info(s"gRPC DEX extension was bound to $bindAddress")
  }

  override def shutdown(): Future[Unit] = {
    log.info("Shutting down gRPC DEX extension")
    if (server != null) server.shutdownNow()
    Future.successful(())
  }
} 
Example 87
Source File: OrderBookAddBenchmark.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.model.orderbook

import java.util.concurrent.TimeUnit

import com.wavesplatform.dex.domain.order.{Order, OrderType}
import com.wavesplatform.dex.model.orderbook.OrderBookAddBenchmark._
import com.wavesplatform.dex.model.state.OrderBookBenchmarkState
import com.wavesplatform.dex.model.{AcceptedOrder, OrderBook}
import org.openjdk.jmh.annotations._
import org.openjdk.jmh.infra.Blackhole
import org.scalacheck.Gen

import scala.collection.JavaConverters._

@OutputTimeUnit(TimeUnit.NANOSECONDS)
@BenchmarkMode(Array(Mode.AverageTime))
@Threads(4)
@Fork(1)
@Warmup(iterations = 10)
@Measurement(iterations = 10)
class OrderBookAddBenchmark {
  @Benchmark def add_0_plus_1250_test(st: Add_0_To_1250_State, bh: Blackhole): Unit       = bh.consume { st.run() }
  @Benchmark def add_1250_plus_1250_test(st: Add_1250_To_1250_State, bh: Blackhole): Unit = bh.consume { st.run() }
}

object OrderBookAddBenchmark {

  @State(Scope.Thread) class Add_0_To_1250_State    extends AddState(initOrderNumber = 0, orderNumberToAdd = 1250)
  @State(Scope.Thread) class Add_1250_To_1250_State extends AddState(initOrderNumber = 1250, orderNumberToAdd = 1250)

  sealed abstract class AddState(initOrderNumber: Int, orderNumberToAdd: Int) extends OrderBookBenchmarkState {
    val maxPrice = 1000L * Order.PriceConstant
    val minPrice = 1L * Order.PriceConstant
    val priceGen = Gen.chooseNum(minPrice, maxPrice)

    val askGen = orderGen(priceGen, OrderType.SELL)
    val bidGen = orderGen(priceGen, OrderType.BUY)

    val orderBook: OrderBook = ordersGen(initOrderNumber).sample.get.foldLeft(OrderBook.empty)(_.add(_, ts, getMakerTakerFee).orderBook)

    val orders: List[AcceptedOrder] = ordersGen(orderNumberToAdd).sample.get

    def run(): OrderBook = orders.foldLeft(OrderBook.empty) {
      case (r, o) => r.add(o, ts, getMakerTakerFee).orderBook
    }

    def ordersGen(orderNumber: Int): Gen[List[AcceptedOrder]] =
      for {
        orderSides <- Gen.listOfN(orderNumber, orderSideGen)
        orders <- Gen.sequence {
          orderSides.map { side =>
            val orderGen = if (side == OrderType.SELL) askGen else bidGen
            Gen.oneOf(limitOrderGen(orderGen), marketOrderGen(orderGen))
          }
        }
      } yield orders.asScala.toList
  }

} 
Example 88
Source File: OrderBookCancelBenchmark.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.model.orderbook

import java.util.concurrent.{ThreadLocalRandom, TimeUnit}

import com.wavesplatform.dex.domain.order.Order
import com.wavesplatform.dex.model.OrderBook
import com.wavesplatform.dex.model.orderbook.OrderBookCancelBenchmark._
import com.wavesplatform.dex.model.state.OrderBookBenchmarkState
import org.openjdk.jmh.annotations._
import org.openjdk.jmh.infra.Blackhole
import org.scalacheck.Gen

import scala.util.Random

@OutputTimeUnit(TimeUnit.NANOSECONDS)
@BenchmarkMode(Array(Mode.AverageTime))
@Threads(4)
@Fork(1)
@Warmup(iterations = 10)
@Measurement(iterations = 10)
class OrderBookCancelBenchmark {
//  @Benchmark def cancel_2500_to_1250_test(st: Cancel_2500_To_1250_State, bh: Blackhole): Unit = bh.consume { st.run() }
  @Benchmark def cancel_1250_to_0_test(st: Cancel_1250_To_0_State, bh: Blackhole): Unit       = bh.consume { st.run() }
}

object OrderBookCancelBenchmark {

  @State(Scope.Thread) class Cancel_2500_To_1250_State extends CancelState(initOrderNumber = 2500, orderNumberAfterCancel = 1250)
  @State(Scope.Thread) class Cancel_1250_To_0_State    extends CancelState(initOrderNumber = 1250, orderNumberAfterCancel = 0)

  sealed abstract class CancelState(initOrderNumber: Int, orderNumberAfterCancel: Int) extends OrderBookBenchmarkState {
    private val askPricesMin = 1000L * Order.PriceConstant
    private val askPricesMax = 2000L * Order.PriceConstant

    private val bidPricesMin = 1L * Order.PriceConstant
    private val bidPricesMax = 999L * Order.PriceConstant

    val orderBookGen: Gen[OrderBook] = fixedSidesOrdersGen(
      levelNumber = initOrderNumber / 2,
      orderNumberInLevel = 2,
      askPricesGen = Gen.choose(askPricesMin, askPricesMax),
      bidPricesGen = Gen.choose(bidPricesMin, bidPricesMax)
    ).map(Function.tupled(mkOrderBook))

    val orderBook: OrderBook = orderBookGen.sample.get
    val orders: Seq[Order.Id] = {
      val xs = orderBook.allOrders.map(_.order.id()).toVector
      new Random(ThreadLocalRandom.current()).shuffle(xs).take(initOrderNumber - orderNumberAfterCancel)
    }

    def run(): OrderBook = orders.foldLeft(orderBook) { case (r, id) => r.cancel(id, ts)._1 }
  }

} 
Example 89
Source File: TAC.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.example2.trustaccountcreation

import java.util.concurrent.TimeUnit

import akka.actor.Status.Failure
import akka.actor.{ActorSystem, Props, ActorPath}
import no.nextgentel.oss.akkatools.aggregate._
import no.nextgentel.oss.akkatools.example2.other.{DoCreateTrustAccount, DoPerformESigning, DoSendEmailToCustomer}

import scala.concurrent.duration.FiniteDuration

class TACAggregate
(
  dmSelf:ActorPath,
  eSigningSystem:ActorPath,
  emailSystem:ActorPath,
  trustAccountSystem:ActorPath
) extends GeneralAggregateDMViaEvent[TACEvent, TACState](dmSelf) {

  override def persistenceIdBase() = TACAggregate.persistenceIdBase

  // Override this one to set different timeout
  override def idleTimeout() = FiniteDuration(60, TimeUnit.SECONDS)

  override var state = TACState.empty() // This is the state of our initial state (empty)

  // transform command to event
  override def cmdToEvent = {
    case c:CreateNewTACCmd        =>
      ResultingEvent( RegisteredEvent(c.info) )
        .onSuccess{ sender() ! "ok" }
        .onError{   (e) => sender() ! Failure(new Exception(s"Failed: $e"))}

    case c:ESigningFailedCmd      => ResultingEvent( ESigningFailedEvent() )
    case c:ESigningCompletedCmd   => ResultingEvent( ESigningCompletedEvent() )
    case c:CompletedCmd           => ResultingEvent( CreatedEvent(c.trustAccountId) )
    case c:DeclinedCmd            => ResultingEvent( DeclinedEvent(c.cause) )
  }

  override def generateDMs = {
    case e:RegisteredEvent  =>
      // We must send message to eSigningSystem
      val msg = DoPerformESigning(dispatchId, e.info.customerNo)
      ResultingDMs( msg, eSigningSystem)

    case e:ESigningCompletedEvent =>
      // ESigning is completed, so we should init creation of the TrustAccount
      val info = state.info.get
      val msg = DoCreateTrustAccount(dispatchId, info.customerNo, info.trustAccountType)
      ResultingDMs(msg, trustAccountSystem)


    case e:DeclinedEvent =>
      // The TrustAccountCreation-process failed - must notify customer
      val msg = DoSendEmailToCustomer(state.info.get.customerNo, s"Sorry.. TAC-failed: ${e.cause}")
      ResultingDMs(msg, emailSystem)

    case e:CreatedEvent =>
      // The TrustAccountCreation-process was success - must notify customer
      val msg = DoSendEmailToCustomer(state.info.get.customerNo, s"Your TrustAccount '${e.trustAccountId}' has been created!")
      ResultingDMs(msg, emailSystem)

  }
}

object TACAggregate {

  val persistenceIdBase = "TAC-"

  def props(dmSelf:ActorPath,
            eSigningSystem:ActorPath,
            emailSystem:ActorPath,
            trustAccountSystem:ActorPath) = Props(new TACAggregate(dmSelf, eSigningSystem, emailSystem ,trustAccountSystem))
}


class TACStarter(system:ActorSystem) extends AggregateStarter("tac", system) with AggregateViewStarter {

  def config(eSigningSystem:ActorPath,
             emailSystem:ActorPath,
             trustAccountSystem:ActorPath):TACStarter = {
    setAggregatePropsCreator{
      dmSelf =>
        TACAggregate.props(dmSelf, eSigningSystem, emailSystem, trustAccountSystem)
    }
    this
  }

  override def createViewProps(aggregateId: String): Props =
    Props( new GeneralAggregateView[TACEvent, TACState](TACAggregate.persistenceIdBase, aggregateId, TACState.empty(), true))
} 
Example 90
Source File: SeedNodesListOrderingResolver.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.cluster

import java.util.concurrent.TimeUnit

import org.slf4j.LoggerFactory

import scala.concurrent.duration.FiniteDuration

// Must be used together with ClusterListener
object SeedNodesListOrderingResolver {
  val log = LoggerFactory.getLogger(getClass)
  def resolveSeedNodesList(repo:ClusterNodeRepo, clusterConfig:AkkaClusterConfig, maxAliveAge:FiniteDuration = FiniteDuration(20, TimeUnit.SECONDS)):AkkaClusterConfig = {

    val ourNode = clusterConfig.thisHostnameAndPort()

    // Since we're starting up, just make sure that we do not find info about ourself from our last run
    log.debug(s"removeClusterNodeAlive for $ourNode")
    repo.removeClusterNodeAlive(ourNode)

    val allSeedNodes = clusterConfig.seedNodes

    val weAreSeedNode = allSeedNodes.contains(ourNode)
    if ( !weAreSeedNode) {
      log.info("We are NOT a seedNode")
    }

    val aliveNodes = repo.findAliveClusterNodes(maxAliveAge, onlyJoined = false).map {
      node =>
        // alive nodes are listed on this form:
        //    akka.tcp://SomeAkkaSystem@host1:9999
        // We must remove everything before hostname:port
        val index = node.indexOf('@')
        if ( index >= 0) node.substring(index+1) else node
    }

    val seedNodeListToUse = if ( aliveNodes.isEmpty ) {
      if (weAreSeedNode) {
        val allNodesExceptOur = allSeedNodes.filter(n => n != ourNode)
        val list = List(ourNode) ++ allNodesExceptOur

        log.info("No other clusterNodes found as alive - We must be first seed node - seedNodeListToUse: " + list)
        list
      } else {
        log.info("No other clusterNodes found as alive - Since we're not a seedNode, we're using the list as is - seedNodeListToUse: " + allSeedNodes)
        allSeedNodes
      }
    } else {

      if (weAreSeedNode) {
        val allNodesExceptOurAndAliveOnes = allSeedNodes.filter(n => n != ourNode && !aliveNodes.contains(n))

        val list = aliveNodes ++ List(ourNode) ++ allNodesExceptOurAndAliveOnes

        log.info("Found other alive clusterNodes - we should not be first seed node. Alive cluster nodes: " + aliveNodes.mkString(",") + " - seedNodeListToUse: " + list)
        list
      } else {
        val allNodesExceptAliveOnes = allSeedNodes.filter(n => !aliveNodes.contains(n))

        val list = aliveNodes ++ allNodesExceptAliveOnes

        log.info("Found other alive clusterNodes - Alive cluster nodes: " + aliveNodes.mkString(",") + " - seedNodeListToUse: " + list)
        list

      }
    }

    clusterConfig.withSeedNodeList(seedNodeListToUse)
  }
} 
Example 91
Source File: ActorCache.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.utils

import java.util.concurrent.TimeUnit

import akka.actor._
import com.google.common.cache._

import scala.concurrent.duration.{FiniteDuration, Duration}
import scala.reflect.ClassTag

case class CheckCache()
case class ForwardToCachedActor[K](key:K, msg:AnyRef)

object ActorCache {
  def props[K:ClassTag](cacheLoader:(K)=>Props, expireAfter: Duration = Duration(2, TimeUnit.MINUTES)) = Props(new ActorCache[K](cacheLoader, expireAfter))
}

class ActorCache[K:ClassTag](cacheLoader:(K)=>Props, expireAfter: Duration) extends Actor with ActorLogging {

  implicit val ec = context.dispatcher

  val removalListener = new RemovalListener[AnyRef, ActorRef] {
    override def onRemoval(notification: RemovalNotification[AnyRef, ActorRef]): Unit = {
      val key = notification.getKey.asInstanceOf[K]
      log.debug("Stopping actor for " + key)
      val actor = notification.getValue
      actor ! PoisonPill
    }
  }

  val realCachLoader = new CacheLoader[AnyRef,ActorRef] {
    override def load(key: AnyRef): ActorRef = {
      log.debug("Creating actor for " + key)
      val props:Props = cacheLoader(key.asInstanceOf[K])
      context.actorOf( props )
    }
  }

  val cache:LoadingCache[AnyRef, ActorRef] = CacheBuilder.newBuilder
    .expireAfterAccess(expireAfter.toMillis, TimeUnit.MILLISECONDS)
    .removalListener(removalListener)
    .build(realCachLoader)

  val waitPeriode = FiniteDuration.apply(expireAfter.toMillis / 2, TimeUnit.MILLISECONDS)

  scheduleNextCacheCheck()

  def scheduleNextCacheCheck(): Unit = {
    context.system.scheduler.scheduleOnce(waitPeriode, self, CheckCache())
  }

  def receive = {
    case CheckCache() => {
      cache.cleanUp()
      scheduleNextCacheCheck()
    }
    case ForwardToCachedActor(key:K, msg) =>
      try {
        val actor = cache.get(key.asInstanceOf[AnyRef])
        log.debug("Forwarding message for " + key + " to " + actor)
        actor forward msg
      } catch {
        case e:Exception =>
          log.error(e, "Error forwarding message (with key "+key+") " + msg)
      }
    case x:AnyRef =>
      log.warning("Droping unknown msg: " + x)
  }

  @throws(classOf[Exception])
  override def postStop(): Unit = {
    super.postStop()
    cache.invalidateAll()
    cache.cleanUp()
  }

} 
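As an illustration (a sketch; WorkerActor, the actor system and the key are assumptions, imports as in the example above): one child actor is created per key and evicted roughly two minutes after its last access.

// Sketch: route messages to a per-key child actor created on demand
class WorkerActor(key: String) extends Actor {
  def receive = { case msg => println(s"$key got $msg") }
}

val cacheRef = system.actorOf(ActorCache.props[String](key => Props(new WorkerActor(key))))

// creates (or reuses) the actor for "job-1" and forwards the message to it
cacheRef ! ForwardToCachedActor("job-1", "do-work")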
Example 92
Source File: AggregateStateGetter.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.testing

import java.util.concurrent.TimeUnit

import akka.actor.{ActorRef, ActorSystem}
import akka.pattern.ask
import akka.util.Timeout
import no.nextgentel.oss.akkatools.persistence.GetState

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag

object AggregateStateGetter {
  val defaultTimeout = Duration("60s")

  def apply[S:ClassTag](aggregateActorRef:ActorRef, timeout:Duration = defaultTimeout)(implicit system:ActorSystem):AggregateStateGetter[S] = new AggregateStateGetter[S](system, aggregateActorRef, timeout)
}

import AggregateStateGetter._

class AggregateStateGetter[S:ClassTag](system:ActorSystem, aggregateActorRef:ActorRef, timeout:Duration) {

  def getState():S = getState(None)
  def getState(aggregateId:Option[String]):S = {
    implicit val ec = system.dispatcher
    implicit val t = Timeout(timeout.toMillis, TimeUnit.MILLISECONDS)
    val getStateMsg = aggregateId match {
      case Some(id) => GetState(id)
      case None     => GetState()
    }
    val f = ask(aggregateActorRef, getStateMsg).mapTo[S]
    Await.result(f, timeout)
  }

}

class AggregateStateGetterJava(system:ActorSystem, aggregateActorRef:ActorRef, timeout:Duration)
  extends AggregateStateGetter[Any](system, aggregateActorRef, timeout) {

  def this(system:ActorSystem, aggregateActorRef:ActorRef) = this(system, aggregateActorRef, defaultTimeout)
} 
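A brief sketch of how the getter might be used in a test (the actor system, aggregate ref and placeholder state type are assumptions):

// Sketch: synchronously fetch an aggregate's current state (default 60s timeout)
case class MyState(count: Int)

implicit val system: ActorSystem = ActorSystem("test")
val getter = AggregateStateGetter[MyState](aggregateActorRef) // aggregateActorRef: the aggregate under test
val state: MyState = getter.getState(Some("aggregate-id-1"))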
Example 93
Source File: ActorWithDMSupportTest.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.persistence

import java.util.concurrent.TimeUnit

import akka.actor.{Props, ActorSystem}
import akka.testkit.{TestProbe, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Matchers, FunSuiteLike}

import scala.concurrent.duration.FiniteDuration

class ActorWithDMSupportTest(_system:ActorSystem) extends TestKit(_system) with FunSuiteLike with Matchers with BeforeAndAfterAll with BeforeAndAfter {
  def this() = this(ActorSystem("ActorWithDMSupportTest", ConfigFactory.load("application-test.conf")))

  test("success with dm") {
    val a = system.actorOf(Props(new TestActorWithDMSupport()))
    val s = TestProbe()

    // send raw
    s.send(a, "sendok")
    s.expectMsg("ok")

    // send via dm and withNewPayload
    val dm = DurableMessage(1L, "sendok", s.ref.path)
    s.send(a, dm)
    s.expectMsg(dm.withNewPayload("ok"))

    // send raw - do nothing
    s.send(a, "silent")


    // send silent - wait for configm
    s.send(a, DurableMessage(1L, "silent", s.ref.path))
    s.expectMsg( DurableMessageReceived(1,None) )


    // send noconfirm - with dm
    s.send(a, DurableMessage(1L, "no-confirm", s.ref.path))
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

    // send noconfirm - with dm
    s.send(a, DurableMessage(1L, "no-confirm-custom", s.ref.path))
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

    // send noconfirm - without dm
    s.send(a, "no-confirm")
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

    // send noconfirm - without dm
    s.send(a, "no-confirm-custom")
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

  }


}

class TestActorWithDMSupport extends ActorWithDMSupport {
  // All raw messages or payloads in DMs are passed to this function.
  override def receivePayload = {
    case "sendok" =>
      send(sender.path, "ok")
    case "silent" =>
      ()
    case "no-confirm" =>
      throw new LogWarningAndSkipDMConfirmException("something went wrong")
    case "no-confirm-custom" =>
      throw new CustomLogWarningAndSkipDMConfirm()
  }
}

class CustomLogWarningAndSkipDMConfirm extends Exception("") with LogWarningAndSkipDMConfirm 
Example 94
Source File: Master.scala    From asyspark   with MIT License 5 votes vote down vote up
package org.apache.spark.asyspark.core

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Address, Props, Terminated}
import akka.util.Timeout
import com.typesafe.config.Config
import com.typesafe.scalalogging.slf4j.StrictLogging
import org.apache.spark.asyspark.core.messages.master.{ClientList, RegisterClient, RegisterServer, ServerList}

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}



class Master extends Actor with ActorLogging {

  // registered parameter servers and clients, watched so they can be removed on termination
  var servers = Set.empty[ActorRef]
  var clients = Set.empty[ActorRef]

  override def receive: Receive = {
    case RegisterServer(server) =>
      log.info(s"Registering server ${server.path.toString}")
      println("register server")
      servers += server
      context.watch(server)
      sender ! true

    case RegisterClient(client)  =>
      log.info(s"Registering client ${sender.path.toString}")
      clients += client
      context.watch(client)
      sender ! true

    case ServerList() =>
      log.info(s"Sending current server list to ${sender.path.toString}")
      sender ! servers.toArray

    case ClientList() =>
      log.info(s"Sending current client list to ${sender.path.toString}")
      sender ! clients.toArray


    case Terminated(actor) =>
      actor match {
        case server: ActorRef if servers contains server =>
          log.info(s"Removing server ${server.path.toString}")
          servers -= server
        case client: ActorRef if clients contains client =>
          log.info(s"Removing client ${client.path.toString}")
          clients -= client
        case actor: ActorRef =>
          log.warning(s"Actor ${actor.path.toString} will be terminated for some unknown reason")
      }
  }

}

object Master extends StrictLogging {
  def run(config: Config): Future[(ActorSystem, ActorRef)] = {
    logger.debug("Starting master actor system")
    val system = ActorSystem(config.getString("asyspark.master.system"), config.getConfig("asyspark.master"))
    logger.debug("Starting master")
    val master = system.actorOf(Props[Master], config.getString("asyspark.master.name"))
    implicit val timeout = Timeout(config.getDuration("asyspark.master.startup-timeout", TimeUnit.MILLISECONDS) milliseconds)
    implicit val ec = ExecutionContext.Implicits.global
    val address = Address("akka.tcp", config.getString("asyspark.master.system"), config.getString("asyspark.master.host"),
    config.getString("asyspark.master.port").toInt)
    system.actorSelection(master.path.toSerializationFormat).resolveOne().map {
      case actor: ActorRef =>
        logger.debug("Master successfully started")
        (system, master)

    }
  }

} 
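A small usage sketch (the configuration keys are the ones read in run above; loading them from the default config file is an assumption):

// Sketch: start the master and report its address once the actor has been resolved
import com.typesafe.config.ConfigFactory
import scala.concurrent.ExecutionContext.Implicits.global

Master.run(ConfigFactory.load()).foreach { case (system, masterRef) =>
  println(s"Master started at ${masterRef.path}")
}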
Example 95
Source File: V1DaxClientConfigUtils.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.utils

import java.util.concurrent.TimeUnit

import com.amazon.dax.client.dynamodbv2.ClientConfig
import com.github.j5ik2o.akka.persistence.dynamodb.config.client.DynamoDBClientConfig

import scala.concurrent.duration.Duration

object V1DaxClientConfigUtils {

  def setup(dynamoDBClientConfig: DynamoDBClientConfig): ClientConfig = {
    import dynamoDBClientConfig.v1DaxClientConfig._
    val result = new ClientConfig()
    if (connectionTimeout != Duration.Zero)
      result
        .setConnectTimeout(
          connectionTimeout.toMillis,
          TimeUnit.MILLISECONDS
        )
    if (requestTimeout != Duration.Zero)
      result
        .setRequestTimeout(
          requestTimeout.toMillis,
          TimeUnit.MILLISECONDS
        )
    if (healthCheckTimeout != Duration.Zero)
      result.setHealthCheckTimeout(
        healthCheckTimeout.toMillis,
        TimeUnit.MILLISECONDS
      )
    if (healthCheckInterval != Duration.Zero)
      result.setHealthCheckInterval(
        healthCheckInterval.toMillis,
        TimeUnit.MILLISECONDS
      )
    if (idleConnectionTimeout != Duration.Zero)
      result.setIdleConnectionTimeout(
        idleConnectionTimeout.toMillis,
        TimeUnit.MILLISECONDS
      )
    result.setMinIdleConnectionSize(
      minIdleConnectionSize
    )

    result.setWriteRetries(
      writeRetries
    )
    result
      .setMaxPendingConnectsPerHost(
        maxPendingConnectionsPerHost
      )
    result.setReadRetries(
      readRetries
    )
    if (threadKeepAlive != Duration.Zero)
      result
        .setThreadKeepAlive(
          threadKeepAlive.toMillis,
          TimeUnit.MILLISECONDS
        )
    if (clusterUpdateInterval != Duration.Zero)
      result
        .setClusterUpdateInterval(
          clusterUpdateInterval.toMillis,
          TimeUnit.MILLISECONDS
        )
    if (clusterUpdateThreshold != Duration.Zero)
      result
        .setClusterUpdateThreshold(
          clusterUpdateThreshold.toMillis,
          TimeUnit.MILLISECONDS
        )
    if (maxRetryDelay != Duration.Zero)
      result
        .setMaxRetryDelay(
          maxRetryDelay.toMillis,
          TimeUnit.MILLISECONDS
        )
    result.setUnhealthyConsecutiveErrorCount(
      unhealthyConsecutiveErrorCount
    )
    result
  }
} 
Example 96
Source File: KafkaSink.scala    From spark-kafka-sink   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.util.{ Properties, Locale }
import java.util.concurrent.TimeUnit

import org.slf4j.Logger
import org.slf4j.LoggerFactory

import com.codahale.metrics.MetricRegistry
import org.apache.spark.SecurityManager

import com.manyangled.kafkasink.KafkaReporter

class KafkaSink(val properties: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends org.apache.spark.metrics.sink.Sink {

  val logger: Logger = LoggerFactory.getLogger(this.getClass)

  private def popt(prop: String): Option[String] =
    Option(properties.getProperty(prop))

  // These are non-negotiable
  val broker = popt("broker").get
  val topic = popt("topic").get

  lazy val reporter = new KafkaReporter(registry, broker, topic, properties)

  def start(): Unit = {
    logger.info(s"Starting Kafka metric reporter at $broker, topic $topic")
    val period = popt("period").getOrElse("10").toLong
    val tstr = popt("unit").getOrElse("seconds").toUpperCase(Locale.ROOT)
    val tunit = TimeUnit.valueOf(tstr)
    reporter.start(period, tunit)
  }

  def stop(): Unit = {
    logger.info(s"Stopping Kafka metric reporter at $broker, topic $topic")
    reporter.stop()
  }

  def report(): Unit = {
    logger.info(s"Reporting metrics to Kafka reporter at $broker, topic $topic")
    reporter.report()
  }
} 
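For context, a sink like this is normally registered through Spark's metrics configuration; a hedged sketch follows (the property names mirror the popt lookups above, while the broker and topic values are placeholders):

# metrics.properties (sketch)
*.sink.kafka.class=org.apache.spark.metrics.sink.KafkaSink
*.sink.kafka.broker=localhost:9092
*.sink.kafka.topic=spark-metrics
*.sink.kafka.period=10
*.sink.kafka.unit=seconds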
Example 97
Source File: TSBucketDef.scala    From spark-riak-connector   with Apache License 2.0 5 votes vote down vote up
package com.basho.riak.spark.rdd

import com.basho.riak.client.core.query.timeseries.TableDefinition
import java.util.concurrent.TimeUnit
import org.apache.spark.sql.types.StructType
import com.basho.riak.spark.util.TSConversionUtil
case class TSBucketDef(
    tableDefinition: TableDefinition,
    quantum: Int,
    timeUnit: TimeUnit) {
  require(!tableDefinition.getPartitionKeyColumnDescriptions.isEmpty(), "TableDefinition should have partition keys")

  def apply(schema: StructType, name: String, quantum: Int, timeUnit: TimeUnit): TSBucketDef = {
    val tableDef = TSConversionUtil.asTableDef(name, schema)
    TSBucketDef(tableDef, quantum, timeUnit)
  }
} 
Example 98
Source File: ParallelExecutor.scala    From nyaya   with GNU Lesser General Public License v2.1 5 votes vote down vote up
package nyaya.test

import java.util.concurrent.{Callable, ExecutorService, Executors, Future, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger
import nyaya.gen.ThreadNumber
import nyaya.prop.Prop
import ParallelExecutor._
import PTest._
import Executor.{DataCtx, Data}

// TODO data SampleSize = TotalSamples(n) | Fn(qty|%, gensize|%) | PerWorker(sampleSize)

object ParallelExecutor {
  val defaultThreadCount = 1.max(Runtime.getRuntime.availableProcessors - 1)

  def merge[A](a: RunState[A], b: RunState[A]): RunState[A] = {
    val runs = a.runs max b.runs
    (a.success, b.success) match {
      case (false, true) => RunState(runs, a.result)
      case _             => RunState(runs, b.result)
    }
  }
}

case class ParallelExecutor(workers: Int = defaultThreadCount) extends Executor {

  val debugPrefixes = (0 until workers).toVector.map(i => s"Worker #$i: ")

  override def run[A](p: Prop[A], g: Data[A], S: Settings): RunState[A] = {
    val sss = {
      var rem = S.sampleSize.value
      var i = workers
      var v = Vector.empty[SampleSize]
      while(i > 0) {
        val p = rem / i
        v :+= SampleSize(p)
        rem -= p
        i -= 1
      }
      v
    }

    if (S.debug) {
      val szs = sss.map(_.value)
      println(s"Samples/Worker: ${szs.mkString("{", ",", "}")} = Σ${szs.sum}")
    }

    val ai = new AtomicInteger(0)
    def task(worker: Int) = mkTask {
      val dp = debugPrefixes(worker)
      val data = g(DataCtx(sss(worker), ThreadNumber(worker), S.seed, dp))
      testN(p, data, () => ai.incrementAndGet(), S)
    }
    runAsync2(workers, task)
  }

  override def prove[A](p: Prop[A], d: Domain[A], S: Settings): RunState[A] = {
    val threads = workers min d.size

    val ai = new AtomicInteger(0)
    def task(worker: Int) = mkTask {
      proveN(p, d, worker, threads, _ => ai.incrementAndGet, S)
    }
    runAsync2(threads, task)
  }

  private[this] def mkTask[A](f: => RunState[A]) = new Callable[RunState[A]] {
    override def call(): RunState[A] = f
  }

  private[this] def runAsync2[A](threads: Int, f: Int => Callable[RunState[A]]): RunState[A] =
    runAsync(es => (0 until threads).toList.map(es submit f(_)))

  private[this] def runAsync[A](start: ExecutorService => List[Future[RunState[A]]]): RunState[A] = {
    val es: ExecutorService = Executors.newFixedThreadPool(workers)
    val fs = start(es)
    es.shutdown()
    val rss = fs.map(_.get())
    es.awaitTermination(1, TimeUnit.MINUTES)
    rss.foldLeft(RunState.empty[A])(merge)
  }
} 
Example 99
Source File: MDCPropagatingDispatcherConfigurator.scala    From daf   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package it.gov.daf.common.monitoring

import java.util.concurrent.TimeUnit

import akka.dispatch._
import com.typesafe.config.Config
import org.slf4j.MDC

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.{Duration, FiniteDuration}


class MDCPropagatingDispatcher(_configurator: MessageDispatcherConfigurator,
                               id: String,
                               throughput: Int,
                               throughputDeadlineTime: Duration,
                               executorServiceFactoryProvider: ExecutorServiceFactoryProvider,
                               shutdownTimeout: FiniteDuration)
  extends Dispatcher(_configurator, id, throughput, throughputDeadlineTime, executorServiceFactoryProvider, shutdownTimeout ) {

  self =>

  override def prepare(): ExecutionContext = new ExecutionContext {
    // capture the MDC
    val mdcContext = MDC.getCopyOfContextMap
    //val parent = Thread.currentThread().getId

    def execute(r: Runnable) = self.execute(new Runnable {
      def run() = {
        // backup the callee MDC context
        val oldMDCContext = MDC.getCopyOfContextMap

        // Run the runnable with the captured context
        setContextMap(mdcContext)
        //println(s"setto ${Thread.currentThread().getId} - $mdcContext - from $parent")
        try {
          r.run()
        } finally {
          // restore the callee MDC context

          setContextMap(oldMDCContext)
          //println(s"ripristino ${Thread.currentThread().getId} - $oldMDCContext - from $parent")
        }
      }
    })
    def reportFailure(t: Throwable) = self.reportFailure(t)
  }

  private[this] def setContextMap(context: java.util.Map[String, String]):Unit = {
    if (context == null) {
      MDC.clear()
    } else {
      MDC.setContextMap(context)
    }
  }

} 
Example 100
Source File: KuduEventsHandlerSpec.scala    From daf   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package it.teamdigitale.storage

import java.io.File
import java.util.concurrent.TimeUnit

import org.apache.kudu.spark.kudu._
import it.teamdigitale.miniclusters.KuduMiniCluster
import it.teamdigitale.config.IotIngestionManagerConfig.KuduConfig
import it.teamdigitale.managers.IotIngestionManager
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
import it.gov.daf.iotingestion.event.Event
import it.teamdigitale.EventModel.{EventToKuduEvent, EventToStorableEvent}
import org.apache.logging.log4j.LogManager

import scala.util.{Failure, Success, Try}

class KuduEventsHandlerSpec extends FlatSpec with Matchers with BeforeAndAfterAll {

  val logger = LogManager.getLogger(this.getClass)
  val kuduCluster = new KuduMiniCluster()

  val metrics: Seq[Try[Event]] = Range(0,100).map(x => Success( Event(
    version = 1L,
    id = x + "metric",
    ts = System.currentTimeMillis() + x ,
    event_type_id = 0,
    location = "41.1260529:16.8692905",
    source = "http://domain/sensor/url",
    body = Option("""{"rowdata": "this json should contain row data"}""".getBytes()),
    event_subtype_id = Some("Via Cernaia(TO)"),
    attributes = Map("value" -> x.toString)
  )))

  val rdd = kuduCluster.sparkSession.sparkContext.parallelize(metrics)


  "KuduEventsHandler" should "store correctly data" in {

   val metricsRDD = rdd
      .map(event => EventToStorableEvent(event))
      .flatMap(e => e.toOption)
      .map(se => EventToKuduEvent(se)).flatMap(e => e.toOption)

    val metricsDF = kuduCluster.sparkSession.createDataFrame(metricsRDD)

    val kuduConfig = KuduConfig(kuduCluster.kuduMiniCluster.getMasterAddresses, "TestEvents", 2)

    KuduEventsHandler.getOrCreateTable(kuduCluster.kuduContext, kuduConfig)
    KuduEventsHandler.write(metricsDF, kuduCluster.kuduContext, kuduConfig)

    val df = kuduCluster.sparkSession.sqlContext
      .read
      .options(Map("kudu.master" -> kuduConfig.masterAdresses,"kudu.table" -> kuduConfig.eventsTableName))
      .kudu

    df.count shouldBe 100

  }

  "KuduEventsHandler" should "handle redundant data" in {

    val metricsRDD = rdd
      .map(event => EventToStorableEvent(event))
      .flatMap(e => e.toOption)
      .map(se => EventToKuduEvent(se))
      .flatMap(e => e.toOption)

    val metricsDF = kuduCluster.sparkSession.createDataFrame(metricsRDD)

    val kuduConfig = KuduConfig(kuduCluster.kuduMiniCluster.getMasterAddresses, "TestEventsDuplicate", 2)
    KuduEventsHandler.getOrCreateTable(kuduCluster.kuduContext, kuduConfig)

    KuduEventsHandler.write(metricsDF, kuduCluster.kuduContext, kuduConfig)
    KuduEventsHandler.write(metricsDF, kuduCluster.kuduContext, kuduConfig)

    val df = kuduCluster.sparkSession.sqlContext
      .read
      .options(Map("kudu.master" -> kuduConfig.masterAdresses,"kudu.table" -> kuduConfig.eventsTableName))
      .kudu

    df.count shouldBe 100

  }

  override def beforeAll() {
    kuduCluster.start()
  }

  override def afterAll() {
    kuduCluster.stop() // shut the mini cluster down once all tests have run
  }

} 
Example 101
Source File: SlackRtmClientTest.scala    From slack-scala-client   with MIT License 5 votes vote down vote up
package slack

import java.util.concurrent.{CountDownLatch, TimeUnit}

import slack.api.SlackApiClient
import slack.models.Reply
import slack.rtm.SlackRtmClient

import scala.concurrent.duration._
import scala.concurrent.{Await, Promise}
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers

class SlackRtmClientTest extends AnyFunSuite with Matchers with Credentials {

  rtmToken match {
    case Some(slackToken) =>

      val channel = system.settings.config.getString("test.channel")

      lazy val rtmClient = {
        val rtm = SlackRtmClient(slackToken)
        assert(rtm.state.self.id != null)
        rtm
      }
      test("rtm typing") {
        rtmClient.indicateTyping(channel)
      }

      test("team domain") {
        val domain = rtmClient.state.team.domain
        val name = rtmClient.state.team.name
        domain should be(system.settings.config.getString("test.team.domain"))
        name should be(system.settings.config.getString("test.team.name"))
      }

      test("send message and parse reply") {
        val latch = new CountDownLatch(1)
        val promise = Promise[Long]()
        rtmClient.onEvent {
          case r: Reply =>
            assert(r.reply_to.equals(Await.result(promise.future, 2.seconds)))
            latch.countDown()
          case e => println("EVENT >>>>> " + e)
        }
        val messageIdFuture = rtmClient.sendMessage(channel, "Hi there")
        promise.completeWith(messageIdFuture)
        latch.await(5, TimeUnit.SECONDS)
      }

      ignore("edit message as bot") {
        val rtmApi = SlackApiClient(slackToken)
        val future = rtmApi.updateChatMessage(channel, "1465891701.000006", "edit-x", asUser = Some(true))
        val result = Await.result(future, 5.seconds)
        assert(result.ok.equals(true))
      }

    case _ =>
      println("Skipping the test as the API credentials are not available")

  }
} 
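
For quick reference, a minimal standalone sketch of the client calls exercised in the test above. The channel id and the environment variable holding the token are placeholders, and an implicit ActorSystem is assumed to be required by SlackRtmClient(...), as the Credentials trait supplies one in the test.

import akka.actor.ActorSystem
import slack.models.Reply
import slack.rtm.SlackRtmClient

object RtmSketch extends App {
  implicit val system: ActorSystem = ActorSystem("slack")

  val token = sys.env("SLACK_BOT_TOKEN")   // assumption: token supplied via the environment
  val channel = "C12345678"                // hypothetical channel id

  val rtm = SlackRtmClient(token)
  rtm.indicateTyping(channel)

  rtm.onEvent {
    case r: Reply => println(s"server acknowledged message ${r.reply_to}")
    case other    => println(s"EVENT >>>>> $other")
  }

  rtm.sendMessage(channel, "Hi there")
}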
Example 102
package org.zalando.benchmarks

import java.util.concurrent.TimeUnit

import akka.actor._
import com.softwaremill.macwire.wire
import org.openjdk.jmh.annotations._

import scala.concurrent.duration._
import scala.util.Random

object ComputationFollowedByAsyncPublishing {
  implicit val system = ActorSystem()

  val publishDuration = 15 millis
  val numTasks = 20000
  val numTokensToConsume = 5000000L // eats about 9-10 ms on my box

  val actors   = wire[Actors]
  val futures  = wire[Futures]
  val blocking = wire[Blocking]
  val streams  = wire[Streams]
  val rx       = wire[RxScala]

  def printResult(rs: Seq[PublishResult]): Unit = println(rs map computeResult sum)

  def computeResult(r: PublishResult): Int =
    r.result.result + r.result.job.payload(Random.nextInt(r.result.job.payload length))

  def numWorkers(coreFactor: Int): Int = Runtime.getRuntime.availableProcessors * coreFactor
}

@BenchmarkMode(Array(Mode.AverageTime))
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@State(Scope.Benchmark)
class ComputationFollowedByAsyncPublishing {
  import ComputationFollowedByAsyncPublishing._

  @Benchmark def bmActors():          Unit =   actors benchmark  1          // <= 109 threads ("Live peak" in JVisualVM)
  @Benchmark def bmParallelFutures(): Unit =  futures benchmark  1          // <=  44 threads ("Live peak" in JVisualVM)
  @Benchmark def bmBlocking():        Unit = blocking benchmark 64          // <= 549 threads ("Live peak" in JVisualVM)
  @Benchmark def bmStreams():         Unit =  streams benchmark  1          // <=  52 threads ("Live peak" in JVisualVM)
  @Benchmark def bmRxScala():         Unit =        rx benchmark 1          // <= 50 threads

  @TearDown def tearDown(): Unit = system.terminate()
} 
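
Besides the sbt-jmh plugin, benchmarks like the one above can be launched programmatically through JMH's own runner. A minimal sketch, assuming the classes were compiled with JMH's annotation processor so the generated benchmark metadata is on the classpath:

import org.openjdk.jmh.runner.Runner
import org.openjdk.jmh.runner.options.OptionsBuilder

object RunBenchmarks {
  def main(args: Array[String]): Unit = {
    val opts = new OptionsBuilder()
      .include("ComputationFollowedByAsyncPublishing")  // regex over benchmark names
      .forks(1)                                         // one fork keeps the run short
      .build()
    new Runner(opts).run()
  }
}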
Example 103
Source File: Server.scala    From glint   with MIT License 5 votes vote down vote up
package glint

import java.util.concurrent.TimeUnit

import akka.actor._
import akka.pattern.ask
import akka.util.Timeout
import com.typesafe.config.Config
import com.typesafe.scalalogging.slf4j.StrictLogging
import glint.messages.master.RegisterServer

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}


// Note: the enclosing declaration was dropped by the example extractor; a plausible
// companion object wrapping run() is assumed and restored here.
private[glint] object Server extends StrictLogging {

  def run(config: Config): Future[(ActorSystem, ActorRef)] = {

    logger.debug(s"Starting actor system ${config.getString("glint.server.system")}")
    val system = ActorSystem(config.getString("glint.server.system"), config.getConfig("glint.server"))

    logger.debug("Starting server actor")
    val server = system.actorOf(Props[Server], config.getString("glint.server.name"))

    logger.debug("Reading master information from config")
    val masterHost = config.getString("glint.master.host")
    val masterPort = config.getInt("glint.master.port")
    val masterName = config.getString("glint.master.name")
    val masterSystem = config.getString("glint.master.system")

    logger.info(s"Registering with master ${masterSystem}@${masterHost}:${masterPort}/user/${masterName}")
    implicit val ec = ExecutionContext.Implicits.global
    implicit val timeout = Timeout(config.getDuration("glint.server.registration-timeout", TimeUnit.MILLISECONDS) milliseconds)
    val master = system.actorSelection(s"akka.tcp://${masterSystem}@${masterHost}:${masterPort}/user/${masterName}")
    val registration = master ? RegisterServer(server)

    registration.map {
      case a =>
        logger.info("Server successfully registered with master")
        (system, server)
    }

  }
} 
Example 104
Source File: TokenizerWrapper.scala    From dbpedia-spotlight-model   with Apache License 2.0 5 votes vote down vote up
package org.dbpedia.spotlight.db.concurrent

import java.io.IOException
import java.util.concurrent.TimeUnit

import akka.actor.SupervisorStrategy.Restart
import akka.actor.{Actor, ActorSystem, OneForOneStrategy, Props}
import akka.pattern.ask
import akka.routing.SmallestMailboxRouter
import akka.util
import org.apache.commons.lang.NotImplementedException
import org.dbpedia.spotlight.db.model.{StringTokenizer, TextTokenizer}
import org.dbpedia.spotlight.model.{Text, Token}

import scala.concurrent.Await



class TokenizerWrapper(val tokenizers: Seq[TextTokenizer]) extends TextTokenizer {

  var requestTimeout = 60

  val system = ActorSystem()
  val workers = tokenizers.map { case tokenizer: TextTokenizer =>
    system.actorOf(Props(new TokenizerActor(tokenizer)))
  }.seq

  def size: Int = tokenizers.size

  val router = system.actorOf(Props[TokenizerActor].withRouter(
    // This might be a hack
    SmallestMailboxRouter(scala.collection.immutable.Iterable(workers:_*)).withSupervisorStrategy(
      OneForOneStrategy(maxNrOfRetries = 10) {
        case _: IOException => Restart
      })
  )
  )

  implicit val timeout = util.Timeout(requestTimeout, TimeUnit.SECONDS)

  override def tokenizeMaybe(text: Text) {
    val futureResult = router ? TokenizerRequest(text)
    Await.result(futureResult, timeout.duration)
  }

  override def tokenize(text: Text): List[Token] = {
    tokenizeMaybe(text)
    text.featureValue[List[Token]]("tokens").get
  }

  def tokenizeRaw(text: String): Seq[String] = {
    throw new NotImplementedException()
  }

  def close() {
    system.shutdown()
  }

  def getStringTokenizer: StringTokenizer = tokenizers.head.getStringTokenizer

}

class TokenizerActor(val tokenizer: TextTokenizer) extends Actor {

  def receive = {
    case TokenizerRequest(text) => {
      try {
        sender ! tokenizer.tokenizeMaybe(text)

      } catch {
        case e: NullPointerException => throw new IOException("Could not tokenize.")
      }
    }
  }

}


case class TokenizerRequest(text: Text) 
Example 105
Source File: MonadlessTaskSpec.scala    From monadless   with Apache License 2.0 5 votes vote down vote up
package io.monadless.monix

import java.util.concurrent.TimeUnit

import org.scalatest.MustMatchers

import io.monadless.impl.TestSupport
import monix.eval.Task
import monix.execution.Cancelable
import monix.execution.schedulers.ReferenceScheduler

class MonadlessTaskSpec
  extends org.scalatest.FreeSpec
  with MustMatchers
  with MonadlessTask
  with TestSupport[Task] {

  implicit val s = new ReferenceScheduler {
    def scheduleOnce(initialDelay: Long, unit: TimeUnit, r: Runnable) = {
      r.run()
      Cancelable.empty
    }
    def execute(command: Runnable) = command.run()
    def executionModel = monix.execution.ExecutionModel.SynchronousExecution
    def reportFailure(t: Throwable): Unit = {}
  }

  def get[T](f: Task[T]) =
    f.runSyncMaybe.right.get

  def fail[T]: T = throw new Exception

  val one = Task(1)
  val two = Task(2)

  "apply" in
    runLiftTest(1) {
      1
    }

  "collect" in
    runLiftTest(3) {
      unlift(one) + unlift(two)
    }

  "map" in
    runLiftTest(2) {
      unlift(one) + 1
    }

  "flatMap" in
    runLiftTest(3) {
      val a = unlift(one)
      a + unlift(two)
    }

  "rescue" - {
    "success" in
      runLiftTest(1) {
        try unlift(one)
        catch {
          case e: Throwable => unlift(two)
        }
      }
    "failure" in
      runLiftTest(1) {
        try fail[Int]
        catch {
          case e: Exception => unlift(one)
        }
      }
  }

  "ensure" - {
    "success" in
      runLiftTest(1) {
        var i = 0
        def c() = i += 1
        try unlift(one)
        finally {
          c()
        }
        i
      }
    "failure" in
      runLiftTest(1) {
        var i = 0
        def c() = i += 1
        try {
          try unlift(one) / fail[Int]
          finally {
            c()
          }
        } catch {
          case e: Exception => 1
        }
        i
      }
  }
} 
Example 106
Source File: ProcessInterpreter.scala    From Linkis   with Apache License 2.0 5 votes vote down vote up
package com.webank.wedatasphere.linkis.engine.Interpreter

import java.io.{BufferedReader, InputStreamReader, PrintWriter}
import java.util.concurrent.TimeUnit

import com.webank.wedatasphere.linkis.common.utils.{Logging, Utils}
import com.webank.wedatasphere.linkis.engine.spark.common.{LineBufferedStream, Starting, State, _}
import com.webank.wedatasphere.linkis.scheduler.executer.{ErrorExecuteResponse, ExecuteResponse, SuccessExecuteResponse}
import org.apache.commons.io.IOUtils
import org.json4s._

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}


abstract class ProcessInterpreter(process: Process) extends Interpreter with Logging {

  implicit val executor: ExecutionContext = ExecutionContext.global

  protected[this] var _state: State = Starting()

  protected[this] val stdin = new PrintWriter(process.getOutputStream)
  protected[this] val stdout = new BufferedReader(new InputStreamReader(process.getInputStream()), 1)
  protected[this] val errOut = new LineBufferedStream(process.getErrorStream())

  override def state: State = _state

  override def execute(code: String): ExecuteResponse = {
    if(code == "sc.cancelAllJobs" || code == "sc.cancelAllJobs()") {
      sendExecuteRequest(code)
    }
    _state match {
      case (Dead() | ShuttingDown() | Error() | Success()) =>
        throw new IllegalStateException("interpreter is not running")
      case Idle() =>
        require(state == Idle())
        code match {
          case "SHUTDOWN" =>
            sendShutdownRequest()
            close()
            ErrorExecuteResponse("shutdown",new Exception("shutdown"))
          case _ =>
            _state = Busy()
            sendExecuteRequest(code) match {
              case Some(rep) =>
                _state = Idle()
               // ExecuteComplete(rep)
                SuccessExecuteResponse()
              case None =>
                _state = Error()
                val errorMsg = errOut.lines.mkString(", ")
                throw new Exception(errorMsg)
            }
        }
      case _ => throw new IllegalStateException(s"interpreter is in ${_state} state, cannot do query.")
    }
  }

  Future {
    val exitCode = process.waitFor()
    if (exitCode != 0) {
      errOut.lines.foreach(println)
      println(getClass.getSimpleName+" has stopped with exit code " + process.exitValue)
      _state = Error()
    } else {
      println(getClass.getSimpleName+" has finished.")
      _state = Success()
    }
  }

  protected def waitUntilReady(): Unit

  protected def sendExecuteRequest(request: String): Option[JValue]

  protected def sendShutdownRequest(): Unit = {}


  override def close(): Unit = {
    val future = Future {
      _state match {
        case (Dead() | ShuttingDown() | Success()) =>
          Future.successful()
        case _ =>
          sendShutdownRequest()
      }
    }
    _state = Dead()
    IOUtils.closeQuietly(stdin)
    IOUtils.closeQuietly(stdout)
    errOut.close

    // Give ourselves 10 seconds to tear down the process.
    Utils.tryFinally(Await.result(future, Duration(10, TimeUnit.SECONDS))){
      process.destroy()}
  }

} 
Example 107
Source File: ContextWaiter.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming

import java.util.concurrent.TimeUnit
import java.util.concurrent.locks.ReentrantLock

private[streaming] class ContextWaiter {

  private val lock = new ReentrantLock()
  private val condition = lock.newCondition()

  // Guarded by "lock"
  private var error: Throwable = null

  // Guarded by "lock"
  private var stopped: Boolean = false

  def notifyError(e: Throwable): Unit = {
    lock.lock()
    try {
      error = e
      condition.signalAll()
    } finally {
      lock.unlock()
    }
  }

  def notifyStop(): Unit = {
    lock.lock()
    try {
      stopped = true
      condition.signalAll()
    } finally {
      lock.unlock()
    }
  }

  
  def waitForStopOrError(timeout: Long = -1): Boolean = {
    lock.lock()
    try {
      if (timeout < 0) {
        while (!stopped && error == null) {
          condition.await()
        }
      } else {
        var nanos = TimeUnit.MILLISECONDS.toNanos(timeout)
        while (!stopped && error == null && nanos > 0) {
          nanos = condition.awaitNanos(nanos)
        }
      }
      // If already had error, then throw it
      if (error != null) throw error
      // already stopped or timeout
      stopped
    } finally {
      lock.unlock()
    }
  }
} 
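
The waiter above is driven from two sides: one thread blocks in waitForStopOrError while another signals completion or failure. An illustrative sketch of that interplay (ContextWaiter is private[streaming], so outside Spark this only works against an equivalent copy of the class):

// Illustrative only: exercises the ContextWaiter API shown above.
val waiter = new ContextWaiter

val signaller = new Thread(() => {
  Thread.sleep(500)
  waiter.notifyStop()   // or waiter.notifyError(new RuntimeException("boom"))
})
signaller.start()

// Blocks until notifyStop/notifyError is called or the 5 s timeout elapses;
// returns true if stopped, false on timeout, and rethrows any reported error.
val stopped = waiter.waitForStopOrError(timeout = 5000)
println(s"stopped = $stopped")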
Example 108
Source File: UIUtilsSuite.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.ui

import java.util.TimeZone
import java.util.concurrent.TimeUnit

import org.scalatest.Matchers

import org.apache.spark.SparkFunSuite

class UIUtilsSuite extends SparkFunSuite with Matchers{

  test("shortTimeUnitString") {
    assert("ns" === UIUtils.shortTimeUnitString(TimeUnit.NANOSECONDS))
    assert("us" === UIUtils.shortTimeUnitString(TimeUnit.MICROSECONDS))
    assert("ms" === UIUtils.shortTimeUnitString(TimeUnit.MILLISECONDS))
    assert("sec" === UIUtils.shortTimeUnitString(TimeUnit.SECONDS))
    assert("min" === UIUtils.shortTimeUnitString(TimeUnit.MINUTES))
    assert("hrs" === UIUtils.shortTimeUnitString(TimeUnit.HOURS))
    assert("days" === UIUtils.shortTimeUnitString(TimeUnit.DAYS))
  }

  test("normalizeDuration") {
    verifyNormalizedTime(900, TimeUnit.MILLISECONDS, 900)
    verifyNormalizedTime(1.0, TimeUnit.SECONDS, 1000)
    verifyNormalizedTime(1.0, TimeUnit.MINUTES, 60 * 1000)
    verifyNormalizedTime(1.0, TimeUnit.HOURS, 60 * 60 * 1000)
    verifyNormalizedTime(1.0, TimeUnit.DAYS, 24 * 60 * 60 * 1000)
  }

  private def verifyNormalizedTime(
      expectedTime: Double, expectedUnit: TimeUnit, input: Long): Unit = {
    val (time, unit) = UIUtils.normalizeDuration(input)
    time should be (expectedTime +- 1E-6)
    unit should be (expectedUnit)
  }

  test("convertToTimeUnit") {
    verifyConvertToTimeUnit(60.0 * 1000 * 1000 * 1000, 60 * 1000, TimeUnit.NANOSECONDS)
    verifyConvertToTimeUnit(60.0 * 1000 * 1000, 60 * 1000, TimeUnit.MICROSECONDS)
    verifyConvertToTimeUnit(60 * 1000, 60 * 1000, TimeUnit.MILLISECONDS)
    verifyConvertToTimeUnit(60, 60 * 1000, TimeUnit.SECONDS)
    verifyConvertToTimeUnit(1, 60 * 1000, TimeUnit.MINUTES)
    verifyConvertToTimeUnit(1.0 / 60, 60 * 1000, TimeUnit.HOURS)
    verifyConvertToTimeUnit(1.0 / 60 / 24, 60 * 1000, TimeUnit.DAYS)
  }

  private def verifyConvertToTimeUnit(
      expectedTime: Double, milliseconds: Long, unit: TimeUnit): Unit = {
    val convertedTime = UIUtils.convertToTimeUnit(milliseconds, unit)
    convertedTime should be (expectedTime +- 1E-6)
  }

  test("formatBatchTime") {
    val tzForTest = TimeZone.getTimeZone("America/Los_Angeles")
    val batchTime = 1431637480452L // Thu May 14 14:04:40 PDT 2015
    assert("2015/05/14 14:04:40" === UIUtils.formatBatchTime(batchTime, 1000, timezone = tzForTest))
    assert("2015/05/14 14:04:40.452" ===
      UIUtils.formatBatchTime(batchTime, 999, timezone = tzForTest))
    assert("14:04:40" === UIUtils.formatBatchTime(batchTime, 1000, false, timezone = tzForTest))
    assert("14:04:40.452" === UIUtils.formatBatchTime(batchTime, 999, false, timezone = tzForTest))
  }
} 
Example 109
Source File: StatsdSink.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.util.Properties
import java.util.concurrent.TimeUnit

import com.codahale.metrics.MetricRegistry

import org.apache.spark.SecurityManager
import org.apache.spark.internal.Logging
import org.apache.spark.metrics.MetricsSystem

private[spark] object StatsdSink {
  val STATSD_KEY_HOST = "host"
  val STATSD_KEY_PORT = "port"
  val STATSD_KEY_PERIOD = "period"
  val STATSD_KEY_UNIT = "unit"
  val STATSD_KEY_PREFIX = "prefix"

  val STATSD_DEFAULT_HOST = "127.0.0.1"
  val STATSD_DEFAULT_PORT = "8125"
  val STATSD_DEFAULT_PERIOD = "10"
  val STATSD_DEFAULT_UNIT = "SECONDS"
  val STATSD_DEFAULT_PREFIX = ""
}

private[spark] class StatsdSink(
    val property: Properties,
    val registry: MetricRegistry,
    securityMgr: SecurityManager)
  extends Sink with Logging {
  import StatsdSink._

  val host = property.getProperty(STATSD_KEY_HOST, STATSD_DEFAULT_HOST)
  val port = property.getProperty(STATSD_KEY_PORT, STATSD_DEFAULT_PORT).toInt

  val pollPeriod = property.getProperty(STATSD_KEY_PERIOD, STATSD_DEFAULT_PERIOD).toInt
  val pollUnit =
    TimeUnit.valueOf(property.getProperty(STATSD_KEY_UNIT, STATSD_DEFAULT_UNIT).toUpperCase)

  val prefix = property.getProperty(STATSD_KEY_PREFIX, STATSD_DEFAULT_PREFIX)

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val reporter = new StatsdReporter(registry, host, port, prefix)

  override def start(): Unit = {
    reporter.start(pollPeriod, pollUnit)
    logInfo(s"StatsdSink started with prefix: '$prefix'")
  }

  override def stop(): Unit = {
    reporter.stop()
    logInfo("StatsdSink stopped.")
  }

  override def report(): Unit = reporter.report()
} 
Example 110
Source File: CsvSink.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.io.File
import java.util.{Locale, Properties}
import java.util.concurrent.TimeUnit

import com.codahale.metrics.{CsvReporter, MetricRegistry}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class CsvSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val CSV_KEY_PERIOD = "period"
  val CSV_KEY_UNIT = "unit"
  val CSV_KEY_DIR = "directory"

  val CSV_DEFAULT_PERIOD = 10
  val CSV_DEFAULT_UNIT = "SECONDS"
  val CSV_DEFAULT_DIR = "/tmp/"

  val pollPeriod = Option(property.getProperty(CSV_KEY_PERIOD)) match {
    case Some(s) => s.toInt
    case None => CSV_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = Option(property.getProperty(CSV_KEY_UNIT)) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase(Locale.ROOT))
    case None => TimeUnit.valueOf(CSV_DEFAULT_UNIT)
  }

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val pollDir = Option(property.getProperty(CSV_KEY_DIR)) match {
    case Some(s) => s
    case None => CSV_DEFAULT_DIR
  }

  val reporter: CsvReporter = CsvReporter.forRegistry(registry)
      .formatFor(Locale.US)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .build(new File(pollDir))

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
Example 111
Source File: MetricsServlet.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.util.Properties
import java.util.concurrent.TimeUnit
import javax.servlet.http.HttpServletRequest

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.json.MetricsModule
import com.fasterxml.jackson.databind.ObjectMapper
import org.eclipse.jetty.servlet.ServletContextHandler

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.ui.JettyUtils._

private[spark] class MetricsServlet(
    val property: Properties,
    val registry: MetricRegistry,
    securityMgr: SecurityManager)
  extends Sink {

  val SERVLET_KEY_PATH = "path"
  val SERVLET_KEY_SAMPLE = "sample"

  val SERVLET_DEFAULT_SAMPLE = false

  val servletPath = property.getProperty(SERVLET_KEY_PATH)

  val servletShowSample = Option(property.getProperty(SERVLET_KEY_SAMPLE)).map(_.toBoolean)
    .getOrElse(SERVLET_DEFAULT_SAMPLE)

  val mapper = new ObjectMapper().registerModule(
    new MetricsModule(TimeUnit.SECONDS, TimeUnit.MILLISECONDS, servletShowSample))

  def getHandlers(conf: SparkConf): Array[ServletContextHandler] = {
    Array[ServletContextHandler](
      createServletHandler(servletPath,
        new ServletParams(request => getMetricsSnapshot(request), "text/json"), securityMgr, conf)
    )
  }

  def getMetricsSnapshot(request: HttpServletRequest): String = {
    mapper.writeValueAsString(registry)
  }

  override def start() { }

  override def stop() { }

  override def report() { }
} 
Example 112
Source File: Slf4jSink.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.util.{Locale, Properties}
import java.util.concurrent.TimeUnit

import com.codahale.metrics.{MetricRegistry, Slf4jReporter}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class Slf4jSink(
    val property: Properties,
    val registry: MetricRegistry,
    securityMgr: SecurityManager)
  extends Sink {
  val SLF4J_DEFAULT_PERIOD = 10
  val SLF4J_DEFAULT_UNIT = "SECONDS"

  val SLF4J_KEY_PERIOD = "period"
  val SLF4J_KEY_UNIT = "unit"

  val pollPeriod = Option(property.getProperty(SLF4J_KEY_PERIOD)) match {
    case Some(s) => s.toInt
    case None => SLF4J_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = Option(property.getProperty(SLF4J_KEY_UNIT)) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase(Locale.ROOT))
    case None => TimeUnit.valueOf(SLF4J_DEFAULT_UNIT)
  }

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val reporter: Slf4jReporter = Slf4jReporter.forRegistry(registry)
    .convertDurationsTo(TimeUnit.MILLISECONDS)
    .convertRatesTo(TimeUnit.SECONDS)
    .build()

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
Example 113
Source File: ConsoleSink.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.util.{Locale, Properties}
import java.util.concurrent.TimeUnit

import com.codahale.metrics.{ConsoleReporter, MetricRegistry}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class ConsoleSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val CONSOLE_DEFAULT_PERIOD = 10
  val CONSOLE_DEFAULT_UNIT = "SECONDS"

  val CONSOLE_KEY_PERIOD = "period"
  val CONSOLE_KEY_UNIT = "unit"

  val pollPeriod = Option(property.getProperty(CONSOLE_KEY_PERIOD)) match {
    case Some(s) => s.toInt
    case None => CONSOLE_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = Option(property.getProperty(CONSOLE_KEY_UNIT)) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase(Locale.ROOT))
    case None => TimeUnit.valueOf(CONSOLE_DEFAULT_UNIT)
  }

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val reporter: ConsoleReporter = ConsoleReporter.forRegistry(registry)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .build()

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
Example 114
Source File: GraphiteSink.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.net.InetSocketAddress
import java.util.{Locale, Properties}
import java.util.concurrent.TimeUnit

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.graphite.{Graphite, GraphiteReporter, GraphiteUDP}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class GraphiteSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val GRAPHITE_DEFAULT_PERIOD = 10
  val GRAPHITE_DEFAULT_UNIT = "SECONDS"
  val GRAPHITE_DEFAULT_PREFIX = ""

  val GRAPHITE_KEY_HOST = "host"
  val GRAPHITE_KEY_PORT = "port"
  val GRAPHITE_KEY_PERIOD = "period"
  val GRAPHITE_KEY_UNIT = "unit"
  val GRAPHITE_KEY_PREFIX = "prefix"
  val GRAPHITE_KEY_PROTOCOL = "protocol"

  def propertyToOption(prop: String): Option[String] = Option(property.getProperty(prop))

  if (!propertyToOption(GRAPHITE_KEY_HOST).isDefined) {
    throw new Exception("Graphite sink requires 'host' property.")
  }

  if (!propertyToOption(GRAPHITE_KEY_PORT).isDefined) {
    throw new Exception("Graphite sink requires 'port' property.")
  }

  val host = propertyToOption(GRAPHITE_KEY_HOST).get
  val port = propertyToOption(GRAPHITE_KEY_PORT).get.toInt

  val pollPeriod = propertyToOption(GRAPHITE_KEY_PERIOD) match {
    case Some(s) => s.toInt
    case None => GRAPHITE_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = propertyToOption(GRAPHITE_KEY_UNIT) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase(Locale.ROOT))
    case None => TimeUnit.valueOf(GRAPHITE_DEFAULT_UNIT)
  }

  val prefix = propertyToOption(GRAPHITE_KEY_PREFIX).getOrElse(GRAPHITE_DEFAULT_PREFIX)

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val graphite = propertyToOption(GRAPHITE_KEY_PROTOCOL).map(_.toLowerCase(Locale.ROOT)) match {
    case Some("udp") => new GraphiteUDP(host, port)
    case Some("tcp") | None => new Graphite(host, port)
    case Some(p) => throw new Exception(s"Invalid Graphite protocol: $p")
  }

  val reporter: GraphiteReporter = GraphiteReporter.forRegistry(registry)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .prefixedWith(prefix)
      .build(graphite)

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
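
The metrics sink examples above (Statsd, Csv, Slf4j, Console, Graphite) all resolve their polling unit essentially the same way: an optional string property is upper-cased and passed to TimeUnit.valueOf, with a default otherwise. A self-contained sketch of that parsing, using illustrative property values:

import java.util.concurrent.TimeUnit
import java.util.{Locale, Properties}

val props = new Properties()
props.setProperty("period", "10")
props.setProperty("unit", "minutes")   // any TimeUnit name, case-insensitive here

val pollPeriod: Int = Option(props.getProperty("period")).map(_.toInt).getOrElse(10)
val pollUnit: TimeUnit = Option(props.getProperty("unit")) match {
  case Some(s) => TimeUnit.valueOf(s.toUpperCase(Locale.ROOT))
  case None    => TimeUnit.SECONDS
}

assert(pollPeriod == 10 && pollUnit == TimeUnit.MINUTES)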
Example 115
Source File: config.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy.history

import java.util.concurrent.TimeUnit

import org.apache.spark.internal.config.ConfigBuilder
import org.apache.spark.network.util.ByteUnit

private[spark] object config {

  val DEFAULT_LOG_DIR = "file:/tmp/spark-events"

  val EVENT_LOG_DIR = ConfigBuilder("spark.history.fs.logDirectory")
    .stringConf
    .createWithDefault(DEFAULT_LOG_DIR)

  val MAX_LOG_AGE_S = ConfigBuilder("spark.history.fs.cleaner.maxAge")
    .timeConf(TimeUnit.SECONDS)
    .createWithDefaultString("7d")

  val LOCAL_STORE_DIR = ConfigBuilder("spark.history.store.path")
    .doc("Local directory where to cache application history information. By default this is " +
      "not set, meaning all history information will be kept in memory.")
    .stringConf
    .createOptional

  val MAX_LOCAL_DISK_USAGE = ConfigBuilder("spark.history.store.maxDiskUsage")
    .bytesConf(ByteUnit.BYTE)
    .createWithDefaultString("10g")

  val HISTORY_SERVER_UI_PORT = ConfigBuilder("spark.history.ui.port")
    .doc("Web UI port to bind Spark History Server")
    .intConf
    .createWithDefault(18080)

} 
Example 116
Source File: LauncherBackendSuite.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.launcher

import java.util.concurrent.TimeUnit

import scala.concurrent.duration._
import scala.language.postfixOps

import org.scalatest.Matchers
import org.scalatest.concurrent.Eventually._

import org.apache.spark._
import org.apache.spark.util.Utils

class LauncherBackendSuite extends SparkFunSuite with Matchers {

  private val tests = Seq(
    "local" -> "local",
    "standalone/client" -> "local-cluster[1,1,1024]")

  tests.foreach { case (name, master) =>
    test(s"$name: launcher handle") {
      // These tests fail on Windows due to the ~8K command-line length limitation.
      assume(!Utils.isWindows)
      testWithMaster(master)
    }
  }

  private def testWithMaster(master: String): Unit = {
    val env = new java.util.HashMap[String, String]()
    env.put("SPARK_PRINT_LAUNCH_COMMAND", "1")
    val handle = new SparkLauncher(env)
      .setSparkHome(sys.props("spark.test.home"))
      .setConf(SparkLauncher.DRIVER_EXTRA_CLASSPATH, System.getProperty("java.class.path"))
      .setConf("spark.ui.enabled", "false")
      .setConf(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS, s"-Dtest.appender=console")
      .setMaster(master)
      .setAppResource(SparkLauncher.NO_RESOURCE)
      .setMainClass(TestApp.getClass.getName().stripSuffix("$"))
      .startApplication()

    try {
      eventually(timeout(30 seconds), interval(100 millis)) {
        handle.getAppId() should not be (null)
      }

      handle.stop()

      eventually(timeout(30 seconds), interval(100 millis)) {
        handle.getState() should be (SparkAppHandle.State.KILLED)
      }
    } finally {
      handle.kill()
    }
  }

}

object TestApp {

  def main(args: Array[String]): Unit = {
    new SparkContext(new SparkConf()).parallelize(Seq(1)).foreach { i =>
      Thread.sleep(TimeUnit.SECONDS.toMillis(20))
    }
  }

} 
Example 117
Source File: Retry.scala    From futiles   with Apache License 2.0 5 votes vote down vote up
package markatta.futiles

import java.util.concurrent.{ThreadLocalRandom, TimeUnit}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Random

object Retry {

  private val alwaysRetry: Throwable => Boolean = _ => true

  
  def retryWithBackOff[A](
    times: Int,
    backOffUnit: FiniteDuration,
    shouldRetry: Throwable => Boolean = alwaysRetry
  )(fBlock: => Future[A])(implicit ec: ExecutionContext): Future[A] =
    try {
      if (times <= 1) fBlock
      else retryWithBackOffLoop(times, 1, backOffUnit, shouldRetry)(fBlock)
    } catch {
      // failure to actually create the future
      case x: Throwable => Future.failed(x)
    }

  private def retryWithBackOffLoop[A](
    totalTimes: Int,
    timesTried: Int,
    backOffUnit: FiniteDuration,
    shouldRetry: Throwable => Boolean
  )(fBlock: => Future[A])(implicit ec: ExecutionContext): Future[A] =
    if (totalTimes <= timesTried) fBlock
    else
      fBlock.recoverWith {
        case ex: Throwable if shouldRetry(ex) =>
          val timesTriedNow = timesTried + 1
          val backOff = nextBackOff(timesTriedNow, backOffUnit)
          Timeouts
            .timeout(backOff)(())
            .flatMap(
              _ =>
                retryWithBackOffLoop(
                  totalTimes,
                  timesTriedNow,
                  backOffUnit,
                  shouldRetry
                )(fBlock)
            )
      }

  private[futiles] def nextBackOff(
    tries: Int,
    backOffUnit: FiniteDuration
  ): FiniteDuration = {
    require(tries > 0, "tries should start from 1")
    val rng = new Random(ThreadLocalRandom.current())
    // jitter between 0.5 and 1.5
    val jitter = 0.5 + rng.nextDouble()
    val factor = math.pow(2, tries) * jitter
    FiniteDuration(
      (backOffUnit.toMillis * factor).toLong,
      TimeUnit.MILLISECONDS
    )
  }

} 
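
A usage sketch built only from the retryWithBackOff signature above; the flaky fetch is a hypothetical stand-in for any block that produces a Future:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._
import markatta.futiles.Retry

// Hypothetical operation that fails transiently most of the time.
def fetchOnce(): Future[String] =
  Future {
    if (scala.util.Random.nextDouble() < 0.7) throw new RuntimeException("transient")
    else "ok"
  }

// Up to 5 attempts with exponential, jittered back-off starting from ~100 ms.
// The predicate mirrors the shouldRetry parameter above.
val result: Future[String] =
  Retry.retryWithBackOff(
    times = 5,
    backOffUnit = 100.millis,
    shouldRetry = {
      case _: RuntimeException => true
      case _                   => false
    }
  )(fetchOnce())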
Example 118
Source File: AtomLogger.scala    From tofu   with Apache License 2.0 5 votes vote down vote up
package tofu.logging.atom
import java.time.Instant
import java.util.concurrent.TimeUnit

import cats.effect.Clock
import cats.{Applicative, FlatMap}
import tofu.concurrent.Atom
import tofu.higherKind.Embed
import tofu.logging.{LoggedValue, Logging, Logs}
import tofu.syntax.monadic._

import scala.reflect.{ClassTag, classTag}

final case class LogLine(
    loggerName: String,
    level: Logging.Level,
    message: String,
    timestamp: Instant,
    values: Vector[LoggedValue],
)

class AtomLogging[F[_]: FlatMap: Clock](log: Atom[F, Vector[LogLine]], name: String) extends Logging[F] {
  override def write(level: Logging.Level, message: String, values: LoggedValue*): F[Unit] =
    Clock[F].realTime(TimeUnit.MILLISECONDS).flatMap { time =>
      log.update(
        _ :+ LogLine(
          loggerName = name,
          level = level,
          message = message,
          timestamp = Instant.ofEpochMilli(time),
          values = values.toVector
        )
      )
    }

}

final case class AtomLogs[I[_]: Applicative, F[_]: FlatMap: Clock](flog: F[Atom[F, Vector[LogLine]]])
    extends Logs[I, F] {
  def forService[Svc: ClassTag]: I[Logging[F]] = byName(classTag[Svc].runtimeClass.getName)
  def byName(name: String): I[Logging[F]]      =
    Embed.of(flog.map[Logging[F]](new AtomLogging[F](_, name))).pure[I]
} 
Example 119
Source File: Cached.scala    From tofu   with Apache License 2.0 5 votes vote down vote up
package tofu.memo

import java.util.concurrent.TimeUnit

import cats.effect.Clock
import tofu.syntax.monadic._
import cats.{Functor, Monad}
import tofu.Guarantee
import tofu.concurrent.{MakeMVar, MakeRef}

import scala.concurrent.duration.FiniteDuration

object Cached {
  def apply[I[_]] = new CachedApply[I]

  private def usingState[F[_]: Monad, A](value: F[A], ttl: Long, control: F[CacheControl])(
      state: CacheState[F, A]
  )(implicit clock: Clock[F]): F[A] = {
    for {
      cacheControl <- control
      now          <- clock.realTime(TimeUnit.MILLISECONDS)
      validAfter    = (now - ttl).max(cacheControl.invalidated.millis)
      result       <- state.getOrElse(value, now, validAfter)
    } yield result
  }

  class CachedApply[I[_]] {
    def apply[F[_]: Monad: Clock, A](
        value: F[A]
    )(ttl: FiniteDuration, control: F[CacheControl])(implicit I: Functor[I]) =
      new CachedApply2[I, F, A](value)(ttl.toMillis, control)
  }

  class CachedApply2[I[_]: Functor, F[_]: Monad: Clock, A](value: F[A])(ttl: Long, control: F[CacheControl]) {
    def apply(method: CacheMethod)(implicit refs: MakeRef[I, F], mvars: MakeMVar[I, F], FG: Guarantee[F]): I[F[A]] =
      CacheState.in[I, F, A](method).map(usingState(value, ttl, control))

    def ref(implicit refs: MakeRef[I, F]) = CacheState.refIn[I, F, A]().map(usingState(value, ttl, control))

    def mvar(implicit mvars: MakeMVar[I, F], FG: Guarantee[F]) =
      CacheState.mvarIn[I, F, A]().map(usingState(value, ttl, control))
  }
} 
Example 120
Source File: CachedFunc.scala    From tofu   with Apache License 2.0 5 votes vote down vote up
package tofu.memo

import java.util.concurrent.TimeUnit

import cats.effect.Clock
import tofu.syntax.monadic._
import cats.{Functor, Monad}
import tofu.Guarantee
import tofu.concurrent.{MVars, MakeMVar, MakeRef, Refs}

import scala.concurrent.duration.FiniteDuration

object CachedFunc {
  def apply[I[_]] = new CachedApply[I]

  class CachedApply[I[_]] {
    def apply[F[_]: Monad: Clock, A, B](
        f: A => F[B]
    )(ttl: FiniteDuration, control: F[CacheControl])(implicit I: Functor[I]) =
      new CacheApply2[I, F, A, B](f)(ttl.toMillis, control)
  }

  class CacheApply2[I[_]: Functor, F[_]: Clock: Monad, A, B](f: A => F[B])(ttl: Long, control: F[CacheControl]) {
    def apply(method: CacheMethod)(implicit refs: Refs[F], mvars: MVars[F], FG: Guarantee[F]) =
      new CacheApply3[I, F, A, B](f)(ttl, control)(CacheState(method, _))

    def refVals(implicit refs: Refs[F]) = new CacheApply3[I, F, A, B](f)(ttl, control)(CacheState.ref)

    def mvarVals(implicit mvars: MVars[F], FG: Guarantee[F]) =
      new CacheApply3[I, F, A, B](f)(ttl, control)(CacheState.mvar)
  }

  class CacheApply3[I[_]: Functor, F[_]: Clock: Monad, A, B](f: A => F[B])(ttl: Long, control: F[CacheControl])(
      factory: CacheVal[B] => F[CacheState[F, B]]
  ) {
    def apply(method: CacheMethod)(implicit mr: MakeRef[I, F], mv: MakeMVar[I, F], FG: Guarantee[F]): I[A => F[B]] =
      CacheKeyState[I, F, A, B](method)(factory).map(usingState[F, A, B](f)(ttl, control))

    def ref(implicit refs: MakeRef[I, F]): I[A => F[B]] =
      CacheKeyState.ref[I, F, A, B](factory).map(usingState[F, A, B](f)(ttl, control))

    def mvar(implicit mvars: MakeMVar[I, F], FG: Guarantee[F]): I[A => F[B]] =
      CacheKeyState.mvar[I, F, A, B](factory).map(usingState[F, A, B](f)(ttl, control))
  }

  private def usingState[F[_]: Monad, A, B](
      f: A => F[B]
  )(ttl: Long, control: F[CacheControl])(state: CacheKeyState[F, A, B])(a: A)(implicit clock: Clock[F]) =
    for {
      cacheControl <- control
      now          <- clock.realTime(TimeUnit.MILLISECONDS)
      validAfter    = (now - ttl).max(cacheControl.invalidated.millis)
      result       <- state.getOrElse(f(a), a, now, validAfter)
    } yield result
} 
Example 121
Source File: IngraphTestRunner.scala    From ingraph   with Eclipse Public License 1.0 5 votes vote down vote up
package ingraph.testrunners

import java.util.concurrent.TimeUnit

import com.google.common.base.Stopwatch
import ingraph.driver.CypherDriverFactory
import ingraph.driver.data.{IngraphQueryHandler, ResultCollectingChangeListener}
import ingraph.ire.{Indexer, OneTimeQueryAdapter}
import ingraph.tests.LdbcSnbTestCase
import org.supercsv.prefs.CsvPreference

class IngraphTestRunner(tc: LdbcSnbTestCase) {

  val driver = CypherDriverFactory.createIngraphDriver()
  val session = driver.session()

  def run(): (Iterable[Seq[Map[String, Any]]], Iterable[Long]) = {
    val csvPreference = new CsvPreference.Builder('"', '|', "\n").build
    val queryHandler = session.registerQuery(tc.name, tc.querySpecification)

    // initial
    val iStopwatch = Stopwatch.createStarted()
    val listener = new ResultCollectingChangeListener(queryHandler.keys())
    queryHandler.registerDeltaHandler(listener)
    queryHandler.readCsv(tc.vertexCsvPaths, tc.edgeCsvPaths, csvPreference)
    val iResult = queryHandler.result()
    val iTime = iStopwatch.elapsed(TimeUnit.NANOSECONDS)

    val indexer = queryHandler.adapter.indexer

    // updates: append
    val aStopwatch = Stopwatch.createStarted()
    tc.updates.take(20).map { u => update(u, "upd", indexer, queryHandler, listener) }
    val aResult = queryHandler.result()
    val aTime = aStopwatch.elapsed(TimeUnit.NANOSECONDS)

    // updates: delete
    val dStopwatch = Stopwatch.createStarted()
    tc.updates.takeRight(3).map { u => update(u, "upd", indexer, queryHandler, listener) }
    val dResult = queryHandler.result()
    val dTime = dStopwatch.elapsed(TimeUnit.NANOSECONDS)

    val results = Seq(iResult, aResult, dResult)
    val times = Seq(iTime, aTime, dTime)

    println(tc.sf + "," + tc.query + ",ingraph,times," + times.mkString(","))
    println(tc.sf + "," + tc.query + ",ingraph,results," + results.map(_.length).mkString(","))

    return (results, times)
  }

  def update(querySpecification: String,
             queryName: String,
             indexer: Indexer,
             queryHandler: IngraphQueryHandler,
             listener: ResultCollectingChangeListener): List[Map[String, Any]] = {
    val adapter = new OneTimeQueryAdapter(querySpecification, queryName, indexer)
    adapter.results()
    adapter.close()
    val results = queryHandler.result
    listener.terminated()
    return results
  }

  def close(): Unit = {
    session.close()
    driver.close()
  }

} 
Example 122
Source File: Benchmarks.scala    From scala-string-format   with MIT License 5 votes vote down vote up
package com.komanov.stringformat.jmh

import java.util.concurrent.TimeUnit

import com.komanov.stringformat.{InputArg, JavaFormats, ScalaFormats}
import org.openjdk.jmh.annotations._

@State(Scope.Benchmark)
@BenchmarkMode(Array(Mode.AverageTime))
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@Fork(value = 2, jvmArgs = Array("-Xmx2G"))
@Measurement(iterations = 7, time = 3, timeUnit = TimeUnit.SECONDS)
@Warmup(iterations = 3, time = 3, timeUnit = TimeUnit.SECONDS)
abstract class BenchmarkBase

class ManyParamsBenchmark extends BenchmarkBase {

  @Param
  var arg: InputArg = InputArg.Tiny

  var nullObject: Object = null

  @Benchmark
  def javaConcat(): String = {
    JavaFormats.concat(arg.value1, arg.value2, nullObject)
  }

  @Benchmark
  def scalaConcat(): String = {
    ScalaFormats.concat(arg.value1, arg.value2, nullObject)
  }

  @Benchmark
  def stringFormat(): String = {
    JavaFormats.stringFormat(arg.value1, arg.value2, nullObject)
  }

  @Benchmark
  def messageFormat(): String = {
    JavaFormats.messageFormat(arg.value1, arg.value2, nullObject)
  }

  @Benchmark
  def slf4j(): String = {
    JavaFormats.slf4j(arg.value1, arg.value2, nullObject)
  }

  @Benchmark
  def concatOptimized1(): String = {
    ScalaFormats.optimizedConcat1(arg.value1, arg.value2, nullObject)
  }

  @Benchmark
  def concatOptimized2(): String = {
    ScalaFormats.optimizedConcat2(arg.value1, arg.value2, nullObject)
  }

  @Benchmark
  def concatOptimizedMacros(): String = {
    ScalaFormats.optimizedConcatMacros(arg.value1, arg.value2, nullObject)
  }

  @Benchmark
  def sInterpolator(): String = {
    ScalaFormats.sInterpolator(arg.value1, arg.value2, nullObject)
  }

  @Benchmark
  def fInterpolator(): String = {
    ScalaFormats.fInterpolator(arg.value1, arg.value2, nullObject)
  }

  @Benchmark
  def rawInterpolator(): String = {
    ScalaFormats.rawInterpolator(arg.value1, arg.value2, nullObject)
  }

  @Benchmark
  def sfiInterpolator(): String = {
    ScalaFormats.sfiInterpolator(arg.value1, arg.value2, nullObject)
  }

} 
Example 123
Source File: WebResources.scala    From NSDb   with Apache License 2.0 5 votes vote down vote up
package io.radicalbit.nsdb.web

import java.util.concurrent.TimeUnit

import akka.actor.ActorRef
import akka.event.LoggingAdapter
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.typesafe.config.Config
import io.radicalbit.nsdb.common.configuration.NSDbConfig.HighLevel._
import io.radicalbit.nsdb.security.NsdbSecurity
import org.json4s.Formats

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.{Failure, Success}


trait WebResources extends WsResources with SSLSupport { this: NsdbSecurity =>

  import CORSSupport._
  import VersionHeader._

  implicit def formats: Formats

  def config: Config

  implicit lazy val materializer = ActorMaterializer()
  implicit lazy val dispatcher   = system.dispatcher
  implicit lazy val httpTimeout: Timeout =
    Timeout(config.getDuration("nsdb.http-endpoint.timeout", TimeUnit.SECONDS), TimeUnit.SECONDS)

  def initWebEndpoint(writeCoordinator: ActorRef,
                      readCoordinator: ActorRef,
                      metadataCoordinator: ActorRef,
                      publisher: ActorRef)(implicit logger: LoggingAdapter) =
    authProvider match {
      case Success(provider) =>
        val api: Route = wsResources(publisher, provider) ~ new ApiResources(publisher,
                                                                             readCoordinator,
                                                                             writeCoordinator,
                                                                             metadataCoordinator,
                                                                             provider).apiResources(config)

        val httpExt = akka.http.scaladsl.Http()

        val http: Future[Http.ServerBinding] =
          if (isSSLEnabled) {
            val interface = config.getString(HttpInterface)
            val port      = config.getInt(HttpsPort)
            logger.info(s"Cluster Apis started with https protocol at interface $interface on port $port")
            httpExt.bindAndHandle(withCors(withNSDbVersion(api)), interface, port, connectionContext = serverContext)
          } else {
            val interface = config.getString(HttpInterface)
            val port      = config.getInt(HttpPort)
            logger.info(s"Cluster Apis started with http protocol at interface $interface and port $port")
            httpExt.bindAndHandle(withCors(withNSDbVersion(api)), interface, port)
          }

        scala.sys.addShutdownHook {
          http
            .flatMap(_.unbind())
            .onComplete { _ =>
              system.terminate()
            }
          Await.result(system.whenTerminated, 60 seconds)
        }
      case Failure(ex) =>
        logger.error("error on loading authorization provider", ex)
        System.exit(1)
    }

} 
Example 124
Source File: GRPCClient.scala    From NSDb   with Apache License 2.0 5 votes vote down vote up
package io.radicalbit.nsdb.client.rpc

import java.util.concurrent.TimeUnit

import io.grpc.{ManagedChannel, ManagedChannelBuilder}
import io.radicalbit.nsdb.rpc.health.{HealthCheckRequest, HealthCheckResponse, HealthGrpc}
import io.radicalbit.nsdb.rpc.init._
import io.radicalbit.nsdb.rpc.request.RPCInsert
import io.radicalbit.nsdb.rpc.requestCommand.{DescribeMetric, ShowMetrics, ShowNamespaces}
import io.radicalbit.nsdb.rpc.requestSQL.SQLRequestStatement
import io.radicalbit.nsdb.rpc.response.RPCInsertResult
import io.radicalbit.nsdb.rpc.responseCommand.{DescribeMetricResponse, MetricsGot, Namespaces}
import io.radicalbit.nsdb.rpc.responseSQL.SQLStatementResponse
import io.radicalbit.nsdb.rpc.restore.{RestoreGrpc, RestoreRequest, RestoreResponse}
import io.radicalbit.nsdb.rpc.service.{NSDBServiceCommandGrpc, NSDBServiceSQLGrpc}
import org.slf4j.LoggerFactory

import scala.concurrent.Future


class GRPCClient(host: String, port: Int) {

  private val log = LoggerFactory.getLogger(classOf[GRPCClient])

  private val channel: ManagedChannel = ManagedChannelBuilder.forAddress(host, port).usePlaintext().build
  private val stubHealth              = HealthGrpc.stub(channel)
  private val stubRestore             = RestoreGrpc.stub(channel)
  private val stubSql                 = NSDBServiceSQLGrpc.stub(channel)
  private val stubCommand             = NSDBServiceCommandGrpc.stub(channel)
  private val stubInit                = InitMetricGrpc.stub(channel)

  def checkConnection(): Future[HealthCheckResponse] = {
    log.debug("checking connection")
    stubHealth.check(HealthCheckRequest("whatever"))
  }

  def restore(request: RestoreRequest): Future[RestoreResponse] = {
    log.debug("creating dump")
    stubRestore.restore(request)
  }

  def initMetric(request: InitMetricRequest): Future[InitMetricResponse] = {
    log.debug("Preparing a init request for {}", request)
    stubInit.initMetric(request)
  }

  def write(request: RPCInsert): Future[RPCInsertResult] = {
    log.debug("Preparing a write request for {}...", request)
    stubSql.insertBit(request)
  }

  def executeSQLStatement(request: SQLRequestStatement): Future[SQLStatementResponse] = {
    log.debug("Preparing execution of SQL request: {} ", request.statement)
    stubSql.executeSQLStatement(request)
  }

  def showNamespaces(request: ShowNamespaces): Future[Namespaces] = {
    log.debug("Preparing of command show namespaces")
    stubCommand.showNamespaces(request)
  }

  def showMetrics(request: ShowMetrics): Future[MetricsGot] = {
    log.debug("Preparing of command show metrics for namespace: {} ", request.namespace)
    stubCommand.showMetrics(request)
  }

  def describeMetric(request: DescribeMetric): Future[DescribeMetricResponse] = {
    log.debug("Preparing of command describe metric for namespace: {} ", request.namespace)
    stubCommand.describeMetric(request)
  }

  def close(): Unit = channel.shutdownNow().awaitTermination(10, TimeUnit.SECONDS)

} 
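
A minimal connection-check sketch using only the constructor and methods shown above; the host and port are placeholders for a running NSDb node:

import scala.concurrent.Await
import scala.concurrent.duration._
import io.radicalbit.nsdb.client.rpc.GRPCClient

// Hypothetical endpoint: point this at the gRPC address of an NSDb instance.
val client = new GRPCClient(host = "127.0.0.1", port = 7817)

// checkConnection() wraps the gRPC health check defined above.
val health = Await.result(client.checkConnection(), 5.seconds)
println(s"health check response: $health")

client.close()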
Example 125
Source File: NSDbActors.scala    From NSDb   with Apache License 2.0 5 votes vote down vote up
package io.radicalbit.nsdb.cluster

import java.util.concurrent.TimeUnit

import akka.actor._
import akka.cluster.Cluster
import akka.cluster.ddata.DistributedData
import akka.cluster.singleton._
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.util.Timeout
import io.radicalbit.nsdb.cluster.actor._


trait NSDbActors {

  implicit def system: ActorSystem

  implicit lazy val timeout: Timeout =
    Timeout(system.settings.config.getDuration("nsdb.global.timeout", TimeUnit.SECONDS), TimeUnit.SECONDS)

  def initTopLevelActors(): Unit = {
    AkkaManagement(system).start()
    ClusterBootstrap(system).start()

    system.actorOf(
      ClusterSingletonManager.props(singletonProps = Props(classOf[DatabaseActorsGuardian]),
                                    terminationMessage = PoisonPill,
                                    settings = ClusterSingletonManagerSettings(system)),
      name = "databaseActorGuardian"
    )

    system.actorOf(
      ClusterSingletonProxy.props(singletonManagerPath = "/user/databaseActorGuardian",
                                  settings = ClusterSingletonProxySettings(system)),
      name = "databaseActorGuardianProxy"
    )

    DistributedData(system).replicator

    system.actorOf(ClusterListener.props(true),
                   name = s"cluster-listener_${createNodeName(Cluster(system).selfMember)}")
  }
} 
Example 126
Source File: CommitLogCoordinator.scala    From NSDb   with Apache License 2.0 5 votes vote down vote up
package io.radicalbit.nsdb.cluster.coordinator

import java.util.concurrent.TimeUnit

import akka.actor.ActorRef
import akka.pattern.{ask, pipe}
import akka.util.Timeout
import io.radicalbit.nsdb.actors.MetricPerformerActor
import io.radicalbit.nsdb.actors.MetricPerformerActor.PersistedBits
import io.radicalbit.nsdb.commit_log.CommitLogWriterActor._
import io.radicalbit.nsdb.commit_log.RollingCommitLogFileWriter
import io.radicalbit.nsdb.util.ActorPathLogging

import scala.concurrent.Future


class CommitLogCoordinator extends ActorPathLogging {

  private def getWriter(db: String, namespace: String, metric: String): ActorRef =
    context
      .child(s"commit-log-writer-$db-$namespace-$metric")
      .getOrElse(
        context.actorOf(RollingCommitLogFileWriter.props(db, namespace, metric),
                        s"commit-log-writer-$db-$namespace-$metric")
      )

  implicit val timeout: Timeout = Timeout(
    context.system.settings.config.getDuration("nsdb.write-coordinator.timeout", TimeUnit.SECONDS),
    TimeUnit.SECONDS)

  def receive: Receive = {
    case msg @ WriteToCommitLog(db, namespace, metric, _, _, _) =>
      getWriter(db, namespace, metric).forward(msg)

    case persistedBits: PersistedBits =>
      import context.dispatcher
      // Handle successful events of Bit Persistence
      val successfullyPersistedBits = persistedBits.persistedBits

      val successfulCommitLogResponses: Future[Seq[WriteToCommitLogSucceeded]] =
        Future.sequence {
          successfullyPersistedBits.map { persistedBit =>
            (getWriter(persistedBit.db, persistedBit.namespace, persistedBit.metric) ?
              WriteToCommitLog(persistedBit.db,
                               persistedBit.namespace,
                               persistedBit.metric,
                               persistedBit.timestamp,
                               PersistedEntryAction(persistedBit.bit),
                               persistedBit.location)).collect {
              case s: WriteToCommitLogSucceeded => s
            }
          }
        }

      val response = successfulCommitLogResponses.map { responses =>
        if (responses.size == successfullyPersistedBits.size)
          MetricPerformerActor.PersistedBitsAck
        else
          context.system.terminate()
      }
      response.pipeTo(sender())

    case _ =>
      log.error("UnexpectedMessage")
  }
} 
Example 127
Source File: WriteConfig.scala    From NSDb   with Apache License 2.0 5 votes vote down vote up
package io.radicalbit.nsdb.cluster.logic

import java.util.concurrent.TimeUnit

import akka.actor.Actor
import akka.cluster.ddata.Replicator.{WriteAll, WriteConsistency, WriteLocal, WriteMajority}

import scala.concurrent.duration.FiniteDuration

trait WriteConfig { this: Actor =>
  private val config = context.system.settings.config

  private lazy val timeout: FiniteDuration =
    FiniteDuration(config.getDuration("nsdb.global.timeout", TimeUnit.SECONDS), TimeUnit.SECONDS)

  
  sealed trait WriteProcessing
  case object Parallel extends WriteProcessing
  case object Serial   extends WriteProcessing

  protected lazy val writeProcessing: WriteProcessing = {
    val configValue = config.getString("nsdb.cluster.write-processing")

    configValue.toLowerCase match {
      case "parallel" => Parallel
      case "serial"   => Serial
      case wrongConfigValue =>
        throw new IllegalArgumentException(s"$wrongConfigValue is not a valid value for write-processing")
    }
  }
} 
Example 128
Source File: ReaderConfig.scala    From pulsar4s   with Apache License 2.0 5 votes vote down vote up
package com.sksamuel.pulsar4s

import org.apache.pulsar.client.api._

import scala.concurrent.duration.FiniteDuration
import scala.util.matching.Regex
import java.util.concurrent.TimeUnit

sealed trait StartMessage
final case class Message(messageId: MessageId) extends StartMessage
final case class RollBack(rollbackDuration: Long, timeunit: TimeUnit) extends StartMessage

case class ReaderConfig(topic: Topic,
                        @deprecated("use startMessage instead", "2.5.3")
                        seek: MessageId = MessageId.earliest,
                        startMessage: StartMessage,
                        startMessageIdInclusive: Boolean = true,
                        receiverQueueSize: Option[Int] = None,
                        reader: Option[String] = None,
                        readCompacted: Option[Boolean] = None,
                        additionalProperties: Map[String, AnyRef] = Map.empty) 
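
A construction sketch for the config above; the topic name is a placeholder, and Topic is assumed to be pulsar4s's simple topic wrapper:

import java.util.concurrent.TimeUnit
import com.sksamuel.pulsar4s.{ReaderConfig, RollBack, Topic}

// Start reading from one hour in the past on a hypothetical topic,
// using the RollBack start-message variant defined above.
val config = ReaderConfig(
  topic = Topic("persistent://sample/standalone/ns1/events"),
  startMessage = RollBack(rollbackDuration = 1, timeunit = TimeUnit.HOURS)
)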
Example 129
Source File: Reader.scala    From pulsar4s   with Apache License 2.0 5 votes vote down vote up
package com.sksamuel.pulsar4s

import java.io.Closeable
import java.util.concurrent.TimeUnit

import scala.concurrent.duration.Duration

trait Reader[T] extends Closeable {
  def hasMessageAvailable: Boolean
  def topic: Topic
  def next: ConsumerMessage[T]
  def next(duration: Duration): Option[ConsumerMessage[T]]
  def nextAsync[F[_] : AsyncHandler]: F[ConsumerMessage[T]]
  def isConnected: Boolean
  def closeAsync[F[_] : AsyncHandler]: F[Unit]
  def seek(timestamp: Long): Unit
  def seek(messageId: MessageId): Unit
  def seekAsync[F[_] : AsyncHandler](timestamp: Long): F[Unit]
  def seekAsync[F[_] : AsyncHandler](messageId: MessageId): F[Unit] 
  def hasReachedEndOfTopic: Boolean
}

class DefaultReader[T](reader: org.apache.pulsar.client.api.Reader[T],
                       override val topic: Topic) extends Reader[T] {

  override def hasMessageAvailable: Boolean = reader.hasMessageAvailable

  override def next: ConsumerMessage[T] = ConsumerMessage.fromJava(reader.readNext)

  override def next(duration: Duration): Option[ConsumerMessage[T]] =
    Option(reader.readNext(duration.toSeconds.toInt, TimeUnit.SECONDS)).map(ConsumerMessage.fromJava)

  override def nextAsync[F[_] : AsyncHandler]: F[ConsumerMessage[T]] = implicitly[AsyncHandler[F]].nextAsync(reader)

  override def isConnected: Boolean = reader.isConnected

  override def close(): Unit = reader.close()
  override def closeAsync[F[_] : AsyncHandler]: F[Unit] = implicitly[AsyncHandler[F]].close(reader)

  override def seek(timestamp: Long): Unit = reader.seek(timestamp)
  override def seek(messageId: MessageId): Unit = reader.seek(messageId)
  override def seekAsync[F[_] : AsyncHandler](timestamp: Long): F[Unit] = implicitly[AsyncHandler[F]].seekAsync(reader, timestamp)
  override def seekAsync[F[_] : AsyncHandler](messageId: MessageId): F[Unit] = implicitly[AsyncHandler[F]].seekAsync(reader, messageId)

  
  override def hasReachedEndOfTopic: Boolean = reader.hasReachedEndOfTopic

} 
Example 130
Source File: ExecutionContextExecutorServiceBridge.scala    From odinson   with Apache License 2.0 5 votes vote down vote up
package ai.lum.odinson.utils

import java.util.Collections
import java.util.concurrent.{ AbstractExecutorService, TimeUnit }
import scala.concurrent.{ ExecutionContext, ExecutionContextExecutorService }

object ExecutionContextExecutorServiceBridge {
  def apply(ec: ExecutionContext): ExecutionContextExecutorService = ec match {
    case null => throw new NullPointerException("ExecutionContext must not be null")
    case eces: ExecutionContextExecutorService => eces
    case other => new AbstractExecutorService with ExecutionContextExecutorService {
      override def prepare(): ExecutionContext = other
      override def isShutdown = false
      override def isTerminated = false
      override def shutdown() = ()
      override def shutdownNow() = Collections.emptyList[Runnable]
      override def execute(runnable: Runnable): Unit = other execute runnable
      override def reportFailure(t: Throwable): Unit = other reportFailure t
      override def awaitTermination(length: Long, unit: TimeUnit): Boolean = false
    }
  }
} 
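A usage sketch: the bridged value implements java.util.concurrent.ExecutorService, so a plain Scala ExecutionContext can be handed to Java APIs while the work still runs on the original context.

import java.util.concurrent.{Callable, TimeUnit}
import scala.concurrent.ExecutionContext

val service = ExecutionContextExecutorServiceBridge(ExecutionContext.global)

// Java-style submission; the task executes on ExecutionContext.global.
val future = service.submit(new Callable[Int] {
  override def call(): Int = 21 * 2
})
println(future.get(1, TimeUnit.SECONDS)) // 42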
Example 131
Source File: TcpServiceImpl.scala    From c4proto   with Apache License 2.0 5 votes vote down vote up
package ee.cone.c4gate_server

import java.net.InetSocketAddress
import java.nio.ByteBuffer
import java.nio.channels.{AsynchronousServerSocketChannel, AsynchronousSocketChannel, CompletionHandler}
import java.util.UUID
import java.util.concurrent.{Executors, ScheduledExecutorService, ScheduledFuture, TimeUnit}

import com.typesafe.scalalogging.LazyLogging
import ee.cone.c4actor._

import scala.collection.concurrent.TrieMap
import scala.collection.immutable.Queue

@SuppressWarnings(Array("org.wartremover.warts.Var")) class ChannelHandler(
  channel: AsynchronousSocketChannel, unregister: ()=>Unit, fail: Throwable=>Unit,
  executor: ScheduledExecutorService, timeout: Long, val compressor: Option[Compressor]
) extends CompletionHandler[Integer,Unit] with SenderToAgent {
  private var queue: Queue[Array[Byte]] = Queue.empty
  private var activeElement: Option[ByteBuffer] = None
  private var purge: Option[ScheduledFuture[_]] = None
  private def startWrite(): Unit =
    queue.dequeueOption.foreach{ case (element,nextQueue) =>
      queue = nextQueue
      activeElement = Option(ByteBuffer.wrap(element))
      channel.write[Unit](activeElement.get, (), this)
    }
  def add(data: Array[Byte]): Unit = synchronized {
    queue = queue.enqueue(data)
    if(activeElement.isEmpty) startWrite()
  }
  def completed(result: Integer, att: Unit): Unit = Trace {
    synchronized {
      if(activeElement.get.hasRemaining) channel.write[Unit](activeElement.get, (), this)
      else {
        purge.foreach(_.cancel(false))
        purge = Option(executor.schedule(new Runnable {
          def run(): Unit = close()
        },timeout,TimeUnit.SECONDS))
        activeElement = None
        startWrite()
      }
    }
  }
  def failed(exc: Throwable, att: Unit): Unit = {
    fail(exc)
    close()
  }
  def close(): Unit = {
    unregister()
    channel.close()  //does close block?
  }
}

class TcpServerImpl(
  port: Int, tcpHandler: TcpHandler, timeout: Long, compressorFactory: StreamCompressorFactory,
  channels: TrieMap[String,ChannelHandler] = TrieMap()
) extends TcpServer with Executable with LazyLogging {
  def getSender(connectionKey: String): Option[SenderToAgent] =
    channels.get(connectionKey)
  def run(): Unit = concurrent.blocking{
    tcpHandler.beforeServerStart()
    val address = new InetSocketAddress(port)
    val listener = AsynchronousServerSocketChannel.open().bind(address)
    val executor = Executors.newScheduledThreadPool(1)
    listener.accept[Unit]((), new CompletionHandler[AsynchronousSocketChannel,Unit] {
      def completed(ch: AsynchronousSocketChannel, att: Unit): Unit = Trace {
        listener.accept[Unit]((), this)
        val key = UUID.randomUUID.toString
        val sender = new ChannelHandler(ch, {() =>
          assert(channels.remove(key).nonEmpty)
          tcpHandler.afterDisconnect(key)
        }, { error =>
          logger.error("channel",error)
        }, executor, timeout, compressorFactory.create())
        assert(channels.put(key,sender).isEmpty)
        tcpHandler.afterConnect(key, sender)
      }
      def failed(exc: Throwable, att: Unit): Unit = logger.error("tcp",exc) //! may be set status-finished
    })
  }
} 
Example 132
Source File: SunServerImpl.scala    From c4proto   with Apache License 2.0 5 votes vote down vote up
package ee.cone.c4gate

import java.lang.Math.toIntExact
import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

import com.sun.net.httpserver.{HttpExchange, HttpHandler, HttpServer}
import ee.cone.c4actor.{Executable, Execution, FinallyClose, Observer, Trace}
import ee.cone.c4gate.HttpProtocol.N_Header
import okio.ByteString

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.{Duration, SECONDS}
import scala.collection.JavaConverters.mapAsScalaMapConverter
import scala.collection.JavaConverters.iterableAsScalaIterableConverter



class SunReqHandler(handler: FHttpHandler, executionContext: ExecutionContext) extends HttpHandler {
  def handle(httpExchange: HttpExchange) =
    Trace{ FinallyClose[HttpExchange,Unit](_.close())(httpExchange) { ex =>
      val method = httpExchange.getRequestMethod
      val path = httpExchange.getRequestURI.getPath
      val reqHeaders: List[N_Header] = httpExchange.getRequestHeaders.asScala
        .flatMap{ case(k,l)=>l.asScala.map(v=>N_Header(k,v)) }.toList
      val buffer = (new okio.Buffer).readFrom(httpExchange.getRequestBody)
      val body = buffer.readByteString()
      val request = FHttpRequest(method, path, reqHeaders, body)
      val responseF = handler.handle(request)(executionContext)
      val response = Await.result(responseF,Duration(600,SECONDS))
      val headers = httpExchange.getResponseHeaders
      response.headers.foreach(header=>headers.add(header.key,header.value))
      val bytes = response.body.toByteArray
      httpExchange.sendResponseHeaders(toIntExact(response.status), bytes.length)
      if(bytes.nonEmpty) httpExchange.getResponseBody.write(bytes)
    } }
}

class SunHttpServer(port: Int, handler: FHttpHandler, execution: Execution) extends Executable {
  def run(): Unit = concurrent.blocking{
    val pool = execution.newExecutorService("http-",None) //newWorkStealingPool
    execution.onShutdown("Pool",()=>{
      val tasks = pool.shutdownNow()
      pool.awaitTermination(Long.MaxValue,TimeUnit.SECONDS)
    })
    val executionContext: ExecutionContext = ExecutionContext.fromExecutor(pool)
    val server: HttpServer = HttpServer.create(new InetSocketAddress(port),0)
    execution.onShutdown("HttpServer",()=>server.stop(Int.MaxValue))
    server.setExecutor(pool)
    server.createContext("/", new SunReqHandler(handler,executionContext))
    server.start()
  }
}

class MutableStatefulReceiver[Message](execution: Execution, inner: List[Observer[Message]]) extends StatefulReceiver[Message] {
  var state: Future[List[Observer[Message]]] = Future.successful(inner)
  def send(message: Message): Unit = execution.fatal{ implicit ec =>
    synchronized{
      state = state.map(_.flatMap(_.activate(message)))
      state
    }
  }
}

class MutableStatefulReceiverFactory(execution: Execution) extends StatefulReceiverFactory {
  def create[Message](inner: List[Observer[Message]])(implicit executionContext: ExecutionContext): Future[StatefulReceiver[Message]] =
    Future.successful(new MutableStatefulReceiver[Message](execution,inner))
} 
Example 133
Source File: StringItemCuckooBenchmark.scala    From bloom-filter-scala   with MIT License 5 votes vote down vote up
package bloomfilter.mutable

import java.util.concurrent.TimeUnit

import org.openjdk.jmh.annotations.{BenchmarkMode, OperationsPerInvocation, OutputTimeUnit, _}

import scala.util.Random

@State(Scope.Benchmark)
class StringItemCuckooBenchmark {

  private val itemsExpected = 100000000L
  private val random = new Random()

  private var bf: CuckooFilter[String] = _

  @Param(Array("1024"))
  var length: Int = _

  private val items = new Array[String](10000)
  var i = 0
  while (i < items.length) {
    items(i) = random.nextString(length)
    i += 1
  }

  @Setup(Level.Iteration)
  def setup(): Unit = {
    bf = CuckooFilter[String](itemsExpected)
  }

  @Benchmark
  @BenchmarkMode(Array(Mode.SingleShotTime))
  @OutputTimeUnit(TimeUnit.NANOSECONDS)
  @OperationsPerInvocation(10000)
  def myPut(): Unit = {
    var i = 0
    while (i < items.length) {
      bf.add(items(i))
      i += 1
    }
  }

  @Benchmark
  @BenchmarkMode(Array(Mode.Throughput))
  @OperationsPerInvocation(10000)
  def myGet(): Unit = {
    var i = 0
    while (i < items.length) {
      bf.mightContain(items(i))
      i += 1
    }
  }

} 
Example 134
Source File: ExecutorDelegationTokenUpdater.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy.yarn

import java.util.concurrent.{Executors, TimeUnit}

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.security.{Credentials, UserGroupInformation}

import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.util.{ThreadUtils, Utils}

import scala.util.control.NonFatal

private[spark] class ExecutorDelegationTokenUpdater(
    sparkConf: SparkConf,
    hadoopConf: Configuration) extends Logging {

  @volatile private var lastCredentialsFileSuffix = 0

  private val credentialsFile = sparkConf.get("spark.yarn.credentials.file")
  private val freshHadoopConf =
    SparkHadoopUtil.get.getConfBypassingFSCache(
      hadoopConf, new Path(credentialsFile).toUri.getScheme)

  private val delegationTokenRenewer =
    Executors.newSingleThreadScheduledExecutor(
      ThreadUtils.namedThreadFactory("Delegation Token Refresh Thread"))

  // On the executor, this thread wakes up and picks up new tokens from HDFS, if any.
  private val executorUpdaterRunnable =
    new Runnable {
      override def run(): Unit = Utils.logUncaughtExceptions(updateCredentialsIfRequired())
    }

  def updateCredentialsIfRequired(): Unit = {
    try {
      val credentialsFilePath = new Path(credentialsFile)
      val remoteFs = FileSystem.get(freshHadoopConf)
      SparkHadoopUtil.get.listFilesSorted(
        remoteFs, credentialsFilePath.getParent,
        credentialsFilePath.getName, SparkHadoopUtil.SPARK_YARN_CREDS_TEMP_EXTENSION)
        .lastOption.foreach { credentialsStatus =>
        val suffix = SparkHadoopUtil.get.getSuffixForCredentialsPath(credentialsStatus.getPath)
        if (suffix > lastCredentialsFileSuffix) {
          logInfo("Reading new delegation tokens from " + credentialsStatus.getPath)
          val newCredentials = getCredentialsFromHDFSFile(remoteFs, credentialsStatus.getPath)
          lastCredentialsFileSuffix = suffix
          UserGroupInformation.getCurrentUser.addCredentials(newCredentials)
          logInfo("Tokens updated from credentials file.")
        } else {
          // Check every hour to see if new credentials arrived.
          logInfo("Updated delegation tokens were expected, but the driver has not updated the " +
            "tokens yet, will check again in an hour.")
          delegationTokenRenewer.schedule(executorUpdaterRunnable, 1, TimeUnit.HOURS)
          return
        }
      }
      val timeFromNowToRenewal =
        SparkHadoopUtil.get.getTimeFromNowToRenewal(
          sparkConf, 0.8, UserGroupInformation.getCurrentUser.getCredentials)
      if (timeFromNowToRenewal <= 0) {
        // We just checked for new credentials but none were there, wait a minute and retry.
        // This handles the shutdown case where the staging directory may have been removed(see
        // SPARK-12316 for more details).
        delegationTokenRenewer.schedule(executorUpdaterRunnable, 1, TimeUnit.MINUTES)
      } else {
        logInfo(s"Scheduling token refresh from HDFS in $timeFromNowToRenewal millis.")
        delegationTokenRenewer.schedule(
          executorUpdaterRunnable, timeFromNowToRenewal, TimeUnit.MILLISECONDS)
      }
    } catch {
      // Since the file may get deleted while we are reading it, catch the Exception and come
      // back in an hour to try again
      case NonFatal(e) =>
        logWarning("Error while trying to update credentials, will try again in 1 hour", e)
        delegationTokenRenewer.schedule(executorUpdaterRunnable, 1, TimeUnit.HOURS)
    }
  }

  private def getCredentialsFromHDFSFile(remoteFs: FileSystem, tokenPath: Path): Credentials = {
    val stream = remoteFs.open(tokenPath)
    try {
      val newCredentials = new Credentials()
      newCredentials.readTokenStorageStream(stream)
      newCredentials
    } finally {
      stream.close()
    }
  }

  def stop(): Unit = {
    delegationTokenRenewer.shutdown()
  }

} 
Example 135
Source File: GangliaSink.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.util.Properties
import java.util.concurrent.TimeUnit

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.ganglia.GangliaReporter
import info.ganglia.gmetric4j.gmetric.GMetric
import info.ganglia.gmetric4j.gmetric.GMetric.UDPAddressingMode

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

class GangliaSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val GANGLIA_KEY_PERIOD = "period"
  val GANGLIA_DEFAULT_PERIOD = 10

  val GANGLIA_KEY_UNIT = "unit"
  val GANGLIA_DEFAULT_UNIT: TimeUnit = TimeUnit.SECONDS

  val GANGLIA_KEY_MODE = "mode"
  val GANGLIA_DEFAULT_MODE: UDPAddressingMode = GMetric.UDPAddressingMode.MULTICAST

  // TTL for multicast messages. If listeners are X hops away in network, must be at least X.
  val GANGLIA_KEY_TTL = "ttl"
  val GANGLIA_DEFAULT_TTL = 1

  val GANGLIA_KEY_HOST = "host"
  val GANGLIA_KEY_PORT = "port"

  def propertyToOption(prop: String): Option[String] = Option(property.getProperty(prop))

  if (!propertyToOption(GANGLIA_KEY_HOST).isDefined) {
    throw new Exception("Ganglia sink requires 'host' property.")
  }

  if (!propertyToOption(GANGLIA_KEY_PORT).isDefined) {
    throw new Exception("Ganglia sink requires 'port' property.")
  }

  val host = propertyToOption(GANGLIA_KEY_HOST).get
  val port = propertyToOption(GANGLIA_KEY_PORT).get.toInt
  val ttl = propertyToOption(GANGLIA_KEY_TTL).map(_.toInt).getOrElse(GANGLIA_DEFAULT_TTL)
  val mode: UDPAddressingMode = propertyToOption(GANGLIA_KEY_MODE)
    .map(u => GMetric.UDPAddressingMode.valueOf(u.toUpperCase)).getOrElse(GANGLIA_DEFAULT_MODE)
  val pollPeriod = propertyToOption(GANGLIA_KEY_PERIOD).map(_.toInt)
    .getOrElse(GANGLIA_DEFAULT_PERIOD)
  val pollUnit: TimeUnit = propertyToOption(GANGLIA_KEY_UNIT)
    .map(u => TimeUnit.valueOf(u.toUpperCase))
    .getOrElse(GANGLIA_DEFAULT_UNIT)

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val ganglia = new GMetric(host, port, mode, ttl)
  val reporter: GangliaReporter = GangliaReporter.forRegistry(registry)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .build(ganglia)

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
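The period/unit handling above is repeated in the other metric sinks below: the configured unit string is upper-cased and resolved with TimeUnit.valueOf, so only the enum constant names (seconds, minutes, ...) are accepted. A small sketch of that behaviour:

import java.util.concurrent.TimeUnit

def parseUnit(raw: Option[String], default: TimeUnit = TimeUnit.SECONDS): TimeUnit =
  raw.map(u => TimeUnit.valueOf(u.toUpperCase)).getOrElse(default)

val minutes  = parseUnit(Some("minutes")) // TimeUnit.MINUTES
val fallback = parseUnit(None)            // TimeUnit.SECONDS
// parseUnit(Some("mins")) would throw IllegalArgumentException: no such enum constant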
Example 136
Source File: ContextWaiter.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming

import java.util.concurrent.TimeUnit
import java.util.concurrent.locks.ReentrantLock

private[streaming] class ContextWaiter {

  private val lock = new ReentrantLock()
  private val condition = lock.newCondition()

  // Guarded by "lock"
  private var error: Throwable = null

  // Guarded by "lock"
  private var stopped: Boolean = false

  def notifyError(e: Throwable): Unit = {
    lock.lock()
    try {
      error = e
      condition.signalAll()
    } finally {
      lock.unlock()
    }
  }

  def notifyStop(): Unit = {
    lock.lock()
    try {
      stopped = true
      condition.signalAll()
    } finally {
      lock.unlock()
    }
  }

  
  def waitForStopOrError(timeout: Long = -1): Boolean = {
    lock.lock()
    try {
      if (timeout < 0) {
        while (!stopped && error == null) {
          condition.await()
        }
      } else {
        var nanos = TimeUnit.MILLISECONDS.toNanos(timeout)
        while (!stopped && error == null && nanos > 0) {
          nanos = condition.awaitNanos(nanos)
        }
      }
      // If already had error, then throw it
      if (error != null) throw error
      // already stopped or timeout
      stopped
    } finally {
      lock.unlock()
    }
  }
} 
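A sketch of the timed wait in action (ContextWaiter is private[streaming], so this only compiles inside that package): the millisecond timeout is converted to nanoseconds and awaitNanos is re-armed until stop, an error, or expiry.

import java.util.concurrent.TimeUnit

val waiter = new ContextWaiter

// Signal stop from another thread after a short delay.
new Thread(new Runnable {
  override def run(): Unit = {
    Thread.sleep(200)
    waiter.notifyStop()
  }
}).start()

// Returns true: stop arrives well before the 5 second budget elapses.
val stopped = waiter.waitForStopOrError(TimeUnit.SECONDS.toMillis(5))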
Example 137
Source File: UIUtilsSuite.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.ui

import java.util.TimeZone
import java.util.concurrent.TimeUnit

import org.scalatest.Matchers

import org.apache.spark.SparkFunSuite

class UIUtilsSuite extends SparkFunSuite with Matchers{

  test("shortTimeUnitString") {
    assert("ns" === UIUtils.shortTimeUnitString(TimeUnit.NANOSECONDS))
    assert("us" === UIUtils.shortTimeUnitString(TimeUnit.MICROSECONDS))
    assert("ms" === UIUtils.shortTimeUnitString(TimeUnit.MILLISECONDS))
    assert("sec" === UIUtils.shortTimeUnitString(TimeUnit.SECONDS))
    assert("min" === UIUtils.shortTimeUnitString(TimeUnit.MINUTES))
    assert("hrs" === UIUtils.shortTimeUnitString(TimeUnit.HOURS))
    assert("days" === UIUtils.shortTimeUnitString(TimeUnit.DAYS))
  }

  test("normalizeDuration") {
    verifyNormalizedTime(900, TimeUnit.MILLISECONDS, 900)
    verifyNormalizedTime(1.0, TimeUnit.SECONDS, 1000)
    verifyNormalizedTime(1.0, TimeUnit.MINUTES, 60 * 1000)
    verifyNormalizedTime(1.0, TimeUnit.HOURS, 60 * 60 * 1000)
    verifyNormalizedTime(1.0, TimeUnit.DAYS, 24 * 60 * 60 * 1000)
  }

  private def verifyNormalizedTime(
      expectedTime: Double, expectedUnit: TimeUnit, input: Long): Unit = {
    val (time, unit) = UIUtils.normalizeDuration(input)
    time should be (expectedTime +- 1E-6)
    unit should be (expectedUnit)
  }

  test("convertToTimeUnit") {
    verifyConvertToTimeUnit(60.0 * 1000 * 1000 * 1000, 60 * 1000, TimeUnit.NANOSECONDS)
    verifyConvertToTimeUnit(60.0 * 1000 * 1000, 60 * 1000, TimeUnit.MICROSECONDS)
    verifyConvertToTimeUnit(60 * 1000, 60 * 1000, TimeUnit.MILLISECONDS)
    verifyConvertToTimeUnit(60, 60 * 1000, TimeUnit.SECONDS)
    verifyConvertToTimeUnit(1, 60 * 1000, TimeUnit.MINUTES)
    verifyConvertToTimeUnit(1.0 / 60, 60 * 1000, TimeUnit.HOURS)
    verifyConvertToTimeUnit(1.0 / 60 / 24, 60 * 1000, TimeUnit.DAYS)
  }

  private def verifyConvertToTimeUnit(
      expectedTime: Double, milliseconds: Long, unit: TimeUnit): Unit = {
    val convertedTime = UIUtils.convertToTimeUnit(milliseconds, unit)
    convertedTime should be (expectedTime +- 1E-6)
  }

  test("formatBatchTime") {
    val tzForTest = TimeZone.getTimeZone("America/Los_Angeles")
    val batchTime = 1431637480452L // Thu May 14 14:04:40 PDT 2015
    assert("2015/05/14 14:04:40" === UIUtils.formatBatchTime(batchTime, 1000, timezone = tzForTest))
    assert("2015/05/14 14:04:40.452" ===
      UIUtils.formatBatchTime(batchTime, 999, timezone = tzForTest))
    assert("14:04:40" === UIUtils.formatBatchTime(batchTime, 1000, false, timezone = tzForTest))
    assert("14:04:40.452" === UIUtils.formatBatchTime(batchTime, 999, false, timezone = tzForTest))
  }
} 
Example 138
Source File: CsvSink.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.io.File
import java.util.{Locale, Properties}
import java.util.concurrent.TimeUnit

import com.codahale.metrics.{CsvReporter, MetricRegistry}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class CsvSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val CSV_KEY_PERIOD = "period"
  val CSV_KEY_UNIT = "unit"
  val CSV_KEY_DIR = "directory"

  val CSV_DEFAULT_PERIOD = 10
  val CSV_DEFAULT_UNIT = "SECONDS"
  val CSV_DEFAULT_DIR = "/tmp/"

  val pollPeriod = Option(property.getProperty(CSV_KEY_PERIOD)) match {
    case Some(s) => s.toInt
    case None => CSV_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = Option(property.getProperty(CSV_KEY_UNIT)) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
    case None => TimeUnit.valueOf(CSV_DEFAULT_UNIT)
  }

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val pollDir = Option(property.getProperty(CSV_KEY_DIR)) match {
    case Some(s) => s
    case None => CSV_DEFAULT_DIR
  }

  val reporter: CsvReporter = CsvReporter.forRegistry(registry)
      .formatFor(Locale.US)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .build(new File(pollDir))

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
Example 139
Source File: MetricsServlet.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.util.Properties
import java.util.concurrent.TimeUnit

import javax.servlet.http.HttpServletRequest

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.json.MetricsModule
import com.fasterxml.jackson.databind.ObjectMapper
import org.eclipse.jetty.servlet.ServletContextHandler

import org.apache.spark.{SparkConf, SecurityManager}
import org.apache.spark.ui.JettyUtils._

private[spark] class MetricsServlet(
    val property: Properties,
    val registry: MetricRegistry,
    securityMgr: SecurityManager)
  extends Sink {

  val SERVLET_KEY_PATH = "path"
  val SERVLET_KEY_SAMPLE = "sample"

  val SERVLET_DEFAULT_SAMPLE = false

  val servletPath = property.getProperty(SERVLET_KEY_PATH)

  val servletShowSample = Option(property.getProperty(SERVLET_KEY_SAMPLE)).map(_.toBoolean)
    .getOrElse(SERVLET_DEFAULT_SAMPLE)

  val mapper = new ObjectMapper().registerModule(
    new MetricsModule(TimeUnit.SECONDS, TimeUnit.MILLISECONDS, servletShowSample))

  def getHandlers(conf: SparkConf): Array[ServletContextHandler] = {
    Array[ServletContextHandler](
      createServletHandler(servletPath,
        new ServletParams(request => getMetricsSnapshot(request), "text/json"), securityMgr, conf)
    )
  }

  def getMetricsSnapshot(request: HttpServletRequest): String = {
    mapper.writeValueAsString(registry)
  }

  override def start() { }

  override def stop() { }

  override def report() { }
} 
Example 140
Source File: Slf4jSink.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.util.Properties
import java.util.concurrent.TimeUnit

import com.codahale.metrics.{Slf4jReporter, MetricRegistry}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class Slf4jSink(
    val property: Properties,
    val registry: MetricRegistry,
    securityMgr: SecurityManager)
  extends Sink {
  val SLF4J_DEFAULT_PERIOD = 10
  val SLF4J_DEFAULT_UNIT = "SECONDS"

  val SLF4J_KEY_PERIOD = "period"
  val SLF4J_KEY_UNIT = "unit"

  val pollPeriod = Option(property.getProperty(SLF4J_KEY_PERIOD)) match {
    case Some(s) => s.toInt
    case None => SLF4J_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = Option(property.getProperty(SLF4J_KEY_UNIT)) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
    case None => TimeUnit.valueOf(SLF4J_DEFAULT_UNIT)
  }

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val reporter: Slf4jReporter = Slf4jReporter.forRegistry(registry)
    .convertDurationsTo(TimeUnit.MILLISECONDS)
    .convertRatesTo(TimeUnit.SECONDS)
    .build()

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
Example 141
Source File: ConsoleSink.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.util.Properties
import java.util.concurrent.TimeUnit

import com.codahale.metrics.{ConsoleReporter, MetricRegistry}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class ConsoleSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val CONSOLE_DEFAULT_PERIOD = 10
  val CONSOLE_DEFAULT_UNIT = "SECONDS"

  val CONSOLE_KEY_PERIOD = "period"
  val CONSOLE_KEY_UNIT = "unit"

  val pollPeriod = Option(property.getProperty(CONSOLE_KEY_PERIOD)) match {
    case Some(s) => s.toInt
    case None => CONSOLE_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = Option(property.getProperty(CONSOLE_KEY_UNIT)) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
    case None => TimeUnit.valueOf(CONSOLE_DEFAULT_UNIT)
  }

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val reporter: ConsoleReporter = ConsoleReporter.forRegistry(registry)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .build()

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
Example 142
Source File: GraphiteSink.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.metrics.sink

import java.net.InetSocketAddress
import java.util.Properties
import java.util.concurrent.TimeUnit

import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.graphite.{GraphiteUDP, Graphite, GraphiteReporter}

import org.apache.spark.SecurityManager
import org.apache.spark.metrics.MetricsSystem

private[spark] class GraphiteSink(val property: Properties, val registry: MetricRegistry,
    securityMgr: SecurityManager) extends Sink {
  val GRAPHITE_DEFAULT_PERIOD = 10
  val GRAPHITE_DEFAULT_UNIT = "SECONDS"
  val GRAPHITE_DEFAULT_PREFIX = ""

  val GRAPHITE_KEY_HOST = "host"
  val GRAPHITE_KEY_PORT = "port"
  val GRAPHITE_KEY_PERIOD = "period"
  val GRAPHITE_KEY_UNIT = "unit"
  val GRAPHITE_KEY_PREFIX = "prefix"
  val GRAPHITE_KEY_PROTOCOL = "protocol"

  def propertyToOption(prop: String): Option[String] = Option(property.getProperty(prop))

  if (!propertyToOption(GRAPHITE_KEY_HOST).isDefined) {
    throw new Exception("Graphite sink requires 'host' property.")
  }

  if (!propertyToOption(GRAPHITE_KEY_PORT).isDefined) {
    throw new Exception("Graphite sink requires 'port' property.")
  }

  val host = propertyToOption(GRAPHITE_KEY_HOST).get
  val port = propertyToOption(GRAPHITE_KEY_PORT).get.toInt

  val pollPeriod = propertyToOption(GRAPHITE_KEY_PERIOD) match {
    case Some(s) => s.toInt
    case None => GRAPHITE_DEFAULT_PERIOD
  }

  val pollUnit: TimeUnit = propertyToOption(GRAPHITE_KEY_UNIT) match {
    case Some(s) => TimeUnit.valueOf(s.toUpperCase())
    case None => TimeUnit.valueOf(GRAPHITE_DEFAULT_UNIT)
  }

  val prefix = propertyToOption(GRAPHITE_KEY_PREFIX).getOrElse(GRAPHITE_DEFAULT_PREFIX)

  MetricsSystem.checkMinimalPollingPeriod(pollUnit, pollPeriod)

  val graphite = propertyToOption(GRAPHITE_KEY_PROTOCOL).map(_.toLowerCase) match {
    case Some("udp") => new GraphiteUDP(new InetSocketAddress(host, port))
    case Some("tcp") | None => new Graphite(new InetSocketAddress(host, port))
    case Some(p) => throw new Exception(s"Invalid Graphite protocol: $p")
  }

  val reporter: GraphiteReporter = GraphiteReporter.forRegistry(registry)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .prefixedWith(prefix)
      .build(graphite)

  override def start() {
    reporter.start(pollPeriod, pollUnit)
  }

  override def stop() {
    reporter.stop()
  }

  override def report() {
    reporter.report()
  }
} 
Example 143
Source File: LauncherBackendSuite.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.launcher

import java.util.concurrent.TimeUnit

import scala.concurrent.duration._
import scala.language.postfixOps

import org.scalatest.Matchers
import org.scalatest.concurrent.Eventually._

import org.apache.spark._
import org.apache.spark.launcher._

class LauncherBackendSuite extends SparkFunSuite with Matchers {

  private val tests = Seq(
    "local" -> "local",
    "standalone/client" -> "local-cluster[1,1,1024]")

  tests.foreach { case (name, master) =>
    test(s"$name: launcher handle") {
      testWithMaster(master)
    }
  }

  private def testWithMaster(master: String): Unit = {
    val env = new java.util.HashMap[String, String]()
    env.put("SPARK_PRINT_LAUNCH_COMMAND", "1")
    val handle = new SparkLauncher(env)
      .setSparkHome(sys.props("spark.test.home"))
      .setConf(SparkLauncher.DRIVER_EXTRA_CLASSPATH, System.getProperty("java.class.path"))
      .setConf("spark.ui.enabled", "false")
      .setConf(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS, s"-Dtest.appender=console")
      .setMaster(master)
      .setAppResource("spark-internal")
      .setMainClass(TestApp.getClass.getName().stripSuffix("$"))
      .startApplication()

    try {
      eventually(timeout(30 seconds), interval(100 millis)) {
        handle.getAppId() should not be (null)
      }

      handle.stop()

      eventually(timeout(30 seconds), interval(100 millis)) {
        handle.getState() should be (SparkAppHandle.State.KILLED)
      }
    } finally {
      handle.kill()
    }
  }

}

object TestApp {

  def main(args: Array[String]): Unit = {
    new SparkContext(new SparkConf()).parallelize(Seq(1)).foreach { i =>
      Thread.sleep(TimeUnit.SECONDS.toMillis(20))
    }
  }

} 
Example 144
Source File: Web.scala    From Neutrino   with Apache License 2.0 5 votes vote down vote up
package com.ebay.neutrino.www

import java.util.concurrent.TimeUnit

import akka.actor.{ActorRef, Props, ActorSystem, ScalaActorRef}
import akka.pattern.ask
import akka.util.Timeout
import com.ebay.neutrino.{SLB, NeutrinoPoolId}
import com.ebay.neutrino.api.ApiData
import com.ebay.neutrino.cluster.{SLBTopology, SystemConfiguration}
import com.ebay.neutrino.www.ui.SideMenu
import com.ebay.neutrino.www.ui.PageFormatting
import com.ebay.neutrino.cluster.SLBLoader


import com.typesafe.config.ConfigRenderOptions
import com.typesafe.scalalogging.slf4j.StrictLogging
import spray.http.StatusCodes

import scala.concurrent.Await
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}


trait WebService extends spray.routing.HttpService with ApiData with PageFormatting with StrictLogging
{
  def system: ActorSystem

  def topology = SystemConfiguration(system).topology

  val poolPage   = new SideMenu("Pools") with PoolsPage
  val serverPage = new SideMenu("Servers") with ServersPage

  def webRoutes =
    path ("activity") {
      complete {
        import PageFormatting.ScalaTagsPrettyMarshaller
        ActivityPage.page()
      }
    } ~
    path("pools") {
      complete {
        import PageFormatting.ScalaTagsPrettyMarshaller
        poolPage.summary(topology.toSeq)
      }
    } ~
    path("pools" / Segment) { id =>
      complete {
        import PageFormatting.ScalaTagsPrettyMarshaller
        val pool = topology.getPool(NeutrinoPoolId(id))
        poolPage.detail(pool)
      }
    } ~
    path("servers") {
      complete {
        import PageFormatting.ScalaTagsPrettyMarshaller
        val pools    = topology.toSeq
        val services = topology.asInstanceOf[SLBTopology].core.services
        val nodes    = services flatMap (_.pools()) flatMap (_.nodes())
        serverPage.summary(pools, nodes.toSeq)
      }
    } ~
    path("refresh") {
      complete {
        import PageFormatting.ScalaTagsPrettyMarshaller
        implicit val timeout = Timeout(FiniteDuration(3, TimeUnit.SECONDS))
        // Wait for the result, since refresh api has to be synchronous
        val reloader = Await.result(system.actorSelection("user/loader").resolveOne(), timeout.duration)
        val future = reloader ? "reload"
        val result = Await.result(future, timeout.duration)
        if (result == "complete") {
          logger.warn("Config reloaded successfully")
        } else {
          logger.warn("Unable to load the configuration")
        }
        poolPage.summary(topology.toSeq)
      }
    } ~
    path("config") {
      complete {
        val sysconfig = SystemConfiguration(system)
        sysconfig.config.root.render(ConfigRenderOptions.defaults)
      }
    } ~
    pathEndOrSingleSlash {
      complete {
        import PageFormatting.ScalaTagsPrettyMarshaller
        Overview.generate(generateStatus())
      }
    } ~
    get {
      redirect("/", StatusCodes.PermanentRedirect)
    }

} 
Example 145
Source File: EchoServer.scala    From Neutrino   with Apache License 2.0 5 votes vote down vote up
package com.ebay.neutrino

import java.util.concurrent.TimeUnit

import akka.actor._
import akka.io.IO
import com.ebay.neutrino.util.Random
import com.typesafe.config.{Config, ConfigFactory}
import spray.can.Http
import spray.http._

import scala.concurrent.duration._


object EchoServer extends App {

  // Extract port from args, if provided
  val port = if (args.size > 0) args(0).toInt else 8081

  // Load our configuration from file and merge in the port parameter
  val config = ConfigFactory.parseString(s"echo-server.port = $port") withFallback ConfigFactory.load("echo.conf")
  val system = ActorSystem("echo-server", config)
  system.actorOf(Props[EchoServer], "echo-server")
}


class EchoServer extends Actor with ActorLogging {
  import scala.language.implicitConversions

  implicit val system = context.system
  val startup  = System.currentTimeMillis
  val settings = EchoServerSettings(system)

  //Use the system's dispatcher as ExecutionContext
  import system.dispatcher

  // Register connection service
  IO(Http) ! Http.Bind(self, interface = settings.host, port = settings.port)

  // Minimal handler sketch (the original receive body is elided in this listing):
  // register each incoming connection and echo the request entity straight back.
  def receive = {
    case _: Http.Connected    => sender() ! Http.Register(self)
    case request: HttpRequest => sender() ! HttpResponse(entity = request.entity)
  }
}

case class EchoServerSettings(host: String, port: Int, random: Boolean, duration: FiniteDuration)
  extends Extension
{
  def latency = random match {
    case false => duration
    case true  => Random.nextMillis(duration)
  }
}

object EchoServerSettings {

  def apply(c: Config): EchoServerSettings = EchoServerSettings(
    c getString "host",
    c getInt "port",
    c getBoolean "random",
    c getDuration("duration", TimeUnit.MILLISECONDS) milliseconds
  )

  def apply(system: ActorSystem): EchoServerSettings =
    EchoServerSettings(system.settings.config getConfig "echo-server")

} 
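EchoServerSettings relies on Typesafe Config's getDuration(path, unit), which parses human-readable durations and returns a Long in the requested unit. A small sketch of that conversion (the config string is a made-up example):

import java.util.concurrent.TimeUnit
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._

val c = ConfigFactory.parseString("duration = 250ms")
val millis = c.getDuration("duration", TimeUnit.MILLISECONDS) // 250L
val asScalaDuration = millis.milliseconds                     // 250 milliseconds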
Example 146
Source File: ParamOpt.scala    From scalaprops   with MIT License 5 votes vote down vote up
package scalaprops

import java.util.concurrent.TimeUnit
import scala.concurrent.duration.Duration

final case class ParamOpt(
  seed: Option[Seed],
  minSuccessful: Option[Int],
  maxDiscarded: Option[Int],
  minSize: Option[Int],
  maxSize: Option[Int],
  timeoutSeconds: Option[Int]
) {
  def merge(param: Param): Param = {
    Param(
      seed = seed.getOrElse(param.seed),
      minSuccessful = minSuccessful.getOrElse(param.minSuccessful),
      maxDiscarded = maxDiscarded.getOrElse(param.maxDiscarded),
      minSize = minSize.getOrElse(param.minSize),
      maxSize = maxSize.getOrElse(param.maxSize),
      timeout = timeoutSeconds match {
        case Some(n) => Duration(n, TimeUnit.SECONDS)
        case None => param.timeout
      }
    )
  }
} 
Example 147
Source File: Param.scala    From scalaprops   with MIT License 5 votes vote down vote up
package scalaprops

import java.util.concurrent.TimeUnit
import scalaprops.internal._
import scala.concurrent.duration.Duration

final case class Param(
  seed: Seed,
  minSuccessful: Int = Platform.minSuccessful,
  maxDiscarded: Int = 500,
  minSize: Int = 0,
  maxSize: Int = Gen.defaultSize,
  timeout: Duration = Duration(30, TimeUnit.SECONDS)
) {
  def rand: Rand = seed.createRand
}

object Param {
  def withCurrentTimeSeed(): Param =
    Param(
      seed = Seed.LongSeed(System.nanoTime())
    )

  def rand(rand: Rand): Endo[Param] =
    Endo(_.copy(seed = Seed.RandSeed(rand)))

  def constantSeed(value: Int): Endo[Param] =
    Endo(_.copy(seed = Seed.IntSeed(value)))

  def minSuccessful(n: Int): Endo[Param] =
    Endo(_.copy(minSuccessful = n))

  def maxSize(n: Int): Endo[Param] =
    Endo(_.copy(maxSize = n))

  def timeout(n: Int, timeunit: TimeUnit): Endo[Param] =
    Endo(_.copy(timeout = Duration(n, timeunit)))

  val id: Endo[Param] =
    Endo.idEndo[Param]
} 
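A sketch of how ParamOpt (previous example) composes with Param: options that are set override the base parameters, everything else is inherited.

val base = Param.withCurrentTimeSeed()

val overrides = ParamOpt(
  seed = None,
  minSuccessful = Some(1000),
  maxDiscarded = None,
  minSize = None,
  maxSize = None,
  timeoutSeconds = Some(60))

val merged = overrides.merge(base)
// merged.minSuccessful == 1000, merged.timeout == Duration(60, TimeUnit.SECONDS),
// all other fields come from `base`.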
Example 148
Source File: StopAndStartCubeSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import java.util.concurrent.TimeUnit

import akka.actor.{ActorIdentity, ActorSystem, Identify}
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import org.squbs.lifecycle.GracefulStop

import scala.util.Try

object StopAndStartCubeSpec {
  val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath

  val classPaths = Array(
    "DummyCube",
    "DummyCubeSvc",
    "DummySvc"
  ) map (dummyJarsDir + "/" + _)

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = StopAndStartCubeSpec
       |  ${JMX.prefixConfig} = true
       |}
       |default-listener.bind-port = 0
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()
}

class StopAndStartCubeSpec extends TestKit(StopAndStartCubeSpec.boot.actorSystem)
with FlatSpecLike with Matchers with ImplicitSender with BeforeAndAfterAll {

  implicit val timeout: akka.util.Timeout =
    Try(System.getProperty("test.timeout").toLong) map { millis =>
      akka.util.Timeout(millis, TimeUnit.MILLISECONDS)
    } getOrElse Timeouts.askTimeout

  import Timeouts.awaitMax

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "Unicomplex" should "be able to stop a cube" in {
    Unicomplex(system).uniActor ! StopCube("DummyCube")
    within(awaitMax) {
      expectMsg(Ack)
    }
    system.actorSelection("/user/DummyCube") ! Identify("hello")
    within(awaitMax) {
      val id = expectMsgType[ActorIdentity]
      id.ref should be(None)
    }
  }

  "Unicomplex" should "not be able to stop a stopped cube" in {
    Unicomplex(system).uniActor ! StopCube("DummyCube")
    expectNoMessage()
  }

  "Unicomplex" should "be able to start a cube" in {
    Unicomplex(system).uniActor ! StartCube("DummyCube")
    within(awaitMax) {
      expectMsg(Ack)
    }
    system.actorSelection("/user/DummyCube") ! Identify("hello")
    within(awaitMax) {
      val id = expectMsgType[ActorIdentity]
      id.ref should not be None
    }
  }

  "Unicomplex" should "not be able to start a running cube" in {
    Unicomplex(system).uniActor ! StartCube("DummyCube")
    expectNoMessage()
  }

} 
Example 149
Source File: ScanResourceSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import java.util.concurrent.TimeUnit
import javax.management.ObjectName

import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestKit}
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.Waiters
import org.scalatest.{BeforeAndAfterAll, Inspectors, Matchers, WordSpecLike}
import org.squbs.lifecycle.GracefulStop

import scala.util.Try

object ScanResourceSpec {

  val jmxPrefix = "ScanResourceSpec"

  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = scanResourceSpec
       |  ${JMX.prefixConfig} = true
       |}
       |
       |default-listener.bind-port = 0
    """.stripMargin)

  implicit val akkaTimeout: Timeout =
    Try(System.getProperty("test.timeout").toLong) map { millis =>
      akka.util.Timeout(millis, TimeUnit.MILLISECONDS)
    } getOrElse Timeouts.askTimeout

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanResources()
    .initExtensions.start()
}

class ScanResourceSpec extends TestKit(ScanResourceSpec.boot.actorSystem) with ImplicitSender with WordSpecLike
    with Matchers with Inspectors with BeforeAndAfterAll with Waiters {

  import ScanResourceSpec._
  import system.dispatcher

  "The scanned resource" must {

    "have some actors started" in {
      val w = new Waiter

      system.actorSelection("/user/ScanResourceCube").resolveOne().onComplete { result =>
        w {
          assert(result.isSuccess)
        }
        w.dismiss()
      }
      w.await()
    }

    "expose proper cube state through MXBean" in {
      import org.squbs.unicomplex.JMX._
      val cubeName = "ScanResourceCube"
      val cubesName = new ObjectName(prefix(system) + cubeStateName + cubeName)
      get(cubesName, "Name") should be (cubeName)
      get(cubesName, "CubeState") should be ("Active")
      val wellKnownActors = get(cubesName, "WellKnownActors").asInstanceOf[String]
      println(wellKnownActors)
      wellKnownActors should include ("Actor[akka://scanResourceSpec/user/ScanResourceCube/Prepender#")
      wellKnownActors should include ("Actor[akka://scanResourceSpec/user/ScanResourceCube/Appender#")
    }
  }

  override protected def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }
} 
Example 150
Source File: DurationConverters.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.util

import java.time.temporal.ChronoUnit
import java.util.concurrent.TimeUnit
import java.time.{Duration => JavaDuration}

import scala.concurrent.duration.{FiniteDuration, Duration => ScalaDuration}


object DurationConverters {

  final def toJava(duration: scala.concurrent.duration.FiniteDuration): java.time.Duration = {
    if (duration.length == 0) JavaDuration.ZERO
    else duration.unit match {
      case TimeUnit.NANOSECONDS => JavaDuration.ofNanos(duration.length)
      case TimeUnit.MICROSECONDS => JavaDuration.of(duration.length, ChronoUnit.MICROS)
      case TimeUnit.MILLISECONDS => JavaDuration.ofMillis(duration.length)
      case TimeUnit.SECONDS => JavaDuration.ofSeconds(duration.length)
      case TimeUnit.MINUTES => JavaDuration.ofMinutes(duration.length)
      case TimeUnit.HOURS => JavaDuration.ofHours(duration.length)
      case TimeUnit.DAYS => JavaDuration.ofDays(duration.length)
    }
  }
} 
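A quick usage sketch: toJava matches on the FiniteDuration's TimeUnit and builds the corresponding java.time.Duration directly, so no intermediate conversion to nanoseconds is needed.

import scala.concurrent.duration._

val fiveSeconds = DurationConverters.toJava(5.seconds)          // PT5S
val oneAndAHalf = DurationConverters.toJava(1500.milliseconds)  // PT1.5S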
Example 151
Source File: StreamConsumerScala.scala    From infinispan-spark   with Apache License 2.0 5 votes vote down vote up
package org.infinispan.spark.examples.twitter

import java.util.concurrent.{Executors, TimeUnit}

import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkContext
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.infinispan.client.hotrod.RemoteCacheManager
import org.infinispan.client.hotrod.configuration.ConfigurationBuilder
import org.infinispan.spark.examples.twitter.Sample.{getSparkConf, runAndExit, usageStream}
import org.infinispan.spark.examples.util.TwitterDStream
import org.infinispan.spark.stream._

import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.language.postfixOps


object StreamConsumerScala {

   def main(args: Array[String]) {
      Logger.getLogger("org").setLevel(Level.WARN)

      if (args.length < 2) {
         usageStream("StreamConsumerScala")
      }

      val infinispanHost = args(0)
      val duration = args(1).toLong * 1000

      val conf = getSparkConf("spark-infinispan-stream-consumer-scala")
      val sparkContext = new SparkContext(conf)

      val streamingContext = new StreamingContext(sparkContext, Seconds(1))

      val config = Sample.getConnectorConf(infinispanHost)

      val remoteCacheManager = new RemoteCacheManager(new ConfigurationBuilder().withProperties(config.getHotRodClientProperties).build())
      val cache = remoteCacheManager.getCache[Long, Tweet]("default")

      val twitterDStream = TwitterDStream.create(streamingContext)

      val keyValueTweetStream = twitterDStream.map(s => (s.getId, s))

      keyValueTweetStream.writeToInfinispan(config)

      Repeat.every(5 seconds, {
         val keySet = cache.keySet()
         val maxKey = keySet.asScala.max
         println(s"${keySet.size} tweets inserted in the cache")
         println(s"Last tweet:${Option(cache.get(maxKey)).map(_.getText).getOrElse("<no tweets received so far>")}")
         println()
      })

      runAndExit(streamingContext, duration)
   }

   object Repeat {
      def every(d: Duration, code: => Unit) =
         Executors.newSingleThreadScheduledExecutor.scheduleWithFixedDelay(new Runnable {
            override def run(): Unit = code
         }, 10, d.toSeconds, TimeUnit.SECONDS)
   }

} 
Example 152
Source File: StreamingFailOverSuite.scala    From infinispan-spark   with Apache License 2.0 5 votes vote down vote up
package org.infinispan.spark.suites

import java.util.concurrent.TimeUnit

import org.apache.spark.storage.StorageLevel
import org.infinispan.client.hotrod.RemoteCache
import org.infinispan.client.hotrod.event.ClientEvent
import org.infinispan.client.hotrod.event.ClientEvent.Type.{CLIENT_CACHE_ENTRY_CREATED, CLIENT_CACHE_ENTRY_EXPIRED, CLIENT_CACHE_ENTRY_MODIFIED, CLIENT_CACHE_ENTRY_REMOVED}
import org.infinispan.spark.config.ConnectorConfiguration
import org.infinispan.spark.domain.Runner
import org.infinispan.spark.stream._
import org.infinispan.spark.test.StreamingUtils.TestInputDStream
import org.infinispan.spark.test.TestingUtil._
import org.infinispan.spark.test._
import org.scalatest.{DoNotDiscover, FunSuite, Matchers}

import scala.collection._
import scala.concurrent.duration._
import scala.language.postfixOps

@DoNotDiscover
class StreamingFailOverSuite extends FunSuite with SparkStream with MultipleServers with FailOver with Matchers {

   override def getCacheConfig: Option[String] = Some(
      """
        |{
        |    "distributed-cache":{
        |        "mode":"SYNC",
        |        "owners":2,
        |        "statistics":true,
        |        "expiration":{
        |            "interval":500
        |        }
        |    }
        |}
        |""".stripMargin
   )

   protected def getProperties = {
      new ConnectorConfiguration()
        .setServerList(s"localhost:$getServerPort")
        .setCacheName(getCacheName)
   }

   ignore("test stream consumer with failover") {
      val cache = getRemoteCache.asInstanceOf[RemoteCache[Int, String]]
      val stream = new TestInputDStream(ssc, of = Seq(1 -> "value1", 2 -> "value2", 3 -> "value3"), streamItemEvery = 100 millis)

      stream.writeToInfinispan(getProperties)

      ssc.start()

      Thread.sleep(100)
      Cluster.failServer(0)

      waitForCondition(() => cache.size == 3)
      cache.get(1) shouldBe "value1"
      cache.get(2) shouldBe "value2"
      cache.get(3) shouldBe "value3"
   }

   ignore("test stream producer with failover.") {
      val cache = getRemoteCache.asInstanceOf[RemoteCache[Int, Runner]]
      cache.clear()

      val stream = new InfinispanInputDStream[Int, Runner](ssc, StorageLevel.MEMORY_ONLY, getProperties)
      val streamDump = mutable.Set[(Int, Runner, ClientEvent.Type)]()

      stream.foreachRDD(rdd => streamDump ++= rdd.collect())

      ssc.start()

      executeAfterReceiverStarted {
         cache.put(1, new Runner("Bolt", finished = true, 3600, 30))
         cache.put(2, new Runner("Farah", finished = true, 7200, 29))

         Cluster.failServer(0)

         cache.put(3, new Runner("Ennis", finished = true, 7500, 28))
         cache.put(4, new Runner("Gatlin", finished = true, 7900, 26), 50, TimeUnit.MILLISECONDS)
         cache.put(1, new Runner("Bolt", finished = true, 7500, 23))
         cache.remove(2)
      }

      waitForCondition(() => streamDump.size == 7)
      eventsOfType(streamDump)(CLIENT_CACHE_ENTRY_CREATED) shouldBe 4
      eventsOfType(streamDump)(CLIENT_CACHE_ENTRY_REMOVED) shouldBe 1
      eventsOfType(streamDump)(CLIENT_CACHE_ENTRY_MODIFIED) shouldBe 1
      eventsOfType(streamDump)(CLIENT_CACHE_ENTRY_EXPIRED) shouldBe 1
   }

   protected def eventsOfType(streamDump: Set[(Int, Runner, ClientEvent.Type)])(eventType: ClientEvent.Type): Int = {
      streamDump.count { case (_, _, t) => t == eventType }
   }
} 
Example 153
Source File: StreamingUtils.scala    From infinispan-spark   with Apache License 2.0 5 votes vote down vote up
package org.infinispan.spark.test

import java.time.{Duration => JDuration}
import java.util.concurrent.TimeUnit
import java.util.{List => JList}

import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.api.java.{JavaReceiverInputDStream, JavaStreamingContext}
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.receiver.Receiver

import scala.annotation.meta.param
import scala.collection.JavaConverters._
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag


object StreamingUtils {

   class TestReceiver[T](of: Seq[T], streamItemEvery: Duration) extends Receiver[T](StorageLevel.MEMORY_ONLY) {
      override def onStart(): Unit = {
         of.foreach { item =>
            Thread.sleep(streamItemEvery.toMillis)
            store(item)
         }
      }

      override def onStop(): Unit = {}
   }

   class TestInputDStream[T: ClassTag](@(transient@param) ssc_ : StreamingContext, of: Seq[T], streamItemEvery: Duration) extends ReceiverInputDStream[T](ssc_) {
      override def getReceiver(): Receiver[T] = new TestReceiver[T](of, streamItemEvery)
   }

   def createJavaReceiverDInputStream[T](jssc: JavaStreamingContext, of: JList[T], streamItemEvery: JDuration): JavaReceiverInputDStream[T] = {
      implicit val cmt: ClassTag[T] = implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[T]]
      // Use the full duration in nanoseconds (getNano would drop whole seconds).
      JavaReceiverInputDStream.fromReceiverInputDStream(new TestInputDStream[T](jssc.ssc, of.asScala, Duration(streamItemEvery.toNanos, TimeUnit.NANOSECONDS)))
   }

} 
Example 154
Source File: JobSpecSchedulerActor.scala    From metronome   with Apache License 2.0 5 votes vote down vote up
package dcos.metronome
package jobspec.impl

import java.time.{Clock, Instant}
import java.util.concurrent.TimeUnit

import akka.actor._
import dcos.metronome.jobrun.JobRunService
import dcos.metronome.model.{JobSpec, ScheduleSpec}

import scala.concurrent.duration._


class JobSpecSchedulerActor(initSpec: JobSpec, clock: Clock, runService: JobRunService)
    extends Actor
    with Stash
    with ActorLogging {

  import JobSpecSchedulerActor._
  import context.dispatcher

  private[impl] var spec = initSpec
  private[impl] var nextSchedule: Option[Cancellable] = None
  private[impl] var scheduledAt: Option[Instant] = None

  override def preStart(): Unit = {
    scheduleNextRun()
  }

  override def postStop(): Unit = {
    cancelSchedule()
  }

  override def receive: Receive = {
    case StartJob(schedule) => runJob(schedule)
    case UpdateJobSpec(newSpec) => updateSpec(newSpec)
  }

  def updateSpec(newSpec: JobSpec): Unit = {
    log.info(s"JobSpec ${newSpec.id} has been updated. Reschedule.")
    spec = newSpec
    scheduledAt = None
    scheduleNextRun()
  }

  def runJob(schedule: ScheduleSpec): Unit = {
    log.info(s"Start next run of job ${spec.id}, which was scheduled for $scheduledAt")
    runService.startJobRun(spec, Some(schedule))
    scheduleNextRun()
  }

  def scheduleNextRun(): Unit = {
    val lastScheduledAt = scheduledAt
    cancelSchedule()
    // TODO: only reschedule for one specific schedule!
    spec.schedules.foreach { schedule =>
      val now = clock.instant()
      val from = lastScheduledAt.getOrElse(now)
      val nextTime = schedule.nextExecution(from)
      scheduledAt = Some(nextTime)
      // 60 secs is the smallest unit of reschedule time for cron
      val inSeconds = Math.max(java.time.Duration.between(now, nextTime).getSeconds, 60)
      nextSchedule =
        Some(context.system.scheduler.scheduleOnce(Duration(inSeconds, TimeUnit.SECONDS), self, StartJob(schedule)))
      log.info(s"Spec ${spec.id}: next run is scheduled for: $nextTime (in $inSeconds seconds)")
    }
  }

  def cancelSchedule(): Unit = {
    nextSchedule.foreach { c => if (!c.isCancelled) c.cancel() }
    nextSchedule = None
    scheduledAt = None
  }
}

object JobSpecSchedulerActor {

  case class StartJob(schedule: ScheduleSpec)
  case class UpdateJobSpec(newSpec: JobSpec)

  def props(spec: JobSpec, clock: Clock, runService: JobRunService): Props = {
    Props(new JobSpecSchedulerActor(spec, clock, runService))
  }
} 
Example 155
Source File: Detector.scala    From cloudflow   with Apache License 2.0 5 votes vote down vote up
package cloudflow.installer

import akka.event.LoggingAdapter
import java.util.concurrent.TimeUnit
import org.zeroturnaround.exec._
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.util.{ Failure, Success, Try }

sealed trait ClusterFeature
final case object Scc                                       extends ClusterFeature
final case class StorageClasses(classes: Set[StorageClass]) extends ClusterFeature

final case class StorageClass(name: String, provisioner: String)

case class ClusterFeatures(
    storageClasses: Set[StorageClass] = Set.empty,
    hasSecurityContextConstraints: Boolean = false
) {

  private val set = {
    var s = Set.empty[ClusterFeature]
    if (storageClasses.nonEmpty) s += StorageClasses(storageClasses)
    if (hasSecurityContextConstraints) s += Scc
    s
  }

  def contains(feature: ClusterFeature) = set.contains(feature)

  def print()(implicit log: LoggingAdapter): Unit = {
    val header = s"""+${"-" * 80}+"""
    log.info(header)
    log.info("Features detected:")
    log.info("")

    if (hasSecurityContextConstraints) log.info("Scc")

    storageClasses.foreach {
      case StorageClass(name, provisioner) =>
        log.info(s"Storage class: $name - $provisioner")
    }

    log.info(header)
  }
}

object Detector {
  def apply(): Detector = Detector(executor)
  def executor(commandLine: Array[String], log: LoggingAdapter, settings: Settings): Try[String] = {

    val command = s"${commandLine.mkString(" ")}"
    log.info(s"Executing command '$command'")

    Try(
      new ProcessExecutor()
        .command(commandLine.toList.asJava)
        .readOutput(true)
        .exitValues(0)
        .timeout(settings.executionTimeout, TimeUnit.SECONDS)
        .execute()
        .outputUTF8()
    )
  }
}

case class Detector(executor: (Array[String], LoggingAdapter, Settings) => Try[String]) {
  def detectClusterFeatures()(implicit log: LoggingAdapter, settings: Settings): ClusterFeatures =
    ClusterFeatures(getStorageClasses(), hasSecurityContextConstraints())

  def hasSecurityContextConstraints()(implicit log: LoggingAdapter, settings: Settings): Boolean =
    executor("oc get scc".split(" "), log, settings).isSuccess

  def getStorageClasses()(implicit log: LoggingAdapter, settings: Settings): Set[StorageClass] = {
    @tailrec
    def extractStorageClass(a: List[String], b: Set[StorageClass] = Set.empty): Set[StorageClass] =
      a match {
        case name :: provisioner :: _ :: tail =>
          extractStorageClass(tail, b + StorageClass(name, provisioner))
        case nil @ _ => b
      }

    executor(
      "kubectl get sc --no-headers".split(" "),
      log,
      settings
    ) match {
      case Success(result) =>
        if (result.startsWith("error:")) Set.empty
        else if (result.contains("No resources found")) Set.empty
        else extractStorageClass(result.replaceAll("\n", " ").split(" ").filter(s => s != "(default)" && s != "").toList)
      case Failure(ex) =>
        log.error(s"Failed to query storage classes, ${ex.getMessage()}")
        Set.empty
    }
  }
} 
Example 156
Source File: QueryGuardEvent.scala    From gimel   with Apache License 2.0 5 votes vote down vote up
package com.paypal.gimel.common.query.guard

import java.time.Instant
import java.util.concurrent.{Delayed, TimeUnit}

import com.google.common.base.Objects
import com.google.common.primitives.Ints
import org.joda.time.DateTime

import com.paypal.gimel.logger.Logger

private[query] sealed trait QueryGuardEvent

private[query] trait QueryGuardDelayedEvent extends QueryGuardEvent with Delayed

private[query] case class JobSubmitted(jobId: Int,
                                       jobType: String,
                                       startTime: Long =
                                         Instant.now().toEpochMilli,
                                       estimatedJobEndTime: Long,
                                       estimatedDelayEndTime: Long)
    extends QueryGuardDelayedEvent {
  private val logger = Logger(this.getClass.getName)

  override def getDelay(unit: TimeUnit): Long = {
    val currentInstant = Instant.now().toEpochMilli
    val diff = estimatedDelayEndTime - currentInstant
    logger.info(
      s"[JobSubmitted] Comparing Job with ID: $jobId diff: $diff with end time:" +
        s" ${new DateTime(estimatedDelayEndTime)}, and current instant:" +
        s" ${new DateTime(currentInstant)}"
    )
    unit.convert(diff, TimeUnit.MILLISECONDS)
  }

  override def compareTo(o: Delayed): Int = {
    Ints.saturatedCast(
      this.estimatedDelayEndTime - o
        .asInstanceOf[JobSubmitted]
        .estimatedDelayEndTime
    )
  }

  override def toString: String =
    Objects
      .toStringHelper(this)
      .add("jobId", jobId)
      .add("jobType", jobType)
      .add("startTime", startTime)
      .add("estimatedJobEndTime", estimatedJobEndTime)
      .add("estimatedDelayEndTime", estimatedDelayEndTime)
      .toString
}

object JobSubmitted {
  def apply(jobId: Int,
            jobType: String,
            startTime: Long,
            jobTtl: Int,
            delayTtl: Int): JobSubmitted =
    new JobSubmitted(
      jobId,
      jobType,
      startTime,
      startTime + jobTtl,
      startTime + delayTtl
    )

  def apply(job: JobSubmitted, jobTtl: Int, delayTime: Long): JobSubmitted =
    new JobSubmitted(
      jobId = job.jobId,
      jobType = job.jobType,
      startTime = job.startTime,
      estimatedJobEndTime = job.startTime + jobTtl,
      estimatedDelayEndTime = delayTime
    )
}

private[query] case class JobKill(jobId: Int, jobType: String, reason: String)
    extends QueryGuardEvent {
  override def toString: String =
    Objects
      .toStringHelper(this)
      .add("jobId", jobId)
      .add("jobType", jobType)
      .add("reason", reason)
      .toString
} 
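JobSubmitted implements java.util.concurrent.Delayed precisely so it can sit in a DelayQueue: getDelay decides when the queue releases the event and compareTo orders pending events. Below is a minimal sketch of that interaction; it is hypothetical, not part of the gimel sources, and it is placed in the same package only to satisfy the private[query] visibility of JobSubmitted.

package com.paypal.gimel.common.query.guard

import java.time.Instant
import java.util.concurrent.DelayQueue

object DelayQueueSketch extends App {
  val queue = new DelayQueue[JobSubmitted]()
  val now = Instant.now().toEpochMilli

  // Delay window ends one second from now; the job TTL is two seconds.
  queue.put(JobSubmitted(jobId = 1, jobType = "batch", startTime = now, jobTtl = 2000, delayTtl = 1000))

  // take() blocks until getDelay(...) <= 0, i.e. until estimatedDelayEndTime has passed.
  val released = queue.take()
  println(s"Job ${released.jobId} released after its delay window")
}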
Example 157
Source File: Benchmarks.scala    From boopickle   with Apache License 2.0 5 votes vote down vote up
package boopickle.perftests

import java.util.concurrent.TimeUnit
import org.openjdk.jmh.annotations._

@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class CirceBenchmarks extends TestData with CirceCoding

@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class BoopickleBenchmarks extends TestData with BoopickleCoding

@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class BoopickleSpeedBenchmarks extends TestData with BoopickleSpeedCoding 
Example 158
Source File: ActorTraceSupportSpec.scala    From play-zipkin-tracing   with Apache License 2.0 5 votes vote down vote up
package brave.play.actor

import java.util.concurrent.TimeUnit

import akka.actor.{ActorSystem, Props}
import akka.util.Timeout
import org.scalatest.AsyncFlatSpec
import ActorTraceSupport._
import brave.play.{TestZipkinTraceService, ZipkinTraceServiceLike}


class ActorTraceSupportSpec extends AsyncFlatSpec {

  it should "ask pattern" in {
    val system = ActorSystem("mySystem")
    implicit val tracer = new TestZipkinTraceService
    implicit val timeout = Timeout(5, TimeUnit.SECONDS)

    val actor = system.actorOf(Props(classOf[HelloWorldActor], tracer), "test-actor")

    TraceableActorRef(actor) ? HelloWorldMessage("Test", ActorTraceData()) map { result =>
      assert(result == "Received data: Test")
    }

    TimeUnit.SECONDS.sleep(3)
    tracer.tracing.close()
    system.terminate()

    assert(tracer.reporter.spans.length == 2)
    val parent = tracer.reporter.spans.find(_.name == "? - test-actor").get
    val child  = tracer.reporter.spans.find(_.name == "test-actor").get
    assert(parent.id == child.parentId)
    assert(parent.id != child.id)
  }

}

class HelloWorldActor(val tracer: ZipkinTraceServiceLike) extends TraceableActor {
  def receive = {
    case m: HelloWorldMessage =>
      sender() ! s"Received data: ${m.message}"
  }
}

case class HelloWorldMessage(message: String, traceData: ActorTraceData) extends TraceMessage 
Example 159
Source File: IndexController.scala    From play-zipkin-tracing   with Apache License 2.0 5 votes vote down vote up
package controllers

import java.util.concurrent.TimeUnit
import javax.inject.Named

import akka.actor._
import akka.util.Timeout
import brave.play.ZipkinTraceServiceLike
import brave.play.actor.ActorTraceSupport._
import brave.play.implicits.ZipkinTraceImplicits
import com.google.inject.Inject
import play.api.Logging
import play.api.libs.json.Json
import play.api.mvc._
import services.ApiSampleService

import scala.concurrent.{ExecutionContext, Future}

class IndexController @Inject() (
  @Named("hello-actor") helloActor: ActorRef,
  components: ControllerComponents,
  service: ApiSampleService
) (
  implicit ec: ExecutionContext,
  val tracer: ZipkinTraceServiceLike
) extends AbstractController(components) with Logging with ZipkinTraceImplicits {

  def index = Action.async {
    Future.successful(Ok(Json.obj("status" -> "ok")))
  }

  def once = Action.async { implicit req: Request[_] =>
    logger.debug(req.headers.toSimpleMap.map{ case (k, v) => s"${k}:${v}"}.toSeq.mkString("\n"))

    service.sample("http://localhost:9992/api/once").map(_ => Ok(Json.obj("OK"->"OK")))
  }

  def nest = Action.async { implicit req: Request[_] =>
    logger.debug(req.headers.toSimpleMap.map{ case (k, v) => s"${k}:${v}"}.toSeq.mkString("\n"))

    implicit val timeout = Timeout(5000, TimeUnit.MILLISECONDS)
    val f1 = TraceableActorRef(helloActor) ? HelloActorMessage("This is an actor call!")
    val f2 = service.sample("http://localhost:9992/api/nest")

    for {
      r1 <- f1
      r2 <- f2
    } yield Ok(Json.obj("result" -> (r1 + " " + r2)))
  }
}

case class HelloActorMessage(message: String)(implicit val traceData: ActorTraceData) extends TraceMessage

class HelloActor @Inject()(@Named("child-hello-actor") child: ActorRef)
                          (implicit val tracer: ZipkinTraceServiceLike) extends TraceableActor {
  def receive = {
    case m: HelloActorMessage => {
      Thread.sleep(1000)
      println(m.message)
      TraceableActorRef(child) ! HelloActorMessage("This is a child actor call!")
      sender() ! "result"
    }
  }
}

class ChildHelloActor @Inject()(val tracer: ZipkinTraceServiceLike) extends TraceableActor {
  def receive = {
    case m: HelloActorMessage => {
      Thread.sleep(1000)
      println(m.message)
    }
  }
} 
Example 160
Source File: Main.scala    From play-zipkin-tracing   with Apache License 2.0 5 votes vote down vote up
import java.util.concurrent.TimeUnit

import actors.{HelloWorldActor, HelloWorldMessage}
import akka.actor._
import akka.util.Timeout
import brave.play.actor.ActorTraceSupport._
import brave.play.actor.ZipkinTraceService

import scala.concurrent._
import scala.concurrent.duration.Duration

object Main extends App {

  val system = ActorSystem("mySystem")
  implicit val tracer = new ZipkinTraceService(system, "zipkin-akka-actor")
  implicit val timeout = Timeout(5000, TimeUnit.MILLISECONDS)

  val actor = system.actorOf(Props(classOf[HelloWorldActor], tracer), "parent-actor")

  val f = TraceableActorRef(actor) ? HelloWorldMessage("Test")(ActorTraceData())
  val result = Await.result(f, Duration.Inf)
  println(result)

  Thread.sleep(1000)
  tracer.close()
  system.terminate()

} 
Example 161
Source File: InMemoryStore.scala    From slab   with Apache License 2.0 5 votes vote down vote up
package com.criteo.slab.lib

import java.time.format.{DateTimeFormatter, FormatStyle}
import java.time.temporal.ChronoUnit
import java.time.{Instant, ZoneId}
import java.util.concurrent.{Executors, TimeUnit}

import com.criteo.slab.core.{Codec, Context, Store}
import com.criteo.slab.lib.Values.Slo
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
import scala.util.Try


class InMemoryStore(
                     val expiryDays: Int = 30
                   ) extends Store[Any] {
  private val logger = LoggerFactory.getLogger(this.getClass)
  private val cache = TrieMap.empty[(String, Long), Any]
  private val scheduler = Executors.newSingleThreadScheduledExecutor()

  scheduler.scheduleAtFixedRate(InMemoryStore.createCleaner(cache, expiryDays, logger), 1, 1, TimeUnit.HOURS)
  logger.info(s"InMemoryStore started, entries expire in $expiryDays days")

  sys.addShutdownHook {
    logger.info(s"Shutting down...")
    scheduler.shutdown()
  }

  override def upload[T](id: String, context: Context, v: T)(implicit codec: Codec[T, Any]): Future[Unit] = {
    logger.debug(s"Uploading $id")
    Future.successful {
      cache.putIfAbsent((id, context.when.toEpochMilli), codec.encode(v))
      logger.info(s"Store updated, size: ${cache.size}")
    }
  }

  override def uploadSlo(id: String, context: Context, slo: Slo)(implicit codec: Codec[Slo, Any]): Future[Unit] = {
    upload[Slo](id, context, slo)
  }

  def fetchSloHistory(id: String, from: Instant, until: Instant)(implicit codec: Codec[Slo, Any]): Future[Seq[(Long, Slo)]] = {
    fetchHistory[Slo](id, from, until)(codec)
  }

  override def fetch[T](id: String, context: Context)(implicit codec: Codec[T, Any]): Future[Option[T]] = {
    logger.debug(s"Fetching $id")
    Future.successful {
      cache.get((id, context.when.toEpochMilli)) map { v =>
        codec.decode(v).get
      }
    }
  }

  override def fetchHistory[T](
                                id: String,
                                from: Instant,
                                until: Instant
                              )(implicit ev: Codec[T, Any]): Future[Seq[(Long, T)]] = {
    logger.debug(s"Fetching the history of $id from ${format(from)} until ${format(until)}, cache size: ${cache.size}")
    Future.successful {
      cache.withFilter { case ((_id, ts), _) =>
        _id == id && ts >= from.toEpochMilli && ts <= until.toEpochMilli
      }.map { case ((_, ts), repr) =>
        (ts, ev.decode(repr).get)
      }.toList
    }
  }

  private def format(i: Instant) = DateTimeFormatter.ofLocalizedDateTime(FormatStyle.FULL)
    .withZone(ZoneId.systemDefault)
    .format(i)
}

object InMemoryStore {
  implicit def codec[T] = new Codec[T, Any] {
    override def encode(v: T): Any = v

    override def decode(v: Any): Try[T] = Try(v.asInstanceOf[T])
  }

  def createCleaner(cache: TrieMap[(String, Long), Any], expiryDays: Int, logger: Logger): Runnable = {
    object C extends Runnable {
      override def run(): Unit = {
        val expired = cache.filterKeys(_._2 <= Instant.now.minus(expiryDays, ChronoUnit.DAYS).toEpochMilli).keys
        logger.debug(s"${expired.size} out of ${cache.size} entries have expired, cleaning up...")
        cache --= expired
      }
    }
    C
  }
} 
Example 162
Source File: CorsBenchmark.scala    From akka-http-cors   with Apache License 2.0 5 votes vote down vote up
package ch.megard.akka.http.cors

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.model.headers.{Origin, `Access-Control-Request-Method`}
import akka.http.scaladsl.model.{HttpMethods, HttpRequest}
import akka.http.scaladsl.server.Directives
import akka.http.scaladsl.unmarshalling.Unmarshal
import ch.megard.akka.http.cors.scaladsl.CorsDirectives
import ch.megard.akka.http.cors.scaladsl.settings.CorsSettings
import com.typesafe.config.ConfigFactory
import org.openjdk.jmh.annotations._

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}

@State(Scope.Benchmark)
@OutputTimeUnit(TimeUnit.SECONDS)
@BenchmarkMode(Array(Mode.Throughput))
class CorsBenchmark extends Directives with CorsDirectives {
  private val config = ConfigFactory.parseString("akka.loglevel = ERROR").withFallback(ConfigFactory.load())

  implicit private val system: ActorSystem  = ActorSystem("CorsBenchmark", config)
  implicit private val ec: ExecutionContext = scala.concurrent.ExecutionContext.global

  private val http         = Http()
  private val corsSettings = CorsSettings.default

  private var binding: ServerBinding        = _
  private var request: HttpRequest          = _
  private var requestCors: HttpRequest      = _
  private var requestPreflight: HttpRequest = _

  @Setup
  def setup(): Unit = {
    val route = {
      path("baseline") {
        get {
          complete("ok")
        }
      } ~ path("cors") {
        cors(corsSettings) {
          get {
            complete("ok")
          }
        }
      }
    }
    val origin = Origin("http://example.com")

    binding = Await.result(http.bindAndHandle(route, "127.0.0.1", 0), 1.second)
    val base = s"http://${binding.localAddress.getHostString}:${binding.localAddress.getPort}"

    request = HttpRequest(uri = base + "/baseline")
    requestCors = HttpRequest(
      method = HttpMethods.GET,
      uri = base + "/cors",
      headers = List(origin)
    )
    requestPreflight = HttpRequest(
      method = HttpMethods.OPTIONS,
      uri = base + "/cors",
      headers = List(origin, `Access-Control-Request-Method`(HttpMethods.GET))
    )
  }

  @TearDown
  def shutdown(): Unit = {
    val f = for {
      _ <- http.shutdownAllConnectionPools()
      _ <- binding.terminate(1.second)
      _ <- system.terminate()
    } yield ()
    Await.ready(f, 5.seconds)
  }

  @Benchmark
  def baseline(): Unit = {
    val f = http.singleRequest(request).flatMap(r => Unmarshal(r.entity).to[String])
    assert(Await.result(f, 1.second) == "ok")
  }

  @Benchmark
  def default_cors(): Unit = {
    val f = http.singleRequest(requestCors).flatMap(r => Unmarshal(r.entity).to[String])
    assert(Await.result(f, 1.second) == "ok")
  }

  @Benchmark
  def default_preflight(): Unit = {
    val f = http.singleRequest(requestPreflight).flatMap(r => Unmarshal(r.entity).to[String])
    assert(Await.result(f, 1.second) == "")
  }
} 
Example 163
Source File: package.scala    From fs2-cron   with Apache License 2.0 5 votes vote down vote up
package eu.timepit

import java.time.LocalDateTime
import java.time.temporal.ChronoUnit
import java.util.concurrent.TimeUnit

import cats.ApplicativeError
import cats.effect.{Concurrent, Sync, Timer}
import cron4s.expr.CronExpr
import cron4s.lib.javatime._
import fs2.Stream

import scala.concurrent.duration.FiniteDuration

package object fs2cron {

  // NOTE: the original package object also defines the `durationFromNow` and `awakeEveryCron`
  // helpers referenced below; they were elided from this excerpt along with the scaladoc.
  def sleepCron[F[_]: Sync](cronExpr: CronExpr)(implicit timer: Timer[F]): Stream[F, Unit] =
    durationFromNow(cronExpr).flatMap(Stream.sleep[F])

  def schedule[F[_]: Concurrent, A](tasks: List[(CronExpr, Stream[F, A])])(implicit
      timer: Timer[F]
  ): Stream[F, A] = {
    val scheduled = tasks.map { case (cronExpr, task) => awakeEveryCron[F](cronExpr) >> task }
    Stream.emits(scheduled).covary[F].parJoinUnbounded
  }
} 
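A hypothetical usage sketch of the combinators above, not part of the fs2-cron sources: two streams scheduled on different cron expressions and merged by schedule. Cron.unsafeParse comes from cron4s and the IOApp wiring from cats-effect; the object name and the cron expressions are illustrative.

import cats.effect.{ExitCode, IO, IOApp}
import cron4s.Cron
import eu.timepit.fs2cron._
import fs2.Stream

object CronSchedulingSketch extends IOApp {
  // Seconds-resolution cron expressions (cron4s syntax): every 2 seconds / every 5 seconds.
  private val everyTwoSeconds  = Cron.unsafeParse("*/2 * * ? * *")
  private val everyFiveSeconds = Cron.unsafeParse("*/5 * * ? * *")

  override def run(args: List[String]): IO[ExitCode] =
    schedule(List(
      everyTwoSeconds  -> Stream.eval(IO(println("tick: 2s task"))),
      everyFiveSeconds -> Stream.eval(IO(println("tick: 5s task")))
    )).compile.drain.map(_ => ExitCode.Success)
}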
Example 164
Source File: TransformerBenchmark.scala    From featran   with Apache License 2.0 5 votes vote down vote up
package com.spotify.featran.jmh

import java.util.concurrent.TimeUnit

import com.spotify.featran.transformers._
import com.spotify.featran._
import org.openjdk.jmh.annotations._
import org.openjdk.jmh.infra.Blackhole

@BenchmarkMode(Array(Mode.AverageTime))
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@State(Scope.Thread)
class TransformerBenchmark {

  import Fixtures._

  def benchmark[A](transformer: Transformer[A, _, _], bh: Blackhole)
                  (implicit fixture: Seq[A]): Seq[Unit] = {
    implicit val fb: FeatureBuilder[Unit] = new NoOpFeatureBuilder(bh)
    val fe = FeatureSpec.of[A].required(identity)(transformer).extract(fixture)
    fe.featureValues[Unit]
  }

  // TODO: figure out how to verify that all transformers are covered

  @Benchmark def binarizer(bh: Blackhole): Seq[Unit] = benchmark(Binarizer("t"), bh)
  @Benchmark def bucketizer(bh: Blackhole): Seq[Unit] =
    benchmark(Bucketizer("t", Array(0.0, 250.0, 500.0, 750.0, 1000.0)), bh)
  @Benchmark def hashNHotEncoder(bh: Blackhole): Seq[Unit] = benchmark(HashNHotEncoder("t"), bh)
  @Benchmark def hashNHotWeightedEncoder(bh: Blackhole): Seq[Unit] =
    benchmark(HashNHotWeightedEncoder("t"), bh)
  @Benchmark def hashOneHotEncoder(bh: Blackhole): Seq[Unit] = benchmark(HashOneHotEncoder("t"), bh)
  @Benchmark def heavyHitters(bh: Blackhole): Seq[Unit] = benchmark(HeavyHitters("t", 100), bh)
  @Benchmark def identityB(bh: Blackhole): Seq[Unit] = benchmark(Identity("t"), bh)
  @Benchmark def maxAbsScaler(bh: Blackhole): Seq[Unit] = benchmark(MaxAbsScaler("t"), bh)
  @Benchmark def mdl(bh: Blackhole): Seq[Unit] = benchmark(MDL[String]("t"), bh)
  @Benchmark def minMaxScaler(bh: Blackhole): Seq[Unit] = benchmark(MinMaxScaler("t"), bh)
  @Benchmark def nGrams(bh: Blackhole): Seq[Unit] = benchmark(NGrams("t"), bh)
  @Benchmark def nHotEncoder(bh: Blackhole): Seq[Unit] = benchmark(NHotEncoder("t"), bh)
  @Benchmark def nHotWeightedEncoder(bh: Blackhole): Seq[Unit] =
    benchmark(NHotWeightedEncoder("t"), bh)
  @Benchmark def normalizer(bh: Blackhole): Seq[Unit] = benchmark(Normalizer("t"), bh)
  @Benchmark def oneHotEncoder(bh: Blackhole): Seq[Unit] = benchmark(OneHotEncoder("t"), bh)
  @Benchmark def polynomialExpansion(bh: Blackhole): Seq[Unit] =
    benchmark(PolynomialExpansion("t"), bh)
  @Benchmark def quantileDiscretizer(bh: Blackhole): Seq[Unit] =
    benchmark(QuantileDiscretizer("t"), bh)
  @Benchmark def standardScaler(bh: Blackhole): Seq[Unit] = benchmark(StandardScaler("t"), bh)
  @Benchmark def topNOneHotEncoder(bh: Blackhole): Seq[Unit] =
    benchmark(TopNOneHotEncoder("t", 100), bh)
  @Benchmark def vectorIdentity(bh: Blackhole): Seq[Unit] =
    benchmark(VectorIdentity[Array]("t"), bh)
  @Benchmark def vonMisesEvaluator(bh: Blackhole): Seq[Unit] =
    benchmark(VonMisesEvaluator("t", 100.0, 0.001, Array(1.0, 2.0, 3.0, 4.0, 5.0)), bh)

}

private object Fixtures {
  implicit val doubles: Seq[Double] = (0 until 1000).map(_.toDouble)
  implicit val labels: Seq[String] = (0 until 1000).map(x => "l" + (x % 50))
  implicit val mdlRecords: Seq[MDLRecord[String]] =
    (0 until 1000).map(x => MDLRecord((x % 3).toString, x.toDouble))
  implicit val nLabels: Seq[Seq[String]] =
    (0 until 1000).map(x => (0 until (x % 50 + 1)).map("l" + _))
  implicit val nWeightedLabels: Seq[Seq[WeightedLabel]] = nLabels.map(_.map(WeightedLabel(_, 1.0)))
  implicit val vectors: Seq[Array[Double]] = (0 until 1000).map(x => Array.fill(10)(x / 1000.0))
}

private class NoOpFeatureBuilder(val bh: Blackhole) extends FeatureBuilder[Unit] {
  override def init(dimension: Int): Unit = bh.consume(dimension)
  override def result: Unit = bh.consume(Unit)
  override def add(name: String, value: Double): Unit = {
    bh.consume(name)
    bh.consume(value)
  }
  override def skip(): Unit = bh.consume(Unit)
  override def newBuilder: FeatureBuilder[Unit] = new NoOpFeatureBuilder(bh)
} 
Example 165
Source File: ExtractorBenchmark.scala    From featran   with Apache License 2.0 5 votes vote down vote up
package com.spotify.featran.jmh

import java.util.concurrent.TimeUnit

import com.spotify.featran._
import com.spotify.featran.transformers._
import org.openjdk.jmh.annotations._

@BenchmarkMode(Array(Mode.AverageTime))
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@State(Scope.Thread)
class ExtractorBenchmark {

  type A = (Double, String)
  val fs: FeatureSpec[A] = FeatureSpec.of[A]
    .required(_._1)(StandardScaler("std"))
    .required(_._2)(OneHotEncoder("onehot"))
  val input: Seq[A] = (1 to 10).map(x => (x.toDouble, x.toString))
  val settings: Seq[String] = fs.extract(input).featureSettings
  val re: RecordExtractor[A, Seq[Double]] = fs.extractWithSettings(settings.head)

  @Benchmark def collection: Seq[Seq[Double]] =
    fs.extractWithSettings(input, settings).featureValues[Seq[Double]]
  @Benchmark def collection1: Seq[Double] =
    fs.extractWithSettings(Seq((1.0, "1.0")), settings).featureValues[Seq[Double]].head
  @Benchmark def record: Seq[Seq[Double]] = input.map(re.featureValue)
  @Benchmark def record1: Seq[Double] = re.featureValue((1.0, "1.0"))

} 
Example 166
Source File: FeatureBuilderBenchmark.scala    From featran   with Apache License 2.0 5 votes vote down vote up
package com.spotify.featran.jmh

import java.util.concurrent.TimeUnit

import breeze.linalg._
import com.spotify.featran._
import com.spotify.featran.tensorflow._
import org.openjdk.jmh.annotations._
import org.tensorflow.example.Example

@BenchmarkMode(Array(Mode.AverageTime))
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@State(Scope.Thread)
class FeatureBuilderBenchmark {

  private val names = (750 until 1000).map(_.toString)
  private val values = (750 until 1000).map(_.toDouble)

  def benchmark[T: FeatureBuilder]: T = {
    val fb = FeatureBuilder[T]
    fb.init(1000)
    var i = 0
    while (i < 500) {
      fb.add(i.toString, i.toDouble)
      fb.skip()
      i += 2
    }
    fb.skip(250)
    fb.add(names, values)
    fb.result
  }

  @Benchmark def array: Unit = benchmark[Array[Double]]
  @Benchmark def seq: Unit = benchmark[Seq[Double]]
  @Benchmark def sparseArray: Unit = benchmark[SparseArray[Double]]
  @Benchmark def denseVector: Unit = benchmark[DenseVector[Double]]
  @Benchmark def sparseVector: Unit = benchmark[SparseVector[Double]]
  @Benchmark def map: Unit = benchmark[Map[String, Double]]
  @Benchmark def tensorflow: Unit = benchmark[Example]

} 
Example 167
Source File: MockTime.scala    From kafka-connect-sap   with Apache License 2.0 5 votes vote down vote up
package com.sap.kafka.connect.source

import java.lang
import java.util.concurrent.TimeUnit
import java.util.function.Supplier

import org.apache.kafka.common.utils.Time

class MockTime extends Time {
  private var nanos = System.nanoTime()
  private val autoTickMs = 0

  override def milliseconds(): Long = {
    sleep(autoTickMs)
    TimeUnit.MILLISECONDS.convert(this.nanos, TimeUnit.NANOSECONDS)
  }

  override def nanoseconds(): Long = {
    sleep(autoTickMs)
    nanos
  }

  override def hiResClockMs(): Long = ???

  override def sleep(ms: Long): Unit = {
    this.nanos += TimeUnit.NANOSECONDS.convert(ms, TimeUnit.MILLISECONDS)
  }

  override def waitObject(o: Any, supplier: Supplier[lang.Boolean], l: Long): Unit = ???
} 
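A quick illustrative check, an assumption rather than part of the connector's tests: sleep() advances the mocked clock instead of blocking, and milliseconds() converts the stored nanoseconds through TimeUnit.

object MockTimeSketch extends App {
  val time = new MockTime
  val before = time.milliseconds()

  time.sleep(5000) // advances the internal nano counter by 5s worth of nanos, returns immediately

  assert(time.milliseconds() - before == 5000)
  println(s"mocked clock advanced by ${time.milliseconds() - before} ms")
}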
Example 168
Source File: CachedPulsarClient.scala    From pulsar-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.pulsar

import java.{util => ju}
import java.util.concurrent.{ConcurrentMap, ExecutionException, TimeUnit}

import scala.collection.JavaConverters._
import scala.util.control.NonFatal

import com.google.common.cache._
import com.google.common.util.concurrent.{ExecutionError, UncheckedExecutionException}

import org.apache.spark.SparkEnv
import org.apache.spark.internal.Logging

private[pulsar] object CachedPulsarClient extends Logging {

  private type Client = org.apache.pulsar.client.api.PulsarClient

  private val defaultCacheExpireTimeout = TimeUnit.MINUTES.toMillis(10)

  private lazy val cacheExpireTimeout: Long =
    Option(SparkEnv.get)
      .map(_.conf
        .getTimeAsMs("spark.pulsar.client.cache.timeout", s"${defaultCacheExpireTimeout}ms"))
      .getOrElse(defaultCacheExpireTimeout)

  private val cacheLoader = new CacheLoader[Seq[(String, Object)], Client] {
    override def load(config: Seq[(String, Object)]): Client = {
      val configMap = config.map(x => x._1 -> x._2).toMap.asJava
      createPulsarClient(configMap)
    }
  }

  private val removalListener = new RemovalListener[Seq[(String, Object)], Client]() {
    override def onRemoval(
        notification: RemovalNotification[Seq[(String, Object)], Client]): Unit = {
      val paramsSeq: Seq[(String, Object)] = notification.getKey
      val client: Client = notification.getValue
      logDebug(
        s"Evicting pulsar producer $client params: $paramsSeq, due to ${notification.getCause}")
      close(paramsSeq, client)
    }
  }

  private lazy val guavaCache: LoadingCache[Seq[(String, Object)], Client] =
    CacheBuilder
      .newBuilder()
      .expireAfterAccess(cacheExpireTimeout, TimeUnit.MILLISECONDS)
      .removalListener(removalListener)
      .build[Seq[(String, Object)], Client](cacheLoader)

  private def createPulsarClient(pulsarConf: ju.Map[String, Object]): Client = {
    val pulsarServiceUrl =
      pulsarConf.get(PulsarOptions.SERVICE_URL_OPTION_KEY).asInstanceOf[String]
    val clientConf = new PulsarConfigUpdater(
      "pulsarClientCache",
      pulsarConf.asScala.toMap,
      PulsarOptions.FILTERED_KEYS
    ).rebuild()
    logInfo(s"Client Conf = ${clientConf}")
    try {
      val pulsarClient: Client = org.apache.pulsar.client.api.PulsarClient
        .builder()
        .serviceUrl(pulsarServiceUrl)
        .loadConf(clientConf)
        .build();
      logDebug(
        s"Created a new instance of PulsarClient for serviceUrl = $pulsarServiceUrl,"
          + s" clientConf = $clientConf.")
      pulsarClient
    } catch {
      case e: Throwable =>
        logError(
          s"Failed to create PulsarClient to serviceUrl ${pulsarServiceUrl}"
            + s" using client conf ${clientConf}",
          e)
        throw e
    }
  }

  
  private def close(paramsSeq: Seq[(String, Object)], client: Client): Unit = {
    try {
      logInfo(s"Closing the Pulsar Client with params: ${paramsSeq.mkString("\n")}.")
      client.close()
    } catch {
      case NonFatal(e) => logWarning("Error while closing pulsar producer.", e)
    }
  }

  private[pulsar] def clear(): Unit = {
    logInfo("Cleaning up guava cache.")
    guavaCache.invalidateAll()
  }

  // Intended for testing purpose only.
  private def getAsMap: ConcurrentMap[Seq[(String, Object)], Client] = guavaCache.asMap()
} 
Example 169
Source File: HiveAcidSinkOptions.scala    From spark-acid   with Apache License 2.0 5 votes vote down vote up
package com.qubole.spark.hiveacid.streaming

import java.util.concurrent.TimeUnit

import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap

import scala.util.Try

class HiveAcidSinkOptions(parameters: CaseInsensitiveMap[String]) {

  import HiveAcidSinkOptions._

  def this(parameters: Map[String, String]) = this(CaseInsensitiveMap(parameters))

  val tableName = parameters.get("table").getOrElse{
    throw new IllegalArgumentException("Table Name is not specified")
  }

  val fileCleanupDelayMs = withLongParameter(CLEANUP_DELAY_KEY, DEFAULT_CLEANUP_DELAY)

  val isDeletingExpiredLog = withBooleanParameter(LOG_DELETION_KEY, DEFAULT_LOG_DELETION)

  val compactInterval = withIntParameter(COMPACT_INTERVAL_KEY, DEFAULT_COMPACT_INTERVAL)

  val minBatchesToRetain = withIntParameter(MIN_BATCHES_TO_RETAIN_KEY, DEFAULT_MIN_BATCHES_TO_RETAIN)

  val metadataDir = parameters.get(METADATA_DIR_KEY)

  private def withIntParameter(name: String, default: Int): Int = {
    parameters.get(name).map { str =>
      Try(str.toInt).toOption.filter(_ > 0).getOrElse {
        throw new IllegalArgumentException(
          s"Invalid value '$str' for option '$name', must be a positive integer")
      }
    }.getOrElse(default)
  }

  private def withLongParameter(name: String, default: Long): Long = {
    parameters.get(name).map { str =>
      Try(str.toLong).toOption.filter(_ >= 0).getOrElse {
        throw new IllegalArgumentException(
          s"Invalid value '$str' for option '$name', must be a positive integer")
      }
    }.getOrElse(default)
  }

  private def withBooleanParameter(name: String, default: Boolean): Boolean = {
    parameters.get(name).map { str =>
      try {
        str.toBoolean
      } catch {
        case _: IllegalArgumentException =>
          throw new IllegalArgumentException(
            s"Invalid value '$str' for option '$name', must be true or false")
      }
    }.getOrElse(default)
  }

}

object HiveAcidSinkOptions {

  val DEFAULT_CLEANUP_DELAY = TimeUnit.MINUTES.toMillis(10)
  val DEFAULT_LOG_DELETION = true
  val DEFAULT_COMPACT_INTERVAL = 10
  val DEFAULT_MIN_BATCHES_TO_RETAIN = 100

  val CLEANUP_DELAY_KEY = "spark.acid.streaming.log.cleanupDelayMs"
  val LOG_DELETION_KEY = "spark.acid.streaming.log.deletion"
  val COMPACT_INTERVAL_KEY = "spark.acid.streaming.log.compactInterval"
  val MIN_BATCHES_TO_RETAIN_KEY = "spark.acid.streaming.log.minBatchesToRetain"
  val METADATA_DIR_KEY = "spark.acid.streaming.log.metadataDir"

} 
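A small, hypothetical check (not from the spark-acid sources) showing how the parsing above behaves: explicitly set options are validated and parsed, while everything else falls back to the defaults, so the cleanup delay stays at TimeUnit.MINUTES.toMillis(10) milliseconds unless overridden.

object HiveAcidSinkOptionsSketch extends App {
  import HiveAcidSinkOptions._

  val options = new HiveAcidSinkOptions(Map(
    "table"              -> "acid_db.events",
    COMPACT_INTERVAL_KEY -> "20"
  ))

  println(options.tableName)          // acid_db.events
  println(options.compactInterval)    // 20 (overridden)
  println(options.fileCleanupDelayMs) // 600000, i.e. TimeUnit.MINUTES.toMillis(10)
  println(options.minBatchesToRetain) // 100 (default)
}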
Example 170
Source File: SwaveIdentityProcessorVerification.scala    From swave   with Mozilla Public License 2.0 5 votes vote down vote up
package swave.core.tck

import java.util.concurrent.{ExecutorService, Executors, TimeUnit}
import org.reactivestreams.Publisher
import org.reactivestreams.tck.{IdentityProcessorVerification, TestEnvironment}
import org.scalatest.testng.TestNGSuiteLike
import org.testng.SkipException
import org.testng.annotations.AfterClass
import swave.core._

abstract class SwaveIdentityProcessorVerification[T](val testEnv: TestEnvironment, publisherShutdownTimeout: Long)
    extends IdentityProcessorVerification[T](testEnv, publisherShutdownTimeout) with TestNGSuiteLike
    with StreamEnvShutdown {

  def this(printlnDebug: Boolean) =
    this(
      new TestEnvironment(Timeouts.defaultTimeout.toMillis, printlnDebug),
      Timeouts.publisherShutdownTimeout.toMillis)

  def this() = this(false)

  override def createFailedPublisher(): Publisher[T] =
    Spout.failing[T](new Exception("Nope")).drainTo(Drain.toPublisher()).get

  // Publishers created by swave don't support fanout by default
  override def maxSupportedSubscribers: Long = 1L

  override def required_spec313_cancelMustMakeThePublisherEventuallyDropAllReferencesToTheSubscriber(): Unit =
    throw new SkipException("Not relevant for publisher w/o fanout support")

  override lazy val publisherExecutorService: ExecutorService =
    Executors.newFixedThreadPool(3)

  @AfterClass
  def shutdownPublisherExecutorService(): Unit = {
    publisherExecutorService.shutdown()
    publisherExecutorService.awaitTermination(3, TimeUnit.SECONDS)
  }
} 
Example 171
Source File: SparkConnectorScalaBaseTSE.scala    From neo4j-spark-connector   with Apache License 2.0 5 votes vote down vote up
package org.neo4j.spark

import java.util.concurrent.TimeUnit

import org.apache.spark.{SparkConf, SparkContext}
import org.hamcrest.Matchers
import org.junit._
import org.junit.rules.TestName
import org.neo4j.driver.summary.ResultSummary
import org.neo4j.driver.{Transaction, TransactionWork}

object SparkConnectorScalaBaseTSE {

  private var startedFromSuite = true

  @BeforeClass
  def setUpContainer() = {
    if (!SparkConnectorScalaSuiteIT.server.isRunning) {
      startedFromSuite = false
      SparkConnectorScalaSuiteIT.setUpContainer()
    }
  }

  @AfterClass
  def tearDownContainer() = {
    if (!startedFromSuite) {
      SparkConnectorScalaSuiteIT.tearDownContainer()
    }
  }

}

class SparkConnectorScalaBaseTSE {

  val conf: SparkConf = SparkConnectorScalaSuiteIT.conf
  val sc: SparkContext = SparkConnectorScalaSuiteIT.sc

  val _testName: TestName = new TestName

  @Rule
  def testName = _testName

  @Before
  def before() {
    SparkConnectorScalaSuiteIT.session()
      .writeTransaction(new TransactionWork[ResultSummary] {
        override def execute(tx: Transaction): ResultSummary = tx.run("MATCH (n) DETACH DELETE n").consume()
      })
  }

  @After
  def after() {
    try {
      utils.Assert.assertEventually(new utils.Assert.ThrowingSupplier[Boolean, Exception] {
        override def get(): Boolean = {
          val afterConnections = SparkConnectorScalaSuiteIT.getActiveConnections
          SparkConnectorScalaSuiteIT.connections == afterConnections
        }
      }, Matchers.equalTo(true), 60, TimeUnit.SECONDS)
    } finally {
      val afterConnections = SparkConnectorScalaSuiteIT.getActiveConnections
      if (SparkConnectorScalaSuiteIT.connections != afterConnections) { // just for debug purposes
        println(s"For test ${testName.getMethodName} => connections before: ${SparkConnectorScalaSuiteIT.connections}, after: $afterConnections")
      }
    }
  }

} 
Example 172
Source File: InProcessDeploy.scala    From polynote   with Apache License 2.0 5 votes vote down vote up
package polynote.testing.kernel.remote

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean

import polynote.kernel.{BaseEnv, GlobalEnv, Kernel}
import polynote.kernel.environment.CurrentNotebook
import polynote.kernel.logging.Logging
import polynote.kernel.remote.{RemoteKernelClient, SocketTransport}
import zio.{Fiber, RIO, Ref, ZIO}
import zio.duration.Duration

class InProcessDeploy(kernelFactory: Kernel.Factory.LocalService, clientRef: Ref[RemoteKernelClient]) extends SocketTransport.Deploy {
  def deployKernel(transport: SocketTransport, serverAddress: InetSocketAddress): RIO[BaseEnv with GlobalEnv with CurrentNotebook, SocketTransport.DeployedProcess] = {
    val connectClient = RemoteKernelClient.tapRunThrowable(
      RemoteKernelClient.Args(
        Some(serverAddress.getHostString),
        Some(serverAddress.getPort),
        Some(kernelFactory)),
      Some(clientRef))

    connectClient.forkDaemon.map(new InProcessDeploy.Process(_))
  }

}

object InProcessDeploy {
  class Process(fiber: Fiber[Throwable, Int]) extends SocketTransport.DeployedProcess {
    def exitStatus: RIO[BaseEnv, Option[Int]] = fiber.poll.flatMap {
      case Some(exit) => ZIO.fromEither(exit.toEither).map(Some(_))
      case None => ZIO.succeed(None)
    }

    def awaitExit(timeout: Long, timeUnit: TimeUnit): RIO[BaseEnv, Option[Int]] = {
      fiber.join.disconnect.timeout(Duration(timeout, timeUnit))
    }

    def kill(): RIO[BaseEnv, Unit] = fiber.interrupt.unit
  }
} 
Example 173
Source File: MapLifter.scala    From diffy   with GNU Affero General Public License v3.0 5 votes vote down vote up
package ai.diffy.lifter

import com.twitter.concurrent.NamedPoolThreadFactory
import com.twitter.util.{ExecutorServiceFuturePool, Future, FuturePool}
import java.util.concurrent.{ArrayBlockingQueue, ThreadPoolExecutor, TimeUnit}

case class Message(endpoint: Option[String], result: FieldMap[Any])

trait MapLifter {
  def apply(input: Array[Byte]): Future[Message]
}

object MapLifterPool {
  val QueueSizeDefault = 5

  def apply(mapLifterFactory: => MapLifter) = {
    val executorService =
      new ThreadPoolExecutor(
        3,   // core pool size
        10,  // max pool size
        500, // keep alive time
        TimeUnit.MILLISECONDS,
        new ArrayBlockingQueue[Runnable](10), // work queue
        new NamedPoolThreadFactory("maplifter", makeDaemons = true),
        new ThreadPoolExecutor.AbortPolicy()
      )
    executorService.prestartCoreThread()
    new MapLifterPool(mapLifterFactory, new ExecutorServiceFuturePool(executorService))
  }
}

class MapLifterPool(underlying: MapLifter, futurePool: FuturePool) extends MapLifter {
  override def apply(input: Array[Byte]): Future[Message] =
    (futurePool { underlying(input) }).flatten
} 
Example 174
Source File: HelloSpec.scala    From sbt-guardrail   with MIT License 5 votes vote down vote up
package helloworld

import java.util.concurrent.TimeUnit

import com.example.clients.petstore.user.LogoutUserResponse
import org.scalatest._
import org.scalatest.concurrent.ScalaFutures

class HelloSpec extends FlatSpec
  with Matchers
  with ScalaFutures {

  "UserClient" should "pass sanity check" in {
      val userClient = Hello.buildUserClient
      val future = userClient.logoutUser().call().toCompletableFuture
      val logoutResponse = future.get(10, TimeUnit.SECONDS)
      future.isDone shouldBe true
      future.isCompletedExceptionally shouldBe false
      logoutResponse.getClass shouldBe classOf[LogoutUserResponse.Ok]
  }
} 
Example 175
Source File: EarlyStoppingMNIST.scala    From dl4scala   with MIT License 5 votes vote down vote up
package org.dl4scala.examples.misc.earlystopping

import java.util.Collections
import java.util.concurrent.TimeUnit

import org.apache.commons.io.FilenameUtils
import org.deeplearning4j.datasets.iterator.impl.MnistDataSetIterator
import org.deeplearning4j.earlystopping.EarlyStoppingConfiguration
import org.deeplearning4j.earlystopping.saver.LocalFileModelSaver
import org.deeplearning4j.earlystopping.scorecalc.DataSetLossCalculator
import org.deeplearning4j.earlystopping.termination.{MaxEpochsTerminationCondition, MaxTimeIterationTerminationCondition}
import org.deeplearning4j.earlystopping.trainer.EarlyStoppingTrainer
import org.deeplearning4j.nn.api.OptimizationAlgorithm
import org.deeplearning4j.nn.conf.inputs.InputType
import org.deeplearning4j.nn.conf.layers.{ConvolutionLayer, DenseLayer, OutputLayer, SubsamplingLayer}
import org.deeplearning4j.nn.conf.{NeuralNetConfiguration, Updater}
import org.deeplearning4j.nn.weights.WeightInit
import org.nd4j.linalg.activations.Activation
import org.nd4j.linalg.lossfunctions.LossFunctions
import scala.collection.JavaConverters._
import java.util

object EarlyStoppingMNIST {
  def main(args: Array[String]): Unit = {
    // Configure network://Configure network:
    val nChannels = 1
    val outputNum = 10
    val batchSize = 25
    val iterations = 1
    val seed = 123

    val configuration = new NeuralNetConfiguration.Builder()
      .seed(seed)
      .iterations(iterations)
      .regularization(true).l2(0.0005)
      .learningRate(0.02)
      .weightInit(WeightInit.XAVIER)
      .activation(Activation.RELU)
      .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
      .updater(Updater.NESTEROVS)
      .list()
      .layer(0, new ConvolutionLayer.Builder(5, 5)
        .nIn(nChannels)
        .stride(1, 1)
        .nOut(20).dropOut(0.5)
        .build())
      .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX)
        .kernelSize(2, 2)
        .stride(2, 2)
        .build())
      .layer(2, new DenseLayer.Builder()
        .nOut(500).build())
      .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
        .nOut(outputNum)
        .activation(Activation.SOFTMAX)
        .build())
      .setInputType(InputType.convolutionalFlat(28, 28, 1)) //See note in LenetMnistExample
      .backprop(true).pretrain(false).build()

    // Get data:
    val mnistTrain1024 = new MnistDataSetIterator(batchSize, 1024, false, true, true, 12345)
    val mnistTest512 = new MnistDataSetIterator(batchSize, 512, false, false, true, 12345)


    val tempDir = System.getProperty("java.io.tmpdir")
    val exampleDirectory = FilenameUtils.concat(tempDir, "DL4JEarlyStoppingExample/")
    val saver = new LocalFileModelSaver(exampleDirectory)

    val esConf = new EarlyStoppingConfiguration.Builder()
      .epochTerminationConditions(new MaxEpochsTerminationCondition(50)) //Max of 50 epochs
      .evaluateEveryNEpochs(1)
      .iterationTerminationConditions(new MaxTimeIterationTerminationCondition(20, TimeUnit.MINUTES)) //Max of 20 minutes
      .scoreCalculator(new DataSetLossCalculator(mnistTest512, true))     //Calculate test set score
      .modelSaver(saver)
      .build()

    val trainer = new EarlyStoppingTrainer(esConf, configuration, mnistTrain1024)

    //Conduct early stopping training://Conduct early stopping training:
    val result = trainer.fit()
    println("Termination reason: " + result.getTerminationReason)
    println("Termination details: " + result.getTerminationDetails)
    println("Total epochs: " + result.getTotalEpochs)
    println("Best epoch number: " + result.getBestModelEpoch)
    println("Score at best epoch: " + result.getBestModelScore)

    //Print score vs. epoch
    val scoreVsEpoch = result.getScoreVsEpoch

    val list = new util.ArrayList[Integer](scoreVsEpoch.keySet)
    Collections.sort(list)
    System.out.println("Score vs. Epoch:")
    for (i <- list.asScala) {
      println(i + "\t" + scoreVsEpoch.get(i))
    }
  }
} 
Example 176
Source File: StatsDReporter.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.metrics.reporting

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import com.github.jjagged.metrics.reporting.statsd.StatsD
import com.github.vonnagy.service.container.log.LoggingAdapter
import com.typesafe.config.Config


  def report(): Unit = {

    reporter.report(metrics.metricRegistry.getGauges(),
      metrics.metricRegistry.getCounters(),
      metrics.metricRegistry.getHistograms(),
      metrics.metricRegistry.getMeters(),
      metrics.metricRegistry.getTimers())
  }

  private[reporting] def getReporter(): com.github.jjagged.metrics.reporting.StatsDReporter = {

    log.info("Initializing the StatsD metrics reporter");

    com.github.jjagged.metrics.reporting.StatsDReporter.forRegistry(metrics.metricRegistry)
      .prefixedWith(this.prefix)
      .withTags("{'host':'" + host + "', 'application':'" + application.replace(' ', '-').toLowerCase() + "'}")
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .build(statsD);

  }

  private[reporting] def getStatsD(): StatsD = {
    new StatsD(statsdHost, port);
  }
} 
Example 177
Source File: DogStatsDReporter.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.metrics.reporting

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import com.github.vonnagy.service.container.log.LoggingAdapter
import com.typesafe.config.Config
import org.coursera.metrics.datadog.DatadogReporter.Expansion
import org.coursera.metrics.datadog.transport.{Transport, UdpTransport}
import org.coursera.metrics.datadog.DefaultMetricNameFormatter

import scala.collection.JavaConverters._

class DogStatsDReporter(implicit val system: ActorSystem, val config: Config) extends ScheduledReporter with LoggingAdapter {

  private lazy val reporter = getReporter
  private lazy val transport = getTransport

  private[reporting] val dogHost = config.getString("host")
  private[reporting] val port = config.getInt("port")

  private[reporting] val prefix = config.getString("metric-prefix")
  private[reporting] val apiKey = config.getString("api-key")

  private[reporting] val tags = config.getStringList("tags").asScala ++ Seq(
    s"app:${application.replace(" ", "-").toLowerCase}",
    s"version:$version")

  
  def report(): Unit = {

    reporter.report(metrics.metricRegistry.getGauges(),
      metrics.metricRegistry.getCounters(),
      metrics.metricRegistry.getHistograms(),
      metrics.metricRegistry.getMeters(),
      metrics.metricRegistry.getTimers())
  }

  private[reporting] def getReporter(): org.coursera.metrics.datadog.DatadogReporter = {

    log.info("Initializing the DogStatsD metrics reporter")
    org.coursera.metrics.datadog.DatadogReporter.forRegistry(metrics.metricRegistry)
        .withExpansions(Expansion.ALL)
        .withHost(host)
        .withMetricNameFormatter(new DefaultMetricNameFormatter())
        .withPrefix(prefix)
        .withTags(tags.asJava)
        .withTransport(transport)
        .convertRatesTo(TimeUnit.SECONDS)
        .convertDurationsTo(TimeUnit.MILLISECONDS)
        .build()
  }

  private[reporting] def getTransport(): Transport = {
    new UdpTransport.Builder().withStatsdHost(dogHost).withPort(port).build()
  }

} 
Example 178
Source File: DogStatsDReporterSpec.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.metrics.reporting

import java.util.concurrent.TimeUnit

import com.github.vonnagy.service.container.AkkaTestkitSpecs2Support
import com.typesafe.config.ConfigFactory
import org.coursera.metrics.datadog.transport.Transport
import org.specs2.mock.Mockito
import org.specs2.mutable.SpecificationLike

import scala.concurrent.duration.{FiniteDuration, _}

class DogStatsDReporterSpec extends AkkaTestkitSpecs2Support with SpecificationLike with Mockito {

  "The DatadogReporter reporter" should {

    "report metrics when triggered by the scheduler" in {

      implicit val conf = ConfigFactory.parseString(
        """
         {
          enabled=on
          host="localhost"
          port=8125
          reporting-interval=10ms
          metric-prefix = "pref"
          tags = ["boo", "hoo"]
          api-key = "abc123"
        }
        """)

      val dogStatsDReporter = spy(new DogStatsDReporter)

      val transport = mock[Transport]
      dogStatsDReporter.getTransport returns transport

      val rptr = mock[org.coursera.metrics.datadog.DatadogReporter]
      dogStatsDReporter.getReporter returns rptr

      dogStatsDReporter.start(FiniteDuration(2, TimeUnit.MILLISECONDS))
      there was after(100.millisecond).atLeastOne(dogStatsDReporter).report()

      dogStatsDReporter.tags must containAllOf(Seq("boo", "hoo", "app:container-service", "version:1.0.0.N/A"))
      dogStatsDReporter.prefix must be equalTo "pref"

      dogStatsDReporter.stop
      there was one(transport).close()
    }
  }

} 
Example 179
Source File: StatsDReporterSpec.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.metrics.reporting

import java.util.concurrent.TimeUnit

import com.github.jjagged.metrics.reporting.statsd.StatsD
import com.github.vonnagy.service.container.AkkaTestkitSpecs2Support
import com.typesafe.config.ConfigFactory
import org.specs2.mock.Mockito
import org.specs2.mutable.SpecificationLike

import scala.concurrent.duration._


class StatsDReporterSpec extends AkkaTestkitSpecs2Support with SpecificationLike with Mockito {

  "The StatsDReporter reporter" should {

    "report metrics when triggered by the scheduler" in {

      implicit val conf = ConfigFactory.parseString(
        """
         {
          enabled=on
          reporting-interval=10ms
          host="localhost"
          port=9092
          metric-prefix = "pref"
        }
        """)

      val statsdReporter = spy(new StatsDReporter)
      val statsD = mock[StatsD]
      statsdReporter.getStatsD returns statsD

      val rptr = mock[com.github.jjagged.metrics.reporting.StatsDReporter]
      statsdReporter.getReporter returns rptr

      statsdReporter.start(FiniteDuration(2, TimeUnit.MILLISECONDS))
      there was after(100.millisecond).atLeastOne(statsdReporter).report()

      statsdReporter.stop
      there was one(statsD).close()
    }
  }

} 
Example 180
Source File: MetricsReportingManager.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.metrics.reporting

import java.util.concurrent.TimeUnit

import akka.ConfigurationException
import akka.actor.{Actor, ActorSystem, Props}
import com.github.vonnagy.service.container.health.{GetHealth, HealthInfo, HealthState, RegisteredHealthCheckActor}
import com.github.vonnagy.service.container.log.ActorLoggingAdapter
import com.github.vonnagy.service.container.metrics.Metrics
import com.typesafe.config.{Config, ConfigObject}

import scala.collection.JavaConverters._
import scala.concurrent.duration.FiniteDuration

object MetricsReportingManager {
  def props(): Props =
    Props(classOf[MetricsReportingManager])
}

// The class declaration and the state used by the methods below were elided from the original
// listing; a minimal reconstruction is sketched here so the excerpt is self-contained. The
// original also starts the configured reporters when the actor starts.
class MetricsReportingManager extends Actor with RegisteredHealthCheckActor with ActorLoggingAdapter {

  // Reporters currently managed by this actor
  private[reporting] var reporters = Seq.empty[ScheduledReporter]

  def receive = {
    case GetHealth => sender ! checkHealth()
  }

  private[reporting] def stopReporters(): Unit = {
    reporters.foreach(_.stop)
    reporters = Seq.empty[ScheduledReporter]
  }

  private def checkHealth(): HealthInfo = {
    if (reporters.length == 0) {
      HealthInfo("metrics-reporting", HealthState.OK, s"The system is currently not managing any metrics reporters")
    }
    else {
      val x = for {
        reporter <- reporters
      } yield {
        reporter.getClass.getName
      }

      HealthInfo("metrics-reporting", HealthState.OK, s"The system is currently managing ${reporters.length} metrics reporters", Some(x))
    }
  }
} 
Example 181
Source File: Slf4jReporter.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.metrics.reporting

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import com.typesafe.config.Config
import org.slf4j.LoggerFactory

class Slf4jReporter(implicit val system: ActorSystem, val config: Config) extends ScheduledReporter {

  lazy val reporter = getReporter

  
  def report(): Unit = {

    reporter.report(metrics.metricRegistry.getGauges(),
      metrics.metricRegistry.getCounters(),
      metrics.metricRegistry.getHistograms(),
      metrics.metricRegistry.getMeters(),
      metrics.metricRegistry.getTimers());
  }

  private[reporting] def getReporter: com.codahale.metrics.Slf4jReporter = {
    com.codahale.metrics.Slf4jReporter.forRegistry(metrics.metricRegistry)
      .outputTo(LoggerFactory.getLogger(config.getString("logger")))
      .convertRatesTo(TimeUnit.SECONDS)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .build
  }
} 
Example 182
Source File: HttpMetrics.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.http

import java.util.concurrent.TimeUnit

import akka.actor.{ActorSystem, Cancellable}
import com.github.vonnagy.service.container.log.LoggingAdapter
import com.github.vonnagy.service.container.metrics._

import scala.concurrent.duration._

case class Stats(
                  uptime: FiniteDuration,
                  totalRequests: Long,
                  openRequests: Long,
                  maxOpenRequests: Long,
                  totalConnections: Long,
                  openConnections: Long,
                  maxOpenConnections: Long,
                  requestTimeouts: Long)

private[http] trait HttpMetrics extends LoggingAdapter {

  implicit def system: ActorSystem

  var metricsJob: Option[Cancellable] = None
  var lastStats = Stats(FiniteDuration(0, TimeUnit.MILLISECONDS), 0, 0, 0, 0, 0, 0, 0)

  /// TODO def httpListener: Option[ActorSelection]

  val totConn = Gauge("container.http.connections.total") {
    lastStats.totalConnections
  }
  val openConn = Gauge("container.http.connections.open") {
    lastStats.openConnections
  }
  val maxOpenConn = Gauge("container.http.connections.max-open") {
    lastStats.maxOpenConnections
  }
  val totReq = Gauge("container.http.requests.total") {
    lastStats.totalRequests
  }
  val openReq = Gauge("container.http.requests.open") {
    lastStats.openRequests
  }
  val maxOpenReq = Gauge("container.http.requests.max-open") {
    lastStats.maxOpenRequests
  }
  val uptime = Gauge("container.http.uptime") {
    lastStats.uptime.toMillis
  }
  val idle = Gauge("container.http.idle-timeouts") {
    lastStats.requestTimeouts
  }

  protected[http] def scheduleHttpMetrics(interval: FiniteDuration): Unit = {
    // Schedule an event to gather the http statistics so that we can add information to our metrics system
    log.info("Scheduling http server metrics handler")
    implicit val dis = system.dispatcher
    metricsJob = Some(system.scheduler.schedule(interval, interval)(getMetrics))
  }

  protected[http] def cancelHttpMetrics(): Unit = {
    metricsJob.exists(_.cancel())
    metricsJob = None
  }

  private def getMetrics(): Unit = {

    try {
      // TODO - No stats
//      if (httpListener.isDefined) httpListener.get ? Http.GetStats onSuccess {
//        case x: Stats => lastStats = x
//      }
      lastStats = Stats(0 seconds, 0, 0, 0, 0, 0, 0, 0)
    }
    catch {
      case e: Exception =>
        log.error("An error occurred when trying to fetch and record the http server metrics", e)
    }
  }
} 
Example 183
Source File: SystemShutdown.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.core

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import com.github.vonnagy.service.container.log.LoggingAdapter

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.sys.ShutdownHookThread


// The trait declaration and the members referenced below were elided from the original listing;
// a minimal reconstruction is sketched here so the excerpt is self-contained. The original also
// registers the JVM shutdown hook that populates `shutdownHook`.
trait SystemShutdown extends LoggingAdapter {

  def system: ActorSystem

  private[container] var shutdownHook: Option[ShutdownHookThread] = None

  private[container] def shutdownActorSystem(fromHook: Boolean = false)(f: => Unit): Unit = {

    try {
      // Remove the hook
      if (shutdownHook.isDefined && !fromHook) {
        shutdownHook.get.remove

      }
      shutdownHook = None

      log.info("Shutting down the actor system")
      system.terminate()

      // Wait for termination if it is not already complete
      Await.result(system.whenTerminated, Duration.apply(30, TimeUnit.SECONDS))
      log.info("The actor system has terminated")
    }
    catch {
      case t: Throwable =>
        log.error(s"The actor system could not be shutdown: ${t.getMessage}", t)
    }

    // Call the passed function
    f
  }
} 
Example 184
Source File: HttpMetricsSpec.scala    From service-container   with Apache License 2.0 5 votes vote down vote up
package com.github.vonnagy.service.container.http

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.TestActorRef
import com.github.vonnagy.service.container.AkkaTestkitSpecs2Support
import com.github.vonnagy.service.container.metrics.Metrics
import org.specs2.mutable.SpecificationLike

import scala.collection.JavaConverters._
import scala.concurrent.duration._

class HttpMetricsSpec extends AkkaTestkitSpecs2Support with SpecificationLike {

  sequential

  val svcAct = TestActorRef(new Actor {
    def receive = {
      case _ =>
    }

    val listener = context.actorOf(Props(
      new Actor {
        def receive = {
          case _ => sender ! Stats(FiniteDuration(1000, TimeUnit.MILLISECONDS), 1000, 1000, 1000, 1000, 1000, 1000, 1000)
        }

      }), "listener-0")
  }, "http")

  class MetricTest(implicit val system: ActorSystem) extends HttpMetrics {
    def httpListener = Some(system.actorSelection(svcAct.children.head.path))
  }

  val metrics = new MetricTest

  "The HttpMetrics" should {

    "provide basic stats" in {

      metrics.lastStats must be equalTo Stats(FiniteDuration(0, TimeUnit.MILLISECONDS), 0, 0, 0, 0, 0, 0, 0)
      metrics.totConn.name must be equalTo "container.http.connections.total"
      metrics.openConn.name must be equalTo "container.http.connections.open"
      metrics.maxOpenConn.name must be equalTo "container.http.connections.max-open"
      metrics.totReq.name must be equalTo "container.http.requests.total"
      metrics.openReq.name must be equalTo "container.http.requests.open"
      metrics.maxOpenReq.name must be equalTo "container.http.requests.max-open"
      metrics.uptime.name must be equalTo "container.http.uptime"
      metrics.idle.name must be equalTo "container.http.idle-timeouts"

      val metricRegistry = Metrics().metricRegistry
      metricRegistry.getGauges.asScala.foreach(_._2.getValue)
      metricRegistry.getGauges.asScala.filterKeys(g => !g.startsWith("jvm.")).size must be equalTo 8
    }

    "schedule and cancel the metrics job" in {
      metrics.metricsJob must beNone
      metrics.scheduleHttpMetrics(FiniteDuration(100, TimeUnit.MILLISECONDS))
      metrics.metricsJob must not beNone

      metrics.cancelHttpMetrics
      metrics.metricsJob must beNone
    }

    "schedule and fetch the metrics" in {
      metrics.metricsJob must beNone
      metrics.scheduleHttpMetrics(FiniteDuration(100, TimeUnit.MILLISECONDS))
      Thread.sleep(1000)

      metrics.cancelHttpMetrics
      metrics.lastStats must not be Stats(FiniteDuration(0, TimeUnit.MILLISECONDS), 0, 0, 0, 0, 0, 0, 0)

    }

  }
} 
Example 185
Source File: OffsetGraphiteReporter.scala    From kafka-offset-monitor-graphite   with Apache License 2.0 5 votes vote down vote up
package pl.allegro.tech.kafka.offset.monitor.graphite

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

import com.codahale.metrics.{MetricRegistry, MetricFilter}
import com.codahale.metrics.graphite.{GraphiteReporter, Graphite}
import com.google.common.cache._
import com.quantifind.kafka.OffsetGetter.OffsetInfo
import com.codahale.metrics.Gauge

class OffsetGraphiteReporter (pluginsArgs: String) extends com.quantifind.kafka.offsetapp.OffsetInfoReporter {

  GraphiteReporterArguments.parseArguments(pluginsArgs)

  val metrics : MetricRegistry = new MetricRegistry()

  val graphite : Graphite = new Graphite(new InetSocketAddress(GraphiteReporterArguments.graphiteHost, GraphiteReporterArguments.graphitePort))
  val reporter : GraphiteReporter = GraphiteReporter.forRegistry(metrics)
    .prefixedWith(GraphiteReporterArguments.graphitePrefix)
    .convertRatesTo(TimeUnit.SECONDS)
    .convertDurationsTo(TimeUnit.MILLISECONDS)
    .filter(MetricFilter.ALL)
    .build(graphite)

  reporter.start(GraphiteReporterArguments.graphiteReportPeriod, TimeUnit.SECONDS)

  val removalListener : RemovalListener[String, GaugesValues] = new RemovalListener[String, GaugesValues] {
    override def onRemoval(removalNotification: RemovalNotification[String, GaugesValues]) = {
      metrics.remove(removalNotification.getKey() + ".offset")
      metrics.remove(removalNotification.getKey() + ".logSize")
      metrics.remove(removalNotification.getKey() + ".lag")
    }
  }

  val gauges : LoadingCache[String, GaugesValues] = CacheBuilder.newBuilder()
    .expireAfterAccess(GraphiteReporterArguments.metricsCacheExpireSeconds, TimeUnit.SECONDS)
    .removalListener(removalListener)
    .build(
      new CacheLoader[String, GaugesValues]() {
        def load(key: String): GaugesValues = {
          val values: GaugesValues = new GaugesValues()

          val offsetGauge: Gauge[Long] = new Gauge[Long] {
            override def getValue: Long = {
              values.offset
            }
          }

          val lagGauge: Gauge[Long] = new Gauge[Long] {
            override def getValue: Long = {
              values.lag
            }
          }

          val logSizeGauge: Gauge[Long] = new Gauge[Long] {
            override def getValue: Long = {
              values.logSize
            }
          }

          metrics.register(key + ".offset", offsetGauge)
          metrics.register(key + ".logSize", logSizeGauge)
          metrics.register(key + ".lag", lagGauge)

          values
        }
      }
   )

  override def report(info: scala.IndexedSeq[OffsetInfo]) =  {
    info.foreach(i => {
      val values: GaugesValues = gauges.get(getMetricName(i))
      values.logSize = i.logSize
      values.offset = i.offset
      values.lag = i.lag
    })
  }

  def getMetricName(offsetInfo: OffsetInfo): String = {
    offsetInfo.topic.replace(".", "_") + "." + offsetInfo.group.replace(".", "_") + "." + offsetInfo.partition
  }
} 
Example 186
Source File: ShadowsocksApplication.scala    From shadowsocksr-android   with GNU General Public License v3.0 5 votes vote down vote up
package com.github.shadowsocks

import java.util
import java.util.concurrent.TimeUnit

import android.app.Application
import android.content.pm.PackageManager
import android.preference.PreferenceManager
import android.support.v7.app.AppCompatDelegate
import com.github.shadowsocks.database.{DBHelper, ProfileManager}
import com.github.shadowsocks.utils.{Key, Utils}
import com.google.android.gms.analytics.{GoogleAnalytics, HitBuilders}
import com.google.android.gms.common.api.ResultCallback
import com.google.android.gms.tagmanager.{ContainerHolder, TagManager}
import com.j256.ormlite.logger.LocalLog

object ShadowsocksApplication {
  var app: ShadowsocksApplication = _
}

class ShadowsocksApplication extends Application {
  import ShadowsocksApplication._

  final val SIG_FUNC = "getSignature"
  var containerHolder: ContainerHolder = _
  lazy val tracker = GoogleAnalytics.getInstance(this).newTracker(R.xml.tracker)
  lazy val settings = PreferenceManager.getDefaultSharedPreferences(this)
  lazy val editor = settings.edit
  lazy val profileManager = new ProfileManager(new DBHelper(this))

  def isNatEnabled = settings.getBoolean(Key.isNAT, false)
  def isVpnEnabled = !isNatEnabled

  def getVersionName = try {
    getPackageManager.getPackageInfo(getPackageName, 0).versionName
  } catch {
    case _: PackageManager.NameNotFoundException => "Package name not found"
    case _: Throwable => null
  }

  // send event
  def track(category: String, action: String) = tracker.send(new HitBuilders.EventBuilder()
    .setCategory(category)
    .setAction(action)
    .setLabel(getVersionName)
    .build())

  def profileId = settings.getInt(Key.id, -1)
  def profileId(i: Int) = editor.putInt(Key.id, i).apply
  def currentProfile = profileManager.getProfile(profileId)

  def switchProfile(id: Int) = {
    profileId(id)
    profileManager.getProfile(id) getOrElse profileManager.createProfile()
  }

  override def onCreate() {
    java.lang.System.setProperty(LocalLog.LOCAL_LOG_LEVEL_PROPERTY, "ERROR")
    app = this
    AppCompatDelegate.setCompatVectorFromResourcesEnabled(true)
    val tm = TagManager.getInstance(this)
    val pending = tm.loadContainerPreferNonDefault("GTM-NT8WS8", R.raw.gtm_default_container)
    val callback = new ResultCallback[ContainerHolder] {
      override def onResult(holder: ContainerHolder) {
        if (!holder.getStatus.isSuccess) {
          return
        }
        containerHolder = holder
        val container = holder.getContainer
        container.registerFunctionCallMacroCallback(SIG_FUNC,
          (functionName: String, parameters: util.Map[String, AnyRef]) => {
            if (functionName == SIG_FUNC) {
              Utils.getSignature(getApplicationContext)
            }
            null
          })
      }
    }
    pending.setResultCallback(callback, 2, TimeUnit.SECONDS)
  }

  def refreshContainerHolder {
    val holder = app.containerHolder
    if (holder != null) holder.refresh()
  }
} 
Example 187
Source File: DeepBindBenchmark.scala    From cats-effect   with Apache License 2.0 5 votes vote down vote up
package cats.effect.benchmarks

import java.util.concurrent.TimeUnit
import cats.effect.{ContextShift, IO}
import org.openjdk.jmh.annotations._
import scala.concurrent.ExecutionContext.Implicits


@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class DeepBindBenchmark {
  implicit val cs: ContextShift[IO] = IO.contextShift(Implicits.global)

  @Param(Array("3000"))
  var size: Int = _

  @Benchmark
  def pure(): Int = {
    def loop(i: Int): IO[Int] =
      for {
        j <- IO.pure(i)
        _ <- if (j > size) IO.pure(j) else loop(j + 1)
      } yield j

    loop(0).unsafeRunSync()
  }

  @Benchmark
  def delay(): Int = {
    def loop(i: Int): IO[Int] =
      for {
        j <- IO(i)
        _ <- if (j > size) IO(j) else loop(j + 1)
      } yield j

    loop(0).unsafeRunSync()
  }

  @Benchmark
  def async(): Int = {
    def loop(i: Int): IO[Int] =
      for {
        j <- IO(i)
        _ <- IO.shift
        _ <- if (j > size) IO(j) else loop(j + 1)
      } yield j

    loop(0).unsafeRunSync()
  }
} 
Example 188
Source File: AttemptBenchmark.scala    From cats-effect   with Apache License 2.0 5 votes vote down vote up
package cats.effect.benchmarks

import java.util.concurrent.TimeUnit

import cats.effect.IO
import org.openjdk.jmh.annotations._


@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class AttemptBenchmark {
  @Param(Array("10000"))
  var size: Int = _

  @Benchmark
  def happyPath(): Int = {
    def loop(i: Int): IO[Int] =
      if (i < size) IO.pure(i + 1).attempt.flatMap(_.fold(IO.raiseError, loop))
      else IO.pure(i)

    loop(0).unsafeRunSync()
  }

  @Benchmark
  def errorRaised(): Int = {
    val dummy = new RuntimeException("dummy")
    val id = IO.pure[Int] _

    def loop(i: Int): IO[Int] =
      if (i < size)
        IO.raiseError[Int](dummy)
          .flatMap(x => IO.pure(x + 1))
          .attempt
          .flatMap(_.fold(_ => loop(i + 1), id))
      else
        IO.pure(i)

    loop(0).unsafeRunSync()
  }
} 
Example 189
Source File: MapStreamBenchmark.scala    From cats-effect   with Apache License 2.0 5 votes vote down vote up
package cats.effect.benchmarks

import java.util.concurrent.TimeUnit
import cats.effect.IO
import org.openjdk.jmh.annotations._


@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class MapStreamBenchmark {
  import MapStreamBenchmark.streamTest

  @Benchmark
  def one(): Long = streamTest(12000, 1)

  @Benchmark
  def batch30(): Long = streamTest(1000, 30)

  @Benchmark
  def batch120(): Long = streamTest(100, 120)
}

object MapStreamBenchmark {
  def streamTest(times: Int, batchSize: Int): Long = {
    var stream = range(0, times)
    var i = 0
    while (i < batchSize) {
      stream = mapStream(addOne)(stream)
      i += 1
    }
    sum(0)(stream).unsafeRunSync()
  }

  final case class Stream(value: Int, next: IO[Option[Stream]])
  val addOne = (x: Int) => x + 1

  def range(from: Int, until: Int): Option[Stream] =
    if (from < until)
      Some(Stream(from, IO(range(from + 1, until))))
    else
      None

  def mapStream(f: Int => Int)(box: Option[Stream]): Option[Stream] =
    box match {
      case Some(Stream(value, next)) =>
        Some(Stream(f(value), next.map(mapStream(f))))
      case None =>
        None
    }

  def sum(acc: Long)(box: Option[Stream]): IO[Long] =
    box match {
      case Some(Stream(value, next)) =>
        next.flatMap(sum(acc + value))
      case None =>
        IO.pure(acc)
    }
} 
Example 190
Source File: MapCallsBenchmark.scala    From cats-effect   with Apache License 2.0 5 votes vote down vote up
package cats.effect.benchmarks

import java.util.concurrent.TimeUnit
import cats.effect.IO
import org.openjdk.jmh.annotations._


@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class MapCallsBenchmark {
  import MapCallsBenchmark.test

  @Benchmark
  def one(): Long = test(12000, 1)

  @Benchmark
  def batch30(): Long = test(12000 / 30, 30)

  @Benchmark
  def batch120(): Long = test(12000 / 120, 120)
}

object MapCallsBenchmark {
  def test(iterations: Int, batch: Int): Long = {
    val f = (x: Int) => x + 1
    var io = IO(0)

    var j = 0
    while (j < batch) { io = io.map(f); j += 1 }

    var sum = 0L
    var i = 0
    while (i < iterations) {
      sum += io.unsafeRunSync()
      i += 1
    }
    sum
  }
} 
Example 191
Source File: ShallowBindBenchmark.scala    From cats-effect   with Apache License 2.0 5 votes vote down vote up
package cats.effect.benchmarks

import java.util.concurrent.TimeUnit
import cats.effect.{ContextShift, IO}
import org.openjdk.jmh.annotations._
import scala.concurrent.ExecutionContext.Implicits


@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class ShallowBindBenchmark {
  implicit val cs: ContextShift[IO] = IO.contextShift(Implicits.global)

  @Param(Array("10000"))
  var size: Int = _

  @Benchmark
  def pure(): Int = {
    def loop(i: Int): IO[Int] =
      if (i < size) IO.pure(i + 1).flatMap(loop)
      else IO.pure(i)

    IO.pure(0)
      .flatMap(loop)
      .unsafeRunSync()
  }

  @Benchmark
  def delay(): Int = {
    def loop(i: Int): IO[Int] =
      if (i < size) IO(i + 1).flatMap(loop)
      else IO(i)

    IO(0).flatMap(loop).unsafeRunSync()
  }

  @Benchmark
  def async(): Int = {
    def loop(i: Int): IO[Int] =
      if (i < size) IO.shift.flatMap(_ => IO.pure(i + 1)).flatMap(loop)
      else IO.shift.flatMap(_ => IO.pure(i))

    IO(0).flatMap(loop).unsafeRunSync()
  }
} 
Example 192
Source File: HandleErrorBenchmark.scala    From cats-effect   with Apache License 2.0 5 votes vote down vote up
package cats.effect.benchmarks

import java.util.concurrent.TimeUnit
import cats.effect.IO
import org.openjdk.jmh.annotations._


@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class HandleErrorBenchmark {
  @Param(Array("10000"))
  var size: Int = _

  @Benchmark
  def happyPath(): Int = {
    def loop(i: Int): IO[Int] =
      if (i < size)
        IO.pure(i + 1)
          .handleErrorWith(IO.raiseError)
          .flatMap(loop)
      else
        IO.pure(i)

    loop(0).unsafeRunSync()
  }

  @Benchmark
  def errorRaised(): Int = {
    val dummy = new RuntimeException("dummy")

    def loop(i: Int): IO[Int] =
      if (i < size)
        IO.raiseError[Int](dummy)
          .flatMap(x => IO.pure(x + 1))
          .flatMap(x => IO.pure(x + 1))
          .handleErrorWith(_ => loop(i + 1))
      else
        IO.pure(i)

    loop(0).unsafeRunSync()
  }
} 
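In the cats-effect JMH benchmarks above (Examples 187-192), TimeUnit appears only in @OutputTimeUnit, which fixes the unit of the reported throughput scores. The same class also parameterises JMH's time-based warm-up and measurement windows; the following is a minimal standalone sketch, with class and method names made up for illustration rather than taken from cats-effect:

import java.util.concurrent.TimeUnit

import org.openjdk.jmh.annotations._

@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.MICROSECONDS)                              // report scores as ops/us
@Warmup(iterations = 3, time = 1, timeUnit = TimeUnit.SECONDS)      // three 1-second warm-up rounds
@Measurement(iterations = 5, time = 2, timeUnit = TimeUnit.SECONDS) // five 2-second measured rounds
class TimeUnitAnnotationsBenchmark {

  @Benchmark
  def boxAnInt(): Integer = Integer.valueOf(42)
}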
Example 193
Source File: IOTimer.scala    From cats-effect   with Apache License 2.0 5 votes vote down vote up
package cats.effect
package internals

import java.util.concurrent.{ScheduledExecutorService, ScheduledThreadPoolExecutor, ThreadFactory, TimeUnit}
import cats.effect.internals.Callback.T
import cats.effect.internals.IOShift.Tick
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.util.Try


// Note: this excerpt shows only the companion object; the IOTimer class it instantiates is omitted.
object IOTimer {

  def apply(ec: ExecutionContext, sc: ScheduledExecutorService): Timer[IO] =
    new IOTimer(ec, sc)

  private[internals] lazy val scheduler: ScheduledExecutorService =
    mkGlobalScheduler(sys.props)

  private[internals] def mkGlobalScheduler(props: collection.Map[String, String]): ScheduledThreadPoolExecutor = {
    val corePoolSize = props
      .get("cats.effect.global_scheduler.threads.core_pool_size")
      .flatMap(s => Try(s.toInt).toOption)
      .filter(_ > 0)
      .getOrElse(2)
    val keepAliveTime = props
      .get("cats.effect.global_scheduler.keep_alive_time_ms")
      .flatMap(s => Try(s.toLong).toOption)
      .filter(_ > 0L)

    val tp = new ScheduledThreadPoolExecutor(corePoolSize, new ThreadFactory {
      def newThread(r: Runnable): Thread = {
        val th = new Thread(r)
        th.setName(s"cats-effect-scheduler-${th.getId}")
        th.setDaemon(true)
        th
      }
    })
    keepAliveTime.foreach { timeout =>
      // Call in this order or it throws!
      tp.setKeepAliveTime(timeout, TimeUnit.MILLISECONDS)
      tp.allowCoreThreadTimeOut(true)
    }
    tp.setRemoveOnCancelPolicy(true)
    tp
  }

  final private class ShiftTick(
    conn: IOConnection,
    cb: Either[Throwable, Unit] => Unit,
    ec: ExecutionContext
  ) extends Runnable {
    def run(): Unit = {
      // Shifts actual execution on our `ExecutionContext`, because
      // the scheduler is in charge only of ticks and the execution
      // needs to shift because the tick might continue with whatever
      // bind continuation is linked to it, keeping the current thread
      // occupied
      conn.pop()
      ec.execute(new Tick(cb))
    }
  }
} 
Example 194
Source File: JvmIOTimerTests.scala    From cats-effect   with Apache License 2.0 5 votes vote down vote up
package cats.effect
package internals

import java.util.concurrent.{ScheduledThreadPoolExecutor, TimeUnit}

import org.scalatest.matchers.should.Matchers
import org.scalatest.funsuite.AnyFunSuite

import scala.util.control.NonFatal

class JvmIOTimerTests extends AnyFunSuite with Matchers {
  private def withScheduler(props: Map[String, String])(f: ScheduledThreadPoolExecutor => Unit): Unit = {
    val s = IOTimer.mkGlobalScheduler(props)
    try f(s)
    finally {
      try s.shutdownNow()
      catch { case NonFatal(e) => e.printStackTrace() }
    }
  }

  test("global scheduler: default core pool size") {
    withScheduler(Map.empty) { s =>
      s.getCorePoolSize shouldBe 2
    }
  }

  test("global scheduler: custom core pool size") {
    withScheduler(Map("cats.effect.global_scheduler.threads.core_pool_size" -> "3")) { s =>
      s.getCorePoolSize shouldBe 3
    }
  }

  test("global scheduler: invalid core pool size") {
    withScheduler(Map("cats.effect.global_scheduler.threads.core_pool_size" -> "-1")) { s =>
      s.getCorePoolSize shouldBe 2
    }
  }

  test("global scheduler: malformed core pool size") {
    withScheduler(Map("cats.effect.global_scheduler.threads.core_pool_size" -> "banana")) { s =>
      s.getCorePoolSize shouldBe 2
    }
  }

  test("global scheduler: default core thread timeout") {
    withScheduler(Map.empty) { s =>
      s.allowsCoreThreadTimeOut shouldBe false
    }
  }

  test("global scheduler: custom core thread timeout") {
    withScheduler(Map("cats.effect.global_scheduler.keep_alive_time_ms" -> "1000")) { s =>
      s.allowsCoreThreadTimeOut shouldBe true
      s.getKeepAliveTime(TimeUnit.MILLISECONDS) shouldBe 1000
    }
  }

  test("global scheduler: invalid core thread timeout") {
    withScheduler(Map("cats.effect.global_scheduler.keep_alive_time_ms" -> "0")) { s =>
      s.allowsCoreThreadTimeOut shouldBe false
    }
  }

  test("global scheduler: malformed core thread timeout") {
    withScheduler(Map("cats.effect.global_scheduler.keep_alive_time_ms" -> "feral hogs")) { s =>
      s.allowsCoreThreadTimeOut shouldBe false
    }
  }
} 
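The tests above hand the configuration map straight to mkGlobalScheduler. When the lazy global scheduler is built from sys.props (as in Example 193), the same keys are ordinary JVM system properties, so they can be set before first use or passed as -D flags; a small sketch under that assumption:

object GlobalSchedulerConfig {
  // Equivalent command-line flags:
  //   -Dcats.effect.global_scheduler.threads.core_pool_size=4
  //   -Dcats.effect.global_scheduler.keep_alive_time_ms=1000
  def configure(): Unit = {
    // Must run before the global scheduler is first initialised, because it is a lazy val.
    System.setProperty("cats.effect.global_scheduler.threads.core_pool_size", "4")
    System.setProperty("cats.effect.global_scheduler.keep_alive_time_ms", "1000")
  }
}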
Example 195
Source File: TDatabaseFactory.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.db.factory

import java.util.concurrent.TimeUnit

import com.gabry.job.db.slicks.{SlickDependencyAccess, SlickJobAccess, SlickScheduleAccess, SlickTaskAccess}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FunSuite}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, ExecutionContextExecutor}


class TDatabaseFactory extends FunSuite with BeforeAndAfterAll{
  private implicit lazy val executionContext: ExecutionContextExecutor = scala.concurrent.ExecutionContext.global
  private val config = ConfigFactory.load()
  private val duration = FiniteDuration(3,TimeUnit.SECONDS)
  private val dataAccessFactory = DatabaseFactory.getDataAccessFactory(config).get
  override def beforeAll(): Unit = {
    super.beforeAll()
    dataAccessFactory.init()
  }
  override def afterAll(): Unit = {
    super.afterAll()
    dataAccessFactory.destroy()
  }
  test("TDatabaseFactory default jobAccess type"){
    val access = dataAccessFactory.getJobAccess
    assert(access.isInstanceOf[SlickJobAccess])
  }
  test("TDatabaseFactory jobAccess select"){
    val access = dataAccessFactory.getJobAccess
    assert(access.isInstanceOf[SlickJobAccess])

    val select = Await.result(access.selectOne("test"),duration)
    assert(select.isDefined)
    assert(select.get.name == "test")

  }
  test("TDatabaseFactory dependencyAccess type"){
    val access = dataAccessFactory.getDependencyAccess
    assert(access.isInstanceOf[SlickDependencyAccess])
  }
  test("TDatabaseFactory scheduleAccess type"){
    val access = dataAccessFactory.getScheduleAccess
    assert(access.isInstanceOf[SlickScheduleAccess])
  }
  test("TDatabaseFactory taskAccess type"){
    val access = dataAccessFactory.getTaskAccess
    assert(access.isInstanceOf[SlickTaskAccess])
  }
} 
Example 196
Source File: TScheduleAccess.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.db.slicks

import java.util.concurrent.TimeUnit

import com.gabry.job.core.domain.UID
import com.gabry.job.core.po.SchedulePo
import com.gabry.job.db.slicks.schema.Tables
import com.gabry.job.utils.Utils
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import slick.jdbc.MySQLProfile.api._

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, ExecutionContextExecutor}

class TScheduleAccess extends FunSuite with BeforeAndAfterAll{
  implicit lazy val executionContext: ExecutionContextExecutor = scala.concurrent.ExecutionContext.global
  val db = Database.forConfig("",ConfigFactory.load().getConfig("db.mysql"))
  val scheduleAccess = new SlickScheduleAccess(db)
  val scheduleNode = "3958164162305738376-node"
  var jobIdAndTriggerTime: (UID, Long) = ("999",1523497644627L)

  val schedulePo:Tables.SchedulesRow = SchedulePo("0",jobIdAndTriggerTime._1,2,3,false,
    jobIdAndTriggerTime._2,scheduleNode,123,false,
    Utils.calcPostOffsetTime(jobIdAndTriggerTime._2,0,TimeUnit.MINUTES),null)
  val duration = FiniteDuration(3,TimeUnit.SECONDS)
  override def beforeAll(): Unit = {
    super.beforeAll()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    Await.result(scheduleAccess.delete(schedulePo) ,duration)
    db.close()
  }
  test("ScheduleAccess insert"){
    val insert = Await.result(scheduleAccess.insert(schedulePo.copy(jobUid = schedulePo.jobUid)) ,duration)
    assert(insert != null )
  }
  test("ScheduleAccess insertOnDuplicateUpdate"){
    val insert1 = Await.result(scheduleAccess.insertOnDuplicateUpdate(schedulePo),duration)
    val insert2 = Await.result(scheduleAccess.insertOnDuplicateUpdate(schedulePo),duration)
    assert(insert1 > 0 )
    assert(insert2 > 0 )
  }

  test("ScheduleAccess select setDispatched"){
    val select = Await.result(scheduleAccess.selectOne(jobIdAndTriggerTime),duration)
    assert(select.isDefined)
    assert(select.get.jobUid == jobIdAndTriggerTime._1 && select.get.triggerTime == jobIdAndTriggerTime._2)
    val update = Await.result(scheduleAccess.setDispatched(select.get.uid,true),duration)
    assert(update > 0 )

    val select1 = Await.result(scheduleAccess.selectOne(jobIdAndTriggerTime),duration)
    assert(select1.isDefined)
    assert(select1.get.dispatched)
  }
  test("ScheduleAccess update"){
    val updateScheduleNode = "updateScheduleNode"
    val old = Await.result(scheduleAccess.selectOne(jobIdAndTriggerTime),duration)
    assert(old.isDefined)
    assert(old.get.scheduleNode!=updateScheduleNode)

    val update = Await.result(scheduleAccess.update(schedulePo.copy(scheduleNode = updateScheduleNode)),duration)
    assert(update > 0 )
    val newJob = Await.result(scheduleAccess.selectOne(jobIdAndTriggerTime),duration)
    assert(newJob.isDefined)
    assert(newJob.get.scheduleNode == updateScheduleNode)
  }
  test("ScheduleAccess selectUnDispatchSchedule"){
    scheduleAccess.selectUnDispatchSchedule("1",scheduleNode,jobIdAndTriggerTime._2+30,2){ r=>
      assert(!r.dispatched)
    }
  }
} 
Example 197
Source File: TTaskAccess.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.db.slicks

import java.util.concurrent.TimeUnit

import com.gabry.job.core.domain.UID
import com.gabry.job.db.slicks.schema.Tables
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import slick.jdbc.MySQLProfile.api._

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, ExecutionContextExecutor}

class TTaskAccess extends FunSuite with BeforeAndAfterAll{
  implicit lazy val executionContext: ExecutionContextExecutor = scala.concurrent.ExecutionContext.global
  val db = Database.forConfig("",ConfigFactory.load().getConfig("db.mysql"))
  val taskAccess = new SlickTaskAccess(db)
  var jobIdAndTriggerTime: (UID, Long) = ("999",1523497644627L)
  val taskTrackerNode = "3958164162305738376-node"
  val taskPo:Tables.TasksRow = Tables.TasksRow(-1,jobIdAndTriggerTime._1,jobIdAndTriggerTime._1,"-1",1,taskTrackerNode,"TEST",jobIdAndTriggerTime._2,Some("test"),null)
  val duration = FiniteDuration(3,TimeUnit.SECONDS)

  override def beforeAll(): Unit = {
    super.beforeAll()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    db.close()
  }
  test("TaskAccess insert, select,delete"){
    val insert = Await.result(taskAccess.insert(taskPo) ,duration)
    assert(insert!=null)
    assert(insert.state==taskPo.state)
    val select = Await.result(taskAccess.selectOne(insert.uid),duration)
    assert(select.isDefined)
    assert(select.get.state==insert.state)
    val delete = Await.result(taskAccess.delete(insert),duration)
    assert(delete>0)
    val select1 = Await.result(taskAccess.selectOne(insert.uid),duration)
    assert(select1.isEmpty)
  }
  test("TaskAccess insertOnDuplicateUpdate"){
    val insert = Await.result(taskAccess.insertOnDuplicateUpdate(taskPo) ,duration)
    assert(insert==0)
  }
} 
Example 198
Source File: TJobAccess.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.db.slicks

import java.util.concurrent.TimeUnit

import com.gabry.job.core.domain.Job
import com.gabry.job.db.slicks
import com.gabry.job.db.slicks.schema.Tables
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import slick.jdbc.MySQLProfile.api._

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, ExecutionContextExecutor}


class TJobAccess extends FunSuite with BeforeAndAfterAll{
  implicit lazy val executionContext: ExecutionContextExecutor = scala.concurrent.ExecutionContext.global
  val db = Database.forConfig("",ConfigFactory.load().getConfig("db.mysql"))
  val jobAccess = new SlickJobAccess(db)
  val scheduleNode = "3958164162305738376-node"
  val job:Tables.JobsRow = slicks.jobPo2Row(Job("0", "3958164162305738376-test","com.gabry.job.examples.TestTask","",0,TimeUnit.MINUTES))
    .copy(schedulerNode = Some(scheduleNode))
  val duration = FiniteDuration(3,TimeUnit.SECONDS)
  override def beforeAll(): Unit = {
    super.beforeAll()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    jobAccess.delete(job)
    db.close()
  }
  test("JobAccess insert"){
    val insert = Await.result(jobAccess.insert(job),duration)
    assert(insert != null )
    assert(insert.name == job.name)
  }
  test("JobAccess select"){
    val select = Await.result(jobAccess.selectOne(job.name),duration)
    assert(select.isDefined)
    assert(select.get.name == job.name)
  }
  test("JobAccess update"){
    val updateClassName = "updateClassName"
    val old = Await.result(jobAccess.selectOne(job.name),duration)
    assert(old.isDefined)
    assert(old.get.className!=updateClassName)
    val update = Await.result(jobAccess.update(job.copy(className = updateClassName)),duration)
    assert(update > 0 )
    val newJob = Await.result(jobAccess.selectOne(job.name),duration)
    assert(newJob.isDefined)
    assert(newJob.get.className==updateClassName)
  }
  test("JobAccess selectJobsByScheduleNode"){
    jobAccess.selectJobsByScheduleNode(scheduleNode){ r =>
      assert(r.schedulerNode.isDefined && r.schedulerNode.get == scheduleNode)
    }
  }
  test("JobAccess insertOnDuplicateUpdate"){
    val insert1 = Await.result(jobAccess.insertOnDuplicateUpdate(job),duration)
    val insert2 = Await.result(jobAccess.insertOnDuplicateUpdate(job),duration)
    assert(insert1>0)
    assert(insert2>0)
  }
  test("JobAccess delete"){
    val delete = Await.result(jobAccess.delete(job),duration)
    assert(delete > 0 )
    val select = Await.result(jobAccess.selectOne(job.name),duration)
    assert(select.isEmpty)
  }
} 
Example 199
Source File: TInsertTime.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.db.slicks

import java.util.concurrent.TimeUnit

import com.gabry.job.core.builder.JobBuilder
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import slick.jdbc.MySQLProfile.api._

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, ExecutionContextExecutor, Future}

class TInsertTime extends FunSuite with BeforeAndAfterAll{
  implicit lazy val executionContext: ExecutionContextExecutor = scala.concurrent.ExecutionContext.global
  val db = Database.forConfig("",ConfigFactory.load().getConfig("db.mysql"))
  val jobAccess = new SlickJobAccess(db)
  val duration = FiniteDuration(3,TimeUnit.DAYS)

  override def beforeAll(): Unit = {
    super.beforeAll()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    db.close()
  }
  test("InsertTime"){
    val recordNum = 10000
    val futures = 1 to recordNum map{ i =>

      val job = JobBuilder().withName(i.toString)
        .withClass("com.gabry.job.examples.TestTask")
        .withDataTimeOffset(0)
        .withDataTimeOffsetUnit(TimeUnit.MINUTES)
        .build()

      jobAccess.insert(job)
    }
    val start = System.currentTimeMillis()
    val all = Future.sequence(futures)
    Await.result(all,duration)
    val end = System.currentTimeMillis()
    println(s"插入 $recordNum 条数据,总耗时 ${end-start} 毫秒,平均 ${(end-start)/recordNum} 毫秒/条")
  }
} 
Example 200
Source File: JobPo.scala    From lemon-schedule   with GNU General Public License v2.0 5 votes vote down vote up
package com.gabry.job.core.po

import java.util.concurrent.TimeUnit

import com.gabry.job.core.domain.UID


case class JobPo(uid: UID,
                 name: String,
                 className: String,
                 metaData: String,
                 dataTimeOffset: Long,
                 dataTimeOffsetUnit: TimeUnit,
                 startTime: Long,
                 cron: String,
                 priority: Int,
                 parallel: Int,
                 retryTimes: Int,
                 workerNodes: Option[String],
                 clusterName: String,
                 groupName: String,
                 timeout: Int,
                 replaceIfExist: Boolean,
                 lastGenerateTriggerTime: Option[Long],
                 schedulerNode: Option[String],
                 scheduleFrequency: Option[Long],
                 lastScheduleTime: Option[Long],
                 updateTime: java.sql.Timestamp = null) extends Po{
  override def toString: String = s"JobPo(uid=$uid,name=$name,class=$className,meta=$metaData,dataTimeOffset=$dataTimeOffset,dataTimeOffsetUnit=$dataTimeOffsetUnit," +
    s"startTime=$startTime,cron=$cron,priority=$priority,parallel=$parallel,retryTimes=$retryTimes,workerNodes=${workerNodes.mkString(",")}," +
    s"cluster=$clusterName,group=$groupName,timeout=$timeout,replaceIfExist=$replaceIfExist,lastGenerateTriggerTime=$lastGenerateTriggerTime," +
    s"schedulerNode=$schedulerNode,scheduleFrequency=$scheduleFrequency,lastScheduleTime=$lastScheduleTime)"
}
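JobPo carries the data-time offset as a raw Long plus its TimeUnit. A hypothetical helper (not part of lemon-schedule) would normalise such a pair with TimeUnit.toMillis before doing any date arithmetic:

import java.util.concurrent.TimeUnit

object DataTimeOffset {
  // Converts an (offset, unit) pair to milliseconds, e.g. (5, MINUTES) => 300000.
  def toMillis(dataTimeOffset: Long, dataTimeOffsetUnit: TimeUnit): Long =
    dataTimeOffsetUnit.toMillis(dataTimeOffset)

  def main(args: Array[String]): Unit = {
    println(toMillis(5, TimeUnit.MINUTES)) // 300000
    println(toMillis(2, TimeUnit.HOURS))   // 7200000
  }
}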