java.util.concurrent.atomic.AtomicReference Scala Examples

The following examples show how to use java.util.concurrent.atomic.AtomicReference in Scala. Each example is taken from an open-source project; the source file and license are noted above each snippet.
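Before the project examples, here is a minimal, self-contained sketch (not taken from any project below) of the core operations they all build on: get, compareAndSet, and the lock-free retry loop.

import java.util.concurrent.atomic.AtomicReference

import scala.annotation.tailrec

object AtomicReferenceBasics extends App {
  // An AtomicReference holds a value that can be read and swapped atomically.
  val ref = new AtomicReference[List[String]](Nil)

  // compareAndSet succeeds only if the current value is still the one we read;
  // on contention we simply retry. This is the standard lock-free update idiom.
  @tailrec
  def prepend(s: String): Unit = {
    val current = ref.get()
    if (!ref.compareAndSet(current, s :: current)) prepend(s)
  }

  prepend("a")
  prepend("b")
  println(ref.get()) // List(b, a)
}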
Example 1
Source File: AmqpProtocol.scala    From gatling-amqp-plugin   with Apache License 2.0
package ru.tinkoff.gatling.amqp.protocol

import java.util.concurrent.atomic.AtomicReference

import com.rabbitmq.client.ConnectionFactory
import io.gatling.core.CoreComponents
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.protocol.{Protocol, ProtocolKey}
import ru.tinkoff.gatling.amqp.client.{AmqpConnectionPool, TrackerPool}
import ru.tinkoff.gatling.amqp.request.AmqpProtocolMessage

object AmqpProtocol {
  val amqpProtocolKey: ProtocolKey[AmqpProtocol, AmqpComponents] = new ProtocolKey[AmqpProtocol, AmqpComponents] {
    override def protocolClass: Class[Protocol] = classOf[AmqpProtocol].asInstanceOf[Class[Protocol]]

    override def defaultProtocolValue(configuration: GatlingConfiguration): AmqpProtocol =
      throw new IllegalStateException("Can't provide a default value for AmqpProtocol")

    private val trackerPoolRef    = new AtomicReference[TrackerPool]()
    private val connectionPoolRef = new AtomicReference[AmqpConnectionPool]()

    private def getOrCreateConnectionPool(protocol: AmqpProtocol) = {
      if (connectionPoolRef.get() == null) {
        connectionPoolRef.lazySet(
          new AmqpConnectionPool(protocol.connectionFactory, protocol.consumersThreadCount)
        )
      }
      connectionPoolRef.get()
    }

    private def getOrCreateTrackerPool(components: CoreComponents, pool: AmqpConnectionPool) = {
      if (trackerPoolRef.get() == null) {
        trackerPoolRef.lazySet(
          new TrackerPool(pool, components.actorSystem, components.statsEngine, components.clock, components.configuration)
        )
      }
      trackerPoolRef.get()
    }

    override def newComponents(coreComponents: CoreComponents): AmqpProtocol => AmqpComponents =
      amqpProtocol => {
        val pool = getOrCreateConnectionPool(amqpProtocol)
        coreComponents.actorSystem.registerOnTermination(pool.close())
        val trackerPool = getOrCreateTrackerPool(coreComponents, pool)
        AmqpComponents(amqpProtocol, pool, trackerPool)
      }
  }
}

case class AmqpProtocol(
    connectionFactory: ConnectionFactory,
    deliveryMode: Int,
    replyTimeout: Option[Long],
    consumersThreadCount: Int,
    messageMatcher: AmqpMessageMatcher,
    responseTransformer: Option[AmqpProtocolMessage => AmqpProtocolMessage]
) extends Protocol {
  type Components = AmqpComponents
} 
Example 2
Source File: ComponentsFixture.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.test

import java.util.concurrent.atomic.AtomicReference

import com.daml.navigator.test.config.Arguments
import com.daml.navigator.test.runner.{HeadNavigator, PackagedDamlc, PackagedSandbox}
import com.typesafe.scalalogging.LazyLogging

import scala.io.Source
import scala.util.{Failure, Success, Try}

class ComponentsFixture(
    val args: Arguments,
    val navigatorPort: Int,
    val sandboxPort: Int,
    val scenario: String
) extends LazyLogging {

  // A list of commands on how to destroy started processes
  private val killProcs: AtomicReference[List[Unit => Unit]] = new AtomicReference(List.empty)

  private val onlineUrl = s"http://localhost:$navigatorPort/api/about"

  private def get(
      url: String,
      connectTimeout: Int = 1000,
      readTimeout: Int = 1000,
      requestMethod: String = "GET"
  ): String = {
    import java.net.{URL, HttpURLConnection}
    val connection = (new URL(url)).openConnection.asInstanceOf[HttpURLConnection]
    connection.setConnectTimeout(connectTimeout)
    connection.setReadTimeout(readTimeout)
    connection.setRequestMethod(requestMethod)
    val inputStream = connection.getInputStream
    val content = Source.fromInputStream(inputStream).mkString
    if (inputStream != null) inputStream.close()
    content
  }

  def startup(): Try[Unit] = {
    if (args.startComponents) {
      logger.info("Starting the sandbox and the Navigator")
      for {
        (darFile, tempFiles) <- Try(PackagedDamlc.run(args.damlPath))
        sandbox <- Try(PackagedSandbox.runAsync(sandboxPort, darFile, scenario))
        _ = killProcs.updateAndGet(s => sandbox :: s)
        navigator <- Try(
          HeadNavigator.runAsync(args.navConfPAth, args.navigatorDir, navigatorPort, sandboxPort))
        _ = killProcs.updateAndGet(s => navigator :: s)
      } yield { () }
    } else {
      Success(())
    }
  }

  private def retry[R](action: => R, maxRetries: Int, delayMillis: Int): Try[R] = {
    def retry0(count: Int): Try[R] = {
      Try(action) match {
        case Success(r) => Success(r)
        case Failure(e) =>
          if (count > maxRetries) {
            logger.error(
              s"Navigator is not available after $maxRetries retries with $delayMillis millis interval.")
            Failure(e)
          } else {
            logger.info(s"Navigator is not available yet, waiting $delayMillis millis ")
            Thread.sleep(delayMillis.toLong)
            retry0(count + 1)
          }
      }
    }

    retry0(0)
  }

  def waitForNavigator(): Try[Unit] = {
    logger.info(s"Waiting for the Navigator to start up (waiting for $onlineUrl)")
    retry({ get(onlineUrl); () }, 120, 1000)
  }

  def shutdown(): Unit = {
    killProcs.getAndUpdate(procs => {
      procs.foreach(killAction => Try { killAction(()) })
      List.empty
    })
    ()
  }
} 
Example 3
Source File: nodes.scala    From akka-viz   with MIT License
package scalatags.rx

import java.util.concurrent.atomic.AtomicReference

import org.scalajs.dom
import org.scalajs.dom.Element
import org.scalajs.dom.ext._
import org.scalajs.dom.raw.Comment
import rx._

import scala.collection.immutable
import scala.language.implicitConversions
import scalatags.JsDom.all._
import scalatags.jsdom
import scalatags.rx.ext._

trait RxNodeInstances {

  implicit class rxStringFrag(v: Rx[String])(implicit val ctx: Ctx.Owner) extends jsdom.Frag {
    def render: dom.Text = {
      val node = dom.document.createTextNode(v.now)
      v foreach { s => node.replaceData(0, node.length, s) } attachTo node
      node
    }
  }

  implicit class bindRxElement[T <: dom.Element](e: Rx[T])(implicit val ctx: Ctx.Owner) extends Modifier {
    def applyTo(t: Element) = {
      val element = new AtomicReference(e.now)
      t.appendChild(element.get())
      e.triggerLater {
        val current = e.now
        val previous = element getAndSet current
        t.replaceChild(current, previous)
      } attachTo t
    }
  }

  implicit class bindRxElements(e: Rx[immutable.Iterable[Element]])(implicit val ctx: Ctx.Owner) extends Modifier {
    def applyTo(t: Element) = {
      val nonEmpty = e.map { t => if (t.isEmpty) List(new Comment) else t }
      val fragments = new AtomicReference(nonEmpty.now)
      nonEmpty.now foreach t.appendChild
      nonEmpty triggerLater {
        val current = e.now
        val previous = fragments getAndSet current
        val i = t.childNodes.indexOf(previous.head)
        if (i < 0) throw new IllegalStateException("Children changed")
        0 to (previous.size - 1) foreach (_ => t.removeChild(t.childNodes.item(i)))
        if (t.childNodes.length > i) {
          val next = t.childNodes.item(i)
          current foreach (t.insertBefore(_, next))
        } else {
          current foreach t.appendChild
        }
      }
    }
  }

} 
Example 4
Source File: JobStatusFlusherSpec.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist.master.execution.status

import java.util.concurrent.atomic.AtomicReference

import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestKit}
import io.hydrosphere.mist.master.JobDetails.Status
import io.hydrosphere.mist.master.Messages.StatusMessages._
import io.hydrosphere.mist.master.{ActorSpec, JobDetails, TestData}
import io.hydrosphere.mist.master.logging.JobLogger
import mist.api.data._
import org.scalatest.concurrent.Eventually
import org.scalatest.{FunSpecLike, Matchers}
import org.scalatest.prop.TableDrivenPropertyChecks._
import org.scalatest.time.{Seconds, Span}

import scala.concurrent.{Future, Promise}

class JobStatusFlusherSpec extends ActorSpec("job-status-flusher") with TestData with Eventually {

  it("should flush status correctly") {
    val initial = Promise[JobDetails]

    val updateResult = new AtomicReference[Option[JobDetails]](None)
    val props = JobStatusFlusher.props(
      id = "id",
      get = (_) => initial.future,
      update = (d: JobDetails) => {updateResult.set(Some(d));Future.successful(())},
      loggerF = _ => JobLogger.NOOP
    )
    val flusher = TestActorRef(props)

    flusher ! ReportedEvent.plain(QueuedEvent("id"))
    flusher ! ReportedEvent.plain(StartedEvent("id", System.currentTimeMillis()))
    flusher ! ReportedEvent.plain(FinishedEvent("id", System.currentTimeMillis(), JsNumber(42)))

    initial.success(mkDetails(JobDetails.Status.Initialized))

    eventually(timeout(Span(3, Seconds))) {
      val value = updateResult.get
      value.isDefined shouldBe true

      val d = value.get
      d.status shouldBe JobDetails.Status.Finished
    }
  }

  describe("event conversion") {

    val baseDetails = mkDetails(JobDetails.Status.Initialized)

    val expected = Table(
      ("event", "details"),
      (QueuedEvent("id"), baseDetails.copy(status = Status.Queued)),
      (StartedEvent("id", 1), baseDetails.copy(status = Status.Started, startTime = Some(1))),
      (CancellingEvent("id", 1), baseDetails.copy(status = Status.Cancelling)),
      (CancelledEvent("id", 1), baseDetails.copy(status = Status.Canceled, endTime = Some(1))),
      (FinishedEvent("id", 1, JsMap("1" -> JsNumber(2))),
        baseDetails.copy(
          status = Status.Finished,
          endTime = Some(1),
          jobResult =
            Some(
              Right(JsMap("1" -> JsNumber(2)))
            )
        )),
      (FailedEvent("id", 1, "error"),
        baseDetails.copy(status = Status.Failed, endTime = Some(1), jobResult = Some(Left("error")))),
      (WorkerAssigned("id", "workerId"), baseDetails.copy(workerId = Some("workerId")))
    )

    it("should correct update job details") {
      forAll(expected) { (e: UpdateStatusEvent, d: JobDetails) =>
        JobStatusFlusher.applyStatusEvent(baseDetails, e) shouldBe d
      }
    }
  }
} 
Example 5
Source File: CoordinatorEmbeddedSpec.scala    From affinity   with Apache License 2.0
package io.amient.affinity.core.cluster

import java.util.concurrent.atomic.AtomicReference

import akka.actor.{Actor, ActorRef, Props}
import com.typesafe.config.{ConfigFactory, ConfigValueFactory}
import io.amient.affinity.avro.MemorySchemaRegistry
import io.amient.affinity.core.cluster.Coordinator.MembershipUpdate
import io.amient.affinity.{AffinityActorSystem, Conf}
import org.scalatest.{FlatSpec, Matchers}

import scala.collection.JavaConverters._

class CoordinatorEmbeddedSpec extends FlatSpec with Matchers {

  "CoordinatorEmbedded instances" should "share the underlying space for the same id and group" in {
    val config = ConfigFactory.empty()
      .withValue(Conf.Affi.SystemName.path, ConfigValueFactory.fromAnyRef("101"))
      .withValue(Conf.Affi.Node.path, ConfigValueFactory.fromMap(Map[String, String]().asJava))
      .withValue(Conf.Affi.Avro.Class.path, ConfigValueFactory.fromAnyRef(classOf[MemorySchemaRegistry].getName))
      .withValue(Conf.Affi.Coordinator.Class.path, ConfigValueFactory.fromAnyRef(classOf[CoordinatorEmbedded].getName))
    val system = AffinityActorSystem.create(config)
    try {
      val coordinator1 = Coordinator.create(system, "group1")
      val actor1 = system.actorOf(Props(new Actor {
        override def receive: Receive = {
          case null =>
        }
      }), "actor1")
      coordinator1.register(actor1.path)
      val update1 = new AtomicReference[scala.collection.Set[ActorRef]](Set.empty)
      update1 synchronized {
        coordinator1.watch(system.actorOf(Props(new Actor {
          override def receive: Receive = {
            case MembershipUpdate(masters) => update1 synchronized update1.set(masters.values.toSet)
          }
        }), "subscriber1"))
      }
      coordinator1.close()

      val coordinator2 = Coordinator.create(system, "group1")
      val update2 = new AtomicReference[scala.collection.Set[ActorRef]](Set.empty)
      update2 synchronized {
        coordinator2.watch(system.actorOf(Props(new Actor {
          override def receive: Receive = {
            case MembershipUpdate(masters) => update2 synchronized update2.set(masters.values.toSet)
          }
        }), "subscriber2"))
        update2.wait(1000)
        update2.get.map(_.path.toString) should be(Set("akka://101/user/actor1"))
        update1.get.map(_.path.toString) should be(Set("akka://101/user/actor1"))
      }
      coordinator2.close()


    } finally {
      system.terminate()
    }

  }

} 
Example 6
Source File: CustomCodeEntryPoint.scala    From seahorse   with Apache License 2.0
package ai.deepsense.workflowexecutor.customcode

import java.util.concurrent.TimeoutException
import java.util.concurrent.atomic.AtomicReference

import scala.annotation.tailrec
import scala.concurrent.duration._
import scala.concurrent.{Await, Promise}

import org.apache.spark.api.java.JavaSparkContext
import org.apache.spark.sql.DataFrame
import org.apache.spark.{SparkConf, SparkContext}

import ai.deepsense.commons.utils.Logging
import ai.deepsense.deeplang._
import ai.deepsense.sparkutils.SparkSQLSession


class CustomCodeEntryPoint(
    val sparkContext: SparkContext,
    val sparkSQLSession: SparkSQLSession,
    val dataFrameStorage: DataFrameStorage,
    val operationExecutionDispatcher: OperationExecutionDispatcher)
  extends Logging {
  import ai.deepsense.workflowexecutor.customcode.CustomCodeEntryPoint._
  def getSparkContext: JavaSparkContext = sparkContext

  def getSparkSQLSession: SparkSQLSession = sparkSQLSession

  def getNewSparkSQLSession: SparkSQLSession = sparkSQLSession.newSession()

  def getSparkConf: SparkConf = sparkContext.getConf

  private val codeExecutor: AtomicReference[Promise[CustomCodeExecutor]] =
    new AtomicReference(Promise())

  private val pythonPort: AtomicReference[Promise[Int]] =
    new AtomicReference(Promise())

  def getCodeExecutor(timeout: Duration): CustomCodeExecutor =
    getFromPromise(codeExecutor.get, timeout)

  def getPythonPort(timeout: Duration): Int =
    getFromPromise(pythonPort.get, timeout)

  def registerCodeExecutor(newCodeExecutor: CustomCodeExecutor): Unit =
    replacePromise(codeExecutor, newCodeExecutor)

  def registerCallbackServerPort(newPort: Int): Unit =
    replacePromise(pythonPort, newPort)

  def retrieveInputDataFrame(workflowId: String, nodeId: String, portNumber: Int): DataFrame =
    dataFrameStorage.getInputDataFrame(workflowId, nodeId, portNumber).get

  def retrieveOutputDataFrame(workflowId: String, nodeId: String, portNumber: Int): DataFrame =
    dataFrameStorage.getOutputDataFrame(workflowId, nodeId, portNumber).get

  def registerOutputDataFrame(
      workflowId: String, nodeId: String, portNumber: Int, dataFrame: DataFrame): Unit =
    dataFrameStorage.setOutputDataFrame(workflowId, nodeId, portNumber, dataFrame)

  def executionCompleted(workflowId: String, nodeId: String): Unit =
    operationExecutionDispatcher.executionEnded(workflowId, nodeId, Right(()))

  def executionFailed(workflowId: String, nodeId: String, error: String): Unit =
    operationExecutionDispatcher.executionEnded(workflowId, nodeId, Left(error))
}

object CustomCodeEntryPoint {
  private case class PromiseReplacedException() extends Exception

  @tailrec
  private def getFromPromise[T](promise: => Promise[T], timeout: Duration): T = {
    try {
      Await.result(promise.future, timeout)
    } catch {
      case e: TimeoutException => throw e
      case e: PromiseReplacedException => getFromPromise(promise, timeout)
    }
  }

  private def replacePromise[T](promise: AtomicReference[Promise[T]], newValue: T): Unit = {
    val oldPromise = promise.getAndSet(Promise.successful(newValue))
    try {
      oldPromise.failure(new PromiseReplacedException)
    } catch {
      // The oldPromise will always have been completed, except for the first time.
      // The illegal state is expected, but we have to complete the oldPromise,
      // since someone might be waiting on it.
      case e: IllegalStateException => ()
    }
  }

  case class CustomCodeEntryPointConfig(
    pyExecutorSetupTimeout: Duration = 5.seconds)
} 
Example 7
Source File: PortManager.scala    From CM-Well   with Apache License 2.0
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}


case class PortManager(initialPort: Int, step: Int) {
  def getPort(index: Int): Int = {
    initialPort + step * (index - 1)
  }
}

case object CassandraPortManager {
  val jmxPortManager = PortManager(7199, 2)
  val dmx4jPortManager = PortManager(8989, 1)
}

case object ElasticsearchPortManager {
  val jmxPortManager = PortManager(7200, 2)
  val httpPortManager = PortManager(9201, 1)
  val transportPortManager = PortManager(9301, 1)
}

case object BgPortManager {
  val jmxPortManager = PortManager(7196, 1)
  val monitorPortManager = PortManager(8050, 1)
}

case object CtrlPortManager {
  val monitorPortManager = PortManager(8000, 1)
  val jmxPortManager = PortManager(7192, 1)
}

case object WebServicePortManager {
  val jmxPortManager = PortManager(7194, 1)
  val playHttpPortManager = PortManager(9000, 1)
  val monitorPortManager = PortManager(8010, 1)
}

case object CwPortManager {
  val monitorPortManager = PortManager(8030, 1)
}

case object DcPortManager {
  val monitorPortManager = PortManager(8040, 1)
  val jmxPortManager = PortManager(7193, 1)
}

case object KafkaPortManager {
  val jmxPortManager = PortManager(7191, 1)
}

object PortManagers {
  val cas = CassandraPortManager
  val es = ElasticsearchPortManager
  val bg = BgPortManager
  val ws = WebServicePortManager
  val ctrl = CtrlPortManager
  val cw = CwPortManager
  val dc = DcPortManager
  val kafka = KafkaPortManager
} 
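A short usage sketch for PortManager above (hypothetical, not part of the original file). getPort is 1-based: getPort(1) returns initialPort and each further index adds step.

PortManagers.cas.jmxPortManager.getPort(1) // 7199
PortManagers.cas.jmxPortManager.getPort(3) // 7199 + 2 * 2 = 7203
PortManagers.es.httpPortManager.getPort(2) // 9201 + 1 = 9202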
Example 8
Source File: MergeIntoAccumulatorSuite.scala    From delta   with Apache License 2.0
package org.apache.spark.sql.delta

import java.util.concurrent.atomic.AtomicReference

import scala.collection.JavaConverters._

import org.apache.spark.sql.delta.commands.MergeIntoCommand
import org.apache.spark.sql.delta.test.DeltaSQLCommandTest

import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.status.TaskDataWrapper
import org.apache.spark.util.JsonProtocol


class MergeIntoAccumulatorSuite extends QueryTest with SharedSparkSession with DeltaSQLCommandTest {

  import testImplicits._

  private def runTestMergeCommand(): Unit = {
    // Run a simple merge command
    withTempView("source") {
      withTempDir { tempDir =>
        val tempPath = tempDir.getCanonicalPath
        Seq((1, 1), (0, 3)).toDF("key", "value").createOrReplaceTempView("source")
        Seq((2, 2), (1, 4)).toDF("key", "value").write.format("delta").save(tempPath)
        spark.sql(s"""
          |MERGE INTO delta.`$tempPath` target
          |USING source src
          |ON src.key = target.key
          |WHEN MATCHED THEN UPDATE SET *
          |WHEN NOT MATCHED THEN INSERT *
          |""".stripMargin)
      }
    }
  }

  test("accumulators used by MERGE should not be tracked by Spark UI") {
    runTestMergeCommand()

    // Make sure all Spark events generated by the above command have been processed
    spark.sparkContext.listenerBus.waitUntilEmpty(30000)

    val store = spark.sparkContext.statusStore.store
    val iter = store.view(classOf[TaskDataWrapper]).closeableIterator()
    try {
      // Collect all accumulator names tracked by Spark UI.
      val accumNames = iter.asScala.toVector.flatMap { task =>
        task.accumulatorUpdates.map(_.name)
      }.toSet
      // Verify accumulators used by MergeIntoCommand are not tracked.
      assert(!accumNames.contains(MergeIntoCommand.TOUCHED_FILES_ACCUM_NAME))
    } finally {
      iter.close()
    }
  }

} 
Example 9
Source File: TVar.scala    From cats-stm   with Apache License 2.0
package io.github.timwspence.cats.stm

import java.util.concurrent.atomic.AtomicReference

import io.github.timwspence.cats.stm.STM.internal._


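  // NOTE: the enclosing `class TVar` declaration is truncated in this snippet;
  // `id` and `value` below are its constructor fields (see the companion object).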
  def modify(f: A => A): STM[Unit] = STM { log =>
    val entry   = getOrInsert(log)
    val updated = f(entry.unsafeGet[A])
    TSuccess(entry.unsafeSet(updated))
  }

  private def getOrInsert(log: TLog): TLogEntry =
    if (log.contains(id))
      log(id)
    else {
      val entry = TLogEntry(this, value)
      log += id -> entry
      entry
    }

}

object TVar {

  def of[A](value: A): STM[TVar[A]] = STM { _ =>
    val id = IdGen.incrementAndGet
    TSuccess(new TVar(id, value, new AtomicReference(Map())))
  }

} 
Example 10
Source File: MockDiscovery.scala    From akka-management   with Apache License 2.0
package akka.discovery

import java.util.concurrent.atomic.AtomicReference

import akka.actor.ActorSystem
import akka.annotation.InternalApi
import akka.discovery.ServiceDiscovery.Resolved
import akka.event.Logging

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

@InternalApi
object MockDiscovery {
  private val data = new AtomicReference[Map[Lookup, () => Future[Resolved]]](Map.empty)

  def set(name: Lookup, to: () => Future[Resolved]): Unit = {
    val d = data.get()
    if (data.compareAndSet(d, d.updated(name, to))) ()
    else set(name, to) // retry
  }

  def remove(name: Lookup): Unit = {
    val d = data.get()
    if (data.compareAndSet(d, d - name)) ()
    else remove(name) // retry
  }
}

@InternalApi
final class MockDiscovery(system: ActorSystem) extends ServiceDiscovery {

  private val log = Logging(system, getClass)

  override def lookup(query: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] = {
    MockDiscovery.data.get().get(query) match {
      case Some(res) =>
        val items = res()
        log.info("Mock-resolved [{}] to [{}:{}]", query, items, items.value)
        items
      case None =>
        log.info("No mock-data for [{}], resolving as 'Nil'. Current mocks: {}", query, MockDiscovery.data.get())
        Future.successful(Resolved(query.serviceName, Nil))
    }
  }

} 
Example 11
Source File: ClusterBootstrap.scala    From akka-management   with Apache License 2.0
package akka.management.cluster.bootstrap

import java.util.concurrent.atomic.AtomicReference

import akka.AkkaVersion
import scala.concurrent.{ Future, Promise, TimeoutException }
import scala.concurrent.duration._

import akka.actor.ActorSystem
import akka.actor.ClassicActorSystemProvider
import akka.actor.ExtendedActorSystem
import akka.actor.Extension
import akka.actor.ExtensionId
import akka.actor.ExtensionIdProvider
import akka.annotation.InternalApi
import akka.cluster.Cluster
import akka.discovery.{ Discovery, ServiceDiscovery }
import akka.event.Logging
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.server.Route
import akka.management.cluster.bootstrap.contactpoint.HttpClusterBootstrapRoutes
import akka.management.cluster.bootstrap.internal.BootstrapCoordinator
import akka.management.scaladsl.ManagementRouteProviderSettings
import akka.management.scaladsl.ManagementRouteProvider

final class ClusterBootstrap(implicit system: ExtendedActorSystem) extends Extension with ManagementRouteProvider {

  import ClusterBootstrap.Internal._
  import system.dispatcher

  private val log = Logging(system, classOf[ClusterBootstrap])

  private final val bootstrapStep = new AtomicReference[BootstrapStep](NotRunning)

  AkkaVersion.require("cluster-bootstrap", "2.5.27")

  val settings: ClusterBootstrapSettings = ClusterBootstrapSettings(system.settings.config, log)

  // used for initial discovery of contact points
  lazy val discovery: ServiceDiscovery =
    settings.contactPointDiscovery.discoveryMethod match {
      case "akka.discovery" =>
        val discovery = Discovery(system).discovery
        log.info("Bootstrap using default `akka.discovery` method: {}", Logging.simpleName(discovery))
        discovery

      case otherDiscoveryMechanism =>
        log.info("Bootstrap using `akka.discovery` method: {}", otherDiscoveryMechanism)
        Discovery(system).loadServiceDiscovery(otherDiscoveryMechanism)
    }

  private val joinDecider: JoinDecider = {
    system.dynamicAccess
      .createInstanceFor[JoinDecider](
        settings.joinDecider.implClass,
        List((classOf[ActorSystem], system), (classOf[ClusterBootstrapSettings], settings))
      )
      .get
  }

  private[this] val _selfContactPointUri: Promise[Uri] = Promise()

  override def routes(routeProviderSettings: ManagementRouteProviderSettings): Route = {
    log.info(s"Using self contact point address: ${routeProviderSettings.selfBaseUri}")
    this.setSelfContactPoint(routeProviderSettings.selfBaseUri)

    new HttpClusterBootstrapRoutes(settings).routes
  }

  def start(): Unit =
    if (Cluster(system).settings.SeedNodes.nonEmpty) {
      log.warning(
        "Application is configured with specific `akka.cluster.seed-nodes`: {}, bailing out of the bootstrap process! " +
        "If you want to use the automatic bootstrap mechanism, make sure to NOT set explicit seed nodes in the configuration. " +
        "This node will attempt to join the configured seed nodes.",
        Cluster(system).settings.SeedNodes.mkString("[", ", ", "]")
      )
    } else if (bootstrapStep.compareAndSet(NotRunning, Initializing)) {
      log.info("Initiating bootstrap procedure using {} method...", settings.contactPointDiscovery.discoveryMethod)

      ensureSelfContactPoint()
      val bootstrapProps = BootstrapCoordinator.props(discovery, joinDecider, settings)
      val bootstrap = system.systemActorOf(bootstrapProps, "bootstrapCoordinator")
      // Bootstrap already logs in several other execution points when it can't form a cluster, and why.
      selfContactPoint.foreach { uri =>
        bootstrap ! BootstrapCoordinator.Protocol.InitiateBootstrapping(uri)
      }
    } else log.warning("Bootstrap already initiated, yet start() method was called again. Ignoring.")

  private[bootstrap] object Internal {
    sealed trait BootstrapStep
    case object NotRunning extends BootstrapStep
    case object Initializing extends BootstrapStep
  }

} 
Example 12
Source File: EmbeddedZookeeper.scala    From embedded-kafka   with Apache License 2.0
package com.tuplejump.embedded.kafka

import java.io.{File => JFile}
import java.net.InetSocketAddress
import java.util.concurrent.atomic.AtomicReference

import scala.util.Try
import org.I0Itec.zkclient.exception.ZkMarshallingError
import org.I0Itec.zkclient.serialize.ZkSerializer
import org.apache.zookeeper.server.{ NIOServerCnxnFactory, ZooKeeperServer }


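  // NOTE: the enclosing class declaration is truncated in this snippet; `_zookeeper`
  // and `_factory` are AtomicReference[Option[...]] holders for the server and its
  // connection factory, and `connectTo`, `tickTime`, `snapDir`, `dataDir` and
  // `logger` are fields of the class.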
  def start(): Unit = {
    val server = new ZooKeeperServer(snapDir, dataDir, tickTime)
    _zookeeper.set(Some(server))

    val (ip, port) = {
      val splits = connectTo.split(":")
      (splits(0), splits(1).toInt)
    }

    val f = new NIOServerCnxnFactory()
    f.configure(new InetSocketAddress(ip, port), 16)
    f.startup(server)

    _factory.set(Some(f))

    logger.info(s"ZooKeeperServer isRunning: $isRunning")
  }

  def shutdown(): Unit = {
    logger.info(s"Shutting down ZK NIOServerCnxnFactory.")

    for (v <- _factory.get) v.shutdown()
    _factory.set(None)

    for (v <- _zookeeper.get) {
      Try(v.shutdown())
      //awaitCond(!v.isRunning, 2000.millis)
      logger.info(s"ZooKeeper server shut down.")
    }
    _zookeeper.set(None)
  }
}

object DefaultStringSerializer extends ZkSerializer {

  @throws(classOf[ZkMarshallingError])
  def serialize(data: Object): Array[Byte] = data match {
    case a: String => a.getBytes("UTF-8")
    case _         => throw new ZkMarshallingError(s"Unsupported type '${data.getClass}'")
  }

  @throws(classOf[ZkMarshallingError])
  def deserialize(bytes: Array[Byte]): Object = bytes match {
    case b if Option(b).isEmpty => "" //ick
    case b                      => new String(bytes, "UTF-8")
  }
} 
Example 13
Source File: GrpcGraphStage.scala    From grpcakkastream   with MIT License
package grpc.akkastreams

import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}

import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import io.grpc.stub.{ClientCallStreamObserver, ClientResponseObserver}

class GrpcGraphStage[I, O](operator: GrpcOperator[I, O]) extends GraphStage[FlowShape[I, O]] {
  val in = Inlet[I]("grpc.in")
  val out = Outlet[O]("grpc.out")

  override val shape: FlowShape[I, O] = FlowShape.of(in, out)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) with InHandler with OutHandler {

      var requestStream = new AtomicReference[Option[ClientCallStreamObserver[I]]](None)
      val element = new AtomicReference[Option[I]](None)
      val requested = new AtomicBoolean(false)

      val outObs = new ClientResponseObserver[I, O] with Runnable {
        override def beforeStart(reqStream: ClientCallStreamObserver[I]): Unit = {
          requestStream.set(Some(reqStream))
          reqStream.disableAutoInboundFlowControl()
          reqStream.setOnReadyHandler(this)
        }

        override def onError(t: Throwable) =
          getAsyncCallback((t: Throwable) => fail(out, t)).invoke(t)

        override def onCompleted() =
          getAsyncCallback((_: Unit) => complete(out)).invoke(())

        override def onNext(value: O) =
          getAsyncCallback((value: O) => push(out, value)).invoke(value)

        override def run(): Unit = requestStream.get().foreach { reqStream =>
          if (requested.compareAndSet(true, false)) reqStream.request(1)
          if (reqStream.isReady) {
            element.getAndSet(None).foreach { value =>
              reqStream.onNext(value)
              tryPull(in)
            }
          }
        }
      }

      val inObs = operator(outObs)

      override def onPush(): Unit = {
        val value = grab(in)
        requestStream.get() match {
          case Some(reqStream) if reqStream.isReady() =>
            reqStream.onNext(value)
            pull(in)
          case _ => element.compareAndSet(None, Some(value))
        }
      }

      override def onUpstreamFinish(): Unit = inObs.onCompleted()

      override def onUpstreamFailure(t: Throwable): Unit = inObs.onError(t)

      override def onPull(): Unit =
        requestStream.get() match {
          case Some(reqStream) => reqStream.request(1)
          case _ => requested.compareAndSet(false, true)
        }

      override def preStart(): Unit = pull(in)

      setHandler(in, this)
      setHandler(out, this)
    }
} 
Example 14
Source File: GrpcAkkaStreamsClientCalls.scala    From grpcakkastream   with MIT License
package grpc.akkastreams

import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}

import akka.NotUsed
import akka.stream.scaladsl.{Flow, Source}
import com.trueaccord.scalapb.grpc.Grpc
import io.grpc.{ClientCall, Metadata, Status}
import io.grpc.stub._

object GrpcAkkaStreamsClientCalls {

  def unaryFlow[I, O](call: ClientCall[I, O]): Flow[I, O, NotUsed] =
    Flow[I].flatMapConcat(request =>
      Source.fromFuture(
        Grpc.guavaFuture2ScalaFuture(
          ClientCalls.futureUnaryCall(call, request)
        )
      )
    )

  def serverStreamingFlow[I, O](call: ClientCall[I, O]): Flow[I, O, NotUsed] =
    Flow.fromGraph(
      new GrpcGraphStage[I, O](outputObserver => {
        val out = outputObserver.asInstanceOf[ClientResponseObserver[I, O]]
        val in = new ClientCallStreamObserver[I] {
          val halfClosed = new AtomicBoolean(false)
          val onReadyHandler = new AtomicReference[Option[Runnable]](None)
          val listener = new ClientCall.Listener[O] {
            override def onClose(status: Status, trailers: Metadata): Unit =
              status.getCode match {
                case Status.Code.OK => out.onCompleted()
                case _ => out.onError(status.asException(trailers))
              }
            override def onMessage(message: O): Unit =
              out.onNext(message)
            override def onReady(): Unit =
              onReadyHandler.get().foreach(_.run())
          }
          call.start(listener, new Metadata())

          override def cancel(message: String, cause: Throwable): Unit =
            call.cancel(message, cause)
          override def setOnReadyHandler(onReadyHandler: Runnable): Unit =
            this.onReadyHandler.set(Some(onReadyHandler))
          override def request(count: Int): Unit = call.request(count)
          override def disableAutoInboundFlowControl(): Unit = ()
          override def isReady: Boolean = !halfClosed.get() || call.isReady
          override def setMessageCompression(enable: Boolean): Unit =
            call.setMessageCompression(enable)
          override def onError(t: Throwable): Unit =
            call.cancel("Cancelled by client with StreamObserver.onError()", t)
          override def onCompleted(): Unit = ()
          override def onNext(request: I): Unit = {
            call.sendMessage(request)
            halfClosed.set(true)
            call.halfClose()
          }
        }
        out.beforeStart(in)
        in
      })
    )

  def clientStreamingFlow[I, O](call: ClientCall[I, O]): Flow[I, O, NotUsed] =
    Flow.fromGraph(new GrpcGraphStage[I, O](ClientCalls.asyncClientStreamingCall(call, _)))

  def bidiStreamingFlow[I, O](call: ClientCall[I, O]): Flow[I, O, NotUsed] =
    Flow.fromGraph(new GrpcGraphStage[I, O](ClientCalls.asyncBidiStreamingCall(call, _)))
} 
Example 15
Source File: EvilSparkContext.scala    From spark-testing-base   with Apache License 2.0
package org.apache.spark

import com.holdenkarau.spark.testing.LocalSparkContext

import java.util.concurrent.atomic.AtomicReference


object EvilSparkContext {
  def stopActiveSparkContext(): Unit = {
    val declaredFields = classOf[SparkContext].getDeclaredFields()
    declaredFields.foreach{field => field.setAccessible(true) }
    val activeContextField =  declaredFields.filter(_.getName.contains("active"))
    val activeContextValue = activeContextField.map(field => field.get(SparkContext$.MODULE$))
    val activeContextRef = activeContextValue.filter(ctx => ctx != null && ctx.isInstanceOf[AtomicReference[_]])
    activeContextRef.foreach{ctx =>
      LocalSparkContext.stop(ctx.asInstanceOf[AtomicReference[SparkContext]].get())
    }
  }
} 
Example 16
Source File: SQLContext.scala    From spark-testing-base   with Apache License 2.0
package com.holdenkarau.spark.testing

import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext

import java.util.concurrent.atomic.AtomicReference


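  // NOTE: the enclosing object declaration is truncated in this snippet;
  // INSTANTIATION_LOCK is a plain lock object and lastInstantiatedContext
  // an AtomicReference[SQLContext] (initially null).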
  def getOrCreate(sparkContext: SparkContext): SQLContext = {
    INSTANTIATION_LOCK.synchronized {
      lastInstantiatedContext.get() match {
        case null =>
          lastInstantiatedContext.set(new SQLContext(sparkContext))
        case _ if lastInstantiatedContext.get().sparkContext != sparkContext =>
          clearLastInstantiatedContext()
          lastInstantiatedContext.set(new SQLContext(sparkContext))
      }
      lastInstantiatedContext.get()
    }
  }

  private def clearLastInstantiatedContext(): Unit = {
    INSTANTIATION_LOCK.synchronized {
      lastInstantiatedContext.set(null)
    }
  }

  private def setLastInstantiatedContext(sqlContext: SQLContext): Unit = {
    INSTANTIATION_LOCK.synchronized {
      lastInstantiatedContext.set(sqlContext)
    }
  }
} 
Example 17
Source File: TestingLogger.scala    From log4cats   with Apache License 2.0
package io.chrisdavenport.log4cats.testing

import io.chrisdavenport.log4cats.{SelfAwareLogger}
import cats.effect.Sync
import cats.implicits._
import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec

trait TestingLogger[F[_]] extends SelfAwareLogger[F] {
  import TestingLogger.LogMessage
  def logged: F[Vector[LogMessage]]
}

object TestingLogger {

  sealed trait LogMessage {
    def message: String
    def throwOpt: Option[Throwable]
  }

  final case class TRACE(message: String, throwOpt: Option[Throwable]) extends LogMessage
  final case class DEBUG(message: String, throwOpt: Option[Throwable]) extends LogMessage
  final case class INFO(message: String, throwOpt: Option[Throwable]) extends LogMessage
  final case class WARN(message: String, throwOpt: Option[Throwable]) extends LogMessage
  final case class ERROR(message: String, throwOpt: Option[Throwable]) extends LogMessage

  def impl[F[_]: Sync](
      traceEnabled: Boolean = true,
      debugEnabled: Boolean = true,
      infoEnabled: Boolean = true,
      warnEnabled: Boolean = true,
      errorEnabled: Boolean = true
  ): TestingLogger[F] = {
    val ar = new AtomicReference(Vector.empty[LogMessage])
    def appendLogMessage(m: LogMessage): F[Unit] = Sync[F].delay {
      @tailrec
      def mod(): Unit = {
        val c = ar.get
        val u = c :+ m
        if (!ar.compareAndSet(c, u)) mod
        else ()
      }
      mod()
    }

    new TestingLogger[F] {
      def logged: F[Vector[LogMessage]] = Sync[F].delay(ar.get)

      def isTraceEnabled: F[Boolean] = Sync[F].pure(traceEnabled)
      def isDebugEnabled: F[Boolean] = Sync[F].pure(debugEnabled)
      def isInfoEnabled: F[Boolean] = Sync[F].pure(infoEnabled)
      def isWarnEnabled: F[Boolean] = Sync[F].pure(warnEnabled)
      def isErrorEnabled: F[Boolean] = Sync[F].pure(errorEnabled)

      def error(message: => String): F[Unit] =
        if (errorEnabled) appendLogMessage(ERROR(message, None)) else Sync[F].pure(())
      def error(t: Throwable)(message: => String): F[Unit] =
        if (errorEnabled) appendLogMessage(ERROR(message, t.some)) else Sync[F].pure(())

      def warn(message: => String): F[Unit] =
        if (warnEnabled) appendLogMessage(WARN(message, None)) else Sync[F].pure(())
      def warn(t: Throwable)(message: => String): F[Unit] =
        if (warnEnabled) appendLogMessage(WARN(message, t.some)) else Sync[F].pure(())

      def info(message: => String): F[Unit] =
        if (infoEnabled) appendLogMessage(INFO(message, None)) else Sync[F].pure(())
      def info(t: Throwable)(message: => String): F[Unit] =
        if (infoEnabled) appendLogMessage(INFO(message, t.some)) else Sync[F].pure(())

      def debug(message: => String): F[Unit] =
        if (debugEnabled) appendLogMessage(DEBUG(message, None)) else Sync[F].pure(())
      def debug(t: Throwable)(message: => String): F[Unit] =
        if (debugEnabled) appendLogMessage(DEBUG(message, t.some)) else Sync[F].pure(())

      def trace(message: => String): F[Unit] =
        if (traceEnabled) appendLogMessage(TRACE(message, None)) else Sync[F].pure(())
      def trace(t: Throwable)(message: => String): F[Unit] =
        if (traceEnabled) appendLogMessage(TRACE(message, t.some)) else Sync[F].pure(())
    }
  }

} 
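A hypothetical test-side usage of the logger above, assuming cats-effect IO provides the Sync instance:

import cats.effect.IO
import cats.implicits._
import io.chrisdavenport.log4cats.testing.TestingLogger

val logger = TestingLogger.impl[IO]()
// Run the program, then inspect what was captured instead of printed.
val captured = (logger.info("hello") *> logger.logged).unsafeRunSync()
// captured == Vector(TestingLogger.INFO("hello", None))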
Example 18
Source File: AddressTerminatedTopic.scala    From perf_tester   with Apache License 2.0
package akka.event

import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.AddressTerminated
import akka.actor.ExtendedActorSystem
import akka.actor.Extension
import akka.actor.ExtensionId
import akka.actor.ExtensionIdProvider


private[akka] final class AddressTerminatedTopic extends Extension {

  private val subscribers = new AtomicReference[Set[ActorRef]](Set.empty[ActorRef])

  @tailrec def subscribe(subscriber: ActorRef): Unit = {
    val current = subscribers.get
    if (!subscribers.compareAndSet(current, current + subscriber))
      subscribe(subscriber) // retry
  }

  @tailrec def unsubscribe(subscriber: ActorRef): Unit = {
    val current = subscribers.get
    if (!subscribers.compareAndSet(current, current - subscriber))
      unsubscribe(subscriber) // retry
  }

  def publish(msg: AddressTerminated): Unit = {
    subscribers.get foreach { _.tell(msg, ActorRef.noSender) }
  }

} 
Example 19
Source File: StoreFlusherSpec.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist.master.execution.status

import java.util.concurrent.atomic.AtomicReference

import akka.testkit.TestActorRef
import io.hydrosphere.mist.master.Messages.StatusMessages._
import io.hydrosphere.mist.master.logging.JobLogger
import io.hydrosphere.mist.master.{ActorSpec, JobDetails, TestData}
import mist.api.data.JsNumber
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}

import scala.concurrent.{Future, Promise}

class StoreFlusherSpec extends ActorSpec("store-flusher") with TestData with Eventually {

  it("should flush job statuses") {
    val initial1 = Promise[JobDetails]
    val initial2 = Promise[JobDetails]

    val updateResult1 = new AtomicReference[Option[JobDetails]](None)
    val updateResult2 = new AtomicReference[Option[JobDetails]](None)
    val props = StoreFlusher.props(
      get = (id: String) => id match {
        case "1" => initial1.future
        case "2" => initial2.future
      },
      update = (d: JobDetails) => {
        d.jobId match {
          case "1" => updateResult1.set(Some(d))
          case "2" => updateResult2.set(Some(d))
        }
        Future.successful(())
      },
      jobLoggerF = _ => JobLogger.NOOP
    )
    val flusher = TestActorRef(props)

    Seq("1", "2").foreach(id => {
      flusher ! ReportedEvent.plain(QueuedEvent(id))
      flusher ! ReportedEvent.plain(StartedEvent(id, System.currentTimeMillis()))
      flusher ! ReportedEvent.plain(FinishedEvent(id, System.currentTimeMillis(), JsNumber(42)))
    })
    initial1.success(mkDetails(JobDetails.Status.Initialized).copy(jobId = "1"))
    initial2.success(mkDetails(JobDetails.Status.Initialized).copy(jobId = "2"))

    def test(ref: AtomicReference[Option[JobDetails]]): Unit = {
      val value = ref.get
      value.isDefined shouldBe true

      val d = value.get
      d.status shouldBe JobDetails.Status.Finished
    }

    eventually(timeout(Span(3, Seconds))) {
      test(updateResult1)
      test(updateResult2)
    }
  }
} 
Example 20
Source File: ReloadableSchemaProvider.scala    From graphql-gateway   with Apache License 2.0
package sangria.gateway.schema

import java.util.concurrent.atomic.AtomicReference

import akka.actor.ActorSystem
import akka.stream.{Materializer, OverflowStrategy}
import akka.stream.scaladsl.{BroadcastHub, Keep, RunnableGraph, Source}
import better.files.File
import sangria.gateway.AppConfig
import sangria.gateway.file.FileMonitorActor
import sangria.gateway.http.client.HttpClient
import sangria.gateway.schema.materializer.{GatewayContext, GatewayMaterializer}
import sangria.gateway.util.Logging

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

// TODO: on a timer reload all external schemas and check for changes
class ReloadableSchemaProvider(config: AppConfig, client: HttpClient, mat: GatewayMaterializer)(implicit system: ActorSystem, ec: ExecutionContext, amat: Materializer) extends SchemaProvider[GatewayContext, Any] with Logging {
  val loader = new SchemaLoader(config, client, mat)
  val schemaRef = new AtomicReference[Option[SchemaInfo[GatewayContext, Any]]](None)

  system.actorOf(FileMonitorActor.props(config.watch.allFiles, config.watch.threshold, config.watch.allGlobs, reloadSchema))

  private val producer = Source.actorRef[Boolean](100, OverflowStrategy.dropTail)
  private val runnableGraph = producer.toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both)
  private val (changesPublisher, changesSource) = runnableGraph.run()

  val schemaChanges = Some(changesSource)

  def schemaInfo =
    schemaRef.get() match {
      case v @ Some(_) ⇒ Future.successful(v)
      case None ⇒ reloadSchema
    }

  def reloadSchema(files: Vector[File]): Unit = {
    logger.info(s"Schema files are changed: ${files mkString ", "}. Reloading schema")

    reloadSchema
  }

  def reloadSchema: Future[Option[SchemaInfo[GatewayContext, Any]]] =
    loader.loadSchema.andThen {
      case Success(Some(newSchema)) ⇒
        schemaRef.get() match {
          case Some(currentSchema) ⇒
            val changes = newSchema.schema.compare(currentSchema.schema)
            val renderedChanges =
              if (changes.nonEmpty)
                " with following changes:\n" + changes.map(c ⇒ "  * " + c.description + (if (c.breakingChange) " (breaking)" else "")).mkString("\n")
              else
                " without any changes."

            changesPublisher ! true
            logger.info(s"Schema successfully reloaded$renderedChanges")
          case None ⇒
            logger.info(s"Schema successfully loaded from files:\n${newSchema.files.map(f ⇒ "  * " + f).mkString("\n")}")
        }

        schemaRef.set(Some(newSchema))
      case Failure(error) ⇒
        logger.error("Failed to load the schema", error)
    }
} 
Example 21
Source File: Repl.scala    From c4proto   with Apache License 2.0
package ee.cone.c4actor_repl_impl

import java.util.concurrent.atomic.AtomicReference

import ee.cone.c4actor._
import ee.cone.c4actor.QProtocol.S_Firstborn
import ee.cone.c4actor.Types.SrcId
import ee.cone.c4assemble.{Assemble, assemble, c4assemble}
import ee.cone.c4assemble.Types.{Each, Values}
import ammonite.sshd._
import ammonite.util.Bind
import ee.cone.c4di.c4multi
import org.apache.sshd.server.auth.pubkey.AcceptAllPublickeyAuthenticator

@c4assemble("SSHDebugApp") class SSHDebugAssembleBase(factory: SSHDebugTxFactory)   {
  def join(
    key: SrcId,
    firstborn: Each[S_Firstborn]
  ): Values[(SrcId,TxTransform)] =
    List(WithPK(factory.create()))
}

@c4multi("SSHDebugApp") final case class SSHDebugTx(srcId: SrcId="SSHDebug")(
  reducer: RichRawWorldReducer,
  qMessages: QMessages
) extends TxTransform {
  def init(): RichContext=>Unit = {
    val ref = new AtomicReference[Option[RichContext]](None)
    def ctx(): RichContext = ref.get.get
    def tx(f: Context=>Object): List[_] = {
      val context = ref.get.get
      f(new Context(context.injected,context.assembled,context.executionContext,Map.empty)) match {
        case local: Context =>
          val nLocal = qMessages.send(local)
          Nil
        case res: List[_] => res
      }
    }
    val server = new SshdRepl(
      SshServerConfig(
        address = "localhost", // or "0.0.0.0" for public-facing shells
        port = 22222,
        publicKeyAuthenticator = Option(AcceptAllPublickeyAuthenticator.INSTANCE)
      ),
      replArgs = List(Bind[(Context=>Object)=>Object]("tx",tx))
    )
    server.start()
    v=>ref.set(Option(v))
  }
  def transform(local: Context): Context = {
    val nLocal = if(SSHDebugKey.of(local).nonEmpty) local
      else SSHDebugKey.set(Option(init()))(local)
    SSHDebugKey.of(nLocal).get(reducer.reduce(Option(nLocal),Nil))
    nLocal
  }
}


case object SSHDebugKey extends TransientLens[Option[RichContext=>Unit]](None) 
Example 22
Source File: AtomicRef.scala    From grpcexample   with MIT License
package concurrency

import java.util.concurrent.atomic.AtomicReference

import scala.annotation.tailrec

class AtomicRef[T](initial: T) {
  private val ref: AtomicReference[T] = new AtomicReference[T](initial)

  def get: T = ref.get()

  @tailrec final def updateAndGet(update: T => T): T = {
    val oldValue = ref.get
    val newValue = update(oldValue)
    if (ref.compareAndSet(oldValue, newValue)) {
      newValue
    } else {
      updateAndGet(update)
    }
  }
} 
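A hypothetical usage of the wrapper above: a lock-free counter built from updateAndGet.

val counter = new AtomicRef[Int](0)
counter.updateAndGet(_ + 1) // 1
counter.updateAndGet(_ + 1) // 2
counter.get // 2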
Example 23
Source File: ClassIndexer.scala    From polynote   with Apache License 2.0
package polynote.kernel.interpreter.scal

import java.io.File
import java.net.URI
import java.util.concurrent.atomic.AtomicReference
import java.util.function.UnaryOperator

import io.github.classgraph.ClassGraph
import polynote.kernel.ScalaCompiler
import polynote.kernel.util.pathOf
import zio.blocking.{Blocking, effectBlocking}
import zio.{Fiber, RIO, UIO, ZIO}

import scala.collection.immutable.TreeMap

trait ClassIndexer {

  def findMatches(name: String): UIO[Map[String, List[(Int, String)]]]

  def await: UIO[Unit]
}

object ClassIndexer {
  def default: ZIO[Blocking with ScalaCompiler.Provider, Nothing, ClassIndexer] =
    SimpleClassIndexer()
}

class SimpleClassIndexer(ref: AtomicReference[TreeMap[String, List[(Int, String)]]], process: Fiber[Throwable, Any]) extends ClassIndexer {

  override def findMatches(name: String): UIO[Map[String, List[(Int, String)]]] =
    ZIO.effectTotal(ref.get).map(_.range(name, name + Char.MaxValue))

  override def await: UIO[Unit] = process.await.unit
}

object SimpleClassIndexer {
  def apply(): ZIO[Blocking with ScalaCompiler.Provider, Nothing, SimpleClassIndexer] = {
    def buildIndex(
      priorityDependencies: Array[File],
      classPath: Array[File],
      classes: AtomicReference[TreeMap[String, List[(Int, String)]]]
    ) = effectBlocking {
      import scala.collection.JavaConverters._

      val lastPriority = priorityDependencies.length + classPath.length
      val priorities = (priorityDependencies ++ classPath.diff(priorityDependencies)).distinct.zipWithIndex.toMap

      val classGraph = new ClassGraph().overrideClasspath(priorityDependencies ++ classPath: _*).enableClassInfo()
      val scanResult = classGraph.scan()
      scanResult.getAllClasses.iterator().asScala
        .filter(_.isPublic)
        .filterNot(_.isSynthetic)
        .filterNot(_.getSimpleName.contains("$"))
        .foreach {
          classInfo =>
            val priority = priorities.getOrElse(classInfo.getClasspathElementFile, lastPriority)
            classes.updateAndGet(new UnaryOperator[TreeMap[String, List[(Int, String)]]] {
              def apply(t: TreeMap[String, List[(Int, String)]]): TreeMap[String, List[(Int, String)]] =
                t + (classInfo.getSimpleName -> ((priority -> classInfo.getName) :: t.getOrElse(classInfo.getSimpleName, Nil)))
            })
        }
      classes.get()
    }

    def javaLibraryPath = Option(classOf[Object].getResource("Object.class")).flatMap {
      case url if url.getProtocol == "jar"  => try Some(new File(new URI(url.getPath.stripSuffix("!/java/lang/Object.class")))) catch { case err: Throwable => None }
      case url if url.getProtocol == "file" => try Some(new File(url.toURI)) catch { case err: Throwable => None }
      case _ => None
    }

    for {
      classPath <- ScalaCompiler.settings.map(_.classpath.value.split(File.pathSeparatorChar).map(new File(_)))
      deps      <- ScalaCompiler.dependencies
      priorities = new File(pathOf(classOf[List[_]]).toURI) :: javaLibraryPath.toList ::: deps
      indexRef   = new AtomicReference[TreeMap[String, List[(Int, String)]]](new TreeMap)
      process   <- buildIndex(priorities.toArray, classPath, indexRef).forkDaemon
    } yield new SimpleClassIndexer(indexRef, process)
  }
} 
Example 24
Source File: CancelableF.scala    From cats-effect   with Apache License 2.0
package cats.effect
package internals

import java.util.concurrent.atomic.AtomicReference
import scala.util.Either

private[effect] object CancelableF {

  def apply[F[_], A](k: (Either[Throwable, A] => Unit) => F[CancelToken[F]])(implicit F: Concurrent[F]): F[A] =
    F.asyncF { cb =>
      val state = new AtomicReference[Either[Throwable, Unit] => Unit](null)
      val cb1 = (a: Either[Throwable, A]) => {
        try {
          cb(a)
        } finally {
          // This CAS can only succeed in case the operation is already finished
          // and no cancellation token was installed yet
          if (!state.compareAndSet(null, Callback.dummy1)) {
            val cb2 = state.get()
            state.lazySet(null)
            cb2(Callback.rightUnit)
          }
        }
      }
      // Until we've got a cancellation token, the task needs to be evaluated
      // uninterruptedly, otherwise risking a leak, hence the bracket
      F.bracketCase(k(cb1)) { _ =>
        F.async[Unit] { cb =>
          if (!state.compareAndSet(null, cb)) {
            cb(Callback.rightUnit)
          }
        }
      } { (token, e) =>
        e match {
          case ExitCase.Canceled => token
          case _                 => F.unit
        }
      }
    }
} 
Example 25
Source File: ForwardCancelable.scala    From cats-effect   with Apache License 2.0
package cats.effect
package internals

import java.util.concurrent.atomic.AtomicReference
import cats.effect.internals.TrampolineEC.immediate
import scala.annotation.tailrec
import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal


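  // NOTE: the ForwardCancelable class itself is truncated in this snippet; what
  // follows is the companion's internal state machine and helpers.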
  sealed abstract private class State

  final private case class Empty(stack: List[Callback.T[Unit]]) extends State
  final private case class Active(token: CancelToken[IO]) extends State

  private val init: State = Empty(Nil)
  private val finished: State = Active(IO.unit)
  private val context: ExecutionContext = immediate

  private def execute(token: CancelToken[IO], stack: List[Callback.T[Unit]]): Unit =
    context.execute(new Runnable {
      def run(): Unit =
        token.unsafeRunAsync { r =>
          for (cb <- stack)
            try {
              cb(r)
            } catch {
              // $COVERAGE-OFF$
              case NonFatal(e) => Logger.reportFailure(e)
              // $COVERAGE-ON$
            }
        }
    })
} 
Example 26
Source File: IOConnection.scala    From cats-effect   with Apache License 2.0
package cats.effect
package internals

import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec
import scala.concurrent.Promise


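  // NOTE: the IOConnection abstract class is truncated in this snippet; what
  // follows are the companion's two implementations.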
  val uncancelable: IOConnection =
    new Uncancelable

  final private class Uncancelable extends IOConnection {
    def cancel = IO.unit
    def isCanceled: Boolean = false
    def push(token: CancelToken[IO]): Unit = ()
    def pop(): CancelToken[IO] = IO.unit
    def pushPair(lh: IOConnection, rh: IOConnection): Unit = ()
  }

  final private class Impl extends IOConnection {
    private[this] val state = new AtomicReference(List.empty[CancelToken[IO]])
    private[this] val p: Promise[Unit] = Promise()

    val cancel = IO.suspend {
      state.getAndSet(null) match {
        case Nil  => IO { p.success(()); () }
        case null => IOFromFuture(p.future)
        case list =>
          CancelUtils
            .cancelAll(list.iterator)
            .redeemWith(ex => IO(p.success(())).flatMap(_ => IO.raiseError(ex)), _ => IO { p.success(()); () })
      }
    }

    def isCanceled: Boolean =
      state.get eq null

    @tailrec def push(cancelable: CancelToken[IO]): Unit =
      state.get() match {
        case null =>
          cancelable.unsafeRunAsyncAndForget()
        case list =>
          val update = cancelable :: list
          if (!state.compareAndSet(list, update)) push(cancelable)
      }

    def pushPair(lh: IOConnection, rh: IOConnection): Unit =
      push(CancelUtils.cancelAll(lh.cancel, rh.cancel))

    @tailrec def pop(): CancelToken[IO] =
      state.get() match {
        case null | Nil => IO.unit
        case current @ (x :: xs) =>
          if (!state.compareAndSet(current, xs)) pop()
          else x
      }
  }
} 
Example 27
Source File: Var.scala    From korolev   with Apache License 2.0
package korolev.effect

import java.util.concurrent.atomic.AtomicReference

final class Var[F[_]: Effect, T](initialValue: T) {
  private val casRef = new AtomicReference[T](initialValue)
  def set(value: T): F[Unit] =
    Effect[F].delay(casRef.set(value))
  def get: F[T] =
    Effect[F].delay(casRef.get)
  def compareAndSet(expected: T, value: T): F[Boolean] =
    Effect[F].delay(casRef.compareAndSet(expected, value))
}

object Var {
  def apply[F[_]: Effect, T](initialValue: T): Var[F, T] =
    new Var(initialValue)
} 
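
Var delegates to the underlying AtomicReference, so it supports the classic optimistic-update loop. Below is a minimal, standalone sketch of that idiom against a bare AtomicReference; the object and method names are illustrative, not part of korolev.

import java.util.concurrent.atomic.AtomicReference

import scala.annotation.tailrec

object CasLoopSketch extends App {
  private val ref = new AtomicReference[List[Int]](Nil)

  // Read the current value, compute the update, and retry if another
  // thread won the race in between: the compareAndSet retry loop.
  @tailrec
  def prepend(x: Int): Unit = {
    val current = ref.get()
    if (!ref.compareAndSet(current, x :: current)) prepend(x)
  }

  (1 to 5).foreach(prepend)
  println(ref.get()) // List(5, 4, 3, 2, 1)
}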
Example 28
Source File: DynamicWideTransactionGenerator.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.generator

import java.util.concurrent.atomic.AtomicReference

import cats.Show
import com.wavesplatform.account.KeyPair
import com.wavesplatform.generator.DynamicWideTransactionGenerator.Settings
import com.wavesplatform.generator.utils.Gen
import com.wavesplatform.transaction.Transaction

class DynamicWideTransactionGenerator(settings: Settings, accounts: Seq[KeyPair]) extends TransactionGenerator {
  require(accounts.nonEmpty)

  private val nextTxsNumber = new AtomicReference[Double](settings.start)

  private val limitedRecipientGen = Gen.address(settings.limitDestAccounts)

  override def next(): Iterator[Transaction] = {
    val currTxsNumber = nextTxsNumber.getAndUpdate { x =>
      val newValue = x + settings.growAdder
      settings.maxTxsPerRequest.foldLeft(newValue)(Math.min(_, _))
    }.toInt

    Gen.txs(settings.minFee, settings.maxFee, accounts, limitedRecipientGen).take(currTxsNumber)
  }

}

object DynamicWideTransactionGenerator {

  case class Settings(start: Int, growAdder: Double, maxTxsPerRequest: Option[Int], limitDestAccounts: Option[Int], minFee: Long, maxFee: Long) {
    require(start >= 1)
  }

  object Settings {
    implicit val toPrintable: Show[Settings] = { x =>
      import x._
      s"""txs at start: $start
         |grow adder: $growAdder
         |max txs: $maxTxsPerRequest
         |limit destination accounts: $limitDestAccounts
         |min fee: $minFee
         |max fee: $maxFee""".stripMargin
    }
  }

} 
Example 29
Source File: MultipleEndpointsDocumentationHttp4sServer.scala    From tapir   with Apache License 2.0 5 votes vote down vote up
package sttp.tapir.examples

import java.util.concurrent.atomic.AtomicReference

import cats.effect._
import cats.implicits._
import com.github.ghik.silencer.silent
import io.circe.generic.auto._
import org.http4s.HttpRoutes
import org.http4s.server.Router
import org.http4s.server.blaze.BlazeServerBuilder
import org.http4s.syntax.kleisli._
import sttp.tapir._
import sttp.tapir.docs.openapi._
import sttp.tapir.json.circe._
import sttp.tapir.openapi.OpenAPI
import sttp.tapir.openapi.circe.yaml._
import sttp.tapir.server.http4s._
import sttp.tapir.swagger.http4s.SwaggerHttp4s

import scala.concurrent.ExecutionContext

object MultipleEndpointsDocumentationHttp4sServer extends App {
  // endpoint descriptions
  case class Author(name: String)
  case class Book(title: String, year: Int, author: Author)

  val booksListing: Endpoint[Unit, Unit, Vector[Book], Nothing] = endpoint.get
    .in("books")
    .in("list" / "all")
    .out(jsonBody[Vector[Book]])

  val addBook: Endpoint[Book, Unit, Unit, Nothing] = endpoint.post
    .in("books")
    .in("add")
    .in(
      jsonBody[Book]
        .description("The book to add")
        .example(Book("Pride and Prejudice", 1813, Author("Jane Austen")))
    )

  // server-side logic
  implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.Implicits.global
  implicit val contextShift: ContextShift[IO] = IO.contextShift(ec)
  implicit val timer: Timer[IO] = IO.timer(ec)

  val books = new AtomicReference(
    Vector(
      Book("The Sorrows of Young Werther", 1774, Author("Johann Wolfgang von Goethe")),
      Book("Iliad", -8000, Author("Homer")),
      Book("Nad Niemnem", 1888, Author("Eliza Orzeszkowa")),
      Book("The Colour of Magic", 1983, Author("Terry Pratchett")),
      Book("The Art of Computer Programming", 1968, Author("Donald Knuth")),
      Book("Pharaoh", 1897, Author("Boleslaw Prus"))
    )
  )

  val booksListingRoutes: HttpRoutes[IO] = booksListing.toRoutes(_ => IO(books.get().asRight[Unit]))
  @silent("discarded")
  val addBookRoutes: HttpRoutes[IO] = addBook.toRoutes(book => IO((books.getAndUpdate(books => books :+ book): Unit).asRight[Unit]))
  val routes: HttpRoutes[IO] = booksListingRoutes <+> addBookRoutes

  // generating the documentation in yml; extension methods come from imported packages
  val openApiDocs: OpenAPI = List(booksListing, addBook).toOpenAPI("The tapir library", "1.0.0")
  val openApiYml: String = openApiDocs.toYaml

  // starting the server
  BlazeServerBuilder[IO](ec)
    .bindHttp(8080, "localhost")
    .withHttpApp(Router("/" -> (routes <+> new SwaggerHttp4s(openApiYml).routes[IO])).orNotFound)
    .resource
    .use { _ =>
      IO {
        println("Go to: http://localhost:8080/docs")
        println("Press any key to exit ...")
        scala.io.StdIn.readLine()
      }
    }
    .unsafeRunSync()
} 
Example 30
Source File: MultipleEndpointsDocumentationAkkaServer.scala    From tapir   with Apache License 2.0 5 votes vote down vote up
package sttp.tapir.examples

import java.util.concurrent.atomic.AtomicReference

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import com.github.ghik.silencer.silent
import io.circe.generic.auto._
import sttp.tapir._
import sttp.tapir.docs.openapi._
import sttp.tapir.json.circe._
import sttp.tapir.openapi.OpenAPI
import sttp.tapir.openapi.circe.yaml._
import sttp.tapir.server.akkahttp._
import sttp.tapir.swagger.akkahttp.SwaggerAkka

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

object MultipleEndpointsDocumentationAkkaServer extends App {
  // endpoint descriptions
  case class Author(name: String)
  case class Book(title: String, year: Int, author: Author)

  val booksListing: Endpoint[Unit, Unit, Vector[Book], Nothing] = endpoint.get
    .in("books")
    .in("list" / "all")
    .out(jsonBody[Vector[Book]])

  val addBook: Endpoint[Book, Unit, Unit, Nothing] = endpoint.post
    .in("books")
    .in("add")
    .in(
      jsonBody[Book]
        .description("The book to add")
        .example(Book("Pride and Prejudice", 1813, Author("Jane Austen")))
    )

  // server-side logic
  val books = new AtomicReference(
    Vector(
      Book("The Sorrows of Young Werther", 1774, Author("Johann Wolfgang von Goethe")),
      Book("Iliad", -8000, Author("Homer")),
      Book("Nad Niemnem", 1888, Author("Eliza Orzeszkowa")),
      Book("The Colour of Magic", 1983, Author("Terry Pratchett")),
      Book("The Art of Computer Programming", 1968, Author("Donald Knuth")),
      Book("Pharaoh", 1897, Author("Boleslaw Prus"))
    )
  )

  val booksListingRoute = booksListing.toRoute(_ => Future.successful(Right(books.get())))
  @silent("discarded")
  val addBookRoute = addBook.toRoute(book => Future.successful(Right(books.getAndUpdate(books => books :+ book))))

  // generating the documentation in yml; extension methods come from imported packages
  val openApiDocs: OpenAPI = List(booksListing, addBook).toOpenAPI("The tapir library", "1.0.0")
  val openApiYml: String = openApiDocs.toYaml

  // starting the server
  implicit val actorSystem: ActorSystem = ActorSystem()
  import actorSystem.dispatcher

  val routes = {
    import akka.http.scaladsl.server.Directives._
    booksListingRoute ~ addBookRoute ~ new SwaggerAkka(openApiYml).routes
  }

  val bindAndCheck = Http().bindAndHandle(routes, "localhost", 8080).map { _ =>
    // testing
    println("Go to: http://localhost:8080/docs")
    println("Press any key to exit ...")
    scala.io.StdIn.readLine()
  }

  // cleanup
  Await.result(bindAndCheck.transformWith { r => actorSystem.terminate().transform(_ => r) }, 1.minute)
} 
Example 31
Source File: CustomCodeEntryPoint.scala    From seahorse-workflow-executor   with Apache License 2.0 5 votes vote down vote up
package io.deepsense.workflowexecutor.customcode

import java.util.concurrent.TimeoutException
import java.util.concurrent.atomic.AtomicReference

import scala.annotation.tailrec
import scala.concurrent.duration._
import scala.concurrent.{Await, Promise}

import org.apache.spark.api.java.JavaSparkContext
import org.apache.spark.sql.DataFrame
import org.apache.spark.{SparkConf, SparkContext}

import io.deepsense.commons.utils.Logging
import io.deepsense.deeplang._
import io.deepsense.sparkutils.SparkSQLSession


class CustomCodeEntryPoint(
    val sparkContext: SparkContext,
    val sparkSQLSession: SparkSQLSession,
    val dataFrameStorage: DataFrameStorage,
    val operationExecutionDispatcher: OperationExecutionDispatcher)
  extends Logging {
  import io.deepsense.workflowexecutor.customcode.CustomCodeEntryPoint._
  def getSparkContext: JavaSparkContext = sparkContext

  def getSparkSQLSession: SparkSQLSession = sparkSQLSession

  def getNewSparkSQLSession: SparkSQLSession = sparkSQLSession.newSession()

  def getSparkConf: SparkConf = sparkContext.getConf

  private val codeExecutor: AtomicReference[Promise[CustomCodeExecutor]] =
    new AtomicReference(Promise())

  private val pythonPort: AtomicReference[Promise[Int]] =
    new AtomicReference(Promise())

  def getCodeExecutor(timeout: Duration): CustomCodeExecutor =
    getFromPromise(codeExecutor.get, timeout)

  def getPythonPort(timeout: Duration): Int =
    getFromPromise(pythonPort.get, timeout)

  def registerCodeExecutor(newCodeExecutor: CustomCodeExecutor): Unit =
    replacePromise(codeExecutor, newCodeExecutor)

  def registerCallbackServerPort(newPort: Int): Unit =
    replacePromise(pythonPort, newPort)

  def retrieveInputDataFrame(workflowId: String, nodeId: String, portNumber: Int): DataFrame =
    dataFrameStorage.getInputDataFrame(workflowId, nodeId, portNumber).get

  def retrieveOutputDataFrame(workflowId: String, nodeId: String, portNumber: Int): DataFrame =
    dataFrameStorage.getOutputDataFrame(workflowId, nodeId, portNumber).get

  def registerOutputDataFrame(
      workflowId: String, nodeId: String, portNumber: Int, dataFrame: DataFrame): Unit =
    dataFrameStorage.setOutputDataFrame(workflowId, nodeId, portNumber, dataFrame)

  def executionCompleted(workflowId: String, nodeId: String): Unit =
    operationExecutionDispatcher.executionEnded(workflowId, nodeId, Right(()))

  def executionFailed(workflowId: String, nodeId: String, error: String): Unit =
    operationExecutionDispatcher.executionEnded(workflowId, nodeId, Left(error))
}

object CustomCodeEntryPoint {
  private case class PromiseReplacedException() extends Exception

  @tailrec
  private def getFromPromise[T](promise: => Promise[T], timeout: Duration): T = {
    try {
      Await.result(promise.future, timeout)
    } catch {
      case e: TimeoutException => throw e
      case e: PromiseReplacedException => getFromPromise(promise, timeout)
    }
  }

  private def replacePromise[T](promise: AtomicReference[Promise[T]], newValue: T): Unit = {
    val oldPromise = promise.getAndSet(Promise.successful(newValue))
    try {
      oldPromise.failure(new PromiseReplacedException)
    } catch {
      // Except for the first replacement, the old promise will already be
      // completed, so the IllegalStateException is expected. We still attempt
      // to complete it, since someone might be waiting on it.
      case e: IllegalStateException => ()
    }
  }

  case class CustomCodeEntryPointConfig(
    pyExecutorSetupTimeout: Duration = 5.seconds)
} 
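
The replacePromise/getFromPromise pair above is a reusable pattern: getAndSet atomically swaps in a completed promise, and failing the old one wakes any waiters so they can retry. A standalone sketch of the same idea follows; the names are illustrative and the retry loop is simplified away.

import java.util.concurrent.atomic.AtomicReference

import scala.concurrent.{Await, Promise}
import scala.concurrent.duration._

object PromiseSwapSketch extends App {
  private val slot = new AtomicReference(Promise[Int]())

  // Atomically replace the pending promise with an already-completed one.
  def publish(value: Int): Unit = {
    val old = slot.getAndSet(Promise.successful(value))
    // Wake anyone still waiting on the old promise; if it was already
    // completed (every call after the first), tryFailure is a no-op.
    old.tryFailure(new IllegalStateException("promise replaced"))
    ()
  }

  publish(42)
  println(Await.result(slot.get.future, 1.second)) // 42
}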
Example 32
Source File: HttpManagementServer.scala    From akka-cluster-manager   with MIT License 5 votes vote down vote up
package io.orkestra.cluster.management

import java.util.concurrent.atomic.AtomicReference

import akka.Done
import akka.actor.{ActorSystem, ActorRef}
import akka.cluster.Cluster
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.stream.{ActorMaterializer, Materializer}
import io.orkestra.cluster.protocol.Response.{Failure, Success}
import io.orkestra.cluster.routing.ClusterListener._
import akka.pattern.ask
import play.api.libs.json.Json
import scala.concurrent.{Promise, Future, ExecutionContext}
import scala.concurrent.duration._

class HttpManagementServer(clusterListener: ActorRef, hostName: String = "127.0.0.1", port: Int = 33333)(
    implicit
    val system:                ActorSystem,
    implicit val materializer: Materializer,
    implicit val executer:     ExecutionContext
) {

  import PlayJsonSupport._

  def handleOrkestraRequest(req: ManagementReguest) =
    (clusterListener ? req)(3.seconds).map {
      case res: Success =>
        res.httpStatusCode -> res.asJson
      case res: Failure =>
        res.httpStatusCode -> res.asJson
    }

  def orkestraRoutes =
    pathPrefix("orkestra" / "routers") {
      pathEndOrSingleSlash {
        get {
          complete(handleOrkestraRequest(GetRouters))
        }
      } ~
        path(Segment ~ Slash.?) { role =>
          get {
            complete(handleOrkestraRequest(GetRouter(role)))
          }
        } ~
        path(Segment / Remaining ~ Slash.?) { (role, routeePath) =>
          delete {
            complete(handleOrkestraRequest(DeleteRoutee(role, routeePath)))
          }
        }
    }

  private val bindingFuture = new AtomicReference[Future[Http.ServerBinding]]()

  def start() = {
    val serverBindingPromise = Promise[Http.ServerBinding]()
    if (bindingFuture.compareAndSet(null, serverBindingPromise.future)) {
      Http().bindAndHandle(orkestraRoutes, hostName, port)
      println(Console.CYAN + s"cluster http management server online at http://${hostName}:${port}/" + Console.WHITE)
    }
  }

  def shutdown =
    if (bindingFuture.get() == null) {
      Future(Done)
    } else {
      val stopFuture = bindingFuture.get().flatMap(_.unbind()).map(_ => Done)
      bindingFuture.set(null)
      stopFuture
    }

} 
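
start() above relies on compareAndSet(null, ...) as a once-only guard: of all racing threads, only the CAS winner binds the server. In isolation the guard looks like the hedged sketch below; note the side effect must sit behind the if, since CAS arguments are evaluated eagerly.

import java.util.concurrent.atomic.AtomicReference

import scala.concurrent.{Future, Promise}

object StartOnceSketch extends App {
  private val binding = new AtomicReference[Future[String]]()

  def start(): Unit = {
    val placeholder = Promise[String]()
    if (binding.compareAndSet(null, placeholder.future)) {
      // Only the winning thread gets here, so the effect runs exactly once.
      placeholder.success("server bound")
      println("started")
    } else println("already started")
  }

  start() // started
  start() // already started
}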
Example 33
Source File: DataStreamPublisher.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.datastream

import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.atomic.{AtomicBoolean, AtomicMarkableReference, AtomicReference}

import com.sksamuel.exts.collection.BlockingQueueConcurrentIterator
import io.eels.Row
import io.eels.schema.StructType


class DataStreamPublisher(override val schema: StructType) extends DataStream {

  private val queue = new LinkedBlockingQueue[Seq[Row]]
  private val running = new AtomicBoolean(true)
  private val failure = new AtomicReference[Throwable](null)

  def isCancelled: Boolean = !running.get

  override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {
    try {
      subscriber.subscribed(new Subscription {
        override def cancel(): Unit = {
          queue.clear()
          queue.put(Row.Sentinel)
          running.set(false)
        }
      })
      BlockingQueueConcurrentIterator(queue, Row.Sentinel).takeWhile(_ => running.get).foreach(subscriber.next)
      failure.get match {
        case t: Throwable => subscriber.error(t)
        case _ => subscriber.completed()
      }
    } catch {
      case t: Throwable => subscriber.error(t)
    }
  }

  def publish(row: Seq[Row]): Unit = queue.put(row)
  def error(t: Throwable): Unit = {
    failure.set(t)
    queue.clear()
    queue.add(Row.Sentinel)
  }
  def close(): Unit = queue.add(Row.Sentinel)
} 
Example 34
Source File: StaticTime.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.testing.time

import java.time.Instant
import java.util.concurrent.atomic.AtomicReference

import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, RunnableGraph, Sink}
import akka.stream.{ClosedShape, KillSwitches, Materializer, UniqueKillSwitch}
import com.daml.api.util.{TimeProvider, TimestampConversion}
import com.daml.api.util.TimestampConversion._
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.v1.testing.time_service.{GetTimeRequest, SetTimeRequest}
import com.daml.ledger.api.v1.testing.time_service.TimeServiceGrpc.{TimeService, TimeServiceStub}
import com.daml.ledger.client.LedgerClient

import scala.concurrent.{ExecutionContext, Future}

class StaticTime(
    timeService: TimeService,
    clock: AtomicReference[Instant],
    killSwitch: UniqueKillSwitch,
    ledgerId: String)
    extends TimeProvider
    with AutoCloseable {

  def getCurrentTime: Instant = clock.get

  def timeRequest(instant: Instant) =
    SetTimeRequest(
      ledgerId,
      Some(TimestampConversion.fromInstant(getCurrentTime)),
      Some(TimestampConversion.fromInstant(instant)))

  def setTime(instant: Instant)(implicit ec: ExecutionContext): Future[Unit] = {
    timeService.setTime(timeRequest(instant)).map { _ =>
      val _ = StaticTime.advanceClock(clock, instant)
    }
  }

  override def close(): Unit = killSwitch.shutdown()
}

object StaticTime {
  def advanceClock(clock: AtomicReference[Instant], instant: Instant): Instant = {
    clock.updateAndGet {
      case current if instant isAfter current => instant
      case current => current
    }
  }

  def updatedVia(timeService: TimeServiceStub, ledgerId: String, token: Option[String] = None)(
      implicit m: Materializer,
      esf: ExecutionSequencerFactory): Future[StaticTime] = {
    val clockRef = new AtomicReference[Instant](Instant.EPOCH)
    val killSwitchExternal = KillSwitches.single[Instant]
    val sinkExternal = Sink.head[Instant]

    RunnableGraph
      .fromGraph {
        GraphDSL.create(killSwitchExternal, sinkExternal) {
          case (killSwitch, futureOfFirstElem) =>
            // We serve this in a future which completes when the first element has passed through.
            // Thus we make sure that the object we serve already received time data from the ledger.
            futureOfFirstElem.map(_ => new StaticTime(timeService, clockRef, killSwitch, ledgerId))(
              DirectExecutionContext)
        } { implicit b => (killSwitch, sinkHead) =>
          import GraphDSL.Implicits._
          val instantSource = b.add(
            ClientAdapter
              .serverStreaming(
                GetTimeRequest(ledgerId),
                LedgerClient.stub(timeService, token).getTime)
              .map(r => toInstant(r.getCurrentTime)))

          val updateClock = b.add(Flow[Instant].map { i =>
            advanceClock(clockRef, i)
            i
          })

          val broadcastTimes = b.add(Broadcast[Instant](2))

          val ignore = b.add(Sink.ignore)

          // format: OFF
          instantSource ~> killSwitch ~> updateClock ~> broadcastTimes.in
                                                        broadcastTimes.out(0) ~> sinkHead
                                                        broadcastTimes.out(1) ~> ignore
          // format: ON

          ClosedShape
        }
      }
      .run()
  }

} 
Example 35
Source File: LedgerEntries.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.stores.ledger.inmemory

import java.util.concurrent.atomic.AtomicReference

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.daml.ledger.participant.state.v1.Offset
import com.daml.lf.data.Ref
import com.daml.platform.akkastreams.dispatcher.Dispatcher
import com.daml.platform.akkastreams.dispatcher.SubSource.RangeSource
import org.slf4j.LoggerFactory
import com.daml.platform.ApiOffset.ApiOffsetConverter
import com.daml.platform.sandbox.stores.ledger.SandboxOffset

import scala.collection.immutable.TreeMap

private[ledger] class LedgerEntries[T](identify: T => String) {

  private val logger = LoggerFactory.getLogger(this.getClass)

  private case class Entries(ledgerEnd: Offset, items: TreeMap[Offset, T])

  // Tuple of (ledger end cursor, ledger map). There is never an entry for the initial cursor. End is inclusive.
  private val state = new AtomicReference(Entries(ledgerBeginning, TreeMap.empty))

  private def store(item: T): Offset = {
    val Entries(newOffset, _) = state.updateAndGet({
      case Entries(ledgerEnd, ledger) =>
        val newEnd = SandboxOffset.toOffset(SandboxOffset.fromOffset(ledgerEnd) + 1)
        Entries(newEnd, ledger + (newEnd -> item))
    })
    if (logger.isTraceEnabled())
      logger.trace("Recording `{}` at offset `{}`", identify(item): Any, newOffset.toApiString: Any)
    newOffset
  }

  def incrementOffset(increment: Int): Offset = {
    val Entries(newOffset, _) = state.updateAndGet({
      case Entries(ledgerEnd, ledger) =>
        val newEnd = SandboxOffset.toOffset(SandboxOffset.fromOffset(ledgerEnd) + increment)
        Entries(newEnd, ledger)
    })
    if (logger.isTraceEnabled())
      logger.trace("Bumping offset to `{}`", newOffset.toApiString)
    newOffset
  }

  private val dispatcher = Dispatcher[Offset]("inmemory-ledger", Offset.beforeBegin, ledgerEnd)

  def getSource(
      startExclusive: Option[Offset],
      endInclusive: Option[Offset]): Source[(Offset, T), NotUsed] =
    dispatcher.startingAt(
      startExclusive.getOrElse(ledgerBeginning),
      RangeSource(
        (exclusiveStart, inclusiveEnd) =>
          Source[(Offset, T)](
            state.get().items.from(exclusiveStart).filter(_._1 > exclusiveStart).to(inclusiveEnd)),
      ),
      endInclusive
    )

  def publish(item: T): Offset = {
    val newHead = store(item)
    dispatcher.signalNewHead(newHead)
    newHead
  }

  def ledgerBeginning: Offset = SandboxOffset.toOffset(0)

  def items = state.get().items.iterator

  def ledgerEnd: Offset = state.get().ledgerEnd

  def nextTransactionId: Ref.LedgerString =
    Ref.LedgerString.assertFromString((SandboxOffset.fromOffset(ledgerEnd) + 1).toString)
} 
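
store and incrementOffset both lean on updateAndGet over an immutable Entries value: the JDK retries the pure update function until its CAS succeeds, so appends serialize without a lock. The same shape in miniature, with illustrative names:

import java.util.concurrent.atomic.AtomicReference

object UpdateAndGetSketch extends App {
  final case class Log(end: Long, items: Vector[String])

  private val state = new AtomicReference(Log(0L, Vector.empty))

  // The update function must be pure: updateAndGet may invoke it several
  // times under contention before one CAS attempt wins.
  def append(item: String): Long =
    state.updateAndGet(log => Log(log.end + 1, log.items :+ item)).end

  println(append("a")) // 1
  println(append("b")) // 2
}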
Example 36
Source File: TimeServiceBackend.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver

import java.time.Instant
import java.util.concurrent.atomic.AtomicReference

import com.daml.api.util.TimeProvider

import scala.concurrent.Future

trait TimeServiceBackend extends TimeProvider {
  def setCurrentTime(currentTime: Instant, newTime: Instant): Future[Boolean]
}

object TimeServiceBackend {
  def simple(startTime: Instant): TimeServiceBackend =
    new SimpleTimeServiceBackend(startTime)

  private final class SimpleTimeServiceBackend(startTime: Instant) extends TimeServiceBackend {
    private val timeRef = new AtomicReference[Instant](startTime)

    override def getCurrentTime: Instant = timeRef.get

    override def setCurrentTime(expectedTime: Instant, newTime: Instant): Future[Boolean] = {
      val currentTime = timeRef.get
      val res = currentTime == expectedTime && timeRef.compareAndSet(currentTime, newTime)
      Future.successful(res)
    }
  }
} 
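
Usage follows directly from the API above. Assuming the trait is on the classpath, a short sketch: setCurrentTime only advances the clock when the caller's expected time still matches, which makes concurrent setters safe.

import java.time.Instant

import scala.concurrent.Await
import scala.concurrent.duration._

object TimeBackendUsage extends App {
  val backend = TimeServiceBackend.simple(Instant.EPOCH)
  val target  = Instant.EPOCH.plusSeconds(60)

  // Succeeds: the expected time matches the backend's current time.
  println(Await.result(backend.setCurrentTime(Instant.EPOCH, target), 1.second)) // true
  // Fails: the clock has moved on, so the stale expectation is rejected.
  println(Await.result(backend.setCurrentTime(Instant.EPOCH, target), 1.second)) // false
  println(backend.getCurrentTime) // 1970-01-01T00:01:00Z
}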
Example 37
Source File: TrackerMap.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services.tracking

import java.util.concurrent.atomic.AtomicReference

import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.v1.command_service.SubmitAndWaitRequest
import com.daml.ledger.api.v1.completion.Completion
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import org.slf4j.LoggerFactory

import scala.collection.immutable.HashMap
import scala.concurrent.duration.{FiniteDuration, _}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

object TrackerMap {

  // State ADT for AsyncResource, restored so the excerpt stands alone
  // (the TrackerMap class itself is elided from this excerpt). A resource is
  // Waiting until its future completes, then Ready, then Closed.
  sealed trait AsyncResourceState[+T]
  case object Waiting extends AsyncResourceState[Nothing]
  case object Closed extends AsyncResourceState[Nothing]
  final case class Ready[T](t: T) extends AsyncResourceState[T]
  final class AsyncResource[T <: AutoCloseable](future: Future[T]) {
    private val logger = LoggerFactory.getLogger(this.getClass)

    // Must progress Waiting => Ready => Closed or Waiting => Closed.
    val state: AtomicReference[AsyncResourceState[T]] = new AtomicReference(Waiting)

    future.andThen({
      case Success(t) =>
        if (!state.compareAndSet(Waiting, Ready(t))) {
          // This is the punch line of AsyncResource.
          // If we've been closed in the meantime, we must close the underlying resource also.
          // This "on-failure-to-complete" behavior is not present in scala or java Futures.
          t.close()
        }
      // Someone should be listening to this failure downstream
      // TODO(mthvedt): Refactor so at least one downstream listener is always present,
      // and exceptions are never dropped.
      case Failure(ex) =>
        logger.error("failure to get async resource", ex)
        state.set(Closed)
    })(DirectExecutionContext)

    def flatMap[U](f: T => Future[U])(implicit ex: ExecutionContext): Future[U] = {
      state.get() match {
        case Waiting => future.flatMap(f)
        case Closed => throw new IllegalStateException()
        case Ready(t) => f(t)
      }
    }

    def map[U](f: T => U)(implicit ex: ExecutionContext): Future[U] =
      flatMap(t => Future.successful(f(t)))

    def ifPresent[U](f: T => U): Option[U] = state.get() match {
      case Ready(t) => Some(f(t))
      case _ => None
    }

    def close(): Unit = state.getAndSet(Closed) match {
      case Ready(t) => t.close()
      case _ =>
    }
  }

  def apply(retentionPeriod: FiniteDuration)(implicit logCtx: LoggingContext): TrackerMap =
    new TrackerMap(retentionPeriod)
} 
Example 38
Source File: DispatcherImpl.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.akkastreams.dispatcher

import java.util.concurrent.atomic.AtomicReference

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.github.ghik.silencer.silent
import org.slf4j.LoggerFactory

import scala.collection.immutable

final class DispatcherImpl[Index: Ordering](
    name: String,
    zeroIndex: Index,
    headAtInitialization: Index)
    extends Dispatcher[Index] {

  private val logger = LoggerFactory.getLogger(getClass)

  require(
    !indexIsBeforeZero(headAtInitialization),
    s"head supplied at Dispatcher initialization $headAtInitialization is before zero index $zeroIndex. " +
      s"This would imply that the ledger end is before the ledger begin, which makes this invalid configuration."
  )

  private sealed abstract class State extends Product with Serializable {
    def getSignalDispatcher: Option[SignalDispatcher]

    def getLastIndex: Index
  }

  // the following silent are due to
  // <https://github.com/scala/bug/issues/4440>
  @silent
  private final case class Running(lastIndex: Index, signalDispatcher: SignalDispatcher)
      extends State {
    override def getLastIndex: Index = lastIndex

    override def getSignalDispatcher: Option[SignalDispatcher] = Some(signalDispatcher)
  }

  @silent
  private final case class Closed(lastIndex: Index) extends State {
    override def getLastIndex: Index = lastIndex

    override def getSignalDispatcher: Option[SignalDispatcher] = None
  }

  // So why not broadcast the actual new index, instead of using a signaller?
  // The reason is if we do that, the new indices race with readHead
  // in a way that makes it hard to start up new subscriptions. In particular,
  // we can tolerate NewIndexSignals being out of order or dropped, maintaining the weaker invariant that,
  // if head is updated, at least one NewIndexSignal eventually arrives.

  private val state = new AtomicReference[State](Running(headAtInitialization, SignalDispatcher()))

  // Class header restored (its scaladoc was stripped): emits the half-open
  // intervals of indices between the previously seen head and each new head.
  private class ContinuousRangeEmitter(private var max: Index)
      extends (Index => immutable.Iterable[(Index, Index)]) {
    override def apply(newHead: Index): immutable.Iterable[(Index, Index)] =
      if (Ordering[Index].gt(newHead, max)) {
        val intervalBegin = max
        max = newHead
        List(intervalBegin -> newHead)
      } else Nil
  }

  private def indexIsBeforeZero(checkedIndex: Index): Boolean =
    Ordering[Index].gt(zeroIndex, checkedIndex)

  def close(): Unit =
    state.getAndUpdate {
      case Running(idx, _) => Closed(idx)
      case c: Closed => c
    } match {
      case Running(idx, disp) =>
        disp.signal()
        disp.close()
      case c: Closed => ()
    }

  private def closedError: IllegalStateException =
    new IllegalStateException(s"$name: Dispatcher is closed")

} 
Example 39
Source File: DispatcherTest.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.akkastreams.dispatcher

import java.util.concurrent.atomic.AtomicReference

import akka.NotUsed
import akka.stream.scaladsl.{Keep, Sink}
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import com.daml.platform.akkastreams.dispatcher.SubSource.OneAfterAnother
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.{ExecutionContextExecutor, Future}

//TODO: merge/review the tests we have around the Dispatcher!
class DispatcherTest extends WordSpec with AkkaBeforeAndAfterAll with Matchers with ScalaFutures {

  override implicit def patienceConfig: PatienceConfig =
    PatienceConfig(scaled(Span(10, Seconds)), scaled(Span(250, Milliseconds)))

  "A Dispatcher" should {
    "not race when creating new subscriptions" in {
      // The test setup here is a little different from the above tests,
      // because we wanted to be specific about emitted pairs and use of Thread.sleep.

      implicit val ec: ExecutionContextExecutor = materializer.executionContext

      val elements = new AtomicReference(Map.empty[Int, Int])
      def readElement(i: Int): Future[Int] = Future {
        Thread.sleep(10) // In a previous version of Dispatcher, this sleep caused a race condition.
        elements.get()(i)
      }
      def readSuccessor(i: Int): Int = i + 1

      // compromise between catching flakes and not taking too long
      0 until 25 foreach { _ =>
        val d = Dispatcher("test", 0, 0)

        // Verify that the results are what we expected
        val subscriptions = 1 until 10 map { i =>
          elements.updateAndGet(m => m + (i -> i))
          d.signalNewHead(i)
          d.startingAt(i - 1, OneAfterAnother(readSuccessor, readElement))
            .toMat(Sink.collection)(Keep.right[NotUsed, Future[Seq[(Int, Int)]]])
            .run()
        }

        d.close()

        subscriptions.zip(1 until 10) foreach {
          case (f, i) =>
            whenReady(f) { vals =>
              vals.map(_._1) should contain theSameElementsAs (i to 9)
              vals.map(_._2) should contain theSameElementsAs (i until 10)
            }
        }
      }
    }
  }
} 
Example 40
Source File: DerivedResource.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import java.util.concurrent.atomic.AtomicReference

import scala.reflect.ClassTag

abstract class DerivedResource[Source, Target: ClassTag](source: Resource[Source])
    extends Resource[(Source, Target)] {

  private val resourceRef = new AtomicReference[Target]()

  def derivedValue: Target = {
    val res = resourceRef.get()
    if (res != null) res
    else
      throw new IllegalStateException(
        s"Attempted to read non-initialized resource of class ${implicitly[ClassTag[Target]].runtimeClass.getName}")
  }

  override def value: (Source, Target) = {
    source.value -> derivedValue
  }

  override def setup(): Unit = {
    resourceRef.updateAndGet((resource: Target) =>
      if (resource == null) {
        source.setup()
        construct(source.value)
      } else throw new IllegalStateException(s"Resource $resource is already set up"))
    ()
  }

  protected def construct(source: Source): Target

  override def close(): Unit = {
    resourceRef.updateAndGet(resource => {
      if (resource != null) {
        destruct(resource)
        source.close()
      }
      null.asInstanceOf[Target]
    })
    ()
  }

  protected def destruct(target: Target): Unit
} 
Example 41
Source File: PartyState.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.model

import java.util.concurrent.atomic.AtomicReference

import com.daml.lf.{iface => DamlLfIface}
import com.daml.ledger.api.refinements.ApiTypes

import scalaz.Tag

case class State(ledger: Ledger, packageRegistry: PackageRegistry)


class PartyState(val name: ApiTypes.Party, val useDatabase: Boolean) {
  private val stateRef: AtomicReference[State] = new AtomicReference(
    State(Ledger(name, None, useDatabase), new PackageRegistry))

  def ledger: Ledger = stateRef.get.ledger
  def packageRegistry: PackageRegistry = stateRef.get.packageRegistry

  def addLatestTransaction(tx: Transaction): Unit = {
    stateRef.updateAndGet(state =>
      state.copy(ledger = state.ledger.withTransaction(tx, packageRegistry)))
    ()
  }

  def addCommand(cmd: Command): Unit = {
    stateRef.updateAndGet(state => state.copy(ledger = state.ledger.withCommand(cmd)))
    ()
  }

  def addCommandStatus(id: ApiTypes.CommandId, status: CommandStatus): Unit = {
    stateRef.updateAndGet(state => state.copy(ledger = state.ledger.withCommandStatus(id, status)))
    ()
  }

  def addPackages(packs: List[DamlLfIface.Interface]): Unit = {
    stateRef.updateAndGet(state =>
      state.copy(packageRegistry = packageRegistry.withPackages(packs)))
    ()
  }

  override def hashCode(): Int = Tag.unwrap(name).hashCode()

  override def equals(obj: scala.Any): Boolean = obj match {
    case that: PartyState => Tag.unwrap(this.name) equals Tag.unwrap(that.name)
    case _ => false
  }

  override def toString: String = "Party(" + name.toString + ")"

  def contracts(): Stream[Contract] = this.ledger.allContracts(this.packageRegistry)
} 
Example 42
Source File: ResettableResourceOwner.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.resources

import java.util.concurrent.atomic.AtomicReference

import com.daml.resources.ResettableResourceOwner._

import scala.annotation.tailrec
import scala.concurrent.{ExecutionContext, Future, Promise}

class ResettableResourceOwner[A, ResetValue] private (
    initialValue: ResetValue,
    owner: Reset => ResetValue => ResourceOwner[A],
    resetOperation: A => Future[ResetValue],
) extends ResourceOwner[A] {
  override def acquire()(implicit executionContext: ExecutionContext): Resource[A] =
    new Resource[A] {
      private val resettableOwner: ResetValue => ResourceOwner[A] = owner(reset _)

      @volatile
      private var resource = resettableOwner(initialValue).acquire()
      private val resetPromise = new AtomicReference[Option[Promise[Unit]]](None)

      override def asFuture: Future[A] =
        resetPromise.get().getOrElse(Promise.successful(())).future.flatMap(_ => resource.asFuture)

      override def release(): Future[Unit] =
        resetPromise.get().getOrElse(Promise.successful(())).future.flatMap(_ => resource.release())

      @tailrec
      private def reset(): Future[Unit] = {
        val currentResetPromise = resetPromise.get()
        currentResetPromise match {
          case None =>
            val newResetPromise = Some(Promise[Unit]())
            if (resetPromise.compareAndSet(None, newResetPromise)) {
              for {
                value <- resource.asFuture
                _ <- resource.release()
                resetValue <- resetOperation(value)
              } yield {
                resource = resettableOwner(resetValue).acquire()
                newResetPromise.get.success(())
                resetPromise.set(None)
              }
            } else {
              reset()
            }
          case Some(currentResetPromise) =>
            currentResetPromise.future
        }
      }
    }
}

object ResettableResourceOwner {
  type Reset = () => Future[Unit]

  def apply[A](owner: Reset => ResourceOwner[A]) =
    new ResettableResourceOwner[A, Unit](
      initialValue = (),
      reset => _ => owner(reset),
      resetOperation = _ => Future.unit,
    )

  def apply[A, ResetValue](
      initialValue: ResetValue,
      owner: Reset => ResetValue => ResourceOwner[A],
      resetOperation: A => Future[ResetValue],
  ) = new ResettableResourceOwner(initialValue, owner, resetOperation)
} 
Example 43
Source File: OrderBookAskAdapter.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.actors

import java.util.concurrent.atomic.AtomicReference

import akka.actor._
import akka.actor.typed.scaladsl.adapter._
import akka.http.scaladsl.model.HttpResponse
import cats.syntax.either._
import cats.syntax.option._
import com.wavesplatform.dex.actors.orderbook.AggregatedOrderBookActor.{Depth, Query}
import com.wavesplatform.dex.actors.orderbook.OrderBookActor.MarketStatus
import com.wavesplatform.dex.domain.asset.AssetPair
import com.wavesplatform.dex.error
import com.wavesplatform.dex.error.MatcherError
import com.wavesplatform.dex.model.MatcherModel.DecimalsFormat
import com.wavesplatform.dex.model.OrderBookAggregatedSnapshot

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.reflect.ClassTag

// Will be removed after a migration to typed actors
class OrderBookAskAdapter(orderBooks: AtomicReference[Map[AssetPair, Either[Unit, ActorRef]]], askTimeout: FiniteDuration)(
    implicit system: ActorSystem) {
  import system.dispatcher

  type Result[T] = Future[Either[MatcherError, Option[T]]]

  def getMarketStatus(assetPair: AssetPair): Result[MarketStatus] = get[Query.GetMarketStatus, MarketStatus](assetPair, Query.GetMarketStatus(_))

  def getAggregatedSnapshot(assetPair: AssetPair): Result[OrderBookAggregatedSnapshot] =
    get[Query.GetAggregatedSnapshot, OrderBookAggregatedSnapshot](assetPair, Query.GetAggregatedSnapshot(_))

  def getHttpView(assetPair: AssetPair, format: DecimalsFormat, depth: Depth): Result[HttpResponse] =
    get[Query.GetHttpView, HttpResponse](assetPair, Query.GetHttpView(format, depth, _))

  private val default = Future.successful(Right(None))

  private def get[M <: Query, R: ClassTag](assetPair: AssetPair, message: ActorRef => M): Result[R] = orderBooks.get().get(assetPair) match {
    case None => default
    case Some(ob) =>
      ob match {
        case Left(_) => Future.successful(error.OrderBookBroken(assetPair).asLeft)
        case Right(ob) =>
          val (askRef, r) = AskActor.mk[R](askTimeout)
          ob ! message(askRef)
          r.map(_.some.asRight)
      }
  }
} 
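
Here the AtomicReference holds a whole immutable Map, a read-mostly registry: readers call get() for a consistent snapshot while writers swap in an updated map. A minimal version of that pattern, with illustrative names:

import java.util.concurrent.atomic.AtomicReference

object SnapshotRegistrySketch extends App {
  private val registry = new AtomicReference(Map.empty[String, Int])

  // Writers publish a new immutable map; readers never block and always
  // see a consistent snapshot.
  def register(key: String, value: Int): Unit = {
    registry.updateAndGet(_ + (key -> value))
    ()
  }

  register("orders", 1)
  println(registry.get().get("orders")) // Some(1)
  println(registry.get().get("trades")) // None
}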
Example 44
Source File: MonadlessFutureSpec.scala    From monadless   with Apache License 2.0 5 votes vote down vote up
package io.monadless.stdlib

import org.scalatest.MustMatchers
import scala.concurrent.Future
import io.monadless.impl.TestSupport
import java.util.concurrent.atomic.AtomicReference
import scala.util.Try
import scala.concurrent.ExecutionContext

class MonadlessFutureSpec
  extends org.scalatest.FreeSpec
  with MustMatchers
  with MonadlessFuture
  with TestSupport[Future] {

  implicit val ec = new ExecutionContext {
    def execute(runnable: Runnable): Unit = runnable.run()
    def reportFailure(cause: Throwable): Unit = {}
  }

  def get[T](f: Future[T]) = {
    // can't use Await because of scala.js
    val r = new AtomicReference[Try[T]]
    f.onComplete(r.set)
    r.get.get
  }

  def fail[T]: T = throw new Exception

  val one = Future.successful(1)
  val two = Future.successful(2)

  "apply" in
    runLiftTest(1) {
      1
    }

  "collect" in
    runLiftTest(3) {
      unlift(one) + unlift(two)
    }

  "map" in
    runLiftTest(2) {
      unlift(one) + 1
    }

  "flatMap" in
    runLiftTest(3) {
      val a = unlift(one)
      a + unlift(two)
    }

  "rescue" - {
    "success" in
      runLiftTest(1) {
        try unlift(one)
        catch {
          case e: Throwable => unlift(two)
        }
      }
    "failure" in
      runLiftTest(1) {
        try fail[Int]
        catch {
          case e: Exception => unlift(one)
        }
      }
  }

  "ensure" - {
    "success" in
      runLiftTest(1) {
        var i = 0
        def c() = i += 1
        try unlift(one)
        finally {
          c()
        }
        i
      }
    "failure" in
      runLiftTest(1) {
        var i = 0
        def c() = i += 1
        try {
          try unlift(one) / fail[Int]
          finally {
            c()
          }
        } catch {
          case e: Exception => 1
        }
        i
      }
  }
} 
Example 45
Source File: ExistsSubscriber.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.datastream

import java.util.concurrent.atomic.AtomicReference

import com.sksamuel.exts.Logging
import io.eels.Row

class ExistsSubscriber(fn: Row => Boolean) extends Subscriber[Seq[Row]] with Logging {

  val result = new AtomicReference[Either[Throwable, Boolean]](null)

  private var subscription: Subscription = null
  private var exists = false

  override def subscribed(subscription: Subscription): Unit = this.subscription = subscription

  override def error(t: Throwable): Unit = {
    logger.error("Subscriber received error", t)
    result.set(Left(t))
  }

  override def next(t: Seq[Row]): Unit = {
    if (!exists) {
      exists = t.exists(fn)
      if (exists) {
        logger.debug("Value found, cancelling rest of stream")
        subscription.cancel()
      }
    }
  }

  override def completed(): Unit = {
    result.set(Right(exists))
  }
} 
Example 46
Source File: Publisher.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.datastream

import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import java.util.concurrent.{ExecutorService, LinkedBlockingQueue, TimeUnit}

import com.sksamuel.exts.Logging
import com.sksamuel.exts.collection.BlockingQueueConcurrentIterator
import com.sksamuel.exts.concurrent.ExecutorImplicits._

import scala.collection.concurrent.TrieMap

trait Publisher[T] {
  def subscribe(subscriber: Subscriber[T])
}

object Publisher extends Logging {

  
  def merge[T](publishers: Seq[Publisher[T]], sentinel: T)(implicit executor: ExecutorService): Publisher[T] = {

    new Publisher[T] {
      override def subscribe(s: Subscriber[T]): Unit = {

        // subscribers to the returned publisher will be fed from an intermediate queue
        val queue = new LinkedBlockingQueue[Either[Throwable, T]](DataStream.DefaultBufferSize)

        // to keep track of how many subscribers are yet to finish; only once all upstream
        // publishers have finished will this subscriber be completed.
        val outstanding = new AtomicInteger(publishers.size)

        // we make a collection of all the subscriptions, so if there's an error at any point in the
        // merge, we can cancel all upstream producers
        val subscriptions = TrieMap.empty[Subscription, Int]

        // this cancellable can be used to cancel all the subscriptions
        val subscription = new Subscription {
          override def cancel(): Unit = subscriptions.keys.foreach(_.cancel)
        }

        // status flag that an error occured and the subscriptions should watch for it
        val errorRef = new AtomicReference[Throwable](null)
        def terminate(t: Throwable): Unit = {
          logger.error(s"Error in merge", t)
          errorRef.set(t)
          subscription.cancel()
          queue.clear()
          queue.put(Right(sentinel))
        }

        // each subscriber will occupy its own thread, on the provided executor
        publishers.foreach { publisher =>
          executor.submit {
            try {
              publisher.subscribe(new Subscriber[T] {
                override def subscribed(sub: Subscription): Unit = if (sub != null) subscriptions.put(sub, 1)                
                override def next(t: T): Unit = {
                  var success = true
                  do {
                    success = queue.offer(Right(t), 100, TimeUnit.MILLISECONDS)
                  } while(!success && errorRef.get == null)
                }
                override def error(t: Throwable): Unit = terminate(t)
                override def completed(): Unit = {
                  if (outstanding.decrementAndGet() == 0) {
                    logger.debug("All subscribers have finished; marking queue with sentinel")
                    queue.put(Right(sentinel))
                  }
                }
              })
            } catch {
              case t: Throwable => terminate(t)
            }
          }
        }

        try {
          s.subscribed(subscription)
          BlockingQueueConcurrentIterator(queue, Right(sentinel)).takeWhile(_ => errorRef.get == null).foreach {
            case Left(t) => s.error(t)
            case Right(t) => s.next(t)
          }
          // once we've had an error that's it, we don't complete the subscriber
          if (errorRef.get == null)
            s.completed()
          else 
            s.error(errorRef.get)
        } catch {
          case t: Throwable =>
            logger.error("Error in merge subscriber", t)
            subscription.cancel()
            s.error(t)
        }

        logger.debug("Merge subscriber has completed")
      }
    }
  }
} 
Example 47
Source File: FindSubscriber.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.datastream

import java.util.concurrent.atomic.AtomicReference

import com.sksamuel.exts.Logging
import io.eels.Row

class FindSubscriber(p: Row => Boolean) extends Subscriber[Seq[Row]] with Logging {

  val result = new AtomicReference[Either[Throwable, Option[Row]]](null)

  private var subscription: Subscription = null
  private var value: Option[Row] = None

  override def subscribed(c: Subscription): Unit = this.subscription = c

  override def error(t: Throwable): Unit = {
    logger.error("Subscriber received error", t)
    result.set(Left(t))
  }

  override def next(t: Seq[Row]): Unit = {
    if (value.isEmpty) {
      value = t.find(p)
      if (value.isDefined) {
        logger.debug("Value found, cancelling rest of stream")
        subscription.cancel()
      }
    }
  }

  override def completed(): Unit = {
    result.set(Right(value))
  }
} 
Example 48
Source File: SimpleDnsCache.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package akka.io

import java.util.concurrent.atomic.AtomicReference
import akka.io.Dns.Resolved

import scala.annotation.tailrec
import scala.collection.immutable

private[io] sealed trait PeriodicCacheCleanup {
  def cleanup(): Unit
}

class SimpleDnsCache extends Dns with PeriodicCacheCleanup {
  import akka.io.SimpleDnsCache._

  private val cache = new AtomicReference(new Cache(
    immutable.SortedSet()(ExpiryEntryOrdering),
    immutable.Map(), clock _))

  private val nanoBase = System.nanoTime()

  override def cached(name: String): Option[Resolved] = {
    cache.get().get(name)
  }

  protected def clock(): Long = {
    val now = System.nanoTime()
    if (now - nanoBase < 0) 0
    else (now - nanoBase) / 1000000
  }

  @tailrec
  private[io] final def put(r: Resolved, ttlMillis: Long): Unit = {
    val c = cache.get()
    if (!cache.compareAndSet(c, c.put(r, ttlMillis)))
      put(r, ttlMillis)
  }

  @tailrec
  override final def cleanup(): Unit = {
    val c = cache.get()
    if (!cache.compareAndSet(c, c.cleanup()))
      cleanup()
  }
}

object SimpleDnsCache {
  private class Cache(queue: immutable.SortedSet[ExpiryEntry], cache: immutable.Map[String, CacheEntry], clock: () ⇒ Long) {
    def get(name: String): Option[Resolved] = {
      for {
        e ← cache.get(name)
        if e.isValid(clock())
      } yield e.answer
    }

    def put(answer: Resolved, ttlMillis: Long): Cache = {
      val until0 = clock() + ttlMillis
      val until = if (until0 < 0) Long.MaxValue else until0

      new Cache(
        queue + new ExpiryEntry(answer.name, until),
        cache + (answer.name → CacheEntry(answer, until)),
        clock)
    }

    def cleanup(): Cache = {
      val now = clock()
      var q = queue
      var c = cache
      while (q.nonEmpty && !q.head.isValid(now)) {
        val minEntry = q.head
        val name = minEntry.name
        q -= minEntry
        if (c.get(name).filterNot(_.isValid(now)).isDefined)
          c -= name
      }
      new Cache(q, c, clock)
    }
  }

  private case class CacheEntry(answer: Dns.Resolved, until: Long) {
    def isValid(clock: Long): Boolean = clock < until
  }

  private class ExpiryEntry(val name: String, val until: Long) extends Ordered[ExpiryEntry] {
    def isValid(clock: Long): Boolean = clock < until
    override def compare(that: ExpiryEntry): Int = -until.compareTo(that.until)
  }

  private object ExpiryEntryOrdering extends Ordering[ExpiryEntry] {
    override def compare(x: ExpiryEntry, y: ExpiryEntry): Int = {
      x.until.compareTo(y.until)
    }
  }
} 
Example 49
Source File: NonblockingPar.scala    From learning-fpinscala   with MIT License 5 votes vote down vote up
package com.satansk.fpinscala.parallelism

import java.util.concurrent.{Callable, CountDownLatch, ExecutorService}
import java.util.concurrent.atomic.AtomicReference


object NonblockingPar {

  // Reconstructed from the book's non-blocking representation: a Par is a
  // function that registers a callback rather than blocking for a result.
  sealed trait Future[A] {
    private[parallelism] def apply(k: A ⇒ Unit): Unit
  }

  type Par[A] = ExecutorService ⇒ Future[A]

  // Runs a Par by parking the caller on a latch; the AtomicReference carries
  // the callback's result back to the calling thread.
  def run[A](es: ExecutorService)(p: Par[A]): A = {
    val ref = new AtomicReference[A]
    val latch = new CountDownLatch(1)
    p(es) { a ⇒ ref.set(a); latch.countDown() }
    latch.await()
    ref.get
  }
  def unit[A](a: A): Par[A] =
    _ ⇒ new Future[A] {
      def apply(callback: A ⇒ Unit): Unit = callback(a)
    }

  def fork[A](a: ⇒ Par[A]): Par[A] =
    es ⇒ new Future[A] {
      def apply(callback: (A) ⇒ Unit): Unit =
        eval(es)(a(es)(callback))
    }

  def eval(es: ExecutorService)(r: ⇒ Unit): Unit =
    es.submit(new Callable[Unit] {
      def call = r
    })

} 
Example 50
Source File: NonBlockingMutex.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.libs.concurrent

import scala.annotation.tailrec
import java.util.concurrent.atomic.AtomicReference

// Class header restored: a mutex that runs operations mutually exclusively
// and in submission order, without ever blocking a calling thread. If no
// operation is running, the caller drains the queue itself; otherwise its
// op is enqueued for whichever thread is currently draining.
class NonBlockingMutex {
  def exclusive(body: => Unit): Unit = {
    schedule(() => body)
  }

  private type Op = () => Unit

  private val state = new AtomicReference[Vector[Op]](null)

  @tailrec
  private def schedule(op: Op): Unit = {
    val prevState = state.get
    val newState = prevState match {
      case null => Vector.empty // This is very cheap because Vector.empty is only allocated once
      case pending => pending :+ op
    }
    if (state.compareAndSet(prevState, newState)) {
      prevState match {
        case null =>
          // We've update the state to say that we're running an op,
          // so we need to actually start it running.
          executeAll(op)
        case _ =>
      }
    } else schedule(op) // Try again
  }

  @tailrec
  private def executeAll(op: Op): Unit = {
    op.apply()
    val nextOp = dequeueNextOpToExecute()
    nextOp match {
      case None => ()
      case Some(op) => executeAll(op)
    }
  }

  @tailrec
  private def dequeueNextOpToExecute(): Option[Op] = {
    val prevState = state.get
    val (newState, nextOp) = prevState match {
      case null => throw new IllegalStateException("When executing, must have a queue of pending elements")
      case pending if pending.isEmpty => (null, None)
      case pending => (pending.tail, Some(pending.head))
    }
    if (state.compareAndSet(prevState, newState)) nextOp else dequeueNextOpToExecute()
  }

} 
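
Assuming the class above and Scala 2.12+ (for the Runnable lambda), usage is just exclusive { ... }: operations run one at a time in submission order, and the thread that wins the race drains the queue for everyone. A small sketch:

object NonBlockingMutexUsage extends App {
  val mutex = new NonBlockingMutex
  var count = 0 // only ever touched inside exclusive, so no races

  val threads = (1 to 4).map { _ =>
    new Thread(() => (1 to 1000).foreach(_ => mutex.exclusive { count += 1 }))
  }
  threads.foreach(_.start())
  threads.foreach(_.join())
  println(count) // 4000
}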
Example 51
Source File: ApplicationIdleMonitor.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.monitor.job

import java.util.concurrent.{Executors, ScheduledFuture, TimeUnit}
import java.util.concurrent.atomic.AtomicReference

import scala.collection.JavaConverters._

import org.apache.spark.JobExecutionStatus
import org.apache.spark.alarm.{AlertMessage, HtmlMessage}
import org.apache.spark.monitor.{Monitor, MonitorItem}
import org.apache.spark.monitor.MonitorItem.MonitorItem
import org.apache.spark.scheduler.{SparkListenerEvent, SparkListenerJobEnd, SparkListenerJobStart}
import org.apache.spark.status.JobDataWrapper

class ApplicationIdleMonitor extends JobMonitor {

  override val item: MonitorItem = MonitorItem.APP_IDLE_WARNER
  val delayThread = Executors.newScheduledThreadPool(1)
  lazy val endureLimit =
    conf.getTimeAsMs(s"${Monitor.PREFIX}.${item.toString.toLowerCase}.timeout", "1h")
  private val idleTimeout: AtomicReference[ScheduledFuture[_]] = new AtomicReference()

  private def getActiveJobNum(): Int = {
//    appStore.count(classOf[JobDataWrapper], "completionTime", -1L)
    kvStore
      .view(classOf[JobDataWrapper])
      .reverse()
      .asScala
      .map(_.info)
      .filter(_.status == JobExecutionStatus.RUNNING)
      .size
  }

  private def stopIdleTimeout(): Unit = {
    val idleTimeout = this.idleTimeout.getAndSet(null)
    if (idleTimeout != null) {
      idleTimeout.cancel(false)
    }
  }

  private def setupIdleTimeout(): Unit = {
    if (getActiveJobNum > 0) return
    val timeoutTask = new Runnable() {
      override def run(): Unit = {
        // scalastyle:off
        val driverUrl = conf
          .get(
            "spark.org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter.param.PROXY_URI_BASES")
          .split(",")
          .head
        val a = <h2>Your Spark application</h2>
            <a href={driverUrl}>{driverUrl}</a>
            <h2>has been idle for more than {conf.get(
              s"${Monitor.PREFIX}.${item}.timeout", "1h")}</h2>
            <h2>Please shut it down promptly</h2>
        val message = new HtmlMessage(title = item, content = a.mkString)
        alarms.foreach(_.alarm(message))
        // scalastyle:on
      }
    }

    val timeout = delayThread
      .scheduleWithFixedDelay(timeoutTask, endureLimit, endureLimit, TimeUnit.MILLISECONDS)
    // If there's already an idle task registered, then cancel the new one.
    if (!this.idleTimeout.compareAndSet(null, timeout)) {
      timeout.cancel(false)
    }
    // If a new client connected while the idle task was being set up, then stop the task.
    if (getActiveJobNum > 0) stopIdleTimeout()
  }

  override def watchOut(event: SparkListenerEvent): Option[AlertMessage] = {
    event match {
      case env: SparkListenerJobStart =>
        stopIdleTimeout()
        Option.empty
      case env: SparkListenerJobEnd =>
        setupIdleTimeout()
        Option.empty
      case _ =>
        Option.empty
    }
  }
} 
Example 52
Source File: RawSqlSourceProvider.scala    From HANAVora-Extensions   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.sources

import java.util.concurrent.atomic.AtomicReference

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.Statistics
import org.apache.spark.sql.execution.{PhysicalRDD, RDDConversions, SparkPlan}
import org.apache.spark.sql.sources.RawDDLObjectType.RawDDLObjectType
import org.apache.spark.sql.sources.RawDDLStatementType.RawDDLStatementType
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{Row, SQLContext}

case object RawDDLObjectType {

  sealed trait RawDDLObjectType {
    val name: String
    override def toString: String = name
  }

  sealed abstract class BaseRawDDLObjectType(val name: String) extends RawDDLObjectType
  sealed trait RawData

  case object PartitionFunction extends BaseRawDDLObjectType("partition function")
  case object PartitionScheme   extends BaseRawDDLObjectType("partition scheme")
  case object Collection        extends BaseRawDDLObjectType("collection") with RawData
  case object Series            extends BaseRawDDLObjectType("table") with RawData
  case object Graph             extends BaseRawDDLObjectType("graph") with RawData
}

case object RawDDLStatementType {

  sealed trait RawDDLStatementType

  case object Create extends RawDDLStatementType
  case object Drop   extends RawDDLStatementType
  case object Append extends RawDDLStatementType
  case object Load   extends RawDDLStatementType
}

// Only the provider's schema hook survives in this excerpt; a minimal
// enclosing declaration is restored so the braces balance.
abstract class RawSqlSourceProvider {

  protected def calculateSchema(): StructType
} 
Example 53
Source File: EnrichTruckData.scala    From trucking-iot   with Apache License 2.0 5 votes vote down vote up
package com.orendainx.trucking.nifi.processors

import java.io.{InputStream, OutputStream}
import java.nio.charset.StandardCharsets
import java.util.concurrent.atomic.AtomicReference
import java.util.Scanner

import com.orendainx.trucking.commons.models.{EnrichedTruckData, TruckData}
import com.orendainx.trucking.enrichment.WeatherAPI
import org.apache.nifi.annotation.behavior._
import org.apache.nifi.annotation.documentation.{CapabilityDescription, Tags}
import org.apache.nifi.components.PropertyDescriptor
import org.apache.nifi.logging.ComponentLog
import org.apache.nifi.processor.io.InputStreamCallback
import org.apache.nifi.processor.io.OutputStreamCallback
import org.apache.nifi.processor._

import scala.collection.JavaConverters._


@Tags(Array("trucking", "data", "event", "enrich", "iot"))
@CapabilityDescription("Enriches simulated truck sensor data. Find the master project and its code, documentation and corresponding tutorials at: https://github.com/orendain/trucking-iot")
@InputRequirement(InputRequirement.Requirement.INPUT_REQUIRED)
@TriggerSerially
@WritesAttributes(Array(
  new WritesAttribute(attribute = "dataType", description = "The class name of the resulting enriched data type.")
))
class EnrichTruckData extends AbstractProcessor {

  private var log: ComponentLog = _
  private val RelSuccess = new Relationship.Builder().name("success").description("All generated data is routed to this relationship.").build

  override def init(context: ProcessorInitializationContext): Unit = {
    log = context.getLogger
  }

  override def onTrigger(context: ProcessContext, session: ProcessSession): Unit = {

    var flowFile = session.get
    log.debug(s"Flowfile received: $flowFile")

    // Convert the entire stream of bytes from the flow file into a string
    val content = new AtomicReference[String]
    session.read(flowFile, new InputStreamCallback {
      override def process(inputStream: InputStream) = {
        val scanner = new Scanner(inputStream).useDelimiter("\\A")
        val result = if (scanner.hasNext()) scanner.next() else ""
        log.debug(s"Parsed content: $result")
        content.set(result)
      }
    })

    // Build a TruckData object from the content, then create an EnrichedTruckData
    // object by making the appropriate calls to WeatherAPI
    val truckData = TruckData.fromCSV(content.get())
    val enrichedTruckData = EnrichedTruckData(truckData, WeatherAPI.default.getFog(truckData.eventType),
      WeatherAPI.default.getRain(truckData.eventType), WeatherAPI.default.getWind(truckData.eventType))

    log.debug(s"EnrichedData generated: $enrichedTruckData")

    // Add the new data type as a flow file attribute
    flowFile = session.putAttribute(flowFile, "dataType", enrichedTruckData.getClass.getSimpleName)

    // Replace the flow file, writing in the new content
    flowFile = session.write(flowFile, new OutputStreamCallback {
      override def process(outputStream: OutputStream) =
        outputStream.write(enrichedTruckData.toCSV.getBytes(StandardCharsets.UTF_8))
    })

    // Record a ROUTE provenance event before transferring the flow file to the success relationship
    session.getProvenanceReporter.route(flowFile, RelSuccess)
    session.transfer(flowFile, RelSuccess)
    session.commit()
  }

  // Define properties and relationships
  override def getSupportedPropertyDescriptors: java.util.List[PropertyDescriptor] = List.empty[PropertyDescriptor].asJava

  override def getRelationships: java.util.Set[Relationship] = Set(RelSuccess).asJava
} 
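The onTrigger method above illustrates the usual workaround for callback APIs that return Unit: an AtomicReference captures the value produced inside the callback so the surrounding code can read it afterwards. A self-contained sketch of just that pattern, with illustrative names:

import java.io.ByteArrayInputStream
import java.nio.charset.StandardCharsets
import java.util.Scanner
import java.util.concurrent.atomic.AtomicReference

object CallbackResultSketch {
  def readAll(bytes: Array[Byte]): String = {
    val out = new AtomicReference[String]("")
    // The callback's return type is Unit, so the result escapes via `out`.
    val callback = (in: java.io.InputStream) => {
      val scanner = new Scanner(in).useDelimiter("\\A")
      out.set(if (scanner.hasNext) scanner.next() else "")
    }
    callback(new ByteArrayInputStream(bytes))
    out.get()
  }

  def main(args: Array[String]): Unit =
    println(readAll("hello".getBytes(StandardCharsets.UTF_8)))
}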
Example 54
Source File: ZTestRunner.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio.test.sbt

import java.util.concurrent.atomic.AtomicReference

import sbt.testing._

import zio.ZIO
import zio.test.{ Summary, TestArgs }

final class ZTestRunner(val args: Array[String], val remoteArgs: Array[String], testClassLoader: ClassLoader)
    extends Runner {
  val summaries: AtomicReference[Vector[Summary]] = new AtomicReference(Vector.empty)

  val sendSummary: SendSummary = SendSummary.fromSendM(summary =>
    ZIO.effectTotal {
      summaries.updateAndGet(_ :+ summary)
      ()
    }
  )

  def done(): String = {
    val allSummaries = summaries.get

    val total  = allSummaries.map(_.total).sum
    val ignore = allSummaries.map(_.ignore).sum

    if (allSummaries.isEmpty || total == ignore)
      s"${Console.YELLOW}No tests were executed${Console.RESET}"
    else
      allSummaries
        .map(_.summary)
        .filter(_.nonEmpty)
        .flatMap(summary => colored(summary) :: "\n" :: Nil)
        .mkString("", "", "Done")
  }

  def tasks(defs: Array[TaskDef]): Array[Task] = {
    val testArgs        = TestArgs.parse(args)
    val tasks           = defs.map(new ZTestTask(_, testClassLoader, sendSummary, testArgs))
    val entrypointClass = testArgs.testTaskPolicy.getOrElse(classOf[ZTestTaskPolicyDefaultImpl].getName)
    val taskPolicy = getClass.getClassLoader
      .loadClass(entrypointClass)
      .getConstructor()
      .newInstance()
      .asInstanceOf[ZTestTaskPolicy]
    taskPolicy.merge(tasks)
  }
}

final class ZTestTask(taskDef: TaskDef, testClassLoader: ClassLoader, sendSummary: SendSummary, testArgs: TestArgs)
    extends BaseTestTask(taskDef, testClassLoader, sendSummary, testArgs)

trait ZTestTaskPolicy {
  def merge(zioTasks: Array[ZTestTask]): Array[Task]
}

class ZTestTaskPolicyDefaultImpl extends ZTestTaskPolicy {
  override def merge(zioTasks: Array[ZTestTask]): Array[Task] = zioTasks.toArray
} 
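The runner collects summaries with updateAndGet, which atomically applies a pure function to the current value, so appends from concurrently finishing tasks are never lost. A minimal sketch demonstrating the same lost-update safety:

import java.util.concurrent.atomic.AtomicReference

object AccumulateSketch {
  def main(args: Array[String]): Unit = {
    val acc = new AtomicReference(Vector.empty[Int])
    val threads = (1 to 4).map { i =>
      new Thread(() => (1 to 1000).foreach(_ => acc.updateAndGet(_ :+ i)))
    }
    threads.foreach(_.start())
    threads.foreach(_.join())
    assert(acc.get().size == 4000) // every append survived the contention
  }
}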
Example 55
Source File: MockLogger.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio.test.sbt

import java.util.concurrent.atomic.AtomicReference

import sbt.testing.Logger

import zio.test.sbt.TestingSupport._

class MockLogger extends Logger {
  private val logged = new AtomicReference(Vector.empty[String])
  private def log(str: String) = {
    logged.getAndUpdate(_ :+ str)
    ()
  }
  private def logWithPrefix(s: String)(prefix: String): Unit =
    log(s.split("\n").map(reset(prefix) + _).mkString("\n"))
  def messages: Seq[String] = logged.get()

  override def ansiCodesSupported(): Boolean = false
  override def error(msg: String): Unit      = logWithPrefix(msg)("error: ")
  override def warn(msg: String): Unit       = logWithPrefix(msg)("warn: ")
  override def info(msg: String): Unit       = logWithPrefix(msg)("info: ")
  override def debug(msg: String): Unit      = logWithPrefix(msg)("debug: ")
  override def trace(t: Throwable): Unit     = logWithPrefix(t.toString)("trace: ")
} 
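A brief usage sketch (assuming MockLogger and TestingSupport from this example are on the classpath): the logger records messages instead of printing them, which is what makes assertions over sbt test output possible.

val logger = new MockLogger
logger.info("starting")
logger.error("boom")
assert(logger.messages.exists(_.endsWith("starting"))) // prefixed "info: " line captured
assert(logger.messages.size == 2)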
Example 56
Source File: FiberRefSpecJvm.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio

import java.util.concurrent.atomic.AtomicReference

import zio.FiberRefSpecUtil._
import zio.test.Assertion._
import zio.test._

object FiberRefSpecJvm extends ZIOBaseSpec {

  def spec = suite("FiberRefSpecJvm")(
    testM("unsafe handles behave properly if fiber specific data cannot be accessed") {
      for {
        fiberRef <- FiberRef.make(initial)
        handle   <- fiberRef.unsafeAsThreadLocal
        resRef   <- UIO(new AtomicReference(("", "", "")))

        unsafelyGetSetGet = new Runnable {
          def run(): Unit = {
            val v1 = handle.get()
            handle.set(update2)
            val v2 = handle.get()
            handle.remove()
            val v3 = handle.get()
            resRef.set((v1, v2, v3))
          }
        }

        _      <- fiberRef.set(update1)
        thread <- UIO(new Thread(unsafelyGetSetGet))
        _      <- UIO(thread.start()).ensuring(UIO(thread.join()))

        value0                   <- fiberRef.get
        values                   <- UIO(resRef.get())
        (value1, value2, value3) = values
      } yield assert((value0, value1, value2, value3))(equalTo((update1, initial, update2, initial)))
    }
  )
} 
Example 57
Source File: OneElementConcQueueNoMetric.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio.internal.impls

import java.util.concurrent.atomic.AtomicReference

import zio.internal.MutableConcurrentQueue

class OneElementConcQueueNoMetric[A] extends MutableConcurrentQueue[A] {
  private[this] final val ref = new AtomicReference[AnyRef]()

  override final val capacity: Int = 1

  override final def dequeuedCount(): Long =
    throw new NotImplementedError("dequeuedCount is not supported")
  override final def enqueuedCount(): Long =
    throw new NotImplementedError("enqueuedCount is not supported")

  override final def isEmpty(): Boolean = ref.get() == null
  override final def isFull(): Boolean  = !isEmpty()

  override final def offer(a: A): Boolean = {
    assert(a != null)

    val aRef    = ref
    var ret     = false
    var looping = true

    while (looping) {
      if (aRef.get() != null) looping = false
      else {
        if (aRef.compareAndSet(null, a.asInstanceOf[AnyRef])) {
          ret = true
          looping = false
        }
      }
    }

    ret
  }

  override final def poll(default: A): A = {
    var ret     = default
    var looping = true
    val aRef    = ref
    var el      = null.asInstanceOf[AnyRef]

    while (looping) {
      el = aRef.get()
      if (el == null) looping = false
      else {
        if (aRef.compareAndSet(el, null)) {
          ret = el.asInstanceOf[A]
          looping = false
        }
      }
    }

    ret
  }

  override final def size(): Int = if (isEmpty()) 0 else 1
} 
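Usage sketch for the queue above: the capacity is exactly one, so a second offer is rejected until the element is polled, and polling an empty queue returns the supplied default.

val q = new OneElementConcQueueNoMetric[String]
assert(q.offer("a"))             // slot was empty: accepted
assert(!q.offer("b"))            // slot occupied: rejected
assert(q.poll("none") == "a")    // element handed out exactly once
assert(q.poll("none") == "none") // empty again: default returned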
Example 58
Source File: OneElementConcurrentQueue.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio.internal.impls

import java.io.Serializable
import java.util.concurrent.atomic.{ AtomicReference, LongAdder }

import zio.internal.MutableConcurrentQueue


final class OneElementConcurrentQueue[A] extends MutableConcurrentQueue[A] with Serializable {
  private[this] val ref      = new AtomicReference[AnyRef]()
  private[this] val deqAdder = new LongAdder()

  override final val capacity = 1

  override def dequeuedCount(): Long = deqAdder.sum()
  override def enqueuedCount(): Long =
    if (isEmpty()) dequeuedCount() else dequeuedCount() + 1

  override def isEmpty(): Boolean = ref.get() == null
  override def isFull(): Boolean  = !isEmpty()

  override def offer(a: A): Boolean = {
    assert(a != null)

    val aRef    = ref
    var ret     = false
    var looping = true

    while (looping) {
      if (aRef.get() != null) looping = false
      else {
        if (aRef.compareAndSet(null, a.asInstanceOf[AnyRef])) {
          ret = true
          looping = false
        }
      }
    }

    ret
  }

  override def poll(default: A): A = {
    var ret     = default
    var looping = true
    val aRef    = ref
    var el      = null.asInstanceOf[AnyRef]

    while (looping) {
      el = aRef.get()
      if (el == null) looping = false
      else {
        if (aRef.compareAndSet(el, null)) {
          ret = el.asInstanceOf[A]
          deqAdder.increment()
          looping = false
        }
      }
    }

    ret
  }

  override def size(): Int = if (isEmpty()) 0 else 1
} 
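This variant tracks only dequeues with a LongAdder (cheaper than an AtomicLong under write contention) and derives the enqueue count from whether the slot is occupied. A sketch of the resulting invariant, using the class above:

val q = new OneElementConcurrentQueue[Int]
q.offer(1)
assert(q.enqueuedCount() == q.dequeuedCount() + 1) // slot full
q.poll(-1)
assert(q.enqueuedCount() == q.dequeuedCount())     // slot empty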
Example 59
Source File: OneElementConcurrentQueue.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio.internal.impls

import java.io.Serializable
import java.util.concurrent.atomic.{ AtomicBoolean, AtomicLong, AtomicReference }

import zio.internal.MutableConcurrentQueue

final class OneElementConcurrentQueue[A] extends MutableConcurrentQueue[A] with Serializable {
  private[this] val ref = new AtomicReference[AnyRef]()

  private[this] val headCounter   = new AtomicLong(0L)
  private[this] val deqInProgress = new AtomicBoolean(false)

  private[this] val tailCounter   = new AtomicLong(0L)
  private[this] val enqInProgress = new AtomicBoolean(false)

  override final val capacity = 1

  override def dequeuedCount(): Long = headCounter.get()
  override def enqueuedCount(): Long = tailCounter.get()

  override def isEmpty(): Boolean = ref.get() == null
  override def isFull(): Boolean  = !isEmpty()

  override def offer(a: A): Boolean = {
    assert(a != null)

    var res     = false
    var looping = true

    while (looping) {
      if (isFull()) {
        looping = false
      } else {
        if (enqInProgress.compareAndSet(false, true)) { // get an exclusive right to offer
          if (ref.get() == null) {
            tailCounter.lazySet(tailCounter.get() + 1)
            ref.lazySet(a.asInstanceOf[AnyRef])
            res = true
          }

          enqInProgress.lazySet(false)
          looping = false
        }
      }
    }

    res
  }

  override def poll(default: A): A = {
    var res     = default
    var looping = true

    while (looping) {
      if (isEmpty()) {
        looping = false
      } else {
        if (deqInProgress.compareAndSet(false, true)) { // get an exclusive right to poll
          val el = ref.get().asInstanceOf[A]

          if (el != null) {
            res = el
            headCounter.lazySet(headCounter.get() + 1)
            ref.lazySet(null.asInstanceOf[AnyRef])
          }

          deqInProgress.lazySet(false)
          looping = false
        }
      }
    }

    res
  }

  override def size(): Int = if (isEmpty()) 0 else 1
} 
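Compared with Example 58, this version swaps the CAS-on-slot loop for an AtomicBoolean that grants an exclusive right to write; once a thread wins the flag it is the sole writer, so the cheaper lazySet suffices for both the slot and the counters. The guard idiom in isolation (illustrative names):

import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}

object ExclusiveWriteSketch {
  private val busy = new AtomicBoolean(false)
  private val slot = new AtomicReference[AnyRef]()

  def tryPut(a: AnyRef): Boolean =
    if (busy.compareAndSet(false, true)) { // exclusive right acquired
      val ok = slot.get() == null
      if (ok) slot.lazySet(a)              // single writer: lazySet is enough
      busy.lazySet(false)                  // release the right
      ok
    } else false                           // someone else holds the right
}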
Example 60
Source File: FinagleBackend.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.http.finagle

import java.util.concurrent.atomic.AtomicReference

import com.twitter.finagle.Service
import com.twitter.finagle.context.Contexts
import com.twitter.finagle.http.{Request, Response, Status}
import com.twitter.util.{Future, Promise, Return, Throw}
import wvlet.airframe.http.{HttpBackend, HttpRequestAdapter, HttpStatus}
import wvlet.log.LogSupport

import scala.concurrent.ExecutionContext
import scala.util.{Failure, Success}
import scala.{concurrent => sc}


object FinagleBackend extends HttpBackend[Request, Response, Future] {
  override protected implicit val httpRequestAdapter: HttpRequestAdapter[Request] = FinagleHttpRequestAdapter

  override def wrapException(e: Throwable): Future[Response] = {
    Future.exception(e)
  }
  override def newResponse(status: HttpStatus, content: String): Response = {
    val r = Response(Status.fromCode(status.code))
    r.contentString = content
    r
  }

  def wrapFilter(filter: com.twitter.finagle.Filter[Request, Response, Request, Response]): FinagleFilter = {
    new FinagleFilter with LogSupport {
      override def apply(request: Request, context: Context): Future[Response] = {
        filter(request, Service.mk { req: Request => context(req) })
      }
    }
  }

  override def toFuture[A](a: A): Future[A] = Future.value(a)
  override def toScalaFuture[A](a: Future[A]): sc.Future[A] = {
    val promise: sc.Promise[A] = sc.Promise()
    a.respond {
      case Return(value)    => promise.success(value)
      case Throw(exception) => promise.failure(exception)
    }
    promise.future
  }
  override def toFuture[A](a: sc.Future[A], e: ExecutionContext): Future[A] = {
    val promise: Promise[A] = Promise()
    a.onComplete {
      case Success(value)     => promise.setValue(value)
      case Failure(exception) => promise.setException(exception)
    }(e)
    promise
  }

  override def isFutureType(cl: Class[_]): Boolean = {
    classOf[Future[_]].isAssignableFrom(cl)
  }
  override def isRawResponseType(cl: Class[_]): Boolean = {
    classOf[Response].isAssignableFrom(cl)
  }
  override def mapF[A, B](f: Future[A], body: A => B): Future[B] = {
    f.map(body)
  }

  private val contextParamHolderKey = new Contexts.local.Key[AtomicReference[collection.mutable.Map[String, Any]]]

  override def withThreadLocalStore(body: => Future[Response]): Future[Response] = {
    val newParamHolder = collection.mutable.Map.empty[String, Any]
    Contexts.local
      .let(contextParamHolderKey, new AtomicReference[collection.mutable.Map[String, Any]](newParamHolder)) {
        body
      }
  }

  override def setThreadLocal[A](key: String, value: A): Unit = {
    Contexts.local.get(contextParamHolderKey).foreach { ref => ref.get().put(key, value) }
  }

  override def getThreadLocal[A](key: String): Option[A] = {
    Contexts.local.get(contextParamHolderKey).flatMap { ref => ref.get.get(key).asInstanceOf[Option[A]] }
  }
} 
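A usage sketch for the thread-local store above (assuming the FinagleBackend object from this example is in scope, and that HttpStatus.Ok_200 is the airframe-http constant for 200): values set inside withThreadLocalStore travel with the Finagle request context rather than with any particular thread.

val response = FinagleBackend.withThreadLocalStore {
  FinagleBackend.setThreadLocal("userId", "u-123")
  val uid = FinagleBackend.getThreadLocal[String]("userId").getOrElse("anonymous")
  FinagleBackend.toFuture(FinagleBackend.newResponse(HttpStatus.Ok_200, s"hello $uid"))
}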
Example 61
Source File: Batcher.scala    From gfc-concurrent   with Apache License 2.0 5 votes vote down vote up
package com.gilt.gfc.concurrent

import java.util.concurrent.{Executors, ScheduledExecutorService => JScheduledExecutorService}
import java.util.concurrent.atomic.AtomicReference

import com.gilt.gfc.logging.Loggable

import scala.annotation.tailrec
import scala.concurrent.duration._
import scala.util.control.NonFatal



  @tailrec
  override
  def flush(): Unit = {
    val b@(_, records) = currentBatch.get()
    if (currentBatch.compareAndSet(b, emptyBatch)) {
      safeSubmitBatch(records)
    } else {
      flush() // retry
    }
  }

  override
  def shutdown(): Unit = {
    isRunning = false
    task.cancel(true)
    flush()
  }

  private[this]
  def safeSubmitBatch(records: Vector[R]): Unit = {
    if (records.nonEmpty) {
      lastSubmit = System.currentTimeMillis()
      try {
        submitBatch(records)
      } catch {
        case NonFatal(e) =>
          error(s"Failed to flush ${name} batch: ${e.getMessage}", e)
      }
    }
  }
}
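The @tailrec flush above is the standard CAS drain: read the whole batch, try to swap in the empty one, and retry if a concurrent add slipped in between the read and the swap. A standalone sketch of that idiom (names are illustrative, not gfc-concurrent's API):

import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec

object DrainSketch {
  private val empty = (0, Vector.empty[String])
  private val batch = new AtomicReference(empty)

  @tailrec
  def add(r: String): Unit = {
    val b @ (n, rs) = batch.get()
    // Retry if another thread changed the batch between get() and the CAS.
    if (!batch.compareAndSet(b, (n + 1, rs :+ r))) add(r)
  }

  @tailrec
  def drain(): Vector[String] = {
    val b @ (_, rs) = batch.get()
    if (batch.compareAndSet(b, empty)) rs else drain()
  }
}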