java.util.concurrent.ConcurrentHashMap Scala Examples

The following examples show how to use java.util.concurrent.ConcurrentHashMap. They are drawn from a range of open-source Scala projects; to see each snippet in its full context, follow the source reference given above it.
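Before the project code, here is a minimal, self-contained sketch (not taken from any of the projects below; the QuickTour name is illustrative) of the ConcurrentHashMap operations that recur throughout these examples: conditional insertion, atomic computation of missing values, and conversion to a Scala view.

import java.util.concurrent.ConcurrentHashMap
import scala.jdk.CollectionConverters._

object QuickTour {
  def main(args: Array[String]): Unit = {
    val cache = new ConcurrentHashMap[String, Int]()

    // putIfAbsent writes only when the key is missing and returns the previous value (or null)
    cache.putIfAbsent("a", 1)

    // computeIfAbsent computes and stores the value atomically for a missing key
    val b = cache.computeIfAbsent("b", _ => 2)

    // asScala wraps the same map in a scala.collection.concurrent.Map view
    val view = cache.asScala
    println(s"b = $b, keys = ${view.keySet}")
  }
}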
Example 1
Source File: TestDefaultValuesAutoComplete.scala    From ohara   with Apache License 2.0
package oharastream.ohara.configurator

import java.util.concurrent.{ConcurrentHashMap, TimeUnit}

import oharastream.ohara.client.configurator.{ConnectorApi, WorkerApi}
import oharastream.ohara.testing.WithBrokerWorker
import org.junit.Test
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

class TestDefaultValuesAutoComplete extends WithBrokerWorker {
  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(20, TimeUnit.SECONDS))

  private[this] val configurator =
    Configurator.builder.fake(testUtil().brokersConnProps(), testUtil().workersConnProps()).build()

  private[this] val workerClusterInfo = result(
    WorkerApi.access.hostname(configurator.hostname).port(configurator.port).list()
  ).head

  private[this] val connectorApi = ConnectorApi.access.hostname(configurator.hostname).port(configurator.port)

  @Test
  def testDefaultValuesForPerfSource(): Unit = {
    val connector = result(
      connectorApi.request
        .workerClusterKey(workerClusterInfo.key)
        .className("oharastream.ohara.connector.perf.PerfSource")
        .create()
    )
    connector.settings.keySet should contain("perf.batch")
    connector.settings.keySet should contain("perf.frequency")
    connector.settings.keySet should contain("perf.cell.length")
  }

  @Test
  def testDefaultValuesForConsoleSink(): Unit = {
    val connector = result(
      connectorApi.request
        .workerClusterKey(workerClusterInfo.key)
        .className("oharastream.ohara.connector.console.ConsoleSink")
        .create()
    )
    connector.settings.keySet should contain("console.sink.frequence")
    connector.settings.keySet should contain("console.sink.row.divider")

    // also exercise the map's legacy Enumeration view through the Scala converters
    val a = new ConcurrentHashMap[String, String]()
    import scala.jdk.CollectionConverters._
    a.elements().asScala.toSeq
  }
} 
Example 2
Source File: InMemoryPrivateLedgerData.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.engine
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger

import com.daml.lf.data.{FrontStack, FrontStackCons}
import com.daml.lf.transaction.Node._
import com.daml.lf.transaction.{Transaction => Tx}
import com.daml.lf.value.Value._

import scala.annotation.tailrec

trait PrivateLedgerData {
  def update(tx: Tx.Transaction): Unit
  def get(id: ContractId): Option[ContractInst[VersionedValue[ContractId]]]
  def transactionCounter: Int
  def clear(): Unit
}

private[engine] class InMemoryPrivateLedgerData extends PrivateLedgerData {
  private val pcs: ConcurrentHashMap[ContractId, ContractInst[Tx.Value[ContractId]]] =
    new ConcurrentHashMap()
  private val txCounter: AtomicInteger = new AtomicInteger(0)

  def update(tx: Tx.Transaction): Unit =
    updateWithContractId(tx)

  def updateWithContractId(tx: Tx.Transaction): Unit =
    this.synchronized {
      // traverse in topo order and add / remove
      @tailrec
      def go(remaining: FrontStack[Tx.NodeId]): Unit = remaining match {
        case FrontStack() => ()
        case FrontStackCons(nodeId, nodeIds) =>
          val node = tx.nodes(nodeId)
          node match {
            case nc: NodeCreate.WithTxValue[ContractId] =>
              pcs.put(nc.coid, nc.coinst)
              go(nodeIds)
            case ne: NodeExercises.WithTxValue[Tx.NodeId, ContractId] =>
              go(ne.children ++: nodeIds)
            case _: NodeLookupByKey[_, _] | _: NodeFetch[_, _] =>
              go(nodeIds)
          }
      }
      go(FrontStack(tx.roots))
      txCounter.incrementAndGet()
      ()
    }

  def get(id: ContractId): Option[ContractInst[VersionedValue[ContractId]]] =
    this.synchronized {
      Option(pcs.get(id))
    }

  def clear(): Unit = this.synchronized {
    pcs.clear()
  }

  def transactionCounter: Int = txCounter.intValue()

  override def toString: String = s"InMemoryPrivateContractStore@{txCounter: $txCounter, pcs: $pcs}"
}

private[engine] object InMemoryPrivateLedgerData {
  def apply(): PrivateLedgerData = new InMemoryPrivateLedgerData()
} 
Example 3
Source File: Main.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.test

import scala.collection._
import scala.collection.JavaConverters._
import java.util.concurrent.ConcurrentHashMap

import com.daml.navigator.test.config.Arguments
import com.typesafe.scalalogging.LazyLogging
import org.scalatest.events._
import org.scalatest.{Args, ConfigMap, Reporter}

import scala.util.Try

object Main extends LazyLogging {
  def main(args: Array[String]): Unit = {
    Arguments.parse(args) match {
      case None =>
        System.exit(1)
      case Some(arguments) =>
        val reporter = new LoggerReporter()
        val status = new BrowserTest(arguments)
          .run(None, Args(reporter = reporter, configMap = new ConfigMap(Map.empty[String, Any])))
        val success = Try(status.succeeds()).getOrElse(false)
        val exitCode = if (success) 0 else 1
        val header =
          """
            | 
            |***************************************************************************
            |
            |     Test Results
            |
            |***************************************************************************
            |
          """.stripMargin
        logger.info(header)
        reporter.results.foreach { kv =>
          logger.info(s"${kv._1}")
          kv._2.foreach(logger.info(_))
        }
        val results = reporter.results.toList
        val allTests = results.size
        val failedTests = results.count(kv => kv._2.isDefined)
        val footer =
          s"""
            | 
            |***************************************************************************
            |
            |     All tests: $allTests; tests failed: $failedTests
            |
            |***************************************************************************
            |
          """.stripMargin
        logger.info(footer)
        System.exit(exitCode)
    }
  }
}

class LoggerReporter extends Reporter {

  // Test statuses with optional errors
  val results: concurrent.Map[String, Option[String]] =
    new ConcurrentHashMap[String, Option[String]]().asScala

  override def apply(e: Event): Unit = {
    e match {
      case t: TestSucceeded =>
        results.put(s"  Test succeeded: ${t.testName}", None)
        ()
      case t: TestFailed =>
        results.put(
          s"  Test failed: ${t.testName}",
          t.throwable.map(_.getMessage).map(e => s"      error: $e")
        )
        ()

      case _ => ()
    }

  }

} 
Example 4
Source File: SparkSQLOperationManager.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.sql.hive.thriftserver.server

import java.util.{Map => JMap}
import java.util.concurrent.ConcurrentHashMap

import org.apache.hive.service.cli._
import org.apache.hive.service.cli.operation.{ExecuteStatementOperation, Operation, OperationManager}
import org.apache.hive.service.cli.session.HiveSession

import org.apache.spark.internal.Logging
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.HiveSessionState
import org.apache.spark.sql.hive.thriftserver.{ReflectionUtils, SparkExecuteStatementOperation}


private[thriftserver] class SparkSQLOperationManager()
  extends OperationManager with Logging {

  val handleToOperation = ReflectionUtils
    .getSuperField[JMap[OperationHandle, Operation]](this, "handleToOperation")

  val sessionToActivePool = new ConcurrentHashMap[SessionHandle, String]()
  val sessionToContexts = new ConcurrentHashMap[SessionHandle, SQLContext]()

  override def newExecuteStatementOperation(
      parentSession: HiveSession,
      statement: String,
      confOverlay: JMap[String, String],
      async: Boolean): ExecuteStatementOperation = synchronized {
    val sqlContext = sessionToContexts.get(parentSession.getSessionHandle)
    require(sqlContext != null, s"Session handle: ${parentSession.getSessionHandle} has not been" +
      s" initialized or had already closed.")
    val sessionState = sqlContext.sessionState.asInstanceOf[HiveSessionState]
    val runInBackground = async && sessionState.hiveThriftServerAsync
    val operation = new SparkExecuteStatementOperation(parentSession, statement, confOverlay,
      runInBackground)(sqlContext, sessionToActivePool)
    handleToOperation.put(operation.getHandle, operation)
    logDebug(s"Created Operation for $statement with session=$parentSession, " +
      s"runInBackground=$runInBackground")
    operation
  }
} 
Example 5
Source File: NettyStreamManager.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.rpc.netty

import java.io.File
import java.util.concurrent.ConcurrentHashMap

import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer}
import org.apache.spark.network.server.StreamManager
import org.apache.spark.rpc.RpcEnvFileServer
import org.apache.spark.util.Utils


private[netty] class NettyStreamManager(rpcEnv: NettyRpcEnv)
  extends StreamManager with RpcEnvFileServer {

  private val files = new ConcurrentHashMap[String, File]()
  private val jars = new ConcurrentHashMap[String, File]()
  private val dirs = new ConcurrentHashMap[String, File]()

  override def getChunk(streamId: Long, chunkIndex: Int): ManagedBuffer = {
    throw new UnsupportedOperationException()
  }

  override def openStream(streamId: String): ManagedBuffer = {
    val Array(ftype, fname) = streamId.stripPrefix("/").split("/", 2)
    val file = ftype match {
      case "files" => files.get(fname)
      case "jars" => jars.get(fname)
      case other =>
        val dir = dirs.get(ftype)
        require(dir != null, s"Invalid stream URI: $ftype not found.")
        new File(dir, fname)
    }

    if (file != null && file.isFile()) {
      new FileSegmentManagedBuffer(rpcEnv.transportConf, file, 0, file.length())
    } else {
      null
    }
  }

  override def addFile(file: File): String = {
    val existingPath = files.putIfAbsent(file.getName, file)
    require(existingPath == null || existingPath == file,
      s"File ${file.getName} was already registered with a different path " +
        s"(old path = $existingPath, new path = $file")
    s"${rpcEnv.address.toSparkURL}/files/${Utils.encodeFileNameToURIRawPath(file.getName())}"
  }

  override def addJar(file: File): String = {
    val existingPath = jars.putIfAbsent(file.getName, file)
    require(existingPath == null || existingPath == file,
      s"File ${file.getName} was already registered with a different path " +
        s"(old path = $existingPath, new path = $file")
    s"${rpcEnv.address.toSparkURL}/jars/${Utils.encodeFileNameToURIRawPath(file.getName())}"
  }

  override def addDirectory(baseUri: String, path: File): String = {
    val fixedBaseUri = validateDirectoryUri(baseUri)
    require(dirs.putIfAbsent(fixedBaseUri.stripPrefix("/"), path) == null,
      s"URI '$fixedBaseUri' already registered.")
    s"${rpcEnv.address.toSparkURL}$fixedBaseUri"
  }

} 
Example 6
Source File: BlockManagerId.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.storage

import java.io.{Externalizable, IOException, ObjectInput, ObjectOutput}
import java.util.concurrent.ConcurrentHashMap

import org.apache.spark.SparkContext
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.util.Utils


// Note: the BlockManagerId class itself is omitted in this listing; what follows is its
// companion object, which interns ids through a ConcurrentHashMap-backed cache.
private[spark] object BlockManagerId {

  def apply(
      execId: String,
      host: String,
      port: Int,
      topologyInfo: Option[String] = None): BlockManagerId =
    getCachedBlockManagerId(new BlockManagerId(execId, host, port, topologyInfo))

  def apply(in: ObjectInput): BlockManagerId = {
    val obj = new BlockManagerId()
    obj.readExternal(in)
    getCachedBlockManagerId(obj)
  }

  val blockManagerIdCache = new ConcurrentHashMap[BlockManagerId, BlockManagerId]()

  def getCachedBlockManagerId(id: BlockManagerId): BlockManagerId = {
    blockManagerIdCache.putIfAbsent(id, id)
    blockManagerIdCache.get(id)
  }
} 
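The cache above performs putIfAbsent followed by get as two separate steps. On Java 8+ the same interning can be expressed as a single atomic call with computeIfAbsent; a minimal sketch under that assumption (the Interner name is illustrative, not Spark's):

import java.util.concurrent.ConcurrentHashMap

// Hypothetical interner: computeIfAbsent inserts the value atomically when the key is absent
// and always returns the canonical (cached) instance.
class Interner[A] {
  private val cache = new ConcurrentHashMap[A, A]()
  // e.g. a BlockManagerId-style cache lookup would become: interner.intern(id)
  def intern(value: A): A = cache.computeIfAbsent(value, v => v)
}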
Example 7
Source File: DnnStorage.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.tensor

import java.io.{IOException, ObjectInputStream, ObjectOutputStream}
import com.intel.analytics.bigdl.mkl.Memory
import com.intel.analytics.bigdl.nn.mkldnn.MemoryOwner
import scala.reflect._


private[bigdl] class Pointer(val address: Long)

object DnnStorage {
  private[tensor] val CACHE_LINE_SIZE = System.getProperty("bigdl.cache.line", "64").toInt
  private[bigdl] val FLOAT_BYTES: Int = 4
  private[bigdl] val INT8_BYTES: Int = 1
  private[bigdl] val INT_BYTES: Int = 4

  import java.util.concurrent.ConcurrentHashMap
  private val nativeStorages: ConcurrentHashMap[Long, Boolean] = new ConcurrentHashMap()

  def checkAndSet(pointer: Long): Boolean = {
    nativeStorages.replace(pointer, false, true)
  }

  def add(key: Long): Unit = {
    nativeStorages.put(key, false)
  }

  def get(): Map[Long, Boolean] = {
    import scala.collection.JavaConverters._
    nativeStorages.asScala.toMap
  }
} 
Example 8
Source File: ParameterSynchronizer.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils

import java.util.concurrent.{ConcurrentHashMap, CyclicBarrier}

import com.intel.analytics.bigdl.tensor.Tensor
import org.apache.spark.TaskContext

import scala.reflect._


// Note: the enclosing synchronizer class is elided in this listing; a minimal, hypothetical
// reconstruction of it and of the `barrier` and `data` fields used below is added here so
// the snippet stands on its own (the real BigDL class may differ).
private[bigdl] class EventSynchronizer[T: ClassTag](partitionNum: Int) {
  private val barrier = new CyclicBarrier(partitionNum)
  private val data = new ConcurrentHashMap[String, Tensor[T]]()

  def reset(): Unit = {
    barrier.await
    if (data.size != 0) {
      data.synchronized {
        if (data.size != 0) {
          data.clear
        }
      }
    }
    barrier.await
  }
} 
Example 9
Source File: AssetsStorage.scala    From matcher   with MIT License
package com.wavesplatform.dex.db

import java.util.concurrent.ConcurrentHashMap

import com.wavesplatform.dex.db.leveldb.DBExt
import com.wavesplatform.dex.domain.asset.Asset
import com.wavesplatform.dex.domain.asset.Asset.IssuedAsset
import com.wavesplatform.dex.effect.{FutureResult, liftValueAsync}
import com.wavesplatform.dex.grpc.integration.dto.BriefAssetDescription
import org.iq80.leveldb.DB

trait AssetsStorage {
  def put(asset: IssuedAsset, item: BriefAssetDescription): Unit
  def get(asset: IssuedAsset): Option[BriefAssetDescription]
}

object AssetsStorage {

  val wavesLifted: FutureResult[BriefAssetDescription] = liftValueAsync { BriefAssetDescription.wavesDescription }

  def cache(inner: AssetsStorage): AssetsStorage = new AssetsStorage {

    private val assetsCache = new ConcurrentHashMap[Asset, BriefAssetDescription]

    def put(asset: Asset.IssuedAsset, item: BriefAssetDescription): Unit = {
      inner.put(asset, item)
      assetsCache.putIfAbsent(asset, item)
    }

    def get(asset: Asset.IssuedAsset): Option[BriefAssetDescription] = Option {
      assetsCache.computeIfAbsent(asset, inner.get(_).orNull)
    }
  }

  def levelDB(db: DB): AssetsStorage = new AssetsStorage {
    def put(asset: IssuedAsset, record: BriefAssetDescription): Unit = db.readWrite(_.put(DbKeys.asset(asset), Some(record)))
    def get(asset: IssuedAsset): Option[BriefAssetDescription]       = db.readOnly(_.get(DbKeys.asset(asset)))
  }

  def inMem: AssetsStorage = new AssetsStorage {
    private val assetsCache                                        = new ConcurrentHashMap[Asset, BriefAssetDescription]
    def put(asset: IssuedAsset, item: BriefAssetDescription): Unit = assetsCache.putIfAbsent(asset, item)
    def get(asset: IssuedAsset): Option[BriefAssetDescription]     = Option(assetsCache get asset)
  }

  final implicit class Ops(val self: AssetsStorage) extends AnyVal {

    def get(asset: Asset): Option[BriefAssetDescription] = asset.fold(BriefAssetDescription.someWavesDescription)(self.get)

    def unsafeGet(asset: Asset): BriefAssetDescription = {
      get(asset).getOrElse(throw new RuntimeException(s"Unknown asset: ${asset.toString}"))
    }

    def unsafeGetDecimals(asset: Asset): Int      = unsafeGet(asset).decimals
    def unsafeGetHasScript(asset: Asset): Boolean = unsafeGet(asset).hasScript
  }
} 
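The cache variant above depends on a documented detail of computeIfAbsent: if the mapping function returns null, no entry is stored, so an asset missing from the inner storage is queried again on the next call instead of being cached as absent. A standalone illustration:

import java.util.concurrent.ConcurrentHashMap

object NullIsNotCached {
  def main(args: Array[String]): Unit = {
    val cache = new ConcurrentHashMap[String, String]()
    // The mapping function yields null, so no mapping is created for the key.
    val v = cache.computeIfAbsent("missing", _ => null)
    println(v)                            // null
    println(cache.containsKey("missing")) // false
  }
}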
Example 10
Source File: OrderBookSnapshotDB.scala    From matcher   with MIT License
package com.wavesplatform.dex.db

import java.util.concurrent.ConcurrentHashMap

import cats.instances.option.catsStdInstancesForOption
import cats.syntax.apply.catsSyntaxTuple2Semigroupal
import com.wavesplatform.dex.db.leveldb.{DBExt, Key}
import com.wavesplatform.dex.domain.asset.AssetPair
import com.wavesplatform.dex.model.OrderBookSnapshot
import com.wavesplatform.dex.queue.QueueEventWithMeta.Offset
import org.iq80.leveldb.DB

trait OrderBookSnapshotDB {
  def get(assetPair: AssetPair): Option[(Offset, OrderBookSnapshot)]
  def update(assetPair: AssetPair, offset: Offset, newSnapshot: Option[OrderBookSnapshot]): Unit
  def delete(assetPair: AssetPair): Unit
}

object OrderBookSnapshotDB {
  def apply(db: DB): OrderBookSnapshotDB = new OrderBookSnapshotDB {
    override def get(assetPair: AssetPair): Option[(Offset, OrderBookSnapshot)] = db.readOnly { ro =>
      val (obOffsetKey, obKey) = keys(assetPair)
      (ro.get(obOffsetKey), ro.get(obKey)).tupled
    }

    override def update(assetPair: AssetPair, offset: Offset, newSnapshot: Option[OrderBookSnapshot]): Unit = db.readWrite { rw =>
      val (obOffsetKey, obKey) = keys(assetPair)
      rw.put(obOffsetKey, Some(offset))
      newSnapshot.foreach(x => rw.put(obKey, Some(x)))
    }

    override def delete(assetPair: AssetPair): Unit = db.readWrite { rw =>
      val (obOffsetKey, obKey) = keys(assetPair)
      rw.delete(obOffsetKey)
      rw.delete(obKey)
    }

    private def keys(assetPair: AssetPair): (Key[Option[Offset]], Key[Option[OrderBookSnapshot]]) =
      (DbKeys.orderBookSnapshotOffset(assetPair), DbKeys.orderBookSnapshot(assetPair))
  }

  def inMem: OrderBookSnapshotDB = new OrderBookSnapshotDB {
    private val snapshots = new ConcurrentHashMap[AssetPair, (Offset, OrderBookSnapshot)]()

    override def get(assetPair: AssetPair): Option[(Offset, OrderBookSnapshot)] = Option(snapshots.get(assetPair))

    override def update(assetPair: AssetPair, offset: Offset, newSnapshot: Option[OrderBookSnapshot]): Unit =
      snapshots.compute(assetPair, (_, current) => (offset, newSnapshot.getOrElse(current._2)))

    override def delete(assetPair: AssetPair): Unit = snapshots.remove(assetPair)
  }
} 
Example 11
Source File: RateCache.scala    From matcher   with MIT License
package com.wavesplatform.dex.caches

import java.util.concurrent.ConcurrentHashMap

import com.wavesplatform.dex.db.RateDB
import com.wavesplatform.dex.domain.asset.Asset
import com.wavesplatform.dex.domain.asset.Asset.{IssuedAsset, Waves}
import org.iq80.leveldb.DB

import scala.collection.JavaConverters._
import scala.collection.concurrent.TrieMap

trait RateCache {

  def upsertRate(asset: Asset, value: Double): Option[Double]

  def getRate(asset: Asset): Option[Double]

  def getAllRates: Map[Asset, Double]

  def deleteRate(asset: Asset): Option[Double]

}

object RateCache {

  private val WavesRate = Option(1d)

  def apply(db: DB): RateCache = new RateCache {

    private val rateDB  = RateDB(db)
    private val rateMap = new ConcurrentHashMap[IssuedAsset, Double](rateDB.getAllRates.asJava)

    def upsertRate(asset: Asset, value: Double): Option[Double] =
      asset.fold { WavesRate } { issuedAsset =>
        rateDB.upsertRate(issuedAsset, value)
        Option(rateMap.put(issuedAsset, value))
      }

    def getRate(asset: Asset): Option[Double] = asset.fold(WavesRate)(asset => Option(rateMap get asset))

    def getAllRates: Map[Asset, Double] = {
      rateMap.asScala.toMap.map { case (issuedAsset, value) => Asset.fromCompatId(issuedAsset.compatId) -> value } + (Waves -> 1d)
    }

    def deleteRate(asset: Asset): Option[Double] = asset.fold(WavesRate) { issuedAsset =>
      rateDB.deleteRate(issuedAsset)
      Option(rateMap.remove(issuedAsset))
    }
  }

  def inMem: RateCache = new RateCache {

    private val rates: TrieMap[Asset, Double] = TrieMap(Waves -> 1d)

    def upsertRate(asset: Asset, value: Double): Option[Double] = {
      asset.fold { WavesRate } { issuedAsset =>
        val previousValue = rates.get(issuedAsset)
        rates += (asset -> value)
        previousValue
      }
    }

    def getRate(asset: Asset): Option[Double] = rates.get(asset)
    def getAllRates: Map[Asset, Double]       = rates.toMap

    def deleteRate(asset: Asset): Option[Double] =
      asset.fold { Option(1d) } { issuedAsset =>
        val previousValue = rates.get(issuedAsset)
        rates -= issuedAsset
        previousValue
      }
  }
} 
Example 12
Source File: MatchingRulesCache.scala    From matcher   with MIT License
package com.wavesplatform.dex.caches

import java.util.concurrent.ConcurrentHashMap

import cats.data.NonEmptyList
import com.wavesplatform.dex.domain.asset.{Asset, AssetPair}
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.settings.{DenormalizedMatchingRule, MatcherSettings, MatchingRule}

import scala.util.Try
import scala.util.control.NonFatal

class MatchingRulesCache(matcherSettings: MatcherSettings) extends ScorexLogging {

  private val allMatchingRules    = new ConcurrentHashMap[AssetPair, NonEmptyList[DenormalizedMatchingRule]]
  private val currentMatchingRule = new ConcurrentHashMap[AssetPair, DenormalizedMatchingRule]

  def getMatchingRules(assetPair: AssetPair, assetDecimals: Asset => Int): NonEmptyList[DenormalizedMatchingRule] = {
    allMatchingRules.computeIfAbsent(
      assetPair,
      _ => DenormalizedMatchingRule.getDenormalizedMatchingRules(matcherSettings, assetDecimals, assetPair)
    )
  }

  // DEX-488 TODO remove after found a reason of NPE
  def getDenormalizedRuleForNextOrder(assetPair: AssetPair, currentOffset: Long, assetDecimals: Asset => Int): DenormalizedMatchingRule = {

    lazy val defaultRule = DenormalizedMatchingRule.getDefaultRule(assetPair, assetDecimals)

    val result =
      Try {
        getMatchingRules(assetPair, assetDecimals).foldLeft(defaultRule) { case (acc, mr) => if (mr.startOffset <= (currentOffset + 1)) mr else acc }
      }.recover { case NonFatal(e) => log.error(s"Can't get a denormalized rule for a next order", e); defaultRule }
        .getOrElse(defaultRule)

    result.copy(tickSize = result.tickSize max defaultRule.tickSize)
  }

  def getNormalizedRuleForNextOrder(assetPair: AssetPair, currentOffset: Long, assetDecimals: Asset => Int): MatchingRule = {
    getDenormalizedRuleForNextOrder(assetPair, currentOffset, assetDecimals).normalize(assetPair, assetDecimals)
  }

  def updateCurrentMatchingRule(assetPair: AssetPair, denormalizedMatchingRule: DenormalizedMatchingRule): Unit = {
    currentMatchingRule.put(assetPair, denormalizedMatchingRule)
  }

  def setCurrentMatchingRuleForNewOrderBook(assetPair: AssetPair, currentOffset: Long, assetDecimals: Asset => Int): Unit = {
    updateCurrentMatchingRule(assetPair, getDenormalizedRuleForNextOrder(assetPair, currentOffset, assetDecimals))
  }
} 
Example 13
Source File: BlockchainCacheSpecification.scala    From matcher   with MIT License
package com.wavesplatform.dex.grpc.integration.caches

import java.time.Duration
import java.util.concurrent.{ConcurrentHashMap, ExecutorService, Executors}

import mouse.any.anySyntaxMouse
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

import scala.collection.JavaConverters._
import scala.concurrent._

class BlockchainCacheSpecification extends AnyWordSpecLike with Matchers with BeforeAndAfterAll {

  private val executor: ExecutorService                          = Executors.newCachedThreadPool
  implicit private val blockingContext: ExecutionContextExecutor = ExecutionContext.fromExecutor(executor)

  private class BlockchainCacheTest(loader: String => Future[String], expiration: Option[Duration], invalidationPredicate: String => Boolean)
      extends BlockchainCache[String, String](loader, expiration, invalidationPredicate)

  private def createCache(loader: String => Future[String],
                          expiration: Option[Duration] = None,
                          invalidationPredicate: String => Boolean = _ => false): BlockchainCacheTest = {
    new BlockchainCacheTest(loader, expiration, invalidationPredicate)
  }

  override def afterAll(): Unit = {
    super.afterAll()
    executor.shutdownNow()
  }

  private val andThenAwaitTimeout = 300

  "BlockchainCache" should {

    "not keep failed futures" in {

      val goodKey = "good key"
      val badKey  = "gRPC Error"

      val keyAccessMap = new ConcurrentHashMap[String, Int] unsafeTap (m => { m.put(goodKey, 0); m.put(badKey, 0) })
      val gRPCError    = new RuntimeException("gRPC Error occurred")

      val cache =
        createCache(
          key => {
            (if (key == badKey) Future.failed(gRPCError) else Future.successful(s"value = $key")) unsafeTap { _ =>
              keyAccessMap.computeIfPresent(key, (_, prev) => prev + 1)
            }
          }
        )

      val badKeyAccessCount = 10

      Await.result(
        (1 to badKeyAccessCount).foldLeft { Future.successful("") } { (prev, _) =>
          for {
            _ <- prev
            _ <- cache get goodKey
            r <- cache get badKey recover { case _ => "sad" }
          } yield { Thread.sleep(andThenAwaitTimeout); r }
        },
        scala.concurrent.duration.Duration.Inf
      )

      keyAccessMap.get(goodKey) shouldBe 1
      keyAccessMap.get(badKey) should be > 1
    }

    "not keep values according to the predicate" in {

      val goodKey = "111"
      val badKey  = "222"

      val keyAccessMap = new ConcurrentHashMap[String, Int](Map(goodKey -> 0, badKey -> 0).asJava)

      val cache = createCache(
        key => { keyAccessMap.computeIfPresent(key, (_, prev) => prev + 1); Future.successful(key) },
        invalidationPredicate = _.startsWith("2")
      )

      Await.result(
        (1 to 10).foldLeft { Future.successful("") } { (prev, _) =>
          for {
            _ <- prev
            _ <- cache get goodKey
            r <- cache get badKey
          } yield blocking { Thread.sleep(andThenAwaitTimeout); r }
        },
        scala.concurrent.duration.Duration.Inf
      )

      keyAccessMap.get(goodKey) shouldBe 1
      keyAccessMap.get(badKey) should be > 1
    }
  }
} 
Example 14
Source File: HasWebSockets.scala    From matcher   with MIT License
package com.wavesplatform.dex.it.api.websockets

import java.lang
import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.wavesplatform.dex.api.ws.connection.{WsConnection, WsConnectionOps}
import com.wavesplatform.dex.api.ws.entities.{WsBalances, WsOrder}
import com.wavesplatform.dex.api.ws.protocol.{WsAddressSubscribe, WsInitial, WsOrderBookSubscribe}
import com.wavesplatform.dex.domain.account.KeyPair
import com.wavesplatform.dex.domain.asset.{Asset, AssetPair}
import com.wavesplatform.dex.error.ErrorFormatterContext
import com.wavesplatform.dex.it.config.PredefinedAssets
import com.wavesplatform.dex.it.docker.DexContainer
import com.wavesplatform.dex.test.matchers.DiffMatcherWithImplicits
import mouse.any._
import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.duration._

trait HasWebSockets extends BeforeAndAfterAll with HasJwt with WsConnectionOps with WsMessageOps {
  _: Suite with Eventually with Matchers with DiffMatcherWithImplicits with PredefinedAssets =>

  implicit protected val system: ActorSystem        = ActorSystem()
  implicit protected val materializer: Materializer = Materializer.matFromSystem(system)
  implicit protected val efc: ErrorFormatterContext = assetDecimalsMap.apply

  protected def getWsStreamUri(dex: DexContainer): String = s"ws://127.0.0.1:${dex.restApiAddress.getPort}/ws/v0"

  protected val knownWsConnections: ConcurrentHashMap.KeySetView[WsConnection, lang.Boolean] =
    ConcurrentHashMap.newKeySet[WsConnection]()

  protected def addConnection(connection: WsConnection): Unit = knownWsConnections.add(connection)

  protected def mkWsAddressConnection(client: KeyPair,
                                      dex: DexContainer,
                                      keepAlive: Boolean = true,
                                      subscriptionLifetime: FiniteDuration = 1.hour): WsConnection = {
    val jwt        = mkJwt(client, lifetime = subscriptionLifetime)
    val connection = mkDexWsConnection(dex, keepAlive)
    connection.send(WsAddressSubscribe(client.toAddress, WsAddressSubscribe.defaultAuthType, jwt))
    connection
  }

  protected def mkWsOrderBookConnection(assetPair: AssetPair, dex: DexContainer, depth: Int = 1): WsConnection = {
    val connection = mkDexWsConnection(dex)
    connection.send(WsOrderBookSubscribe(assetPair, depth))
    connection
  }

  protected def mkWsInternalConnection(dex: DexContainer, keepAlive: Boolean = true): WsConnection =
    mkWsConnection(s"${getWsStreamUri(dex)}/internal", keepAlive)

  protected def mkDexWsConnection(dex: DexContainer, keepAlive: Boolean = true): WsConnection =
    mkWsConnection(getWsStreamUri(dex), keepAlive)

  protected def mkWsConnection(uri: String, keepAlive: Boolean = true): WsConnection = {
    new WsConnection(uri, keepAlive) unsafeTap { wsc =>
      addConnection(wsc)
      eventually { wsc.collectMessages[WsInitial] should have size 1 }
      wsc.clearMessages()
    }
  }

  protected def assertChanges(c: WsConnection, squash: Boolean = true)(expBs: Map[Asset, WsBalances]*)(expOs: WsOrder*): Unit = {
    eventually {
      if (squash) {
        c.balanceChanges.size should be <= expBs.size
        c.balanceChanges.squashed should matchTo { expBs.toList.squashed }
        c.orderChanges.size should be <= expOs.size
        c.orderChanges.squashed should matchTo { expOs.toList.squashed }
      } else {
        c.balanceChanges should matchTo(expBs)
        c.orderChanges should matchTo(expOs)
      }
    }

    c.clearMessages()
  }

  protected def cleanupWebSockets(): Unit = {
    if (!knownWsConnections.isEmpty) {
      knownWsConnections.forEach { _.close() }
      materializer.shutdown()
    }
  }

  override def afterAll(): Unit = {
    super.afterAll()
    cleanupWebSockets()
  }
} 
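Besides plain maps, this example uses ConcurrentHashMap.newKeySet (for knownWsConnections) to obtain a thread-safe set backed by a ConcurrentHashMap; Example 20 below uses the same trick for pending disconnects. A minimal standalone sketch:

import java.util.concurrent.ConcurrentHashMap
import scala.jdk.CollectionConverters._

object KeySetDemo {
  def main(args: Array[String]): Unit = {
    // newKeySet returns a concurrent java.util.Set view backed by a ConcurrentHashMap
    val seen = ConcurrentHashMap.newKeySet[String]()
    seen.add("a")
    seen.add("a") // duplicate adds are ignored
    println(seen.asScala.toSet) // Set(a)
  }
}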
Example 15
Source File: GlobalIssues.scala    From sonar-scala   with GNU Lesser General Public License v3.0
package com.mwz.sonar.scala
package pr

import java.util.concurrent.ConcurrentHashMap

import scala.jdk.CollectionConverters._

import org.sonar.api.batch.fs.InputFile
import org.sonar.api.batch.rule.Severity
import org.sonar.api.batch.{InstantiationStrategy, ScannerSide}
import org.sonar.api.rule.RuleKey

final case class Issue(
  key: RuleKey,
  file: InputFile,
  line: Int,
  severity: Severity,
  message: String
)


// TODO: Both @ScannerSide and @InstantiationStrategy are deprecated, we should switch
//  to the org.sonar.api.scanner.ScannerSide in the future.
@ScannerSide
@InstantiationStrategy(InstantiationStrategy.PER_BATCH)
final class GlobalIssues {
  private[this] val issues: ConcurrentHashMap[InputFile, List[Issue]] =
    new ConcurrentHashMap()

  def add(issue: Issue): Unit =
    issues.merge(issue.file, List(issue), (current, value) => value ::: current)

  def allIssues: Map[InputFile, List[Issue]] =
    issues.asScala.toMap
} 
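GlobalIssues accumulates per-file issue lists with merge, which stores the given value for a new key and otherwise combines it with the existing one atomically. The same method works for simple counters, as in this minimal sketch:

import java.util.concurrent.ConcurrentHashMap

object MergeDemo {
  def main(args: Array[String]): Unit = {
    val counts = new ConcurrentHashMap[String, Int]()
    // merge inserts 1 for a new word and otherwise combines the old and new values
    Seq("a", "b", "a").foreach(word => counts.merge(word, 1, (old, inc) => old + inc))
    println(counts.get("a")) // 2
    println(counts.get("b")) // 1
  }
}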
Example 16
Source File: RoomCrowd.scala    From lila-ws   with GNU Affero General Public License v3.0
package lila.ws

import java.util.concurrent.ConcurrentHashMap
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext

import ipc._

final class RoomCrowd(
    json: CrowdJson,
    groupedWithin: util.GroupedWithin
)(implicit ec: ExecutionContext) {

  import RoomCrowd._

  private val rooms = new ConcurrentHashMap[RoomId, RoomState](1024)

  def connect(roomId: RoomId, user: Option[User]): Unit =
    publish(
      roomId,
      rooms.compute(roomId, (_, cur) => Option(cur).getOrElse(RoomState()) connect user)
    )

  def disconnect(roomId: RoomId, user: Option[User]): Unit = {
    val room = rooms.computeIfPresent(
      roomId,
      (_, room) => {
        val newRoom = room disconnect user
        if (newRoom.isEmpty) null else newRoom
      }
    )
    if (room != null) publish(roomId, room)
  }

  def getUsers(roomId: RoomId): Set[User.ID] =
    Option(rooms get roomId).fold(Set.empty[User.ID])(_.users.keySet)

  def isPresent(roomId: RoomId, userId: User.ID): Boolean =
    Option(rooms get roomId).exists(_.users contains userId)

  private def publish(roomId: RoomId, room: RoomState): Unit =
    outputBatch(outputOf(roomId, room))

  private val outputBatch = groupedWithin[Output](1024, 1.second) { outputs =>
    outputs
      .foldLeft(Map.empty[RoomId, Output]) {
        case (crowds, crowd) => crowds.updated(crowd.roomId, crowd)
      }
      .values foreach { output =>
      json room output foreach {
        Bus.publish(_ room output.roomId, _)
      }
    }
  }

  def size = rooms.size
}

object RoomCrowd {

  case class Output(
      roomId: RoomId,
      members: Int,
      users: Iterable[User.ID],
      anons: Int
  )

  def outputOf(roomId: RoomId, room: RoomState) =
    Output(
      roomId = roomId,
      members = room.nbMembers,
      users = room.users.keys,
      anons = room.anons
    )

  case class RoomState(
      anons: Int = 0,
      users: Map[User.ID, Int] = Map.empty
  ) {
    def nbUsers   = users.size
    def nbMembers = anons + nbUsers
    def isEmpty   = nbMembers < 1

    def connect(user: Option[User]) =
      user.fold(copy(anons = anons + 1)) { u =>
        copy(users = users.updatedWith(u.id)(cur => Some(cur.fold(1)(_ + 1))))
      }
    def disconnect(user: Option[User]) =
      user.fold(copy(anons = anons - 1)) { u =>
        copy(users = users.updatedWith(u.id)(_.map(_ - 1).filter(_ > 0)))
      }
  }
} 
Example 17
Source File: History.scala    From lila-ws   with GNU Affero General Public License v3.0
package lila.ws

import java.util.concurrent.ConcurrentHashMap

final class History[K <: StringValue, V <: ipc.ClientIn.HasVersion](
    historySize: Int,
    initialCapacity: Int
) {

  private val histories = new ConcurrentHashMap[String, List[V]](initialCapacity)

  def add(key: K, event: V): Unit =
    histories.compute(
      key.toString,
      (_, cur) => event :: Option(cur).getOrElse(Nil).take(historySize)
    )

  def getFrom(key: K, versionOpt: Option[SocketVersion]): Option[List[V]] = {
    val allEvents = histories.getOrDefault(key.toString, Nil)
    versionOpt
      .fold(Option(allEvents.take(5))) { since =>
        if (allEvents.headOption.fold(true)(_.version <= since)) Some(Nil)
        else {
          val events = allEvents.takeWhile(_.version > since)
          if (events.size == events.headOption.fold(0)(_.version.value) - since.value) Some(events)
          else None
        }
      }
      .map(_.reverse)
  }

  def stop(key: K) = histories.remove(key.toString)

  def hasEvents(key: K) = Option(histories get key.toString).exists(_.nonEmpty)

  def size = histories.size

  def allVersions: Array[(String, SocketVersion)] = {
    val res = scala.collection.mutable.ArrayBuffer.empty[(String, SocketVersion)]
    histories.forEach { (key, events) =>
      events.headOption foreach { event => res += (key.toString -> event.version) }
    }
    res.toArray
  }
}

object History {

  val room  = new History[RoomId, ipc.ClientIn.Versioned](20, 4096)
  val round = new History[Game.Id, ipc.ClientIn.RoundVersioned](20, 32768)
} 
Example 18
Source File: EventBus.scala    From lila-ws   with GNU Affero General Public License v3.0
package lila.ws.util

import java.util.concurrent.ConcurrentHashMap

final class EventBus[Event, Channel, Subscriber](
    initialCapacity: Int,
    publish: (Subscriber, Event) => Unit
) {

  private val entries = new ConcurrentHashMap[Channel, Set[Subscriber]](initialCapacity)

  def subscribe(channel: Channel, subscriber: Subscriber): Unit =
    entries.compute(
      channel,
      (_, subs) => {
        Option(subs).fold(Set(subscriber))(_ + subscriber)
      }
    )

  def unsubscribe(channel: Channel, subscriber: Subscriber): Unit =
    entries.computeIfPresent(
      channel,
      (_, subs) => {
        val newSubs = subs - subscriber
        if (newSubs.isEmpty) null
        else newSubs
      }
    )

  def publish(channel: Channel, event: Event): Unit =
    Option(entries get channel) foreach {
      _ foreach {
        publish(_, event)
      }
    }

  def size                     = entries.size
  def sizeOf(channel: Channel) = Option(entries get channel).fold(0)(_.size)
} 
Example 19
Source File: Fens.scala    From lila-ws   with GNU Affero General Public License v3.0
package lila.ws

import akka.actor.typed.ActorRef
import chess.format.{ FEN, Uci }
import java.util.concurrent.ConcurrentHashMap

import ipc._


object Fens {

  case class Position(lastUci: Uci, fen: FEN)
  case class Watched(position: Option[Position], clients: Set[ActorRef[ClientMsg]])

  private val games = new ConcurrentHashMap[Game.Id, Watched](1024)

  // client starts watching
  def watch(gameIds: Iterable[Game.Id], client: Client): Unit =
    gameIds foreach { gameId =>
      games
        .compute(
          gameId,
          {
            case (_, null)                  => Watched(None, Set(client))
            case (_, Watched(pos, clients)) => Watched(pos, clients + client)
          }
        )
        .position foreach {
        case Position(lastUci, fen) => client ! ClientIn.Fen(gameId, lastUci, fen)
      }
    }

  // when a client disconnects
  def unwatch(gameIds: Iterable[Game.Id], client: Client): Unit =
    gameIds foreach { gameId =>
      games.computeIfPresent(
        gameId,
        (_, watched) => {
          val newClients = watched.clients - client
          if (newClients.isEmpty) null
          else watched.copy(clients = newClients)
        }
      )
    }

  // move coming from the server
  def move(gameId: Game.Id, json: JsonString): Unit = {
    games.computeIfPresent(
      gameId,
      (_, watched) =>
        json.value match {
          case MoveRegex(uciS, fenS) =>
            Uci(uciS).fold(watched) { lastUci =>
              val fen = FEN(fenS)
              val msg = ClientIn.Fen(gameId, lastUci, fen)
              watched.clients foreach { _ ! msg }
              watched.copy(position = Some(Position(lastUci, fen)))
            }
          case _ => watched
        }
    )
  }

  // ...,"uci":"h2g2","san":"Rg2","fen":"r2qb1k1/p2nbrpn/6Np/3pPp1P/1ppP1P2/2P1B3/PP2B1R1/R2Q1NK1",...
  private val MoveRegex = """uci":"([^"]+)".+fen":"([^"]+)""".r.unanchored

  def size = games.size
} 
Example 20
Source File: Users.scala    From lila-ws   with GNU Affero General Public License v3.0
package lila.ws

import akka.actor.typed.Scheduler
import java.util.concurrent.ConcurrentHashMap
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext
import scala.jdk.CollectionConverters._

import ipc._

final class Users(implicit scheduler: Scheduler, ec: ExecutionContext) {

  private val users       = new ConcurrentHashMap[User.ID, Set[Client]](32768)
  private val disconnects = ConcurrentHashMap.newKeySet[User.ID](2048)

  private def publish(msg: Any) = Bus.internal.publish("users", msg)

  scheduler.scheduleWithFixedDelay(7.seconds, 5.seconds) { () =>
    publish(LilaIn.DisconnectUsers(disconnects.iterator.asScala.toSet))
    disconnects.clear()
  }

  def connect(user: User, client: Client, silently: Boolean = false): Unit =
    users.compute(
      user.id,
      {
        case (_, null) =>
          if (!disconnects.remove(user.id)) publish(LilaIn.ConnectUser(user, silently))
          Set(client)
        case (_, clients) =>
          clients + client
      }
    )

  def disconnect(user: User, client: Client): Unit =
    users.computeIfPresent(
      user.id,
      (_, clients) => {
        val newClients = clients - client
        if (newClients.isEmpty) {
          disconnects add user.id
          null
        } else newClients
      }
    )

  def tellOne(userId: User.ID, payload: ClientMsg): Unit =
    Option(users get userId) foreach {
      _ foreach { _ ! payload }
    }

  def tellMany(userIds: Iterable[User.ID], payload: ClientMsg): Unit =
    userIds foreach { tellOne(_, payload) }

  def kick(userId: User.ID): Unit =
    Option(users get userId) foreach {
      _ foreach { _ ! ClientCtrl.Disconnect }
    }

  def setTroll(userId: User.ID, v: IsTroll): Unit =
    Option(users get userId) foreach {
      _ foreach { _ ! ipc.SetTroll(v) }
    }

  def isOnline(userId: User.ID): Boolean = users containsKey userId

  def size = users.size
} 
Example 21
Source File: UserListener.scala    From Linkis   with Apache License 2.0
package com.webank.wedatasphere.linkis.resourcemanager.listener

import java.util
import java.util.concurrent.ConcurrentHashMap
import java.util.{Comparator, List => JList, Map => JMap}

import com.webank.wedatasphere.linkis.common.listener.Event
import com.webank.wedatasphere.linkis.common.utils.Logging
import com.webank.wedatasphere.linkis.resourcemanager.event.metric.{MetricRMEvent, MetricRMEventListener, UserSessionEndEvent, UserSessionStartEvent}

import scala.beans.BeanProperty


class UserListener extends MetricRMEventListener with Logging {

  import UserListener._

  override def onMetricRMEvent(e: MetricRMEvent): Unit = e match {
    case e: UserSessionStartEvent => {
      info("[Add a startup event(添加启动事件)]user:" + e.user + ",UserSessionStartEvent: " + e)

      userSessionCollect.put(e.userUsedResource.moduleInstance.getInstance, e)

    }
    case e: UserSessionEndEvent => {
      info("[Cleanup event(清理结束事件)]user:" + e.user + ",UserSessionEndEvent: " + e)
      userSessionCollect.remove(e.userReleasedResource.moduleInstance)
    }
    case _ => {
      warn("not found : " + e)
    }
  }

  def onEventError(event: Event, t: scala.Throwable): scala.Unit = {}
}

class userEventByUserName extends Comparator[UserSessionStartEvent] {
  override def compare(o1: UserSessionStartEvent, o2: UserSessionStartEvent): Int = {
    o1.user.compareTo(o2.user)
  }
}

case class UserSessionInfo(@BeanProperty userName: String,
                           @BeanProperty id: String,
                           @BeanProperty state: String,
                           @BeanProperty running: Boolean,
                           @BeanProperty completed: Boolean,
                           @BeanProperty moduleName: String,
                           @BeanProperty instance: String,
                           @BeanProperty resource: JMap[String, Any],
                           @BeanProperty ticketId: String
                          )

object UserListener {
  val userSessionCollect: JMap[String, UserSessionStartEvent] = new ConcurrentHashMap[String, UserSessionStartEvent]()

  def parseResource(r: String): JMap[String, Any] = {
    val resource: JMap[String, Any] = new util.HashMap
    if (r == null) return resource;
    val r0 = r.split(",|:")
    var i = 0
    while (i < r0.length) {
      if (i == 0) {
        resource.put("type", r0(i))
        i = i + 1
      } else {
        resource.put(r0(i), r0(i + 1))
        i = i + 2
      }
    }
    resource
  }

  def getUserEvent(userName: String): JList[UserSessionInfo] = {
    val eventIterator = userSessionCollect.values().iterator()
    val userList: JList[UserSessionInfo] = new util.ArrayList[UserSessionInfo]();
    while (eventIterator.hasNext) {
      val e = eventIterator.next()
      if (e.user.equals(userName)) {
        val usi = UserSessionInfo(e.user, e.getId, e.getState.toString, e.isRunning, e.isCompleted, e.userUsedResource.moduleInstance.getApplicationName
          , e.userUsedResource.moduleInstance.getInstance, parseResource(e.userUsedResource.resource.toString), e.userUsedResource.ticketId)
        userList.add(usi)
      }
    }
    userList
  }
} 
Example 22
Source File: ModuleListener.scala    From Linkis   with Apache License 2.0
package com.webank.wedatasphere.linkis.resourcemanager.listener

import java.util
import java.util.concurrent.ConcurrentHashMap
import java.util.{Comparator, Map => JMap}

import com.webank.wedatasphere.linkis.common.utils.Logging
import com.webank.wedatasphere.linkis.resourcemanager.event.notify.{ModuleRegisterEvent, ModuleUnregisterEvent, NotifyRMEvent, NotifyRMEventListener}

import scala.beans.BeanProperty


case class EMModule(@BeanProperty moduleName: String,
                    @BeanProperty instance: String,
                    @BeanProperty createTime: Long,
                    @BeanProperty endTime: Long,
                    @BeanProperty id: String,
                    @BeanProperty state: String,
                    @BeanProperty totalResource: JMap[String, Any],
                    @BeanProperty usedResource: JMap[String, Any],
                    @BeanProperty running: Boolean,
                    @BeanProperty completed: Boolean
                   )


object ModuleListener {

  val moduleInfoCollect: JMap[String, ModuleRegisterEvent] = new ConcurrentHashMap[String, ModuleRegisterEvent]()

  def parseResource(r: String): JMap[String, Any] = {
    val resource: JMap[String, Any] = new util.HashMap

    val r0 = r.split(",|:")
    var i = 0
    while (i < r0.length) {
      if (i == 0) {
        resource.put("type", r0(i))
        i = i + 1
      } else {
        resource.put(r0(i), r0(i + 1))
        i = i + 2
      }
    }
    resource
  }
} 
Example 23
Source File: AbstractAuthenticationStrategy.scala    From Linkis   with Apache License 2.0
package com.webank.wedatasphere.linkis.httpclient.authentication

import java.util.concurrent.ConcurrentHashMap

import com.webank.wedatasphere.linkis.httpclient.Client
import com.webank.wedatasphere.linkis.httpclient.config.ClientConfig
import com.webank.wedatasphere.linkis.httpclient.request.{Action, UserAction}
import org.apache.commons.lang.StringUtils
import org.apache.http.HttpResponse


abstract class AbstractAuthenticationStrategy extends AuthenticationStrategy {
  private var client: Client = _
  private val userNameToAuthentications = new ConcurrentHashMap[String, Authentication]()
  private var clientConfig: ClientConfig = _
  protected val sessionMaxAliveTime: Long

  def setClient(client: Client): Unit = this.client = client
  def getClient: Client = client

  protected def getKeyByUserAndURL(user: String, serverUrl: String): String = user + "@" + serverUrl

  protected def getKey(requestAction: Action, serverUrl: String): String = requestAction match {
    case _: AuthenticationAction => null
    case authAction: UserAction =>
      getKeyByUserAndURL(authAction.getUser, serverUrl)
    case _ if StringUtils.isNotBlank(clientConfig.getAuthTokenKey) =>
      getKeyByUserAndURL(clientConfig.getAuthTokenKey, serverUrl)
    case _ => null
  }

  def setClientConfig(clientConfig: ClientConfig): Unit = this.clientConfig = clientConfig
  def getClientConfig: ClientConfig = clientConfig

  def login(requestAction: Action, serverUrl: String): Authentication = {
    val key = getKey(requestAction, serverUrl)
    if(key == null) return null
    if(userNameToAuthentications.containsKey(key) && !isTimeout(userNameToAuthentications.get(key))) {
      val authenticationAction = userNameToAuthentications.get(key)
      authenticationAction.updateLastAccessTime()
      authenticationAction
    } else key.intern() synchronized {
      var authentication = userNameToAuthentications.get(key)
      if(authentication == null || isTimeout(authentication)) {
        authentication = tryLogin(requestAction, serverUrl)
        userNameToAuthentications.put(key, authentication)
      }
      authentication
    }
  }

  def tryLogin(requestAction: Action, serverUrl: String): Authentication = {
    val action = getAuthenticationAction(requestAction, serverUrl)
    client.execute(action, 5000) match {
      case r: AuthenticationResult => r.getAuthentication
    }
  }

  protected def getAuthenticationAction(requestAction: Action, serverUrl: String): AuthenticationAction

  def getAuthenticationResult(response: HttpResponse, requestAction: AuthenticationAction): AuthenticationResult

  def isTimeout(authentication: Authentication): Boolean = System.currentTimeMillis() - authentication.getLastAccessTime >= sessionMaxAliveTime
} 
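The login method above builds a double-checked, per-key critical section by synchronizing on key.intern(). A ConcurrentHashMap can give a similar per-key guarantee with compute, which runs its remapping function atomically for each key; a hedged sketch (SessionCache and its parameters are illustrative, and note that the function runs while the map holds an internal lock, so it should stay short):

import java.util.concurrent.ConcurrentHashMap

// Hypothetical session cache: for a given key, compute runs its function atomically,
// so at most one caller performs the (re)login while others wait briefly.
class SessionCache[K, S <: AnyRef](login: K => S, isExpired: S => Boolean) {
  private val sessions = new ConcurrentHashMap[K, S]()

  def getOrLogin(key: K): S =
    sessions.compute(key, (k, current) =>
      if (current == null || isExpired(current)) login(k) else current)
}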
Example 24
Source File: HostsStatuses.scala    From algoliasearch-client-scala   with MIT License
package algolia

import java.util.concurrent.ConcurrentHashMap

import org.slf4j.{Logger, LoggerFactory}

case class HostsStatuses(
    configuration: AlgoliaClientConfiguration,
    utils: AlgoliaUtils,
    queryHosts: Seq[String],
    indexingHosts: Seq[String]
) {

  private[algolia] val hostStatuses: ConcurrentHashMap[String, HostStatus] =
    new ConcurrentHashMap[String, HostStatus](5)

  val logger: Logger = LoggerFactory.getLogger("algoliasearch")

  def markHostAsUp(host: String): Unit = {
    logger.debug("Marking {} as `up`", host)
    hostStatuses.put(host, HostStatus.up(utils.now()))
  }

  def markHostAsDown(host: String): Unit = {
    logger.debug("Marking {} as `down`", host)
    hostStatuses.put(host, HostStatus.down(utils.now()))
  }

  def indexingHostsThatAreUp(): Seq[String] = hostsThatAreUp(indexingHosts)

  def queryHostsThatAreUp(): Seq[String] = hostsThatAreUp(queryHosts)

  private def hostsThatAreUp(hosts: Seq[String]): Seq[String] = {
    val filteredHosts = hosts.filter(h => isUpOrCouldBeRetried(getHostStatus(h))
    )
    if (filteredHosts.isEmpty) {
      hosts
    } else {
      filteredHosts
    }
  }

  def isUpOrCouldBeRetried(hostStatus: HostStatus): Boolean =
    hostStatus.up || (utils
      .now() - hostStatus.updatedAt) >= configuration.hostDownTimeoutMs

  private def getHostStatus(host: String): HostStatus =
    hostStatuses.getOrDefault(host, HostStatus.up(utils.now()))
}

private case class HostStatus(up: Boolean, updatedAt: Long)

private object HostStatus {

  def up(now: Long) = HostStatus(up = true, now)

  def down(now: Long) = HostStatus(up = false, now)

} 
Example 25
Source File: ConnectionPool.scala    From spark-redis   with BSD 3-Clause "New" or "Revised" License
package com.redislabs.provider.redis

import redis.clients.jedis.{JedisPoolConfig, Jedis, JedisPool}
import redis.clients.jedis.exceptions.JedisConnectionException

import java.util.concurrent.ConcurrentHashMap

import scala.collection.JavaConversions._


object ConnectionPool {
  @transient private lazy val pools: ConcurrentHashMap[RedisEndpoint, JedisPool] =
    new ConcurrentHashMap[RedisEndpoint, JedisPool]()

  def connect(re: RedisEndpoint): Jedis = {
    val pool = pools.getOrElseUpdate(re,
      {
        val poolConfig: JedisPoolConfig = new JedisPoolConfig();
        poolConfig.setMaxTotal(250)
        poolConfig.setMaxIdle(32)
        poolConfig.setTestOnBorrow(false)
        poolConfig.setTestOnReturn(false)
        poolConfig.setTestWhileIdle(false)
        poolConfig.setMinEvictableIdleTimeMillis(60000)
        poolConfig.setTimeBetweenEvictionRunsMillis(30000)
        poolConfig.setNumTestsPerEvictionRun(-1)

        new JedisPool(poolConfig, re.host, re.port, re.timeout, re.auth, re.dbNum, re.ssl)
      }
    )
    var sleepTime: Int = 4
    var conn: Jedis = null
    while (conn == null) {
      try {
        conn = pool.getResource
      }
      catch {
        case e: JedisConnectionException if e.getCause.toString.
          contains("ERR max number of clients reached") => {
          if (sleepTime < 500) sleepTime *= 2
          Thread.sleep(sleepTime)
        }
        case e: Exception => throw e
      }
    }
    conn
  }
} 
Example 26
Source File: HealthService.scala    From rokku   with Apache License 2.0
package com.ing.wbaa.rokku.proxy.api

import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.{ Route, StandardRoute }
import com.ing.wbaa.rokku.proxy.data.HealthCheck.{ RGWListBuckets, S3ListBucket }
import com.ing.wbaa.rokku.proxy.handler.radosgw.RadosGatewayHandler
import com.ing.wbaa.rokku.proxy.provider.aws.S3Client
import com.typesafe.scalalogging.LazyLogging
import java.util.concurrent.ConcurrentHashMap

import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.{ Failure, Success, Try }

object HealthService {
  private def timestamp: Long = System.currentTimeMillis()

  private val statusMap = new ConcurrentHashMap[Long, StandardRoute]()

  private def clearStatus(): Unit = statusMap.clear()

  private def addStatus(probeResult: StandardRoute): StandardRoute = statusMap.put(timestamp, probeResult)

  private def getCurrentStatusMap: Future[mutable.Map[Long, StandardRoute]] = Future.successful(statusMap.asScala)

  private def getRouteStatus(implicit ec: ExecutionContext): Future[Option[StandardRoute]] = getCurrentStatusMap.map(_.headOption.map { case (_, r) => r })

}

trait HealthService extends RadosGatewayHandler with S3Client with LazyLogging {

  protected[this] implicit def executionContext: ExecutionContext

  import HealthService.{ addStatus, getCurrentStatusMap, clearStatus, getRouteStatus, timestamp }

  private lazy val interval = storageS3Settings.hcInterval

  private def updateStatus: Future[StandardRoute] = Future {
    clearStatus()
    storageS3Settings.hcMethod match {
      case RGWListBuckets => addStatus(execProbe(listAllBuckets _))
      case S3ListBucket   => addStatus(execProbe(listBucket _))
    }
  }
  private def updateStatusAndGet: Future[Option[StandardRoute]] =
    for {
      _ <- updateStatus
      s <- getRouteStatus
    } yield s

  def getStatus(currentTime: Long): Future[Option[StandardRoute]] =
    getCurrentStatusMap.flatMap(_ match {
      case m if m.isEmpty =>
        logger.debug("Status cache empty, running probe")
        updateStatusAndGet
      case m => m.keys.map {
        case entryTime if (entryTime + interval) < currentTime =>
          logger.debug("Status entry expired, renewing")
          updateStatusAndGet
        case _ =>
          logger.debug("Serving status from cache")
          Future.successful(m.map { case (_, r) => r }.headOption)
      }.head
    })

  private def execProbe[A](p: () => A): StandardRoute =
    Try {
      p()
    } match {
      case Success(_)  => complete("pong")
      case Failure(ex) => complete(StatusCodes.InternalServerError -> s"storage not available - $ex")
    }

  final val healthRoute: Route =
    path("ping") {
      get {
        onComplete(getStatus(timestamp)) {
          case Success(opt) => opt.getOrElse(complete(StatusCodes.InternalServerError -> "Failed to read status cache"))
          case Failure(e)   => complete(StatusCodes.InternalServerError -> "Failed to read status cache " + e.getMessage)
        }
      }
    }
} 
Example 27
Source File: SparkSQLOperationManager.scala    From XSQL   with Apache License 2.0
package org.apache.spark.sql.hive.thriftserver.server

import java.util.{Map => JMap}
import java.util.concurrent.ConcurrentHashMap

import org.apache.hive.service.cli._
import org.apache.hive.service.cli.operation.{ExecuteStatementOperation, Operation, OperationManager}
import org.apache.hive.service.cli.session.HiveSession

import org.apache.spark.internal.Logging
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.hive.thriftserver.{ReflectionUtils, SparkExecuteStatementOperation}
import org.apache.spark.sql.internal.SQLConf


private[thriftserver] class SparkSQLOperationManager()
  extends OperationManager with Logging {

  val handleToOperation = ReflectionUtils
    .getSuperField[JMap[OperationHandle, Operation]](this, "handleToOperation")

  val sessionToActivePool = new ConcurrentHashMap[SessionHandle, String]()
  val sessionToContexts = new ConcurrentHashMap[SessionHandle, SQLContext]()

  override def newExecuteStatementOperation(
      parentSession: HiveSession,
      statement: String,
      confOverlay: JMap[String, String],
      async: Boolean): ExecuteStatementOperation = synchronized {
    val sqlContext = sessionToContexts.get(parentSession.getSessionHandle)
    require(sqlContext != null, s"Session handle: ${parentSession.getSessionHandle} has not been" +
      s" initialized or had already closed.")
    val conf = sqlContext.sessionState.conf
    val hiveSessionState = parentSession.getSessionState
    setConfMap(conf, hiveSessionState.getOverriddenConfigurations)
    setConfMap(conf, hiveSessionState.getHiveVariables)
    val runInBackground = async && conf.getConf(HiveUtils.HIVE_THRIFT_SERVER_ASYNC)
    val operation = new SparkExecuteStatementOperation(parentSession, statement, confOverlay,
      runInBackground)(sqlContext, sessionToActivePool)
    handleToOperation.put(operation.getHandle, operation)
    logDebug(s"Created Operation for $statement with session=$parentSession, " +
      s"runInBackground=$runInBackground")
    operation
  }

  def setConfMap(conf: SQLConf, confMap: java.util.Map[String, String]): Unit = {
    val iterator = confMap.entrySet().iterator()
    while (iterator.hasNext) {
      val kv = iterator.next()
      conf.setConfString(kv.getKey, kv.getValue)
    }
  }
} 
Example 28
Source File: ConnectionPool.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.datasources.redis

import java.util.concurrent.ConcurrentHashMap

import scala.collection.JavaConverters._

import redis.clients.jedis.{Jedis, JedisPool, JedisPoolConfig}
import redis.clients.jedis.exceptions.JedisConnectionException

object ConnectionPool {
  @transient private lazy val pools: ConcurrentHashMap[RedisEndpoint, JedisPool] =
    new ConcurrentHashMap[RedisEndpoint, JedisPool]()
  def connect(re: RedisEndpoint): Jedis = {
    val pool = pools.asScala.getOrElseUpdate(re, {
      val poolConfig: JedisPoolConfig = new JedisPoolConfig()
      poolConfig.setMaxTotal(250)
      poolConfig.setMaxIdle(32)
      poolConfig.setTestOnBorrow(false)
      poolConfig.setTestOnReturn(false)
      poolConfig.setTestWhileIdle(false)
      poolConfig.setMinEvictableIdleTimeMillis(60000)
      poolConfig.setTimeBetweenEvictionRunsMillis(30000)
      poolConfig.setNumTestsPerEvictionRun(-1)
      new JedisPool(poolConfig, re.host, re.port, re.timeout, re.auth, re.dbNum)
    })
    var sleepTime: Int = 4
    var conn: Jedis = null
    while (conn == null) {
      try {
        conn = pool.getResource
      } catch {
        case e: JedisConnectionException
            if e.getCause.toString.contains("ERR max number of clients reached") => {
          if (sleepTime < 500) sleepTime *= 2
          Thread.sleep(sleepTime)
        }
        case e: Exception => throw e
      }
    }
    conn
  }
} 
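One caveat about Example 28: getOrElseUpdate on the asScala view is not guaranteed to be atomic, so under contention two threads may both build a JedisPool for the same endpoint and one of them is silently dropped. Where that matters, computeIfAbsent on the underlying Java map gives a per-key guarantee. A minimal sketch with a stand-in value type, assuming Scala 2.12+ for the lambda-to-Function conversion (not the project's code):

import java.util.concurrent.ConcurrentHashMap

object PoolCache {
  private val pools = new ConcurrentHashMap[String, String]() // endpoint -> pool handle (stand-in type)

  // Per the ConcurrentHashMap Javadoc, the mapping function is applied at most once per key,
  // so concurrent callers for the same endpoint share a single created value.
  def poolFor(endpoint: String): String =
    pools.computeIfAbsent(endpoint, _ => s"pool-for-$endpoint")
}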
Example 29
Source File: SQLExecution.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.ui.{SparkListenerSQLExecutionEnd, SparkListenerSQLExecutionStart}

object SQLExecution {

  val EXECUTION_ID_KEY = "spark.sql.execution.id"

  private val _nextExecutionId = new AtomicLong(0)

  private def nextExecutionId: Long = _nextExecutionId.getAndIncrement

  private val executionIdToQueryExecution = new ConcurrentHashMap[Long, QueryExecution]()

  def getQueryExecution(executionId: Long): QueryExecution = {
    executionIdToQueryExecution.get(executionId)
  }

  private val testing = sys.props.contains("spark.testing")

  private[sql] def checkSQLExecutionId(sparkSession: SparkSession): Unit = {
    val sc = sparkSession.sparkContext
    // only throw an exception during tests. a missing execution ID should not fail a job.
    if (testing && sc.getLocalProperty(EXECUTION_ID_KEY) == null) {
      // Attention testers: when a test fails with this exception, it means that the action that
      // started execution of a query didn't call withNewExecutionId. The execution ID should be
      // set by calling withNewExecutionId in the action that begins execution, like
      // Dataset.collect or DataFrameWriter.insertInto.
      throw new IllegalStateException("Execution ID should be set")
    }
  }

  
  def withSQLConfPropagated[T](sparkSession: SparkSession)(body: => T): T = {
    val sc = sparkSession.sparkContext
    // Set all the specified SQL configs to local properties, so that they can be available at
    // the executor side.
    val allConfigs = sparkSession.sessionState.conf.getAllConfs
    val originalLocalProps = allConfigs.collect {
      case (key, value) if key.startsWith("spark") =>
        val originalValue = sc.getLocalProperty(key)
        sc.setLocalProperty(key, value)
        (key, originalValue)
    }

    try {
      body
    } finally {
      for ((key, value) <- originalLocalProps) {
        sc.setLocalProperty(key, value)
      }
    }
  }
} 
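The elided portion of Example 29 presumably registers and unregisters the QueryExecution around each run; the general shape of such a registry, sketched here with hypothetical names rather than Spark's actual helpers, is a put paired with a remove in a finally block so entries cannot leak:

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicLong

object ExecutionRegistry {
  private val nextId  = new AtomicLong(0)
  private val running = new ConcurrentHashMap[Long, String]() // id -> description (stand-in for QueryExecution)

  // Register the execution for the duration of body, then always remove it.
  def withExecution[T](description: String)(body: => T): T = {
    val id = nextId.getAndIncrement()
    running.put(id, description)
    try body
    finally running.remove(id)
  }
}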
Example 30
Source File: AbsoluteOverrideCatalog.scala    From HANAVora-Extensions   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.catalyst.analysis

import java.util.concurrent.ConcurrentHashMap

import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Subquery}
import scala.collection.JavaConverters._


  abstract override def lookupRelation(
                                        tableIdent: TableIdentifier,
                                        alias: Option[String] = None): LogicalPlan = {
    getOverriddenTable(tableIdent) match {
      case Some(table) =>
        val tableName = getTableName(tableIdent)
        val tableWithQualifiers = Subquery(tableName, table)

        // If an alias was specified by the lookup, wrap the plan in a sub-query so that attributes
        // are properly qualified with this alias.
        alias.map(a => Subquery(a, tableWithQualifiers)).getOrElse(tableWithQualifiers)

      case None => throw new NoSuchTableException
    }
  }

  abstract override def getTables(databaseName: Option[String]): Seq[(String, Boolean)] = {
    overrides.keySet().asScala.map(_ -> true).toSeq
  }

  override def registerTable(tableIdent: TableIdentifier, plan: LogicalPlan): Unit = {
    overrides.put(getTableName(tableIdent), plan)
  }

  override def unregisterTable(tableIdent: TableIdentifier): Unit = {
    if (tableIdent.database.isEmpty) {
      overrides.remove(getTableName(tableIdent))
    }
  }

  override def unregisterAllTables(): Unit = {
    overrides.clear()
  }
} 
Example 31
Source File: TrackerPool.scala    From gatling-amqp-plugin   with Apache License 2.0 5 votes vote down vote up
package ru.tinkoff.gatling.amqp.client

import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import io.gatling.commons.util.Clock
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.stats.StatsEngine
import io.gatling.core.util.NameGen
import ru.tinkoff.gatling.amqp.action.AmqpLogging
import ru.tinkoff.gatling.amqp.client.AmqpMessageTrackerActor.MessageConsumed
import ru.tinkoff.gatling.amqp.protocol.AmqpMessageMatcher
import ru.tinkoff.gatling.amqp.request.AmqpProtocolMessage

class TrackerPool(
    pool: AmqpConnectionPool,
    system: ActorSystem,
    statsEngine: StatsEngine,
    clock: Clock,
    configuration: GatlingConfiguration
) extends AmqpLogging with NameGen {

  private val trackers = new ConcurrentHashMap[String, AmqpMessageTracker]

  def tracker(sourceQueue: String,
              listenerThreadCount: Int,
              messageMatcher: AmqpMessageMatcher,
              responseTransformer: Option[AmqpProtocolMessage => AmqpProtocolMessage]): AmqpMessageTracker =
    trackers.computeIfAbsent(
      sourceQueue,
      _ => {
        val actor =
          system.actorOf(AmqpMessageTrackerActor.props(statsEngine, clock, configuration), genName("amqpTrackerActor"))

        for (_ <- 1 to listenerThreadCount) {
          val consumerChannel = pool.createConsumerChannel
          consumerChannel.basicConsume(
            sourceQueue,
            true,
            (_, message) => {
              val receivedTimestamp = clock.nowMillis
              val amqpMessage       = AmqpProtocolMessage(message.getProperties, message.getBody)
              val replyId           = messageMatcher.responseMatchId(amqpMessage)
              logMessage(s"Message received AmqpMessageID=${message.getProperties.getMessageId} matchId=$replyId", amqpMessage)
              actor ! MessageConsumed(replyId,
                                      receivedTimestamp,
                                      responseTransformer.map(_(amqpMessage)).getOrElse(amqpMessage))
            },
            _ => ()
          )
        }

        new AmqpMessageTracker(actor)
      }
    )
} 
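Worth noting about Example 31: computeIfAbsent runs its factory atomically per key, and the Javadoc warns that other updates on the map may block while the computation is in progress and that the function must not update the map itself. The factory above creates an actor and several consumer channels, which is fine for one-time setup but is the kind of work to keep short. A compact reminder of the contract (hypothetical names):

import java.util.concurrent.ConcurrentHashMap

object LazyInit {
  private val cache = new ConcurrentHashMap[String, String]()

  // Keep the factory short: other updates on this map may block while it runs,
  // and it must not touch the same map re-entrantly.
  def get(key: String)(expensive: () => String): String =
    cache.computeIfAbsent(key, _ => expensive())
}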
Example 32
Source File: MarkdownPagesEndpoint.scala    From udash-core   with Apache License 2.0 5 votes vote down vote up
package io.udash.web.guide.markdown

import java.io.{BufferedReader, File, FileReader}
import java.time.Instant
import java.util.concurrent.ConcurrentHashMap

import com.avsystem.commons._
import com.vladsch.flexmark.ext.toc.TocExtension
import com.vladsch.flexmark.html.HtmlRenderer
import com.vladsch.flexmark.parser.Parser

import scala.concurrent.{ExecutionContext, Future}

final class MarkdownPagesEndpoint(guideResourceBase: String)(implicit ec: ExecutionContext) extends MarkdownPageRPC {

  private val tocExtension = TocExtension.create
  private val parser = Parser.builder.extensions(JList(tocExtension)).build
  private val renderer = HtmlRenderer.builder.extensions(JList(tocExtension)).build
  private val renderedPages = new ConcurrentHashMap[MarkdownPage, (Future[String], Instant)]

  private def render(file: File): Future[String] = Future {
    val reader = new BufferedReader(new FileReader(file))
    val document = parser.parseReader(reader)
    renderer.render(document)
  }

  override def loadContent(page: MarkdownPage): Future[String] = {
    val (result, _) = renderedPages.compute(page, { (_, cached) =>
      val pageFile = new File(guideResourceBase + page.file)
      cached.opt.filter {
        case (currentRender, renderedInstant) =>
          currentRender.value.exists(_.isSuccess) && renderedInstant.toEpochMilli >= pageFile.lastModified()
      }.getOrElse((render(pageFile), Instant.ofEpochMilli(pageFile.lastModified())))
    })
    result
  }
} 
Example 33
Source File: Tracer.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio.internal.stacktracer

import java.util.concurrent.ConcurrentHashMap

abstract class Tracer extends Serializable {
  def traceLocation(lambda: AnyRef): ZTraceElement
}

object Tracer {
  def globallyCached(tracer: Tracer): Tracer = cached(globalMutableSharedTracerCache)(tracer)

  def cached(tracerCache: TracerCache)(tracer: Tracer): Tracer =
    new Tracer {
      def traceLocation(lambda: AnyRef): ZTraceElement = {
        val clazz = lambda.getClass

        val res = tracerCache.get(clazz)
        if (res eq null) {
          val v = tracer.traceLocation(lambda)
          tracerCache.put(clazz, v)
          v
        } else {
          res
        }
      }
    }

  private[this] final type TracerCache = ConcurrentHashMap[Class[_], ZTraceElement]
  private[this] lazy val globalMutableSharedTracerCache = new TracerCache(10000)

  lazy val Empty = new Tracer {
    private[this] val res                                     = ZTraceElement.NoLocation("Tracer.Empty disables trace extraction")
    override def traceLocation(lambda: AnyRef): ZTraceElement = res
  }
} 
Example 34
Source File: OffHeapMemoryAllocator.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.canvas
import java.lang.ref.ReferenceQueue
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicLong

import wvlet.log.LogSupport

import scala.jdk.CollectionConverters._

private[canvas] case class MemoryRefHolder(ref: MemoryReference, size: Long)


class OffHeapMemoryAllocator extends AutoCloseable with LogSupport {
  type Address = Long

  private val allocatedMemoryAddresses = new ConcurrentHashMap[Address, MemoryRefHolder]().asScala
  private val memoryQueue              = new ReferenceQueue[Memory]()
  private val totalAllocatedMemorySize = new AtomicLong(0)

  {
    // Start a off-heap memory collector to release allocated memory upon GC
    val collectorThread = new Thread(new Runnable {
      override def run(): Unit = {
        // Drain the reference queue forever so every GC-collected Memory gets released
        while (true) {
          val ref = classOf[MemoryReference].cast(memoryQueue.remove())
          release(ref)
        }
      }
    })
    collectorThread.setName("AirframeCanvas-GC")
    collectorThread.setDaemon(true) // Allow shutting down JVM while this thread is running
    logger.debug("Start Canvas-GC memory collector")
    collectorThread.start()
  }

  def allocatedMemorySize: Long = {
    totalAllocatedMemorySize.get()
  }

  def allocate(size: Long): OffHeapMemory = {
    val address = UnsafeUtil.unsafe.allocateMemory(size)
    val m       = OffHeapMemory(address, size, this)
    register(m)
    m
  }

  private def register(m: OffHeapMemory): Unit = {
    // Register a memory reference that will be collected by GC
    val ref    = new MemoryReference(m, memoryQueue)
    val holder = MemoryRefHolder(ref, m.size)
    allocatedMemoryAddresses.put(m.address, holder)
    totalAllocatedMemorySize.addAndGet(m.size)
  }

  private[canvas] def release(m: OffHeapMemory): Unit = {
    releaseMemoryAt(m.address)
  }

  private[canvas] def release(reference: MemoryReference): Unit = {
    releaseMemoryAt(reference.address)
  }

  private def releaseMemoryAt(address: Long): Unit = {
    debug(f"Releasing memory at ${address}%x")
    allocatedMemoryAddresses.get(address) match {
      case Some(h) =>
        if (address != 0) {
          UnsafeUtil.unsafe.freeMemory(address)
          totalAllocatedMemorySize.getAndAdd(-h.size)
        }
        allocatedMemoryAddresses.remove(address)
      case None =>
        warn(f"Unknown allocated memory address: ${address}%x")
    }
  }

  override def close(): Unit = {
    for (address <- allocatedMemoryAddresses.keys) {
      releaseMemoryAt(address)
    }
  }
} 
Example 35
Source File: DIStats.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.tracing

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicLong

import wvlet.airframe.surface.Surface
import wvlet.airframe.{Design, Session}
import wvlet.log.LogSupport

import scala.jdk.CollectionConverters._

case class DIStatsReport(
    coverage: Double,
    observedTypes: Seq[Surface],
    initCount: Map[Surface, Long],
    injectCount: Map[Surface, Long],
    unusedTypes: Seq[Surface]
) {
  override def toString: String = {
    val report = Seq.newBuilder[String]

    // Coverage report
    report += "[coverage]"
    report += f"design coverage: ${coverage * 100}%.1f%%"
    if (unusedTypes.nonEmpty) {
      report += "[unused types]"
      unusedTypes.map { x => report += x.toString }
    }
    // Access stat report
    report += "[access stats]"
    val allTypes = injectCount.keySet ++ initCount.keySet
    for (s <- observedTypes) {
      report += s"[${s}] init:${initCount.getOrElse(s, 0)}, inject:${injectCount.getOrElse(s, 0)}"
    }

    report.result().mkString("\n")
  }
}


class DIStats extends LogSupport with Serializable {
  // This will holds the stat data while the session is active.
  // To avoid holding too many stats for applications that create many child sessions,
  // we will just store the aggregated stats.
  private val injectCountTable = new ConcurrentHashMap[Surface, AtomicLong]().asScala
  private val initCountTable   = new ConcurrentHashMap[Surface, AtomicLong]().asScala

  private val baseNano  = System.nanoTime()
  private val firstSeen = new ConcurrentHashMap[Surface, Long]().asScala

  private[airframe] def observe(s: Surface): Unit = {
    firstSeen.getOrElseUpdate(s, System.nanoTime() - baseNano)
  }

  private[airframe] def incrementInjectCount(session: Session, surface: Surface): Unit = {
    observe(surface)
    val counter = injectCountTable.getOrElseUpdate(surface, new AtomicLong(0))
    counter.incrementAndGet()
  }

  private[airframe] def incrementInitCount(session: Session, surface: Surface): Unit = {
    observe(surface)
    val counter = initCountTable.getOrElseUpdate(surface, new AtomicLong(0))
    counter.incrementAndGet()
  }

  private def getInjectCount(surface: Surface): Long = {
    injectCountTable.get(surface).map(_.get()).getOrElse(0)
  }

  def coverageReportFor(design: Design): DIStatsReport = {
    var bindingCount     = 0
    var usedBindingCount = 0
    val unusedBindings   = Seq.newBuilder[Surface]
    for (b <- design.binding) yield {
      bindingCount += 1
      val surface     = b.from
      val injectCount = getInjectCount(surface)
      if (injectCount > 0) {
        usedBindingCount += 1
      } else {
        unusedBindings += surface
      }
    }
    val coverage =
      if (bindingCount == 0) {
        1.0
      } else {
        usedBindingCount.toDouble / bindingCount
      }
    DIStatsReport(
      coverage = coverage,
      observedTypes = firstSeen.toSeq.sortBy(_._2).map(_._1).toSeq,
      initCount = initCountTable.map(x => x._1 -> x._2.get()).toMap,
      injectCount = injectCountTable.map(x => x._1 -> x._2.get()).toMap,
      unusedTypes = unusedBindings.result()
    )
  }
} 
Example 36
Source File: MetricLogger.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.fluentd
import java.util.concurrent.ConcurrentHashMap

import javax.annotation.{PostConstruct, PreDestroy}
import wvlet.airframe.codec.{MessageCodec, MessageCodecFactory}
import wvlet.airframe.surface.Surface
import wvlet.log.LogSupport

abstract class MetricLogger extends AutoCloseable {
  protected def tagPrefix: Option[String]
  def withTagPrefix(newTagPrefix: String): MetricLogger

  protected def emitRaw(fullTag: String, event: Map[String, Any]): Unit
  protected def emitRawMsgPack(fullTag: String, event: Array[Byte]): Unit

  def emit(tag: String, event: Map[String, Any]): Unit = {
    emitRaw(enrichTag(tag), event)
  }

  def emitMsgPack(tag: String, event: Array[Byte]): Unit = {
    emitRawMsgPack(enrichTag(tag), event)
  }

  private def enrichTag(tag: String): String = {
    tagPrefix match {
      case None => tag
      case Some(prefix) =>
        s"${prefix}.${tag}"
    }
  }

  @PostConstruct
  def start(): Unit = {}

  @PreDestroy
  override def close(): Unit
}

class TypedMetricLogger[T <: TaggedMetric](fluentdClient: MetricLogger, codec: MessageCodec[T]) {
  def emit(event: T): Unit = {
    fluentdClient.emitMsgPack(event.metricTag, codec.toMsgPack(event))
  }
}

class MetricLoggerFactory(
    fluentdClient: MetricLogger,
    codecFactory: MessageCodecFactory = MessageCodecFactory.defaultFactory.withMapOutput
) extends LogSupport {
  def getLogger: MetricLogger = fluentdClient
  def getLoggerWithTagPrefix(tagPrefix: String): MetricLogger =
    fluentdClient.withTagPrefix(tagPrefix)

  import scala.jdk.CollectionConverters._
  import scala.reflect.runtime.{universe => ru}

  private val loggerCache = new ConcurrentHashMap[Surface, TypedMetricLogger[_]]().asScala

  def getTypedLogger[T <: TaggedMetric: ru.TypeTag]: TypedMetricLogger[T] = {
    loggerCache
      .getOrElseUpdate(
        Surface.of[T], {
          // Ensure to serialize as map type of MessagePack
          val codec = codecFactory.withMapOutput.of[T]
          new TypedMetricLogger[T](getLogger, codec)
        }
      ).asInstanceOf[TypedMetricLogger[T]]
  }

  def getTypedLoggerWithTagPrefix[T <: TaggedMetric: ru.TypeTag](tagPrefix: String): TypedMetricLogger[T] = {
    loggerCache
      .getOrElseUpdate(
        Surface.of[T], {
          // Ensure to serialize as map type of MessagePack
          val codec = codecFactory.withMapOutput.of[T]
          new TypedMetricLogger[T](getLoggerWithTagPrefix(tagPrefix), codec)
        }
      ).asInstanceOf[TypedMetricLogger[T]]
  }

  @PostConstruct
  def start: Unit = {
    debug("Starting MetricLoggerFactory")
  }

  @PreDestroy
  def shutdown: Unit = {
    debug("Closing MetricLoggerFactory")
    fluentdClient.close()
  }
} 
Example 37
Source File: SingletonMemorySink.scala    From milan   with Apache License 2.0 5 votes vote down vote up
package com.amazon.milan.application.sinks

import java.time.{Duration, Instant}
import java.util.concurrent.{ConcurrentHashMap, ConcurrentLinkedQueue}
import java.util.function

import com.amazon.milan.Id
import com.amazon.milan.application.DataSink
import com.amazon.milan.typeutil.TypeDescriptor
import com.fasterxml.jackson.annotation.JsonIgnore
import com.fasterxml.jackson.databind.annotation.{JsonDeserialize, JsonSerialize}

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.TimeoutException


object SingletonMemorySink {
  private val values = new ConcurrentHashMap[String, ArrayBuffer[MemorySinkRecord[_]]]()
  private val nextSeqNum = new mutable.HashMap[String, Int]()
  private val locks = new ConcurrentHashMap[String, Object]()

  private def makeCreateBufferFunction[T]: java.util.function.Function[String, ArrayBuffer[MemorySinkRecord[_]]] =
    new function.Function[String, ArrayBuffer[MemorySinkRecord[_]]] {
      override def apply(t: String): ArrayBuffer[MemorySinkRecord[_]] =
        (new ArrayBuffer[MemorySinkRecord[T]]()).asInstanceOf[ArrayBuffer[MemorySinkRecord[_]]]
    }

  private val createLocker = new java.util.function.Function[String, Object] {
    override def apply(t: String): AnyRef = new Object()
  }

  
  @JsonIgnore
  def getRecordCount: Int = SingletonMemorySink.getBuffer(this.sinkId).size

  @JsonIgnore
  def getValues: List[T] = {
    SingletonMemorySink.getBuffer[T](this.sinkId).map(_.value).toList
  }

  @JsonIgnore
  def getRecords: List[MemorySinkRecord[T]] = {
    SingletonMemorySink.getBuffer[T](this.sinkId).toList
  }

  def waitForItems(itemCount: Int, timeout: Duration = null): Unit = {
    val endTime = if (timeout == null) Instant.MAX else Instant.now().plus(timeout)

    while (SingletonMemorySink.getBuffer(this.sinkId).size < itemCount) {
      if (Instant.now().isAfter(endTime)) {
        throw new TimeoutException()
      }

      Thread.sleep(1)
    }
  }

  override def equals(obj: Any): Boolean = {
    obj match {
      case o: SingletonMemorySink[_] =>
        this.sinkId.equals(o.sinkId)

      case _ =>
        false
    }
  }
}


class MemorySinkRecord[T](val seqNum: String, val createdTime: Instant, val value: T) extends Serializable 
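The explicit java.util.function.Function subclasses in Example 37 are the pre-2.12 way of handing a factory to computeIfAbsent; on Scala 2.12+ a plain lambda is SAM-converted automatically. A brief illustration (assumes Scala 2.12 or later, hypothetical names):

import java.util.concurrent.ConcurrentHashMap
import scala.collection.mutable.ArrayBuffer

object BufferRegistry {
  private val buffers = new ConcurrentHashMap[String, ArrayBuffer[String]]()

  // The Scala lambda is SAM-converted to java.util.function.Function, so no explicit subclass is needed.
  def bufferFor(sinkId: String): ArrayBuffer[String] =
    buffers.computeIfAbsent(sinkId, _ => ArrayBuffer.empty[String])
}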
Example 38
Source File: KustoClientCache.scala    From azure-kusto-spark   with Apache License 2.0 5 votes vote down vote up
package com.microsoft.kusto.spark.utils

import java.util.concurrent.ConcurrentHashMap
import java.util.function

import com.microsoft.azure.kusto.data.ConnectionStringBuilder
import com.microsoft.kusto.spark.authentication.{AadApplicationAuthentication, KeyVaultAuthentication, KustoAccessTokenAuthentication, KustoAuthentication}
import com.microsoft.kusto.spark.utils.{KustoConstants => KCONST}

object KustoClientCache {
  var clientCache = new ConcurrentHashMap[AliasAndAuth, KustoClient]

  def getClient(clusterAlias: String, authentication: KustoAuthentication): KustoClient = {
    val aliasAndAuth = AliasAndAuth(clusterAlias, authentication)
    clientCache.computeIfAbsent(aliasAndAuth, adderSupplier)
  }

  val adderSupplier: function.Function[AliasAndAuth, KustoClient] = new java.util.function.Function[AliasAndAuth, KustoClient]() {
    override def apply(aa: AliasAndAuth): KustoClient = createClient(aa)
  }

  private def createClient(aliasAndAuth: AliasAndAuth): KustoClient = {
    val (engineKcsb, ingestKcsb) = aliasAndAuth.authentication match {
      case null => throw new MatchError("Can't create ConnectionStringBuilder with null authentication params")
      case app: AadApplicationAuthentication => (
        ConnectionStringBuilder.createWithAadApplicationCredentials(aliasAndAuth.engineUri, app.ID, app.password, app.authority),
        ConnectionStringBuilder.createWithAadApplicationCredentials(aliasAndAuth.ingestUri, app.ID, app.password, app.authority)
      )
      case keyVaultParams: KeyVaultAuthentication =>
        val app = KeyVaultUtils.getAadAppParametersFromKeyVault(keyVaultParams)
        (
          ConnectionStringBuilder.createWithAadApplicationCredentials(aliasAndAuth.engineUri, app.ID, app.password, app.authority),
          ConnectionStringBuilder.createWithAadApplicationCredentials(aliasAndAuth.ingestUri, app.ID, app.password, app.authority)
        )
      case userToken: KustoAccessTokenAuthentication => (
        ConnectionStringBuilder.createWithAadAccessTokenAuthentication(aliasAndAuth.engineUri, userToken.token),
        ConnectionStringBuilder.createWithAadAccessTokenAuthentication(aliasAndAuth.ingestUri, userToken.token)
      )
    }

    engineKcsb.setClientVersionForTracing(KCONST.clientName)
    ingestKcsb.setClientVersionForTracing(KCONST.clientName)

    new KustoClient(aliasAndAuth.clusterAlias, engineKcsb, ingestKcsb)
  }

  private[KustoClientCache] case class AliasAndAuth(clusterAlias: String, authentication: KustoAuthentication) {
    private[AliasAndAuth] val clusterUri = "https://%s.kusto.windows.net"
    val ingestClusterAlias = s"ingest-$clusterAlias"
    val engineUri: String = clusterUri.format(clusterAlias)
    val ingestUri: String = clusterUri.format(ingestClusterAlias)

    override def equals(that: Any): Boolean = that match {
      case aa: AliasAndAuth => clusterAlias == aa.clusterAlias && authentication == aa.authentication
      case _ => false
    }

    override def hashCode(): Int = clusterAlias.hashCode + authentication.hashCode
  }

} 
Example 39
Source File: MorphlinesParser.scala    From sparta   with Apache License 2.0 5 votes vote down vote up
package com.stratio.sparta.plugin.transformation.morphline

import java.io.{ByteArrayInputStream, Serializable => JSerializable}
import java.util.concurrent.ConcurrentHashMap

import com.stratio.sparta.sdk.pipeline.transformation.Parser
import com.stratio.sparta.sdk.properties.ValidatingPropertyMap._
import com.typesafe.config.ConfigFactory
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{StructField, StructType}
import org.kitesdk.morphline.api.Record

import scala.collection.JavaConverters._
import scala.util.Try

class MorphlinesParser(order: Integer,
                       inputField: Option[String],
                       outputFields: Seq[String],
                       schema: StructType,
                       properties: Map[String, JSerializable])
  extends Parser(order, inputField, outputFields, schema, properties) {

  assert(inputField.isDefined, "It's necessary to define one inputField in the Morphline Transformation")

  private val config: String = properties.getString("morphline")

  override def parse(row: Row): Seq[Row] = {
    val inputValue = Option(row.get(inputFieldIndex))
    val newData = Try {
      inputValue match {
        case Some(s: String) =>
          if (s.isEmpty) returnWhenError(new IllegalStateException(s"Impossible to parse because value is empty"))
          else parseWithMorphline(new ByteArrayInputStream(s.getBytes("UTF-8")))
        case Some(b: Array[Byte]) =>
          if (b.length == 0) returnWhenError(new IllegalStateException(s"Impossible to parse because value is empty"))
          else parseWithMorphline(new ByteArrayInputStream(b))
        case _ =>
          returnWhenError(new IllegalStateException(s"Impossible to parse because value is empty"))
      }
    }
    returnData(newData, removeInputFieldMorphline(row))
  }

  private def removeIndex(row: Row, inputFieldIndex: Int): Row =
    if (row.size < inputFieldIndex) row
    else Row.fromSeq(row.toSeq.take(inputFieldIndex) ++ row.toSeq.drop(inputFieldIndex + 1))

  private def removeInputFieldMorphline(row: Row): Row =
    if (inputFieldRemoved && inputField.isDefined) removeIndex(row, inputFieldIndex)
    else row

  private def parseWithMorphline(value: ByteArrayInputStream): Row = {
    val record = new Record()
    record.put(inputField.get, value)
    MorphlinesParser(order, config, outputFieldsSchema).process(record)
  }
}

object MorphlinesParser {

  private val instances = new ConcurrentHashMap[String, KiteMorphlineImpl].asScala

  def apply(order: Integer, config: String, outputFieldsSchema: Array[StructField]): KiteMorphlineImpl = {
    instances.get(config) match {
      case Some(kiteMorphlineImpl) =>
        kiteMorphlineImpl
      case None =>
        val kiteMorphlineImpl = KiteMorphlineImpl(ConfigFactory.parseString(config), outputFieldsSchema)
        instances.putIfAbsent(config, kiteMorphlineImpl)
        kiteMorphlineImpl
    }
  }
} 
Example 40
Source File: HashMapWrapper.scala    From freestyle   with Apache License 2.0 5 votes vote down vote up
package freestyle.free.cache.hashmap

import freestyle.free.cache.KeyValueMap
import freestyle.free.Capture
import java.util.concurrent.ConcurrentHashMap


  override def get(key: Key): F[Option[Value]] = C.capture {
    Option(table.get(hkey(key))) // Option.apply handles null
  }

  override def put(key: Key, value: Value): F[Unit] = C.capture {
    table.put(hkey(key), value) // Option.apply handles null
    ()
  }

  override def putAll(keyValues: Map[Key, Value]): F[Unit] = C.capture {
    keyValues.foreach{ case (k, v) => table.put(hkey(k), v) }
  }

  override def putIfAbsent(key: Key, newVal: Value): F[Unit] = C.capture {
    table.putIfAbsent(hkey(key), newVal)
    ()
  }

  override def delete(key: Key): F[Unit] = C.capture {
    table.remove(hkey(key)) // Option.apply handles null
    ()
  }

  override def hasKey(key: Key): F[Boolean] =
    C.capture(table.containsKey(hkey(key)))

  override def keys: F[List[Key]] = {
    import scala.collection.JavaConverters._
    C.capture(table.keySet().asScala.toList.map(_.key))
  }

  override def clear: F[Unit] = C.capture(table.clear)

  override def replace(key: Key, newVal: Value): F[Unit] = C.capture {
    table.replace(hkey(key), newVal)
    ()
  }

  override def isEmpty: F[Boolean] = C.capture{
    table.isEmpty
  }

}

object implicits {

  implicit def concurrentHashMapWrapper[F[_], Key, Value](
    implicit hasher: Hasher[Key],
    C: Capture[F]
  ): KeyValueMap[F, Key, Value] =
    new ConcurrentHashMapWrapper[F, Key, Value]()

} 
Example 41
Source File: ScribeLoggerFactory.scala    From scribe   with MIT License 5 votes vote down vote up
package scribe.slf4j

import java.util.concurrent.ConcurrentHashMap

import org.slf4j.{Logger, ILoggerFactory}

class ScribeLoggerFactory extends ILoggerFactory {
  private val map = new ConcurrentHashMap[String, Logger]

  override def getLogger(name: String): Logger = {
    val loggerName = if (name.equalsIgnoreCase(Logger.ROOT_LOGGER_NAME)) {
      ""
    } else {
      name
    }
    Option(map.get(loggerName)) match {
      case Some(logger) => logger
      case None => {
        val adapter = new ScribeLoggerAdapter(loggerName)
        val old = map.putIfAbsent(loggerName, adapter)
        Option(old) match {
          case Some(a) => a
          case None => adapter
        }
      }
    }
  }
} 
Example 42
Source File: ScribeLoggerFactory.scala    From scribe   with MIT License 5 votes vote down vote up
package scribe.slf4j

import java.util.concurrent.ConcurrentHashMap

import org.slf4j.{ILoggerFactory, Logger}

object ScribeLoggerFactory extends ILoggerFactory {
  private lazy val map = new ConcurrentHashMap[String, Logger]()

  override def getLogger(name: String): Logger = Option(map.get(name)) match {
    case Some(logger) => logger
    case None => {
      val logger = new ScribeLoggerAdapter(name)
      val oldInstance = map.putIfAbsent(name, logger)
      Option(oldInstance).getOrElse(logger)
    }
  }
} 
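Examples 41 and 42 both rely on the same race-safe idiom: build a candidate, putIfAbsent it, and keep whichever instance actually won the race. Condensed into one helper (a sketch, not scribe's API):

import java.util.concurrent.ConcurrentHashMap

object Interner {
  private val cache = new ConcurrentHashMap[String, AnyRef]()

  // putIfAbsent returns the previously mapped value, or null if this call installed `candidate`.
  def intern(name: String, candidate: AnyRef): AnyRef = {
    val previous = cache.putIfAbsent(name, candidate)
    if (previous == null) candidate else previous
  }
}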
Example 43
Source File: RestService.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.project

import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import com.shashank.akkahttp.project.Models.{LoadRequest, ServiceJsonProtoocol}
import spray.json.JsArray

import scala.collection.JavaConverters._
import spray.json.{DefaultJsonProtocol, JsArray, pimpAny}
import spray.json.DefaultJsonProtocol._
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql._


trait RestService {
  implicit val system: ActorSystem
  implicit val materializer: ActorMaterializer
  implicit val sparkSession: SparkSession
  val datasetMap = new ConcurrentHashMap[String, Dataset[Row]]()

  import ServiceJsonProtoocol._

  val route =
    pathSingleSlash {
      get {
        complete {
          "welcome to rest service"
        }
      }
    } ~
      path("load") {
        post {
          entity(as[LoadRequest]) {
            loadRequest => complete {
              val id = "" + System.nanoTime()
              val dataset = sparkSession.read.format("csv")
                .option("header", "true")
                .load(loadRequest.path)
              datasetMap.put(id, dataset)
              id
            }
          }
        }
      } ~
      path("view" / """[\w[0-9]-_]+""".r) { id =>
        get {
          complete {
            val dataset = datasetMap.get(id)
            dataset.take(10).map(row => row.toString())
          }
        }
      }
} 
Example 44
Source File: StoppedReadSideProcessor.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.example.helloworld.impl.readsides

import java.util.concurrent.ConcurrentHashMap

import akka.Done
import akka.NotUsed
import akka.persistence.query.Offset
import akka.stream.scaladsl.Flow
import com.example.helloworld.impl.GreetingMessageChanged
import com.example.helloworld.impl.HelloWorldEvent
import com.lightbend.lagom.scaladsl.persistence.AggregateEventTag
import com.lightbend.lagom.scaladsl.persistence.EventStreamElement
import com.lightbend.lagom.scaladsl.persistence.ReadSide
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor.ReadSideHandler

import scala.concurrent.Future

// -------- Started instance of the processor --------
object StartedReadSideProcessor {
  val Name = "StartedProcessor"
  private val greetings = new ConcurrentHashMap[String, String]()
}

class StartedReadSideProcessor(readSide: ReadSide)
    extends AbstractReadSideProcessor(
      readSide,
      StartedReadSideProcessor.Name,
      StartedReadSideProcessor.greetings
    )

// -------- Stopped instance of the processor --------
object StoppedReadSideProcessor {
  val Name = "StoppedProcessor"
  private val greetings = new ConcurrentHashMap[String, String]()
}

class StoppedReadSideProcessor(readSide: ReadSide)
    extends AbstractReadSideProcessor(
      readSide,
      StoppedReadSideProcessor.Name,
      StoppedReadSideProcessor.greetings
    )

// -------- Abstract processor --------
class AbstractReadSideProcessor(private val readSide: ReadSide,
                                processorName: String,
                                inMemoryView: ConcurrentHashMap[String, String])
    extends ReadSideProcessor[HelloWorldEvent] {

  override def readSideName: String = processorName

  override def aggregateTags: Set[AggregateEventTag[HelloWorldEvent]] =
    HelloWorldEvent.Tag.allTags

  def getLastMessage(id: String): String =
    inMemoryView.getOrDefault(id, "default-projected-message")

  override def buildHandler()
    : ReadSideProcessor.ReadSideHandler[HelloWorldEvent] = {
    new ReadSideHandler[HelloWorldEvent] {

      val completedDone = Future.successful(Done)
      override def globalPrepare(): Future[Done] = completedDone

      override def prepare(
        tag: AggregateEventTag[HelloWorldEvent]
      ): Future[Offset] =
        Future.successful(Offset.noOffset)

      override def handle()
        : Flow[EventStreamElement[HelloWorldEvent], Done, NotUsed] = {
        Flow[EventStreamElement[HelloWorldEvent]]
          .mapAsync(1) { streamElement =>
            streamElement.event match {
              case GreetingMessageChanged(id, message) =>
                inMemoryView.put(id, message)
                completedDone
            }
          }
      }
    }
  }

} 
Example 45
Source File: ProducerStubFactory.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.scaladsl.testkit

import java.util.concurrent.ConcurrentHashMap
import java.util.function.{ Function => JFunction }

import akka.Done
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Props
import akka.stream.Materializer
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Source
import com.lightbend.lagom.internal.testkit.InternalSubscriberStub
import com.lightbend.lagom.internal.testkit.TopicBufferActor
import com.lightbend.lagom.scaladsl.api.broker.Topic.TopicId
import com.lightbend.lagom.scaladsl.api.broker.Message
import com.lightbend.lagom.scaladsl.api.broker.Subscriber
import com.lightbend.lagom.scaladsl.api.broker.Topic

import scala.concurrent.Future


  def send(message: Message[T]): Unit = bufferActor.tell(message, ActorRef.noSender)
}

private[lagom] class TopicStub[T](val topicId: Topic.TopicId, topicBuffer: ActorRef)(
    implicit materializer: Materializer
) extends Topic[T] {
  def subscribe = new SubscriberStub[T, T]("default", topicBuffer, _.payload)

  class SubscriberStub[Payload, SubscriberPayload](
      groupId: String,
      topicBuffer: ActorRef,
      transform: Message[Payload] => SubscriberPayload
  )(implicit materializer: Materializer)
      extends InternalSubscriberStub[Payload, Message](groupId, topicBuffer)
      with Subscriber[SubscriberPayload] {
    override def withMetadata: Subscriber[Message[SubscriberPayload]] =
      new SubscriberStub[Payload, Message[SubscriberPayload]](
        groupId,
        topicBuffer,
        msg => msg.withPayload(transform(msg))
      )

    override def withGroupId(groupId: String): Subscriber[SubscriberPayload] =
      new SubscriberStub[Payload, SubscriberPayload](groupId, topicBuffer, transform)

    override def atMostOnceSource: Source[SubscriberPayload, _] = super.mostOnceSource.map(transform)

    override def atLeastOnce(flow: Flow[SubscriberPayload, Done, _]): Future[Done] =
      super.leastOnce(Flow[Message[Payload]].map(transform).via(flow))
  }
} 
Example 46
Source File: SparkSQLOperationManager.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hive.thriftserver.server

import java.util.{Map => JMap}
import java.util.concurrent.ConcurrentHashMap

import org.apache.hive.service.cli._
import org.apache.hive.service.cli.operation.{ExecuteStatementOperation, Operation, OperationManager}
import org.apache.hive.service.cli.session.HiveSession

import org.apache.spark.internal.Logging
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.HiveSessionState
import org.apache.spark.sql.hive.thriftserver.{ReflectionUtils, SparkExecuteStatementOperation}


private[thriftserver] class SparkSQLOperationManager()
  extends OperationManager with Logging {

  val handleToOperation = ReflectionUtils
    .getSuperField[JMap[OperationHandle, Operation]](this, "handleToOperation")

  val sessionToActivePool = new ConcurrentHashMap[SessionHandle, String]()
  val sessionToContexts = new ConcurrentHashMap[SessionHandle, SQLContext]()

  override def newExecuteStatementOperation(
      parentSession: HiveSession,
      statement: String,
      confOverlay: JMap[String, String],
      async: Boolean): ExecuteStatementOperation = synchronized {
    val sqlContext = sessionToContexts.get(parentSession.getSessionHandle)
    require(sqlContext != null, s"Session handle: ${parentSession.getSessionHandle} has not been" +
      s" initialized or had already closed.")
    val sessionState = sqlContext.sessionState.asInstanceOf[HiveSessionState]
    val runInBackground = async && sessionState.hiveThriftServerAsync
    val operation = new SparkExecuteStatementOperation(parentSession, statement, confOverlay,
      runInBackground)(sqlContext, sessionToActivePool)
    handleToOperation.put(operation.getHandle, operation)
    logDebug(s"Created Operation for $statement with session=$parentSession, " +
      s"runInBackground=$runInBackground")
    operation
  }
} 
Example 47
Source File: NettyStreamManager.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.rpc.netty

import java.io.File
import java.util.concurrent.ConcurrentHashMap

import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer}
import org.apache.spark.network.server.StreamManager
import org.apache.spark.rpc.RpcEnvFileServer
import org.apache.spark.util.Utils


private[netty] class NettyStreamManager(rpcEnv: NettyRpcEnv)
  extends StreamManager with RpcEnvFileServer {

  private val files = new ConcurrentHashMap[String, File]()
  private val jars = new ConcurrentHashMap[String, File]()
  private val dirs = new ConcurrentHashMap[String, File]()

  override def getChunk(streamId: Long, chunkIndex: Int): ManagedBuffer = {
    throw new UnsupportedOperationException()
  }

  override def openStream(streamId: String): ManagedBuffer = {
    val Array(ftype, fname) = streamId.stripPrefix("/").split("/", 2)
    val file = ftype match {
      case "files" => files.get(fname)
      case "jars" => jars.get(fname)
      case other =>
        val dir = dirs.get(ftype)
        require(dir != null, s"Invalid stream URI: $ftype not found.")
        new File(dir, fname)
    }

    if (file != null && file.isFile()) {
      new FileSegmentManagedBuffer(rpcEnv.transportConf, file, 0, file.length())
    } else {
      null
    }
  }

  override def addFile(file: File): String = {
    val existingPath = files.putIfAbsent(file.getName, file)
    require(existingPath == null || existingPath == file,
      s"File ${file.getName} was already registered with a different path " +
        s"(old path = $existingPath, new path = $file)")
    s"${rpcEnv.address.toSparkURL}/files/${Utils.encodeFileNameToURIRawPath(file.getName())}"
  }

  override def addJar(file: File): String = {
    val existingPath = jars.putIfAbsent(file.getName, file)
    require(existingPath == null || existingPath == file,
      s"File ${file.getName} was already registered with a different path " +
        s"(old path = $existingPath, new path = $file)")
    s"${rpcEnv.address.toSparkURL}/jars/${Utils.encodeFileNameToURIRawPath(file.getName())}"
  }

  override def addDirectory(baseUri: String, path: File): String = {
    val fixedBaseUri = validateDirectoryUri(baseUri)
    require(dirs.putIfAbsent(fixedBaseUri.stripPrefix("/"), path) == null,
      s"URI '$fixedBaseUri' already registered.")
    s"${rpcEnv.address.toSparkURL}$fixedBaseUri"
  }

} 
Example 48
Source File: MutableURLClassLoader.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.util

import java.net.{URLClassLoader, URL}
import java.util.Enumeration
import java.util.concurrent.ConcurrentHashMap

import scala.collection.JavaConversions._

import org.apache.spark.util.ParentClassLoader


  private val locks = new ConcurrentHashMap[String, Object]()

  override def loadClass(name: String, resolve: Boolean): Class[_] = {
    var lock = locks.get(name)
    if (lock == null) {
      val newLock = new Object()
      lock = locks.putIfAbsent(name, newLock)
      if (lock == null) {
        lock = newLock
      }
    }

    lock.synchronized {
      try {
        super.loadClass(name, resolve)
      } catch {
        case e: ClassNotFoundException =>
          parentClassLoader.loadClass(name, resolve)
      }
    }
  }

  override def getResource(name: String): URL = {
    val url = super.findResource(name)
    val res = if (url != null) url else parentClassLoader.getResource(name)
    res
  }

  override def getResources(name: String): Enumeration[URL] = {
    val urls = super.findResources(name)
    val res =
      if (urls != null && urls.hasMoreElements()) {
        urls
      } else {
        parentClassLoader.getResources(name)
      }
    res
  }

  override def addURL(url: URL) {
    super.addURL(url)
  }

} 
Example 49
Source File: TimeStampedHashSet.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.util

import java.util.concurrent.ConcurrentHashMap

import scala.collection.JavaConversions
import scala.collection.mutable.Set

private[spark] class TimeStampedHashSet[A] extends Set[A] {
  val internalMap = new ConcurrentHashMap[A, Long]()

  def contains(key: A): Boolean = {
    // Use containsKey: ConcurrentHashMap.contains(Object) is the legacy Hashtable method that tests values.
    internalMap.containsKey(key)
  }

  def iterator: Iterator[A] = {
    val jIterator = internalMap.entrySet().iterator()
    JavaConversions.asScalaIterator(jIterator).map(_.getKey)
  }

  override def + (elem: A): Set[A] = {
    val newSet = new TimeStampedHashSet[A]
    newSet ++= this
    newSet += elem
    newSet
  }

  override def - (elem: A): Set[A] = {
    val newSet = new TimeStampedHashSet[A]
    newSet ++= this
    newSet -= elem
    newSet
  }

  override def += (key: A): this.type = {
    internalMap.put(key, currentTime)
    this
  }

  override def -= (key: A): this.type = {
    internalMap.remove(key)
    this
  }

  override def empty: Set[A] = new TimeStampedHashSet[A]()

  override def size(): Int = internalMap.size()

  override def foreach[U](f: (A) => U): Unit = {
    val iterator = internalMap.entrySet().iterator()
    while(iterator.hasNext) {
      f(iterator.next.getKey)
    }
  }

  
  def clearOldValues(threshTime: Long) {
    val iterator = internalMap.entrySet().iterator()
    while(iterator.hasNext) {
      val entry = iterator.next()
      if (entry.getValue < threshTime) {
        iterator.remove()
      }
    }
  }

  private def currentTime: Long = System.currentTimeMillis()
} 
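Example 49 keys every element to its insertion timestamp so that clearOldValues can expire entries. When only plain set semantics are needed, Java 8+ also provides ConcurrentHashMap.newKeySet, which hides the dummy values entirely. A short sketch of that simpler variant (hypothetical names):

import java.util.concurrent.ConcurrentHashMap

object ConcurrentSetExample {
  // A concurrent Set[String] view backed by a ConcurrentHashMap with Boolean.TRUE values.
  private val seen = ConcurrentHashMap.newKeySet[String]()

  def markSeen(id: String): Boolean = seen.add(id) // true if the id was not present before
  def isSeen(id: String): Boolean   = seen.contains(id)
}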
Example 50
Source File: BlockManagerId.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.storage

import java.io.{Externalizable, IOException, ObjectInput, ObjectOutput}
import java.util.concurrent.ConcurrentHashMap

import org.apache.spark.SparkContext
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.util.Utils


  def apply(execId: String, host: String, port: Int) =
    getCachedBlockManagerId(new BlockManagerId(execId, host, port))

  def apply(in: ObjectInput) = {
    val obj = new BlockManagerId()
    obj.readExternal(in)
    getCachedBlockManagerId(obj)
  }

  val blockManagerIdCache = new ConcurrentHashMap[BlockManagerId, BlockManagerId]()

  def getCachedBlockManagerId(id: BlockManagerId): BlockManagerId = {
    blockManagerIdCache.putIfAbsent(id, id)
    blockManagerIdCache.get(id)
  }
} 
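getCachedBlockManagerId in Example 50 is the classic two-step interning idiom: putIfAbsent installs the candidate if the key is absent, and the follow-up get returns the canonical instance either way. On Java 8+ with Scala 2.12+ the same interning collapses into a single call; a hedged one-liner (not Spark's code):

import java.util.concurrent.ConcurrentHashMap

object Canonicalizer {
  private val cache = new ConcurrentHashMap[String, String]()

  // Returns the first instance ever registered for an equal key and reuses it afterwards.
  def canonical(value: String): String = cache.computeIfAbsent(value, v => v)
}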
Example 51
Source File: PoolSuite.scala    From reactive-async   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package com.phaller.rasync
package test

import java.util.concurrent.{ ConcurrentHashMap, CountDownLatch }

import com.phaller.rasync.cell.{ Cell, CellCompleter }
import org.scalatest.FunSuite

import scala.concurrent.{ Await, Promise }
import scala.concurrent.duration._
import com.phaller.rasync.lattice.Updater
import com.phaller.rasync.pool.HandlerPool
import com.phaller.rasync.test.lattice.{ IntUpdater, StringIntKey }

class PoolSuite extends FunSuite {
  test("onQuiescent") {
    val pool = HandlerPool[Int]

    var i = 0
    while (i < 10000) {
      val p1 = Promise[Boolean]()
      val p2 = Promise[Boolean]()
      pool.execute { () => { p1.success(true) }: Unit }
      pool.onQuiescent { () => p2.success(true) }
      try {
        Await.result(p2.future, 1.seconds)
      } catch {
        case t: Throwable =>
          assert(false, s"failure after $i iterations")
      }
      i += 1
    }

    pool.shutdown()
  }

  test("register cells concurrently") {
    implicit val stringIntUpdater: Updater[Int] = new IntUpdater

    implicit val pool = new HandlerPool[Int, Null](new StringIntKey("s"))
    var regCells = new ConcurrentHashMap[Cell[Int, Null], Cell[Int, Null]]()
    for (_ <- 1 to 1000) {
      pool.execute(() => {
        val completer = CellCompleter[Int, Null]()
        completer.cell.trigger()
        regCells.put(completer.cell, completer.cell)
        ()
      })
    }
    val fut = pool.quiescentResolveCell // set all (registered) cells to 1 via key.fallback
    Await.ready(fut, 5.seconds)

    regCells.values().removeIf(_.getResult() != 0)
    assert(regCells.size === 0)
  }

  test("register cells concurrently 2") {
    implicit val stringIntUpdater: Updater[Int] = new IntUpdater

    implicit val pool = new HandlerPool[Int, Null](new StringIntKey("s"))
    var regCells = new ConcurrentHashMap[Cell[Int, Null], Cell[Int, Null]]()
    for (_ <- 1 to 1000) {
      pool.execute(() => {
        val completer = CellCompleter[Int, Null]()
        regCells.put(completer.cell, completer.cell)
        ()
      })
    }
    val fut = pool.quiescentResolveCell // set all (registered) cells to 1 via key.fallback
    Await.ready(fut, 5.seconds)

    assert(regCells.size === 1000)
  }

  test("handler pool quiescence") {
    implicit val pool = new HandlerPool[Int, Null]
    val latch = new CountDownLatch(1)
    val latch2 = new CountDownLatch(1)
    pool.execute { () => latch.await() }
    pool.onQuiescent { () => latch2.countDown() }
    latch.countDown()

    latch2.await()
    assert(true)

    pool.onQuiescenceShutdown()
  }

} 
Example 52
Source File: DummyCpgProvider.scala    From codepropertygraph   with Apache License 2.0 5 votes vote down vote up
package io.shiftleft.cpgserver.cpg

import java.util.UUID
import java.util.concurrent.{ConcurrentHashMap, Executors}

import scala.jdk.CollectionConverters._
import scala.collection.concurrent.Map
import scala.concurrent.ExecutionContext
import cats.data.OptionT
import cats.effect.{Blocker, ContextShift, IO}
import io.shiftleft.codepropertygraph.Cpg
import io.shiftleft.codepropertygraph.generated.nodes.NewMethod
import io.shiftleft.cpgserver.query.{CpgOperationFailure, CpgOperationResult, CpgOperationSuccess}
import io.shiftleft.passes.{CpgPass, DiffGraph}
import io.shiftleft.semanticcpg.language._


class DummyCpgProvider(implicit cs: ContextShift[IO]) extends CpgProvider {

  private val blocker: Blocker =
    Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newFixedThreadPool(2)))

  private val cpgMap: Map[UUID, CpgOperationResult[Cpg]] =
    new ConcurrentHashMap[UUID, CpgOperationResult[Cpg]].asScala

  private val uuidProvider = IO(UUID.randomUUID)

  private class MyPass(cpg: Cpg) extends CpgPass(cpg) {
    override def run(): Iterator[DiffGraph] = {
      implicit val diffGraph: DiffGraph.Builder = DiffGraph.newBuilder
      NewMethod(name = "main", isExternal = false).start.store
      Iterator(diffGraph.build())
    }
  }

  override def createCpg(filenames: Set[String]): IO[UUID] = {
    val cpg = new Cpg

    for {
      cpgId <- uuidProvider
      _ <- blocker
        .blockOn(IO(new MyPass(cpg).createAndApply()))
        .runAsync {
          case Right(_) => IO(cpgMap.put(cpgId, CpgOperationSuccess(cpg))).map(_ => ())
          case Left(ex) => IO(cpgMap.put(cpgId, CpgOperationFailure(ex))).map(_ => ())
        }
        .toIO
    } yield cpgId
  }

  override def retrieveCpg(uuid: UUID): OptionT[IO, CpgOperationResult[Cpg]] = {
    OptionT.fromOption(cpgMap.get(uuid))
  }
} 
Example 53
Source File: ServerAmmoniteExecutor.scala    From codepropertygraph   with Apache License 2.0 5 votes vote down vote up
package io.shiftleft.cpgserver.query

import cats.data.OptionT
import cats.effect.{Blocker, ContextShift, IO}

import io.shiftleft.codepropertygraph.Cpg
import io.shiftleft.console.scripting.AmmoniteExecutor

import java.util.UUID
import java.util.concurrent.{ConcurrentHashMap, Executors}

import scala.collection.concurrent.Map
import scala.concurrent.ExecutionContext
import scala.jdk.CollectionConverters._

abstract class ServerAmmoniteExecutor(implicit cs: ContextShift[IO]) extends AmmoniteExecutor {

  private val blocker: Blocker =
    Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newFixedThreadPool(2)))

  private val queryResultMap: Map[UUID, CpgOperationResult[String]] =
    new ConcurrentHashMap[UUID, CpgOperationResult[String]].asScala

  private val uuidProvider = IO { UUID.randomUUID }

  def executeQuery(cpg: Cpg, query: String): IO[UUID] = {
    for {
      resultUuid <- uuidProvider
      _ <- blocker
        .blockOn(runQuery(query, cpg))
        .runAsync {
          case Right(result) => IO(queryResultMap.put(resultUuid, CpgOperationSuccess(result.toString))).map(_ => ())
          case Left(ex)      => IO(queryResultMap.put(resultUuid, CpgOperationFailure(ex))).map(_ => ())
        }
        .toIO
    } yield resultUuid
  }

  def retrieveQueryResult(queryId: UUID): OptionT[IO, CpgOperationResult[String]] = {
    OptionT.fromOption(queryResultMap.get(queryId))
  }

  def executeQuerySync(cpg: Cpg, query: String): IO[CpgOperationResult[String]] = {
    for {
      result <- runQuery(query, cpg)
        .map(v => CpgOperationSuccess(v.toString))
        .handleErrorWith(err => IO(CpgOperationFailure(err)))
    } yield result
  }
} 
Example 54
Source File: TagSequenceNumbering.scala    From akka-persistence-couchbase   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.couchbase.internal
import java.util.concurrent.ConcurrentHashMap

import akka.annotation.InternalApi
import akka.event.LoggingAdapter
import akka.persistence.couchbase.internal.CouchbaseSchema.{Fields, Queries}
import com.couchbase.client.java.query.N1qlParams

import scala.concurrent.{ExecutionContext, Future}


  def evictSeqNrsFor(pid: PersistenceId): Unit = {
    val keys = taggingPerPidSequenceNumbers.keySet.iterator()
    while (keys.hasNext) {
      val key @ (keyPid, _) = keys.next()
      if (keyPid == pid)
        keys.remove()
    }
  }

  protected def currentTagSeqNrFromDb(pid: PersistenceId, tag: Tag): Future[Option[Long]] =
    withCouchbaseSession { session =>
      val query = highestTagSequenceNumberQuery(pid, tag, queryConsistency)
      log.debug("currentTagSeqNrFromDb: {}", query)
      session.singleResponseQuery(query).map {
        case Some(json) => Some(json.getLong(Fields.TagSeqNr))
        case None => None
      }
    }
} 
Example 55
Source File: ClassStore.scala    From avrohugger   with Apache License 2.0 5 votes vote down vote up
package avrohugger
package stores

import org.apache.avro.Schema

import treehugger.forest.Symbol

import java.util.concurrent.ConcurrentHashMap
import scala.jdk.CollectionConverters._

class ClassStore {

  val generatedClasses: scala.collection.concurrent.Map[Schema, Symbol] = {
    new ConcurrentHashMap[Schema, Symbol]().asScala
  }

  def accept(schema: Schema, caseClassDef: Symbol) = {
    if (!generatedClasses.contains(schema)) {
      val _ = generatedClasses += schema -> caseClassDef
    }
  }
} 
Example 56
Source File: Memoize.scala    From bosatsu   with Apache License 2.0 5 votes vote down vote up
package org.bykn.bosatsu.graph

import java.util.concurrent.ConcurrentHashMap
import org.bykn.bosatsu.Par
import scala.collection.immutable.SortedMap

object Memoize {

  
  def memoizeDagFuture[A, B](fn: (A, A => Par.F[B]) => Par.F[B]): A => Par.F[B] = {
    val cache: ConcurrentHashMap[A, Par.P[B]] = new ConcurrentHashMap[A, Par.P[B]]()

    new Function[A, Par.F[B]] { self =>
      def apply(a: A) = {
        val prom = Par.promise[B]
        val prevProm = cache.putIfAbsent(a, prom)
        if (prevProm eq null) {
          // no one was running this job, we have to
          val resFut = fn(a, self)
          Par.complete(prom, resFut)
          resFut
        }
        else {
          // someone else is already working:
          Par.toF(prevProm)
        }
      }
    }
  }
} 
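The promise-plus-putIfAbsent dance above guarantees that only one caller computes the value for a given key, while every later caller waits on the same promise; computeIfAbsent is deliberately avoided here because the work calls back into the memoizer, and recursive updates from inside computeIfAbsent's mapping function can block or throw. A minimal sketch of the same idea with plain scala.concurrent primitives (FutureMemo and its names are illustrative, not part of bosatsu):

import java.util.concurrent.ConcurrentHashMap
import scala.concurrent.{Future, Promise}
import scala.util.control.NonFatal

object FutureMemo {
  // Memoize an asynchronous function: only the first caller per key runs fn,
  // every later caller receives the future registered by the winner.
  def memoize[A, B](fn: A => Future[B]): A => Future[B] = {
    val cache = new ConcurrentHashMap[A, Promise[B]]()
    a => {
      val fresh = Promise[B]()
      val prev = cache.putIfAbsent(a, fresh)
      if (prev == null) {
        // we won the race, so we run the computation and complete our promise
        try fresh.completeWith(fn(a))
        catch { case NonFatal(e) => fresh.failure(e) }
        fresh.future
      } else {
        // somebody else already started (or finished) this key
        prev.future
      }
    }
  }
}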
Example 57
Source File: HandlebarsTemplates.scala    From fintrospect   with Apache License 2.0 5 votes vote down vote up
package io.fintrospect.templating

import java.io.File
import java.util.concurrent.ConcurrentHashMap

import com.gilt.handlebars.scala.Handlebars
import com.gilt.handlebars.scala.binding.dynamic._
import com.twitter.io.Buf
import com.twitter.io.Buf.ByteArray.Owned

import scala.collection.JavaConverters._
import scala.io.Source

object HandlebarsTemplates extends Templates {

  def CachingClasspath(baseClasspathPackage: String = "."): TemplateRenderer =
    new TemplateRenderer {

      private val classToTemplate = new ConcurrentHashMap[Class[_], Handlebars[Any]]().asScala

      private def loadView[T <: View](view: T): Handlebars[Any] =
        classToTemplate.getOrElseUpdate(view.getClass,
          Option(getClass.getResourceAsStream("/" + view.template + ".hbs"))
            .map(t => Handlebars[Any](Source.fromInputStream(t).mkString))
            .getOrElse(throw new ViewNotFound(view))
        )

      override def toBuf(view: View): Buf = Owned(loadView(view)(view).getBytes)
    }

  def Caching(baseTemplateDir: String): TemplateRenderer = {
    val baseDir = new File(baseTemplateDir)

    new TemplateRenderer {
      private val classToTemplate = new ConcurrentHashMap[Class[_], Handlebars[Any]]().asScala

      private def loadView[T <: View](view: T): Handlebars[Any] =
        classToTemplate.getOrElseUpdate(view.getClass, {
          val file = new File(baseDir, view.template + ".hbs")
          if (!file.exists()) throw new ViewNotFound(view)
          Handlebars[Any](Source.fromFile(file).mkString)
        })

      override def toBuf(view: View): Buf = Owned(loadView(view)(view).getBytes)
    }
  }

  def HotReload(baseTemplateDir: String = "."): TemplateRenderer = new TemplateRenderer {
    override def toBuf(view: View): Buf = {
      val handlebars = Handlebars[Any](new File(baseTemplateDir, view.template + ".hbs"))
      Owned(handlebars(view).getBytes)
    }
  }
} 
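Depending on the Scala version, getOrElseUpdate on the wrapper returned by asScala may be a plain get-then-update rather than one atomic step, so two threads that miss at the same time can both compile the same template; that is harmless here because compilation is idempotent and the map still converges to a single value. If at-most-once loading matters, computeIfAbsent on the underlying ConcurrentHashMap gives that guarantee. A small illustrative sketch (OnceCache is a made-up name, not part of fintrospect):

import java.util.concurrent.ConcurrentHashMap

// computeIfAbsent runs the loader at most once per key, while that entry's bin
// lock is held, so the loader should be quick and must not touch this map again.
class OnceCache[K, V](load: K => V) {
  private val underlying = new ConcurrentHashMap[K, V]()
  def get(key: K): V = underlying.computeIfAbsent(key, (k: K) => load(k))
}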
Example 58
Source File: EventsController.scala    From chatoverflow   with Eclipse Public License 2.0 5 votes vote down vote up
package org.codeoverflow.chatoverflow.ui.web.rest.events

import java.io.PrintWriter
import java.util.concurrent.ConcurrentHashMap

import javax.servlet.AsyncContext
import javax.servlet.http.HttpServletRequest
import org.codeoverflow.chatoverflow.ui.web.JsonServlet
import org.scalatra.servlet.ScalatraAsyncSupport
import org.scalatra.{BadRequest, Unauthorized}
import org.scalatra.swagger.Swagger

class EventsController(implicit val swagger: Swagger) extends JsonServlet with ScalatraAsyncSupport with EventsControllerDefinition {
  private val connectionWriters = new ConcurrentHashMap[AsyncContext, PrintWriter]()

  def broadcast(messageType: String, message: String = null): Unit = {
    connectionWriters.forEach((context, writer) => {
      try {
        sendMessage(writer, messageType, message)
      } catch {
        // probably a lost or closed connection; remove this client's entry from the map
        case _: Throwable => connectionWriters.remove(context)
      }
    })
  }

  def closeConnections(): Unit = {
    connectionWriters.forEach((context, writer) => {
      try {
        sendMessage(writer, "close", null)
        writer.close()
      } finally {
        connectionWriters.remove(context)
      }
    })
  }

  private def sendMessage(writer: PrintWriter, messageType: String, message: String): Unit = {
    

    var msg = "event: " + messageType.replace("\n", "") + "\n"
    if (message != null)
      msg += "data: " + message.replace("\n", "\ndata: ") + "\n\n"
    writer.write(msg)
    writer.flush()
  }

  get("/", operation(getEvents)) {
    val accept = request.getHeader("Accept")
    if (accept == null || !accept.replace(" ", "").split(",").contains("text/event-stream")) {
      status = 406
    } else {
      authParamRequired {
        contentType = "text/event-stream"

        val asyncContext = request.startAsync()
        asyncContext.setTimeout(0)

        val writer = asyncContext.getResponse.getWriter
        connectionWriters.put(asyncContext, writer)
      }
    }
  }

  private def authParamRequired(func: => Any)(implicit request: HttpServletRequest): Any = {
    val authKeyKey = "authKey"

    if (!request.parameters.contains(authKeyKey) || request.getParameter(authKeyKey).isEmpty) {
      BadRequest()
    } else if (request.getParameter(authKeyKey) != chatOverflow.credentialsService.generateAuthKey()) {
      Unauthorized()
    } else {
      func
    }
  }
} 
Example 59
Source File: TestServer.scala    From gatling-grpc   with Apache License 2.0 5 votes vote down vote up
package com.github.phisgr.example

import java.util.concurrent.{ConcurrentHashMap, TimeUnit}

import com.github.phisgr.example.greet._
import com.github.phisgr.example.util._
import com.typesafe.scalalogging.StrictLogging
import io.grpc._
import io.grpc.health.v1.health.HealthCheckResponse.ServingStatus.SERVING
import io.grpc.health.v1.health.HealthGrpc.Health
import io.grpc.health.v1.health.{HealthCheckRequest, HealthCheckResponse, HealthGrpc}
import io.grpc.stub.StreamObserver

import scala.collection.JavaConverters._
import scala.concurrent.Future
import scala.util.{Random, Try}

object TestServer extends StrictLogging {
  def startServer(): Server = {

    val accounts: collection.concurrent.Map[String, String] = new ConcurrentHashMap[String, String]().asScala

    val greetService = new GreetServiceGrpc.GreetService {
      override def greet(request: HelloWorld) = Future.fromTry(Try {
        val token = Option(TokenContextKey.get).getOrElse {
          val trailers = new Metadata()
          trailers.put(ErrorResponseKey, CustomError("You are not authenticated!"))
          throw Status.UNAUTHENTICATED.asException(trailers)
        }

        val username = request.username
        if (!accounts.get(username).contains(token)) throw Status.PERMISSION_DENIED.asException()
        ChatMessage(username = username, data = s"Server says: Hello ${request.name}!")
      })

      override def register(request: RegisterRequest) = Future.fromTry(Try {
        val token = new Random().alphanumeric.take(10).mkString
        val success = accounts.putIfAbsent(request.username, token).isEmpty

        if (success) {
          RegisterResponse(
            username = request.username,
            token = token
          )
        } else {
          val trailers = new Metadata()
          trailers.put(ErrorResponseKey, CustomError("The username is already taken!"))
          throw Status.ALREADY_EXISTS.asException(trailers)
        }
      })
    }

    // normally, it just adds the "token" header, if any, to the context
    // but for demo purpose, it fails the call with 0.001% chance
    val interceptor = new ServerInterceptor {
      override def interceptCall[ReqT, RespT](
        call: ServerCall[ReqT, RespT], headers: Metadata,
        next: ServerCallHandler[ReqT, RespT]
      ): ServerCall.Listener[ReqT] = {
        if (new Random().nextInt(100000) == 0) {
          val trailers = new Metadata()
          trailers.put(ErrorResponseKey, CustomError("1 in 100,000 chance!"))
          call.close(Status.UNAVAILABLE.withDescription("You're unlucky."), trailers)
          new ServerCall.Listener[ReqT] {}
        } else {
          val context = Context.current()
          val newContext = Option(headers.get(TokenHeaderKey)).fold(context)(context.withValue(TokenContextKey, _))
          Contexts.interceptCall(newContext, call, headers, next)
        }

      }
    }

    val port = 8080
    val server = ServerBuilder.forPort(port)
      .addService(GreetServiceGrpc.bindService(greetService, scala.concurrent.ExecutionContext.global))
      .intercept(interceptor)
      .build.start
    logger.info(s"Server started, listening on $port")

    server
  }

  def startEmptyServer() = {
    val service = new Health {
      override def check(request: HealthCheckRequest): Future[HealthCheckResponse] =
        Future.successful(HealthCheckResponse(SERVING))
      override def watch(request: HealthCheckRequest, responseObserver: StreamObserver[HealthCheckResponse]): Unit =
        responseObserver.onError(Status.UNIMPLEMENTED.asRuntimeException())
    }
    ServerBuilder.forPort(9999)
      .addService(HealthGrpc.bindService(service, scala.concurrent.ExecutionContext.global))
      .build.start
  }

  def main(args: Array[String]): Unit = {
    val server = startServer()
    server.awaitTermination(10, TimeUnit.MINUTES)
  }
} 
Example 60
Source File: Project.scala    From ScalaClean   with Apache License 2.0 5 votes vote down vote up
package scalaclean.model.impl

import scalaclean.model._
import java.io.File
import java.net.{URL, URLClassLoader}
import java.nio.file.{Files, Path, Paths}
import java.util.Properties
import java.util.concurrent.ConcurrentHashMap

import scalafix.v1.SymbolInformation

import scala.meta.internal.symtab.{GlobalSymbolTable, SymbolTable}
import scala.meta.io.{AbsolutePath, Classpath}

object Project {

  import org.scalaclean.analysis.PropertyNames._

  def apply(propsPath: Path, projects: ProjectSet): Project = {
    val props = new Properties()
    println("PropsPath = " + propsPath)
    props.load(Files.newBufferedReader(propsPath))
    val classpathValue = props.getProperty(prop_classpath)
    val outputPath = props.getProperty(prop_outputDir)
    val elementsFilePath = props.getProperty(prop_elementsFile)
    val relationshipsFilePath = props.getProperty(prop_relationshipsFile)
    val extensionsFilePath = props.getProperty(prop_extensionsFile)
    val src = props.getProperty(prop_src)
    val srcBuildBase = props.getProperty(prop_srcBuildBase)
    val srcFiles = props.getProperty(prop_srcFiles, "").split(File.pathSeparatorChar).toSet
    val srcRoots = props.getProperty(prop_srcRoots).split(File.pathSeparatorChar).toList.sortWith((s1, s2) => s1.length > s2.length || (s1.length == s2.length && s1 < s2)).map(AbsolutePath(_))
    println("srcRoots = " + srcRoots)
    assert(classpathValue ne null, props.keys)
    assert(outputPath ne null, props.keys)

    val classPath = Classpath.apply(classpathValue)

    new Project(projects, classPath, outputPath, src, srcRoots, srcBuildBase, elementsFilePath, relationshipsFilePath, extensionsFilePath, srcFiles)
  }
}

class Project private(
                       val projects: ProjectSet, val classPath: Classpath, val outputPath: String, val src: String, val srcRoots: List[AbsolutePath], val srcBuildBase: String,
                       elementsFilePath: String, relationshipsFilePath: String, extensionsFilePath: String,
                       val srcFiles: Set[String]) {
  def symbolTable: SymbolTable = GlobalSymbolTable(classPath, includeJdk = true)

  lazy val classloader: ClassLoader = new URLClassLoader(Array(new URL("file:" + outputPath + "/")), null)

  private val infos = new ConcurrentHashMap[LegacyElementId, SymbolInformation]()

  def symbolInfo(viewedFrom: ElementModelImpl, symbol: LegacyElementId): SymbolInformation = {
    infos.computeIfAbsent(symbol,
      s => //any doc in the project would do though
        viewedFrom.source.doc.info(s.symbol).orNull)
  }

  def read: (Vector[ElementModelImpl], BasicRelationshipInfo) = ModelReader.read(this, elementsFilePath, relationshipsFilePath, extensionsFilePath)

  private val sourcesMap = new ConcurrentHashMap[String, SourceData]()

  def source(name: String): SourceData = {
    sourcesMap.computeIfAbsent(name, p => SourceData(this, Paths.get(p)))
  }

} 
Example 61
Source File: ResolverCache.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.cache

import java.util.UUID
import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import cats.Monad
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.commons.cache.{KeyValueStore, KeyValueStoreConfig}
import ch.epfl.bluebrain.nexus.kg.cache.Cache._
import ch.epfl.bluebrain.nexus.kg.resolve.Resolver
import ch.epfl.bluebrain.nexus.kg.resources.ProjectIdentifier.ProjectRef
import ch.epfl.bluebrain.nexus.rdf.Iri.AbsoluteIri

class ResolverCache[F[_]: Effect: Timer] private (projectToCache: ConcurrentHashMap[UUID, ResolverProjectCache[F]])(
    implicit
    as: ActorSystem,
    config: KeyValueStoreConfig
) {

  
private class ResolverProjectCache[F[_]: Monad] private (store: KeyValueStore[F, AbsoluteIri, Resolver])
    extends Cache[F, AbsoluteIri, Resolver](store) {

  implicit private val ordering: Ordering[Resolver] = Ordering.by(_.priority)

  def get: F[List[Resolver]] = store.values.map(_.toList.sorted)

  def put(resolver: Resolver): F[Unit] =
    if (resolver.deprecated) store.remove(resolver.id)
    else store.put(resolver.id, resolver)

}

private object ResolverProjectCache {

  def apply[F[_]: Effect: Timer](
      project: ProjectRef
  )(implicit as: ActorSystem, config: KeyValueStoreConfig): ResolverProjectCache[F] =
    new ResolverProjectCache(KeyValueStore.distributed(s"resolver-${project.id}", (_, resolver) => resolver.rev))

}

object ResolverCache {

  def apply[F[_]: Effect: Timer](implicit as: ActorSystem, config: KeyValueStoreConfig): ResolverCache[F] =
    new ResolverCache(new ConcurrentHashMap[UUID, ResolverProjectCache[F]]())
} 
Example 62
Source File: StorageCache.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.cache

import java.time.{Clock, Instant}
import java.util.UUID
import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import cats.Monad
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.commons.cache.{KeyValueStore, KeyValueStoreConfig}
import ch.epfl.bluebrain.nexus.kg.RevisionedValue
import ch.epfl.bluebrain.nexus.kg.cache.Cache._
import ch.epfl.bluebrain.nexus.kg.cache.StorageProjectCache._
import ch.epfl.bluebrain.nexus.kg.resources.ProjectIdentifier.ProjectRef
import ch.epfl.bluebrain.nexus.kg.storage.Storage
import ch.epfl.bluebrain.nexus.rdf.Iri.AbsoluteIri

class StorageCache[F[_]: Effect: Timer] private (projectToCache: ConcurrentHashMap[UUID, StorageProjectCache[F]])(
    implicit
    as: ActorSystem,
    config: KeyValueStoreConfig,
    clock: Clock
) {

  
private class StorageProjectCache[F[_]: Monad] private (store: KeyValueStore[F, AbsoluteIri, RevisionedStorage])
    extends Cache[F, AbsoluteIri, RevisionedStorage](store) {

  implicit private val ordering: Ordering[RevisionedStorage] = Ordering.by((s: RevisionedStorage) => s.rev).reverse

  implicit private def revisioned(storage: Storage)(implicit instant: Instant): RevisionedStorage =
    RevisionedValue(instant.toEpochMilli, storage)

  def get: F[List[Storage]] =
    store.values.map(_.toList.sorted.map(_.value))

  def getDefault: F[Option[Storage]]                            =
    get.map(_.collectFirst { case storage if storage.default => storage })

  def getBy(id: AbsoluteIri): F[Option[Storage]]                =
    get(id).map(_.collectFirst { case RevisionedValue(_, storage) if storage.id == id => storage })

  def put(storage: Storage)(implicit instant: Instant): F[Unit] =
    if (storage.deprecated) store.remove(storage.id)
    else store.put(storage.id, storage)
}

private object StorageProjectCache {

  type RevisionedStorage = RevisionedValue[Storage]

  def apply[F[_]: Effect: Timer](
      project: ProjectRef
  )(implicit as: ActorSystem, config: KeyValueStoreConfig): StorageProjectCache[F] =
    new StorageProjectCache(
      KeyValueStore.distributed(s"storage-${project.id}", (_, storage) => storage.value.rev)
    )

}

object StorageCache {

  def apply[F[_]: Timer: Effect](implicit as: ActorSystem, config: KeyValueStoreConfig, clock: Clock): StorageCache[F] =
    new StorageCache(new ConcurrentHashMap[UUID, StorageProjectCache[F]]())
} 
Example 63
Source File: RichExecutionProgress.scala    From sbt-optimizer   with Apache License 2.0 5 votes vote down vote up
package sbt.executionreporter

import java.util.concurrent.ConcurrentHashMap

import net.virtualvoid.optimizer._
import IvyLockReporter.{ SpentTimeInLock, Listener }
import ExecutionProgressReporter.TaskTiming

import sbt._
import internal.TaskName

import scala.annotation.tailrec

private[sbt] class RichExecutionProgress extends ExecuteProgress[Task] {
  private[this] val calledBy = new ConcurrentHashMap[Task[_], Task[_]]
  private[this] val anonOwners = new ConcurrentHashMap[Task[_], Task[_]]
  private[this] val data = new ConcurrentHashMap[Task[_], TaskTiming]

  type S = Unit
  def initial: S = {
    data.clear()
    calledBy.clear()
    anonOwners.clear()
  }
  def registered(state: S, task: Task[_], allDeps: Iterable[Task[_]], pendingDeps: Iterable[Task[_]]): S = {
    pendingDeps foreach { t ⇒ if (TaskName.transformNode(t).isEmpty) anonOwners.put(t, task) }
    if (data.containsKey(task))
      updateData(task)(d ⇒ d.copy(task, deps = d.deps ++ allDeps.toSeq, registerTime = theTime()))
    else
      data.put(task, TaskTiming(task, mappedName(task), deps = allDeps.toSeq, registerTime = theTime()))
  }

  def ready(state: S, task: Task[_]): S = updateData(task)(_.copy(readyTime = theTime()))
  def workStarting(task: Task[_]): Unit = {
    val listener = new DownloadListener {
      def downloadOccurred(download: NetworkAccess): Unit = updateData(task)(d ⇒ d.copy(downloads = d.downloads :+ download))
    }
    val lockListener = new Listener {
      def spentTimeInLock(spent: SpentTimeInLock): Unit = updateData(task)(d ⇒ d.copy(locks = d.locks :+ spent))
    }
    require(IvyDownloadReporter.listener.get == null)
    IvyDownloadReporter.listener.set(listener)
    IvyLockReporter.listener.set(lockListener)
    updateData(task)(_.copy(startTime = theTime(), threadId = Thread.currentThread().getId))
  }

  def workFinished[T](task: Task[T], result: Either[Task[T], Result[T]]): Unit = {
    IvyDownloadReporter.listener.set(null)
    IvyLockReporter.listener.set(null)
    updateData(task)(_.copy(finishTime = theTime()))
    result.left.foreach { t ⇒
      calledBy.put(t, task)
      data.put(t, TaskTiming(t, mappedName(t), deps = Seq(task)))
    }
  }
  def completed[T](state: S, task: Task[T], result: Result[T]): S = updateData(task)(_.copy(completeTime = theTime()))
  def allCompleted(state: S, results: RMap[Task, Result]): S = ExecutionProgressReporter.report(data)

  def theTime(): Option[Long] = Some(System.nanoTime())

  @tailrec
  private[this] def updateData(task: Task[_])(f: TaskTiming ⇒ TaskTiming): Unit = {
    val old = data.get(task)
    val newValue = f(old)
    if (!data.replace(task, old, newValue)) updateData(task)(f)
  }

  private[this] def inferredName(t: Task[_]): Option[String] =
    Option(anonOwners.get(t)).map(t ⇒ mappedName(t) + "<anon>") orElse
      Option(calledBy.get(t)).map(t ⇒ mappedName(t) + "<called-by>")
  var cache = Map.empty[Task[_], String]
  private[this] def mappedName(t: Task[_]): String = {
    cache.get(t) match {
      case Some(name) ⇒ name
      case None ⇒
        cache = cache.updated(t, "<cyclic>")
        val name = mappedNameImpl(t)
        cache = cache.updated(t, name)
        name
    }
  }
  private[this] def mappedNameImpl(t: Task[_]): String = TaskName.definedName(t) orElse inferredName(t) getOrElse TaskName.anonymousName(t)
}

object RichExecutionProgress {
  val install: sbt.Def.Setting[_] = Keys.executeProgress := (_ ⇒ new Keys.TaskProgress(new RichExecutionProgress))
} 
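updateData above is an optimistic update loop: read the current TaskTiming, derive a new one, then replace(key, old, new), and retry from scratch if another thread changed the entry in between. On Java 8 and later the same effect is often written with merge or compute, which run the remapping function atomically for that key. A rough sketch of both styles on a simple counter map (Counters is illustrative only):

import java.lang.{Long => JLong}
import java.util.concurrent.ConcurrentHashMap
import scala.annotation.tailrec

object Counters {
  private val counts = new ConcurrentHashMap[String, JLong]()

  // Optimistic retry, in the spirit of updateData above: replace succeeds only
  // when the mapping still equals the value we read, otherwise we try again.
  @tailrec
  def incrementCas(key: String): Unit = {
    val old = counts.get(key)
    if (old == null) {
      if (counts.putIfAbsent(key, JLong.valueOf(1L)) != null) incrementCas(key)
    } else if (!counts.replace(key, old, JLong.valueOf(old.longValue + 1L))) {
      incrementCas(key)
    }
  }

  // The same result as a single atomic call (Java 8+).
  def incrementMerge(key: String): Unit =
    counts.merge(key, JLong.valueOf(1L), (a: JLong, b: JLong) => JLong.valueOf(a.longValue + b.longValue))
}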
Example 64
Source File: InMemoryCachingFetcher.scala    From coursier   with Apache License 2.0 5 votes vote down vote up
package coursier.internal

import java.util.concurrent.ConcurrentHashMap

import coursier.core.Repository
import coursier.util.{Artifact, EitherT, Sync}


final class InMemoryCachingFetcher[F[_]](underlying: Repository.Fetch[F])(implicit S: Sync[F]) {

  @volatile private var onlyCache0 = false
  private val cache = new ConcurrentHashMap[Artifact, Either[String, String]]
  private val byUrl = new ConcurrentHashMap[String, Either[String, String]]

  def onlyCache(): Unit = {
    onlyCache0 = true
  }

  def fromCache(url: String): String =
    Option(byUrl.get(url)) match {
      case None =>
        throw new NoSuchElementException(url)
      case Some(Left(err)) =>
        sys.error(s"$url is errored: $err")
      case Some(Right(s)) => s
    }

  def fetcher: Repository.Fetch[F] =
    artifact =>
      EitherT {
        Option(cache.get(artifact)) match {
          case None =>
            if (onlyCache0)
              S.fromAttempt(Left(new NoSuchElementException(s"Artifact $artifact")))
            else
              S.map(underlying(artifact).run) { res =>
                val res0 = Option(cache.putIfAbsent(artifact, res))
                  .getOrElse(res)
                byUrl.putIfAbsent(artifact.url, res0)
                res0
              }
          case Some(res) => S.point(res)
        }
      }

} 
Example 65
Source File: ConnectionPoolHolder.scala    From bandar-log   with Apache License 2.0 5 votes vote down vote up
package com.aol.one.dwh.infra.sql.pool

import java.util.concurrent.ConcurrentHashMap

import com.aol.one.dwh.infra.config.ConnectorConfig
import com.typesafe.config.Config

class ConnectionPoolHolder(mainConf: Config) {

  private val connectionPools = new ConcurrentHashMap[String, HikariConnectionPool]()

  def get(connectorConf: ConnectorConfig): HikariConnectionPool = {
    val poolKey = s"${connectorConf.connectorType}_${connectorConf.configId}"

    if (connectionPools.containsKey(poolKey)) {
      connectionPools.get(poolKey)
    } else {
      val connectionPool = new HikariConnectionPool(PoolConfig(connectorConf, mainConf))
      connectionPools.put(poolKey, connectionPool)
      connectionPool
    }
  }
} 
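The containsKey-then-put sequence above is not atomic, so two threads asking for the same connector at the same moment could each build a HikariConnectionPool and silently keep only one of them. Two common remedies are computeIfAbsent, which runs the factory atomically for the key (as sketched earlier), or the optimistic publish below, which never constructs while holding a map lock and disposes the copy that lost the race. This is a generic sketch under the assumption that the resource is AutoCloseable; it is not bandar-log code:

import java.util.concurrent.ConcurrentHashMap

// Optimistically construct, publish with putIfAbsent, and close our copy if
// another thread won the race for the same key.
class RacyButSafeHolder[K, R <: AutoCloseable](factory: K => R) {
  private val resources = new ConcurrentHashMap[K, R]()

  def get(key: K): R = {
    val existing = resources.get(key)
    if (existing != null) existing
    else {
      val fresh = factory(key)
      val winner = resources.putIfAbsent(key, fresh)
      if (winner == null) fresh
      else {
        fresh.close() // we lost the race; discard the extra resource
        winner
      }
    }
  }
}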
Example 66
Source File: Subject.scala    From ncdbg   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.programmaticallyspeaking.ncd.messaging

import java.util.concurrent.ConcurrentHashMap

trait Subject[T] extends Observable[T] with Observer[T]

object Subject {
  def serialized[T] = new SerializedSubject[T]
}


class SerializedSubject[T] extends Subject[T] {

  private object lock
  private val observers = new ConcurrentHashMap[String, Observer[T]]()
  private var isDone = false

  override def onNext(item: T): Unit = invokeWithErrorCollection(o => o.onNext(item))

  override def onError(error: Throwable): Unit = invokeWithErrorCollection(beDone(o => o.onError(error)))

  override def onComplete(): Unit = invokeWithErrorCollection(beDone(o => o.onComplete()))

  override def subscribe(observer: Observer[T]): Subscription = {
    val id = java.util.UUID.randomUUID().toString
    observers.put(id, observer)
    () => removeObserverWithId(id)
  }

  private def removeObserverWithId(id: String): Unit = observers.remove(id)

  private def aggregateExceptions(exceptions: List[Exception]): Exception = exceptions match {
    case x :: Nil => x
    case xs =>
      val ex = new RuntimeException("An error occurred during observer dispatching.")
      xs.foreach(ex.addSuppressed)
      ex
  }

  private def beDone(fun: (Observer[T]) => Unit): (Observer[T]) => Unit = { obs =>
    try fun(obs) finally {
      lock.synchronized {
        isDone = true
      }
    }
  }

  private def invokeWithErrorCollection(fun: (Observer[T]) => Unit): Unit = {
    import scala.collection.JavaConverters._
    if (isDone) return
    lock.synchronized {
      var exceptions = List.empty[Exception]
      observers.values().asScala.foreach { observer =>
        try fun(observer) catch {
          case ex: Exception =>
            exceptions :+= ex
        }
      }
      if (exceptions.nonEmpty) throw aggregateExceptions(exceptions)
    }
  }
} 
Example 67
Source File: NettyStreamManager.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.rpc.netty

import java.io.File
import java.util.concurrent.ConcurrentHashMap

import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer}
import org.apache.spark.network.server.StreamManager
import org.apache.spark.rpc.RpcEnvFileServer
import org.apache.spark.util.Utils


private[netty] class NettyStreamManager(rpcEnv: NettyRpcEnv)
  extends StreamManager with RpcEnvFileServer {

  private val files = new ConcurrentHashMap[String, File]()
  private val jars = new ConcurrentHashMap[String, File]()
  private val dirs = new ConcurrentHashMap[String, File]()

  override def getChunk(streamId: Long, chunkIndex: Int): ManagedBuffer = {
    throw new UnsupportedOperationException()
  }

  override def openStream(streamId: String): ManagedBuffer = {
    val Array(ftype, fname) = streamId.stripPrefix("/").split("/", 2)
    val file = ftype match {
      case "files" => files.get(fname)
      case "jars" => jars.get(fname)
      case other =>
        val dir = dirs.get(ftype)
        require(dir != null, s"Invalid stream URI: $ftype not found.")
        new File(dir, fname)
    }

    if (file != null && file.isFile()) {
      new FileSegmentManagedBuffer(rpcEnv.transportConf, file, 0, file.length())
    } else {
      null
    }
  }

  override def addFile(file: File): String = {
    val existingPath = files.putIfAbsent(file.getName, file)
    require(existingPath == null || existingPath == file,
      s"File ${file.getName} was already registered with a different path " +
        s"(old path = $existingPath, new path = $file")
    s"${rpcEnv.address.toSparkURL}/files/${Utils.encodeFileNameToURIRawPath(file.getName())}"
  }

  override def addJar(file: File): String = {
    val existingPath = jars.putIfAbsent(file.getName, file)
    require(existingPath == null || existingPath == file,
      s"File ${file.getName} was already registered with a different path " +
        s"(old path = $existingPath, new path = $file")
    s"${rpcEnv.address.toSparkURL}/jars/${Utils.encodeFileNameToURIRawPath(file.getName())}"
  }

  override def addDirectory(baseUri: String, path: File): String = {
    val fixedBaseUri = validateDirectoryUri(baseUri)
    require(dirs.putIfAbsent(fixedBaseUri.stripPrefix("/"), path) == null,
      s"URI '$fixedBaseUri' already registered.")
    s"${rpcEnv.address.toSparkURL}$fixedBaseUri"
  }

} 
Example 68
Source File: MutableURLClassLoader.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.util

import java.net.{URLClassLoader, URL}
import java.util.Enumeration
import java.util.concurrent.ConcurrentHashMap

import scala.collection.JavaConversions._


  private val locks = new ConcurrentHashMap[String, Object]()

  override def loadClass(name: String, resolve: Boolean): Class[_] = {
    var lock = locks.get(name)
    if (lock == null) {
      val newLock = new Object()
      lock = locks.putIfAbsent(name, newLock)
      if (lock == null) {
        lock = newLock
      }
    }

    lock.synchronized {
      try {
        super.loadClass(name, resolve)
      } catch {
        case e: ClassNotFoundException =>
          parentClassLoader.loadClass(name, resolve)
      }
    }
  }

  override def getResource(name: String): URL = {
    val url = super.findResource(name)
    val res = if (url != null) url else parentClassLoader.getResource(name)
    res
  }

  override def getResources(name: String): Enumeration[URL] = {
    val urls = super.findResources(name)
    val res =
      if (urls != null && urls.hasMoreElements()) {
        urls
      } else {
        parentClassLoader.getResources(name)
      }
    res
  }

  override def addURL(url: URL) {
    super.addURL(url)
  }

} 
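The locks map above is per-name lock striping: each class name lazily gets its own monitor object, installed at most once via putIfAbsent, so loads of different classes proceed in parallel while repeated loads of the same name serialize. The idiom in isolation might look like this (KeyedLocks is an illustrative name):

import java.util.concurrent.ConcurrentHashMap

// Per-key lock objects: work for different keys runs in parallel while work
// for the same key is serialized on that key's monitor.
class KeyedLocks[K] {
  private val locks = new ConcurrentHashMap[K, Object]()

  def withLock[A](key: K)(body: => A): A = {
    var lock = locks.get(key)
    if (lock == null) {
      val fresh = new Object
      lock = locks.putIfAbsent(key, fresh)
      if (lock == null) lock = fresh // our monitor object was installed
    }
    lock.synchronized { body }
  }
}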
Example 69
Source File: TimeStampedHashSet.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.util

import java.util.concurrent.ConcurrentHashMap

import scala.collection.JavaConversions
import scala.collection.mutable.Set

private[spark] class TimeStampedHashSet[A] extends Set[A] {
  val internalMap = new ConcurrentHashMap[A, Long]()

  def contains(key: A): Boolean = {
    internalMap.containsKey(key)
  }

  def iterator: Iterator[A] = {
    val jIterator = internalMap.entrySet().iterator()
    JavaConversions.asScalaIterator(jIterator).map(_.getKey)
  }

  override def + (elem: A): Set[A] = {
    val newSet = new TimeStampedHashSet[A]
    newSet ++= this
    newSet += elem
    newSet
  }

  override def - (elem: A): Set[A] = {
    val newSet = new TimeStampedHashSet[A]
    newSet ++= this
    newSet -= elem
    newSet
  }

  override def += (key: A): this.type = {
    internalMap.put(key, currentTime)
    this
  }

  override def -= (key: A): this.type = {
    internalMap.remove(key)
    this
  }

  override def empty: Set[A] = new TimeStampedHashSet[A]()

  override def size(): Int = internalMap.size()

  override def foreach[U](f: (A) => U): Unit = {
    val iterator = internalMap.entrySet().iterator()
    while(iterator.hasNext) {
      f(iterator.next.getKey)
    }
  }

  
  def clearOldValues(threshTime: Long) {
    val iterator = internalMap.entrySet().iterator()
    while(iterator.hasNext) {
      val entry = iterator.next()
      if (entry.getValue < threshTime) {
        iterator.remove()
      }
    }
  }

  private def currentTime: Long = System.currentTimeMillis()
} 
Example 70
Source File: BlockManagerId.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.storage

import java.io.{Externalizable, IOException, ObjectInput, ObjectOutput}
import java.util.concurrent.ConcurrentHashMap

import org.apache.spark.SparkContext
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.util.Utils


  def apply(execId: String, host: String, port: Int): BlockManagerId =
    getCachedBlockManagerId(new BlockManagerId(execId, host, port))

  def apply(in: ObjectInput): BlockManagerId = {
    val obj = new BlockManagerId()
    obj.readExternal(in)
    getCachedBlockManagerId(obj)
  }

  val blockManagerIdCache = new ConcurrentHashMap[BlockManagerId, BlockManagerId]()

  def getCachedBlockManagerId(id: BlockManagerId): BlockManagerId = {
    blockManagerIdCache.putIfAbsent(id, id)
    blockManagerIdCache.get(id)
  }
} 
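getCachedBlockManagerId is an interning pattern: putIfAbsent publishes the first instance seen for a given value, and every later deserialization gets that canonical instance back instead of a fresh but equal object. A generic version of the idiom could be sketched as follows (InternPool is illustrative, not Spark API):

import java.util.concurrent.ConcurrentHashMap

// Intern pool: callers always receive the first instance registered for a
// given (equals/hashCode) value, so identical values share one object.
class InternPool[A <: AnyRef] {
  private val pool = new ConcurrentHashMap[A, A]()

  def intern(candidate: A): A = {
    val existing = pool.putIfAbsent(candidate, candidate)
    if (existing == null) candidate else existing
  }
}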
Example 71
Source File: Disposer.scala    From tensorflow_scala   with Apache License 2.0 5 votes vote down vote up
package org.platanios.tensorflow.api.utilities

import java.lang.Thread.currentThread
import java.lang.ref.{PhantomReference, Reference, ReferenceQueue}
import java.security.{AccessController, PrivilegedAction}
import java.util
import java.util.concurrent.ConcurrentHashMap

import scala.annotation.tailrec


  def add(target: Any, disposer: () => Unit ): Reference[Any] = {
    val reference = new PhantomReference(target, queue)
    records.put(reference, disposer)
    // TODO: make sure reference isn't GC'd before this point (e.g., with org.openjdk.jmh.infra.Blackhole::consume).
    reference
  }

  AccessController.doPrivileged(new PrivilegedAction[Unit] {
    override def run(): Unit = {
      // The thread must be a member of a thread group which will not get GCed before the VM exit. For this reason, we
      // make its parent the top-level thread group.
      @tailrec def rootThreadGroup(group: ThreadGroup = currentThread.getThreadGroup): ThreadGroup = {
        group.getParent match {
          case null => group
          case parent => rootThreadGroup(parent)
        }
      }
     
      new Thread(rootThreadGroup(), "TensorFlow Scala API Disposer") {
        override def run = while (true) {
          // Blocks until there is a reference in the queue.
          val referenceToDispose = queue.remove
          records.remove(referenceToDispose).apply()
          referenceToDispose.clear()
        }
        setContextClassLoader(null)
        setDaemon(true)
        setPriority(Thread.MAX_PRIORITY)
        start()
      }
    }
  })
} 
Example 72
Source File: MemorySchemaRegistry.scala    From affinity   with Apache License 2.0 5 votes vote down vote up
package io.amient.affinity.avro

import java.util.concurrent.ConcurrentHashMap

import com.typesafe.config.Config
import io.amient.affinity.avro.MemorySchemaRegistry.MemAvroConf
import io.amient.affinity.avro.record.AvroSerde
import io.amient.affinity.avro.record.AvroSerde.AvroConf
import io.amient.affinity.core.config.CfgStruct
import org.apache.avro.{Schema, SchemaValidator}

import scala.collection.JavaConverters._

object MemorySchemaRegistry {

  object MemAvroConf extends MemAvroConf {
    override def apply(config: Config) = new MemAvroConf().apply(config)
  }
  class MemAvroConf extends CfgStruct[MemAvroConf](classOf[AvroConf]) {
    val ID = integer("schema.registry.id", false)
      .doc("multiple instances with the same id will share the schemas registered by any of them")
  }

  val multiverse = new ConcurrentHashMap[Int, Universe]()

  def createUniverse(reuse: Option[Int] = None): Universe = synchronized {
    reuse match {
      case Some(id) if multiverse.containsKey(id) => multiverse.get(id)
      case Some(id) =>
        val universe = new Universe(id)
        multiverse.asScala += id -> universe
        universe
      case None =>
        val id = (if (multiverse.isEmpty) 1 else multiverse.asScala.keys.max + 1)
        val universe = new Universe(id)
        multiverse.asScala += id -> universe
        universe
    }
  }

  class Universe(val id: Int) {
    val schemas = new ConcurrentHashMap[Int, Schema]()
    val subjects = new ConcurrentHashMap[String, List[Int]]()

    def getOrRegister(schema: Schema): Int = synchronized {
      schemas.asScala.find(_._2 == schema) match {
        case None =>
          val newId = schemas.size
          schemas.put(newId, schema)
          newId
        case Some((id, _)) => id
      }
    }

    def updateSubject(subject: String, schemaId: Int, validator: SchemaValidator): Unit = synchronized {
      val existing = Option(subjects.get(subject)).getOrElse(List())
      validator.validate(schemas.get(schemaId), existing.map(id => schemas.get(id)).asJava)
      if (!existing.contains(schemaId)) {
        subjects.put(subject, (existing :+ schemaId))
      }
    }
  }

}

class MemorySchemaRegistry(universe: MemorySchemaRegistry.Universe) extends AvroSerde with AvroSchemaRegistry {

  def this(conf: MemAvroConf) = this(MemorySchemaRegistry.createUniverse(if (conf.ID.isDefined) Some(conf.ID()) else None))

  def this(_conf: AvroConf) = this(MemAvroConf.apply(_conf))

  def this() = this(new MemAvroConf())

  //this is for stable tests
  register[Null]("null")
  register[Boolean]("boolean")
  register[Int]("int")
  register[Long]("long")
  register[Float]("float")
  register[Double]("double")
  register[String]("string")
  register[Array[Byte]]("bytes")

  
  override protected def registerSchema(subject: String, schema: Schema): Int = {
    val id = universe.getOrRegister(schema)
    universe.updateSubject(subject, id, validator)
    id
  }

  override def close() = ()
} 
Example 73
Source File: AffinityMetrics.scala    From affinity   with Apache License 2.0 5 votes vote down vote up
package io.amient.affinity.core.util

import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpResponse
import com.codahale.metrics.{MetricRegistry, Timer}

import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success}

object AffinityMetrics {
  private val reporters = scala.collection.mutable.ListBuffer[MetricRegistry => Unit]()

  def apply(f: MetricRegistry => Unit): Unit = reporters += f

  private val metricsRegistries = new ConcurrentHashMap[ActorSystem, AffinityMetrics]()

  def forActorSystem(system: ActorSystem): AffinityMetrics = {
    metricsRegistries.get(system) match {
      case null =>
        val registry = new AffinityMetrics
        reporters.foreach(_(registry))
        metricsRegistries.put(system, registry)
        registry
      case registry => registry
    }
  }
}

class AffinityMetrics extends MetricRegistry {

  private implicit val executor = scala.concurrent.ExecutionContext.Implicits.global

  private val processMetricsMap = new ConcurrentHashMap[String, ProcessMetrics]()

  def meterAndHistogram(name: String): ProcessMetrics = {
    processMetricsMap.get(name)  match {
      case null =>
        val m = new ProcessMetrics(name)
        processMetricsMap.put(name, m)
        m
      case some => some
    }
  }

  def process(groupName: String, result: Promise[_]): Unit = process(groupName, result.future)

  def process(groupName: String, result: Future[Any]): Unit = {
    val metrics = meterAndHistogram(groupName)
    val startTime = metrics.markStart()
    result.onComplete {
      case Success(response: HttpResponse) => if (response.status.intValue() < 400) metrics.markSuccess(startTime) else metrics.markFailure(startTime)
      case Success(_) => metrics.markSuccess(startTime)
      case Failure(_) => metrics.markFailure(startTime)
    }
  }

  class ProcessMetrics(name: String) {
    val durations = timer(s"$name.timer")
    val successes = meter(s"$name.success")
    val failures = meter(s"$name.failure")

    def markStart(): Timer.Context = durations.time()

    def markSuccess(context: Timer.Context, n: Long = 1): Unit = {
      context.stop
      successes.mark(n)
    }

    def markFailure(context: Timer.Context): Unit = {
      context.stop
      failures.mark()
    }

  }

} 
Example 74
Source File: Failover2Spec.scala    From affinity   with Apache License 2.0 5 votes vote down vote up
package io.amient.affinity.core.cluster

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger

import akka.http.scaladsl.model.HttpMethods._
import akka.http.scaladsl.model.StatusCode
import akka.http.scaladsl.model.StatusCodes.SeeOther
import com.typesafe.config.ConfigValueFactory
import io.amient.affinity.Conf
import io.amient.affinity.core.util.AffinityTestBase
import io.amient.affinity.kafka.EmbeddedKafka
import org.scalatest.{FlatSpec, Matchers}

import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.language.postfixOps
import scala.util.{Failure, Random, Success, Try}


class Failover2Spec extends FlatSpec with AffinityTestBase with EmbeddedKafka with Matchers {

  val specTimeout = 15 seconds

  override def numPartitions = 2

  def config = configure("failoverspecs", Some(zkConnect), Some(kafkaBootstrap))

  val node1 = new Node(config.withValue(Conf.Affi.Node.Gateway.Class.path, ConfigValueFactory.fromAnyRef(classOf[FailoverTestGateway].getName)))
  val node2 = new Node(config.withValue(Conf.Affi.Node.Containers("keyspace1").path, ConfigValueFactory.fromIterable(List(0,1).asJava)))
  val node3 = new Node(config.withValue(Conf.Affi.Node.Containers("keyspace1").path, ConfigValueFactory.fromIterable(List(0,1).asJava)))

  override def beforeAll(): Unit = try {
    node1.start()
    node2.start()
    node3.start()
    node1.awaitClusterReady()
  } finally {
    super.beforeAll()
  }

  override def afterAll(): Unit = try {
    node1.shutdown()
    node2.shutdown()
    node3.shutdown()
  } finally {
    super.afterAll()
  }

  "Master Transition" should "not lead to inconsistent state" in {
    val requestCount = new AtomicInteger(0)
    val expected = new ConcurrentHashMap[String, String]()
    import scala.concurrent.ExecutionContext.Implicits.global

    val random = new Random()
    val requests = scala.collection.mutable.ListBuffer[Future[Try[StatusCode]]]()
    for (i <- (1 to 250)) {
      val key = random.nextInt.toString
      val value = random.nextInt.toString
      requests += node1.http(POST, s"/$key/$value") map {
        case response =>
          expected.put(key, value)
          if (i == 25) {
            //after a few writes have succeeded kill one node
            node2.shutdown()
          }
          Success(response.status)
      } recover {
        case e: Throwable =>
          Failure(e)
      }
    }
    requestCount.set(requests.size)
    Await.result(Future.sequence(requests), specTimeout).foreach(_ should be(Success(SeeOther)))
    println(s"${requests.size} successful requests")

    expected.asScala.foreach { case (key, value) =>
      val (status, entity) = Await.result(node1.http(GET, s"/$key").map { response => (response.status, response.entity)}, specTimeout / 3)
      status.intValue should be (200)
      val expectedEntity = jsonStringEntity(value)
      entity should be(expectedEntity)
    }
  }

} 
Example 75
Source File: KyuubiConf.scala    From kyuubi   with Apache License 2.0 5 votes vote down vote up
package org.apache.kyuubi.config

import java.util.concurrent.ConcurrentHashMap

import org.apache.kyuubi.{Logging, Utils}

case class KyuubiConf(loadSysDefault: Boolean = true) extends Logging {
  private val settings = new ConcurrentHashMap[String, String]()
  private lazy val reader: ConfigProvider = new ConfigProvider(settings)

  if (loadSysDefault) {
    loadFromMap()
  }

  private def loadFromMap(props: Map[String, String] = Utils.getSystemProperties): KyuubiConf = {
    for ((key, value) <- props if key.startsWith(KYUUBI_PREFIX)) {
      set(key, value)
    }
    this
  }

  def loadFileDefaults(): KyuubiConf = {
    val maybeConfigFile = Utils.getDefaultPropertiesFile()
    loadFromMap(Utils.getPropertiesFromFile(maybeConfigFile))
    this
  }

  def set[T](entry: ConfigEntry[T], value: T): KyuubiConf = {
    settings.put(entry.key, entry.strConverter(value))
    this
  }

  def set[T](entry: OptionalConfigEntry[T], value: T): KyuubiConf = {
    set(entry.key, entry.rawStrConverter(value))
    this
  }

  def set(key: String, value: String): KyuubiConf = {
    require(key != null)
    require(value != null)
    settings.put(key, value)
    this
  }

  def get[T](config: ConfigEntry[T]): T = {
    config.readFrom(reader)
  }

  
  final val KYUUBI_CONF_FILE_NAME = "kyuubi-defaults.conf"
  final val KYUUBI_HOME = "KYUUBI_HOME"

  def buildConf(key: String): ConfigBuilder = ConfigBuilder(KYUUBI_PREFIX + key)

  val EMBEDDED_ZK_PORT: ConfigEntry[Int] = buildConf("embedded.zookeeper.port")
    .doc("The port of the embedded zookeeper server")
    .version("1.0.0")
    .intConf
    .createWithDefault(2181)

  val EMBEDDED_ZK_TEMP_DIR: ConfigEntry[String] = buildConf("embedded.zookeeper.directory")
    .doc("The temporary directory for the embedded zookeeper server")
    .version("1.0.0")
    .stringConf
    .createWithDefault(Utils.resolveURI("embedded_zookeeper").getRawPath)

  val SERVER_PRINCIPAL: OptionalConfigEntry[String] = buildConf("server.principal")
    .doc("")
    .version("1.0.0")
    .stringConf
    .createOptional

  val SERVER_KEYTAB: OptionalConfigEntry[String] = buildConf("server.keytab")
    .doc("")
    .version("1.0.0")
    .stringConf
    .createOptional

} 
Example 76
Source File: ConfiguredAsObjectEncoder.scala    From circe-generic-extras   with Apache License 2.0 5 votes vote down vote up
package io.circe.generic.extras.encoding

import io.circe.JsonObject
import io.circe.generic.encoding.DerivedAsObjectEncoder
import io.circe.generic.extras.{ Configuration, JsonKey }
import java.util.concurrent.ConcurrentHashMap
import scala.annotation.implicitNotFound
import scala.collection.immutable.Map
import shapeless.{ Annotations, Coproduct, HList, LabelledGeneric, Lazy }
import shapeless.ops.hlist.ToTraversable
import shapeless.ops.record.Keys

@implicitNotFound(
  """Could not find ConfiguredAsObjectEncoder for type ${A}.
Some possible causes for this:
- ${A} isn't a case class or sealed trait
- some of ${A}'s members don't have codecs of their own
- missing implicit Configuration"""
)
abstract class ConfiguredAsObjectEncoder[A](config: Configuration) extends DerivedAsObjectEncoder[A] {
  private[this] val constructorNameCache: ConcurrentHashMap[String, String] =
    new ConcurrentHashMap[String, String]()

  protected[this] def constructorNameTransformer(value: String): String = {
    val current = constructorNameCache.get(value)

    if (current eq null) {
      val transformed = config.transformConstructorNames(value)
      constructorNameCache.put(value, transformed)
      transformed
    } else {
      current
    }
  }
}

object ConfiguredAsObjectEncoder {
  implicit def encodeCaseClass[A, R <: HList, F <: HList, K <: HList](implicit
    gen: LabelledGeneric.Aux[A, R],
    encode: Lazy[ReprAsObjectEncoder[R]],
    config: Configuration,
    fields: Keys.Aux[R, F],
    fieldsToList: ToTraversable.Aux[F, List, Symbol],
    keys: Annotations.Aux[JsonKey, A, K],
    keysToList: ToTraversable.Aux[K, List, Option[JsonKey]]
  ): ConfiguredAsObjectEncoder[A] = new ConfiguredAsObjectEncoder[A](config) {
    private[this] val keyAnnotations: List[Option[JsonKey]] = keysToList(keys())
    private[this] val hasKeyAnnotations: Boolean = keyAnnotations.exists(_.nonEmpty)

    private[this] val keyAnnotationMap: Map[String, String] =
      fieldsToList(fields())
        .map(_.name)
        .zip(keyAnnotations)
        .collect {
          case (field, Some(keyAnnotation)) => (field, keyAnnotation.value)
        }
        .toMap

    private[this] def memberNameTransformer(value: String): String =
      if (hasKeyAnnotations)
        keyAnnotationMap.getOrElse(value, config.transformMemberNames(value))
      else
        config.transformMemberNames(value)

    private[this] val transformedMemberCache: Map[String, String] = {
      fieldsToList(fields()).map(f => (f.name, memberNameTransformer(f.name))).toMap
    }

    private[this] def transformMemberName(value: String) =
      transformedMemberCache.getOrElse(value, value)

    final def encodeObject(a: A): JsonObject =
      encode.value.configuredEncodeObject(gen.to(a))(
        transformMemberName,
        constructorNameTransformer,
        None
      )
  }

  implicit def encodeAdt[A, R <: Coproduct](implicit
    gen: LabelledGeneric.Aux[A, R],
    encode: Lazy[ReprAsObjectEncoder[R]],
    config: Configuration
  ): ConfiguredAsObjectEncoder[A] = new ConfiguredAsObjectEncoder[A](config) {
    final def encodeObject(a: A): JsonObject =
      encode.value.configuredEncodeObject(gen.to(a))(
        Predef.identity,
        constructorNameTransformer,
        config.discriminator
      )
  }
} 
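constructorNameTransformer accepts a benign race on purpose: two threads that miss the cache simultaneously both compute the transformed name, and one put simply overwrites the other with an identical value, so no locking or atomic compute is needed. The precondition is that the transformation is deterministic and cheap to redo. In sketch form (IdempotentCache is an illustrative name, not circe's API):

import java.util.concurrent.ConcurrentHashMap

// Racy-but-correct memoization: safe whenever f is a pure function, because
// losing a race only means recomputing the same value once more.
final class IdempotentCache[A, B <: AnyRef](f: A => B) {
  private val cache = new ConcurrentHashMap[A, B]()

  def apply(a: A): B = {
    val cached = cache.get(a)
    if (cached != null) cached
    else {
      val computed = f(a)
      cache.put(a, computed)
      computed
    }
  }
}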
Example 77
Source File: MutableURLClassLoader.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.util

import java.net.{URLClassLoader, URL}
import java.util.Enumeration
import java.util.concurrent.ConcurrentHashMap

import scala.collection.JavaConversions._


  private val locks = new ConcurrentHashMap[String, Object]()

  override def loadClass(name: String, resolve: Boolean): Class[_] = {
    var lock = locks.get(name)
    if (lock == null) {
      val newLock = new Object()
      lock = locks.putIfAbsent(name, newLock)
      if (lock == null) {
        lock = newLock
      }
    }

    lock.synchronized {
      try {
        super.loadClass(name, resolve)
      } catch {
        case e: ClassNotFoundException =>
          parentClassLoader.loadClass(name, resolve)
      }
    }
  }

  override def getResource(name: String): URL = {
    val url = super.findResource(name)
    val res = if (url != null) url else parentClassLoader.getResource(name)
    res
  }

  override def getResources(name: String): Enumeration[URL] = {
    val urls = super.findResources(name)
    val res =
      if (urls != null && urls.hasMoreElements()) {
        urls
      } else {
        parentClassLoader.getResources(name)
      }
    res
  }

  override def addURL(url: URL) {
    super.addURL(url)
  }

} 
Example 78
Source File: TimeStampedHashSet.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.util

import java.util.concurrent.ConcurrentHashMap

import scala.collection.JavaConversions
import scala.collection.mutable.Set

private[spark] class TimeStampedHashSet[A] extends Set[A] {
  val internalMap = new ConcurrentHashMap[A, Long]()

  def contains(key: A): Boolean = {
    internalMap.containsKey(key)
  }

  def iterator: Iterator[A] = {
    val jIterator = internalMap.entrySet().iterator()
    JavaConversions.asScalaIterator(jIterator).map(_.getKey)
  }

  override def + (elem: A): Set[A] = {
    val newSet = new TimeStampedHashSet[A]
    newSet ++= this
    newSet += elem
    newSet
  }

  override def - (elem: A): Set[A] = {
    val newSet = new TimeStampedHashSet[A]
    newSet ++= this
    newSet -= elem
    newSet
  }
  // this.type is the type of the current object; `this` refers to the current instance
  override def += (key: A): this.type = {
    internalMap.put(key, currentTime)
    this
  }
  // this.type is the type of the current object; `this` refers to the current instance
  override def -= (key: A): this.type = {
    internalMap.remove(key)
    this
  }

  override def empty: Set[A] = new TimeStampedHashSet[A]()

  override def size(): Int = internalMap.size()

  override def foreach[U](f: (A) => U): Unit = {
    val iterator = internalMap.entrySet().iterator()
    while(iterator.hasNext) {
      f(iterator.next.getKey)
    }
  }

  
  def clearOldValues(threshTime: Long) {
    val iterator = internalMap.entrySet().iterator()
    while(iterator.hasNext) {
      val entry = iterator.next()
      if (entry.getValue < threshTime) {
        iterator.remove()
      }
    }
  }

  private def currentTime: Long = System.currentTimeMillis()
} 
Example 79
Source File: BlockManagerId.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.storage

import java.io.{Externalizable, IOException, ObjectInput, ObjectOutput}
import java.util.concurrent.ConcurrentHashMap

import org.apache.spark.SparkContext
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.util.Utils


  def apply(execId: String, host: String, port: Int): BlockManagerId =
    getCachedBlockManagerId(new BlockManagerId(execId, host, port))

  def apply(in: ObjectInput): BlockManagerId = {
    val obj = new BlockManagerId()
    obj.readExternal(in) // read host, port_ and executorId_ from the ObjectInput
    getCachedBlockManagerId(obj) // look up the cached BlockManagerId
  }

  val blockManagerIdCache = new ConcurrentHashMap[BlockManagerId, BlockManagerId]()

  def getCachedBlockManagerId(id: BlockManagerId): BlockManagerId = {
    blockManagerIdCache.putIfAbsent(id, id) // unlike put, putIfAbsent keeps any existing mapping instead of replacing it
    blockManagerIdCache.get(id) // return the canonical BlockManagerId instance
  }
} 
Example 80
Source File: BlockInfo.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.storage

import java.util.concurrent.ConcurrentHashMap
// tellMaster: whether block status changes should be reported to the master
private[storage] class BlockInfo(val level: StorageLevel, val tellMaster: Boolean) {
  // To save space, 'pending' and 'failed' are encoded as special sizes:
  @volatile var size: Long = BlockInfo.BLOCK_PENDING
  private def pending: Boolean = size == BlockInfo.BLOCK_PENDING
  private def failed: Boolean = size == BlockInfo.BLOCK_FAILED
  private def initThread: Thread = BlockInfo.blockInfoInitThreads.get(this) // the thread that is initializing this block, if any

  setInitThread() // record the initializing thread

  private def setInitThread() {
    
  private val blockInfoInitThreads = new ConcurrentHashMap[BlockInfo, Thread]

  private val BLOCK_PENDING: Long = -1L // block is still being written
  private val BLOCK_FAILED: Long = -2L // block write failed
} 
Example 81
Source File: Random.scala    From ratatool   with Apache License 2.0 5 votes vote down vote up
package com.spotify.scio

import com.spotify.scio.util.random.RandomSampler

import java.util.concurrent.ConcurrentHashMap

import org.apache.beam.sdk.transforms.DoFn
import org.apache.beam.sdk.transforms.DoFn.ProcessElement
import java.util.{Random => JRandom}

object Random {

  
      require(
        fraction >= (0.0 - RandomSampler.roundingEpsilon)
          && fraction <= (1.0 + RandomSampler.roundingEpsilon),
        s"Sampling fractions must be on interval [0, 1], Key: $key, Fraction: $fraction")

      rngs.putIfAbsent(key, init())
      val count = samples(fraction, rngs.get(key))
      var i = 0
      while (i < count) {
        c.output((key, value))
        i += 1
      }
    }

    def init(): R
    def samples(fraction: Double, rng: R): Int
    def setSeed(seed: Long): Unit = this.seed = seed

  }

  class BernoulliValueSampler[K, V]
    extends RandomValueSampler[K, V, JRandom] {

    // TODO: Is seed properly handled here
    // TODO: is it necessary to setSeed for each instance like Spark does?
    override def init(): JRandom = {
      val r = RandomSampler.newDefaultRNG
      if (seed > 0) {
        r.setSeed(seed)
      }
      r
    }

    override def samples(fraction: Double, rng: JRandom): Int =
      if (fraction <= 0.0) {
        0
      } else if (fraction >= 1.0) {
        1
      } else {
        if (rng.nextDouble() <= fraction) 1 else 0
      }
  }

  class RandomValueAssigner[K, V]()
    extends DoFn[(K, V), (K, (V, Double))] {
    protected lazy val rngs: ConcurrentHashMap[K, JRandom] = new ConcurrentHashMap[K, JRandom]()
    protected var seed: Long = -1

    // TODO: is it necessary to setSeed for each instance like Spark does?
    def init(): JRandom = {
      val r = RandomSampler.newDefaultRNG
      if (seed > 0) {
        r.setSeed(seed)
      }
      r
    }

    @ProcessElement
    def processElement(c: DoFn[(K, V), (K, (V, Double))]#ProcessContext): Unit = {
      val (key, value) = c.element()
      rngs.putIfAbsent(key, init())
      c.output((key, (value, rngs.get(key).nextDouble())))
    }

    def setSeed(seed: Long): Unit = this.seed = seed
  }
} 
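Note that rngs.putIfAbsent(key, init()) evaluates init() for every element, even when the key already has an RNG, because the argument is computed before the call; the extra Random is then discarded. When construction is more expensive, checking get first (or using computeIfAbsent) avoids the wasted work. A hypothetical helper along those lines, not part of ratatool:

import java.util.concurrent.ConcurrentHashMap
import java.util.{Random => JRandom}

object Rngs {
  // Only constructs a Random when the key has no cached instance yet.
  def forKey[K](rngs: ConcurrentHashMap[K, JRandom], key: K, seed: Long): JRandom = {
    val existing = rngs.get(key)
    if (existing != null) existing
    else {
      val fresh = new JRandom(seed)
      val raced = rngs.putIfAbsent(key, fresh)
      if (raced == null) fresh else raced
    }
  }
}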
Example 82
Source File: SparkSQLOperationManager.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hive.thriftserver.server

import java.util.{Map => JMap}
import java.util.concurrent.ConcurrentHashMap

import org.apache.hive.service.cli._
import org.apache.hive.service.cli.operation.{ExecuteStatementOperation, Operation, OperationManager}
import org.apache.hive.service.cli.session.HiveSession

import org.apache.spark.internal.Logging
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.hive.thriftserver.{ReflectionUtils, SparkExecuteStatementOperation}
import org.apache.spark.sql.internal.SQLConf


private[thriftserver] class SparkSQLOperationManager()
  extends OperationManager with Logging {

  val handleToOperation = ReflectionUtils
    .getSuperField[JMap[OperationHandle, Operation]](this, "handleToOperation")

  val sessionToActivePool = new ConcurrentHashMap[SessionHandle, String]()
  val sessionToContexts = new ConcurrentHashMap[SessionHandle, SQLContext]()

  override def newExecuteStatementOperation(
      parentSession: HiveSession,
      statement: String,
      confOverlay: JMap[String, String],
      async: Boolean): ExecuteStatementOperation = synchronized {
    val sqlContext = sessionToContexts.get(parentSession.getSessionHandle)
    require(sqlContext != null, s"Session handle: ${parentSession.getSessionHandle} has not been" +
      s" initialized or had already closed.")
    val conf = sqlContext.sessionState.conf
    val hiveSessionState = parentSession.getSessionState
    setConfMap(conf, hiveSessionState.getOverriddenConfigurations)
    setConfMap(conf, hiveSessionState.getHiveVariables)
    val runInBackground = async && conf.getConf(HiveUtils.HIVE_THRIFT_SERVER_ASYNC)
    val operation = new SparkExecuteStatementOperation(parentSession, statement, confOverlay,
      runInBackground)(sqlContext, sessionToActivePool)
    handleToOperation.put(operation.getHandle, operation)
    logDebug(s"Created Operation for $statement with session=$parentSession, " +
      s"runInBackground=$runInBackground")
    operation
  }

  def setConfMap(conf: SQLConf, confMap: java.util.Map[String, String]): Unit = {
    val iterator = confMap.entrySet().iterator()
    while (iterator.hasNext) {
      val kv = iterator.next()
      conf.setConfString(kv.getKey, kv.getValue)
    }
  }
} 
Example 83
Source File: SQLExecution.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.ui.{SparkListenerSQLExecutionEnd, SparkListenerSQLExecutionStart}

object SQLExecution {

  val EXECUTION_ID_KEY = "spark.sql.execution.id"

  private val _nextExecutionId = new AtomicLong(0)

  private def nextExecutionId: Long = _nextExecutionId.getAndIncrement

  private val executionIdToQueryExecution = new ConcurrentHashMap[Long, QueryExecution]()

  def getQueryExecution(executionId: Long): QueryExecution = {
    executionIdToQueryExecution.get(executionId)
  }

  private val testing = sys.props.contains("spark.testing")

  private[sql] def checkSQLExecutionId(sparkSession: SparkSession): Unit = {
    val sc = sparkSession.sparkContext
    // only throw an exception during tests. a missing execution ID should not fail a job.
    if (testing && sc.getLocalProperty(EXECUTION_ID_KEY) == null) {
      // Attention testers: when a test fails with this exception, it means that the action that
      // started execution of a query didn't call withNewExecutionId. The execution ID should be
      // set by calling withNewExecutionId in the action that begins execution, like
      // Dataset.collect or DataFrameWriter.insertInto.
      throw new IllegalStateException("Execution ID should be set")
    }
  }

  
  def withExecutionId[T](sc: SparkContext, executionId: String)(body: => T): T = {
    val oldExecutionId = sc.getLocalProperty(SQLExecution.EXECUTION_ID_KEY)
    try {
      sc.setLocalProperty(SQLExecution.EXECUTION_ID_KEY, executionId)
      body
    } finally {
      sc.setLocalProperty(SQLExecution.EXECUTION_ID_KEY, oldExecutionId)
    }
  }
} 
Example 84
Source File: NettyStreamManager.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.rpc.netty

import java.io.File
import java.util.concurrent.ConcurrentHashMap

import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer}
import org.apache.spark.network.server.StreamManager
import org.apache.spark.rpc.RpcEnvFileServer
import org.apache.spark.util.Utils


private[netty] class NettyStreamManager(rpcEnv: NettyRpcEnv)
  extends StreamManager with RpcEnvFileServer {

  private val files = new ConcurrentHashMap[String, File]()
  private val jars = new ConcurrentHashMap[String, File]()
  private val dirs = new ConcurrentHashMap[String, File]()

  override def getChunk(streamId: Long, chunkIndex: Int): ManagedBuffer = {
    throw new UnsupportedOperationException()
  }

  override def openStream(streamId: String): ManagedBuffer = {
    val Array(ftype, fname) = streamId.stripPrefix("/").split("/", 2)
    val file = ftype match {
      case "files" => files.get(fname)
      case "jars" => jars.get(fname)
      case other =>
        val dir = dirs.get(ftype)
        require(dir != null, s"Invalid stream URI: $ftype not found.")
        new File(dir, fname)
    }

    if (file != null && file.isFile()) {
      new FileSegmentManagedBuffer(rpcEnv.transportConf, file, 0, file.length())
    } else {
      null
    }
  }

  override def addFile(file: File): String = {
    val existingPath = files.putIfAbsent(file.getName, file)
    require(existingPath == null || existingPath == file,
      s"File ${file.getName} was already registered with a different path " +
        s"(old path = $existingPath, new path = $file")
    s"${rpcEnv.address.toSparkURL}/files/${Utils.encodeFileNameToURIRawPath(file.getName())}"
  }

  override def addJar(file: File): String = {
    val existingPath = jars.putIfAbsent(file.getName, file)
    require(existingPath == null || existingPath == file,
      s"File ${file.getName} was already registered with a different path " +
        s"(old path = $existingPath, new path = $file")
    s"${rpcEnv.address.toSparkURL}/jars/${Utils.encodeFileNameToURIRawPath(file.getName())}"
  }

  override def addDirectory(baseUri: String, path: File): String = {
    val fixedBaseUri = validateDirectoryUri(baseUri)
    require(dirs.putIfAbsent(fixedBaseUri.stripPrefix("/"), path) == null,
      s"URI '$fixedBaseUri' already registered.")
    s"${rpcEnv.address.toSparkURL}$fixedBaseUri"
  }

} 
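The addFile/addJar methods above rely on a common putIfAbsent idiom: register a value once per key, tolerate re-registering the same value, and fail fast on a conflicting one. A standalone sketch of that idiom; registerOnce is a hypothetical helper, not part of Spark:

import java.util.concurrent.ConcurrentHashMap

def registerOnce[K, V](map: ConcurrentHashMap[K, V], key: K, value: V): Unit = {
  val existing = map.putIfAbsent(key, value)   // atomic check-and-insert
  require(existing == null || existing == value,
    s"$key already registered with a different value ($existing)")
}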
Example 85
Source File: LocalMetadataCache.scala    From NSDb   with Apache License 2.0 5 votes vote down vote up
package io.radicalbit.nsdb.cluster.coordinator.mockedActors

import java.util.concurrent.ConcurrentHashMap

import akka.actor.Actor
import io.radicalbit.nsdb.cluster.actor.ReplicatedMetadataCache._
import io.radicalbit.nsdb.common.model.MetricInfo
import io.radicalbit.nsdb.common.protocol.{Coordinates, NSDbSerializable}
import io.radicalbit.nsdb.model.Location

import scala.collection.JavaConverters._
import scala.collection.mutable

class LocalMetadataCache extends Actor {

  import LocalMetadataCache.{DeleteAll, DeleteDone}

  val locations: mutable.Map[MetricLocationsCacheKey, Set[Location]] = mutable.Map.empty

  val metricInfo: ConcurrentHashMap[MetricInfoCacheKey, MetricInfo] = new ConcurrentHashMap

  val coordinates: mutable.Set[Coordinates] = mutable.Set.empty

  def receive: Receive = {
    case GetDbsFromCache =>
      sender ! DbsFromCacheGot(coordinates.map(_.db).toSet)
    case GetNamespacesFromCache(db) =>
      sender ! NamespacesFromCacheGot(db, coordinates.filter(c => c.db == db).map(_.namespace).toSet)
    case GetMetricsFromCache(db, namespace) =>
      sender ! MetricsFromCacheGot(db,
                                   namespace,
                                   coordinates.filter(c => c.db == db && c.namespace == namespace).map(_.metric).toSet)
    case DropMetricFromCache(db, namespace, metric) =>
      coordinates -= Coordinates(db, namespace, metric)
      locations --= locations.collect {
        case (key, _) if key.db == db && key.namespace == namespace && key.metric == metric => key
      }
      sender() ! MetricFromCacheDropped(db, namespace, metric)
    case DropNamespaceFromCache(db, namespace) =>
      coordinates --= coordinates.filter(c => c.db == db && c.namespace == namespace)
      sender() ! NamespaceFromCacheDropped(db, namespace)
    case PutLocationInCache(db, namespace, metric, value) =>
      val key              = MetricLocationsCacheKey(db, namespace, metric)
      val previousLocation = locations.getOrElse(key, Set.empty)
      locations.put(key, previousLocation + value)
      coordinates += Coordinates(db, namespace, metric)
      sender ! LocationCached(db, namespace, metric, value)
    case GetLocationsFromCache(db, namespace, metric) =>
      sender ! LocationsCached(db,
                               namespace,
                               metric,
                               locations.getOrElse(MetricLocationsCacheKey(db, namespace, metric), Set.empty).toList)
    case DeleteAll =>
      locations.clear()
      metricInfo.clear()
      coordinates.clear()
      sender() ! DeleteDone
    case PutMetricInfoInCache(info @ MetricInfo(db, namespace, metric, _, _)) =>
      val key = MetricInfoCacheKey(db, namespace, metric)
      Option(metricInfo.get(key)) match {
        case Some(v) =>
          sender ! MetricInfoAlreadyExisting(key, v)
        case None =>
          metricInfo.put(key, info)
          sender ! MetricInfoCached(db, namespace, metric, Some(info))
      }
    case EvictLocation(db, namespace, location) =>
      val key              = MetricLocationsCacheKey(db, namespace, location.metric)
      val previousLocation = locations.getOrElse(key, Set.empty)
      locations.put(key, previousLocation - location)
      sender ! Right(LocationEvicted(db, namespace, location))
    case GetMetricInfoFromCache(db, namespace, metric) =>
      val key = MetricInfoCacheKey(db, namespace, metric)
      sender ! MetricInfoCached(db, namespace, metric, Option(metricInfo.get(key)))
    case GetAllMetricInfoWithRetention =>
      sender() ! AllMetricInfoWithRetentionGot(metricInfo.values().asScala.toSet.filter(_.retention > 0))
  }
}

object LocalMetadataCache {
  case object DeleteAll  extends NSDbSerializable
  case object DeleteDone extends NSDbSerializable
} 
Example 86
Source File: BigDatalogConf.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package edu.ucla.cs.wis.bigdatalog.spark

import java.util.concurrent.ConcurrentHashMap
import org.apache.spark.storage.StorageLevel

// modeled after SparkConf.scala
class BigDatalogConf(sparkDefaultParallelism: Int) extends Serializable {
  private val settings = new ConcurrentHashMap[String, String]()

  def defaultStorageLevel(): StorageLevel = {
    val storageLevelType = get("spark.datalog.storage.level", "MEMORY_ONLY")

    val lastPart = storageLevelType.split("_").last
    if (lastPart.forall(_.isDigit)) {
      val storageLevelName = storageLevelType.substring(0, storageLevelType.lastIndexOf("_"))
      val storageLevel = StorageLevel.fromString(storageLevelName)
      //new StorageLevel(useDisk, useMemory, useOffHeap, deserialized, replication))
      StorageLevel(storageLevel.useDisk,
        storageLevel.useMemory,
        storageLevel.useOffHeap,
        storageLevel.deserialized,
        lastPart.toInt)
    } else {
      StorageLevel.fromString(storageLevelType)
    }
  }

  def get(key: String): String = {
    getOption(key).getOrElse(throw new NoSuchElementException(key))
  }

  def get(key: String, defaultValue: String): String = {
    getOption(key).getOrElse(defaultValue)
  }

  def getOption(key: String): Option[String] = {
    Option(settings.get(key))
  }

  def getInt(key: String, defaultValue: Int): Int = {
    getOption(key).map(_.toInt).getOrElse(defaultValue)
  }

  def getInt(key: String): Int = {
    getOption(key).map(_.toInt).getOrElse(throw new NoSuchElementException(key))
  }

  def getLong(key: String, defaultValue: Long): Long = {
    getOption(key).map(_.toLong).getOrElse(defaultValue)
  }

  def getDouble(key: String, defaultValue: Double): Double = {
    getOption(key).map(_.toDouble).getOrElse(defaultValue)
  }

  def getBoolean(key: String, defaultValue: Boolean): Boolean = {
    getOption(key).map(_.toBoolean).getOrElse(defaultValue)
  }

  def set(key: String, value: String): BigDatalogConf = {
    if (key == null) throw new NullPointerException("null key")
    if (value == null) throw new NullPointerException("null value for " + key)
    settings.put(key, value)
    this
  }
} 
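A hypothetical usage of the BigDatalogConf defined above (it assumes Spark's StorageLevel is on the classpath, as in the example; keys and values are illustrative):

val conf = new BigDatalogConf(sparkDefaultParallelism = 8)
conf.set("spark.datalog.storage.level", "MEMORY_AND_DISK_2")

conf.defaultStorageLevel()       // MEMORY_AND_DISK with replication = 2
conf.getInt("missing.key", 4)    // falls back to 4
conf.getOption("missing.key")    // None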
Example 87
Source File: NettyStreamManager.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.rpc.netty

import java.io.File
import java.util.concurrent.ConcurrentHashMap

import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer}
import org.apache.spark.network.server.StreamManager
import org.apache.spark.rpc.RpcEnvFileServer
import org.apache.spark.util.Utils


private[netty] class NettyStreamManager(rpcEnv: NettyRpcEnv)
  extends StreamManager with RpcEnvFileServer {

  private val files = new ConcurrentHashMap[String, File]()
  private val jars = new ConcurrentHashMap[String, File]()

  override def getChunk(streamId: Long, chunkIndex: Int): ManagedBuffer = {
    throw new UnsupportedOperationException()
  }

  override def openStream(streamId: String): ManagedBuffer = {
    val Array(ftype, fname) = streamId.stripPrefix("/").split("/", 2)
    val file = ftype match {
      case "files" => files.get(fname)
      case "jars" => jars.get(fname)
      case _ => throw new IllegalArgumentException(s"Invalid file type: $ftype")
    }

    require(file != null && file.isFile(), s"File not found: $streamId")
    new FileSegmentManagedBuffer(rpcEnv.transportConf, file, 0, file.length())
  }

  override def addFile(file: File): String = {
    require(files.putIfAbsent(file.getName(), file) == null,
      s"File ${file.getName()} already registered.")
    s"${rpcEnv.address.toSparkURL}/files/${Utils.encodeFileNameToURIRawPath(file.getName())}"
  }

  override def addJar(file: File): String = {
    require(jars.putIfAbsent(file.getName(), file) == null,
      s"JAR ${file.getName()} already registered.")
    s"${rpcEnv.address.toSparkURL}/jars/${Utils.encodeFileNameToURIRawPath(file.getName())}"
  }

} 
Example 88
Source File: MutableURLClassLoader.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.util

import java.net.{URLClassLoader, URL}
import java.util.Enumeration
import java.util.concurrent.ConcurrentHashMap

import scala.collection.JavaConverters._


// The enclosing class declaration was dropped from this excerpt; in Spark this
// block belongs to ChildFirstURLClassLoader (defined in the same file), which
// extends MutableURLClassLoader and falls back to a ParentClassLoader wrapper.
private[spark] class ChildFirstURLClassLoader(urls: Array[URL], parent: ClassLoader)
  extends MutableURLClassLoader(urls, null) {

  private val parentClassLoader = new ParentClassLoader(parent)

  private val locks = new ConcurrentHashMap[String, Object]()

  override def loadClass(name: String, resolve: Boolean): Class[_] = {
    var lock = locks.get(name)
    if (lock == null) {
      val newLock = new Object()
      lock = locks.putIfAbsent(name, newLock)
      if (lock == null) {
        lock = newLock
      }
    }

    lock.synchronized {
      try {
        super.loadClass(name, resolve)
      } catch {
        case e: ClassNotFoundException =>
          parentClassLoader.loadClass(name, resolve)
      }
    }
  }

  override def getResource(name: String): URL = {
    val url = super.findResource(name)
    val res = if (url != null) url else parentClassLoader.getResource(name)
    res
  }

  override def getResources(name: String): Enumeration[URL] = {
    val childUrls = super.findResources(name).asScala
    val parentUrls = parentClassLoader.getResources(name).asScala
    (childUrls ++ parentUrls).asJavaEnumeration
  }

  override def addURL(url: URL) {
    super.addURL(url)
  }

} 
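The loadClass override above builds one lock object per class name with a get / putIfAbsent / re-check sequence. On Java 8+ the same per-key lock can be obtained with a single atomic call; a small sketch (PerNameLocks is an illustrative name, lambda conversion assumes Scala 2.12+):

import java.util.concurrent.ConcurrentHashMap

class PerNameLocks {
  private val locks = new ConcurrentHashMap[String, Object]()

  // computeIfAbsent collapses the get / putIfAbsent / re-check dance into one call
  def lockFor(name: String): Object = locks.computeIfAbsent(name, _ => new Object)
}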
Example 89
Source File: TimeStampedHashSet.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.util

import java.util.concurrent.ConcurrentHashMap

import scala.collection.JavaConverters._
import scala.collection.mutable.Set

private[spark] class TimeStampedHashSet[A] extends Set[A] {
  val internalMap = new ConcurrentHashMap[A, Long]()

  def contains(key: A): Boolean = {
    // ConcurrentHashMap.contains(x) is a legacy alias for containsValue(x);
    // key membership must be checked with containsKey
    internalMap.containsKey(key)
  }

  def iterator: Iterator[A] = {
    val jIterator = internalMap.entrySet().iterator()
    jIterator.asScala.map(_.getKey)
  }

  override def + (elem: A): Set[A] = {
    val newSet = new TimeStampedHashSet[A]
    newSet ++= this
    newSet += elem
    newSet
  }

  override def - (elem: A): Set[A] = {
    val newSet = new TimeStampedHashSet[A]
    newSet ++= this
    newSet -= elem
    newSet
  }

  override def += (key: A): this.type = {
    internalMap.put(key, currentTime)
    this
  }

  override def -= (key: A): this.type = {
    internalMap.remove(key)
    this
  }

  override def empty: Set[A] = new TimeStampedHashSet[A]()

  override def size(): Int = internalMap.size()

  override def foreach[U](f: (A) => U): Unit = {
    val iterator = internalMap.entrySet().iterator()
    while(iterator.hasNext) {
      f(iterator.next.getKey)
    }
  }

  
  def clearOldValues(threshTime: Long) {
    val iterator = internalMap.entrySet().iterator()
    while(iterator.hasNext) {
      val entry = iterator.next()
      if (entry.getValue < threshTime) {
        iterator.remove()
      }
    }
  }

  private def currentTime: Long = System.currentTimeMillis()
} 
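A short, hypothetical usage of the TimeStampedHashSet above, showing how clearOldValues prunes entries inserted before a cutoff:

val recent = new TimeStampedHashSet[String]
recent += "job-1"
Thread.sleep(10)
val cutoff = System.currentTimeMillis()
recent += "job-2"
recent.clearOldValues(cutoff)   // drops "job-1", keeps "job-2"
recent.contains("job-2")        // true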
Example 90
Source File: BlockManagerId.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.storage

import java.io.{Externalizable, IOException, ObjectInput, ObjectOutput}
import java.util.concurrent.ConcurrentHashMap

import org.apache.spark.SparkContext
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.util.Utils


// The BlockManagerId class definition was dropped from this excerpt; the code
// below is its companion object, which interns instances via a ConcurrentHashMap.
private[spark] object BlockManagerId {

  def apply(execId: String, host: String, port: Int): BlockManagerId =
    getCachedBlockManagerId(new BlockManagerId(execId, host, port))

  def apply(in: ObjectInput): BlockManagerId = {
    val obj = new BlockManagerId()
    obj.readExternal(in)
    getCachedBlockManagerId(obj)
  }

  val blockManagerIdCache = new ConcurrentHashMap[BlockManagerId, BlockManagerId]()

  def getCachedBlockManagerId(id: BlockManagerId): BlockManagerId = {
    blockManagerIdCache.putIfAbsent(id, id)
    blockManagerIdCache.get(id)
  }
} 
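getCachedBlockManagerId interns instances: putIfAbsent stores the first instance seen for each key, and every caller then reads back that canonical instance. The same pattern as a generic, standalone sketch (Interner is an illustrative name, not part of Spark):

import java.util.concurrent.ConcurrentHashMap

class Interner[A <: AnyRef] {
  private val cache = new ConcurrentHashMap[A, A]()

  // returns the first instance ever stored for an equal key
  def intern(a: A): A = {
    val prev = cache.putIfAbsent(a, a)
    if (prev == null) a else prev
  }
}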
Example 91
Source File: AuthManager.scala    From asura   with MIT License 5 votes vote down vote up
package asura.core.auth

import java.util.concurrent.ConcurrentHashMap

object AuthManager {

  private val operators = new ConcurrentHashMap[String, AuthorizeAndValidate]()

  def register(operator: AuthorizeAndValidate): Unit = {
    // containsKey, not contains (which is a legacy alias for containsValue)
    if (operators.containsKey(operator.`type`)) {
      throw new RuntimeException(s"${operator.`type`} already exists.")
    } else {
      operators.put(operator.`type`, operator)
    }
  }

  def apply(name: String): Option[AuthorizeAndValidate] = {
    Option(operators.get(name))
  }

  def getAll(): java.util.Collection[AuthorizeAndValidate] = {
    operators.values()
  }
} 
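register above checks for an existing key and then inserts in two separate steps, so two concurrent registrations of the same type could both pass the check. A sketch of a race-free variant that would sit in the same object, using putIfAbsent to make the check and the insert one atomic operation:

def registerAtomic(operator: AuthorizeAndValidate): Unit = {
  // putIfAbsent returns the previous value, or null if the key was free
  val previous = operators.putIfAbsent(operator.`type`, operator)
  if (previous != null) {
    throw new RuntimeException(s"${operator.`type`} already exists.")
  }
}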
Example 92
Source File: SparkSQLOperationsMenager.scala    From bdg-sequila   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hive.thriftserver.server

import java.util.{Map => JMap}
import java.util.concurrent.ConcurrentHashMap

import org.apache.hive.service.cli._
import org.apache.hive.service.cli.operation.{ExecuteStatementOperation, Operation, OperationManager}
import org.apache.hive.service.cli.session.HiveSession
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{SQLContext, SequilaSession}
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.hive.thriftserver.{ReflectionUtils, SparkExecuteStatementOperationSeq}
import org.apache.spark.sql.internal.SQLConf


private[thriftserver] class SparkSQLOperationManagerSeq(ss: SequilaSession)
  extends OperationManager with Logging {

  val handleToOperation = ReflectionUtils
    .getSuperField[JMap[OperationHandle, Operation]](this, "handleToOperation")

  val sessionToActivePool = new ConcurrentHashMap[SessionHandle, String]()
  val sessionToContexts = new ConcurrentHashMap[SessionHandle, SQLContext]()

  override def newExecuteStatementOperation(
                                             parentSession: HiveSession,
                                             statement: String,
                                             confOverlay: JMap[String, String],
                                             async: Boolean): ExecuteStatementOperation = synchronized {
    val sqlContext = sessionToContexts.get(parentSession.getSessionHandle)
    require(sqlContext != null, s"Session handle: ${parentSession.getSessionHandle} has not been" +
      s" initialized or had already closed.")
    val conf = sqlContext.sessionState.conf
    val hiveSessionState = parentSession.getSessionState
    setConfMap(conf, hiveSessionState.getOverriddenConfigurations)
    setConfMap(conf, hiveSessionState.getHiveVariables)
    val runInBackground = async && conf.getConf(HiveUtils.HIVE_THRIFT_SERVER_ASYNC)
    val operation = new SparkExecuteStatementOperationSeq(parentSession, statement, confOverlay,
      runInBackground)(ss, sessionToActivePool)
    handleToOperation.put(operation.getHandle, operation)
    logDebug(s"Created Operation for $statement with session=$parentSession, " +
      s"runInBackground=$runInBackground")
    operation
  }

  def setConfMap(conf: SQLConf, confMap: java.util.Map[String, String]): Unit = {
    val iterator = confMap.entrySet().iterator()
    while (iterator.hasNext) {
      val kv = iterator.next()
      conf.setConfString(kv.getKey, kv.getValue)
    }
  }
} 
Example 93
Source File: StreamEnvImpl.scala    From swave   with Mozilla Public License 2.0 5 votes vote down vote up
package swave.core.impl

import java.util.concurrent.{ConcurrentHashMap, TimeoutException}
import scala.annotation.tailrec
import scala.util.Try
import scala.concurrent.{Future, Promise}
import scala.concurrent.duration._
import com.typesafe.config.Config
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory
import swave.core.macros._
import swave.core._

private[core] final class StreamEnvImpl(val name: String,
                                        val config: Config,
                                        val settings: StreamEnv.Settings,
                                        val classLoader: ClassLoader)
    extends StreamEnv {

  val startTime = System.currentTimeMillis()

  val log = Logger(LoggerFactory.getLogger(name))

  val dispatchers = DispatchersImpl(settings.dispatcherSettings)

  val scheduler = SchedulerImpl(settings.schedulerSettings)

  if (settings.logConfigOnStart) log.info(settings.toString) // TODO: improve rendering

  def defaultDispatcher = dispatchers.defaultDispatcher

  def shutdown(): StreamEnv.Termination =
    new StreamEnv.Termination {
      val schedulerTermination   = scheduler.shutdown()
      val dispatchersTermination = dispatchers.shutdownAll()

      def isTerminated: Boolean = schedulerTermination.isCompleted && unterminatedDispatchers.isEmpty

      def unterminatedDispatchers: List[String] = dispatchersTermination()

      def awaitTermination(timeout: FiniteDuration): Unit = {
        requireArg(timeout >= Duration.Zero, "`timeout` must be >= 0")
        var deadline = System.nanoTime() + timeout.toNanos
        if (deadline < 0) deadline = Long.MaxValue // overflow protection

        @tailrec def await(): Unit =
          if (!isTerminated) {
            if (System.nanoTime() < deadline) {
              Thread.sleep(1L)
              await()
            } else {
              val unterminated =
                if (schedulerTermination.isCompleted) unterminatedDispatchers
                else "scheduler" :: unterminatedDispatchers
              throw new TimeoutException(
                s"StreamEnv did not shut down within specified timeout of $timeout.\n" +
                  s"Unterminated dispatchers: [${unterminated.mkString(", ")}]")
            }
          }

        await()
      }
    }

  private[this] val _extensions = new ConcurrentHashMap[ExtensionId[_], Future[_ <: Extension]]

  @tailrec def getOrLoadExtension[T <: Extension](id: ExtensionId[T]): Future[T] =
    _extensions.get(id) match {
      case null ⇒
        val promise = Promise[T]()
        _extensions.putIfAbsent(id, promise.future) match {
          case null ⇒
            val tryValue = Try(id.createExtension(this))
            promise.complete(tryValue)
            val future = Promise.fromTry(tryValue).future
            _extensions.put(id, future) // speed up future accesses somewhat
            future
          case _ ⇒ getOrLoadExtension(id)
        }
      case x ⇒ x.asInstanceOf[Future[T]]
    }
} 
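getOrLoadExtension above combines a Promise with a retry loop so that each extension is created exactly once. A much-simplified sketch of the same once-per-key guarantee built directly on computeIfAbsent; OnceCache is an illustrative name, it assumes Scala 2.12+ and an ExecutionContext, and it is not a drop-in replacement for the extension loader above:

import java.util.concurrent.ConcurrentHashMap
import scala.concurrent.{ExecutionContext, Future}

class OnceCache[K, V](create: K => V)(implicit ec: ExecutionContext) {
  private val cache = new ConcurrentHashMap[K, Future[V]]()

  // the Future is created at most once per key; the work itself runs on ec
  def get(key: K): Future[V] = cache.computeIfAbsent(key, k => Future(create(k)))
}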
Example 94
Source File: ManagedMap.scala    From polynote   with Apache License 2.0 5 votes vote down vote up
package polynote.kernel.util

import java.util.concurrent.ConcurrentHashMap

import scala.collection.JavaConverters._
import zio.{Cause, Exit, Promise, Reservation, Semaphore, UIO, ZIO, ZManaged}
import ZIO.{effect, effectTotal}

class ManagedMap[K, V] private (
  underlying: ConcurrentHashMap[K, (V, () => ZIO[Any, Nothing, Any])],
  semaphore: Semaphore,
  closed: Promise[Nothing, Unit]
) {

  private def acquire[R, E](res: Reservation[R, E, V]) = closed.isDone.flatMap {
    case false => res.acquire
    case true  => ZIO.halt(Cause.die(new IllegalStateException("Map is already closed; cannot acquire any more values")))
  }

  private def releaseFn[R](key: K, value: V, release: Exit[Any, Any] => ZIO[R, Nothing, Any], env: R): () => ZIO[Any, Nothing, Unit] =
    () => effectTotal(underlying.remove(key, value)) *> release(Exit.succeed(())).unit.provide(env)

  def getOrCreate[R, E](key: K)(create: => ZManaged[R, E, V]): ZIO[R, E, V] = effectTotal {
    if (underlying.containsKey(key)) {
      Option(underlying.get(key)).map(_._1)
    } else None
  }.get.catchAll {
    _ => semaphore.withPermit {
      effectTotal(underlying.containsKey(key)).flatMap {
        case true  => ZIO.succeed(underlying.get(key)._1)
        case false => create.reserve.flatMap {
          res =>
            for {
              r <- ZIO.environment[R]
              v <- acquire(res)
              _ <- effectTotal(underlying.put(key, (v, releaseFn(key, v, res.release, r))))
            } yield v
        }
      }
    }
  }

  def get(key: K): UIO[Option[V]] = effectTotal(Option(underlying.get(key)).map(_._1))

  def put[R, E](key: K, value: ZManaged[R, E, V]): ZIO[R, E, V] = value.reserve.flatMap {
    res =>
      for {
        r    <- ZIO.environment[R]
        v    <- acquire(res)
        prev <- effectTotal(underlying.put(key, (v, releaseFn(key, v, res.release, r))))
        _    <- if (prev != null) prev._2.apply().as(()) else ZIO.unit
      } yield v
  }

  def remove(key: K): ZIO[Any, Nothing, Boolean] = effectTotal(Option(underlying.remove(key))).flatMap {
    case Some((_, release)) => release().as(true)
    case None => ZIO.succeed(false)
  }

  def close(): UIO[Unit] = closed.succeed(()) *> ZIO.foreachPar_(underlying.asScala.toSeq) {
    case (k, null)    => effectTotal(Option(underlying.remove(k))).as(())
    case (k, (_, release)) => effectTotal(Option(underlying.remove(k))) *> release()
  }

}

object ManagedMap {

} 
Example 95
Source File: RefMap.scala    From polynote   with Apache License 2.0 5 votes vote down vote up
package polynote.kernel.util

import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConverters._


import cats.syntax.traverse._
import cats.instances.list._
import zio.interop.catz._
import zio.{Semaphore, Task, RIO, UIO, ZIO, Ref}



class RefMap[K, V] private (
  underlying: ConcurrentHashMap[K, Ref[V]],
  semaphore: Semaphore
) {

  def getOrCreate[E](key: K)(create: => RIO[E, V]): RIO[E, V] = synchronized {
    if (underlying.containsKey(key)) {
      ZIO.succeed(underlying.get(key)).flatMap(_.get)
    } else {
      semaphore.withPermit {
        ZIO(underlying.containsKey(key)).flatMap {
          case true => ZIO.succeed(underlying.get(key)).flatMap(_.get)
          case false => create.tap(v => Ref.make[V](v).flatMap(ref => ZIO.effectTotal(Option(underlying.put(key, ref))).unit))
        }
      }
    }
  }

  def get(key: K): UIO[Option[V]] = ZIO.effectTotal(Option(underlying.get(key))).flatMap {
    case Some(ref) => ref.get.map(Some(_))
    case None => ZIO.succeed(None)
  }

  def put(key: K, value: V): UIO[Option[V]] = ZIO.effectTotal(Option(underlying.get(key))).flatMap {
    case Some(ref) => ref.modify(old => old -> value).map(Some(_))
    case None      => semaphore.withPermit {
      Ref.make[V](value).flatMap {
        ref => ZIO.effectTotal(Option(underlying.get(key))).flatMap {
          case Some(ref) => ref.modify(old => old -> value).map(Some(_))
          case None      =>
            ZIO.effectTotal(Option(underlying.put(key, ref))).as(None)
        }
      }
    }
  }

  def remove(key: K): UIO[Option[V]] = ZIO.effectTotal(Option(underlying.remove(key))).flatMap {
    case Some(ref) => ref.get.map(Some(_))
    case None => ZIO.succeed(None)
  }

  def keys: UIO[List[K]] = ZIO.effectTotal(underlying.keys().asScala.toList)

  def values: UIO[List[V]] = underlying.values().asScala.toList.map(_.get).sequence

  def isEmpty: UIO[Boolean] = ZIO.effectTotal(underlying.isEmpty)
}

object RefMap {
  def empty[K, V]: UIO[RefMap[K, V]] = Semaphore.make(1).map {
    semaphore => new RefMap[K, V](new ConcurrentHashMap[K, Ref[V]](), semaphore)
  }

  def of[K, V](initialValues: (K, V)*): ZIO[Any, Throwable, RefMap[K, V]] = Semaphore.make(1).flatMap {
    semaphore => initialValues.toList.map {
      case (k, v) => Ref.make[V](v).map {
        ref => (k, ref)
      }
    }.sequence.map {
      initialRefs => new RefMap[K, V](new ConcurrentHashMap[K, Ref[V]](initialRefs.toMap.asJava), semaphore)
    }
  }
} 
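A hypothetical usage of the RefMap above, relying on the zio imports already shown in the example; keys and values are illustrative:

val program: Task[(Int, List[Int])] =
  for {
    m   <- RefMap.empty[String, Int]
    v   <- m.getOrCreate("answer")(ZIO.succeed(42)) // computed once, then cached
    _   <- m.put("answer", 43)                      // yields the previous value, if any
    all <- m.values
  } yield (v, all)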
Example 96
Source File: MockEntityService.scala    From recogito2   with Apache License 2.0 5 votes vote down vote up
package services.entity.builtin.importer

import collection.JavaConverters._
import com.vividsolutions.jts.geom.{Coordinate, Envelope}
import java.util.UUID
import java.util.concurrent.ConcurrentHashMap
import scala.concurrent.{ExecutionContext, Future}
import services.Page
import services.entity.{Entity, EntityRecord, EntityType}
import services.entity.builtin.{EntityService, IndexedEntity}
import storage.es.ES

class MockEntityService(implicit ctx: ExecutionContext) extends EntityService {

  val mockIndex = new ConcurrentHashMap[UUID, Entity]

  override def countEntities(eType: Option[EntityType] = None): Future[Long] =
    eType match {
      case Some(entityType) => ???
      case None => Future.successful(mockIndex.size)
    }

  override def upsertEntities(entities: Seq[IndexedEntity]): Future[Boolean] =
    Future {
      entities.foreach { e =>
        mockIndex.put(e.entity.unionId, e.entity)
      }
      true
    }

  override def deleteEntities(ids: Seq[UUID]): Future[Boolean] =
    Future {
      ids.foldLeft(true){ case (success, id) =>
        val isDefined = Option(mockIndex.remove(id)).isDefined
        success && isDefined
      }
    }

  override def findByURI(uri: String): Future[Option[IndexedEntity]] =
    Future {
      val normalizedURI = EntityRecord.normalizeURI(uri)

      // mockIndex is keyed by UUID, so this lookup by (String) URI never matches;
      // hits are effectively resolved by the alternative-URI scan below
      Option(mockIndex.get(normalizedURI)) match {
        case Some(hit) => Some(IndexedEntity(hit))

        case None => // Might still be listed under alternative record URI
          mockIndex.asScala.values.find { entity =>
            entity.uris.contains(normalizedURI)
          } map { IndexedEntity(_) }
      }
    }

  override def findConnected(uris: Seq[String]): Future[Seq[IndexedEntity]] =
    Future {
      val normalized = uris.map(uri => EntityRecord.normalizeURI(uri)).toSet
      mockIndex.asScala.values.filter { entity =>
        val allIdsAndMatches = entity.uris ++ entity.links.map(_.uri)
        allIdsAndMatches.exists(normalized.contains(_))
      }.toSeq.map { IndexedEntity(_) }
    }

  override def searchEntities(
    query      : String,
    eType      : Option[EntityType] = None,
    offset     : Int = 0,
    limit      : Int = ES.MAX_SIZE,
    sortFrom   : Option[Coordinate] = None,
    authorities: Option[Seq[String]] = None): Future[Page[IndexedEntity]] =

    Future {
      val results = mockIndex.asScala.values.toSeq
        .filter { entity =>
          val names = entity.names.keys.toSeq.map(_.name.toLowerCase)
          names.contains(query.toLowerCase)
        } map { IndexedEntity(_) }

      Page(0l, results.size, 0, limit, results.take(limit))
    }

  
  override def countByAuthority(eType: Option[EntityType] = None): Future[Seq[(String, Long)]] = ???

  override def listEntitiesInDocument(docId  : String,
    eType  : Option[EntityType] = None,
    offset : Int = 0,
    limit  : Int = ES.MAX_SIZE
  ): Future[Page[(IndexedEntity, Long)]] = ???

  def searchEntitiesInDocument(
    query  : String,
    docId  : String,
    eType  : Option[EntityType] = None,
    offset : Int = 0,
    limit  : Int = ES.MAX_SIZE
  ): Future[Page[IndexedEntity]] = ???

  def getDocumentSpatialExtent(docId : String): Future[Envelope] = ???
  
  def deleteBySourceAuthority(authority: String): Future[Boolean] = ???

} 
Example 97
Source File: SbtCoursierCache.scala    From sbt-coursier   with Apache License 2.0 5 votes vote down vote up
package lmcoursier.internal

import java.util.concurrent.ConcurrentHashMap

import coursier.core._
import sbt.librarymanagement.UpdateReport
import coursier.cache.FileCache
import coursier.util.Task

// private[coursier]
class SbtCoursierCache {

  import SbtCoursierCache._

  private val resolutionsCache = new ConcurrentHashMap[ResolutionKey, Map[Configuration, Resolution]]
  // these may actually not need to be cached any more, now that the resolutions
  // are cached
  private val reportsCache = new ConcurrentHashMap[ReportKey, UpdateReport]


  def resolutionOpt(key: ResolutionKey): Option[Map[Configuration, Resolution]] =
    Option(resolutionsCache.get(key))
  def putResolution(key: ResolutionKey, res: Map[Configuration, Resolution]): Unit =
    resolutionsCache.put(key, res)

  def reportOpt(key: ReportKey): Option[UpdateReport] =
    Option(reportsCache.get(key))
  def putReport(key: ReportKey, report: UpdateReport): Unit =
    reportsCache.put(key, report)

  def clear(): Unit = {
    resolutionsCache.clear()
    reportsCache.clear()
  }

  def isEmpty: Boolean =
    resolutionsCache.isEmpty && reportsCache.isEmpty

}

// private[coursier]
object SbtCoursierCache {

  final case class ResolutionKey(
    dependencies: Seq[(Configuration, Dependency)],
    internalRepositories: Seq[Repository],
    mainRepositories: Seq[Repository],
    fallbackRepositories: Seq[Repository],
    params: ResolutionParams,
    cache: FileCache[Task],
    sbtClassifiers: Boolean
  )

  final case class ReportKey(
    dependencies: Seq[(Configuration, Dependency)],
    resolution: Map[Configuration, Resolution],
    withClassifiers: Boolean,
    sbtClassifiers: Boolean,
    includeSignatures: Boolean
  )


  // private[coursier]
  val default = new SbtCoursierCache

} 
Example 98
Source File: Channels.scala    From scala-loci   with Apache License 2.0 5 votes vote down vote up
package loci
package messaging

import transmitter.RemoteRef

import java.util.concurrent.ConcurrentHashMap

object Channels {
  trait Channel {
    val name: String
    val remote: RemoteRef
  }
}

class Channels[C <: Channels.Channel, R <: RemoteRef](
    createChannel: (String, String, R) => C,
    closeChannel: (C, Boolean) => Unit) {

  private val channels = new ConcurrentHashMap[(String, R), C]

  def obtain(name: String, anchorDefault: String, remote: R): C = {
    val channelId = name -> remote
    val channel = createChannel(name, anchorDefault, remote)
    if (remote.connected) {
      val obtainedChannel =
        Option(channels.putIfAbsent(channelId, channel)) getOrElse channel

      if (!remote.connected)
        channels.remove(obtainedChannel)

      obtainedChannel
    }
    else
      channel
  }

  def get(name: String, remote: R): Option[C] = {
    val channelId = name -> remote
    if (remote.connected)
      Option(channels get channelId)
    else
      None
  }

  def close(channel: C, notifyRemote: Boolean): Unit = {
    val channelId = (channel.name, channel.remote)
    Option(channels.remove(channelId)) foreach { closeChannel(_, notifyRemote) }
  }

  def close(remote: R): Unit = {
    val iterator = channels.keySet.iterator
    while (iterator.hasNext)
      iterator.next() match {
        case id @ (_, `remote`) =>
          Option(channels.remove(id)) foreach { closeChannel(_, false) }
        case _ =>
      }
  }

  def isOpen(channel: C): Boolean = {
    val channelId = (channel.name, channel.remote)
    channel == (channels get channelId)
  }
} 
Example 99
Source File: Bindings.scala    From scala-loci   with Apache License 2.0 5 votes vote down vote up
package loci
package registry

import transmitter._

import java.util.concurrent.ConcurrentHashMap

import scala.util.{Failure, Try}

class Bindings[A <: AbstractionRef](
    request: (A, MessageBuffer) => Unit,
    respond: (A, Try[MessageBuffer]) => Unit) {

  private val bindings = new ConcurrentHashMap[
    String, (MessageBuffer, A) => Try[MessageBuffer]]

  private val responseHandlers = new ConcurrentHashMap[
    Channel, Notice.Steady.Source[Try[MessageBuffer]]]

  def bind[T](binding: Binding[T])(function: RemoteRef => T): Unit =
    bindings.put(binding.name, binding.dispatch(function, _, _))

  def lookup[T, R](binding: Binding[T], createAbstraction: () => A): binding.RemoteCall =
    binding.call(createAbstraction) { (message, abstraction) =>
      val channel = abstraction.channel
      val response = Notice.Steady[Try[MessageBuffer]]

      responseHandlers.put(channel, response)

      if (!abstraction.remote.connected)
        channelsClosed()

      logging.trace(s"sending remote access for $abstraction")

      request(abstraction, message)
      response.notice
    }

  def processRequest(
      message: MessageBuffer, name: String, abstraction: A): Unit = {
    logging.trace(s"handling remote access for $abstraction")

    val value = logging.tracing run {
      bindings get name match {
        case null =>
          Failure(new RemoteAccessException(s"request for $abstraction could not be dispatched"))
        case dispatch =>
          dispatch(message, abstraction)
      }
    }

    value.failed foreach { exception =>
      logging.warn("local exception upon remote access propagated to remote instance", exception)
    }

    logging.trace(s"sending remote access response for $abstraction")

    respond(abstraction, value)
  }

  def processResponse(
      message: Try[MessageBuffer], name: String, abstraction: A): Unit =
    responseHandlers.remove(abstraction.channel) match {
      case null =>
        logging.warn(s"unprocessed message for $abstraction [no handler]: $message")
      case response =>
        response.set(message)
    }

  def channelsClosed(): Unit = {
    val iterator = responseHandlers.entrySet.iterator
    while (iterator.hasNext) {
      val entry = iterator.next()
      if (!entry.getKey.open) {
        entry.getValue.trySet(Failure(new RemoteAccessException(RemoteAccessException.RemoteDisconnected)))
        iterator.remove()
      }
    }
  }
} 
Example 100
Source File: ArtifactCaching.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.persistence

import java.lang.ref.SoftReference
import java.util.concurrent.ConcurrentHashMap

import io.vamp.common.Artifact
import io.vamp.common.notification.NotificationProvider

import scala.concurrent.Future

trait ArtifactCaching extends TypeOfArtifact {
  this: NotificationProvider ⇒

  private val cache = new ConcurrentHashMap[String, SoftReference[Future[_]]]()

  protected def cacheAll(enabled: Boolean)(`type`: Class[_ <: Artifact], page: Int, perPage: Int)(otherwise: () ⇒ Future[ArtifactResponseEnvelope]): Future[ArtifactResponseEnvelope] = {
    if (enabled) retrieve(cacheKey(type2string(`type`), "*", page, perPage), otherwise) else otherwise()
  }

  protected def cacheGet(enabled: Boolean)(name: String, `type`: Class[_ <: Artifact])(otherwise: () ⇒ Future[Option[Artifact]]): Future[Option[Artifact]] = {
    if (enabled) retrieve(cacheKey(type2string(`type`), name), otherwise) else otherwise()
  }

  protected def cacheSet(enabled: Boolean)(artifact: Artifact)(otherwise: () ⇒ Future[Artifact]): Future[Artifact] = {
    if (enabled) cache.clear()
    otherwise()
  }

  protected def cacheDelete(enabled: Boolean)(name: String, `type`: Class[_ <: Artifact])(otherwise: () ⇒ Future[Boolean]): Future[Boolean] = {
    if (enabled) cache.clear()
    otherwise()
  }

  private def retrieve[T](key: String, otherwise: () ⇒ Future[T]): Future[T] = {
    Option(cache.get(key)).flatMap(value ⇒ Option(value.get())).getOrElse {
      val future = otherwise()
      cache.put(key, new SoftReference(future))
      future
    }.asInstanceOf[Future[T]]
  }

  private def cacheKey(`type`: String, name: String, page: Int = 1, perPage: Int = 1): String = s"${`type`}/$name/$page/$perPage"
} 
Example 101
Source File: ResolverCache.scala    From nexus-kg   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.cache

import java.util.UUID
import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import cats.Monad
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.commons.cache.{KeyValueStore, KeyValueStoreConfig}
import ch.epfl.bluebrain.nexus.kg.cache.Cache._
import ch.epfl.bluebrain.nexus.kg.resolve.Resolver
import ch.epfl.bluebrain.nexus.kg.resources.ProjectIdentifier.ProjectRef
import ch.epfl.bluebrain.nexus.rdf.Iri.AbsoluteIri

class ResolverCache[F[_]: Effect: Timer] private (projectToCache: ConcurrentHashMap[UUID, ResolverProjectCache[F]])(
    implicit as: ActorSystem,
    config: KeyValueStoreConfig
) {

  // ResolverCache's method bodies were elided in this excerpt
}
private class ResolverProjectCache[F[_]: Monad] private (store: KeyValueStore[F, AbsoluteIri, Resolver])
    extends Cache[F, AbsoluteIri, Resolver](store) {

  private implicit val ordering: Ordering[Resolver] = Ordering.by(_.priority)

  def get: F[List[Resolver]] = store.values.map(_.toList.sorted)

  def put(resolver: Resolver): F[Unit] =
    if (resolver.deprecated) store.remove(resolver.id)
    else store.put(resolver.id, resolver)

}

private object ResolverProjectCache {

  def apply[F[_]: Effect: Timer](
      project: ProjectRef
  )(implicit as: ActorSystem, config: KeyValueStoreConfig): ResolverProjectCache[F] =
    new ResolverProjectCache(KeyValueStore.distributed(s"resolver-${project.id}", (_, resolver) => resolver.rev))

}

object ResolverCache {

  def apply[F[_]: Effect: Timer](implicit as: ActorSystem, config: KeyValueStoreConfig): ResolverCache[F] =
    new ResolverCache(new ConcurrentHashMap[UUID, ResolverProjectCache[F]]())
} 
Example 102
Source File: StorageCache.scala    From nexus-kg   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.cache

import java.time.{Clock, Instant}
import java.util.UUID
import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import cats.Monad
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.commons.cache.{KeyValueStore, KeyValueStoreConfig}
import ch.epfl.bluebrain.nexus.kg.RevisionedValue
import ch.epfl.bluebrain.nexus.kg.cache.Cache._
import ch.epfl.bluebrain.nexus.kg.cache.StorageProjectCache._
import ch.epfl.bluebrain.nexus.kg.resources.ProjectIdentifier.ProjectRef
import ch.epfl.bluebrain.nexus.kg.storage.Storage
import ch.epfl.bluebrain.nexus.rdf.Iri.AbsoluteIri

class StorageCache[F[_]: Effect: Timer] private (projectToCache: ConcurrentHashMap[UUID, StorageProjectCache[F]])(
    implicit as: ActorSystem,
    config: KeyValueStoreConfig,
    clock: Clock
) {

  // StorageCache's method bodies were elided in this excerpt
}
private class StorageProjectCache[F[_]: Monad] private (store: KeyValueStore[F, AbsoluteIri, RevisionedStorage])
    extends Cache[F, AbsoluteIri, RevisionedStorage](store) {

  private implicit val ordering: Ordering[RevisionedStorage] = Ordering.by((s: RevisionedStorage) => s.rev).reverse

  private implicit def revisioned(storage: Storage)(implicit instant: Instant): RevisionedStorage =
    RevisionedValue(instant.toEpochMilli, storage)

  def get: F[List[Storage]] =
    store.values.map(_.toList.sorted.map(_.value))

  def getDefault: F[Option[Storage]] =
    get.map(_.collectFirst { case storage if storage.default => storage })

  def getBy(id: AbsoluteIri): F[Option[Storage]] =
    get(id).map(_.collectFirst { case RevisionedValue(_, storage) if storage.id == id => storage })

  def put(storage: Storage)(implicit instant: Instant): F[Unit] =
    if (storage.deprecated) store.remove(storage.id)
    else store.put(storage.id, storage)
}

private object StorageProjectCache {

  type RevisionedStorage = RevisionedValue[Storage]

  def apply[F[_]: Effect: Timer](
      project: ProjectRef
  )(implicit as: ActorSystem, config: KeyValueStoreConfig): StorageProjectCache[F] =
    new StorageProjectCache(
      KeyValueStore.distributed(s"storage-${project.id}", (_, storage) => storage.value.rev)
    )

}

object StorageCache {

  def apply[F[_]: Timer: Effect](implicit as: ActorSystem, config: KeyValueStoreConfig, clock: Clock): StorageCache[F] =
    new StorageCache(new ConcurrentHashMap[UUID, StorageProjectCache[F]]())
} 
Example 103
Source File: InboundConnectionFilter.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.network

import java.net.{InetAddress, InetSocketAddress}
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger

import com.wavesplatform.utils.ScorexLogging
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel.{ChannelFuture, ChannelHandlerContext}
import io.netty.handler.ipfilter.AbstractRemoteAddressFilter

@Sharable
class InboundConnectionFilter(peerDatabase: PeerDatabase, maxInboundConnections: Int, maxConnectionsPerHost: Int)
    extends AbstractRemoteAddressFilter[InetSocketAddress]
    with ScorexLogging {
  private val inboundConnectionCount = new AtomicInteger(0)
  private val perHostConnectionCount = new ConcurrentHashMap[InetAddress, Int]
  private val emptyChannelFuture     = null.asInstanceOf[ChannelFuture]

  private def dec(remoteAddress: InetAddress) = {
    inboundConnectionCount.decrementAndGet()
    log.trace(s"Number of inbound connections: ${inboundConnectionCount.get()}")
    perHostConnectionCount.compute(remoteAddress, (_, cnt) => cnt - 1)
    emptyChannelFuture
  }

  override def accept(ctx: ChannelHandlerContext, remoteAddress: InetSocketAddress): Boolean = Option(remoteAddress.getAddress) match {
    case None =>
      log.debug(s"Can't obtain an address from $remoteAddress")
      false

    case Some(address) =>
      val newTotal        = inboundConnectionCount.incrementAndGet()
      val newCountPerHost = perHostConnectionCount.compute(address, (_, cnt) => Option(cnt).fold(1)(_ + 1))
      val isBlacklisted   = peerDatabase.blacklistedHosts.contains(address)

      val accepted = newTotal <= maxInboundConnections &&
        newCountPerHost <= maxConnectionsPerHost &&
        !isBlacklisted

      log.trace(
        s"Check inbound connection from $remoteAddress: new inbound total = $newTotal, " +
          s"connections with this host = $newCountPerHost, address ${if (isBlacklisted) "IS" else "is not"} blacklisted, " +
          s"${if (accepted) "is" else "is not"} accepted"
      )

      accepted
  }

  override def channelAccepted(ctx: ChannelHandlerContext, remoteAddress: InetSocketAddress): Unit =
    ctx.channel().closeFuture().addListener((_: ChannelFuture) => Option(remoteAddress.getAddress).foreach(dec))

  override def channelRejected(ctx: ChannelHandlerContext, remoteAddress: InetSocketAddress): ChannelFuture =
    Option(remoteAddress.getAddress).fold(emptyChannelFuture)(dec)
} 
Example 104
Source File: BrokenConnectionDetector.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.network

import java.util.concurrent.{ConcurrentHashMap, ScheduledFuture}

import com.wavesplatform.utils.ScorexLogging
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel._

import scala.concurrent.duration.FiniteDuration

@Sharable
class BrokenConnectionDetector(timeout: FiniteDuration) extends ChannelInboundHandlerAdapter with ScorexLogging {
  private val timeouts = new ConcurrentHashMap[Channel, ScheduledFuture[Unit]]

  override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef): Unit = {
    scheduleClose(ctx)
    super.channelRead(ctx, msg)
  }

  override def channelActive(ctx: ChannelHandlerContext): Unit = {
    scheduleClose(ctx)
    super.channelActive(ctx)
  }

  override def channelInactive(ctx: ChannelHandlerContext): Unit = {
    Option(timeouts.remove(ctx.channel())).foreach(_.cancel(false))
    super.channelInactive(ctx)
  }

  private def scheduleClose(ctx: ChannelHandlerContext): Unit =
    Option(
      timeouts.put(
        ctx.channel(),
        ctx.executor().schedule(timeout) {
          log.info(s"${id(ctx.channel())} Channel haven't sent a message in $timeout, closing it")
          ctx.channel().close()
        }
      ))
      .foreach(_.cancel(false))
} 
Example 105
Source File: ClientSpec.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.network

import java.util.concurrent.ConcurrentHashMap

import com.wavesplatform.{TransactionGen, Version}
import io.netty.buffer.{ByteBuf, Unpooled}
import io.netty.channel.Channel
import io.netty.channel.embedded.EmbeddedChannel
import io.netty.channel.group.ChannelGroup
import org.scalamock.scalatest.MockFactory
import org.scalatest.{FreeSpec, Matchers}

import scala.concurrent.duration.DurationInt
import scala.util.Random

class ClientSpec extends FreeSpec with Matchers with MockFactory with TransactionGen {

  private val clientHandshake = new Handshake(
    applicationName = "wavesI",
    applicationVersion = Version.VersionTuple,
    nodeName = "test",
    nodeNonce = Random.nextInt(),
    declaredAddress = None
  )

  private val serverHandshake = clientHandshake.copy(nodeNonce = Random.nextInt())

  "should send only a local handshake on connection" in {
    val channel = createEmbeddedChannel(mock[ChannelGroup])

    val sentClientHandshakeBuff = channel.readOutbound[ByteBuf]()
    Handshake.decode(sentClientHandshakeBuff) shouldBe clientHandshake
    channel.outboundMessages() shouldBe empty
  }

  "should add a server's channel to all channels after the handshake only" in {
    var channelWasAdded = false
    val allChannels     = mock[ChannelGroup]
    (allChannels.add _).expects(*).onCall { _: Channel =>
      channelWasAdded = true
      true
    }

    val channel = createEmbeddedChannel(allChannels)

    // skip the client's handshake
    channel.readOutbound[ByteBuf]()
    channelWasAdded shouldBe false

    val replyServerHandshakeBuff = Unpooled.buffer()
    serverHandshake.encode(replyServerHandshakeBuff)
    channel.writeInbound(replyServerHandshakeBuff)
    channelWasAdded shouldBe true
  }

  private def createEmbeddedChannel(allChannels: ChannelGroup) = new EmbeddedChannel(
    new HandshakeDecoder(PeerDatabase.NoOp),
    new HandshakeTimeoutHandler(1.minute),
    new HandshakeHandler.Client(
      handshake = clientHandshake,
      establishedConnections = new ConcurrentHashMap(),
      peerConnections = new ConcurrentHashMap(),
      peerDatabase = PeerDatabase.NoOp,
      allChannels = allChannels
    )
  )

} 
Example 106
Source File: ChannelGroupExtSpec.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.network

import java.util.concurrent.ConcurrentHashMap

import io.netty.channel._
import io.netty.channel.embedded.EmbeddedChannel
import io.netty.channel.group.DefaultChannelGroup
import io.netty.util.concurrent.GlobalEventExecutor
import org.scalamock.scalatest.MockFactory
import org.scalatest.{FreeSpec, Matchers}

import scala.jdk.CollectionConverters._
import scala.util.Random

class ChannelGroupExtSpec extends FreeSpec with Matchers with MockFactory {
  "broadcast" - {
    "should not send a message to the excluded channels" in {
      val message = "test"

      val channelGroup = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE)
      val received     = ConcurrentHashMap.newKeySet[Int]()

      def receiver(id: Int): Channel = new EmbeddedChannel(
        new ChannelId {
          override def asShortText(): String        = asLongText()
          override def asLongText(): String         = id.toString
          override def compareTo(o: ChannelId): Int = o.asLongText().toInt - id
        },
        new ChannelOutboundHandlerAdapter {
          override def write(ctx: ChannelHandlerContext, msg: scala.Any, promise: ChannelPromise): Unit = {
            received.add(id)
            super.write(ctx, msg, promise)
          }
        }
      )

      val allIds      = (0 to 5).toSet
      val allChannels = allIds.map(receiver)

      val excludedChannels = allChannels.filter(_ => Random.nextBoolean)
      val excludedIds      = excludedChannels.map(_.id.asLongText().toInt)

      allChannels.foreach(channelGroup.add)
      channelGroup.broadcast(message, excludedChannels).syncUninterruptibly()

      received.asScala shouldBe (allIds -- excludedIds)
    }
  }
} 
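The test above uses ConcurrentHashMap.newKeySet (Java 8+) to collect results from several channels concurrently; it returns a concurrent Set view backed by a ConcurrentHashMap. A minimal standalone sketch with illustrative values:

import java.util.concurrent.ConcurrentHashMap
import scala.jdk.CollectionConverters._

val seen = ConcurrentHashMap.newKeySet[String]()
seen.add("a")                    // true: newly added
seen.add("a")                    // false: already present
val snapshot = seen.asScala.toSet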
Example 107
Source File: CommTargetManagerImpl.scala    From almond   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package almond.interpreter.comm

import java.util.concurrent.ConcurrentHashMap

import almond.protocol.CommInfo
import cats.effect.IO

import scala.collection.JavaConverters._

final class CommTargetManagerImpl extends CommTargetManager {

  private val targets = new ConcurrentHashMap[String, IOCommTarget]
  private val commIdTargets = new ConcurrentHashMap[String, IOCommTarget]

  def addTarget(name: String, target: IOCommTarget): Unit = {

    val previous = targets.putIfAbsent(name, target)

    if (previous != null)
      throw new Exception(s"Target $name already registered")
  }

  def target(name: String): Option[IOCommTarget] =
    Option(targets.get(name))

  def removeTarget(name: String): Unit = {
    val target = targets.remove(name)
    if (target != null)
      for ((id, t) <- commIdTargets.asScala.iterator if t == target)
        targets.remove(id)
  }

  def addId(target: IOCommTarget, id: String): Unit = {
    val previous = commIdTargets.put(id, target)
    if (previous != null) {
      // TODO Log error
    }
  }

  def fromId(id: String): Option[IOCommTarget] =
    Option(commIdTargets.get(id))

  def removeId(id: String): Option[IOCommTarget] =
    Option(commIdTargets.remove(id))


  // small and unlikely chance of discrepancies between targets and commIdTargets, as we query them at different times…
  val allInfos: IO[Map[String, CommInfo.Info]] =
    IO {
      val map = targets.asScala.iterator.map { case (k, v) => v -> k }.toMap
      commIdTargets
        .asScala
        .iterator
        .map {
          case (id, target) =>
            id -> map.get(target)
        }
        .collect {
          case (id, Some(name)) =>
            id -> CommInfo.Info(name)
        }
        .toMap
    }

  // same as above
  def allIds(targetName: String): IO[Seq[String]] =
    IO {
      target(targetName) match {
        case Some(target) =>
          commIdTargets
            .asScala
            .iterator
            .collect {
              case (id, `target`) =>
                id
            }
            .toSeq
        case None =>
          Nil
      }
    }
} 
Example 108
Source File: ProgressSparkListener.scala    From almond   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package org.apache.spark.sql.almondinternals

import java.util.UUID
import java.util.concurrent.ConcurrentHashMap

import almond.interpreter.api.{CommHandler, CommTarget, OutputHandler}
import org.apache.spark.scheduler._
import org.apache.spark.sql.SparkSession

import scala.collection.JavaConverters._
import scala.util.Try

final class ProgressSparkListener(
  session: SparkSession,
  keep: Boolean,
  progress: Boolean
)(implicit
  publish: OutputHandler,
  commHandler: CommHandler
) extends SparkListener {

  private val elems = new ConcurrentHashMap[Int, StageElem]

  private val commTargetName = s"cancel-stage-${UUID.randomUUID()}"

  private var sentInitCode = false

  commHandler.registerCommTarget(
    commTargetName,
    CommTarget { (_, data) =>

      Try(com.github.plokhotnyuk.jsoniter_scala.core.readFromArray(data)(CancelStageReq.codec)).toEither match {
        case Left(err) =>
          publish.stderr(s"Error decoding message: $err" + '\n')
        case Right(req) =>
          val stageId = req.stageId
          if (elems.containsKey(stageId)) {
            publish.stderr(s"Cancelling stage $stageId" + '\n')
            session.sparkContext.cancelStage(stageId)
          } else {
            publish.stderr(s"Stage $stageId not found (only have ${elems.asScala.toVector.map(_._1).sorted})" + '\n')
          }
      }
    }
  )

  def newStageElem(stageId: Int, numTasks: Int, name: String, details: String): StageElem = {

    if (!elems.containsKey(stageId))
      elems.putIfAbsent(stageId, new StageElem(stageId, numTasks, keep, name, details))

    elems.get(stageId)
  }

  def stageElem(stageId: Int): StageElem =
    elems.get(stageId)

  override def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted): Unit =
    if (progress)
      Try {
        val elem = newStageElem(
          stageSubmitted.stageInfo.stageId,
          stageSubmitted.stageInfo.numTasks,
          stageSubmitted.stageInfo.name,
          stageSubmitted.stageInfo.details
        )
        elem.init(commTargetName, !sentInitCode)
        sentInitCode = true
      }

  override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit =
    if (progress)
      Try {
        val elem = stageElem(stageCompleted.stageInfo.stageId)
        elem.allDone()
        elem.update()
      }

  override def onTaskStart(taskStart: SparkListenerTaskStart): Unit =
    if (progress)
      Try {
        val elem = stageElem(taskStart.stageId)
        elem.taskStart()
        elem.update()
      }

  override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit =
    if (progress)
      Try {
        val elem = stageElem(taskEnd.stageId)
        elem.taskDone()
        elem.update()
      }

} 
Example 109
Source File: TaskMonitor.scala    From ez-framework   with Apache License 2.0 5 votes vote down vote up
package com.ecfront.ez.framework.core.monitor

import java.text.SimpleDateFormat
import java.util.Date
import java.util.concurrent.ConcurrentHashMap

import com.ecfront.ez.framework.core.EZ
import com.ecfront.ez.framework.core.helper.{TimeHelper, TimerHelper}
import com.ecfront.ez.framework.core.logger.Logging

import scala.collection.JavaConversions._

object TaskMonitor extends Logging {

  private val tasks = new ConcurrentHashMap[String, (String, Date)]()

  def add(taskName: String): String = {
    val taskId = EZ.createUUID
    tasks += taskId -> (taskName, new Date())
    taskId
  }

  def get(taskId: String): (String, Date) = {
    tasks.get(taskId)
  }

  def poll(taskId: String): (String, Date) = {
    val d = tasks.get(taskId)
    tasks -= taskId
    d
  }

  def remove(taskId: String): Unit = {
    tasks -= taskId
  }

  def hasTask(): Boolean = {
    tasks.nonEmpty
  }

  
  def waitFinish(timeout: Long = Long.MaxValue): Unit = {
    logger.info("[Monitor]waiting task finish...")
    val waitStart = new Date().getTime
    while (tasks.nonEmpty && new Date().getTime - waitStart < timeout) {
      Thread.sleep(500)
      if (new Date().getTime - waitStart > 60 * 1000) {
        var warn = "[Monitor]has some unfinished tasks:\r\n"
        warn += tasks.map(task => s" > id:${task._1} name:${task._2._1} start time:${TimeHelper.yyyy_MM_dd_HH_mm_ss_SSS.format(task._2._2)}").mkString("\r\n")
        logger.warn(warn)
      }
    }
    if (tasks.nonEmpty) {
      var error = "[Monitor]has some unfinished tasks,but time is out:\r\n"
      error += tasks.map(task => s" > id:${task._1} name:${task._2._1} start time:${TimeHelper.yyyy_MM_dd_HH_mm_ss_SSS.format(task._2._2)}").mkString("\r\n")
      logger.error(error)
    }
    // Wait one more second
    Thread.sleep(1000)
  }

  private val yyyy_MM_dd_HH_mm_ss_SSS = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss:SSS")
  TimerHelper.periodic(60L, {
    if (tasks.nonEmpty) {
      val info = new StringBuffer(s"\r\n--------------Current Execute Tasks : (${tasks.size()}) --------------\r\n")
      tasks.foreach { i =>
        info.append(s"------ ${yyyy_MM_dd_HH_mm_ss_SSS.format(i._2._2)} : [${i._1}]${i._2._1}\r\n")
      }
      logger.trace(info.toString)
    }
  })

} 
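TaskMonitor goes through the implicit scala.collection.JavaConversions wrappers, which are deprecated in current Scala versions; the explicit converters give the same mutable view over the underlying ConcurrentHashMap. A minimal sketch of the add/poll pattern, assuming Scala 2.13 and its scala.jdk.CollectionConverters (the object and method names here are illustrative):

import java.util.Date
import java.util.concurrent.ConcurrentHashMap

import scala.jdk.CollectionConverters._

object TaskRegistry {
  // asScala wraps the ConcurrentHashMap in place; writes go straight to the Java map.
  private val tasks = new ConcurrentHashMap[String, (String, Date)]().asScala

  def add(taskId: String, taskName: String): Unit =
    tasks += taskId -> (taskName, new Date())

  // Remove and return the entry, if present.
  def poll(taskId: String): Option[(String, Date)] = {
    val entry = tasks.get(taskId)
    tasks -= taskId
    entry
  }

  def main(args: Array[String]): Unit = {
    add("t-1", "rebuild index")
    println(poll("t-1")) // Some((rebuild index, <start time>))
    println(poll("t-1")) // None
  }
}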
Example 110
Source File: CheckBeamDependencies.scala    From scio   with Apache License 2.0 5 votes vote down vote up
import java.util.concurrent.ConcurrentHashMap
import java.util.function.{Function => JFunction}

import sbt._
import Keys._
import sbt.plugins.JvmPlugin

import scala.sys.process._

object CheckBeamDependencies extends AutoPlugin {
  override def requires: JvmPlugin.type = sbt.plugins.JvmPlugin
  override def trigger: PluginTrigger = allRequirements

  object autoImport {
    val checkBeamDependencies = taskKey[Unit]("check beam dependencies")
  }
  import autoImport._

  private[this] val beamDeps = new ConcurrentHashMap[String, Map[String, Set[String]]]()

  private def resolveBeamDependencies(deps: Seq[(String, String)]): Map[String, Set[String]] =
    deps
      .filter(d => d._1.startsWith("org.apache.beam"))
      .map {
        case (orgName, rev) =>
          beamDeps.computeIfAbsent(
            orgName,
            new JFunction[String, Map[String, Set[String]]] {
              override def apply(key: String): Map[String, Set[String]] = {
                val output = s"coursier resolve $key:$rev" lineStream_!

                output
                  .flatMap { dep =>
                    dep.split(":").toList match {
                      case org :: name :: rev :: _ => Some((s"$org:$name", rev))
                      case _                       => None
                    }
                  }
                  .toList
                  .groupBy(_._1)
                  .mapValues(_.map(_._2).toSet)
              }
            }
          )
      }
      .foldLeft(Map.empty[String, Set[String]])(_ ++ _)

  override lazy val projectSettings = Seq(
    checkBeamDependencies := {
      val deps = libraryDependencies.value.map(m => (s"${m.organization}:${m.name}", m.revision))
      val beamDependencies = resolveBeamDependencies(deps)
      val projectBeamDeps = deps
        .map(dep => (dep, beamDependencies.getOrElse(dep._1, Nil)))
        .collect {
          case ((dep, version), beamVersions) => beamVersions.map(v => (dep, (version, v)))
        }
        .flatten

      streams.value.log.warn {
        (thisProjectRef.value.project :: projectBeamDeps.collect {
          case (org, (v1, v2)) if v1 != v2 =>
            s"* $org:$v1 -> beam: $v2"
        }.toList).mkString("\n")
      }
    }
  )
} 
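The beamDeps map above is a thread-safe memoization cache: computeIfAbsent guarantees the expensive coursier resolution runs at most once per organization/name. The same idea extracted into a generic helper, written with an explicit java.util.function.Function as in the plugin so it also compiles on older Scala versions (the names are illustrative):

import java.util.concurrent.ConcurrentHashMap
import java.util.function.{Function => JFunction}

object Memo {
  // Wraps f so each distinct key is computed once and then served from the cache.
  def memoize[K, V](f: K => V): K => V = {
    val cache = new ConcurrentHashMap[K, V]()
    key =>
      cache.computeIfAbsent(
        key,
        new JFunction[K, V] {
          override def apply(k: K): V = f(k)
        }
      )
  }

  def main(args: Array[String]): Unit = {
    val slowLength: String => Int = s => { Thread.sleep(100); s.length }
    val fast = memoize(slowLength)
    println(fast("org.apache.beam:beam-sdks-java-core")) // computed
    println(fast("org.apache.beam:beam-sdks-java-core")) // served from the cache
  }
}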
Example 111
Source File: CallSites.scala    From scio   with Apache License 2.0 5 votes vote down vote up
package com.spotify.scio.util

import java.util.concurrent.ConcurrentHashMap
import java.util.function.BiFunction

private[scio] object CallSites {
  private val scioNs = "com.spotify.scio"
  private val beamNs = "org.apache.beam"

  private val methodMap = Map("$plus$plus" -> "++")
  private val nameCache = new ConcurrentHashMap[(String, String, Boolean), Int]()

  private def isExternalClass(c: String): Boolean =
    // Not in our code base or an interpreter
    (!c.startsWith(scioNs) && !c.startsWith("scala.") && !c.startsWith(beamNs)) ||
      c.startsWith(s"$scioNs.examples.") || // unless if it's in examples
      c.startsWith(s"$scioNs.values.NamedTransformTest") || // or this test
      c.startsWith(s"$scioNs.values.SimpleJob") || // or this test
      c.startsWith(s"$scioNs.values.ClosureTest") || // or this test
      // or benchmarks/ITs
      (c.startsWith(scioNs) && (c.contains("Benchmark$") || c.contains("IT")))

  private def isTransform(e: StackTraceElement): Boolean =
    e.getClassName == s"$scioNs.values.SCollectionImpl" && e.getMethodName == "transform"

  private def isPCollectionApply(e: StackTraceElement): Boolean =
    e.getClassName == s"$beamNs.sdk.values.PCollection" && e.getMethodName == "apply"

  def getAppName: String =
    Thread
      .currentThread()
      .getStackTrace
      .drop(1)
      .map(_.getClassName)
      .find(isExternalClass)
      .getOrElse("unknown")
      .split("\\.")
      .last
      .replaceAll("\\$$", "")

  def getCurrent: String = {
    val (method, location, nested) = getCurrentName
    val idx = nameCache.merge(
      (method, location, nested),
      1,
      new BiFunction[Int, Int, Int] {
        override def apply(t: Int, u: Int): Int = t + u
      }
    )

    if (nested) {
      s"$method:$idx"
    } else {
      s"$method@{$location}:$idx"
    }
  }

  
  def getCurrentName: (String, String, Boolean) = {
    val stack = Thread.currentThread().getStackTrace.drop(1)
    val firstExtIdx = stack.indexWhere(e => isExternalClass(e.getClassName))
    val scioMethod = stack(firstExtIdx - 1).getMethodName
    val method = methodMap.getOrElse(scioMethod, scioMethod)

    // find first stack outside of Scio or SDK
    val externalCall = stack(firstExtIdx)
    val location = s"${externalCall.getFileName}:${externalCall.getLineNumber}"
    // check if transform is nested
    val transformIdx = stack.indexWhere(isTransform)
    val pApplyIdx = stack.indexWhere(isPCollectionApply)
    val isNested = transformIdx > 0 && pApplyIdx > 0
    val collIdx = stack.lastIndexWhere(_.getClassName.contains("SCollectionImpl"), pApplyIdx)

    (stack.lift(collIdx).fold(method)(_.getMethodName), location, isNested)
  }
} 
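nameCache above uses ConcurrentHashMap.merge as an atomic per-key counter: the initial value 1 is stored for a new key, and the old and new values are combined otherwise, all in one atomic step. The same pattern in isolation, as a small sketch (Scala 2.12+, where the lambda converts to Java's BiFunction; the names below are made up):

import java.util.concurrent.ConcurrentHashMap

object NameCounter {
  private val counts = new ConcurrentHashMap[String, Int]()

  // merge stores 1 for a new key, otherwise applies the remapping function to
  // the old and new values atomically, and returns whatever it stored.
  def next(name: String): Int =
    counts.merge(name, 1, (a: Int, b: Int) => a + b)

  def main(args: Array[String]): Unit = {
    println(next("map@{Foo.scala:12}"))    // 1
    println(next("map@{Foo.scala:12}"))    // 2
    println(next("filter@{Foo.scala:13}")) // 1
  }
}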
Example 112
Source File: MonitoringServerInterceptor.scala    From scala-server-toolkit   with MIT License 5 votes vote down vote up
package com.avast.sst.grpc.server.micrometer

import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{ConcurrentHashMap, TimeUnit}

import io.grpc.ForwardingServerCall.SimpleForwardingServerCall
import io.grpc._
import io.micrometer.core.instrument.{MeterRegistry, Timer}


class MonitoringServerInterceptor(meterRegistry: MeterRegistry) extends ServerInterceptor {

  private val gaugeCache = new ConcurrentHashMap[String, AtomicLong]()
  private val timerCache = new ConcurrentHashMap[String, Timer]()

  override def interceptCall[ReqT, RespT](
      call: ServerCall[ReqT, RespT],
      headers: Metadata,
      next: ServerCallHandler[ReqT, RespT]
  ): ServerCall.Listener[ReqT] = {
    val prefix = s"grpc.${call.getMethodDescriptor.getFullMethodName.replace('/', '.')}"
    val currentCallsCounter = makeGauge(s"$prefix.current-calls")
    currentCallsCounter.incrementAndGet()
    val start = System.nanoTime
    val newCall = new CloseServerCall(prefix, start, currentCallsCounter, call)
    next.startCall(newCall, headers)
  }

  private class CloseServerCall[A, B](prefix: String, start: Long, currentCallsCounter: AtomicLong, delegate: ServerCall[A, B])
      extends SimpleForwardingServerCall[A, B](delegate) {
    override def close(status: Status, trailers: Metadata): Unit = {
      currentCallsCounter.decrementAndGet()
      val durationNs = System.nanoTime - start
      if (status.isOk) {
        makeTimer(s"$prefix.successes").record(durationNs, TimeUnit.NANOSECONDS)
      } else {
        makeTimer(s"$prefix.failures").record(durationNs, TimeUnit.NANOSECONDS)
      }
      super.close(status, trailers)
    }
  }

  private def makeGauge(name: String): AtomicLong = {
    gaugeCache.computeIfAbsent(
      name,
      n => {
        val counter = new AtomicLong()
        meterRegistry.gauge(n, counter)
        counter
      }
    )
  }

  private def makeTimer(name: String): Timer = timerCache.computeIfAbsent(name, meterRegistry.timer(_))

} 
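The interceptor keeps one AtomicLong per fully qualified method name as a live-call gauge and caches it with computeIfAbsent so each meter is created and registered only once. The same pattern stripped of the gRPC and Micrometer machinery (the method names in the sketch are invented):

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicLong

object CurrentCalls {
  private val gauges = new ConcurrentHashMap[String, AtomicLong]()

  // One counter per method name, created lazily and exactly once.
  private def gauge(name: String): AtomicLong =
    gauges.computeIfAbsent(name, _ => new AtomicLong())

  def enter(method: String): Long = gauge(method).incrementAndGet()
  def leave(method: String): Long = gauge(method).decrementAndGet()

  def main(args: Array[String]): Unit = {
    enter("grpc.Greeter.SayHello")
    enter("grpc.Greeter.SayHello")
    leave("grpc.Greeter.SayHello")
    println(gauge("grpc.Greeter.SayHello").get()) // 1 call still in flight
  }
}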
Example 113
Source File: Biller.scala    From apple-of-my-iap   with MIT License 5 votes vote down vote up
package com.meetup.iap

import com.meetup.iap.receipt.{ReceiptGenerator, Subscription}
import org.slf4j.LoggerFactory

import java.util.concurrent.ConcurrentHashMap
import scala.collection.concurrent.{Map => CMap}
import scala.collection.JavaConverters._
import com.meetup.iap.AppleApi.ReceiptInfo

case class Plan(
    name: String,
    description: String,
    billInterval: Int,
    billIntervalUnit: String,
    trialInterval: Int,
    trialIntervalUnit: String,
    productId: String)

object Biller {
  val log = LoggerFactory.getLogger(Biller.getClass)
  lazy val plans: List[Plan] = {
    log.info("Fetching NEW plans...")
    BillerCache.readPlansFromFile()
  }

  lazy val plansByProductId: Map[String, Plan] = plans.map { p => p.productId -> p }.toMap

  private val _subscriptions: CMap[String, Subscription] =
    new ConcurrentHashMap[String, Subscription].asScala

  def subscriptions = _subscriptions.toMap

  def createSub(plan: Plan): Subscription = {
    val receiptEncoding = ReceiptGenerator.genEncoding(plan, subscriptions.keySet)
    val (_, receiptInfo) = ReceiptGenerator(plan, Left(receiptEncoding))
    val sub = Subscription(receiptEncoding, receiptInfo)

    _subscriptions.put(receiptEncoding, sub)
    BillerCache.writeToCache(subscriptions)
    sub
  }

  def setSubStatus(sub: Subscription, status: Int) {
    _subscriptions.put(sub.receiptToken, sub.copy(status = status))
    BillerCache.writeToCache(subscriptions)
  }

  def renewSub(sub: Subscription) {
    plansByProductId.get(sub.latestReceiptInfo.productId).map { plan =>
      val (latestReceiptToken, latestReceiptInfo) = ReceiptGenerator(plan, Right(sub))
      val updatedSub = sub.addReceipt(latestReceiptInfo, latestReceiptToken)
      _subscriptions.put(sub.receiptToken, updatedSub)
	
      BillerCache.writeToCache(subscriptions)
    }
  }

  def cancelSub(sub: Subscription) {
    _subscriptions.put(sub.receiptToken, sub.cancel())
    BillerCache.writeToCache(subscriptions)
  }

  def refundTransaction(sub: Subscription, receiptInfo: ReceiptInfo) {
    log.info(s"Refunding transaction: ${receiptInfo.transactionId}")
    _subscriptions.put(sub.receiptToken, sub.refund(receiptInfo))
    BillerCache.writeToCache(subscriptions)
  }

  def clearSubs() = {
    _subscriptions.clear()
    BillerCache.writeToCache(subscriptions)
  }

  def shutdown() = {
//    LocalTimer.shutdown()
    BillerCache.writeToCache(subscriptions)
  }

  def start() {
    log.info("Reading subs from cache.")
    BillerCache.readFromCache().foreach { case (k,v) => _subscriptions.put(k,v) }
    plans
  }

//  LocalTimer.repeat(Period.seconds(10)) {
//    log.debug("doing stuff.")
//  }
} 
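Wrapping the ConcurrentHashMap with asScala, as Biller does for _subscriptions, yields a scala.collection.concurrent.Map, which exposes the atomic putIfAbsent/replace/remove operations with Scala-friendly signatures. A minimal sketch of those operations, using plain strings instead of the Subscription type (Scala 2.13 converters assumed):

import java.util.concurrent.ConcurrentHashMap

import scala.collection.concurrent.{Map => CMap}
import scala.jdk.CollectionConverters._

object Subscriptions {
  private val subs: CMap[String, String] =
    new ConcurrentHashMap[String, String]().asScala

  def main(args: Array[String]): Unit = {
    // putIfAbsent returns the previous value as an Option.
    println(subs.putIfAbsent("receipt-1", "active"))          // None
    println(subs.putIfAbsent("receipt-1", "ignored"))         // Some(active)
    // replace only succeeds if the current value matches the expected one.
    println(subs.replace("receipt-1", "active", "cancelled")) // true
    println(subs.toMap)                                       // immutable snapshot
  }
}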
Example 114
Source File: FakeTopicAdmin.scala    From ohara   with Apache License 2.0 4 votes vote down vote up
package oharastream.ohara.configurator.fake

import java.util.concurrent.{CompletableFuture, CompletionStage, ConcurrentHashMap}
import java.{lang, util}

import oharastream.ohara.common.setting.TopicKey
import oharastream.ohara.kafka.{TopicAdmin, TopicCreator, TopicDescription, TopicOption}

private[configurator] class FakeTopicAdmin extends TopicAdmin {
  import scala.jdk.CollectionConverters._

  override val connectionProps: String = "Unknown"

  private[this] val cachedTopics = new ConcurrentHashMap[TopicKey, TopicDescription]()

  override def createPartitions(topicKey: TopicKey, numberOfPartitions: Int): CompletionStage[Void] = {
    val previous = cachedTopics.get(topicKey)
    val f        = new CompletableFuture[Void]()
    if (previous == null)
      f.completeExceptionally(
        new NoSuchElementException(
          s"the topic:$topicKey doesn't exist. actual:${cachedTopics.keys().asScala.mkString(",")}"
        )
      )
    else {
      cachedTopics.put(
        topicKey,
        new TopicDescription(
          previous.topicKey,
          previous.partitionInfos(),
          previous.options
        )
      )
      // complete the future so callers are not left waiting forever
      f.complete(null)
    }
    f
  }

  override def topicKeys: CompletionStage[util.Set[TopicKey]] = CompletableFuture.completedFuture(cachedTopics.keySet())

  override def topicDescription(key: TopicKey): CompletionStage[TopicDescription] = {
    val topic = cachedTopics.get(key)
    val f     = new CompletableFuture[TopicDescription]()
    if (topic == null) f.completeExceptionally(new NoSuchElementException(s"$key does not exist"))
    else f.complete(topic)
    f
  }

  override def topicCreator(): TopicCreator =
    (_: Int, _: Short, options: util.Map[String, String], topicKey: TopicKey) => {
      val f = new CompletableFuture[Void]()
      if (cachedTopics.containsKey(topicKey)) // containsKey: contains would check values, not keys
        f.completeExceptionally(new IllegalArgumentException(s"$topicKey already exists!"))
      else {
        val topicInfo = new TopicDescription(
          topicKey,
          java.util.List.of(),
          options.asScala
            .map {
              case (key, value) =>
                new TopicOption(
                  key,
                  value,
                  false,
                  false,
                  false
                )
            }
            .toSeq
            .asJava
        )
        if (cachedTopics.putIfAbsent(topicKey, topicInfo) != null)
          throw new RuntimeException(s"the $topicKey already exists in kafka")
        f.complete(null)
      }
      f
    }

  private[this] var _closed = false

  override def close(): Unit = _closed = true

  override def closed(): Boolean = _closed

  override def brokerPorts(): CompletionStage[util.Map[String, Integer]] =
    CompletableFuture.completedFuture(java.util.Map.of())

  override def exist(topicKey: TopicKey): CompletionStage[lang.Boolean] =
    CompletableFuture.completedFuture(cachedTopics.containsKey(topicKey))

  override def deleteTopic(topicKey: TopicKey): CompletionStage[lang.Boolean] = {
    val f       = new CompletableFuture[lang.Boolean]()
    val removed = cachedTopics.remove(topicKey)
    if (removed == null) f.complete(false)
    else f.complete(true)
    f
  }
} 
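The fake admin leans on putIfAbsent's return value to catch a topic created concurrently between the existence check and the insert. The same race-safe create, reduced to a sketch that uses plain strings instead of Ohara's TopicKey and TopicDescription:

import java.util.concurrent.{CompletableFuture, CompletionStage, ConcurrentHashMap}

object TopicStore {
  private val topics = new ConcurrentHashMap[String, String]()

  // Completes exceptionally if the topic already exists, including a topic
  // inserted by another thread after the containsKey check.
  def create(key: String, description: String): CompletionStage[Void] = {
    val f = new CompletableFuture[Void]()
    if (topics.containsKey(key))
      f.completeExceptionally(new IllegalArgumentException(s"$key already exists"))
    else if (topics.putIfAbsent(key, description) != null)
      f.completeExceptionally(new IllegalArgumentException(s"$key was created concurrently"))
    else
      f.complete(null)
    f
  }

  def main(args: Array[String]): Unit = {
    create("topic-a", "three partitions").toCompletableFuture.join()
    println(topics) // {topic-a=three partitions}
  }
}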
Example 115
Source File: MesosExternalShuffleService.scala    From drizzle-spark   with Apache License 2.0 4 votes vote down vote up
package org.apache.spark.deploy.mesos

import java.nio.ByteBuffer
import java.util.concurrent.{ConcurrentHashMap, TimeUnit}

import scala.collection.JavaConverters._

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.deploy.ExternalShuffleService
import org.apache.spark.internal.Logging
import org.apache.spark.network.client.{RpcResponseCallback, TransportClient}
import org.apache.spark.network.shuffle.ExternalShuffleBlockHandler
import org.apache.spark.network.shuffle.protocol.BlockTransferMessage
import org.apache.spark.network.shuffle.protocol.mesos.{RegisterDriver, ShuffleServiceHeartbeat}
import org.apache.spark.network.util.TransportConf
import org.apache.spark.util.ThreadUtils


private[mesos] class MesosExternalShuffleService(conf: SparkConf, securityManager: SecurityManager)
  extends ExternalShuffleService(conf, securityManager) {

  protected override def newShuffleBlockHandler(
      conf: TransportConf): ExternalShuffleBlockHandler = {
    val cleanerIntervalS = this.conf.getTimeAsSeconds("spark.shuffle.cleaner.interval", "30s")
    new MesosExternalShuffleBlockHandler(conf, cleanerIntervalS)
  }
}

private[spark] object MesosExternalShuffleService extends Logging {

  def main(args: Array[String]): Unit = {
    ExternalShuffleService.main(args,
      (conf: SparkConf, sm: SecurityManager) => new MesosExternalShuffleService(conf, sm))
  }
}
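The ConcurrentHashMap import in this last example is consumed by MesosExternalShuffleBlockHandler, defined further down in the original file, which roughly speaking keeps a map of connected drivers keyed by app id and periodically expires entries whose heartbeats have stopped. A rough sketch of that kind of heartbeat registry, with invented names and without the RPC plumbing:

import java.util.concurrent.ConcurrentHashMap

import scala.collection.JavaConverters._

object HeartbeatRegistry {
  // appId -> last heartbeat timestamp in millis. Names are illustrative only.
  private val lastSeen = new ConcurrentHashMap[String, Long]()

  def heartbeat(appId: String): Unit = lastSeen.put(appId, System.currentTimeMillis())

  // Drop entries that have not sent a heartbeat within the timeout and return them.
  def expire(timeoutMs: Long): Seq[String] = {
    val now = System.currentTimeMillis()
    val expired = lastSeen.asScala.collect {
      case (appId, ts) if now - ts > timeoutMs => appId
    }.toSeq
    expired.foreach(appId => lastSeen.remove(appId))
    expired
  }

  def main(args: Array[String]): Unit = {
    heartbeat("app-20240101-0001")
    Thread.sleep(50)
    println(expire(timeoutMs = 10)) // the expired app id
  }
}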