java.util.concurrent.atomic.AtomicLong Scala Examples

The following examples show how to use java.util.concurrent.atomic.AtomicLong in Scala. Each example is drawn from an open-source project; the source file and project are noted above each snippet.
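As a quick orientation before the project examples (a minimal sketch of my own, not taken from any of the projects below), the two AtomicLong patterns that recur throughout are lock-free counting and monotonically increasing ID generation:

import java.util.concurrent.atomic.AtomicLong

object AtomicLongBasics extends App {
  // Shared, lock-free counter: safe to update from many threads.
  val hits = new AtomicLong(0L)

  // Monotonically increasing IDs, the pattern most examples below rely on.
  private val nextId = new AtomicLong(0L)
  def newId(): Long = nextId.getAndIncrement()

  // Increment from several threads without any explicit locking.
  val threads = (1 to 4).map { _ =>
    new Thread(new Runnable {
      def run(): Unit = (1 to 1000).foreach(_ => hits.incrementAndGet())
    })
  }
  threads.foreach(_.start())
  threads.foreach(_.join())

  println(s"hits = ${hits.get()}")          // always 4000, no locks needed
  println(s"ids  = ${newId()}, ${newId()}") // 0, 1
}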
Example 1
Source File: RateController.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.streaming.scheduler

import java.io.ObjectInputStream
import java.util.concurrent.atomic.AtomicLong

import scala.concurrent.{ExecutionContext, Future}

import org.apache.spark.SparkConf
import org.apache.spark.streaming.scheduler.rate.RateEstimator
import org.apache.spark.util.{ThreadUtils, Utils}


  private def computeAndPublish(time: Long, elems: Long, workDelay: Long, waitDelay: Long): Unit =
    Future[Unit] {
      val newRate = rateEstimator.compute(time, elems, workDelay, waitDelay)
      newRate.foreach { s =>
        rateLimit.set(s.toLong)
        publish(getLatestRate())
      }
    }

  def getLatestRate(): Long = rateLimit.get()

  override def onBatchCompleted(batchCompleted: StreamingListenerBatchCompleted) {
    val elements = batchCompleted.batchInfo.streamIdToInputInfo

    for {
      processingEnd <- batchCompleted.batchInfo.processingEndTime
      workDelay <- batchCompleted.batchInfo.processingDelay
      waitDelay <- batchCompleted.batchInfo.schedulingDelay
      elems <- elements.get(streamUID).map(_.numRecords)
    } computeAndPublish(processingEnd, elems, workDelay, waitDelay)
  }
}

object RateController {
  def isBackPressureEnabled(conf: SparkConf): Boolean =
    conf.getBoolean("spark.streaming.backpressure.enabled", false)
} 
Example 2
Source File: RandomContext.scala    From cornichon   with Apache License 2.0
package com.github.agourlay.cornichon.core

import java.util.concurrent.atomic.AtomicLong
import scala.util.Random

trait RandomContext {
  val initialSeed: Long
  def nextBoolean(): Boolean
  def nextDouble(): Double
  def nextFloat(): Float
  def nextGaussian(): Double
  def nextInt(): Int
  def nextInt(n: Int): Int
  def nextLong(): Long
  def uniqueLong(): Long
  def nextString(length: Int): String
  def nextPrintableChar(): Char
  def alphanumeric(length: Int): String
  def shuffle[T](xs: Iterable[T]): Iterable[T]
}

// FIXME seededRandom works through internal mutation https://github.com/agourlay/cornichon/issues/303
class MutableRandomContext(seed: Long, seededRandom: Random) extends RandomContext {
  val initialSeed: Long = seed
  def nextBoolean(): Boolean = seededRandom.nextBoolean()
  def nextDouble(): Double = seededRandom.nextDouble()
  def nextFloat(): Float = seededRandom.nextFloat()
  def nextGaussian(): Double = seededRandom.nextGaussian()
  def nextInt(): Int = seededRandom.nextInt()
  def nextInt(n: Int): Int = seededRandom.nextInt(n)
  def nextLong(): Long = seededRandom.nextLong()
  def nextString(length: Int): String = seededRandom.nextString(length)
  def nextPrintableChar(): Char = seededRandom.nextPrintableChar()
  def alphanumeric(length: Int): String = seededRandom.alphanumeric.take(length).mkString("")
  def shuffle[T](xs: Iterable[T]): Iterable[T] = seededRandom.shuffle(xs)
  private val atomicLong = new AtomicLong(1L)
  def uniqueLong(): Long = atomicLong.getAndIncrement()
}

object RandomContext {
  def fromOptSeed(withSeed: Option[Long]): RandomContext = {
    val initialSeed = withSeed.getOrElse(System.currentTimeMillis())
    fromSeed(initialSeed)
  }

  def fromSeed(seed: Long): RandomContext = {
    new MutableRandomContext(seed, new Random(new java.util.Random(seed)))
  }
} 
Example 3
Source File: KillSwitchStream.scala    From squbs   with Apache License 2.0
package org.squbs.stream
import java.util.concurrent.atomic.AtomicLong

import akka.stream.ClosedShape
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object KillSwitchStream {
  val genCount = new AtomicLong(0L)
}

class KillSwitchStream extends PerpetualStream[Future[Long]] {
  import KillSwitchStream._
  import org.squbs.unicomplex.Timeouts._

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0){ p => if (p == Int.MaxValue) 0 else p + 1 } map { v =>
    genCount.incrementAndGet()
    v
  }

  val source = Source.fromIterator(generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(counter) {
    implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        source ~> killSwitch.flow[Int] ~> throttle ~> sink
        ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>

      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! matValue
  }
} 
Example 4
Source File: ProperShutdownStream.scala    From squbs   with Apache License 2.0
package org.squbs.stream
import java.util.concurrent.atomic.AtomicLong

import akka.Done
import akka.actor.ActorRef
import akka.stream.ClosedShape
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object ProperShutdownStream {
  val genCount = new AtomicLong(0L)
}

class ProperShutdownStream extends PerpetualStream[(ActorRef, Future[Long])] {
  import ProperShutdownStream._
  import org.squbs.unicomplex.Timeouts._

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0){ p => if (p == Int.MaxValue) 0 else p + 1 } map { v =>
    genCount.incrementAndGet()
    v
  }

  val managedSource = LifecycleManaged().source(Source fromIterator generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(managedSource, counter)((a, b) => (a._2, b)) {
    implicit builder =>
    (source, sink) =>
      import GraphDSL.Implicits._
      source ~> throttle ~> sink
      ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>
      val (_, fCount) = matValue

      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! fCount
  }

  override def shutdown() = {
    super.shutdown()
    import context.dispatcher
    val (actorRef, fCount) = matValue
    val fStopped = gracefulStop(actorRef, awaitMax)
    for { _ <- fCount; _ <- fStopped } yield Done
  }
} 
Example 5
Source File: ChannelStatisticsHandler.scala    From Neutrino   with Apache License 2.0
package com.ebay.neutrino.handler.ops

import java.util.concurrent.atomic.{AtomicInteger, AtomicLong}

import com.ebay.neutrino.metrics.Instrumented
import com.typesafe.scalalogging.slf4j.StrictLogging
import io.netty.buffer.{ByteBuf, ByteBufHolder}
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel._
import scala.concurrent.duration._



@Sharable
class ChannelStatisticsHandler(upstream: Boolean) extends ChannelDuplexHandler with StrictLogging with Instrumented
{
  import com.ebay.neutrino.util.AttributeSupport._
  import com.ebay.neutrino.util.Utilities.AtomicLongSupport
  import com.ebay.neutrino.metrics.Metrics._

  // Global statistics
  val readBytes    = if (upstream) UpstreamBytesRead else DownstreamBytesRead
  val writeBytes   = if (upstream) UpstreamBytesWrite else DownstreamBytesWrite
  val readPackets  = if (upstream) UpstreamPacketsRead else DownstreamPacketsRead
  val writePackets = if (upstream) UpstreamPacketsWrite else DownstreamPacketsWrite


  @inline def calculateSize(msg: AnyRef) = msg match {
    case data: ByteBuf => data.readableBytes()
    case data: ByteBufHolder => data.content.readableBytes
    case data => 0
  }


  // Log to global (and local) statistics
  override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef): Unit = {
    val bytes = calculateSize(msg)

    readPackets.mark
    readBytes += bytes
    ctx.statistics.readPackets += 1
    ctx.statistics.readBytes += bytes

    ctx.fireChannelRead(msg)
  }


  override def write(ctx: ChannelHandlerContext, msg: AnyRef, promise: ChannelPromise): Unit = {
    val bytes = calculateSize(msg)

    writePackets.mark
    writeBytes += bytes
    ctx.statistics.writePackets += 1
    ctx.statistics.writeBytes += bytes

    ctx.write(msg, promise)
  }
}

class ChannelStatistics {

  // Collected traffic statistics
  val readBytes     = new AtomicLong()
  val writeBytes    = new AtomicLong()
  val readPackets   = new AtomicLong()
  val writePackets  = new AtomicLong()

  // Channel usage statistics
  val startTime     = System.nanoTime()
  val allocations   = new AtomicInteger()

  // Request statistics
  val requestCount  = new AtomicInteger()
  val responseCount = new AtomicInteger()

  // Helper methods
  def elapsed       = (System.nanoTime()-startTime).nanos
} 
Example 6
Source File: BroadcastManager.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.broadcast

import java.util.concurrent.atomic.AtomicLong

import scala.reflect.ClassTag

import org.apache.spark._
import org.apache.spark.util.Utils

private[spark] class BroadcastManager(
    val isDriver: Boolean,
    conf: SparkConf,
    securityManager: SecurityManager)
  extends Logging {

  private var initialized = false
  private var broadcastFactory: BroadcastFactory = null

  initialize()

  // Called by SparkContext or Executor before using Broadcast
  private def initialize() {
    synchronized {
      if (!initialized) {
        val broadcastFactoryClass =
          conf.get("spark.broadcast.factory", "org.apache.spark.broadcast.TorrentBroadcastFactory")

        broadcastFactory =
          Utils.classForName(broadcastFactoryClass).newInstance.asInstanceOf[BroadcastFactory]

        // Initialize appropriate BroadcastFactory and BroadcastObject
        broadcastFactory.initialize(isDriver, conf, securityManager)

        initialized = true
      }
    }
  }

  def stop() {
    broadcastFactory.stop()
  }

  private val nextBroadcastId = new AtomicLong(0)

  def newBroadcast[T: ClassTag](value_ : T, isLocal: Boolean): Broadcast[T] = {
    broadcastFactory.newBroadcast[T](value_, isLocal, nextBroadcastId.getAndIncrement())
  }

  def unbroadcast(id: Long, removeFromDriver: Boolean, blocking: Boolean) {
    broadcastFactory.unbroadcast(id, removeFromDriver, blocking)
  }
} 
Example 7
Source File: RateController.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.streaming.scheduler

import java.io.ObjectInputStream
import java.util.concurrent.atomic.AtomicLong

import scala.concurrent.{ExecutionContext, Future}

import org.apache.spark.SparkConf
import org.apache.spark.streaming.scheduler.rate.RateEstimator
import org.apache.spark.util.{ThreadUtils, Utils}


  private def computeAndPublish(time: Long, elems: Long, workDelay: Long, waitDelay: Long): Unit =
    Future[Unit] {
      val newRate = rateEstimator.compute(time, elems, workDelay, waitDelay)
      newRate.foreach { s =>
        rateLimit.set(s.toLong)
        publish(getLatestRate())
      }
    }

  def getLatestRate(): Long = rateLimit.get()

  override def onBatchCompleted(batchCompleted: StreamingListenerBatchCompleted) {
    val elements = batchCompleted.batchInfo.streamIdToInputInfo

    for {
      processingEnd <- batchCompleted.batchInfo.processingEndTime
      workDelay <- batchCompleted.batchInfo.processingDelay
      waitDelay <- batchCompleted.batchInfo.schedulingDelay
      elems <- elements.get(streamUID).map(_.numRecords)
    } computeAndPublish(processingEnd, elems, workDelay, waitDelay)
  }
}

object RateController {
  def isBackPressureEnabled(conf: SparkConf): Boolean =
    conf.getBoolean("spark.streaming.backpressure.enabled", false)
} 
Example 8
Source File: StoragePerfTester.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.tools

import java.util.concurrent.{CountDownLatch, Executors}
import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.executor.ShuffleWriteMetrics
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.shuffle.hash.HashShuffleManager
import org.apache.spark.util.Utils


    val numOutputSplits = sys.env.get("NUM_REDUCERS").map(_.toInt).getOrElse(500)

    val recordLength = 1000 // ~1KB records
    val totalRecords = dataSizeMb * 1000
    val recordsPerMap = totalRecords / numMaps

    val writeKey = "1" * (recordLength / 2)
    val writeValue = "1" * (recordLength / 2)
    val executor = Executors.newFixedThreadPool(numMaps)

    val conf = new SparkConf()
      .set("spark.shuffle.compress", "false")
      .set("spark.shuffle.sync", "true")
      .set("spark.shuffle.manager", "org.apache.spark.shuffle.hash.HashShuffleManager")

    // This is only used to instantiate a BlockManager. All thread scheduling is done manually.
    val sc = new SparkContext("local[4]", "Write Tester", conf)
    val hashShuffleManager = sc.env.shuffleManager.asInstanceOf[HashShuffleManager]

    def writeOutputBytes(mapId: Int, total: AtomicLong): Unit = {
      val shuffle = hashShuffleManager.shuffleBlockResolver.forMapTask(1, mapId, numOutputSplits,
        new KryoSerializer(sc.conf), new ShuffleWriteMetrics())
      val writers = shuffle.writers
      for (i <- 1 to recordsPerMap) {
        writers(i % numOutputSplits).write(writeKey, writeValue)
      }
      writers.map { w =>
        w.commitAndClose()
        total.addAndGet(w.fileSegment().length)
      }

      shuffle.releaseWriters(true)
    }

    val start = System.currentTimeMillis()
    val latch = new CountDownLatch(numMaps)
    val totalBytes = new AtomicLong()
    for (task <- 1 to numMaps) {
      executor.submit(new Runnable() {
        override def run(): Unit = {
          try {
            writeOutputBytes(task, totalBytes)
            latch.countDown()
          } catch {
            case e: Exception =>
              // scalastyle:off println
              println("Exception in child thread: " + e + " " + e.getMessage)
              // scalastyle:on println
              System.exit(1)
          }
        }
      })
    }
    latch.await()
    val end = System.currentTimeMillis()
    val time = (end - start) / 1000.0
    val bytesPerSecond = totalBytes.get() / time
    val bytesPerFile = (totalBytes.get() / (numOutputSplits * numMaps.toDouble)).toLong

    // scalastyle:off println
    System.err.println("files_total\t\t%s".format(numMaps * numOutputSplits))
    System.err.println("bytes_per_file\t\t%s".format(Utils.bytesToString(bytesPerFile)))
    System.err.println("agg_throughput\t\t%s/s".format(Utils.bytesToString(bytesPerSecond.toLong)))
    // scalastyle:on println

    executor.shutdown()
    sc.stop()
  }
} 
Example 9
Source File: SQLExecution.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.sql.execution

import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.execution.ui.SparkPlanGraph
import org.apache.spark.util.Utils

private[sql] object SQLExecution {

  val EXECUTION_ID_KEY = "spark.sql.execution.id"

  private val _nextExecutionId = new AtomicLong(0)

  private def nextExecutionId: Long = _nextExecutionId.getAndIncrement

  
  def withExecutionId[T](sc: SparkContext, executionId: String)(body: => T): T = {
    val oldExecutionId = sc.getLocalProperty(SQLExecution.EXECUTION_ID_KEY)
    try {
      sc.setLocalProperty(SQLExecution.EXECUTION_ID_KEY, executionId)
      body
    } finally {
      sc.setLocalProperty(SQLExecution.EXECUTION_ID_KEY, oldExecutionId)
    }
  }
} 
Example 10
Source File: StateFactory.scala    From odinson   with Apache License 2.0
package ai.lum.odinson.state

import java.util.concurrent.atomic.AtomicLong

import com.typesafe.config.Config
import ai.lum.common.ConfigUtils._

object StateFactory {
  protected var count: AtomicLong = new AtomicLong

  def newSqlState(config: Config, index: Long): State = {
    val jdbcUrl = config[String]("state.jdbc.url")
    val state = new SqlState(jdbcUrl, index)

    state
  }

  def newState(config: Config): State = {
    // TODO: Also add PID in case multiple Javas are running at the same time.
    val index = count.getAndIncrement
    val provider = config[String]("state.provider")
    val state = provider match {
      case "sql" => newSqlState(config, index)
      case "file" => new FileState()
      case "memory" => new MemoryState()
      case _ => throw new Exception(s"Unknown state provider: $provider")
    }

    state
  }
} 
Example 11
Source File: BroadcastManager.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.broadcast

import java.util.concurrent.atomic.AtomicLong

import scala.reflect.ClassTag

import org.apache.commons.collections.map.{AbstractReferenceMap, ReferenceMap}

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.internal.Logging

private[spark] class BroadcastManager(
    val isDriver: Boolean,
    conf: SparkConf,
    securityManager: SecurityManager)
  extends Logging {

  private var initialized = false
  private var broadcastFactory: BroadcastFactory = null

  initialize()

  // Called by SparkContext or Executor before using Broadcast
  private def initialize() {
    synchronized {
      if (!initialized) {
        broadcastFactory = new TorrentBroadcastFactory
        broadcastFactory.initialize(isDriver, conf, securityManager)
        initialized = true
      }
    }
  }

  def stop() {
    broadcastFactory.stop()
  }

  private val nextBroadcastId = new AtomicLong(0)

  private[broadcast] val cachedValues = {
    new ReferenceMap(AbstractReferenceMap.HARD, AbstractReferenceMap.WEAK)
  }

  def newBroadcast[T: ClassTag](value_ : T, isLocal: Boolean): Broadcast[T] = {
    broadcastFactory.newBroadcast[T](value_, isLocal, nextBroadcastId.getAndIncrement())
  }

  def unbroadcast(id: Long, removeFromDriver: Boolean, blocking: Boolean) {
    broadcastFactory.unbroadcast(id, removeFromDriver, blocking)
  }
} 
Example 12
Source File: RateController.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.streaming.scheduler

import java.io.ObjectInputStream
import java.util.concurrent.atomic.AtomicLong

import scala.concurrent.{ExecutionContext, Future}

import org.apache.spark.SparkConf
import org.apache.spark.streaming.scheduler.rate.RateEstimator
import org.apache.spark.util.{ThreadUtils, Utils}


  private def computeAndPublish(time: Long, elems: Long, workDelay: Long, waitDelay: Long): Unit =
    Future[Unit] {
      val newRate = rateEstimator.compute(time, elems, workDelay, waitDelay)
      newRate.foreach { s =>
        rateLimit.set(s.toLong)
        publish(getLatestRate())
      }
    }

  def getLatestRate(): Long = rateLimit.get()

  override def onBatchCompleted(batchCompleted: StreamingListenerBatchCompleted) {
    val elements = batchCompleted.batchInfo.streamIdToInputInfo

    for {
      processingEnd <- batchCompleted.batchInfo.processingEndTime
      workDelay <- batchCompleted.batchInfo.processingDelay
      waitDelay <- batchCompleted.batchInfo.schedulingDelay
      elems <- elements.get(streamUID).map(_.numRecords)
    } computeAndPublish(processingEnd, elems, workDelay, waitDelay)
  }
}

object RateController {
  def isBackPressureEnabled(conf: SparkConf): Boolean =
    conf.getBoolean("spark.streaming.backpressure.enabled", false)
} 
Example 13
Source File: SQLExecution.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.sql.execution

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.ui.{SparkListenerSQLExecutionEnd, SparkListenerSQLExecutionStart}

object SQLExecution {

  val EXECUTION_ID_KEY = "spark.sql.execution.id"

  private val _nextExecutionId = new AtomicLong(0)

  private def nextExecutionId: Long = _nextExecutionId.getAndIncrement

  private val executionIdToQueryExecution = new ConcurrentHashMap[Long, QueryExecution]()

  def getQueryExecution(executionId: Long): QueryExecution = {
    executionIdToQueryExecution.get(executionId)
  }

  private val testing = sys.props.contains("spark.testing")

  private[sql] def checkSQLExecutionId(sparkSession: SparkSession): Unit = {
    val sc = sparkSession.sparkContext
    // only throw an exception during tests. a missing execution ID should not fail a job.
    if (testing && sc.getLocalProperty(EXECUTION_ID_KEY) == null) {
      // Attention testers: when a test fails with this exception, it means that the action that
      // started execution of a query didn't call withNewExecutionId. The execution ID should be
      // set by calling withNewExecutionId in the action that begins execution, like
      // Dataset.collect or DataFrameWriter.insertInto.
      throw new IllegalStateException("Execution ID should be set")
    }
  }

  
  def withExecutionId[T](sc: SparkContext, executionId: String)(body: => T): T = {
    val oldExecutionId = sc.getLocalProperty(SQLExecution.EXECUTION_ID_KEY)
    try {
      sc.setLocalProperty(SQLExecution.EXECUTION_ID_KEY, executionId)
      body
    } finally {
      sc.setLocalProperty(SQLExecution.EXECUTION_ID_KEY, oldExecutionId)
    }
  }
} 
Example 14
Source File: KillSwitchWithChildActorStream.scala    From squbs   with Apache License 2.0
package org.squbs.stream
import java.util.concurrent.atomic.AtomicLong

import akka.actor.{Actor, Props}
import akka.stream.ClosedShape
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object KillSwitchWithChildActorStream {
  val genCount = new AtomicLong(0L)
}

class DummyChildActor extends Actor {
  def receive = PartialFunction.empty
}

class KillSwitchWithChildActorStream extends PerpetualStream[Future[Long]] {
  import KillSwitchWithChildActorStream._
  import org.squbs.unicomplex.Timeouts._

  val dummyChildActor = context.actorOf(Props[DummyChildActor])

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0){ p => if (p == Int.MaxValue) 0 else p + 1 } map { v =>
    genCount.incrementAndGet()
    v
  }

  val source = Source.fromIterator(generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(counter) {
    implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        source ~> killSwitch.flow[Int] ~> throttle ~> sink
        ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>

      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! matValue
  }

  override def shutdown() = {
    val f = super.shutdown()
    defaultMidActorStop(Seq(dummyChildActor))
    f
  }
} 
Example 15
Source File: AskInstrumentation.scala    From akka-viz   with MIT License
package akka.viz

import java.util.concurrent.atomic.AtomicLong

import akka.actor.ActorRef
import akka.util.Timeout
import akkaviz.config.Config
import akkaviz.events.EventSystem
import akkaviz.events.types.{Answer, AnswerFailed, Question}
import org.aspectj.lang.annotation._

import scala.concurrent.Future
import scala.util.{Failure, Success}

@Aspect
class AskInstrumentation {

  private[this] val askCounter: AtomicLong = new AtomicLong(0)
  private[this] val internalSystemName = Config.internalSystemName

  @Pointcut("execution (* akka.pattern..*.internalAsk$extension(..)) && args(recipient, msg, timeout, sender)")
  def internalAskPointcut(
    recipient: ActorRef,
    msg: Any,
    timeout: Timeout,
    sender: ActorRef
  ): Unit = {}

  @AfterReturning(pointcut = "internalAskPointcut(recipient, msg, timeout, sender)", returning = "future")
  def afterInternalAsk(
    future: Future[Any],
    recipient: ActorRef,
    msg: Any,
    timeout: Timeout,
    sender: ActorRef
  ): Unit = {
    if (!isSystemActor(recipient)) {
      val questionId = askCounter.incrementAndGet()
      publishQuestion(questionId, Option(sender), recipient, msg)
      scheduleAnswer(questionId, future)
    }
  }

  private[this] def isSystemActor(ref: ActorRef) =
    ref.path.address.system == internalSystemName

  def publishQuestion(
    questionId: Long,
    from: Option[ActorRef],
    to: ActorRef,
    msg: Any
  ): Unit = {
    EventSystem.report(Question(questionId, from, to, msg))
  }

  def scheduleAnswer(questionId: Long, answerFuture: Future[Any]): Unit = {
    implicit val ec = scala.concurrent.ExecutionContext.global
    answerFuture.onComplete {
      case Success(msg) => EventSystem.report(Answer(questionId, msg))
      case Failure(ex)  => EventSystem.report(AnswerFailed(questionId, ex))
    }
  }
} 
Example 16
Source File: BroadcastManager.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.broadcast

import java.util.concurrent.atomic.AtomicLong

import scala.reflect.ClassTag

import org.apache.spark._
import org.apache.spark.util.Utils

private[spark] class BroadcastManager(
    val isDriver: Boolean,
    conf: SparkConf,
    securityManager: SecurityManager)
  extends Logging {

  private var initialized = false
  private var broadcastFactory: BroadcastFactory = null

  initialize() // Initializes the broadcastFactory member based on the configuration

  // Called by SparkContext or Executor before using Broadcast
  private def initialize() {
    synchronized {
      if (!initialized) {
        // The broadcast implementation class to use
        val broadcastFactoryClass =
          conf.get("spark.broadcast.factory", "org.apache.spark.broadcast.TorrentBroadcastFactory")
        broadcastFactory =
          Utils.classForName(broadcastFactoryClass).newInstance.asInstanceOf[BroadcastFactory]
        // Initialize appropriate BroadcastFactory and BroadcastObject
        // Call the initialization function
        broadcastFactory.initialize(isDriver, conf, securityManager)
        initialized = true // Initialization is complete
      }
    }
  }

  def stop() {
    broadcastFactory.stop()
  }
  // ID for the next broadcast
  private val nextBroadcastId = new AtomicLong(0)

  def newBroadcast[T: ClassTag](value_ : T, isLocal: Boolean): Broadcast[T] = {
    broadcastFactory.newBroadcast[T](value_, isLocal, nextBroadcastId.getAndIncrement())
  }

  def unbroadcast(id: Long, removeFromDriver: Boolean, blocking: Boolean) {
    broadcastFactory.unbroadcast(id, removeFromDriver, blocking)
  }
} 
Example 17
Source File: RateController.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.streaming.scheduler

import java.io.ObjectInputStream
import java.util.concurrent.atomic.AtomicLong

import scala.concurrent.{ExecutionContext, Future}

import org.apache.spark.SparkConf
import org.apache.spark.streaming.scheduler.rate.RateEstimator
import org.apache.spark.util.{ThreadUtils, Utils}


  private def computeAndPublish(time: Long, elems: Long, workDelay: Long, waitDelay: Long): Unit =
    Future[Unit] {
      val newRate = rateEstimator.compute(time, elems, workDelay, waitDelay)
      newRate.foreach { s =>
        rateLimit.set(s.toLong)
        publish(getLatestRate())
      }
    }

  def getLatestRate(): Long = rateLimit.get()

  override def onBatchCompleted(batchCompleted: StreamingListenerBatchCompleted) {
    val elements = batchCompleted.batchInfo.streamIdToInputInfo

    for {
      processingEnd <- batchCompleted.batchInfo.processingEndTime
      workDelay <- batchCompleted.batchInfo.processingDelay
      waitDelay <- batchCompleted.batchInfo.schedulingDelay
      elems <- elements.get(streamUID).map(_.numRecords)
    } computeAndPublish(processingEnd, elems, workDelay, waitDelay)
  }
}

object RateController {
  def isBackPressureEnabled(conf: SparkConf): Boolean =
    conf.getBoolean("spark.streaming.backpressure.enabled", false)
} 
Example 18
Source File: BroadcastManager.scala    From iolap   with Apache License 2.0
package org.apache.spark.broadcast

import java.util.concurrent.atomic.AtomicLong

import scala.reflect.ClassTag

import org.apache.spark._

private[spark] class BroadcastManager(
    val isDriver: Boolean,
    conf: SparkConf,
    securityManager: SecurityManager)
  extends Logging {

  private var initialized = false
  private var broadcastFactory: BroadcastFactory = null

  initialize()

  // Called by SparkContext or Executor before using Broadcast
  private def initialize() {
    synchronized {
      if (!initialized) {
        val broadcastFactoryClass =
          conf.get("spark.broadcast.factory", "org.apache.spark.broadcast.TorrentBroadcastFactory")

        broadcastFactory =
          Class.forName(broadcastFactoryClass).newInstance.asInstanceOf[BroadcastFactory]

        // Initialize appropriate BroadcastFactory and BroadcastObject
        broadcastFactory.initialize(isDriver, conf, securityManager)

        initialized = true
      }
    }
  }

  def stop() {
    broadcastFactory.stop()
  }

  private val nextBroadcastId = new AtomicLong(0)

  def newBroadcast[T: ClassTag](value_ : T, isLocal: Boolean): Broadcast[T] = {
    broadcastFactory.newBroadcast[T](value_, isLocal, nextBroadcastId.getAndIncrement())
  }

  def unbroadcast(id: Long, removeFromDriver: Boolean, blocking: Boolean) {
    broadcastFactory.unbroadcast(id, removeFromDriver, blocking)
  }
} 
Example 19
Source File: StoragePerfTester.scala    From iolap   with Apache License 2.0
package org.apache.spark.tools

import java.util.concurrent.{CountDownLatch, Executors}
import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.executor.ShuffleWriteMetrics
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.shuffle.hash.HashShuffleManager
import org.apache.spark.util.Utils


    val numOutputSplits = sys.env.get("NUM_REDUCERS").map(_.toInt).getOrElse(500)

    val recordLength = 1000 // ~1KB records
    val totalRecords = dataSizeMb * 1000
    val recordsPerMap = totalRecords / numMaps

    val writeKey = "1" * (recordLength / 2)
    val writeValue = "1" * (recordLength / 2)
    val executor = Executors.newFixedThreadPool(numMaps)

    val conf = new SparkConf()
      .set("spark.shuffle.compress", "false")
      .set("spark.shuffle.sync", "true")
      .set("spark.shuffle.manager", "org.apache.spark.shuffle.hash.HashShuffleManager")

    // This is only used to instantiate a BlockManager. All thread scheduling is done manually.
    val sc = new SparkContext("local[4]", "Write Tester", conf)
    val hashShuffleManager = sc.env.shuffleManager.asInstanceOf[HashShuffleManager]

    def writeOutputBytes(mapId: Int, total: AtomicLong): Unit = {
      val shuffle = hashShuffleManager.shuffleBlockResolver.forMapTask(1, mapId, numOutputSplits,
        new KryoSerializer(sc.conf), new ShuffleWriteMetrics())
      val writers = shuffle.writers
      for (i <- 1 to recordsPerMap) {
        writers(i % numOutputSplits).write(writeKey, writeValue)
      }
      writers.map { w =>
        w.commitAndClose()
        total.addAndGet(w.fileSegment().length)
      }

      shuffle.releaseWriters(true)
    }

    val start = System.currentTimeMillis()
    val latch = new CountDownLatch(numMaps)
    val totalBytes = new AtomicLong()
    for (task <- 1 to numMaps) {
      executor.submit(new Runnable() {
        override def run(): Unit = {
          try {
            writeOutputBytes(task, totalBytes)
            latch.countDown()
          } catch {
            case e: Exception =>
              println("Exception in child thread: " + e + " " + e.getMessage)
              System.exit(1)
          }
        }
      })
    }
    latch.await()
    val end = System.currentTimeMillis()
    val time = (end - start) / 1000.0
    val bytesPerSecond = totalBytes.get() / time
    val bytesPerFile = (totalBytes.get() / (numOutputSplits * numMaps.toDouble)).toLong

    System.err.println("files_total\t\t%s".format(numMaps * numOutputSplits))
    System.err.println("bytes_per_file\t\t%s".format(Utils.bytesToString(bytesPerFile)))
    System.err.println("agg_throughput\t\t%s/s".format(Utils.bytesToString(bytesPerSecond.toLong)))

    executor.shutdown()
    sc.stop()
  }
} 
Example 20
Source File: BroadcastManager.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.broadcast

import java.util.concurrent.atomic.AtomicLong

import scala.reflect.ClassTag

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils

private[spark] class BroadcastManager(
    val isDriver: Boolean,
    conf: SparkConf,
    securityManager: SecurityManager)
  extends Logging {

  private var initialized = false
  private var broadcastFactory: BroadcastFactory = _
  private val user = Utils.getCurrentUserName()

  initialize()

  // Called by SparkContext or Executor before using Broadcast
  private def initialize() {
    synchronized {
      if (!initialized) {
        broadcastFactory = new TorrentBroadcastFactory
        broadcastFactory.initialize(isDriver, conf, securityManager)
        initialized = true
      }
    }
  }

  def stop() {
    broadcastFactory.stop()
  }

  private val nextBroadcastId = new AtomicLong(0)

  def newBroadcast[T: ClassTag](value_ : T, isLocal: Boolean): Broadcast[T] = {
    broadcastFactory.newBroadcast[T](value_, isLocal, nextBroadcastId.getAndIncrement())
  }

  def unbroadcast(id: Long, removeFromDriver: Boolean, blocking: Boolean) {
    broadcastFactory.unbroadcast(id, removeFromDriver, blocking, user)
  }
} 
Example 21
Source File: RateController.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.streaming.scheduler

import java.io.ObjectInputStream
import java.util.concurrent.atomic.AtomicLong

import scala.concurrent.{ExecutionContext, Future}

import org.apache.spark.SparkConf
import org.apache.spark.streaming.scheduler.rate.RateEstimator
import org.apache.spark.util.{ThreadUtils, Utils}


  private def computeAndPublish(time: Long, elems: Long, workDelay: Long, waitDelay: Long): Unit =
    Future[Unit] {
      val newRate = rateEstimator.compute(time, elems, workDelay, waitDelay)
      newRate.foreach { s =>
        rateLimit.set(s.toLong)
        publish(getLatestRate())
      }
    }

  def getLatestRate(): Long = rateLimit.get()

  override def onBatchCompleted(batchCompleted: StreamingListenerBatchCompleted) {
    val elements = batchCompleted.batchInfo.streamIdToInputInfo

    for {
      processingEnd <- batchCompleted.batchInfo.processingEndTime
      workDelay <- batchCompleted.batchInfo.processingDelay
      waitDelay <- batchCompleted.batchInfo.schedulingDelay
      elems <- elements.get(streamUID).map(_.numRecords)
    } computeAndPublish(processingEnd, elems, workDelay, waitDelay)
  }
}

object RateController {
  def isBackPressureEnabled(conf: SparkConf): Boolean =
    conf.getBoolean("spark.streaming.backpressure.enabled", false)
} 
Example 22
Source File: SQLExecution.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.sql.execution

import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.ui.{SparkListenerSQLExecutionEnd,
  SparkListenerSQLExecutionStart}

object SQLExecution {

  val EXECUTION_ID_KEY = "spark.sql.execution.id"

  private val _nextExecutionId = new AtomicLong(0)

  private def nextExecutionId: Long = _nextExecutionId.getAndIncrement

  
  def withExecutionId[T](sc: SparkContext, executionId: String)(body: => T): T = {
    val oldExecutionId = sc.getLocalProperty(SQLExecution.EXECUTION_ID_KEY)
    try {
      sc.setLocalProperty(SQLExecution.EXECUTION_ID_KEY, executionId)
      body
    } finally {
      sc.setLocalProperty(SQLExecution.EXECUTION_ID_KEY, oldExecutionId)
    }
  }
} 
Example 23
Source File: Utils.scala    From graphcool-framework   with Apache License 2.0
package cool.graph.rabbit

import java.text.SimpleDateFormat
import java.util.{Date, UUID}
import java.util.concurrent.ThreadFactory
import java.util.concurrent.atomic.AtomicLong

object Utils {
  def timestamp: String = {
    val formatter = new SimpleDateFormat("HH:mm:ss.SSS-dd.MM.yyyy")
    val now       = new Date()
    formatter.format(now)
  }

  def timestampWithRandom: String = timestamp + "-" + UUID.randomUUID()

  def newNamedThreadFactory(name: String): ThreadFactory = new ThreadFactory {
    val count = new AtomicLong(0)

    override def newThread(runnable: Runnable): Thread = {
      val thread = new Thread(runnable)
      thread.setName(s"$name-" + count.getAndIncrement)
      thread.setDaemon(true)
      thread
    }
  }
} 
Example 24
Source File: Startup.scala    From ez-framework   with Apache License 2.0
package com.ecfront.ez.framework.test.perf

import java.util.concurrent.CountDownLatch
import java.util.concurrent.atomic.AtomicLong

import com.ecfront.common.JsonHelper
import com.mdataset.excavator.http.HttpHelper
import com.typesafe.scalalogging.slf4j.LazyLogging


object Startup extends App with LazyLogging {

  var token: String = ""

  def u(path: String): String = s"http://host.wangzifinance.cn:8070/$path?__ez_token__=$token"

  val client = HttpHelper.getClient
  token = JsonHelper.toJson(client.post(u("public/ez/auth/login/"),
    s"""
       |{
       |  "id":"sysadmin",
       |  "password":"admin"
       |}
     """.stripMargin)).get("body").get("token").asText()

  val counter = new AtomicLong(0)

  val threads = for (i <- 0 until 1000)
    yield new Thread(new Runnable {
      override def run(): Unit = {
        while (true) {
          assert(JsonHelper.toJson(client.post(u("test1/normal/"), "abc")).get("body").asText() == "abc")
          assert(JsonHelper.toJson(client.post(u("test2/normal/"), "def")).get("body").asText() == "def")
          logger.info(">> " + counter.incrementAndGet())
          
        }
      }
    })
  threads.foreach(_.start())
  new CountDownLatch(1).await()

} 
Example 25
Source File: IdGenerator.scala    From izanami   with Apache License 2.0
package libs

import java.util.concurrent.atomic.AtomicLong
import cats.implicits._

import scala.util.Random

class IdGenerator(generatorId: Long) {
  def nextId(): Long = IdGenerator.nextId(generatorId)
}

object IdGenerator {

  private[this] val CHARACTERS =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789".toCharArray
      .map(_.toString)
  private[this] val EXTENDED_CHARACTERS =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789*$%)([]!=+-_:/;.><&".toCharArray
      .map(_.toString)
  private[this] val INIT_STRING = for (i <- 0 to 15)
    yield Integer.toHexString(i)

  private[this] val minus         = 1288834974657L
  private[this] val counter       = new AtomicLong(-1L)
  private[this] val lastTimestamp = new AtomicLong(-1L)

  def apply(generatorId: Long) = new IdGenerator(generatorId)

  def nextId(generatorId: Long): Long = synchronized {
    if (generatorId > 1024L)
      throw new RuntimeException("Generator id can't be larger than 1024")
    val timestamp = System.currentTimeMillis
    if (timestamp < lastTimestamp.get())
      throw new RuntimeException("Clock is running backward. Sorry :-(")
    lastTimestamp.set(timestamp)
    counter.compareAndSet(4095, -1L)
    ((timestamp - minus) << 22L) | (generatorId << 10L) | counter
      .incrementAndGet()
  }

  def uuid: String =
    (for {
      c <- 0 to 36
    } yield
      c match {
        case i if i === 9 || i === 14 || i === 19 || i === 24 => "-"
        case i if i === 15                                    => "4"
        case _ if c === 20                                    => INIT_STRING((Random.nextDouble() * 4.0).toInt | 8)
        case _                                                => INIT_STRING((Random.nextDouble() * 15.0).toInt | 0)
      }).mkString("")

  def token(characters: Array[String], size: Int): String =
    (for {
      _ <- 0 to size - 1
    } yield characters(Random.nextInt(characters.size))).mkString("")

  def token(size: Int): String         = token(CHARACTERS, size)
  def token: String                    = token(64)
  def extendedToken(size: Int): String = token(EXTENDED_CHARACTERS, size)
  def extendedToken: String            = token(EXTENDED_CHARACTERS, 64)
} 
Example 26
Source File: ABTestingSimulation.scala    From izanami   with Apache License 2.0
package experiments

import java.util.concurrent.atomic.AtomicLong

import io.gatling.core.Predef._
import io.gatling.http.Predef._

import scala.concurrent.duration.DurationLong

class ABTestingSimulation extends Simulation {
  val sentHeaders = Map(HttpHeaderNames.ContentType -> "application/json")

  val userEmailFeeder = Iterator.from(0).map(i => Map("email" -> s"user-$i@maif.fr")) // address pattern assumed; the original literal was obfuscated on the page

  val counter = new AtomicLong(0)
  def nextMail() = {
    val count = counter.incrementAndGet()
    s"user-$count@maif.fr" // address pattern assumed; the original literal was obfuscated on the page
  }

  private val headers = Map(
    "Izanami-Client-Id"     -> "xxxx",
    "Izanami-Client-Secret" -> "xxxx"
  )

  private val experimentId = "project:experiments:name"

  private val host = "http://localhost:9000"

  val httpConf = http
    .baseUrl(host)
    .acceptHeader("application/json")
    .headers(headers)
    .acceptEncodingHeader("gzip, deflate")
    .acceptLanguageHeader("en-US,en;q=0.5")

  val scn = scenario("A/B test")
    .repeat(100, "n") {
      randomSwitch(
        50d ->
        exec(
          http("Displayed A request ")
            .post(s"/api/experiments/$experimentId/displayed?id=A&clientId=user-$${n}@maif.fr")
            .body(StringBody("{}"))
            .headers(sentHeaders)
        ).pause(1.second)
          .randomSwitch(
            30d -> exec(
              http("Won A request")
                .post(s"/api/experiments/$experimentId/won?id=A&clientId=user-$${n}@maif.fr")
                .body(StringBody("{}"))
                .headers(sentHeaders)
            ).pause(1.second)
          ),
        30d ->
        exec(
          http("Displayed B request")
            .post(s"/api/experiments/$experimentId/displayed?id=B&clientId=user-$${n}@maif.fr")
            .body(StringBody("{}"))
            .headers(sentHeaders)
        ).pause(1.second)
          .randomSwitch(
            70d -> exec(
              http("Won B request")
                .post(s"/api/experiments/$experimentId/won?id=B&clientId=user-$${n}@gmail.com")
                .body(StringBody("{}"))
                .headers(sentHeaders)
            ).pause(1.second)
          ),
        20d ->
        exec(
          http("Displayed C request")
            .post(s"/api/experiments/$experimentId/displayed?id=C&clientId=user-$${n}@maif.fr")
            .body(StringBody("{}"))
            .headers(sentHeaders)
        ).pause(1.second)
          .randomSwitch(
            70d -> exec(
              http("Won C request")
                .post(s"/api/experiments/$experimentId/won?id=C&clientId=user-$${n}@gmail.com")
                .body(StringBody("{}"))
                .headers(sentHeaders)
            ).pause(1.second)
          )
      )
    }

  setUp(scn.inject(atOnceUsers(2)).protocols(httpConf))

} 
Example 27
Source File: tradingStrategies.scala    From Scala-High-Performance-Programming   with MIT License
package highperfscala.free

import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.locks.LockSupport

trait TradingStrategy {
  def makeTradingDecision(e: BboUpdated): Option[Either[Bid, Offer]]
}

class ProductionStrategy(counter: AtomicLong) extends TradingStrategy {
  def makeTradingDecision(e: BboUpdated): Option[Either[Bid, Offer]] = {
    val c = counter.getAndIncrement()
    c % 1000 == 0 match {
      case true =>
        Thread.sleep(10)
        Some(Right(e.offer))
      case false =>
        LockSupport.parkNanos(20000) // 0.02ms
        c % 3 == 0 match {
          case true => Some(Left(e.bid))
          case false => None
        }
    }
  }
} 
Example 28
Source File: MonitoringServerInterceptor.scala    From scala-server-toolkit   with MIT License
package com.avast.sst.grpc.server.micrometer

import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{ConcurrentHashMap, TimeUnit}

import io.grpc.ForwardingServerCall.SimpleForwardingServerCall
import io.grpc._
import io.micrometer.core.instrument.{MeterRegistry, Timer}


class MonitoringServerInterceptor(meterRegistry: MeterRegistry) extends ServerInterceptor {

  private val gaugeCache = new ConcurrentHashMap[String, AtomicLong]()
  private val timerCache = new ConcurrentHashMap[String, Timer]()

  override def interceptCall[ReqT, RespT](
      call: ServerCall[ReqT, RespT],
      headers: Metadata,
      next: ServerCallHandler[ReqT, RespT]
  ): ServerCall.Listener[ReqT] = {
    val prefix = s"grpc.${call.getMethodDescriptor.getFullMethodName.replace('/', '.')}"
    val currentCallsCounter = makeGauge(s"$prefix.current-calls")
    currentCallsCounter.incrementAndGet()
    val start = System.nanoTime
    val newCall = new CloseServerCall(prefix, start, currentCallsCounter, call)
    next.startCall(newCall, headers)
  }

  private class CloseServerCall[A, B](prefix: String, start: Long, currentCallsCounter: AtomicLong, delegate: ServerCall[A, B])
      extends SimpleForwardingServerCall[A, B](delegate) {
    override def close(status: Status, trailers: Metadata): Unit = {
      currentCallsCounter.decrementAndGet()
      val durationNs = System.nanoTime - start
      if (status.isOk) {
        makeTimer(s"$prefix.successes").record(durationNs, TimeUnit.NANOSECONDS)
      } else {
        makeTimer(s"$prefix.failures").record(durationNs, TimeUnit.NANOSECONDS)
      }
      super.close(status, trailers)
    }
  }

  private def makeGauge(name: String): AtomicLong = {
    gaugeCache.computeIfAbsent(
      name,
      n => {
        val counter = new AtomicLong()
        meterRegistry.gauge(n, counter)
        counter
      }
    )
  }

  private def makeTimer(name: String): Timer = timerCache.computeIfAbsent(name, meterRegistry.timer(_))

} 
Example 29
Source File: SparkSqlTable.scala    From spark-vector   with Apache License 2.0
package com.actian.spark_vector.sql

import java.util.concurrent.atomic.AtomicLong
import org.apache.spark.sql.DataFrame

sealed trait SparkSqlTable {
  def tableName: String
  def quotedName: String = sparkQuote(tableName)
  def close(): Unit
}

case class HiveTable(override val tableName: String) extends SparkSqlTable {
  override def close(): Unit = {}
}

class TempTable private (override val tableName: String, df: DataFrame) extends SparkSqlTable {
  private def register(): Unit = df.createOrReplaceTempView(tableName)
  override def close(): Unit = df.sqlContext.dropTempTable(tableName);
}

object TempTable {
  private val id = new AtomicLong(0L)

  def apply(tableNameBase: String, df: DataFrame): TempTable = {
    val tableName = s"${tableNameBase}_${id.incrementAndGet}"
    val tt = new TempTable(tableName, df)
    tt.register()
    tt
  }
} 
Example 30
Source File: CurrentPersistenceIdsQuerySourceTest.scala    From apache-spark-test   with Apache License 2.0
package com.github.dnvriend.spark.sstreaming

import java.util.UUID
import java.util.concurrent.atomic.AtomicLong

import akka.actor.{ ActorRef, Props }
import akka.persistence.PersistentActor
import akka.testkit.TestProbe
import com.github.dnvriend.TestSpec
import com.github.dnvriend.spark.datasources.SparkImplicits._
import com.github.dnvriend.spark.datasources.person.Person
import org.apache.spark.sql.streaming.{ OutputMode, ProcessingTime }
import org.scalatest.Ignore

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.language.implicitConversions

object PersonActor {
  final case class BlogPost(id: Long, text: String)
}
class PersonActor(val persistenceId: String, schedule: Boolean)(implicit ec: ExecutionContext) extends PersistentActor {
  val counter = new AtomicLong()
  def ping() = context.system.scheduler.scheduleOnce(200.millis, self, "persist")
  def randomId: String = UUID.randomUUID.toString
  override val receiveRecover: Receive = PartialFunction.empty
  override val receiveCommand: Receive = {
    case "persist" =>
      persist(Person(counter.incrementAndGet(), s"foo-$randomId", 20)) { _ =>
        sender() ! "ack"
      }
      if (schedule) ping()
  }
  if (schedule) ping()
}

@Ignore
class CurrentPersistenceIdsQuerySourceTest extends TestSpec {
  def withPersistentActor(pid: String = randomId, schedule: Boolean = false)(f: ActorRef => TestProbe => Unit): Unit = {
    val tp = TestProbe()
    val ref = system.actorOf(Props(new PersonActor(pid, schedule)))
    try f(ref)(tp) finally killActors(ref)
  }

  it should "query read journal" in withSparkSession { spark =>
    withPersistentActor() { ref => tp =>
      tp.send(ref, "persist")
      tp.expectMsg("ack")

      val jdbcReadJournal = spark.readStream
        .currentPersistenceIds("jdbc-read-journal")

      jdbcReadJournal.printSchema()

      println("Is the query streaming: " + jdbcReadJournal.isStreaming)
      println("Are there any streaming queries? " + spark.streams.active.isEmpty)

      val query = jdbcReadJournal
        .writeStream
        .format("console")
        .trigger(ProcessingTime(1.seconds))
        .queryName("consoleStream")
        .outputMode(OutputMode.Append())
        .start()

      query.awaitTermination(10.seconds)
    }
  }
} 
Example 31
Source File: SubSeqIterator.scala    From aloha   with MIT License
package com.eharmony.aloha.util

import java.util.concurrent.atomic.{AtomicLong, AtomicBoolean}


object SubSeqIterator {
    private val Masks = 0 to 62 map { i => 1L << i }

    def apply[A](values: IndexedSeq[A], k: Int): Iterator[Seq[A]] =
        if (0 == k) OneIterator[A]()
        else if (k > values.size) Iterator.empty
        else new SubSeqIterator(values, k)

    case class OneIterator[A]() extends Iterator[Seq[A]] {
        val more = new AtomicBoolean(true)
        def hasNext: Boolean = more.get
        def next(): Seq[A] =
            if (more.getAndSet(false)) Seq[A]()
            else throw new NoSuchElementException
    }

    case class SubSeqIterator[A](values: IndexedSeq[A], k: Int)
        extends Iterator[Seq[A]] {
        require(values.size <= 63)
        require(k <= values.size)

        private[this] val current = new AtomicLong(smallest(k))
        private[this] val last = largest(values.size, k)
        private[this] val numValues = values.size

        def hasNext: Boolean = current.get <= last

        def next(): IndexedSeq[A] = {
            var c = current.get
            var n = 0L
            if (c <= last) {
                n = next(c)
                while (c <= last && !current.compareAndSet(c, n)) {
                    c = current.get
                    n = next(c)
                }
            }
            if (c <= last) subset(c)
            else throw new NoSuchElementException
        }

        protected[this] def subset(n: Long) = {
            var i = 0
            var els = values.companion.empty[A]
            while(i < numValues) {
                if (0 != (Masks(i) & n))
                    els = els :+ values(i)
                i += 1
            }
            els
        }

        protected[this] def largest(n: Int, k: Int): Long = smallest(k) << (n - k)
        protected[this] def smallest(n: Int): Long = (1L << n) - 1
        protected[this] def next(x: Long): Long = {
            if (x == 0) 0
            else {
                val smallest = x & -x
                val ripple = x + smallest
                val newSmallest = ripple & -ripple
                val ones = ((newSmallest / smallest) >> 1) - 1
                ripple | ones
            }
        }
    }
} 
Example 32
Source File: DynamoService.scala    From iep-apps   with Apache License 2.0
package com.netflix.iep.archaius

import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicLong
import javax.inject.Inject
import javax.inject.Singleton

import com.amazonaws.services.dynamodbv2.AmazonDynamoDB
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient
import com.netflix.iep.service.AbstractService
import com.typesafe.config.Config

import scala.concurrent.ExecutionContext
import scala.concurrent.Future


@Singleton
class DynamoService @Inject()(client: AmazonDynamoDB, config: Config) extends AbstractService {

  private val nextId = new AtomicLong()
  private val pool = Executors.newFixedThreadPool(
    Runtime.getRuntime.availableProcessors(),
    (r: Runnable) => {
      new Thread(r, s"dynamo-db-${nextId.getAndIncrement()}")
    }
  )
  private val ec = ExecutionContext.fromExecutorService(pool)

  override def startImpl(): Unit = ()

  override def stopImpl(): Unit = {
    client match {
      case c: AmazonDynamoDBClient => c.shutdown()
      case _                       =>
    }
  }

  def execute[T](task: AmazonDynamoDB => T): Future[T] = Future(task(client))(ec)
} 
Example 33
Source File: LinebackerSpec.scala    From linebacker   with MIT License
package io.chrisdavenport.linebacker

import org.specs2._
import cats.effect._
import cats.implicits._
import java.lang.Thread
import scala.concurrent.ExecutionContext
import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, ThreadFactory}
import _root_.io.chrisdavenport.linebacker.contexts.{Executors => E}
import scala.concurrent.ExecutionContext.global

class LinebackerSpec extends Spec {
  override def is = s2"""
  Threads Run On Linebacker $runsOnLinebacker
  Threads Afterwards Run on Provided EC $runsOffLinebackerAfterwards
  """

  def runsOnLinebacker = {
    val testRun = E
      .unbound[IO]
      .map(Linebacker.fromExecutorService[IO])
      .use { implicit linebacker =>
        implicit val cs = IO.contextShift(global)
        Linebacker[IO].blockContextShift(IO(Thread.currentThread().getName))
      }

    testRun.unsafeRunSync must_=== "linebacker-thread-0"
  }

  def runsOffLinebackerAfterwards = {
    val executor = Executors.newCachedThreadPool(new ThreadFactory {
      private val counter = new AtomicLong(0L)

      def newThread(r: Runnable) = {
        val th = new Thread(r)
        th.setName("test-ec-" + counter.getAndIncrement.toString)
        th.setDaemon(true)
        th
      }
    })
    implicit val ec = ExecutionContext
      .fromExecutorService(executor)

    implicit val linebacker = Linebacker.fromExecutionContext[IO](global) // Block Onto Global
    implicit val cs = IO.contextShift(ec) // Should return to custom

    val testRun = Linebacker[IO].blockContextShift(IO.unit) *>
      IO(Thread.currentThread().getName) <*
      IO(executor.shutdownNow)

    testRun.unsafeRunSync must_=== "test-ec-0"
  }
} 
Example 34
Source File: PlainSourceConsumer.scala    From kafka-scala-api   with Apache License 2.0 5 votes vote down vote up
package com.example.consumer

import java.util.concurrent.atomic.AtomicLong

import akka.Done
import akka.kafka.Subscriptions
import akka.kafka.scaladsl.Consumer
import akka.stream.scaladsl.Sink
import com.example._
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object PlainSourceConsumer extends App {

  val db = new DB
  db.loadOffset().foreach { fromOffset =>
    val partition = 0
    val subscription = Subscriptions.assignmentWithOffset(
      new TopicPartition(topic, partition) -> fromOffset
    )

    val done =
      Consumer.plainSource(consumerSettings, subscription)
        .mapAsync(1)(db.save)
        .runWith(Sink.ignore)
  }

}

//Zookeeper or DB storage mock
class DB {

  private val offset = new AtomicLong(2)

  def save(record: ConsumerRecord[Array[Byte], String]): Future[Done] = {
    println(s"DB.save: ${record.value}")
    offset.set(record.offset)
    Future.successful(Done)
  }

  def loadOffset(): Future[Long] =
    Future.successful(offset.get)

  def update(data: String): Future[Done] = {
    println(s"DB.update: $data")
    Future.successful(Done)
  }
} 
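
The DB mock above keeps a Kafka offset in an AtomicLong. Here is a sketch of the same idea without the Akka and Kafka dependencies, assuming only the JDK; OffsetStore and its method names are hypothetical.

import java.util.concurrent.atomic.AtomicLong

// Hypothetical in-memory offset store: commit only moves the offset forward.
class OffsetStore(initial: Long) {
  private val offset = new AtomicLong(initial)

  def commit(newOffset: Long): Unit =
    offset.updateAndGet((current: Long) => math.max(current, newOffset))

  def load(): Long = offset.get()
}

object OffsetStoreExample extends App {
  val store = new OffsetStore(2L)
  store.commit(10L)
  store.commit(7L)        // ignored: older than the committed offset
  println(store.load())   // 10
}

Unlike the mock above, commit here is monotonic, which guards against out-of-order acknowledgements.
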
Example 35
Source File: ConcurrentRingSet.scala    From rflows   with Apache License 2.0 5 votes vote down vote up
package api.routing.metrics

import java.util.Comparator
import java.util.concurrent.ConcurrentSkipListSet
import java.util.concurrent.atomic.AtomicLong


final class ConcurrentRingSet[K](val maxSize: Int)(val onRemove: K => Unit) extends Iterable[K] {

  @volatile private var isActive = true

  private lazy val set = new ConcurrentSkipListSet[PositionedKey](new Comparator[PositionedKey] {
    override def compare(o1: PositionedKey, o2: PositionedKey): Int = if (o1.key == o2.key) 0 else if (o1.position > o2.position) 1 else -1
  })

  private case class PositionedKey(position: Long, key: K)

  private val count = new AtomicLong(0)

  def put(k: K): Boolean = if (isActive) {
    // $COVERAGE-OFF$
    if (count.get() == Long.MinValue) { set.clear(); count.set(0) } // counter wrapped past Long.MaxValue; just in case...
    if (count.get() == Long.MinValue) { set.clear(); count.set(0) } // counter wrapped past Long.MaxValue; just in case...
    // $COVERAGE-ON$
    val index = count.incrementAndGet()
    while (set.size >= maxSize) Option(set.pollFirst()).map(_.key).foreach(onRemove(_))
    set.add(PositionedKey(index, k))
  }
  else false

  def close() = {
    isActive = false
    this
  }

  import scala.collection.JavaConverters._
  override def iterator: Iterator[K] = set.descendingIterator().asScala.map(_.key)
} 
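
A short, hypothetical usage of the ConcurrentRingSet defined above (assuming the class is on the classpath): once the size bound is reached, the oldest key is evicted and onRemove is invoked for it.

object ConcurrentRingSetExample extends App {
  val ring = new ConcurrentRingSet[String](maxSize = 2)(k => println(s"evicted: $k"))
  ring.put("a")
  ring.put("b")
  ring.put("c")                    // evicts "a", the oldest key
  println(ring.iterator.toList)    // newest first: List(c, b)
  ring.close()
}
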
Example 36
Source File: NatsSpec.scala    From ez-framework   with Apache License 2.0 5 votes vote down vote up
package com.ecfront.ez.framework.cluster.nats

import java.util.concurrent.CountDownLatch
import java.util.concurrent.atomic.AtomicLong

import com.ecfront.ez.framework.test.BasicSpec
import io.nats.client.{ConnectionFactory, Message, MessageHandler}

class NatsSpec extends BasicSpec {

  test("nats test") {
    new Thread(new Runnable() {
      override def run() = {
        val cf = new ConnectionFactory()
        cf.setServers(Array("nats://127.0.0.1:4222"))
        val connection = cf.createConnection()
        connection.subscribe("/test/", "111",new MessageHandler {
          override def onMessage(msg: Message) = {
            logger.info(">>" + new String(msg.getData, "UTF-8"))
            connection.publish(msg.getReplyTo, (new String(msg.getData, "UTF-8") + "_reply").getBytes("UTF-8"))
          }
        })
        new CountDownLatch(1).await()
      }
    }).start()
    new Thread(new Runnable() {
      override def run() = {
        val cf = new ConnectionFactory()
        cf.setServers(Array("nats://127.0.0.1:4222"))
        val connection = cf.createConnection()
        connection.subscribe("/test/","111", new MessageHandler {
          override def onMessage(msg: Message) = {
            logger.info(">>" + new String(msg.getData, "UTF-8"))
            connection.publish(msg.getReplyTo, (new String(msg.getData, "UTF-8") + "_reply").getBytes("UTF-8"))
          }
        })
        new CountDownLatch(1).await()
      }
    }).start()
    new Thread(new Runnable() {
      override def run() = {
        val cf = new ConnectionFactory()
        cf.setServers(Array("nats://127.0.0.1:4222"))
        val connection = cf.createConnection()
        connection.subscribe("/test/11/","111", new MessageHandler {
          override def onMessage(msg: Message) = {
            logger.info(">>" + new String(msg.getData, "UTF-8"))
            connection.publish(msg.getReplyTo, (new String(msg.getData, "UTF-8") + "_reply").getBytes("UTF-8"))
          }
        })
        new CountDownLatch(1).await()
      }
    }).start()

    val counter = new AtomicLong(0)
    new Thread(new Runnable() {
      override def run() = {
        val cf = new ConnectionFactory()
        cf.setServers(Array("nats://127.0.0.1:4222"))
        val connection = cf.createConnection()
        while (true) {
          val reply = connection.request("/test/", s"A test_message ${counter.getAndIncrement()}".getBytes("UTF-8"))
          logger.info("A<<" + new String(reply.getData, "UTF-8"))
        }
        new CountDownLatch(1).await()
      }
    }).start()

    new Thread(new Runnable() {
      override def run() = {
        val cf = new ConnectionFactory()
        cf.setServers(Array("nats://127.0.0.1:4222"))
        val connection = cf.createConnection()
        while (true) {
          var reply = connection.request("/test/", s"B test_message ${counter.getAndIncrement()}".getBytes("UTF-8"))
          logger.info("B<<" + new String(reply.getData, "UTF-8"))
          reply = connection.request("/test/11/", s"B msg ${counter.getAndIncrement()}".getBytes("UTF-8"))
          logger.info("B<<" + new String(reply.getData, "UTF-8"))
        }
        new CountDownLatch(1).await()
      }
    }).start()

    new CountDownLatch(1).await()
  }

} 
Example 37
Source File: RabbitmqSpec.scala    From ez-framework   with Apache License 2.0 5 votes vote down vote up
package com.ecfront.ez.framework.cluster.rabbitmq

import java.util.concurrent.CountDownLatch
import java.util.concurrent.atomic.AtomicLong

import com.ecfront.ez.framework.core.logger.Logging
import com.rabbitmq.client.AMQP.BasicProperties
import com.rabbitmq.client.{ConnectionFactory, QueueingConsumer}
import org.scalatest.{BeforeAndAfter, FunSuite}


class RabbitmqSpec extends FunSuite with BeforeAndAfter with Logging {

  test("rabbitmq test") {
    val p = new AtomicLong(0)
    val c = new AtomicLong(0)

    val factory = new ConnectionFactory()
    factory.setUsername("user")
    factory.setPassword("password")
    factory.setHost("127.0.0.1")
    val connection = factory.newConnection()
    // produce
    val produceThreads = for (i <- 0 until 50)
      yield new Thread(new Runnable {
        override def run(): Unit = {
          val channel = connection.createChannel()
          val replyQueueName = channel.queueDeclare().getQueue
          val replyConsumer = new QueueingConsumer(channel)
          channel.basicConsume(replyQueueName, true, replyConsumer)
          val corrId = java.util.UUID.randomUUID().toString
          val opt = new BasicProperties.Builder().correlationId(corrId).replyTo(replyQueueName).build()
          channel.basicPublish("", "a", opt, s"test${p.incrementAndGet()}".getBytes())
          var delivery = replyConsumer.nextDelivery()
          while (true) {
            if (delivery.getProperties.getCorrelationId.equals(corrId)) {
              logger.info(s"reply " + new String(delivery.getBody))
            }
            delivery = replyConsumer.nextDelivery()
          }
          channel.close()
        }
      })
    produceThreads.foreach(_.start())

    // consumer
    new Thread(new Runnable {
      override def run(): Unit = {
        val channel = connection.createChannel()
        channel.queueDeclare("a", false, false, false, null)
        val consumer = new QueueingConsumer(channel)
        channel.basicConsume("a", true, consumer)
        while (true) {
          val delivery = consumer.nextDelivery()
          val props = delivery.getProperties()
          val message = new String(delivery.getBody())
          new Thread(new Runnable {
            override def run(): Unit = {
              Thread.sleep(10000)
              logger.info(s"receive 1 [${c.incrementAndGet()}] " + message)
              channel.basicPublish("", props.getReplyTo(), new BasicProperties.Builder().correlationId(props.getCorrelationId()).build(), message.getBytes)
            }
          }).start()
        }
      }
    }).start()
   

    new CountDownLatch(1).await()
  }
} 
Example 38
Source File: AlohaEventBus.scala    From aloha   with Apache License 2.0 5 votes vote down vote up
package me.jrwang.aloha.scheduler.bus

import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}

import me.jrwang.aloha.scheduler.bus.AsyncEventQueue.POISON_PILL

class AlohaEventBus extends EventBus[AlohaEventListener, AlohaEvent] {
  override protected def doPostEvent(listener: AlohaEventListener, event: AlohaEvent): Unit = {
    event match {
      case e: AppStateChangedEvent =>
        listener.onApplicationStateChange(e)
      case e: AppRelaunchedEvent =>
        listener.onApplicationRelaunched(e)
        //TODO other specific event
      case _ =>
        listener.onOtherEvent(event)
    }
  }
}



  private[aloha] def stop(): Unit = {
    if (!started.get()) {
      throw new IllegalStateException(s"Attempted to stop $name that has not yet started!")
    }
    if (stopped.compareAndSet(false, true)) {
      eventCount.incrementAndGet()
      eventQueue.put(POISON_PILL)
    }
    // this thread might be trying to stop itself as part of error handling -- we can't join
    // in that case.
    if (Thread.currentThread() != dispatchThread) {
      dispatchThread.join()
    }
  }

  def post(event: AlohaEvent): Unit = {
    if (stopped.get()) {
      return
    }

    eventCount.incrementAndGet()
    if (eventQueue.offer(event)) {
      return
    }

    eventCount.decrementAndGet()
    if (logDroppedEvent.compareAndSet(false,true)) {
      // Only log the following message once to avoid duplicated annoying logs.
      logError(s"Dropping event from queue $name. " +
        "This likely means one of the listeners is too slow and cannot keep up with " +
        "the rate at which tasks are being started by the scheduler.")
    }
    logTrace(s"Dropping event $event")
  }

  override def removeListenerOnError(listener: AlohaEventListener): Unit = {
    // The listener failed in an unrecoverable way; we want to remove it from the entire
    // LiveListenerBus (potentially stopping a queue if it is empty)
    bus.removeListener(listener)
  }

}

object AsyncEventQueue {
  case object POISON_PILL extends AlohaEvent
} 
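
The post method above counts an event optimistically and rolls the counter back when the bounded queue rejects it, logging the drop warning only once. A stripped-down sketch of that pattern, assuming only the JDK; CountingQueue is a hypothetical name and println stands in for the real logger.

import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}

class CountingQueue[E](capacity: Int) {
  private val queue           = new LinkedBlockingQueue[E](capacity)
  private val eventCount      = new AtomicLong(0L)
  private val logDroppedEvent = new AtomicBoolean(false)

  def post(event: E): Boolean = {
    eventCount.incrementAndGet()
    if (queue.offer(event)) return true

    // The queue was full: undo the optimistic increment and warn once.
    eventCount.decrementAndGet()
    if (logDroppedEvent.compareAndSet(false, true)) {
      println(s"Dropping events: queue of capacity $capacity is full")
    }
    false
  }

  def take(): E = {
    val event = queue.take()
    eventCount.decrementAndGet()
    event
  }

  def pendingEvents: Long = eventCount.get()
}
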
Example 39
Source File: ObservedLoadingCacheSpecification.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.utils

import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicLong

import com.google.common.base.Ticker
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.wavesplatform.utils.ObservedLoadingCacheSpecification.FakeTicker
import monix.execution.Ack
import monix.reactive.Observer
import org.scalamock.scalatest.MockFactory
import org.scalatest.{FreeSpec, Matchers}

import scala.jdk.CollectionConverters._
import scala.concurrent.Future
import scala.concurrent.duration.DurationInt

class ObservedLoadingCacheSpecification extends FreeSpec with Matchers with MockFactory {
  private val ExpiringTime = 10.minutes

  "notifies" - {
    "on refresh" in test { (loadingCache, changes, _) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()

      loadingCache.refresh("foo")
    }

    "on put" in test { (loadingCache, changes, _) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()

      loadingCache.put("foo", 10)
    }

    "on putAll" in test { (loadingCache, changes, _) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()
      (changes.onNext _).expects("bar").returning(Future.successful(Ack.Continue)).once()

      loadingCache.putAll(Map[String, Integer]("foo" -> 10, "bar" -> 11).asJava)
    }

    "on invalidate" in test { (loadingCache, changes, _) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()

      loadingCache.invalidate("foo")
    }

    "on invalidateAll" in test { (loadingCache, changes, _) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()
      (changes.onNext _).expects("bar").returning(Future.successful(Ack.Continue)).once()

      loadingCache.invalidateAll(Seq("foo", "bar").asJava)
    }
  }

  "don't notify" - {
    "on cache expiration" in test { (loadingCache, changes, ticker) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()
      loadingCache.put("foo", 1)
      ticker.advance(ExpiringTime.toMillis + 100, TimeUnit.MILLISECONDS)
    }
  }

  private def test(f: (LoadingCache[String, Integer], Observer[String], FakeTicker) => Unit): Unit = {
    val changes = mock[Observer[String]]
    val ticker  = new FakeTicker()

    val delegate = CacheBuilder
      .newBuilder()
      .expireAfterWrite(ExpiringTime.toMillis, TimeUnit.MILLISECONDS)
      .ticker(ticker)
      .build(new CacheLoader[String, Integer] {
        override def load(key: String): Integer = key.length
      })

    val loadingCache = new ObservedLoadingCache(delegate, changes)
    f(loadingCache, changes, ticker)
  }
}

private object ObservedLoadingCacheSpecification {

  // see https://github.com/google/guava/blob/master/guava-testlib/src/com/google/common/testing/FakeTicker.java
  class FakeTicker extends Ticker {
    private val nanos                  = new AtomicLong()
    private var autoIncrementStepNanos = 0L

    def advance(time: Long, timeUnit: TimeUnit): FakeTicker = advance(timeUnit.toNanos(time))
    def advance(nanoseconds: Long): FakeTicker = {
      nanos.addAndGet(nanoseconds)
      this
    }

    def setAutoIncrementStep(autoIncrementStep: Long, timeUnit: TimeUnit): FakeTicker = {
      require(autoIncrementStep >= 0, "May not auto-increment by a negative amount")
      this.autoIncrementStepNanos = timeUnit.toNanos(autoIncrementStep)
      this
    }

    override def read: Long = nanos.getAndAdd(autoIncrementStepNanos)
  }
} 
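
The FakeTicker above is essentially an AtomicLong holding "now" in nanoseconds so that tests can move time forward deterministically. A Guava-free sketch of the same idea, assuming only the JDK; ManualNanoClock is a hypothetical name.

import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicLong

class ManualNanoClock {
  private val nanos = new AtomicLong(0L)

  // Advance the clock by the given amount; nothing moves unless a test calls this.
  def advance(time: Long, unit: TimeUnit): Unit = {
    nanos.addAndGet(unit.toNanos(time))
  }

  def read(): Long = nanos.get()
}

object ManualNanoClockExample extends App {
  val clock = new ManualNanoClock
  clock.advance(10, TimeUnit.MINUTES)
  println(clock.read())   // 600000000000 nanoseconds
}
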
Example 40
Source File: Connections.scala    From scala-loci   with Apache License 2.0 5 votes vote down vote up
package loci
package registry

import communicator.{Connection, Connector, Listener}
import messaging.{ConnectionsBase, Message}
import transmitter.RemoteRef

import java.util.concurrent.atomic.AtomicLong

import scala.util.{Failure, Success, Try}

object Connections {
  type Protocol = ConnectionsBase.Protocol

  private final case class RemoteRef(
      id: Long, protocol: Protocol)(
      connections: Connections[_]) extends transmitter.RemoteRef {
    val doDisconnected = Notice.Steady[Unit]

    def connected = connections.isConnected(this)
    def disconnect() = connections.disconnect(this)
    val disconnected = doDisconnected.notice

    override def toString: String = s"remote#$id[$protocol]"
  }
}

class Connections[M: Message.Method]
    extends ConnectionsBase[RemoteRef, Message[M]] {

  protected def deserializeMessage(message: MessageBuffer) = {
    val result = Message deserialize message
    result.failed foreach { logging.warn("could not parse message", _) }
    result
  }

  protected def serializeMessage(message: Message[M]) =
    Message serialize message

  protected class State extends BaseState {
    private val counter = new AtomicLong(1)
    def createId() = counter.getAndIncrement()
  }

  protected val state = new State

  def connect(
      connector: Connector[Connections.Protocol])(
      handler: Try[RemoteRef] => Unit): Unit =
    connector.connect() { addConnection(_, handler) }

  def listen(listener: Listener[Connections.Protocol])(
      handler: Try[RemoteRef] => Unit): Try[Unit] =
    listener.startListening() { addConnection(_, handler) } match {
      case Success(listening) =>
        addListening(listening)
      case Failure(exception) =>
        Failure(exception)
    }

  private def addConnection(
      connection: Try[Connection[Connections.Protocol]],
      handler: Try[RemoteRef] => Unit): Unit =
    connection match {
      case Success(connection) =>
        val remote = Connections.RemoteRef(
          state.createId(), connection.protocol)(this)
        handler(addConnection(remote, connection) map { _ => remote })

      case Failure(exception) =>
        handler(Failure(exception))
    }

  remoteLeft foreach {
    case remote: Connections.RemoteRef =>
      remote.doDisconnected.set()
    case _ =>
  }
} 
Example 41
Source File: InterruptManager.scala    From inox   with Apache License 2.0 5 votes vote down vote up
package inox
package utils

import scala.collection.JavaConverters._

import java.util.concurrent.atomic.{AtomicLong, AtomicBoolean}
import sun.misc.{Signal, SignalHandler}
import java.util.WeakHashMap

class InterruptManager(reporter: Reporter) extends Interruptible {
  private[this] val interruptibles = new WeakHashMap[Interruptible, Boolean]()
  private[this] val sigINT = new Signal("INT")

  private[this] val lastTimestamp = new AtomicLong(0L)
  private val exitWindow = 1000L

  private[this] val handler = new SignalHandler {
    def handle(sig: Signal) {
      def now(): Long = System.currentTimeMillis()
      reporter.info("")
      if (now() - lastTimestamp.get < exitWindow) {
        reporter.warning("Aborting...")
        System.exit(1)
      } else {
        reporter.warning("Interrupted...")
        lastTimestamp.set(now())
        interrupt()
      }
    }
  }

  val interrupted: AtomicBoolean = new AtomicBoolean(false)

  @inline
  def isInterrupted = interrupted.get

  def interrupt() = synchronized {
    if (!isInterrupted) {
      interrupted.set(true)

      val it = interruptibles.keySet.iterator
      for (i <- it.asScala.toList) i.interrupt()
    } else {
      reporter.warning("Already interrupted!")
    }
  }

  def reset() = synchronized {
    if (isInterrupted) {
      interrupted.set(false)
    } else {
      reporter.warning("Not interrupted!")
    }
  }

  def registerForInterrupts(i: Interruptible) = synchronized {
    if (isInterrupted) i.interrupt()
    interruptibles.put(i, true)
  }

  // We should not need this because keys should automatically be removed
  // from the WeakHashMap when gc'ed.
  // But let's have it anyway!
  def unregisterForInterrupts(i: Interruptible) = synchronized {
    interruptibles.remove(i)
  }

  def registerSignalHandler() = Signal.handle(sigINT, handler)
} 
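
The signal handler above exits only when two interrupts arrive within a one-second window, which it detects by stamping an AtomicLong with the time of the last interrupt. A sketch of that detection logic detached from sun.misc.Signal, assuming only the JDK; DoublePressDetector is a hypothetical name.

import java.util.concurrent.atomic.AtomicLong

class DoublePressDetector(windowMillis: Long) {
  private val lastTimestamp = new AtomicLong(0L)

  // Returns true when this press follows the previous one within the window.
  def press(now: Long = System.currentTimeMillis()): Boolean = {
    val previous = lastTimestamp.getAndSet(now)
    now - previous < windowMillis
  }
}

object DoublePressDetectorExample extends App {
  val detector = new DoublePressDetector(windowMillis = 1000L)
  println(detector.press(now = 10000L))   // false: no recent press
  println(detector.press(now = 10500L))   // true: second press within 1s
}
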
Example 42
Source File: LongRef.scala    From polynote   with Apache License 2.0 5 votes vote down vote up
package polynote.kernel.util

import java.util.concurrent.atomic.AtomicLong
import java.util.function.{LongBinaryOperator, LongUnaryOperator}

import zio.{UIO, ZIO}

class LongRef private (inner: AtomicLong) extends Serializable {
  def get: UIO[Long] = ZIO.succeed(inner.get())
  def set(value: Long): UIO[Unit] = ZIO.succeed(inner.set(value))
  def lazySet(value: Long): UIO[Unit] = ZIO.succeed(inner.lazySet(value))
  def getAndSet(value: Long): UIO[Long] = ZIO.succeed(inner.getAndSet(value))
  val incrementAndGet: UIO[Long] = ZIO.succeed(inner.incrementAndGet())
  val getAndIncrement: UIO[Long] = ZIO.succeed(inner.getAndIncrement())
  val decrementAndGet: UIO[Long] = ZIO.succeed(inner.decrementAndGet())
  val getAndDecrement: UIO[Long] = ZIO.succeed(inner.getAndDecrement())
  def addAndGet(delta: Long): UIO[Long] = ZIO.succeed(inner.addAndGet(delta))
  def getAndAdd(delta: Long): UIO[Long] = ZIO.succeed(inner.getAndAdd(delta))
  def compareAndSet(expected: Long, newValue: Long): UIO[Boolean] = ZIO.succeed(inner.compareAndSet(expected, newValue))
  def weakCompareAndSet(expected: Long, newValue: Long): UIO[Boolean] = ZIO.succeed(inner.weakCompareAndSet(expected, newValue))
  def getAndUpdate(op: LongUnaryOperator): UIO[Long] = ZIO.succeed(inner.getAndUpdate(op))
  def getAndUpdate(op: Long => Long): UIO[Long] = getAndUpdate(LongRef.unaryOp(op))
  def updateAndGet(op: LongUnaryOperator): UIO[Long] = ZIO.succeed(inner.updateAndGet(op))
  def updateAndGet(op: Long => Long): UIO[Long] = updateAndGet(LongRef.unaryOp(op))
  def accumulateAndGet(value: Long, op: LongBinaryOperator): UIO[Long] = ZIO.succeed(inner.accumulateAndGet(value, op))
  def accumulateAndGet(value: Long, op: (Long, Long) => Long): UIO[Long] = accumulateAndGet(value, LongRef.binaryOp(op))
  def getAndAccumulate(value: Long, op: LongBinaryOperator): UIO[Long] = ZIO.succeed(inner.getAndAccumulate(value, op))
  def getAndAccumulate(value: Long, op: (Long, Long) => Long): UIO[Long] = getAndAccumulate(value, LongRef.binaryOp(op))
}

object LongRef {
  private def unaryOp(fn: Long => Long): LongUnaryOperator = new LongUnaryOperator {
    override final def applyAsLong(operand: Long): Long = fn(operand)
  }

  private def binaryOp(fn: (Long, Long) => Long): LongBinaryOperator = new LongBinaryOperator {
    override final def applyAsLong(left: Long, right: Long): Long = fn(left, right)
  }

  val zero: UIO[LongRef] = ZIO.succeed(zeroSync)
  def zeroSync: LongRef = makeSync(0L)

  def make(initial: Long): UIO[LongRef] = ZIO.succeed(makeSync(initial))
  def makeSync(initial: Long) = new LongRef(new AtomicLong(initial))

} 
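
A brief, hypothetical usage of the LongRef wrapper above, assuming ZIO and the polynote class are on the classpath: the UIO-returning operations compose in a for-comprehension, and the comment shows the value the effect would yield once a ZIO runtime (not shown here) executes it.

import polynote.kernel.util.LongRef
import zio.UIO

object LongRefExample {
  val program: UIO[Long] =
    for {
      ref <- LongRef.make(10L)
      _   <- ref.addAndGet(5L)
      _   <- ref.incrementAndGet
      v   <- ref.get
    } yield v   // 16 when the effect is executed
}
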
Example 43
Source File: SimulatedScheduler.scala    From metronome   with Apache License 2.0 5 votes vote down vote up
package dcos.metronome

import akka.actor.{Cancellable, Scheduler}
import java.util.concurrent.atomic.AtomicLong

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration


class SimulatedScheduler(clock: SettableClock) extends Scheduler {
  override def maxFrequency = 0.0
  private[this] val nextId = new AtomicLong
  private[this] val scheduledTasks = scala.collection.mutable.Map.empty[Long, ScheduledTask]
  private case class ScheduledTask(action: () => Unit, var time: Long)
  private class ScheduledTaskCancellable(id: Long) extends Cancellable {
    override def cancel() = {
      doCancel(id)
      true
    }
    override def isCancelled = scheduledTasks.contains(id)
  }

  clock.onChange { () => poll() }

  private[this] def doCancel(id: Long) = synchronized { scheduledTasks -= id }
  private[this] def poll(): Unit =
    synchronized {
      val now = clock.instant().toEpochMilli
      scheduledTasks.values.foreach { task =>
        if (task.time <= now) task.action()
      }
    }

  override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)(implicit
      executor: ExecutionContext
  ): Cancellable =
    synchronized {
      val id = nextId.getAndIncrement
      val cancellable = new ScheduledTaskCancellable(id)
      scheduledTasks(id) = ScheduledTask(
        time = clock.instant().plusMillis(delay.toMillis).toEpochMilli,
        action = () => {
          cancellable.cancel()
          executor.execute(runnable)
        }
      )
      poll()
      cancellable
    }

  def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, runnable: Runnable)(implicit
      executor: ExecutionContext
  ): Cancellable =
    synchronized {
      val id = nextId.getAndIncrement
      val cancellable = new ScheduledTaskCancellable(id)
      scheduledTasks(id) = ScheduledTask(
        time = clock.instant().toEpochMilli + initialDelay.toMillis,
        action = () => {
          scheduledTasks(id).time = clock.instant().toEpochMilli + interval.toMillis
          executor.execute(runnable)
        }
      )
      poll()
      cancellable
    }

  def taskCount = synchronized { scheduledTasks.size }
} 
Example 44
Source File: KillSwitchMatStream.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.stream

import java.util.concurrent.atomic.AtomicLong

import akka.stream.{ClosedShape, KillSwitch, KillSwitches}
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object KillSwitchMatStream {
  val genCount = new AtomicLong(0L)
}

class KillSwitchMatStream extends PerpetualStream[(KillSwitch, Future[Long])] {
  import KillSwitchMatStream._
  import org.squbs.unicomplex.Timeouts._

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0){ p => if (p == Int.MaxValue) 0 else p + 1 } map { v =>
    genCount.incrementAndGet()
    v
  }

  val source = Source.fromIterator(generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(KillSwitches.single[Int], counter)((_, _)) {
    implicit builder =>
      (kill, sink) =>
        import GraphDSL.Implicits._
        source ~> kill ~> throttle ~> sink
        ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>

      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! matValue._2
  }
} 
Example 45
Source File: CompactionBench.scala    From iodb   with Creative Commons Zero v1.0 Universal 5 votes vote down vote up
package io.iohk.iodb.bench

import java.util.concurrent.atomic.AtomicLong

import io.iohk.iodb.TestUtils._
import io.iohk.iodb._


object CompactionBench {

  def main(args: Array[String]): Unit = {
    val time: Long = 60
    val endTime = System.currentTimeMillis() + time * 1000

    // Start a background thread issuing updates while compaction runs in the foreground
    val dir = TestUtils.tempDir()
    val store = new LogStore(dir = dir, keySize = 8, keepVersions = 0)
    val updateCounter = new AtomicLong(1)
    var compactionCounter = 0L

    val updateThread = new Thread(runnable {
      while (System.currentTimeMillis() < endTime) {
        val value = updateCounter.incrementAndGet()
        val toUpdate = List((fromLong(1L), fromLong(value)))
        store.update(versionID = fromLong(1L), toUpdate = toUpdate, toRemove = Nil)
      }
    })
    updateThread.setDaemon(false)
    updateThread.start()


    while (System.currentTimeMillis() < endTime) {
      store.taskCompact()
      store.clean(0)
      compactionCounter += 1
    }

    // Wait until the update thread finishes
    while (updateThread.isAlive) {
      Thread.sleep(1)
    }


    println("Runtime: " + (time / 1000) + " seconds")
    println("Update count: " + updateCounter.get())
    println("Compaction count: " + compactionCounter)

    store.close()
    deleteRecur(dir)
  }

} 
Example 46
Source File: TemperatureServer.scala    From the-finagle-docs   with MIT License 5 votes vote down vote up
package net.gutefrage.context

import java.util.concurrent.atomic.AtomicLong

import com.twitter.finagle._
import com.twitter.finagle.thrift.Protocols
import com.twitter.logging.{Logger, Logging}
import com.twitter.server.TwitterServer
import com.twitter.util.{Await, Future}
import net.gutefrage.Env
import net.gutefrage.temperature.thrift._
import org.slf4j.bridge.SLF4JBridgeHandler



object TemperatureServer extends TwitterServer with Logging{

  val port = flag[Int]("port", 8080, "port this server should use")
  val env = flag[Env]("env", Env.Local, "environment this server runs")

  override def failfastOnFlagsNotParsed: Boolean = true

  premain {
    SLF4JBridgeHandler.removeHandlersForRootLogger()
    SLF4JBridgeHandler.install()
  }

  onExit {
    info("Shutting down temperature server")
  }

  val appLog = Logger("application")

  def main() {
    appLog.info(s"Starting temperature server in environment ${env().name} on port ${port()}")

    // the actual server implementation
    val service = new TemperatureService.FutureIface {

      val numElements = new AtomicLong()
      val sumTemperature = new AtomicLong()

      override def add(datum: TemperatureDatum): Future[Unit] = {
        numElements.incrementAndGet()
        sumTemperature.addAndGet(datum.celsius)
        Future.Unit
      }

      override def mean(): Future[Double] = {
        // Accessing a given userContext if provided
        UserContext.current.foreach { userContext =>
          appLog.info(s"Call received with context: ${userContext}")
        }

        val n = numElements.get()
        val mean = if (n == 0L) 0.0 else sumTemperature.get().toDouble / n
        Future.value(mean)
      }
    }

    // Wrap the service implementation into a real finagle service
    val finagledService: Service[Array[Byte], Array[Byte]] = new TemperatureService.FinagledService(
      service, Protocols.binaryFactory()
    )

    // run and announce the service
    val server = ThriftMux.server
      .withLabel("temperature-service")
      .serveAndAnnounce(
        // schema ! host ! path ! shardId
        name = s"zk!127.0.0.1:2181!/service/${env.name}/temperature!0",
        // bind to local address
        addr = s":${port()}",
        service = finagledService
      )

    // Keep waiting for the server and prevent the Java process from exiting
    closeOnExit(server)
    Await.ready(server)
  }

} 
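
The service implementation above keeps a running mean with two AtomicLongs, one for the element count and one for the sum. A Finagle-free sketch of that accumulator, assuming only the JDK; MeanAccumulator is a hypothetical name, and the sum is converted to Double before dividing so the mean is not truncated.

import java.util.concurrent.atomic.AtomicLong

class MeanAccumulator {
  private val numElements    = new AtomicLong()
  private val sumTemperature = new AtomicLong()

  def add(celsius: Long): Unit = {
    numElements.incrementAndGet()
    sumTemperature.addAndGet(celsius)
  }

  def mean: Double = {
    val n = numElements.get()
    if (n == 0L) 0.0 else sumTemperature.get().toDouble / n
  }
}

object MeanAccumulatorExample extends App {
  val acc = new MeanAccumulator
  acc.add(20L); acc.add(21L); acc.add(25L)
  println(acc.mean)   // 22.0
}

The two counters are updated independently, so a concurrent reader may observe a count and a sum taken a moment apart; for metrics-style reporting that is usually acceptable.
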
Example 47
Source File: DIStats.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.tracing

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicLong

import wvlet.airframe.surface.Surface
import wvlet.airframe.{Design, Session}
import wvlet.log.LogSupport

import scala.jdk.CollectionConverters._

case class DIStatsReport(
    coverage: Double,
    observedTypes: Seq[Surface],
    initCount: Map[Surface, Long],
    injectCount: Map[Surface, Long],
    unusedTypes: Seq[Surface]
) {
  override def toString: String = {
    val report = Seq.newBuilder[String]

    // Coverage report
    report += "[coverage]"
    report += f"design coverage: ${coverage * 100}%.1f%%"
    if (unusedTypes.nonEmpty) {
      report += "[unused types]"
      unusedTypes.map { x => report += x.toString }
    }
    // Access stat report
    report += "[access stats]"
    val allTypes = injectCount.keySet ++ initCount.keySet
    for (s <- observedTypes) {
      report += s"[${s}] init:${initCount.getOrElse(s, 0)}, inject:${injectCount.getOrElse(s, 0)}"
    }

    report.result().mkString("\n")
  }
}


class DIStats extends LogSupport with Serializable {
  // This holds the stat data while the session is active.
  // To avoid holding too many stats for applications that create many child sessions,
  // we will just store the aggregated stats.
  private val injectCountTable = new ConcurrentHashMap[Surface, AtomicLong]().asScala
  private val initCountTable   = new ConcurrentHashMap[Surface, AtomicLong]().asScala

  private val baseNano  = System.nanoTime()
  private val firstSeen = new ConcurrentHashMap[Surface, Long]().asScala

  private[airframe] def observe(s: Surface): Unit = {
    firstSeen.getOrElseUpdate(s, System.nanoTime() - baseNano)
  }

  private[airframe] def incrementInjectCount(session: Session, surface: Surface): Unit = {
    observe(surface)
    val counter = injectCountTable.getOrElseUpdate(surface, new AtomicLong(0))
    counter.incrementAndGet()
  }

  private[airframe] def incrementInitCount(session: Session, surface: Surface): Unit = {
    observe(surface)
    val counter = initCountTable.getOrElseUpdate(surface, new AtomicLong(0))
    counter.incrementAndGet()
  }

  private def getInjectCount(surface: Surface): Long = {
    injectCountTable.get(surface).map(_.get()).getOrElse(0)
  }

  def coverageReportFor(design: Design): DIStatsReport = {
    var bindingCount     = 0
    var usedBindingCount = 0
    val unusedBindings   = Seq.newBuilder[Surface]
    for (b <- design.binding) yield {
      bindingCount += 1
      val surface     = b.from
      val injectCount = getInjectCount(surface)
      if (injectCount > 0) {
        usedBindingCount += 1
      } else {
        unusedBindings += surface
      }
    }
    val coverage =
      if (bindingCount == 0) {
        1.0
      } else {
        usedBindingCount.toDouble / bindingCount
      }
    DIStatsReport(
      coverage = coverage,
      observedTypes = firstSeen.toSeq.sortBy(_._2).map(_._1).toSeq,
      initCount = initCountTable.map(x => x._1 -> x._2.get()).toMap,
      injectCount = injectCountTable.map(x => x._1 -> x._2.get()).toMap,
      unusedTypes = unusedBindings.result()
    )
  }
} 
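
The stats collector above keeps one AtomicLong per surface inside a concurrent map. A distilled sketch of that per-key counter, assuming only the JDK; KeyedCounter is a hypothetical name, and computeIfAbsent replaces the Scala-wrapper getOrElseUpdate used above.

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicLong
import java.util.function.Function

class KeyedCounter[K] {
  private val counters = new ConcurrentHashMap[K, AtomicLong]()

  private val newCounter: Function[K, AtomicLong] =
    new Function[K, AtomicLong] { def apply(key: K): AtomicLong = new AtomicLong(0L) }

  def increment(key: K): Long =
    counters.computeIfAbsent(key, newCounter).incrementAndGet()

  def count(key: K): Long =
    Option(counters.get(key)).map(_.get()).getOrElse(0L)
}

object KeyedCounterExample extends App {
  val injects = new KeyedCounter[String]
  injects.increment("Database")
  injects.increment("Database")
  println(injects.count("Database"))   // 2
  println(injects.count("Cache"))      // 0
}
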
Example 48
Source File: OffHeapMemoryAllocator.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.canvas
import java.lang.ref.ReferenceQueue
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicLong

import wvlet.log.LogSupport

import scala.jdk.CollectionConverters._

private[canvas] case class MemoryRefHolder(ref: MemoryReference, size: Long)


class OffHeapMemoryAllocator extends AutoCloseable with LogSupport {
  type Address = Long

  private val allocatedMemoryAddresses = new ConcurrentHashMap[Address, MemoryRefHolder]().asScala
  private val memoryQueue              = new ReferenceQueue[Memory]()
  private val totalAllocatedMemorySize = new AtomicLong(0)

  {
    // Start an off-heap memory collector to release allocated memory upon GC
    val collectorThread = new Thread(new Runnable {
      override def run(): Unit = {
        val ref = classOf[MemoryReference].cast(memoryQueue.remove())
        release(ref)
      }
    })
    collectorThread.setName("AirframeCanvas-GC")
    collectorThread.setDaemon(true) // Allow shutting down JVM while this thread is running
    logger.debug("Start Canvas-GC memory collector")
    collectorThread.start()
  }

  def allocatedMemorySize: Long = {
    totalAllocatedMemorySize.get()
  }

  def allocate(size: Long): OffHeapMemory = {
    val address = UnsafeUtil.unsafe.allocateMemory(size)
    val m       = OffHeapMemory(address, size, this)
    register(m)
    m
  }

  private def register(m: OffHeapMemory): Unit = {
    // Register a memory reference that will be collected by GC
    val ref    = new MemoryReference(m, memoryQueue)
    val holder = MemoryRefHolder(ref, m.size)
    allocatedMemoryAddresses.put(m.address, holder)
    totalAllocatedMemorySize.addAndGet(m.size)
  }

  private[canvas] def release(m: OffHeapMemory): Unit = {
    releaseMemoryAt(m.address)
  }

  private[canvas] def release(reference: MemoryReference): Unit = {
    releaseMemoryAt(reference.address)
  }

  private def releaseMemoryAt(address: Long): Unit = {
    debug(f"Releasing memory at ${address}%x")
    allocatedMemoryAddresses.get(address) match {
      case Some(h) =>
        if (address != 0) {
          UnsafeUtil.unsafe.freeMemory(address)
          totalAllocatedMemorySize.getAndAdd(-h.size)
        }
        allocatedMemoryAddresses.remove(address)
      case None =>
        warn(f"Unknown allocated memory address: ${address}%x")
    }
  }

  override def close(): Unit = {
    for (address <- allocatedMemoryAddresses.keys) {
      releaseMemoryAt(address)
    }
  }
} 
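
The allocator above tracks the total off-heap footprint with addAndGet on allocation and getAndAdd(-size) on release. A sketch of just that bookkeeping, without sun.misc.Unsafe or reference queues, assuming only the JDK; AllocationTracker is a hypothetical name.

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicLong

class AllocationTracker {
  private val sizes = new ConcurrentHashMap[Long, java.lang.Long]()
  private val total = new AtomicLong(0L)

  def recordAllocation(address: Long, size: Long): Unit = {
    sizes.put(address, size)
    total.addAndGet(size)
  }

  def recordRelease(address: Long): Unit = {
    val size = sizes.remove(address)
    if (size != null) total.getAndAdd(-size.longValue())
  }

  def allocatedBytes: Long = total.get()
}

object AllocationTrackerExample extends App {
  val tracker = new AllocationTracker
  tracker.recordAllocation(address = 0x1000L, size = 64L)
  tracker.recordAllocation(address = 0x2000L, size = 128L)
  tracker.recordRelease(address = 0x1000L)
  println(tracker.allocatedBytes)   // 128
}
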
Example 49
Source File: OneElementConcurrentQueue.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio.internal.impls

import java.io.Serializable

import java.util.concurrent.atomic.{ AtomicBoolean, AtomicLong, AtomicReference }

import zio.internal.MutableConcurrentQueue

final class OneElementConcurrentQueue[A] extends MutableConcurrentQueue[A] with Serializable {
  private[this] val ref = new AtomicReference[AnyRef]()

  private[this] val headCounter   = new AtomicLong(0L)
  private[this] val deqInProgress = new AtomicBoolean(false)

  private[this] val tailCounter   = new AtomicLong(0L)
  private[this] val enqInProgress = new AtomicBoolean(false)

  override final val capacity = 1

  override def dequeuedCount(): Long = headCounter.get()
  override def enqueuedCount(): Long = tailCounter.get()

  override def isEmpty(): Boolean = ref.get() == null
  override def isFull(): Boolean  = !isEmpty()

  override def offer(a: A): Boolean = {
    assert(a != null)

    var res     = false
    var looping = true

    while (looping) {
      if (isFull()) {
        looping = false
      } else {
        if (enqInProgress.compareAndSet(false, true)) { // get an exclusive right to offer
          if (ref.get() == null) {
            tailCounter.lazySet(tailCounter.get() + 1)
            ref.lazySet(a.asInstanceOf[AnyRef])
            res = true
          }

          enqInProgress.lazySet(false)
          looping = false
        }
      }
    }

    res
  }

  override def poll(default: A): A = {
    var res     = default
    var looping = true

    while (looping) {
      if (isEmpty()) {
        looping = false
      } else {
        if (deqInProgress.compareAndSet(false, true)) { // get an exclusive right to poll
          val el = ref.get().asInstanceOf[A]

          if (el != null) {
            res = el
            headCounter.lazySet(headCounter.get() + 1)
            ref.lazySet(null.asInstanceOf[AnyRef])
          }

          deqInProgress.lazySet(false)
          looping = false
        }
      }
    }

    res
  }

  override def size(): Int = if (isEmpty()) 0 else 1
} 
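
A brief, hypothetical usage of the queue above (assuming the zio internals it builds on are on the classpath): the single slot is guarded by the AtomicBoolean/AtomicLong pairs, so a second offer fails until the element has been polled.

object OneElementQueueExample extends App {
  val q = new OneElementConcurrentQueue[String]()

  println(q.offer("first"))     // true: the single slot was empty
  println(q.offer("second"))    // false: capacity is 1 and the slot is taken
  println(q.poll("<empty>"))    // first
  println(q.poll("<empty>"))    // <empty>: nothing left to dequeue
  println(q.enqueuedCount())    // 1
  println(q.dequeuedCount())    // 1
}
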
Example 51
Source File: SQLExecution.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.ui.{SparkListenerSQLExecutionEnd, SparkListenerSQLExecutionStart}

object SQLExecution {

  val EXECUTION_ID_KEY = "spark.sql.execution.id"

  private val _nextExecutionId = new AtomicLong(0)

  private def nextExecutionId: Long = _nextExecutionId.getAndIncrement

  private val executionIdToQueryExecution = new ConcurrentHashMap[Long, QueryExecution]()

  def getQueryExecution(executionId: Long): QueryExecution = {
    executionIdToQueryExecution.get(executionId)
  }

  private val testing = sys.props.contains("spark.testing")

  private[sql] def checkSQLExecutionId(sparkSession: SparkSession): Unit = {
    val sc = sparkSession.sparkContext
    // only throw an exception during tests. a missing execution ID should not fail a job.
    if (testing && sc.getLocalProperty(EXECUTION_ID_KEY) == null) {
      // Attention testers: when a test fails with this exception, it means that the action that
      // started execution of a query didn't call withNewExecutionId. The execution ID should be
      // set by calling withNewExecutionId in the action that begins execution, like
      // Dataset.collect or DataFrameWriter.insertInto.
      throw new IllegalStateException("Execution ID should be set")
    }
  }

  
  def withSQLConfPropagated[T](sparkSession: SparkSession)(body: => T): T = {
    val sc = sparkSession.sparkContext
    // Set all the specified SQL configs to local properties, so that they can be available at
    // the executor side.
    val allConfigs = sparkSession.sessionState.conf.getAllConfs
    val originalLocalProps = allConfigs.collect {
      case (key, value) if key.startsWith("spark") =>
        val originalValue = sc.getLocalProperty(key)
        sc.setLocalProperty(key, value)
        (key, originalValue)
    }

    try {
      body
    } finally {
      for ((key, value) <- originalLocalProps) {
        sc.setLocalProperty(key, value)
      }
    }
  }
} 
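
SQLExecution above pairs an AtomicLong id generator with a ConcurrentHashMap that maps each id to its running query. A reduced sketch of that registry pattern, assuming only the JDK; ExecutionRegistry and its method names are hypothetical.

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicLong

class ExecutionRegistry[T] {
  private val nextExecutionId = new AtomicLong(0)
  private val running         = new ConcurrentHashMap[Long, T]()

  def register(execution: T): Long = {
    val id = nextExecutionId.getAndIncrement()
    running.put(id, execution)
    id
  }

  def lookup(id: Long): Option[T] = Option(running.get(id))
  def finish(id: Long): Unit      = running.remove(id)
}

object ExecutionRegistryExample extends App {
  val registry = new ExecutionRegistry[String]
  val id = registry.register("SELECT 1")
  println(registry.lookup(id))   // Some(SELECT 1)
  registry.finish(id)
  println(registry.lookup(id))   // None
}
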
Example 52
Source File: AESCTR.scala    From tsec   with MIT License 5 votes vote down vote up
package tsec.cipher.symmetric.jca

import java.util.concurrent.atomic.AtomicLong

import cats.effect.Sync
import tsec.cipher.common.padding.NoPadding
import tsec.cipher.symmetric._
import tsec.cipher.symmetric.jca.primitive.JCAPrimitiveCipher

sealed abstract class AESCTR[A] extends JCACipherAPI[A, CTR, NoPadding] with AES[A] with JCAKeyGen[A] {
  implicit val ae: AESCTR[A] = this

  implicit def genEncryptor[F[_]: Sync](implicit c: BlockCipher[A]): JCAPrimitiveCipher[F, A, CTR, NoPadding] =
    JCAPrimitiveCipher.sync[F, A, CTR, NoPadding]

  
  def incrementalIvStrategy[F[_]](implicit F: Sync[F]): CounterIvGen[F, A] =
    new CounterIvGen[F, A] {
      private val delta                     = 1000000L
      private val maxVal: Long              = Long.MaxValue - delta
      private val fixedCounter: Array[Byte] = Array.fill[Byte](8)(0.toByte)
      private val atomicNonce: AtomicLong   = new AtomicLong(Long.MinValue)

      def refresh: F[Unit] = F.delay(atomicNonce.set(Long.MinValue))

      def counterState: F[Long] = F.delay(unsafeCounterState)

      def unsafeCounterState: Long = atomicNonce.get()

      def genIv: F[Iv[A]] =
        F.delay(genIvUnsafe)

      def genIvUnsafe: Iv[A] =
        if (atomicNonce.get() >= maxVal) {
          throw IvError("Maximum safe nonce number reached")
        } else {
          val nonce = atomicNonce.incrementAndGet()
          val iv    = new Array[Byte](16) //AES block size
          iv(0) = (nonce >> 56).toByte
          iv(1) = (nonce >> 48).toByte
          iv(2) = (nonce >> 40).toByte
          iv(3) = (nonce >> 32).toByte
          iv(4) = (nonce >> 24).toByte
          iv(5) = (nonce >> 16).toByte
          iv(6) = (nonce >> 8).toByte
          iv(7) = nonce.toByte
          System.arraycopy(fixedCounter, 0, iv, 8, 8)
          Iv[A](iv)
        }
    }

  def ciphertextFromConcat(rawCT: Array[Byte]): Either[CipherTextError, CipherText[A]] =
    CTOPS.ciphertextFromArray[A, CTR, NoPadding](rawCT)
}

sealed trait AES128CTR

object AES128CTR extends AESCTR[AES128CTR] with AES128[AES128CTR]

sealed trait AES192CTR

object AES192CTR extends AESCTR[AES192CTR] with AES192[AES192CTR]

sealed trait AES256CTR

object AES256CTR extends AESCTR[AES256CTR] with AES256[AES256CTR] 
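
The incremental IV strategy above packs an AtomicLong nonce into the first eight bytes of a 16-byte AES block, big-endian, and leaves the last eight bytes as a zeroed block counter. A sketch of just that packing, assuming only the JDK and using ByteBuffer instead of manual shifts; CounterIvSketch is a hypothetical name, and the maximum-nonce guard of the real code is omitted.

import java.nio.ByteBuffer
import java.util.concurrent.atomic.AtomicLong

object CounterIvSketch {
  private val nonce = new AtomicLong(Long.MinValue)

  def nextIv(): Array[Byte] = {
    val iv = ByteBuffer.allocate(16)      // zero-initialized, big-endian by default
    iv.putLong(nonce.incrementAndGet())   // bytes 0-7: the nonce
    iv.array()                            // bytes 8-15 stay 0: the block counter
  }
}

object CounterIvSketchExample extends App {
  val iv = CounterIvSketch.nextIv()
  println(iv.length)                    // 16
  println(iv.take(8).mkString(","))     // big-endian bytes of Long.MinValue + 1
}
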
Example 53
Source File: MemoryUserRequestQueue.scala    From rokku   with Apache License 2.0 5 votes vote down vote up
package com.ing.wbaa.rokku.proxy.queue

import java.util.concurrent.atomic.AtomicLong

import com.ing.wbaa.rokku.proxy.data.{ RequestId, User }
import com.ing.wbaa.rokku.proxy.handler.LoggerHandlerWithId
import com.ing.wbaa.rokku.proxy.metrics.MetricsFactory
import com.typesafe.config.ConfigFactory

import scala.collection.concurrent.TrieMap


  private def isAllowedToAddToRequestQueue(user: User) = {
    synchronized {
      queuePerUser.putIfAbsent(user.userName.value, new AtomicLong(0))
      val userRequests = queuePerUser(user.userName.value)
      val userOccupiedQueue = (100 * userRequests.get()) / maxQueueSize
      val maxQueueBeforeBlockInPercentPerUser = maxQueueBeforeBlockInPercent / queuePerUser.size
      val isOverflown = userOccupiedQueue >= maxQueueBeforeBlockInPercentPerUser
      currentQueueSize.get() < maxQueueSize && !isOverflown
    }
  }

  private def logDebug(user: User, queueRequestCount: Long, userRequestCount: Long, method: String)(implicit id: RequestId): Unit = {
    logger.debug("request queue = {}", queueRequestCount)
    logger.debug("user request queue = {}", method, user.userName.value, userRequestCount)
    logger.debug("active users size = {}", queuePerUser.size)
  }

  private def metricName(user: User): String = {
    MetricsFactory.REQUEST_QUEUE_OCCUPIED_BY_USER.replace(MetricsFactory.REQUEST_USER, s"${user.userName.value.head.toString}..")
  }
} 
Example 54
Source File: HbaseSinkWriter.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.hbase

import java.util.concurrent.atomic.{AtomicInteger, AtomicLong}

import com.sksamuel.exts.Logging
import io.eels.schema.StructType
import io.eels.{Row, SinkWriter}
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.{BufferedMutator, _}

object HbaseSinkWriter extends Logging {

  def apply(namespace: String,
            table: String,
            numberOfWriters: AtomicInteger,
            schema: StructType,
            maxKeyValueSize: Option[Int],
            writeBufferSize: Option[Long],
            writeRowBatchSize: Int,
            serializer: HbaseSerializer,
            connection: Connection): Seq[HbaseSinkWriter] = {
    val tableName = TableName.valueOf(namespace, table)

    
  private val fieldsWithIndex = schema.fields.zipWithIndex

  override def write(row: Row): Unit = {
    if ((rowCounter.incrementAndGet() % writeRowBatchSize) == 0) mutator.flush()
    val rowKey = serializer.toBytes(row.values(rowKeyIndex), keyField.name, keyField.dataType)
    val put = new Put(rowKey)
    for ((field, index) <- fieldsWithIndex) {
      if (index != rowKeyIndex && row.values(index) != null) {
        val cf = field.columnFamily.getOrElse(sys.error(s"No Column Family defined for field '${field.name}'")).getBytes
        val col = field.name.getBytes()
        put.addColumn(cf, col, serializer.toBytes(row.values(index), field.name, field.dataType))
      }
    }
    mutator.mutate(put)
  }

  override def close(): Unit = {
    mutator.flush()
    if (numberOfWriters.decrementAndGet() == 0) mutator.close()
  }

} 
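
The writer above flushes its HBase mutator whenever the AtomicLong row counter crosses a batch boundary. A reduction of that pattern with the mutator replaced by a plain callback, assuming only the JDK; BatchFlusher is a hypothetical name.

import java.util.concurrent.atomic.AtomicLong

class BatchFlusher(batchSize: Int)(flush: () => Unit) {
  private val rowCounter = new AtomicLong(0)

  def write(): Unit =
    if (rowCounter.incrementAndGet() % batchSize == 0) flush()
}

object BatchFlusherExample extends App {
  val flusher = new BatchFlusher(batchSize = 3)(() => println("flush!"))
  (1 to 7).foreach(_ => flusher.write())   // flushes after rows 3 and 6
}
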
Example 55
Source File: CsvPublisher.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.csv

import java.io.InputStream
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}

import com.sksamuel.exts.Logging
import com.sksamuel.exts.io.Using
import com.univocity.parsers.csv.CsvParser
import io.eels.Row
import io.eels.datastream.{DataStream, Publisher, Subscriber, Subscription}
import io.eels.schema.StructType

class CsvPublisher(createParser: () => CsvParser,
                   inputFn: () => InputStream,
                   header: Header,
                   schema: StructType) extends Publisher[Seq[Row]] with Logging with Using {

  val rowsToSkip: Int = header match {
    case Header.FirstRow => 1
    case _ => 0
  }

  override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {

    val input = inputFn()
    val parser = createParser()

    try {
      parser.beginParsing(input)

      val running = new AtomicBoolean(true)
      subscriber.subscribed(Subscription.fromRunning(running))

      logger.debug(s"CSV Source will skip $rowsToSkip rows")

      val count = new AtomicLong(0)

      Iterator.continually(parser.parseNext)
        .takeWhile(_ != null)
        .takeWhile(_ => running.get)
        .drop(rowsToSkip)
        .map { record => Row(schema, record.toVector) }
        .grouped(DataStream.DefaultBatchSize)
        .foreach { ts =>
          count.addAndGet(ts.size)
          subscriber.next(ts)
        }

      logger.debug(s"All ${count.get} rows read, notifying subscriber")
      subscriber.completed()

    } catch {
      case t: Throwable =>
        logger.error(s"Error in CSV Source, subscriber will be notified", t)
        subscriber.error(t)

    } finally {
      logger.debug("Closing CSV source resources")
      parser.stopParsing()
    }
  }
} 
Example 56
Source File: TemperatureServer.scala    From the-finagle-docs   with MIT License 5 votes vote down vote up
package net.gutefrage.basic

import java.util.concurrent.atomic.AtomicLong

import com.twitter.finagle._
import com.twitter.finagle.thrift.Protocols
import com.twitter.logging.Logger
import com.twitter.server.TwitterServer
import com.twitter.util.logging.Logging
import com.twitter.util.{Await, Future}
import net.gutefrage.Env
import net.gutefrage.temperature.thrift._
import org.slf4j.bridge.SLF4JBridgeHandler



object TemperatureServer extends TwitterServer with Logging{

  val port = flag[Int]("port", 8080, "port this server should use")
  val env = flag[Env]("env", Env.Local, "environment this server runs")

  override def failfastOnFlagsNotParsed: Boolean = true

  premain {
    SLF4JBridgeHandler.removeHandlersForRootLogger()
    SLF4JBridgeHandler.install()
  }

  onExit {
    info("Shutting down temperature server")
  }

  val appLog = Logger("application")

  def main() {
    appLog.info(s"Starting temperature server in environment ${env().name} on port ${port()}")

    // the actual server implementation
    val service = new TemperatureService.FutureIface {

      val numElements = new AtomicLong()
      val sumTemperature = new AtomicLong()

      override def add(datum: TemperatureDatum): Future[Unit] = {
        numElements.incrementAndGet()
        sumTemperature.addAndGet(datum.celsius)
        Future.Unit
      }

      override def mean(): Future[Double] = {
        val n = numElements.get()
        val mean = if (n == 0L) 0.0 else sumTemperature.get().toDouble / n
        Future.value(mean)
      }
    }

    // Wrap the service implementation into a real finagle service
    val finagledService: Service[Array[Byte], Array[Byte]] = new TemperatureService.FinagledService(
      service, Protocols.binaryFactory()
    )

    // run and announce the service
    val server = ThriftMux.server
      .withLabel("temperature-service")
      .serveAndAnnounce(
        // schema ! host ! path ! shardId
        name = s"zk!127.0.0.1:2181!/service/${env().name}/temperature!0",
        // bind to local address
        addr = s":${port()}",
        service = finagledService
      )

    // Keep waiting for the server and prevent the Java process from exiting
    closeOnExit(server)
    Await.ready(server)
  }

} 
Example 57
Source File: AbstractEngineBuilder.scala    From Linkis   with Apache License 2.0 5 votes vote down vote up
package com.webank.wedatasphere.linkis.entrance.execute.impl

import java.util.concurrent.atomic.AtomicLong

import com.webank.wedatasphere.linkis.common.ServiceInstance
import com.webank.wedatasphere.linkis.common.exception.WarnException
import com.webank.wedatasphere.linkis.common.utils.Logging
import com.webank.wedatasphere.linkis.entrance.conf.EntranceConfiguration._
import com.webank.wedatasphere.linkis.entrance.execute.{EngineBuilder, EntranceEngine}
import com.webank.wedatasphere.linkis.entrance.scheduler.EntranceGroupFactory
import com.webank.wedatasphere.linkis.protocol.engine.{RequestEngine, RequestEngineStatus, ResponseEngineInfo, ResponseEngineStatus}
import com.webank.wedatasphere.linkis.rpc.Sender
import com.webank.wedatasphere.linkis.scheduler.executer.ExecutorState
import com.webank.wedatasphere.linkis.scheduler.queue.GroupFactory



abstract class AbstractEngineBuilder(groupFactory: GroupFactory) extends EngineBuilder with Logging {
  private val idGenerator = new AtomicLong(0)

  protected def createEngine(id: Long): EntranceEngine

  override def buildEngine(instance: String): EntranceEngine = {
    val sender = Sender.getSender(ServiceInstance(ENGINE_SPRING_APPLICATION_NAME.getValue, instance))
    sender.ask(RequestEngineStatus(RequestEngineStatus.Status_Overload_Concurrent)) match {
      case r: ResponseEngineStatus => buildEngine(r)
      case warn: WarnException => throw warn
    }
  }

  protected def getGroupName(creator: String, user: String): String = EntranceGroupFactory.getGroupName(creator, user)

  override def buildEngine(instance: String, requestEngine: RequestEngine): EntranceEngine = {
    val engine = createEngine(idGenerator.incrementAndGet())
    engine.setInstance(instance)
    engine.setGroup(groupFactory.getOrCreateGroup(getGroupName(requestEngine.creator, requestEngine.user)))
    engine.setUser(requestEngine.user)
    engine.setCreator(requestEngine.creator)
    engine.updateState(ExecutorState.Starting, ExecutorState.Idle, null, null)
    engine
  }

  override def buildEngine(responseEngineStatus: ResponseEngineStatus): EntranceEngine = responseEngineStatus  match {
    case ResponseEngineStatus(instance, state, overload, concurrent, ResponseEngineInfo(createEntranceInstance, creator, user, _)) =>
      warn(s"receive a new engine $instance for creator $creator, which is created by other entrance $createEntranceInstance.")
      val engine = createEngine(idGenerator.incrementAndGet())
      engine.setInstance(instance)
      engine.setGroup(groupFactory.getOrCreateGroup(getGroupName(creator, user)))
      engine.setUser(user)
      engine.setCreator(creator)
      engine.updateState(ExecutorState.Starting, ExecutorState.apply(state), overload, concurrent)
      engine
  }
} 
Example 58
Source File: SparkSqlExecutor.scala    From Linkis   with Apache License 2.0 5 votes vote down vote up
package com.webank.wedatasphere.linkis.engine.executors

import java.lang.reflect.InvocationTargetException
import java.util.concurrent.atomic.AtomicLong

import com.webank.wedatasphere.linkis.common.conf.CommonVars
import com.webank.wedatasphere.linkis.common.utils.{Logging, Utils}
import com.webank.wedatasphere.linkis.engine.configuration.SparkConfiguration
import com.webank.wedatasphere.linkis.engine.execute.EngineExecutorContext
import com.webank.wedatasphere.linkis.engine.extension.SparkSqlExtension
import com.webank.wedatasphere.linkis.engine.spark.common.{Kind, SparkSQL}
import com.webank.wedatasphere.linkis.engine.spark.utils.EngineUtils
import com.webank.wedatasphere.linkis.scheduler.executer.{ErrorExecuteResponse, ExecuteResponse, SuccessExecuteResponse}
import org.apache.commons.lang.exception.ExceptionUtils
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, SQLContext}

      SQLSession.showDF(sc, jobGroup, rdd, null, SparkConfiguration.SHOW_DF_MAX_RES.getValue,engineExecutorContext)
      SuccessExecuteResponse()
    } catch {
      case e: InvocationTargetException =>
        var cause = ExceptionUtils.getCause(e)
        if(cause == null) cause = e
        error("execute sparkSQL failed!", cause)
        ErrorExecuteResponse(ExceptionUtils.getRootCauseMessage(e), cause)
      case ite: Exception =>
        error("execute sparkSQL failed!", ite)
        ErrorExecuteResponse(ExceptionUtils.getRootCauseMessage(ite), ite)
    } finally sc.clearJobGroup()
  }



  override def kind: Kind = SparkSQL()

  override def open: Unit = {}

  override def close: Unit = {}

} 
Example 59
Source File: LocalQueueStore.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.db

import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicLong

import com.google.common.primitives.{Longs, Shorts}
import com.wavesplatform.dex.db.DbKeys._
import com.wavesplatform.dex.db.leveldb.{DBExt, ReadOnlyDB}
import com.wavesplatform.dex.queue.{QueueEvent, QueueEventWithMeta}
import org.iq80.leveldb.{DB, ReadOptions}

class LocalQueueStore(db: DB) {

  private val newestIdx        = new AtomicLong(db.get(lqNewestIdx))
  private val inMemQueue       = new ConcurrentLinkedQueue[QueueEventWithMeta]
  private var startInMemOffset = Option.empty[QueueEventWithMeta.Offset]

  def enqueue(event: QueueEvent, timestamp: Long): QueueEventWithMeta.Offset = {
    val offset   = newestIdx.incrementAndGet()
    val eventKey = lpqElement(offset)

    val x = QueueEventWithMeta(offset, timestamp, event)
    db.readWrite { rw =>
      rw.put(eventKey, Some(x))
      rw.put(lqNewestIdx, offset)
    }

    inMemQueue.add(x)
    if (startInMemOffset.isEmpty) startInMemOffset = Some(offset)
    offset
  }

  def getFrom(offset: QueueEventWithMeta.Offset, maxElements: Int): Vector[QueueEventWithMeta] = {
    if (startInMemOffset.exists(_ <= offset)) {
      if (inMemQueue.isEmpty) Vector.empty
      else {
        val xs    = Vector.newBuilder[QueueEventWithMeta]
        var added = 0

        while (!inMemQueue.isEmpty && added < maxElements) Option(inMemQueue.poll()).foreach { x =>
          xs += x
          added += 1
        }

        xs.result()
      }
    } else
      new ReadOnlyDB(db, new ReadOptions())
        .read(LqElementKeyName, LqElementPrefixBytes, lpqElement(math.max(offset, 0)).keyBytes, Int.MaxValue) { e =>
          val offset = Longs.fromByteArray(e.getKey.slice(Shorts.BYTES, Shorts.BYTES + Longs.BYTES))
          lpqElement(offset).parse(e.getValue).getOrElse(throw new RuntimeException(s"Can't find a queue event at $offset"))
        }
  }

  def newestOffset: Option[QueueEventWithMeta.Offset] = {
    val idx      = newestIdx.get()
    val eventKey = lpqElement(idx)
    eventKey.parse(db.get(eventKey.keyBytes)).map(_.offset)
  }

  def dropUntil(offset: QueueEventWithMeta.Offset): Unit = db.readWrite { rw =>
    val oldestIdx = math.max(db.get(lqOldestIdx), 0)
    (oldestIdx until offset).foreach { offset =>
      rw.delete(lpqElement(offset))
    }
    rw.put(lqOldestIdx, offset)
  }

} 
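LocalQueueStore pairs a LevelDB-backed log with an in-memory queue; the AtomicLong is what keeps enqueue offsets unique and strictly increasing under concurrent writers. A minimal, self-contained sketch of that offset-assignment pattern (hypothetical Event type, no LevelDB):

import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicLong

final case class Event(payload: String)
final case class WithOffset(offset: Long, event: Event)

class InMemoryQueueStore(startOffset: Long) {
  private val newestIdx = new AtomicLong(startOffset)
  private val queue     = new ConcurrentLinkedQueue[WithOffset]

  // Every enqueue gets a unique, monotonically increasing offset, even when
  // several threads enqueue at the same time.
  def enqueue(event: Event): Long = {
    val offset = newestIdx.incrementAndGet()
    queue.add(WithOffset(offset, event))
    offset
  }

  def newestOffset: Long = newestIdx.get()
}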
Example 60
Source File: IodbDataSource.scala    From mantis   with Apache License 2.0 5 votes vote down vote up
package io.iohk.ethereum.db.dataSource

import java.io.File
import java.nio.ByteBuffer
import java.util.concurrent.atomic.AtomicLong

import io.iohk.iodb.{ByteArrayWrapper, LSMStore}

class IodbDataSource private (lSMStore: LSMStore, keySize: Int, path: String) extends DataSource {

  import IodbDataSource._

  override def get(namespace: Namespace, key: Key): Option[Value] = {
    require(namespace.length + key.length <= keySize, "Wrong key size in IODB get")
    val keyPadded = padToKeySize(namespace, key, keySize).toArray
    lSMStore.get(ByteArrayWrapper(keyPadded)).map(v => v.data.toIndexedSeq)
  }

  override def update(namespace: Namespace, toRemove: Seq[Key], toUpsert: Seq[(Key, Value)]): DataSource = {
    require(
      toRemove.forall{ keyToRemove => namespace.length + keyToRemove.length <= keySize } &&
        toUpsert.forall{ case (keyToUpSert, _) => namespace.length + keyToUpSert.length <= keySize },
      "Wrong key size in IODB update"
    )
    val toRemovePadded = toRemove.map(key => padToKeySize(namespace, key, keySize))
    val toUpsertPadded = toUpsert.map{case (key, value) => padToKeySize(namespace, key, keySize) -> value}
    lSMStore.update(
      ByteArrayWrapper(storageVersionGen()),
      toRemovePadded.map(key => ByteArrayWrapper(key.toArray)),
      asStorables(toUpsertPadded))
    new IodbDataSource(lSMStore, keySize, path)
  }

  override def clear: DataSource = {
    destroy()
    IodbDataSource(path, keySize)
  }

  override def close(): Unit = lSMStore.close()

  override def destroy(): Unit = {
    try {
      close()
    } finally {
      val directoryDeletionSuccess = deleteDirectory(new File(path))
      assert(directoryDeletionSuccess, "Iodb folder destruction failed")
    }
  }

  private def asStorables(keyValues: Seq[(Key, Value)]): Seq[(ByteArrayWrapper, ByteArrayWrapper)] =
    keyValues.map{ case (key, value) => ByteArrayWrapper(key.toArray) -> ByteArrayWrapper(value.toArray) }
}


object IodbDataSource {
  val KeySizeWithoutNamespace = 32
  val KeySize = 33

  private val updateCounter = new AtomicLong(System.currentTimeMillis())

  private def storageVersionGen(): Array[Byte] = {
    ByteBuffer.allocate(java.lang.Long.SIZE / java.lang.Byte.SIZE).putLong(updateCounter.incrementAndGet()).array()
  }

  
  def apply(path: String, keySize: Int): IodbDataSource = {
    val dir: File = new File(path)
    val dirSetupSuccess = (dir.exists() && dir.isDirectory) || dir.mkdirs()
    assert(dirSetupSuccess, "Iodb folder creation failed")

    val lSMStore: LSMStore = new LSMStore(dir = dir, keySize = keySize)
    new IodbDataSource(lSMStore, keySize, path)
  }

  private def deleteDirectory(dir: File): Boolean = {
    require(dir.exists() && dir.isDirectory, "Trying to delete a file thats not a folder")
    val files = dir.listFiles()
    val filesDeletionSuccess = files.forall { f =>
      val deleted = if (f.isDirectory) deleteDirectory(f) else f.delete()
      deleted
    }
    filesDeletionSuccess && dir.delete()
  }

  private def padToKeySize(namespace: IndexedSeq[Byte], key: IndexedSeq[Byte], keySize: Int): IndexedSeq[Byte] =
    namespace ++ key.padTo(keySize - namespace.size, 0.toByte)
} 
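The version keys passed to LSMStore.update come from an AtomicLong seeded with System.currentTimeMillis(), so they keep increasing across process restarts as long as a run performs fewer updates than milliseconds elapse. A condensed sketch of just that generator (illustrative names, not the project's API):

import java.nio.ByteBuffer
import java.util.concurrent.atomic.AtomicLong

object VersionGen {
  // Seeding with the current time keeps version keys increasing across restarts,
  // assuming a run performs fewer updates than milliseconds elapse.
  private val updateCounter = new AtomicLong(System.currentTimeMillis())

  def next(): Array[Byte] =
    ByteBuffer.allocate(java.lang.Long.BYTES).putLong(updateCounter.incrementAndGet()).array()
}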
Example 61
Source File: BroadcastManager.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.broadcast

import java.util.concurrent.atomic.AtomicLong

import scala.reflect.ClassTag

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.internal.Logging

private[spark] class BroadcastManager(
    val isDriver: Boolean,
    conf: SparkConf,
    securityManager: SecurityManager)
  extends Logging {

  private var initialized = false
  private var broadcastFactory: BroadcastFactory = null

  initialize()

  // Called by SparkContext or Executor before using Broadcast
  private def initialize() {
    synchronized {
      if (!initialized) {
        broadcastFactory = new TorrentBroadcastFactory
        broadcastFactory.initialize(isDriver, conf, securityManager)
        initialized = true
      }
    }
  }

  def stop() {
    broadcastFactory.stop()
  }

  private val nextBroadcastId = new AtomicLong(0)

  def newBroadcast[T: ClassTag](value_ : T, isLocal: Boolean): Broadcast[T] = {
    broadcastFactory.newBroadcast[T](value_, isLocal, nextBroadcastId.getAndIncrement())
  }

  def unbroadcast(id: Long, removeFromDriver: Boolean, blocking: Boolean) {
    broadcastFactory.unbroadcast(id, removeFromDriver, blocking)
  }
} 
Example 62
Source File: SQLExecution.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution

import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.ui.{SparkListenerSQLExecutionEnd,
  SparkListenerSQLExecutionStart}

object SQLExecution {

  val EXECUTION_ID_KEY = "spark.sql.execution.id"

  private val _nextExecutionId = new AtomicLong(0)

  private def nextExecutionId: Long = _nextExecutionId.getAndIncrement

  
  def withExecutionId[T](sc: SparkContext, executionId: String)(body: => T): T = {
    val oldExecutionId = sc.getLocalProperty(SQLExecution.EXECUTION_ID_KEY)
    try {
      sc.setLocalProperty(SQLExecution.EXECUTION_ID_KEY, executionId)
      body
    } finally {
      sc.setLocalProperty(SQLExecution.EXECUTION_ID_KEY, oldExecutionId)
    }
  }
} 
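A hypothetical usage sketch of withExecutionId, assuming a running SparkSession `spark` and its SparkContext `sc`: jobs triggered inside the block carry the given execution id as a local property, and the previous value is restored afterwards.

// Hypothetical usage; `spark` (SparkSession) and `sc` (SparkContext) are assumed to exist.
val rows = SQLExecution.withExecutionId(sc, "42") {
  // Jobs submitted here are tagged with spark.sql.execution.id = "42".
  spark.range(1000).count()
}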
Example 63
Source File: BlockingIO.scala    From gbf-raidfinder   with MIT License 5 votes vote down vote up
package walfie.gbf.raidfinder.util

import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, ThreadFactory}
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future, Promise, blocking}
import scala.util.control.NonFatal
import monix.execution.Scheduler

// https://github.com/alexandru/scala-best-practices/blob/master/sections/4-concurrency-parallelism.md
object BlockingIO {
  private val ioThreadPool = Scheduler.io(name = "io-thread")

  def future[T](t: => T): Future[T] = {
    val p = Promise[T]()

    val runnable = new Runnable {
      def run() = try {
        p.success(blocking(t))
      } catch {
        case NonFatal(ex) => p.failure(ex)
      }
    }

    ioThreadPool.execute(runnable)

    p.future
  }
} 
Example 64
Source File: SequentialLogEntryId.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.participant.state.kvutils

import java.util.concurrent.atomic.AtomicLong

import com.daml.ledger.participant.state.kvutils.DamlKvutils.DamlLogEntryId
import com.google.protobuf.ByteString

class SequentialLogEntryId(prefix: String) {
  private val currentEntryId = new AtomicLong()
  private val prefixBytes = ByteString.copyFromUtf8(prefix)

  def next(): DamlLogEntryId = {
    val entryId = currentEntryId.getAndIncrement().toHexString
    DamlLogEntryId.newBuilder
      .setEntryId(prefixBytes.concat(ByteString.copyFromUtf8(entryId)))
      .build
  }
} 
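A small usage sketch, assuming the kvutils classes above are on the classpath; the counter starts at 0, so each entry id is the UTF-8 prefix followed by the hex-encoded counter:

// Hypothetical usage: ids are the prefix bytes plus the hex-encoded counter.
val entryIds = new SequentialLogEntryId("txn-")
val first  = entryIds.next()  // entry id bytes decode to "txn-0"
val second = entryIds.next()  // entry id bytes decode to "txn-1"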
Example 65
Source File: ApiCommandCompletionService.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services

import java.util.concurrent.atomic.AtomicLong

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.ledger.participant.state.index.v2.IndexCompletionsService
import com.daml.dec.DirectExecutionContext
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.ledger.api.domain
import com.daml.ledger.api.domain.{LedgerId, LedgerOffset}
import com.daml.ledger.api.messages.command.completion.CompletionStreamRequest
import com.daml.ledger.api.v1.command_completion_service._
import com.daml.ledger.api.validation.PartyNameChecker
import com.daml.logging.LoggingContext.withEnrichedLoggingContext
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import com.daml.platform.api.grpc.GrpcApiService
import com.daml.platform.server.api.services.domain.CommandCompletionService
import com.daml.platform.server.api.services.grpc.GrpcCommandCompletionService
import io.grpc.ServerServiceDefinition

import scala.concurrent.{ExecutionContext, Future}

final class ApiCommandCompletionService private (completionsService: IndexCompletionsService)(
    implicit ec: ExecutionContext,
    protected val mat: Materializer,
    protected val esf: ExecutionSequencerFactory,
    logCtx: LoggingContext)
    extends CommandCompletionService {

  private val logger = ContextualizedLogger.get(this.getClass)

  private val subscriptionIdCounter = new AtomicLong()

  override def completionStreamSource(
      request: CompletionStreamRequest): Source[CompletionStreamResponse, NotUsed] =
    withEnrichedLoggingContext(logging.parties(request.parties), logging.offset(request.offset)) {
      implicit logCtx =>
        val subscriptionId = subscriptionIdCounter.getAndIncrement().toString
        logger.debug(s"Received request for completion subscription $subscriptionId: $request")

        val offset = request.offset.getOrElse(LedgerOffset.LedgerEnd)

        completionsService
          .getCompletions(offset, request.applicationId, request.parties)
          .via(logger.logErrorsOnStream)
    }

  override def getLedgerEnd(ledgerId: domain.LedgerId): Future[LedgerOffset.Absolute] =
    completionsService.currentLedgerEnd().andThen(logger.logErrorsOnCall[LedgerOffset.Absolute])

}

object ApiCommandCompletionService {

  def create(ledgerId: LedgerId, completionsService: IndexCompletionsService)(
      implicit ec: ExecutionContext,
      mat: Materializer,
      esf: ExecutionSequencerFactory,
      logCtx: LoggingContext): GrpcCommandCompletionService with GrpcApiService = {
    val impl: CommandCompletionService =
      new ApiCommandCompletionService(completionsService)

    new GrpcCommandCompletionService(ledgerId, impl, PartyNameChecker.AllowAllParties)
    with GrpcApiService {
      override def bindService(): ServerServiceDefinition =
        CommandCompletionServiceGrpc.bindService(this, DirectExecutionContext)
    }
  }
} 
Example 66
Source File: RowQueue.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.shabondi.sink

import java.time.{Duration => JDuration}
import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicLong

import oharastream.ohara.common.data.Row

private[sink] class RowQueue extends ConcurrentLinkedQueue[Row] {
  private[sink] val lastTime = new AtomicLong(System.currentTimeMillis())

  override def poll(): Row =
    try {
      super.poll()
    } finally {
      lastTime.set(System.currentTimeMillis())
    }

  def isIdle(idleTime: JDuration): Boolean =
    System.currentTimeMillis() > (idleTime.toMillis + lastTime.get())
} 
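The AtomicLong here records the time of the last poll so an idle sink can be detected; isIdle simply compares that timestamp against the allowed idle window.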
Example 67
Source File: BroadcastManager.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.broadcast

import java.util.concurrent.atomic.AtomicLong

import scala.reflect.ClassTag

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.internal.Logging

private[spark] class BroadcastManager(
    val isDriver: Boolean,
    conf: SparkConf,
    securityManager: SecurityManager)
  extends Logging {

  private var initialized = false
  private var broadcastFactory: BroadcastFactory = null

  initialize()

  // Called by SparkContext or Executor before using Broadcast
  private def initialize() {
    synchronized {
      if (!initialized) {
        broadcastFactory = new TorrentBroadcastFactory
        broadcastFactory.initialize(isDriver, conf, securityManager)
        initialized = true
      }
    }
  }

  def stop() {
    broadcastFactory.stop()
  }

  private val nextBroadcastId = new AtomicLong(0)

  def newBroadcast[T: ClassTag](value_ : T, isLocal: Boolean): Broadcast[T] = {
    broadcastFactory.newBroadcast[T](value_, isLocal, nextBroadcastId.getAndIncrement())
  }

  def unbroadcast(id: Long, removeFromDriver: Boolean, blocking: Boolean) {
    broadcastFactory.unbroadcast(id, removeFromDriver, blocking)
  }
} 
Example 68
Source File: ThreadFactoryBuilder.scala    From gfc-concurrent   with Apache License 2.0 5 votes vote down vote up
package com.gilt.gfc.concurrent

import java.lang.Thread.UncaughtExceptionHandler
import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.ThreadFactory

import com.gilt.gfc.logging.Loggable


object ThreadFactoryBuilder {
  def apply(): ThreadFactoryBuilder = ThreadFactoryBuilder(None, None, None, None, true)

  def apply(groupName: String, threadName: String): ThreadFactoryBuilder = {
    val group = ThreadGroupBuilder().withName(groupName).build()
    ThreadFactoryBuilder().withNameFormat(threadName + "-%s").withThreadGroup(group)
  }

  val LogUncaughtExceptionHandler = new Thread.UncaughtExceptionHandler with Loggable {
    override def uncaughtException(t: Thread, e: Throwable): Unit = {
      error("Failed to catch exception in thread " + t.getName(), e)
    }
  }
}

case class ThreadFactoryBuilder private (private val nameFormat: Option[String],
                                         private val priority: Option[Int],
                                         private val exceptionHandler: Option[UncaughtExceptionHandler],
                                         private val threadGroup: Option[ThreadGroup],
                                         private val daemon: Boolean) {
  def withNameFormat(nameFormat: String): ThreadFactoryBuilder = copy(nameFormat = Some(nameFormat))

  def withPriority(priority: Int): ThreadFactoryBuilder = copy(priority = Some(priority))

  def withUncaughtExceptionHandler(exceptionHandler: UncaughtExceptionHandler): ThreadFactoryBuilder = copy(exceptionHandler = Some(exceptionHandler))

  def withThreadGroup(threadGroup: ThreadGroup): ThreadFactoryBuilder = copy(threadGroup = Some(threadGroup))

  def withDaemonFlag(isDaemon: Boolean): ThreadFactoryBuilder = copy(daemon = isDaemon)

  def build(): ThreadFactory = {
    val nameF: Option[() => String] = nameFormat.map { nf =>
      val count = new AtomicLong(0)
      () => nf.format(count.getAndIncrement)
    }

    new ThreadFactory {
      override def newThread(runnable: Runnable): Thread = {
        val group = threadGroup.getOrElse(ThreadGroupBuilder.currentThreadGroup())
        val thread = new Thread(group, runnable)
        nameF.foreach(f => thread.setName(f()))
        priority.foreach(thread.setPriority)
        exceptionHandler.foreach(thread.setUncaughtExceptionHandler)
        thread.setDaemon(daemon)
        thread
      }
    }
  }
} 
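A hypothetical usage sketch of the builder above: daemon threads named from the format string, grouped under a named thread group, with uncaught exceptions logged.

import java.util.concurrent.Executors

// Hypothetical usage: daemon threads named "worker-0", "worker-1", ... in a
// thread group called "workers", with uncaught exceptions logged.
val factory = ThreadFactoryBuilder("workers", "worker")
  .withUncaughtExceptionHandler(ThreadFactoryBuilder.LogUncaughtExceptionHandler)
  .build()

val pool = Executors.newFixedThreadPool(4, factory)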
Example 69
Source File: identifiers.scala    From apalache   with Apache License 2.0 5 votes vote down vote up
package at.forsyte.apalache.tla.lir

import java.util.concurrent.atomic.AtomicLong


  def unique: UID = {
    val newId = nextId.getAndAdd(1)
    if (newId == Long.MaxValue) {
      throw new IllegalStateException("Too many identifiers, change the underlying representation of UID.")
    }
    new UID(newId)
  }
}

trait Identifiable extends Ordered[Identifiable] {
  val ID : UID = UID.unique

  override def compare(that: Identifiable): Int = ID.id.compareTo(that.ID.id)
} 
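The enclosing `object UID`, its counter, and the UID class itself are elided above. A minimal sketch of what the allocator plausibly looks like, with a hypothetical `UID` value class (not the project's actual definition):

import java.util.concurrent.atomic.AtomicLong

// Hypothetical reconstruction for illustration only.
class UID(val id: Long) extends AnyVal

object UID {
  private val nextId = new AtomicLong(0)

  def unique: UID = {
    val newId = nextId.getAndAdd(1)
    if (newId == Long.MaxValue) {
      throw new IllegalStateException("Too many identifiers, change the underlying representation of UID.")
    }
    new UID(newId)
  }
}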
Example 70
Source File: CloudantStreamingSelector.scala    From bahir   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.examples.sql.cloudant

import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.{ Seconds, StreamingContext, Time }

import org.apache.bahir.cloudant.CloudantReceiver

object CloudantStreamingSelector {
  def main(args: Array[String]) {
    val spark = SparkSession.builder()
      .appName("Cloudant Spark SQL External Datasource in Scala")
      .master("local[*]")
      .getOrCreate()

    import spark.implicits._

    // Create the context with a 10 seconds batch size
    val ssc = new StreamingContext(spark.sparkContext, Seconds(10))
    val curTotalAmount = new AtomicLong(0)
    val curSalesCount = new AtomicLong(0)
    var batchAmount = 0L

    val changes = ssc.receiverStream(new CloudantReceiver(spark.sparkContext.getConf, Map(
      "cloudant.host" -> "examples.cloudant.com",
      "database" -> "sales",
      "selector" -> "{\"month\":\"May\", \"rep\":\"John\"}")))

    changes.foreachRDD((rdd: RDD[String], time: Time) => {
      // Get the singleton instance of SQLContext

      println(s"========= $time =========") // scalastyle:ignore
      val changesDataFrame = spark.read.json(rdd.toDS())
      if (changesDataFrame.schema.nonEmpty) {
        changesDataFrame.select("*").show()
        batchAmount = changesDataFrame.groupBy().sum("amount").collect()(0).getLong(0)
        curSalesCount.getAndAdd(changesDataFrame.count())
        curTotalAmount.getAndAdd(batchAmount)
        println("Current sales count: " + curSalesCount) // scalastyle:ignore
        println("Current total amount: " + curTotalAmount) // scalastyle:ignore
      } else {
        ssc.stop()
      }
    })

    ssc.start()
    ssc.awaitTermination()
  }
} 
Example 71
Source File: mmkvStore.scala    From vm   with GNU Affero General Public License v3.0 5 votes vote down vote up
package org.mmadt.storage.mmkv

import java.util.concurrent.atomic.AtomicLong

import org.mmadt.language.obj.`type`.RecType
import org.mmadt.language.obj.value.strm.RecStrm
import org.mmadt.language.obj.value.{IntValue, RecValue, StrValue, Value}
import org.mmadt.language.obj.{Obj, Rec, ViaTuple, _}
import org.mmadt.language.{LanguageException, LanguageFactory, LanguageProvider, Tokens}
import org.mmadt.storage.StorageFactory._

import scala.collection.mutable
import scala.io.{BufferedSource, Source}


  }
}

object mmkvStore extends AutoCloseable {
  private val dbs: mutable.Map[String, mmkvStore[Obj, Obj]] = new mutable.LinkedHashMap

  def open[K <: Obj, V <: Obj](file: String): mmkvStore[K, V] =
    if (file.equals(Tokens.empty)) dbs.last._2.asInstanceOf[mmkvStore[K, V]]
    else {
      val db = dbs.getOrElseUpdate(file, new mmkvStore(file))
      dbs.remove(file)
      dbs.put(file, db)
      db.asInstanceOf[mmkvStore[K, V]]
    }

  override def close(): Unit = {
    dbs.values.foreach(m => m.close())
    dbs.clear()
  }
} 
Example 72
Source File: ThroughputSpec.scala    From eclair   with Apache License 2.0 5 votes vote down vote up
package fr.acinq.eclair.channel

import java.util.UUID
import java.util.concurrent.CountDownLatch
import java.util.concurrent.atomic.AtomicLong

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.testkit.TestProbe
import fr.acinq.bitcoin.{ByteVector32, Crypto}
import fr.acinq.eclair.TestConstants.{Alice, Bob}
import fr.acinq.eclair._
import fr.acinq.eclair.blockchain._
import fr.acinq.eclair.blockchain.bitcoind.ZmqWatcher
import fr.acinq.eclair.payment.relay.{CommandBuffer, Relayer}
import fr.acinq.eclair.wire.{Init, UpdateAddHtlc}
import org.scalatest.funsuite.AnyFunSuite

import scala.concurrent.duration._
import scala.util.Random

class ThroughputSpec extends AnyFunSuite {
  ignore("throughput") {
    implicit val system = ActorSystem("test")
    val pipe = system.actorOf(Props[Pipe], "pipe")
    val blockCount = new AtomicLong()
    val blockchain = system.actorOf(ZmqWatcher.props(blockCount, new TestBitcoinClient()), "blockchain")
    val paymentHandler = system.actorOf(Props(new Actor() {
      val random = new Random()

      context.become(run(Map()))

      override def receive: Receive = ???

      def run(h2r: Map[ByteVector32, ByteVector32]): Receive = {
        case ('add, tgt: ActorRef) =>
          val r = randomBytes32
          val h = Crypto.sha256(r)
          tgt ! CMD_ADD_HTLC(1 msat, h, CltvExpiry(1), TestConstants.emptyOnionPacket, Upstream.Local(UUID.randomUUID()))
          context.become(run(h2r + (h -> r)))

        case ('sig, tgt: ActorRef) => tgt ! CMD_SIGN

        case htlc: UpdateAddHtlc if h2r.contains(htlc.paymentHash) =>
          val r = h2r(htlc.paymentHash)
          sender ! CMD_FULFILL_HTLC(htlc.id, r)
          context.become(run(h2r - htlc.paymentHash))
      }
    }), "payment-handler")
    val registerA = TestProbe()
    val registerB = TestProbe()
    val commandBufferA = system.actorOf(Props(new CommandBuffer(Alice.nodeParams, registerA.ref)))
    val commandBufferB = system.actorOf(Props(new CommandBuffer(Bob.nodeParams, registerB.ref)))
    val relayerA = system.actorOf(Relayer.props(Alice.nodeParams, TestProbe().ref, registerA.ref, commandBufferA, paymentHandler))
    val relayerB = system.actorOf(Relayer.props(Bob.nodeParams, TestProbe().ref, registerB.ref, commandBufferB, paymentHandler))
    val wallet = new TestWallet
    val alice = system.actorOf(Channel.props(Alice.nodeParams, wallet, Bob.nodeParams.nodeId, blockchain, relayerA, None), "a")
    val bob = system.actorOf(Channel.props(Bob.nodeParams, wallet, Alice.nodeParams.nodeId, blockchain, relayerB, None), "b")
    val aliceInit = Init(Alice.channelParams.features)
    val bobInit = Init(Bob.channelParams.features)
    alice ! INPUT_INIT_FUNDER(ByteVector32.Zeroes, TestConstants.fundingSatoshis, TestConstants.pushMsat, TestConstants.feeratePerKw, TestConstants.feeratePerKw, Alice.channelParams, pipe, bobInit, ChannelFlags.Empty, ChannelVersion.STANDARD)
    bob ! INPUT_INIT_FUNDEE(ByteVector32.Zeroes, Bob.channelParams, pipe, aliceInit)

    val latch = new CountDownLatch(2)
    val listener = system.actorOf(Props(new Actor {
      override def receive: Receive = {
        case ChannelStateChanged(_, _, _, _, NORMAL, _) => latch.countDown()
      }
    }), "listener")
    system.eventStream.subscribe(listener, classOf[ChannelEvent])

    pipe ! (alice, bob)
    latch.await()

    var i = new AtomicLong(0)
    val random = new Random()

    def msg = random.nextInt(100) % 5 match {
      case 0 | 1 | 2 | 3 => 'add
      case 4 => 'sig
    }

    import scala.concurrent.ExecutionContext.Implicits.global
    system.scheduler.schedule(0 seconds, 50 milliseconds, new Runnable() {
      override def run(): Unit = paymentHandler ! (msg, alice)
    })
    system.scheduler.schedule(5 seconds, 70 milliseconds, new Runnable() {
      override def run(): Unit = paymentHandler ! (msg, bob)
    })

    Thread.sleep(Long.MaxValue)
  }
} 
Example 73
Source File: PostgresExtensionsSpec.scala    From akka-stream-extensions   with Apache License 2.0 5 votes vote down vote up
package com.mfglabs.stream
package extensions.postgres

import java.io.File
import java.util.concurrent.atomic.AtomicLong

import akka.actor.ActorSystem
import akka.stream.scaladsl._
import akka.util.ByteString
import org.scalatest.time._
import org.scalatest._
import concurrent.ScalaFutures
import akka.stream._
import scala.util.Try

class PostgresExtensionsSpec extends FlatSpec with Matchers with ScalaFutures with BeforeAndAfterAll with DockerTmpDB { self: DockerTmpDB =>
  implicit val as = ActorSystem()
  implicit val fm = ActorMaterializer()
  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(5, Minutes), interval = Span(5, Millis))


  "PgStream" should "stream a file to a Postgres table and stream a sql query from a Postgres table" in {
    val stmt = conn.createStatement()
    implicit val pgConn = PgStream.sqlConnAsPgConnUnsafe(conn)
    implicit val blockingEc = ExecutionContextForBlockingOps(scala.concurrent.ExecutionContext.Implicits.global)

    stmt.execute(
      s"""
       create table public.test_postgres(
        io_id integer,
        dsp_name text,
        advertiser_id integer,
        campaign_id integer,
        strategy_id integer,
        day date,
        impressions integer,
        clicks integer,
        post_view_conversions float8,
        post_click_conversions float8,
        media_cost float8,
        total_ad_cost float8,
        total_cost float8
       )
     """
    )

    val insertTable = "test_postgres(io_id, dsp_name, advertiser_id, campaign_id, strategy_id, day, impressions, " +
      "clicks, post_view_conversions, post_click_conversions, media_cost, total_ad_cost, total_cost)"

    val nbLinesInserted = new AtomicLong(0L)

    val futLines = SourceExt
      .fromFile(new File(getClass.getResource("/report.csv0000_part_00").getPath), maxChunkSize = 5 * 1024 * 1024)
      .via(FlowExt.rechunkByteStringBySeparator(ByteString("\n"), maximumChunkBytes = 1 * 1024 * 1024))
      .via(PgStream.insertStreamToTable("public", insertTable, Map("DELIMITER" -> "','"), pgVersion = self.version, chunkInsertionConcurrency = 2))
      .via(FlowExt.fold(0L)(_ + _))
      .map { total =>
      nbLinesInserted.set(total)
      PgStream.getQueryResultAsStream("select * from public.test_postgres", Map("DELIMITER" -> "','"), pgVersion = self.version)
    }
      .flatMapConcat(identity)
      .via(FlowExt.rechunkByteStringBySize(5 * 1024 * 1024))
      .via(FlowExt.rechunkByteStringBySeparator(ByteString("\n"), maximumChunkBytes = 1 * 1024 * 1024))
      .map(_.utf8String)
      .runWith(Sink.seq)

    val futExpectedLines = SourceExt
      .fromFile(new File(getClass.getResource("/report.csv0000_part_00").getPath), maxChunkSize = 5 * 1024 * 1024)
      .via(FlowExt.rechunkByteStringBySeparator(ByteString("\n"), maximumChunkBytes = 1 * 1024 * 1024))
      .map(_.utf8String)
      .runWith(Sink.seq)

    whenReady(futLines zip futExpectedLines) { case (lines, expectedLines) =>
      lines.length shouldEqual expectedLines.length
      lines.length shouldEqual nbLinesInserted.get

      lines.sorted.zip(expectedLines.sorted).foreach { case (line, expectedLine) =>
        line.split(",").map { s =>
          Try(s.toDouble).map(BigDecimal(_).setScale(3, BigDecimal.RoundingMode.HALF_UP).toDouble).getOrElse(s)
        } shouldEqual expectedLine.split(",").map { s =>
          Try(s.toDouble).map(BigDecimal(_).setScale(3, BigDecimal.RoundingMode.HALF_UP).toDouble).getOrElse(s)
        }
      }

      stmt.close()
    }


  }
} 
Example 74
Source File: ScModificationTrackerOwner.scala    From intellij-lsp   with Apache License 2.0 5 votes vote down vote up
package org.jetbrains.plugins.scala.lang.psi.api.expr

import java.util.concurrent.atomic.AtomicLong

import com.intellij.openapi.util.ModificationTracker
import com.intellij.psi.{PsiElement, PsiModifiableCodeBlock}
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScFunction, ScValue, ScVariable}
import org.jetbrains.plugins.scala.lang.psi.impl.{ScalaPsiElementFactory, ScalaPsiManager}
import org.jetbrains.plugins.scala.macroAnnotations.{Cached, ModCount}

import scala.annotation.tailrec


trait ScModificationTrackerOwner extends ScalaPsiElement with PsiModifiableCodeBlock {
  private val blockModificationCount = new AtomicLong(0L)

  def rawModificationCount: Long = blockModificationCount.get()

  def getModificationTracker: ModificationTracker = {
    assert(isValidModificationTrackerOwner)
    new ModificationTracker {
      override def getModificationCount: Long = getModificationCountImpl
    }
  }

  private def getModificationCountImpl: Long = {
    @tailrec
    def calc(place: PsiElement, sum: Long): Long = place match {
      case null => sum + ScalaPsiManager.instance(getProject).getModificationCount
      case _: ScalaFile => sum + ScalaPsiManager.instance(getProject).getModificationCount
      case owner: ScModificationTrackerOwner if owner.isValidModificationTrackerOwner =>
        calc(owner.getContext, sum + owner.rawModificationCount)
      case _ => calc(place.getContext, sum)
    }

    calc(this, 0L)
  }

  def incModificationCount(): Long = {
    assert(isValidModificationTrackerOwner)
    blockModificationCount.incrementAndGet()
  }

  def isValidModificationTrackerOwner: Boolean = {
    getContext match {
      case f: ScFunction => f.returnTypeElement match {
        case Some(_) =>  true
        case None if !f.hasAssign => true
        case _ => false
      }
      case v: ScValue if v.typeElement.isDefined => true
      case _: ScValue => false
      case v: ScVariable if v.typeElement.isDefined => true
      case _: ScVariable => false
      case _: ScWhileStmt => true
      case _: ScFinallyBlock => true
      case _: ScDoStmt => true
      case _ => false
    }
  }

  //elem is always the child of this element because this function is called when going up the tree starting with elem
  //if this is a valid modification tracker owner, no need to change modification count
  override def shouldChangeModificationCount(elem: PsiElement): Boolean = {
    !isValidModificationTrackerOwner
  }

  def createMirror(text: String): PsiElement = {
    ScalaPsiElementFactory.createExpressionWithContextFromText(text, getContext, this)
  }

  @Cached(ModCount.getBlockModificationCount, this)
  def getMirrorPositionForCompletion(dummyIdentifier: String, pos: Int): Option[PsiElement] = {
    val text = new StringBuilder(getText)
    text.insert(pos, dummyIdentifier)
    val newBlock = createMirror(text.toString())
    Option(newBlock).flatMap(b => Option(b.findElementAt(pos)))
  }
} 
Example 75
Source File: package.scala    From intellij-lsp   with Apache License 2.0 5 votes vote down vote up
package org.jetbrains.plugins.scala.lang.psi.api.statements

import java.util.concurrent.atomic.AtomicLong

import com.intellij.openapi.util.{Key, UserDataHolder, UserDataHolderEx}
import com.intellij.psi.{PsiClass, PsiElement, PsiTypeParameter}
import com.intellij.util.containers.ContainerUtil
import org.jetbrains.plugins.scala.extensions.{ConcurrentMapExt, PsiClassExt, PsiElementExt, PsiMemberExt, PsiNamedElementExt}
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile

import scala.language.implicitConversions


package object params {
  private val typeParameterCounter = new AtomicLong(0)
  private val typeParameterIdKey: Key[java.lang.Long] = Key.create("type.parameter.id")
  private val idMap = ContainerUtil.newConcurrentMap[String, Long]()

  private def elementQual(element: ScalaPsiElement): String = {
    element match {
      case t: ScTypeParam => elementQual(t.owner) + "#" + t.name
      case c: PsiClass => c.qualifiedName
      case f: ScFunction => elementQual(f.containingClass) + ".." + f.name
      case _ => ""
    }
  }

  def freshTypeParamId(): Long = typeParameterCounter.getAndIncrement()

  def reusableId(typeParameter: ScTypeParam): Long = typeParameter.containingFile match {
    case Some(file: ScalaFile) if file.isCompiled =>
      val qualifier = elementQual(typeParameter)
      idMap.atomicGetOrElseUpdate(qualifier, freshTypeParamId())
    case _ => freshTypeParamId()
  }

  private def cachedId(element: PsiElement): Long = element.getOrUpdateUserData(typeParameterIdKey, Long.box(freshTypeParamId()))

  implicit class PsiTypeParameterExt(val typeParameter: PsiTypeParameter) extends AnyVal {
    def nameAndId: (String, Long) = (typeParameter.name, typeParamId)

    def typeParamId: Long = typeParameter match {
      case sc: ScTypeParam => sc.typeParamId
      case psiTp =>
        Option(psiTp.getOwner)
          .flatMap(owner => Option(owner.containingClass))
          .map(cachedId)
          .getOrElse(-1L)
    }
  }

  implicit class UserDataHolderExt(val holder: UserDataHolder) extends AnyVal {
    def getOrUpdateUserData[T](key: Key[T], update: => T): T = {
      Option(holder.getUserData(key)).getOrElse {
        val newValue = update
        holder match {
          case ex: UserDataHolderEx =>
            ex.putUserDataIfAbsent(key, newValue)
          case _ =>
            holder.putUserData(key, newValue)
            newValue
        }
      }
    }
  }
} 
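A generic, self-contained sketch of the same idea used by freshTypeParamId and reusableId: one global AtomicLong hands out fresh ids, and a concurrent map caches a stable id per string qualifier (illustrative names only):

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicLong

object StableIds {
  private val counter     = new AtomicLong(0)
  private val byQualifier = new ConcurrentHashMap[String, java.lang.Long]()

  // A brand-new id on every call.
  def fresh(): Long = counter.getAndIncrement()

  // The same id for the same qualifier, computed at most once.
  def forQualifier(qualifier: String): Long =
    byQualifier.computeIfAbsent(qualifier, _ => Long.box(fresh())).longValue()
}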
Example 76
Source File: ThreadFactories.scala    From docspell   with GNU General Public License v3.0 5 votes vote down vote up
package docspell.common

import java.util.concurrent.ForkJoinPool
import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory
import java.util.concurrent.ForkJoinWorkerThread
import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, ThreadFactory}

import scala.concurrent._

import cats.effect._

object ThreadFactories {

  def ofName(prefix: String): ThreadFactory =
    new ThreadFactory {

      val counter = new AtomicLong(0)

      override def newThread(r: Runnable): Thread = {
        val t = Executors.defaultThreadFactory().newThread(r)
        t.setName(s"$prefix-${counter.getAndIncrement()}")
        t
      }
    }

  def ofNameFJ(prefix: String): ForkJoinWorkerThreadFactory =
    new ForkJoinWorkerThreadFactory {
      val tf      = ForkJoinPool.defaultForkJoinWorkerThreadFactory
      val counter = new AtomicLong(0)

      def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = {
        val t = tf.newThread(pool)
        t.setName(s"$prefix-${counter.getAndIncrement()}")
        t
      }
    }

  def executorResource[F[_]: Sync](
      c: => ExecutionContextExecutorService
  ): Resource[F, ExecutionContextExecutorService] =
    Resource.make(Sync[F].delay(c))(ec => Sync[F].delay(ec.shutdown))

  def cached[F[_]: Sync](
      tf: ThreadFactory
  ): Resource[F, ExecutionContextExecutorService] =
    executorResource(
      ExecutionContext.fromExecutorService(Executors.newCachedThreadPool(tf))
    )

  def fixed[F[_]: Sync](
      n: Int,
      tf: ThreadFactory
  ): Resource[F, ExecutionContextExecutorService] =
    executorResource(
      ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(n, tf))
    )

  def workSteal[F[_]: Sync](
      n: Int,
      tf: ForkJoinWorkerThreadFactory
  ): Resource[F, ExecutionContextExecutorService] =
    executorResource(
      ExecutionContext.fromExecutorService(
        new ForkJoinPool(n, tf, null, true)
      )
    )

  def workSteal[F[_]: Sync](
      tf: ForkJoinWorkerThreadFactory
  ): Resource[F, ExecutionContextExecutorService] =
    workSteal[F](Runtime.getRuntime().availableProcessors() + 1, tf)
} 
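A hypothetical usage sketch of ofNameFJ, mirroring the workSteal helper above: a work-stealing pool whose threads carry a readable name plus counter, wrapped as an ExecutionContext.

import java.util.concurrent.ForkJoinPool
import scala.concurrent.ExecutionContext

// Hypothetical usage: threads are named "batch-0", "batch-1", ...
val pool = new ForkJoinPool(4, ThreadFactories.ofNameFJ("batch"), null, true)
val ec   = ExecutionContext.fromExecutorService(pool)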
Example 77
Source File: BroadcastManager.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.broadcast

import java.util.concurrent.atomic.AtomicLong

import scala.reflect.ClassTag

import org.apache.spark._

private[spark] class BroadcastManager(
    val isDriver: Boolean,
    conf: SparkConf,
    securityManager: SecurityManager)
  extends Logging {

  private var initialized = false
  private var broadcastFactory: BroadcastFactory = null

  initialize()

  // Called by SparkContext or Executor before using Broadcast
  private def initialize() {
    synchronized {
      if (!initialized) {
        val broadcastFactoryClass =
          conf.get("spark.broadcast.factory", "org.apache.spark.broadcast.TorrentBroadcastFactory")

        broadcastFactory =
          Class.forName(broadcastFactoryClass).newInstance.asInstanceOf[BroadcastFactory]

        // Initialize appropriate BroadcastFactory and BroadcastObject
        broadcastFactory.initialize(isDriver, conf, securityManager)

        initialized = true
      }
    }
  }

  def stop() {
    broadcastFactory.stop()
  }

  private val nextBroadcastId = new AtomicLong(0)

  def newBroadcast[T: ClassTag](value_ : T, isLocal: Boolean) = {
    broadcastFactory.newBroadcast[T](value_, isLocal, nextBroadcastId.getAndIncrement())
  }

  def unbroadcast(id: Long, removeFromDriver: Boolean, blocking: Boolean) {
    broadcastFactory.unbroadcast(id, removeFromDriver, blocking)
  }
} 
Example 78
Source File: SingleThreadedActorSystem.scala    From graphcool-framework   with Apache License 2.0 5 votes vote down vote up
package cool.graph.akkautil

import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, ThreadFactory}

import akka.actor.ActorSystem

object SingleThreadedActorSystem {
  def apply(name: String): ActorSystem = {
    val ec = scala.concurrent.ExecutionContext.fromExecutor(Executors.newSingleThreadExecutor(newNamedThreadFactory(name)))
    ActorSystem(name, defaultExecutionContext = Some(ec))
  }

  def newNamedThreadFactory(name: String): ThreadFactory = new ThreadFactory {
    val count = new AtomicLong(0)

    override def newThread(runnable: Runnable): Thread = {
      val thread = new Thread(runnable)
      thread.setDaemon(true)
      thread.setName(s"$name-" + count.getAndIncrement)
      thread
    }
  }
} 
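A hypothetical usage sketch: because the system's default execution context is the single-threaded executor built from the named factory, work scheduled on the dispatcher runs on one daemon thread (named "pin-worker-0" here).

import scala.concurrent.Future

// Hypothetical usage: the Future below runs on the single daemon thread "pin-worker-0".
val system = SingleThreadedActorSystem("pin-worker")
import system.dispatcher
Future(println(s"running on ${Thread.currentThread().getName}"))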
Example 79
Source File: RateController.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.scheduler

import java.io.ObjectInputStream
import java.util.concurrent.atomic.AtomicLong

import scala.concurrent.{ExecutionContext, Future}

import org.apache.spark.SparkConf
import org.apache.spark.streaming.scheduler.rate.RateEstimator
import org.apache.spark.util.{ThreadUtils, Utils}


  private def computeAndPublish(time: Long, elems: Long, workDelay: Long, waitDelay: Long): Unit =
    Future[Unit] {
      val newRate = rateEstimator.compute(time, elems, workDelay, waitDelay)
      newRate.foreach { s =>
        rateLimit.set(s.toLong)
        publish(getLatestRate())
      }
    }

  def getLatestRate(): Long = rateLimit.get()

  override def onBatchCompleted(batchCompleted: StreamingListenerBatchCompleted) {
    val elements = batchCompleted.batchInfo.streamIdToInputInfo

    for {
      processingEnd <- batchCompleted.batchInfo.processingEndTime
      workDelay <- batchCompleted.batchInfo.processingDelay
      waitDelay <- batchCompleted.batchInfo.schedulingDelay
      elems <- elements.get(streamUID).map(_.numRecords)
    } computeAndPublish(processingEnd, elems, workDelay, waitDelay)
  }
}

object RateController {
  def isBackPressureEnabled(conf: SparkConf): Boolean =
    conf.getBoolean("spark.streaming.backpressure.enabled", false)
} 
Example 80
Source File: SQLExecution.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution

import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.ui.{SparkListenerSQLExecutionEnd,
  SparkListenerSQLExecutionStart}

object SQLExecution {

  val EXECUTION_ID_KEY = "spark.sql.execution.id"

  private val _nextExecutionId = new AtomicLong(0)

  private def nextExecutionId: Long = _nextExecutionId.getAndIncrement

  
  def withExecutionId[T](sc: SparkContext, executionId: String)(body: => T): T = {
    val oldExecutionId = sc.getLocalProperty(SQLExecution.EXECUTION_ID_KEY)
    try {
      sc.setLocalProperty(SQLExecution.EXECUTION_ID_KEY, executionId)
      body
    } finally {
      sc.setLocalProperty(SQLExecution.EXECUTION_ID_KEY, oldExecutionId)
    }
  }
} 
Example 81
Source File: AsynchronousLogHandler.scala    From scribe   with MIT License 5 votes vote down vote up
package scribe.handler

import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicLong

import scribe.LogRecord
import scribe.format.Formatter
import scribe.modify.LogModifier
import scribe.writer.{ConsoleWriter, Writer}
import perfolation._

import scala.language.implicitConversions

case class AsynchronousLogHandler(formatter: Formatter = Formatter.default,
                                  writer: Writer = ConsoleWriter,
                                  modifiers: List[LogModifier] = Nil,
                                  maxBuffer: Int = AsynchronousLogHandler.DefaultMaxBuffer,
                                  overflow: Overflow = Overflow.DropOld) extends LogHandler {
  private lazy val cached = new AtomicLong(0L)

  private lazy val queue = {
    val q = new ConcurrentLinkedQueue[LogRecord[_]]
    val t = new Thread {
      setDaemon(true)

      override def run(): Unit = while (true) {
        Option(q.poll()) match {
          case Some(record) => {
            cached.decrementAndGet()
            SynchronousLogHandler.log(AsynchronousLogHandler.this, record)
            Thread.sleep(1L)
          }
          case None => Thread.sleep(10L)
        }
      }
    }
    t.start()
    q
  }

  def withMaxBuffer(maxBuffer: Int): AsynchronousLogHandler = copy(maxBuffer = maxBuffer)

  def withOverflow(overflow: Overflow): AsynchronousLogHandler = copy(overflow = overflow)

  override def withFormatter(formatter: Formatter): AsynchronousLogHandler = copy(formatter = formatter)

  override def withWriter(writer: Writer): AsynchronousLogHandler = copy(writer = writer)

  override def setModifiers(modifiers: List[LogModifier]): AsynchronousLogHandler = copy(modifiers = modifiers)

  override def log[M](record: LogRecord[M]): Unit = {
    val add = if (!cached.incrementIfLessThan(maxBuffer)) {
      overflow match {
        case Overflow.DropOld => {
          queue.poll()
          true
        }
        case Overflow.DropNew => false
        case Overflow.Block => {
          while(!cached.incrementIfLessThan(maxBuffer)) {
            Thread.sleep(1L)
          }
          true
        }
        case Overflow.Error => throw new LogOverflowException(p"Queue filled (max: $maxBuffer) while attempting to asynchronously log")
      }
    } else {
      true
    }
    if (add) {
      queue.add(record)
    }
  }
}

object AsynchronousLogHandler {
  val DefaultMaxBuffer: Int = 1000
} 
Example 82
Source File: AtomicLongExtras.scala    From scribe   with MIT License 5 votes vote down vote up
package scribe.handler

import java.util.concurrent.atomic.AtomicLong

import scala.annotation.tailrec

class AtomicLongExtras(val value: AtomicLong) extends AnyVal {
  
  def decrementIfGreaterThan(min: Long): Boolean = modify((value: Long) => {
    if (value > min) {
      Some(value - 1)
    }
    else {
      None
    }
  })

  def setIfCondition(condition: Long => Boolean, value: Long): Boolean = modify((value: Long) => {
    Some(value).filter(condition)
  })
} 
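Two pieces these scribe examples depend on are not shown: the compare-and-set `modify` helper called above, and the implicit enrichment that lets Example 81 write `cached.incrementIfLessThan(maxBuffer)` on a plain AtomicLong. A hedged sketch of what both plausibly look like (hypothetical names, not scribe's actual code):

import java.util.concurrent.atomic.AtomicLong
import scala.annotation.tailrec
import scala.language.implicitConversions

// Hypothetical reconstruction: retry with compareAndSet until the update either
// applies or the function declines to change the value.
final class AtomicLongOps(val value: AtomicLong) extends AnyVal {
  @tailrec
  def modify(f: Long => Option[Long]): Boolean = {
    val current = value.get()
    f(current) match {
      case Some(updated) =>
        if (value.compareAndSet(current, updated)) true else modify(f)
      case None => false
    }
  }

  def incrementIfLessThan(max: Int): Boolean =
    modify(current => if (current < max) Some(current + 1) else None)
}

object AtomicLongOps {
  // With this conversion in scope, `new AtomicLong(0L).incrementIfLessThan(10)` compiles.
  implicit def enrich(value: AtomicLong): AtomicLongOps = new AtomicLongOps(value)
}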
Example 83
Source File: FlushMode.scala    From scribe   with MIT License 5 votes vote down vote up
package scribe.writer.file

import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}

import scribe.util.Time

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

trait FlushMode {
  def dataWritten(logFile: LogFile, writer: LogFileWriter): Unit
}

object FlushMode {
  object NeverFlush extends FlushMode {
    override def dataWritten(logFile: LogFile, writer: LogFileWriter): Unit = {}
  }

  object AlwaysFlush extends FlushMode {
    override def dataWritten(logFile: LogFile, writer: LogFileWriter): Unit = writer.flush()
  }

  case class AsynchronousFlush(delay: FiniteDuration = 1.second) extends FlushMode {
    private lazy val delayMillis = delay.toMillis
    private lazy val flushing = new AtomicBoolean(false)
    private lazy val dirty = new AtomicBoolean(false)
    private lazy val lastFlush = new AtomicLong(0L)
    private var logFile: LogFile = _

    override def dataWritten(logFile: LogFile, writer: LogFileWriter): Unit = {
      this.logFile = logFile
      if (flushing.compareAndSet(false, true)) {
        flush()
      } else {
        dirty.set(true)
      }
    }

    private def flush(): Unit = Future {
      try {
        val delay = this.delayMillis - (Time() - lastFlush.get())
        if (delay > 0L) {
          Thread.sleep(delay)
        }
        logFile.flush()
      } finally {
        lastFlush.set(Time())
        if (dirty.compareAndSet(true, false)) {
          flush()
        } else {
          flushing.set(false)
        }
      }
    }
  }
} 
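AsynchronousFlush coalesces bursts of writes: the AtomicBoolean pair ensures only one background flush runs at a time, and the AtomicLong enforces a minimum delay between flushes. A generic sketch of the same pattern, decoupled from scribe's LogFile types (illustrative only):

import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

// Illustrative only: at most one background run of `action` is in flight, and
// signals arriving during a run collapse into a single follow-up run.
final class Debouncer(delayMillis: Long)(action: () => Unit) {
  private val running = new AtomicBoolean(false)
  private val dirty   = new AtomicBoolean(false)
  private val lastRun = new AtomicLong(0L)

  def signal(): Unit =
    if (running.compareAndSet(false, true)) run()
    else dirty.set(true)

  private def run(): Unit = {
    Future {
      try {
        val pause = delayMillis - (System.currentTimeMillis() - lastRun.get())
        if (pause > 0L) Thread.sleep(pause)
        action()
      } finally {
        lastRun.set(System.currentTimeMillis())
        if (dirty.compareAndSet(true, false)) run()
        else running.set(false)
      }
    }
    ()
  }
}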
Example 84
Source File: AtlasUtils.scala    From spark-atlas-connector   with Apache License 2.0 5 votes vote down vote up
package com.hortonworks.spark.atlas

import java.util.concurrent.atomic.AtomicLong

import com.hortonworks.spark.atlas.utils.Logging
import org.apache.atlas.model.instance.{AtlasEntity, AtlasObjectId}

object AtlasUtils extends Logging {
  private val executionId = new AtomicLong(0L)

  def entityToReference(entity: AtlasEntity, useGuid: Boolean = false): AtlasObjectId = {
    if (useGuid) {
      new AtlasObjectId(entity.getGuid)
    } else {
      new AtlasObjectId(entity.getTypeName, "qualifiedName", entity.getAttribute("qualifiedName"))
    }
  }

  def entitiesToReferences(
      entities: Seq[AtlasEntity],
      useGuid: Boolean = false): Set[AtlasObjectId] = {
    entities.map(entityToReference(_, useGuid)).toSet
  }

  def issueExecutionId(): Long = executionId.getAndIncrement()

  def isSacEnabled(conf: AtlasClientConf): Boolean = {
    if (!conf.get(AtlasClientConf.ATLAS_SPARK_ENABLED).toBoolean) {
      logWarn("Spark Atlas Connector is disabled.")
      false
    } else {
      true
    }
  }
} 
Example 85
Source File: Ch01.scala    From Learn-Scala-Programming   with MIT License 5 votes vote down vote up
package ch01

import java.util.concurrent.atomic.AtomicLong

import scala.io.StdIn
import scala.util.{Try, Using}

case class User(name: String, surname: String, email: String)

object Ch01 extends App {

  "10".toIntOption
  "TrUe".toBooleanOption

  val bool = "Not True"
  bool.toBooleanOption

  scala.util.Using

  val user = User("John", "Doe", "[email protected]")
  user.productElementNames.mkString(", ")
  user.productElementName(3) // out of range for arity 3 (valid indices are 0 to 2): throws IndexOutOfBoundsException

  val tuple = (1, "two", false)

  tuple.productElementNames.mkString(", ")
  tuple.productElementName(1)

  def naiveToJsonString(p: Product): String =
    (for { i <- 0 until p.productArity } yield
      s""""${p.productElementName(i)}": "${p.productElement(i)}"""")
      .mkString("{ ", ", ", " }")

  naiveToJsonString(user)

  import scala.util.chaining._

  import UserDb._
  val userId = 1L
  save(update(getById(userId)))

  getById(userId).pipe(update).pipe(save)

  val doEverything = (getById _).andThen(update).andThen(save)
  doEverything(userId)

  val lastTick = new AtomicLong(0)
  def start(): Unit = lastTick.set(System.currentTimeMillis())
  def measure[A](a: A): Unit = {
    val now = System.currentTimeMillis()
    val before = lastTick.getAndSet(now)
    println(s"$a: ${now - before} ms elapsed")
  }

  start()
  val result = StdIn.readLine().pipe(_.toIntOption).tap(measure)
  val anotherResult = StdIn.readLine().pipe(_.toIntOption).tap(measure)

  final case class Resource(name: String) extends AutoCloseable {
    override def close(): Unit = println(s"Closing $name")
    def lines = List(s"$name line 1", s"$name line 2")
  }
  val List(r1, r2, r3) = List("first", "2", "3").map(Resource)

  val lines: Try[Seq[String]] = for {
    u1 <- Using(r1)
    u2 <- Using(r2)
    u3 <- Using(r3)
  } yield {
    u1.lines ++ u2.lines ++ u3.lines
  }

  println(lines)

}

object UserDb {
  def getById(id: Long): User = ???
  def update(u: User): User = ???
  def save(u: User): Boolean = ???
} 
Example 86
Source File: L6-22Counters.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.json4s.DefaultFormats
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput

object StatefulCountersApp {

  def main(args: Array[String]) {
    if (args.length != 1) {
      System.err.println(
        "Usage: StatefulCountersApp <appname>")
      System.exit(1)
    }

    val Seq(appName) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val batchInterval = 10

    val ssc = new StreamingContext(conf, Seconds(batchInterval))
    
    val globalMax: AtomicLong = new AtomicLong(Long.MinValue)
    val globalMin: AtomicLong = new AtomicLong(Long.MaxValue)
    val globalCounter500: AtomicLong = new AtomicLong(0)

    HttpUtils.createStream(ssc, url = "https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20in%20(%22IBM,GOOG,MSFT,AAPL,FB,ORCL,YHOO,TWTR,LNKD,INTC%22)%0A%09%09&format=json&diagnostics=true&env=http%3A%2F%2Fdatatables.org%2Falltables.env",
      interval = batchInterval)
      .flatMap(rec => {
        implicit val formats = DefaultFormats
        val query = parse(rec) \ "query"
        ((query \ "results" \ "quote").children)
          .map(rec => ((rec \ "symbol").extract[String], (rec \ "LastTradePriceOnly").extract[String].toFloat, (rec \ "Volume").extract[String].toLong))
      })
      .foreachRDD(rdd => {
        val stocks = rdd.take(10)
        stocks.foreach(stock => {
          val price = stock._2
          val volume = stock._3
          if (volume > globalMax.get()) {
            globalMax.set(volume)
          }
          if (volume < globalMin.get()) {
            globalMin.set(volume)
          }
          if (price > 500) {
            globalCounter500.incrementAndGet()
          }
        })
        if (globalCounter500.get() > 1000L) {
          println("Global counter has reached 1000")
          println("Max ----> " + globalMax.get)
          println("Min ----> " + globalMin.get)
          globalCounter500.set(0)
        }
      })

    ssc.start()
    ssc.awaitTermination()
  }
} 
Example 87
Source File: L7-4UI.scala    From prosparkstreaming   with Apache License 2.0 5 votes vote down vote up
package org.apress.prospark

import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext

object SocialSearchApp {
  def main(args: Array[String]) {
    if (args.length != 3) {
      System.err.println(
        "Usage: SocialSearchApp <appname> <hostname> <port>")
      System.exit(1)
    }
    val Seq(appName, hostname, port) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)
      //.set("spark.eventLog.enabled", "true")
      //.set("spark.eventLog.dir", "/tmp/historical")
      

    val countSearch = new AtomicLong(0)
    val countSocial = new AtomicLong(0)

    val ssc = new StreamingContext(conf, Seconds(1))
    
    val titleStream = ssc.socketTextStream(hostname, port.toInt)
      .map(rec => rec.split("\\t"))
      .filter(_(3) match {
        case "other-google" | "other-bing" | "other-yahoo" | "other-facebook" | "other-twitter" => true
        case _ => false
      })
      .map(rec => (rec(3), rec(4)))
      .cache()

    val searchStream = titleStream.filter(_._1 match {
      case "other-google" | "other-bing" | "other-yahoo" => true
      case _ => false
    })
      .map(rec => rec._2)

    val socialStream = titleStream.filter(_._1 match {
      case "other-facebook" | "other-twitter" => true
      case _ => false
    })
      .map(rec => rec._2)

    val exclusiveSearch = searchStream.transformWith(socialStream,
      (searchRDD: RDD[String], socialRDD: RDD[String]) => searchRDD.subtract(socialRDD))
      .foreachRDD(rdd => {
        countSearch.addAndGet(rdd.count())
        println("Exclusive count search engines: " + countSearch)
      })

    val exclusiveSocial = socialStream.transformWith(searchStream,
      (socialRDD: RDD[String], searchRDD: RDD[String]) => socialRDD.subtract(searchRDD))
      .foreachRDD(rdd => {
        countSocial.addAndGet(rdd.count())
        println("Exclusive count social media: " + countSocial)
      })

    ssc.start()
    ssc.awaitTermination()
  }

}