java.util.concurrent.atomic.AtomicInteger Scala Examples

The following examples show how to use java.util.concurrent.atomic.AtomicInteger in Scala. Each example is taken from an open-source project; the header above each one names the original source file, project, and license.
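For orientation, here is a minimal, self-contained sketch of the AtomicInteger operations that recur throughout these examples (incrementAndGet, getAndIncrement, addAndGet, compareAndSet); the object and variable names are illustrative only.

import java.util.concurrent.atomic.AtomicInteger

object AtomicIntegerBasics extends App {
  val counter = new AtomicInteger(0)

  counter.incrementAndGet()     // atomically adds 1 and returns the new value: 1
  counter.getAndIncrement()     // returns the current value (1), then adds 1
  counter.addAndGet(5)          // atomically adds 5 and returns the new value: 7
  counter.compareAndSet(7, 100) // sets the value to 100 only if it is currently 7
  println(counter.get())        // prints 100
}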
Example 1
Source File: InboundConnectionFilter.scala    From Waves   with MIT License
package com.wavesplatform.network

import java.net.{InetAddress, InetSocketAddress}
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger

import com.wavesplatform.utils.ScorexLogging
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel.{ChannelFuture, ChannelHandlerContext}
import io.netty.handler.ipfilter.AbstractRemoteAddressFilter

@Sharable
class InboundConnectionFilter(peerDatabase: PeerDatabase, maxInboundConnections: Int, maxConnectionsPerHost: Int)
    extends AbstractRemoteAddressFilter[InetSocketAddress]
    with ScorexLogging {
  private val inboundConnectionCount = new AtomicInteger(0)
  private val perHostConnectionCount = new ConcurrentHashMap[InetAddress, Int]
  private val emptyChannelFuture     = null.asInstanceOf[ChannelFuture]

  private def dec(remoteAddress: InetAddress) = {
    inboundConnectionCount.decrementAndGet()
    log.trace(s"Number of inbound connections: ${inboundConnectionCount.get()}")
    perHostConnectionCount.compute(remoteAddress, (_, cnt) => cnt - 1)
    emptyChannelFuture
  }

  override def accept(ctx: ChannelHandlerContext, remoteAddress: InetSocketAddress): Boolean = Option(remoteAddress.getAddress) match {
    case None =>
      log.debug(s"Can't obtain an address from $remoteAddress")
      false

    case Some(address) =>
      val newTotal        = inboundConnectionCount.incrementAndGet()
      val newCountPerHost = perHostConnectionCount.compute(address, (_, cnt) => Option(cnt).fold(1)(_ + 1))
      val isBlacklisted   = peerDatabase.blacklistedHosts.contains(address)

      val accepted = newTotal <= maxInboundConnections &&
        newCountPerHost <= maxConnectionsPerHost &&
        !isBlacklisted

      log.trace(
        s"Check inbound connection from $remoteAddress: new inbound total = $newTotal, " +
          s"connections with this host = $newCountPerHost, address ${if (isBlacklisted) "IS" else "is not"} blacklisted, " +
          s"${if (accepted) "is" else "is not"} accepted"
      )

      accepted
  }

  override def channelAccepted(ctx: ChannelHandlerContext, remoteAddress: InetSocketAddress): Unit =
    ctx.channel().closeFuture().addListener((_: ChannelFuture) => Option(remoteAddress.getAddress).foreach(dec))

  override def channelRejected(ctx: ChannelHandlerContext, remoteAddress: InetSocketAddress): ChannelFuture =
    Option(remoteAddress.getAddress).fold(emptyChannelFuture)(dec)
} 
Example 2
Source File: ThreadUtil.scala    From almond   with BSD 3-Clause "New" or "Revised" License
package almond.util

import java.lang.Thread.UncaughtExceptionHandler
import java.util.concurrent.{Executors, ThreadFactory}
import java.util.concurrent.atomic.AtomicInteger

import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService}
import scala.util.control.NonFatal

object ThreadUtil {

  // From https://github.com/functional-streams-for-scala/fs2/blob/d47f903bc6bbcdd5d8bc6d573bc7cfd956f0cbb6/core/jvm/src/main/scala/fs2/Strategy.scala#L19-L41
  
  def daemonThreadFactory(threadName: String, exitJvmOnFatalError: Boolean = true): ThreadFactory = new ThreadFactory {
    val defaultThreadFactory = Executors.defaultThreadFactory()
    val idx = new AtomicInteger(0)
    def newThread(r: Runnable) = {
      val t = defaultThreadFactory.newThread(r)
      t.setDaemon(true)
      t.setName(s"$threadName-${idx.incrementAndGet()}")
      t.setUncaughtExceptionHandler(new UncaughtExceptionHandler {
        def uncaughtException(t: Thread, e: Throwable): Unit = {
          System.err.println(s"------------ UNHANDLED EXCEPTION ---------- (${t.getName})")
          e.printStackTrace(System.err)
          if (exitJvmOnFatalError) {
            e match {
              case NonFatal(_) => ()
              case fatal => System.exit(-1)
            }
          }
        }
      })
      t
    }
  }

  def sequentialExecutionContext(): ExecutionContext =
    new SequentialExecutionContext

  def singleThreadedExecutionContext(threadName: String): ExecutionContext =
    ExecutionContext.fromExecutorService(
      Executors.newSingleThreadExecutor(daemonThreadFactory(threadName))
    )

  def attemptShutdownExecutionContext(ec: ExecutionContext): Boolean =
    ec match {
      case _: SequentialExecutionContext =>
        true
      case es: ExecutionContextExecutorService =>
        es.shutdown()
        true
      case _ =>
        false
    }

} 
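A possible use of the daemonThreadFactory above (a hedged sketch; the pool name and size are made up): threads come out as daemons named my-pool-1, my-pool-2, and so on, and an uncaught fatal error exits the JVM.

import java.util.concurrent.Executors
import scala.concurrent.ExecutionContext

// Hypothetical usage of the ThreadUtil.daemonThreadFactory shown above.
val executor = Executors.newFixedThreadPool(4, ThreadUtil.daemonThreadFactory("my-pool"))
implicit val ec: ExecutionContext = ExecutionContext.fromExecutorService(executor)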
Example 3
Source File: UpdatableDisplay.scala    From almond   with BSD 3-Clause "New" or "Revised" License
package almond.display

import java.util.{Locale, UUID}
import java.util.concurrent.atomic.AtomicInteger

import almond.interpreter.api.{DisplayData, OutputHandler}

trait UpdatableDisplay extends Display {
  def displayId: String
  override def displayData(): DisplayData =
    DisplayData(data(), metadata = metadata(), idOpt = Some(displayId))
  protected def emptyDisplayData(): DisplayData = {
    val data = displayData()
    data.copy(data = data.data.mapValues(_ => "").toMap)
  }

  def update()(implicit output: OutputHandler): Unit =
    output.updateDisplay(displayData())

  def clear()(implicit output: OutputHandler): Unit =
    output.updateDisplay(emptyDisplayData())
}

object UpdatableDisplay {

  def useRandomIds(): Boolean =
    sys.props
      .get("almond.ids.random")
      .forall(s => s == "1" || s.toLowerCase(Locale.ROOT) == "true")

  private val idCounter = new AtomicInteger
  private val divCounter = new AtomicInteger

  def generateId(): String =
    if (useRandomIds())
      UUID.randomUUID().toString
    else
      idCounter.incrementAndGet().toString

  def generateDiv(prefix: String = "data-"): String =
    prefix + {
      if (useRandomIds())
        UUID.randomUUID().toString
      else
        divCounter.incrementAndGet().toString
    }

} 
Example 4
Source File: package.scala    From cuttle   with Apache License 2.0
package com.criteo.cuttle

import cats.effect.IO
import com.criteo.cuttle.ThreadPools._
import com.criteo.cuttle.ThreadPools.ThreadPoolSystemProperties._

import java.time.Instant
import java.util.concurrent.atomic.AtomicInteger

import scala.concurrent.ExecutionContext
import scala.language.implicitConversions

package object cron {
  type CronJob = Job[CronScheduling]
  type CronExecution = Execution[CronScheduling]

  object Implicits {

    //Backward compat for Job to CronDag
    implicit class JobToCronDag(job: Job[CronScheduling]) {
      def every(cronExpression: CronExpression) =
        CronDag(job.id, CronPipeline(Set(job), Set.empty), cronExpression, job.name, job.description, job.tags)
    }

    implicit def stringToCronExp(cronExpression: String) = CronExpression(cronExpression)

    // Thread pool to run Cron scheduler
    implicit val cronThreadPool = new WrappedThreadPool with Metrics {
      private val _threadPoolSize: AtomicInteger = new AtomicInteger(0)

      override val underlying = ExecutionContext.fromExecutorService(
        newFixedThreadPool(
          loadSystemPropertyAsInt("com.criteo.cuttle.ThreadPools.CronThreadPool.nThreads",
                                  Runtime.getRuntime.availableProcessors),
          poolName = Some("Cron"),
          threadCounter = _threadPoolSize
        )
      )

      override def threadPoolSize(): Int = _threadPoolSize.get()
    }

    implicit val cronContextShift = IO.contextShift(cronThreadPool.underlying)

  }

  // Fair assumptions about start and end date within which we operate by default if user doesn't specify his interval.
  // We choose these dates over Instant.MIN and Instant.MAX because MySQL works within this range.
  private[cron] val minStartDateForExecutions = Instant.parse("1000-01-01T00:00:00Z")
  private[cron] val maxStartDateForExecutions = Instant.parse("9999-12-31T23:59:59Z")

  // This function was implemented because executor.archivedExecutions returns duplicates when passing the same table
  // into the context query.
  private[cron] def buildExecutionsList(executor: Executor[CronScheduling],
                                        jobPartIds: Set[String],
                                        startDate: Instant,
                                        endDate: Instant,
                                        limit: Int): IO[Map[Instant, Seq[ExecutionLog]]] =
    for {
      archived <- executor.rawArchivedExecutions(jobPartIds, "", asc = false, 0, limit)
      running <- IO(executor.runningExecutions.collect {
        case (e, status)
            if jobPartIds.contains(e.job.id) && e.context.instant.isAfter(startDate) && e.context.instant
              .isBefore(endDate) =>
          e.toExecutionLog(status)
      })
    } yield (running ++ archived).groupBy(
      f =>
        CronContext.decoder.decodeJson(f.context) match {
          case Left(_)  => Instant.now()
          case Right(b) => b.instant
        }
    )

} 
Example 5
Source File: TestBuild.scala    From sbt-babel   with MIT License
import java.util.concurrent.atomic.AtomicInteger

import com.typesafe.sbt.web.SbtWeb
import com.typesafe.sbt.web.SbtWeb.autoImport._
import sbt.Keys._
import sbt._

object TestBuild extends Build {

  class TestLogger(target: File) extends Logger {
    val unrecognisedInputCount = new AtomicInteger(0)

    def trace(t: => Throwable): Unit = {}

    def success(message: => String): Unit = {}

    def log(level: Level.Value, message: => String): Unit = {
      if (level == Level.Error) {
        if (message.contains( """ParseError: expected "indent", got "=="""")) {
          if (unrecognisedInputCount.addAndGet(1) == 1) {
            IO.touch(target / "unrecognised-input-error")
          }
        }
      }
    }
  }

  class TestReporter(target: File) extends LoggerReporter(-1, new TestLogger(target))

  lazy val root = Project(
    id = "test-build",
    base = file("."),
    settings = Seq(WebKeys.reporter := new TestReporter(target.value))
  ).enablePlugins(SbtWeb)

} 
Example 6
Source File: TestUtils.scala    From rtree2d   with Apache License 2.0
package com.github.plokhotnyuk.rtree2d.core

import java.lang.Math._
import java.util.concurrent.atomic.AtomicInteger

import org.scalacheck.Gen

object TestUtils {
  val lastId = new AtomicInteger
  val floatGen: Gen[Float] = Gen.choose[Float](-1000, 1000)
  val latGen: Gen[Float] = Gen.choose[Float](-90, 90)
  val lonGen: Gen[Float] = Gen.choose[Float](-180, 180)
  val positiveIntGen: Gen[Int] = Gen.choose[Int](0, 200)
  val positiveFloatGen: Gen[Float] = Gen.choose[Float](0, 200)
  val entryGen: Gen[RTreeEntry[Int]] = for {
    x <- floatGen
    y <- floatGen
    w <- positiveFloatGen
    h <- positiveFloatGen
  } yield RTreeEntry(x, y, x + w, y + h, lastId.getAndIncrement())
  val distanceEntryGen: Gen[(Float, RTreeEntry[Int])] = for {
    d <- positiveFloatGen
    e <- entryGen
  } yield (d, e)
  val latLonEntryGen: Gen[RTreeEntry[Int]] = for {
    lat1 <- latGen
    lon1 <- lonGen
    lat2 <- latGen
    lon2 <- lonGen
  } yield RTreeEntry(min(lat1, lat2), min(lon1, lon2), max(lat1, lat2), max(lon1, lon2), lastId.getAndIncrement())
  val entryListGen: Gen[Seq[RTreeEntry[Int]]] = Gen.oneOf(0, 1, 10, 100, 1000).flatMap(n => Gen.listOfN(n, entryGen))
  val distanceEntryListGen: Gen[Seq[(Float, RTreeEntry[Int])]] =
    Gen.oneOf(0, 1, 10, 100, 1000).flatMap(n => Gen.listOfN(n, distanceEntryGen))

  implicit def orderingByName[A <: RTreeEntry[Int]]: Ordering[A] =
    Ordering.by(e => (e.minX, e.minY, e.maxX, e.maxY, e.value))

  def intersects[T](es: Seq[RTreeEntry[T]], x: Float, y: Float): Seq[RTreeEntry[T]] = intersects(es, x, y, x, y)

  def intersects[T](es: Seq[RTreeEntry[T]], minX: Float, minY: Float, maxX: Float, maxY: Float): Seq[RTreeEntry[T]] =
    es.filter(e => intersects(e, minX, minY, maxX, maxY))

  def intersects[T](e: RTree[T], x: Float, y: Float): Boolean = e.minX <= x && x <= e.maxX && e.minY <= y && y <= e.maxY

  def intersects[T](e: RTree[T], minX: Float, minY: Float, maxX: Float, maxY: Float): Boolean =
    e.minX <= maxX && minX <= e.maxX && e.minY <= maxY && minY <= e.maxY

  def euclideanDistance[T](x: Float, y: Float, t: RTree[T]): Float = {
    val dx = max(abs((t.minX + t.maxX) / 2 - x) - (t.maxX - t.minX) / 2, 0)
    val dy = max(abs((t.minY + t.maxY) / 2 - y) - (t.maxY - t.minY) / 2, 0)
    sqrt(dx * dx + dy * dy).toFloat
  }

  def alignedHorizontally[T](e: RTree[T], lat: Float, lon: Float): Boolean =
    e.minX <= lat && lat <= e.maxX && (lon < e.minY || e.maxY < lon)

  def alignedVertically[T](e: RTree[T], lat: Float, lon: Float): Boolean =
    e.minY <= lon && lon <= e.maxY && (lat < e.minX || e.maxX < lat)

  // https://en.wikipedia.org/wiki/Haversine_formula + https://en.wikipedia.org/wiki/Earth_radius#Mean_radius
  def greatCircleDistance(lat1: Float, lon1: Float, lat2: Float, lon2: Float, radius: Float = 6371.0088f): Float = {
    val shdy = sin((lon1 - lon2) * PI / 180 / 2)
    val shdx = sin((lat1 - lat2) * PI / 180 / 2)
    (asin(sqrt(cos(lat1 * PI / 180) * cos(lat2 * PI / 180) * shdy * shdy + shdx * shdx)) * 2 * radius).toFloat
  }
} 
Example 7
Source File: AutoFinishScopeManager.scala    From scala-concurrent   with Apache License 2.0
package io.opentracing.contrib.concurrent

import java.util.concurrent.atomic.AtomicInteger

import io.opentracing.{ScopeManager, Span}

class AutoFinishScopeManager extends ScopeManager {
  private[concurrent] val tlsScope = new ThreadLocal[AutoFinishScope]

  override def activate(span: Span): AutoFinishScope = {
    new AutoFinishScope(this, new AtomicInteger(1), span)
  }

  override def activeSpan: Span = {
    Option(tlsScope.get).map(_.span).orNull
  }

  private[concurrent] def captureScope: AutoFinishScope#Continuation = {
    Option(tlsScope.get).map(_.capture).orNull
  }
} 
Example 8
package com.avast.sst.monix.catnap.micrometer

import java.util.concurrent.atomic.AtomicInteger

import cats.effect.Sync
import cats.syntax.functor._
import com.avast.sst.monix.catnap.CircuitBreakerMetrics
import com.avast.sst.monix.catnap.CircuitBreakerMetrics.State
import com.avast.sst.monix.catnap.CircuitBreakerMetrics.State.{Closed, HalfOpen, Open}
import io.micrometer.core.instrument.MeterRegistry

object MicrometerCircuitBreakerMetricsModule {

  
  def make[F[_]: Sync](name: String, meterRegistry: MeterRegistry): F[CircuitBreakerMetrics[F]] = {
    for {
      circuitBreakerState <- Sync[F].delay(new AtomicInteger(CircuitClosed))
    } yield new MicrometerCircuitBreakerMetrics(name, meterRegistry, circuitBreakerState)
  }

  private class MicrometerCircuitBreakerMetrics[F[_]: Sync](name: String, meterRegistry: MeterRegistry, state: AtomicInteger)
      extends CircuitBreakerMetrics[F] {

    private val F = Sync[F]

    private val rejected = meterRegistry.counter(s"circuit-breaker.$name.rejected")
    private val circuitState = meterRegistry.gauge[AtomicInteger](s"circuit-breaker.$name.state", state)

    override def increaseRejected: F[Unit] = F.delay(rejected.increment())

    override def setState(state: State): F[Unit] = {
      state match {
        case Closed   => F.delay(circuitState.set(CircuitClosed))
        case Open     => F.delay(circuitState.set(CircuitOpened))
        case HalfOpen => F.delay(circuitState.set(CircuitHalfOpened))
      }
    }

  }

  private val CircuitOpened = -1
  private val CircuitHalfOpened = 0
  private val CircuitClosed = 1

} 
Example 9
Source File: JVMObjectTracker.scala    From seahorse-workflow-executor   with Apache License 2.0
// scalastyle:off

// Imports inferred for this excerpt; the original package declaration is omitted.
import java.util.concurrent.atomic.AtomicInteger

import org.slf4j.{Logger, LoggerFactory}

import scala.collection.concurrent.TrieMap

private[r] object JVMObjectTracker {
  @transient
  protected lazy val logger: Logger = LoggerFactory.getLogger(getClass.getName)
  private[this] val objMap = new TrieMap[String, Object]
  private[this] val objCounter = new AtomicInteger(0)

  def getObject(id: String): Object = {
    logger.info(s"Get object at  $id")
    objMap(id)
  }

  def get(id: String): Option[Object] = {
    logger.info(s"Get object at $id")
    objMap.get(id)
  }

  def put(obj: Object): String = {
    val objId = objCounter.getAndIncrement.toString
    val objName = obj.getClass.getName
    logger.info(s"Puts $objName at $objId ")
    objMap.put(objId, obj)
    objId
  }

  def remove(id: String): Option[Object] = {
    logger.info(s"Removed $id")
    objMap.remove(id)
  }

} 
Example 10
Source File: TestConcurrentAccess.scala    From ohara   with Apache License 2.0
package oharastream.ohara.configurator

import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{Executors, TimeUnit}

import oharastream.ohara.client.configurator.NodeApi
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor, Future}

class TestConcurrentAccess extends OharaTest {
  private[this] val configurator = Configurator.builder.fake().build()

  private[this] val nodeApi = NodeApi.access.hostname(configurator.hostname).port(configurator.port)

  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(10, TimeUnit.SECONDS))

  
  @Test
  def deletedObjectShouldDisappearFromGet(): Unit = {
    val threadCount                                         = 10
    val threadsPool                                         = Executors.newFixedThreadPool(threadCount)
    val unmatchedCount                                      = new AtomicInteger()
    implicit val executionContext: ExecutionContextExecutor = ExecutionContext.fromExecutor(threadsPool)
    (0 until threadCount).foreach { _ =>
      threadsPool.execute { () =>
        val nodeName = CommonUtils.randomString(10)
        val nodes = result(
          nodeApi.request
            .nodeName(nodeName)
            .user(CommonUtils.randomString(10))
            .password(CommonUtils.randomString(10))
            .create()
            .flatMap(node => nodeApi.delete(node.key))
            .flatMap(_ => nodeApi.list())
        )
        if (nodes.exists(_.hostname == nodeName)) unmatchedCount.incrementAndGet()
      }
    }
    threadsPool.shutdown()
    threadsPool.awaitTermination(60, TimeUnit.SECONDS) shouldBe true
    unmatchedCount.get() shouldBe 0
  }

  @After
  def tearDown(): Unit = Releasable.close(configurator)
} 
Example 11
Source File: package.scala    From ohara   with Apache License 2.0
package oharastream.ohara.connector

import java.util.concurrent.atomic.AtomicInteger

import oharastream.ohara.common.annotations.VisibleForTesting
import oharastream.ohara.common.setting.SettingDef
import oharastream.ohara.kafka.connector.csv.CsvConnectorDefinitions

package object ftp {
  
  val DEFINITIONS: Map[String, SettingDef] = Seq(
    FTP_HOSTNAME_DEFINITION,
    FTP_PORT_DEFINITION,
    FTP_USER_NAME_DEFINITION,
    FTP_PASSWORD_DEFINITION
  ).map(d => d.key() -> d).toMap

  @VisibleForTesting private[ftp] val INPUT_FOLDER_KEY: String     = CsvConnectorDefinitions.INPUT_FOLDER_KEY
  @VisibleForTesting private[ftp] val COMPLETED_FOLDER_KEY: String = CsvConnectorDefinitions.COMPLETED_FOLDER_KEY
  @VisibleForTesting private[ftp] val ERROR_FOLDER_KEY: String     = CsvConnectorDefinitions.ERROR_FOLDER_KEY
  @VisibleForTesting private[ftp] val OUTPUT_FOLDER_KEY: String    = CsvConnectorDefinitions.OUTPUT_FOLDER_KEY
  @VisibleForTesting private[ftp] val FILE_NEED_HEADER_KEY: String = CsvConnectorDefinitions.FILE_NEED_HEADER_KEY
  @VisibleForTesting private[ftp] val FILE_ENCODE_KEY: String      = CsvConnectorDefinitions.FILE_ENCODE_KEY
  @VisibleForTesting private[ftp] val FILE_CACHE_SIZE_KEY: String =
    CsvConnectorDefinitions.SIZE_OF_FILE_CACHE_KEY
} 
Example 12
Source File: package.scala    From ohara   with Apache License 2.0
package oharastream.ohara.connector

import java.util.concurrent.atomic.AtomicInteger

import oharastream.ohara.common.data.{Column, DataType}
import oharastream.ohara.common.setting.SettingDef

import scala.concurrent.duration.Duration

package object perf {
  val PERF_BATCH_KEY: String     = "perf.batch"
  val PERF_FREQUENCY_KEY: String = "perf.frequency"

  val PERF_BATCH_DEFAULT: Int          = 10
  val PERF_FREQUENCY_DEFAULT: Duration = Duration("1 second")

  val PERF_CELL_LENGTH_KEY: String  = "perf.cell.length"
  val PERF_CELL_LENGTH_DEFAULT: Int = 10

  private[this] val GROUP_COUNT = new AtomicInteger()

  val DEFINITIONS = Map(
    PERF_BATCH_KEY -> SettingDef
      .builder()
      .displayName("Batch")
      .documentation("The batch of perf")
      .key(PERF_BATCH_KEY)
      .optional(PERF_BATCH_DEFAULT)
      .orderInGroup(GROUP_COUNT.getAndIncrement())
      .build(),
    PERF_FREQUENCY_KEY -> SettingDef
      .builder()
      .displayName("Frequency")
      .documentation("The frequency of perf")
      .key(PERF_FREQUENCY_KEY)
      .optional(java.time.Duration.ofMillis(PERF_FREQUENCY_DEFAULT.toMillis))
      .orderInGroup(GROUP_COUNT.getAndIncrement())
      .build(),
    PERF_CELL_LENGTH_KEY -> SettingDef
      .builder()
      .displayName("cell length")
      .documentation("increase this value if you prefer to large cell. Noted, it works only for string type")
      .key(PERF_CELL_LENGTH_KEY)
      .optional(PERF_CELL_LENGTH_DEFAULT)
      .orderInGroup(GROUP_COUNT.getAndIncrement())
      .build()
  )

  
  val DEFAULT_SCHEMA: Seq[Column] = Seq(
    Column.builder().name("a").dataType(DataType.BYTES).order(0).build(),
    Column.builder().name("b").dataType(DataType.BYTES).order(1).build(),
    Column.builder().name("c").dataType(DataType.BYTES).order(2).build()
  )
} 
Example 13
Source File: package.scala    From ohara   with Apache License 2.0
package oharastream.ohara.connector

import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger

import oharastream.ohara.common.setting.SettingDef

import scala.concurrent.duration.Duration

package object console {
  
  private[this] val COUNTER               = new AtomicInteger(0)
  val CONSOLE_FREQUENCE: String           = "console.sink.frequence"
  val CONSOLE_FREQUENCE_DOC: String       = "the frequence to print the row on log"
  val CONSOLE_FREQUENCE_DEFAULT: Duration = Duration(3, TimeUnit.SECONDS)
  val CONSOLE_FREQUENCE_DEFINITION: SettingDef = SettingDef
    .builder()
    .displayName("the frequence of printing data")
    .key(CONSOLE_FREQUENCE)
    .documentation(CONSOLE_FREQUENCE_DOC)
    .optional(java.time.Duration.ofMillis(CONSOLE_FREQUENCE_DEFAULT.toMillis))
    .orderInGroup(COUNTER.getAndIncrement())
    .build()

  val CONSOLE_ROW_DIVIDER: String = "console.sink.row.divider"
  val CONSOLE_ROW_DIVIDER_DOC: String =
    "the charset used to divide the rows. For example, the divider \"|\" makes the output string: row_0|row_1"
  val CONSOLE_ROW_DIVIDER_DEFAULT: String = "|"
  val CONSOLE_ROW_DIVIDER_DEFINITION: SettingDef = SettingDef
    .builder()
    .displayName("the divider charset to distinguish each row")
    .key(CONSOLE_ROW_DIVIDER)
    .documentation(CONSOLE_ROW_DIVIDER_DOC)
    .optional(CONSOLE_ROW_DIVIDER_DEFAULT)
    .orderInGroup(COUNTER.getAndIncrement())
    .build()
} 
Example 14
Source File: package.scala    From ohara   with Apache License 2.0
package oharastream.ohara.connector.hdfs

import java.util.concurrent.atomic.AtomicInteger

import oharastream.ohara.common.annotations.VisibleForTesting
import oharastream.ohara.common.setting.SettingDef
import oharastream.ohara.kafka.connector.csv.CsvConnectorDefinitions

package object sink {
  
  val DEFINITIONS: Map[String, SettingDef] = Seq(HDFS_URL_DEFINITION)
    .map(d => d.key() -> d)
    .toMap

  @VisibleForTesting val OUTPUT_FOLDER_KEY: String         = CsvConnectorDefinitions.OUTPUT_FOLDER_KEY
  @VisibleForTesting val FLUSH_SIZE_KEY: String            = CsvConnectorDefinitions.FLUSH_SIZE_KEY
  @VisibleForTesting val FLUSH_SIZE_DEFAULT: Int           = CsvConnectorDefinitions.FLUSH_SIZE_DEFAULT
  @VisibleForTesting val ROTATE_INTERVAL_MS_KEY: String    = CsvConnectorDefinitions.ROTATE_INTERVAL_MS_KEY
  @VisibleForTesting val ROTATE_INTERVAL_MS_DEFAULT: Long  = CsvConnectorDefinitions.ROTATE_INTERVAL_MS_DEFAULT
  @VisibleForTesting val FILE_NEED_HEADER_KEY: String      = CsvConnectorDefinitions.FILE_NEED_HEADER_KEY
  @VisibleForTesting val FILE_NEED_HEADER_DEFAULT: Boolean = CsvConnectorDefinitions.FILE_NEED_HEADER_DEFAULT
  @VisibleForTesting val FILE_ENCODE_KEY: String           = CsvConnectorDefinitions.FILE_ENCODE_KEY
  @VisibleForTesting val FILE_ENCODE_DEFAULT: String       = CsvConnectorDefinitions.FILE_ENCODE_DEFAULT
} 
Example 15
Source File: NoCacheSpec.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.caching

import java.util.concurrent.atomic.AtomicInteger

import org.scalatest.WordSpec

class NoCacheSpec extends WordSpec with CacheBehaviorSpecBase {
  override protected lazy val name: String = "a non-existent cache"

  override protected def newCache(): Cache[Integer, String] =
    Cache.none

  "a non-existent cache" should {
    "compute every time" in {
      val cache = newCache()
      val counter = new AtomicInteger(0)

      def compute(value: Integer): String = {
        counter.incrementAndGet()
        value.toString
      }

      cache.get(1, compute)
      cache.get(1, compute)
      cache.get(1, compute)
      cache.get(2, compute)

      counter.get() should be(4)
    }

    "always return `None` on `getIfPresent`" in {
      val cache = Cache.none[Integer, String]

      cache.getIfPresent(7) should be(None)
      cache.get(7, _.toString) should be("7")
      cache.getIfPresent(7) should be(None)
    }

    "do nothing on `put`" in {
      val cache = Cache.none[Integer, String]

      cache.put(7, "7")
      cache.getIfPresent(7) should be(None)

      val counter = new AtomicInteger(0)

      def compute(value: Integer): String = {
        counter.incrementAndGet()
        value.toString
      }

      cache.get(7, compute) should be("7")
      counter.get() should be(1)
    }
  }
} 
Example 16
Source File: CacheCachingSpecBase.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.caching

import java.util.concurrent.atomic.AtomicInteger

import org.scalatest.{Matchers, WordSpecLike}

trait CacheCachingSpecBase extends CacheSpecBase with WordSpecLike with Matchers {
  name should {
    "compute once, and cache" in {
      val cache = newCache()
      val counter = new AtomicInteger(0)

      def compute(value: Integer): String = {
        counter.incrementAndGet()
        value.toString
      }

      cache.get(1, compute)
      cache.get(1, compute)
      cache.get(1, compute)
      cache.get(2, compute)

      counter.get() should be(2)
    }

    "return `None` on `getIfPresent` if the value is not present" in {
      val cache = newCache()

      cache.getIfPresent(7) should be(None)
    }

    "return the value on `getIfPresent` if the value is present" in {
      val cache = newCache()

      cache.get(7, _.toString) should be("7")
      cache.getIfPresent(7) should be(Some("7"))
    }

    "`put` values" in {
      val cache = newCache()

      cache.put(7, "7")
      cache.getIfPresent(7) should be(Some("7"))

      val counter = new AtomicInteger(0)

      def compute(value: Integer): String = {
        counter.incrementAndGet()
        value.toString
      }

      cache.get(7, compute) should be("7")
      counter.get() should be(0)
    }
  }
} 
Example 17
Source File: WaitForCompletionsObserver.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.testing

import java.util.concurrent.atomic.AtomicInteger

import com.daml.ledger.api.v1.command_completion_service.CompletionStreamResponse
import io.grpc.Context
import io.grpc.stub.StreamObserver

import scala.concurrent.{Future, Promise}

object WaitForCompletionsObserver {

  def apply(n: Int)(attach: StreamObserver[CompletionStreamResponse] => Unit): Future[Unit] = {
    if (n < 1) {
      Future.failed(new IllegalArgumentException(
        s"Invalid argument $n, `WaitForCompletionsObserver` requires a strictly positive integer as an argument"))
    } else {
      val observer = new WaitForCompletionsObserver(n)
      attach(observer)
      observer.result
    }
  }

}

final class WaitForCompletionsObserver private (expectedCompletions: Int)
    extends StreamObserver[CompletionStreamResponse] {

  private val promise = Promise[Unit]
  private val counter = new AtomicInteger(0)

  val result: Future[Unit] = promise.future

  override def onNext(v: CompletionStreamResponse): Unit = {
    val total = counter.addAndGet(v.completions.size)
    if (total >= expectedCompletions) {
      val _1 = promise.trySuccess(())
      val _2 = Context.current().withCancellation().cancel(null)
    }
  }

  override def onError(throwable: Throwable): Unit = {
    val _ = promise.tryFailure(throwable)
  }

  override def onCompleted(): Unit = {
    val _ = promise.tryFailure(new RuntimeException("no more completions"))
  }

} 
Example 18
Source File: BufferingObserver.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter.utils

import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicInteger

import io.grpc.stub.StreamObserver

import scala.concurrent.Promise

class BufferingObserver[T](limit: Option[Int] = None) extends StreamObserver[T] {
  private val promise = Promise[Vector[T]]()
  val buffer = new ConcurrentLinkedQueue[T]()
  val size = new AtomicInteger(0)
  def resultsF = promise.future

  override def onError(t: Throwable): Unit = promise.failure(t)

  override def onCompleted(): Unit = {
    val vec = Vector.newBuilder[T]
    buffer.forEach((e) => vec += e)
    promise.trySuccess(vec.result())
    ()
  }

  override def onNext(value: T): Unit = {
    size.updateAndGet(curr => {
      if (limit.fold(false)(_ <= curr)) {
        onCompleted()
        curr
      } else {
        buffer.add(value)
        curr + 1
      }
    })
    ()
  }
} 
Example 19
Source File: AkkaExecutionSequencerPool.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter

import java.util.concurrent.atomic.AtomicInteger

import akka.Done
import akka.actor.ActorSystem

import scala.collection.breakOut
import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{Await, ExecutionContext, Future}

class AkkaExecutionSequencerPool(
    poolName: String,
    actorCount: Int = AkkaExecutionSequencerPool.defaultActorCount,
    terminationTimeout: FiniteDuration = 30.seconds,
)(implicit system: ActorSystem)
    extends ExecutionSequencerFactory {
  require(actorCount > 0)

  private val counter = new AtomicInteger()

  private val pool =
    Array.fill(actorCount)(
      AkkaExecutionSequencer(s"$poolName-${counter.getAndIncrement()}", terminationTimeout))

  override def getExecutionSequencer: ExecutionSequencer =
    pool(counter.getAndIncrement() % actorCount)

  override def close(): Unit =
    Await.result(closeAsync(), terminationTimeout)

  def closeAsync(): Future[Unit] = {
    implicit val ec: ExecutionContext = system.dispatcher
    val eventuallyClosed: Future[Seq[Done]] = Future.sequence(pool.map(_.closeAsync)(breakOut))
    Future.firstCompletedOf(
      Seq(
        system.whenTerminated.map(_ => ()), //  Cut it short if the ActorSystem stops.
        eventuallyClosed.map(_ => ()),
      )
    )
  }
}

object AkkaExecutionSequencerPool {

  
  private val defaultActorCount: Int = Runtime.getRuntime.availableProcessors() * 8
} 
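The counter.getAndIncrement() % actorCount lookup above is the usual round-robin idiom; here is a minimal standalone sketch of the same pattern (the class and method names are illustrative, not part of the project):

import java.util.concurrent.atomic.AtomicInteger

// Round-robin selection over a fixed pool; Math.floorMod keeps the index
// non-negative even after the counter overflows Int.MaxValue.
class RoundRobin[A](pool: Vector[A]) {
  require(pool.nonEmpty)
  private val counter = new AtomicInteger(0)
  def next(): A = pool(Math.floorMod(counter.getAndIncrement(), pool.size))
}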
Example 20
Source File: AkkaImplementation.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter.utils.implementations

import java.util.concurrent.atomic.AtomicInteger

import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Source}
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.server.akka.ServerAdapter
import com.daml.grpc.sampleservice.Responding
import com.daml.platform.hello.HelloServiceGrpc.HelloService
import com.daml.platform.hello.{HelloRequest, HelloResponse, HelloServiceGrpc}
import io.grpc.stub.StreamObserver
import io.grpc.{BindableService, ServerServiceDefinition}

import scala.concurrent.ExecutionContext.Implicits.global

class AkkaImplementation(
    implicit executionSequencerFactory: ExecutionSequencerFactory,
    materializer: Materializer,
) extends HelloService
    with Responding
    with BindableService {

  private val serverStreamingCalls = new AtomicInteger()

  def getServerStreamingCalls: Int = serverStreamingCalls.get()

  override def bindService(): ServerServiceDefinition =
    HelloServiceGrpc.bindService(this, global)

  override def serverStreaming(
      request: HelloRequest,
      responseObserver: StreamObserver[HelloResponse],
  ): Unit =
    Source
      .single(request)
      .via(Flow[HelloRequest].mapConcat(responses))
      .runWith(ServerAdapter.toSink(responseObserver))
      .onComplete(_ => serverStreamingCalls.incrementAndGet())

} 
Example 21
Source File: InMemoryPrivateLedgerData.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.engine
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger

import com.daml.lf.data.{FrontStack, FrontStackCons}
import com.daml.lf.transaction.Node._
import com.daml.lf.transaction.{Transaction => Tx}
import com.daml.lf.value.Value._

import scala.annotation.tailrec

trait PrivateLedgerData {
  def update(tx: Tx.Transaction): Unit
  def get(id: ContractId): Option[ContractInst[VersionedValue[ContractId]]]
  def transactionCounter: Int
  def clear(): Unit
}

private[engine] class InMemoryPrivateLedgerData extends PrivateLedgerData {
  private val pcs: ConcurrentHashMap[ContractId, ContractInst[Tx.Value[ContractId]]] =
    new ConcurrentHashMap()
  private val txCounter: AtomicInteger = new AtomicInteger(0)

  def update(tx: Tx.Transaction): Unit =
    updateWithContractId(tx)

  def updateWithContractId(tx: Tx.Transaction): Unit =
    this.synchronized {
      // traverse in topo order and add / remove
      @tailrec
      def go(remaining: FrontStack[Tx.NodeId]): Unit = remaining match {
        case FrontStack() => ()
        case FrontStackCons(nodeId, nodeIds) =>
          val node = tx.nodes(nodeId)
          node match {
            case nc: NodeCreate.WithTxValue[ContractId] =>
              pcs.put(nc.coid, nc.coinst)
              go(nodeIds)
            case ne: NodeExercises.WithTxValue[Tx.NodeId, ContractId] =>
              go(ne.children ++: nodeIds)
            case _: NodeLookupByKey[_, _] | _: NodeFetch[_, _] =>
              go(nodeIds)
          }
      }
      go(FrontStack(tx.roots))
      txCounter.incrementAndGet()
      ()
    }

  def get(id: ContractId): Option[ContractInst[VersionedValue[ContractId]]] =
    this.synchronized {
      Option(pcs.get(id))
    }

  def clear(): Unit = this.synchronized {
    pcs.clear()
  }

  def transactionCounter: Int = txCounter.intValue()

  override def toString: String = s"InMemoryPrivateContractStore@{txCounter: $txCounter, pcs: $pcs}"
}

private[engine] object InMemoryPrivateLedgerData {
  def apply(): PrivateLedgerData = new InMemoryPrivateLedgerData()
} 
Example 22
Source File: ThreadUtil.scala    From iotchain   with MIT License
package jbok.common.thread

import java.lang.Thread.UncaughtExceptionHandler
import java.nio.channels.AsynchronousChannelGroup
import java.nio.channels.spi.AsynchronousChannelProvider
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{Executors, ThreadFactory}

import cats.effect.{Resource, Sync}

import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal

object ThreadUtil {
  def named(threadPrefix: String, daemon: Boolean, exitJvmOnFatalError: Boolean = true): ThreadFactory =
    new ThreadFactory {
      val defaultThreadFactory = Executors.defaultThreadFactory()
      val idx                  = new AtomicInteger(0)
      def newThread(r: Runnable) = {
        val t = defaultThreadFactory.newThread(r)
        t.setDaemon(daemon)
        t.setName(s"$threadPrefix-${idx.incrementAndGet()}")
        t.setUncaughtExceptionHandler(new UncaughtExceptionHandler {
          def uncaughtException(t: Thread, e: Throwable): Unit = {
            ExecutionContext.defaultReporter(e)
            if (exitJvmOnFatalError) {
              e match {
                case NonFatal(_) => ()
                case _           => System.exit(-1)
              }
            }
          }
        })
        t
      }
    }

  def blockingThreadPool[F[_]](name: String)(implicit F: Sync[F]): Resource[F, ExecutionContext] =
    Resource(F.delay {
      val factory  = named(name, daemon = true)
      val executor = Executors.newCachedThreadPool(factory)
      val ec       = ExecutionContext.fromExecutor(executor)
      (ec, F.delay(executor.shutdown()))
    })

  def acg[F[_]](implicit F: Sync[F]): Resource[F, AsynchronousChannelGroup] =
    Resource(F.delay {
      val acg = acgUnsafe
      (acg, F.delay(acg.shutdownNow()))
    })

  def acgUnsafe: AsynchronousChannelGroup =
    AsynchronousChannelProvider
      .provider()
      .openAsynchronousChannelGroup(8, named("jbok-ag-tcp", daemon = true))

  lazy val acgGlobal: AsynchronousChannelGroup = acgUnsafe
} 
Example 23
Source File: QueryBuilderContext.scala    From scarango   with MIT License
package com.outr.arango.query

import java.util.concurrent.atomic.AtomicInteger

import com.outr.arango.{NamedRef, Query, Ref}

class QueryBuilderContext private() {
  private var queries = List.empty[Query]
  private var refNames = Map.empty[Ref, String]
  private lazy val incrementer = new AtomicInteger(0)

  var ref: Option[Ref] = None

  def addQuery(query: Query): Unit = queries = query :: queries

  def name(ref: Ref): String = ref match {
    case NamedRef(name) => name
    case _ => {
      refNames.get(ref) match {
        case Some(name) => name
        case None => {
          val name = createArg
          refNames += ref -> name
          name
        }
      }
    }
  }

  def createArg: String = s"arg${incrementer.incrementAndGet()}"

  def toQuery: Query = {
    if (queries.isEmpty) throw new RuntimeException("Empty query is not allowed")
    Query.merge(queries.reverse)
  }
}

object QueryBuilderContext {
  private val threadLocal = new ThreadLocal[Option[QueryBuilderContext]] {
    override def initialValue(): Option[QueryBuilderContext] = None
  }

  def apply(): QueryBuilderContext = threadLocal.get().getOrElse(throw new RuntimeException(s"No QueryBuilderContext defined in current thread. Use `aql { ... }` around your query."))

  def contextualize(f: => Unit): Query = {
    val previous = threadLocal.get()
    try {
      val builder = new QueryBuilderContext
      threadLocal.set(Some(builder))
      f
      builder.toQuery
    } finally {
      if (previous.nonEmpty) {
        threadLocal.set(previous)
      } else {
        threadLocal.remove()
      }
    }
  }
} 
Example 24
Source File: FixedPool.scala    From chymyst-core   with Apache License 2.0
package io.chymyst.jc

import java.util.concurrent.atomic.AtomicInteger

import scala.language.experimental.macros


final class FixedPool(
  name: String,
  override val parallelism: Int = cpuCores,
  priority: Int = Thread.NORM_PRIORITY,
  reporter: EventReporting = ConsoleErrorReporter
) extends Pool(name, priority, reporter) {
  private[jc] val blockingCalls = new AtomicInteger(0)

  private def deadlockCheck(): Unit =
    if (blockingCalls.get >= workerExecutor.getMaximumPoolSize)
      reporter.reportDeadlock(toString, workerExecutor.getMaximumPoolSize, blockingCalls.get, Core.getReactionInfo)

  override private[chymyst] def runReaction(name: String, closure: ⇒ Unit): Unit = {
    deadlockCheck()
    super.runReaction(name, closure)
  }

  private[jc] def startedBlockingCall(selfBlocking: Boolean) = if (selfBlocking) {
    blockingCalls.getAndIncrement()
    deadlockCheck()
  }

  private[jc] def finishedBlockingCall(selfBlocking: Boolean) = if (selfBlocking) {
    blockingCalls.getAndDecrement()
    deadlockCheck()
  }

  def withReporter(r: EventReporting): FixedPool = new FixedPool(name, parallelism, priority, r)
}

object FixedPool {
  def apply(): FixedPool = macro PoolMacros.newFixedPoolImpl0 // IntelliJ cannot resolve the symbol PoolMacros, but compilation works.
  def apply(parallelism: Int): FixedPool = macro PoolMacros.newFixedPoolImpl1
} 
Example 25
Source File: Pool.scala    From chymyst-core   with Apache License 2.0
package io.chymyst.jc


import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent._

import scala.concurrent.ExecutionContext
import Core._


  // NOTE: this excerpt omits the enclosing Pool class declaration and several of its
  // members (schedulerExecutor, workerExecutor, shutdownWaitTimeMs, _reporter).
  def shutdownNow(): Unit = new Thread {
    try {
      schedulerExecutor.getQueue.clear()
      schedulerExecutor.shutdown()
      schedulerExecutor.awaitTermination(shutdownWaitTimeMs, TimeUnit.MILLISECONDS)
      workerExecutor.getQueue.clear()
      workerExecutor.shutdown()
      workerExecutor.awaitTermination(shutdownWaitTimeMs, TimeUnit.MILLISECONDS)
    } finally {
      schedulerExecutor.shutdown()
      workerExecutor.shutdownNow()
      workerExecutor.awaitTermination(shutdownWaitTimeMs, TimeUnit.MILLISECONDS)
      workerExecutor.shutdownNow()
      ()
    }
  }.start()

  @inline def reporter: EventReporting = _reporter

  def reporter_=(r: EventReporting): Unit = {
    val reporterChanged = _reporter.asInstanceOf[EventReporting] =!= r
    if (reporterChanged) {
      reporter.reporterUnassigned(this, r)
      _reporter = r
      r.reporterAssigned(this)
    }
  }
} 
Example 26
Source File: JobWaiter.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.scheduler

import java.util.concurrent.atomic.AtomicInteger

import scala.concurrent.{Future, Promise}

import org.apache.spark.internal.Logging


  // NOTE: this excerpt omits the enclosing JobWaiter class declaration and the fields it
  // references (dagScheduler, jobId, totalTasks, finishedTasks, resultHandler, jobPromise).
  def cancel() {
    dagScheduler.cancelJob(jobId)
  }

  override def taskSucceeded(index: Int, result: Any): Unit = {
    // resultHandler call must be synchronized in case resultHandler itself is not thread safe.
    synchronized {
      resultHandler(index, result.asInstanceOf[T])
    }
    if (finishedTasks.incrementAndGet() == totalTasks) {
      jobPromise.success(())
    }
  }

  override def jobFailed(exception: Exception): Unit = {
    if (!jobPromise.tryFailure(exception)) {
      logWarning("Ignore failure", exception)
    }
  }

} 
Example 27
Source File: COCOSeqFileGenerator.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.models.utils

import com.intel.analytics.bigdl.dataset.segmentation.{COCODataset, COCOSerializeContext}
import java.io.File
import java.nio.file.{Files, Paths}
import java.util.concurrent.atomic.AtomicInteger
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.SequenceFile.Writer
import org.apache.hadoop.io.compress.BZip2Codec
import org.apache.hadoop.io.{BytesWritable, SequenceFile}
import scala.collection.parallel.ForkJoinTaskSupport
import scopt.OptionParser

object COCOSeqFileGenerator {

  
  case class COCOSeqFileGeneratorParams(
    folder: String = ".",
    metaPath: String = "instances_val2014.json",
    output: String = ".",
    parallel: Int = 1,
    blockSize: Int = 12800
  )

  private val parser = new OptionParser[COCOSeqFileGeneratorParams]("BigDL COCO " +
    "Sequence File Generator") {
    head("BigDL COCO Sequence File Generator")
    opt[String]('f', "folder")
      .text("where you put the COCO image files")
      .action((x, c) => c.copy(folder = x))
    opt[String]('o', "output folder")
      .text("where you put the generated seq files")
      .action((x, c) => c.copy(output = x))
    opt[Int]('p', "parallel")
      .text("parallel num")
      .action((x, c) => c.copy(parallel = x))
    opt[Int]('b', "blockSize")
      .text("block size")
      .action((x, c) => c.copy(blockSize = x))
    opt[String]('m', "metaPath")
      .text("metadata json file path")
      .action((x, c) => c.copy(metaPath = x))
  }

  def main(args: Array[String]): Unit = {
    parser.parse(args, COCOSeqFileGeneratorParams()).foreach { param =>
      println("Loading COCO metadata")
      val meta = COCODataset.load(param.metaPath, param.folder)
      println("Metadata loaded")
      val conf: Configuration = new Configuration
      val doneCount = new AtomicInteger(0)
      val tasks = meta.images.filter(img => {
        val path = img.path
        val valid = Files.exists(path) && !Files.isDirectory(path)
        if (!valid) {
          System.err.print(s"[Warning] The image file ${path.getFileName} does not exist.\n")
        }
        valid
      }).grouped(param.blockSize).zipWithIndex.toArray.par
      tasks.tasksupport = new ForkJoinTaskSupport(
        new scala.concurrent.forkjoin.ForkJoinPool(param.parallel))
      tasks.foreach { case (imgs, blkId) =>
        val outFile = new Path(param.output, s"coco-seq-$blkId.seq")
        val key = new BytesWritable
        val value = new BytesWritable
        val writer = SequenceFile.createWriter(conf, Writer.file(outFile), Writer.keyClass(key
          .getClass), Writer.valueClass(value.getClass), Writer.compression(SequenceFile
          .CompressionType.BLOCK, new BZip2Codec))
        val context = new COCOSerializeContext
        imgs.foreach { img =>
          context.clear()
          context.dump(img.fileName)
          img.dumpTo(context)
          context.dump(COCODataset.MAGIC_NUM)
          val keyBytes = context.toByteArray
          key.set(keyBytes, 0, keyBytes.length)
          val bytes = img.data
          value.set(bytes, 0, bytes.length)
          writer.append(key, value)
          val cnt = doneCount.incrementAndGet()
          if (cnt % 500 == 0) {
            System.err.print(s"\r$cnt / ${meta.images.length} = ${cnt.toFloat/meta.images.length}")
          }
        }
        writer.close()
      }
      System.err.print("\n")
    }
  }
} 
Example 28
Source File: FrameManager.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.nn

import java.util.concurrent.atomic.AtomicInteger

import com.intel.analytics.bigdl.nn.Graph.ModuleNode
import com.intel.analytics.bigdl.nn.tf.{Exit, MergeOps, NextIteration}

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer


  // NOTE: this excerpt omits the enclosing FrameManager declaration; only the nested
  // Frame class is shown.
  class Frame[T] private[FrameManager] (
    val name: String,
    val parent: Option[Frame[T]]
  ) {
    // Sync all next iteration nodes execution
    private[bigdl] var barrier: AtomicInteger = new AtomicInteger(0)
    // User can use NextIteration to sync execution. This is a list of those type of nodes
    private[bigdl] val waitingNodes: ArrayBuffer[ModuleNode[T]] = new ArrayBuffer[ModuleNode[T]]()

    // Nodes should be refreshed in a iteration of the frame
    private[bigdl] val nodes: ArrayBuffer[ModuleNode[T]] = new ArrayBuffer[ModuleNode[T]]()
  }
} 
Example 29
Source File: TestUtils.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.utils

import java.util.concurrent.atomic.AtomicInteger

import com.intel.analytics.bigdl.nn.Sequential
import com.intel.analytics.bigdl.nn.keras.{InputLayer, KerasLayer, Sequential => KSequential}
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity, TensorModule}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import org.scalatest.exceptions.TestCanceledException

import scala.reflect.ClassTag

object TestUtils {

  
  def rosenBrock(x: Tensor[Double]): (Double, Tensor[Double]) = {
    // (1) compute f(x)
    val d = x.size(1)

    // x1 = x(i)
    val x1 = Tensor[Double](d - 1).copy(x.narrow(1, 1, d - 1))
    // x(i + 1) - x(i)^2
    x1.cmul(x1).mul(-1).add(x.narrow(1, 2, d - 1))
    // 100 * (x(i + 1) - x(i)^2)^2
    x1.cmul(x1).mul(100)

    // x0 = x(i)
    val x0 = Tensor[Double](d - 1).copy(x.narrow(1, 1, d - 1))
    // 1-x(i)
    x0.mul(-1).add(1)
    x0.cmul(x0)
    // 100*(x(i+1) - x(i)^2)^2 + (1-x(i))^2
    x1.add(x0)

    val fout = x1.sum()

    // (2) compute f(x)/dx
    val dxout = Tensor[Double]().resizeAs(x).zero()
    // df(1:D-1) = - 400*x(1:D-1).*(x(2:D)-x(1:D-1).^2) - 2*(1-x(1:D-1));
    x1.copy(x.narrow(1, 1, d - 1))
    x1.cmul(x1).mul(-1).add(x.narrow(1, 2, d - 1)).cmul(x.narrow(1, 1, d - 1)).mul(-400)
    x0.copy(x.narrow(1, 1, d - 1)).mul(-1).add(1).mul(-2)
    x1.add(x0)
    dxout.narrow(1, 1, d - 1).copy(x1)

    // df(2:D) = df(2:D) + 200*(x(2:D)-x(1:D-1).^2);
    x0.copy(x.narrow(1, 1, d - 1))
    x0.cmul(x0).mul(-1).add(x.narrow(1, 2, d - 1)).mul(200)
    dxout.narrow(1, 2, d - 1).add(x0)

    (fout, dxout)
  }
}

class ExceptionTest[T: ClassTag](failCountNumberLists: Array[Int], sleep: Boolean)
  (implicit ev: TensorNumeric[T])
  extends TensorModule[T]  {

  override def updateOutput(input: Tensor[T]): Tensor[T] = {
    this.output = input
    if (failCountNumberLists.contains(ExceptionTest.count.incrementAndGet())) {
      if (sleep) {
        Thread.sleep(10000)
      }
      throw new Exception("Fail task")
    }
    this.output
  }
  override def updateGradInput(input: Tensor[T], gradOutput: Tensor[T]): Tensor[T] = {
    this.gradInput = gradOutput
    this.gradInput
  }

  override def toString(): String = {
    s"nn.ExceptionTest"
  }
}

object ExceptionTest {
  var count = new AtomicInteger(0)

  def resetCount(): Unit = {
    count.set(0)
  }
} 
Example 30
Source File: MessageCodec.scala    From mantis   with Apache License 2.0
package io.iohk.ethereum.network.rlpx

import java.util.concurrent.atomic.AtomicInteger

import akka.util.ByteString
import io.iohk.ethereum.network.p2p.{Message, MessageDecoder, MessageSerializable}

import scala.util.Try

class MessageCodec(frameCodec: FrameCodec, messageDecoder: MessageDecoder, protocolVersion: Message.Version) {

  val MaxFramePayloadSize: Int = Int.MaxValue // no framing

  val contextIdCounter = new AtomicInteger

  def readMessages(data: ByteString): Seq[Try[Message]] = {
    val frames = frameCodec.readFrames(data)
    frames map { frame => Try(messageDecoder.fromBytes(frame.`type`, frame.payload.toArray, protocolVersion)) }
  }

  def encodeMessage(serializable: MessageSerializable): ByteString = {
    val encoded: Array[Byte] = serializable.toBytes
    val numFrames = Math.ceil(encoded.length / MaxFramePayloadSize.toDouble).toInt
    val contextId = contextIdCounter.incrementAndGet()

    val frames = (0 until numFrames) map { frameNo =>
      val payload = encoded.drop(frameNo * MaxFramePayloadSize).take(MaxFramePayloadSize)
      val totalPacketSize = if (frameNo == 0) Some(encoded.length) else None
      val header =
        if (numFrames > 1) Header(payload.length, 0, Some(contextId), totalPacketSize)
        else Header(payload.length, 0, None, None)
      Frame(header, serializable.code, ByteString(payload))
    }

    frameCodec.writeFrames(frames)
  }

} 
Example 31
Source File: NamedThreadFactory.scala    From zorechka-bot   with MIT License
package com.wix.zorechka.utils.concurrent

import java.util.concurrent.ThreadFactory
import java.util.concurrent.atomic.AtomicInteger

case class NamedThreadFactory(name: String, daemon: Boolean) extends ThreadFactory {
  private val parentGroup =
  Option(System.getSecurityManager).fold(Thread.currentThread().getThreadGroup)(_.getThreadGroup)

  private val threadGroup = new ThreadGroup(parentGroup, name)
  private val threadCount = new AtomicInteger(1)
  private val threadHash  = Integer.toUnsignedString(this.hashCode())

  override def newThread(r: Runnable): Thread = {
    val newThreadNumber = threadCount.getAndIncrement()

    val thread = new Thread(threadGroup, r)
    thread.setName(s"$name-$newThreadNumber-$threadHash")
    thread.setDaemon(daemon)

    thread
  }
} 
Example 32
Source File: BaseInMemoryRepository.scala    From crm-seed   with Apache License 2.0
package com.dataengi.crm.common.repositories

import java.util.concurrent.atomic.AtomicInteger

import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
import com.dataengi.crm.common.context.types._
import com.dataengi.crm.common.errors.ValueNotFound

abstract class BaseInMemoryRepository[T] extends AutoIncRepository[T] with KeyExtractor[T, Long] {

  protected val repository = TrieMap[Key, T]()

  private val atomicInteger = new AtomicInteger()

  override protected def getKey(value: T): Key = atomicInteger.getAndIncrement().toLong

  protected def beforeSave(key: Key, value: T): T = value

  override def getAll(): Or[List[T]] =
    Future {
      repository.values.toList
    }.toOr

  override def remove(id: Key): Or[Unit] =
    Future {
      repository.remove(id)
    }.toEmptyOr

  override def add(value: T): Or[Key] =
    Future {
      val key = getKey(value)
      repository.put(key, beforeSave(key, value))
      key
    }.toOr

  override def add(values: List[T]): Or[List[Key]] =
    values.traverseC(add)

  override def get(id: Key): Or[T] =
    repository.get(id) match {
      case Some(value) => value.toOr
      case None        => ValueNotFound(id).toErrorOr
    }

  override def update(id: Key, value: T): Or[Unit] =
    Future {
      repository.update(id, value)
    }.toOr

  override def getOption(id: Key): Or[Option[T]] =
    Future {
      repository.get(id)
    }.toOr

} 
Example 33
Source File: MenuIO.scala    From hacktoberfest-scala-algorithms   with GNU General Public License v3.0
package io.github.sentenza.hacktoberfest

import java.lang.System.out.println
import java.lang.reflect.Method
import java.util.concurrent.atomic.AtomicInteger

import io.github.sentenza.hacktoberfest.algos.{ImmutableSorting, MutableSorting, Sorting}

import scala.annotation.tailrec
import scala.util.{Success, Try}


  // NOTE: this excerpt omits the enclosing MenuIO object declaration and the heading and
  // gplDisclaimer values referenced here.
  def printDisclaimer() { println(heading + gplDisclaimer) }

  private val noOp = () => ()

  def readNumberInputs = scala.io.StdIn.readLine().split(",").map(_.toInt)

  case class MenuEntry(selector: Int, display: String, code: () => Unit)
  private val entries =
    List(
      MenuEntry(1, "Sorting algorithms", () => {
        println("You chose sorting\n")
        renderInteractiveMenu(List(
          MenuEntry(2, "MutableSorting", () => {
            println("You chose mutable sorting.")
            renderInteractiveMenu(createMethodMenuEntries(MutableSorting))
          }),
          MenuEntry(1, "ImmutableSorting", () => {
            println("You chose immutable sorting.")
            renderInteractiveMenu(createMethodMenuEntries(ImmutableSorting))
          }),
          MenuEntry(0, "Quit sorting", () => noOp)
        ))
      }),
      MenuEntry(0, "Quit the program",() => System.exit(0))
    )

  private def createMethodMenuEntries(sorting: Sorting[_,_]) = {
    val count = new AtomicInteger()
    retrieveMethodNames(sorting)
      .map(mName =>
        MenuEntry(count.incrementAndGet(), mName, () => executeSortMethod(sorting, mName))
      ).toList
  }

  private def retrieveMethodNames(sorting:Sorting[_,_]) =
    sorting.getClass.getMethods.map(_.getName).filter(_.endsWith("Sort")).distinct

  private def executeSortMethod(sorting: Sorting[_,_], method: String) = {
    println("You've chosen " + method + "! Please enter a list of comma separated integers.")
    val numberInputs = readNumberInputs
    println(s"You entered:${numberInputs.mkString(",")}. They are going to be sorted by $method.\n Sorting...")
    val sorted = execute(sorting, method, numberInputs)
    println(s"Your number entries sorted are: ${sorted.mkString(",")}")
  }

  private def execute[F[_],T](sorting: Sorting[_,_], method: String, numberInputs: F[_]) = {
    findMethod(sorting, method) match {
      case Some(m:Method) => m.invoke(sorting, numberInputs).asInstanceOf[F[_]]
      case None => throw new RuntimeException(s"Method $method not found in $sorting")
    }
  }

  private def findMethod(sorting: Sorting[_,_], method: String) =
    sorting.getClass.getMethods.find(m => m.getName.compare(method) == 0)

  @tailrec
  def renderInteractiveMenu(entries:List[MenuEntry]=entries): Unit = {
    println("Please choose:")
    entries.foreach {
      case MenuEntry(num, label, _) =>
        println(s"$num: $label")
    }

      Try(scala.io.StdIn.readInt()) match {
        case Success(0) =>
          ()
        case Success(choice) if entries.exists(_.selector == choice) =>
          entries.find(_.selector == choice).foreach{
            case MenuEntry(_, _, code) => code()
          }
          renderInteractiveMenu()
        case _ =>
          println("Invalid selection\n")
          renderInteractiveMenu()
      }
    }
} 
Example 34
Source File: DataSourceUtil.scala    From akka-tools   with MIT License 5 votes vote down vote up
package no.nextgentel.oss.akkatools.persistence.jdbcjournal

import java.sql.Connection
import java.util.concurrent.atomic.AtomicInteger
import javax.sql.DataSource

import liquibase.{Contexts, Liquibase}
import liquibase.database.DatabaseFactory
import liquibase.database.jvm.JdbcConnection
import liquibase.resource.ClassLoaderResourceAccessor
import org.h2.jdbcx.JdbcDataSource

import scala.util.Random

object DataSourceUtil {

  def createDataSource(h2DbName:String, pathToLiquibaseFile:String = "akka-tools-jdbc-journal-liquibase.sql"):DataSource = {

    this.synchronized {
      val dataSource = new JdbcDataSource
      val name = s"$h2DbName-${Random.nextInt(1000)}"
      println(s"****> h2-name: '$name'")
      dataSource.setURL(s"jdbc:h2:mem:$name;mode=oracle;DB_CLOSE_DELAY=-1")
      dataSource.setUser("sa")
      dataSource.setPassword("sa")

      // We need to grab a connection and not release it to prevent the db from being
      // released when no connections are active.
      dataSource.getConnection


      updateDb(dataSource, pathToLiquibaseFile)

      dataSource
    }
  }


  private def createLiquibase(dbConnection: Connection, diffFilePath: String): Liquibase = {
    val database = DatabaseFactory.getInstance.findCorrectDatabaseImplementation(new JdbcConnection(dbConnection))
    val classLoader = DataSourceUtil.getClass.getClassLoader
    val resourceAccessor = new ClassLoaderResourceAccessor(classLoader)
    new Liquibase(diffFilePath, resourceAccessor, database)
  }

  private def updateDb(db: DataSource, diffFilePath: String): Unit = {
    val dbConnection = db.getConnection
    val liquibase = createLiquibase(dbConnection, diffFilePath)
    try {
      liquibase.update(null.asInstanceOf[Contexts])
    } catch {
      case e: Throwable => throw e
    } finally {
      liquibase.forceReleaseLocks()
      dbConnection.rollback()
      dbConnection.close()
    }
  }


} 
Example 35
Source File: ParallelExecutor.scala    From nyaya   with GNU Lesser General Public License v2.1 5 votes vote down vote up
package nyaya.test

import java.util.concurrent.{Callable, ExecutorService, Executors, Future, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger
import nyaya.gen.ThreadNumber
import nyaya.prop.Prop
import ParallelExecutor._
import PTest._
import Executor.{DataCtx, Data}

// TODO data SampleSize = TotalSamples(n) | Fn(qty|%, gensize|%) | PerWorker(sampleSize)

object ParallelExecutor {
  val defaultThreadCount = 1.max(Runtime.getRuntime.availableProcessors - 1)

  def merge[A](a: RunState[A], b: RunState[A]): RunState[A] = {
    val runs = a.runs max b.runs
    (a.success, b.success) match {
      case (false, true) => RunState(runs, a.result)
      case _             => RunState(runs, b.result)
    }
  }
}

case class ParallelExecutor(workers: Int = defaultThreadCount) extends Executor {

  val debugPrefixes = (0 until workers).toVector.map(i => s"Worker #$i: ")

  override def run[A](p: Prop[A], g: Data[A], S: Settings): RunState[A] = {
    val sss = {
      var rem = S.sampleSize.value
      var i = workers
      var v = Vector.empty[SampleSize]
      while(i > 0) {
        val p = rem / i
        v :+= SampleSize(p)
        rem -= p
        i -= 1
      }
      v
    }

    if (S.debug) {
      val szs = sss.map(_.value)
      println(s"Samples/Worker: ${szs.mkString("{", ",", "}")} = Σ${szs.sum}")
    }

    val ai = new AtomicInteger(0)
    def task(worker: Int) = mkTask {
      val dp = debugPrefixes(worker)
      val data = g(DataCtx(sss(worker), ThreadNumber(worker), S.seed, dp))
      testN(p, data, () => ai.incrementAndGet(), S)
    }
    runAsync2(workers, task)
  }

  override def prove[A](p: Prop[A], d: Domain[A], S: Settings): RunState[A] = {
    val threads = workers min d.size

    val ai = new AtomicInteger(0)
    def task(worker: Int) = mkTask {
      proveN(p, d, worker, threads, _ => ai.incrementAndGet, S)
    }
    runAsync2(threads, task)
  }

  private[this] def mkTask[A](f: => RunState[A]) = new Callable[RunState[A]] {
    override def call(): RunState[A] = f
  }

  private[this] def runAsync2[A](threads: Int, f: Int => Callable[RunState[A]]): RunState[A] =
    runAsync(es => (0 until threads).toList.map(es submit f(_)))

  private[this] def runAsync[A](start: ExecutorService => List[Future[RunState[A]]]): RunState[A] = {
    val es: ExecutorService = Executors.newFixedThreadPool(workers)
    val fs = start(es)
    es.shutdown()
    val rss = fs.map(_.get())
    es.awaitTermination(1, TimeUnit.MINUTES)
    rss.foldLeft(RunState.empty[A])(merge)
  }
} 
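The while loop in run splits the total sample size across the workers so that the per-worker sizes differ by at most one and sum exactly to the total; the shared AtomicInteger then counts executed samples across all workers. A small sketch of just the splitting step (the function name is illustrative):

def splitEvenly(total: Int, workers: Int): Vector[Int] = {
  var rem = total
  var i   = workers
  var v   = Vector.empty[Int]
  while (i > 0) {
    val p = rem / i
    v :+= p
    rem -= p
    i -= 1
  }
  v
}

// splitEvenly(10, 3) == Vector(3, 3, 4); the parts always sum to the total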
Example 36
Source File: BroadcastRPCInterceptor.scala    From Linkis   with Apache License 2.0 5 votes vote down vote up
package com.webank.wedatasphere.linkis.rpc.interceptor.common

import java.util.concurrent.atomic.AtomicInteger

import com.webank.wedatasphere.linkis.common.ServiceInstance
import com.webank.wedatasphere.linkis.common.utils.{Logging, Utils}
import com.webank.wedatasphere.linkis.protocol.BroadcastProtocol
import com.webank.wedatasphere.linkis.rpc.conf.RPCConfiguration
import com.webank.wedatasphere.linkis.rpc.interceptor.{RPCInterceptor, RPCInterceptorChain, RPCInterceptorExchange}
import com.webank.wedatasphere.linkis.rpc.sender.SpringMVCRPCSender
import com.webank.wedatasphere.linkis.rpc.{BaseRPCSender, RPCSpringBeanCache, Sender}
import org.springframework.stereotype.Component

import scala.concurrent.Future


@Component
class BroadcastRPCInterceptor extends RPCInterceptor with Logging {
  override val order: Int = 0

  implicit val executor = BroadcastRPCInterceptor.broadcastThreadPool

  protected def getSenders(broadcast: BroadcastProtocol, applicationName: String): Array[Sender] =
    if (broadcast.instances != null && broadcast.instances.nonEmpty) {
      broadcast.instances.map(instance => Sender.getSender(ServiceInstance(applicationName, instance)))
    } else {
      var senders: Option[Array[Sender]] = None
      for (builder <- RPCSpringBeanCache.getBroadcastSenderBuilders if senders.isEmpty) senders = builder.build(broadcast, applicationName)
      senders.getOrElse(Sender.getInstances(applicationName).map(Sender.getSender))
    }

  override def intercept(interceptorExchange: RPCInterceptorExchange, chain: RPCInterceptorChain): Any = interceptorExchange.getProtocol match {
    case broadcast: BroadcastProtocol if !broadcast.skipBroadcast =>
      val completedSize = new AtomicInteger(0)
      val senders = getSenders(broadcast, chain.getApplicationName)
      var failedError: Option[Throwable] = None
      broadcast.skipBroadcast = true
      senders.map(sender => Future {
        Utils.tryCatch(sender.send(broadcast)){ t =>
          failedError = Some(t)
          val serviceInstance = sender match {
            case s: SpringMVCRPCSender => s.serviceInstance
            case b: BaseRPCSender => b.getApplicationName
            case _ => sender
          }
          warn(s"broadcast to $serviceInstance failed!", t)
        }
      }.map{ _ =>
        completedSize.incrementAndGet
        completedSize synchronized completedSize.notify
      })
      val sendNums = senders.length
      while (completedSize.get() < sendNums) {
        completedSize synchronized completedSize.wait(2000)
      }
      if(broadcast.throwsIfAnyFailed) failedError.foreach(throw _)
    case _ => chain.handle(interceptorExchange)
  }
}
private object BroadcastRPCInterceptor {
  private val broadcastThreadPool = Utils.newCachedExecutionContext(RPCConfiguration.BDP_RPC_BROADCAST_THREAD_SIZE.getValue,
    "Broadcast-ThreadPool-")
} 
Example 37
Source File: ControllerServer.scala    From Linkis   with Apache License 2.0 5 votes vote down vote up
package com.webank.wedatasphere.linkis.server.socket

import java.util
import java.util.concurrent.atomic.AtomicInteger

import com.webank.wedatasphere.linkis.common.conf.Configuration.DEFAULT_DATE_PATTERN
import com.webank.wedatasphere.linkis.common.listener.Event
import com.webank.wedatasphere.linkis.common.utils.{Logging, Utils}
import com.webank.wedatasphere.linkis.server.Message
import com.webank.wedatasphere.linkis.server.conf.ServerConfiguration._
import com.webank.wedatasphere.linkis.server.exception.BDPServerErrorException
import com.webank.wedatasphere.linkis.server.socket.controller.{ServerListenerEventBus, SocketServerEvent}
import org.apache.commons.lang.StringUtils
import org.apache.commons.lang.exception.ExceptionUtils
import org.apache.commons.lang.time.DateFormatUtils
import org.eclipse.jetty.websocket.servlet._

import scala.collection.JavaConversions._


private[server] class ControllerServer(serverListenerEventBus: ServerListenerEventBus)
  extends WebSocketServlet with SocketListener
   with Event with Logging {

  private val socketList = new util.HashMap[Int, ServerSocket](BDP_SERVER_SOCKET_QUEUE_SIZE.getValue)
  private val idGenerator = new AtomicInteger(0)

  override def configure(webSocketServletFactory: WebSocketServletFactory): Unit = {
    webSocketServletFactory.setCreator(new WebSocketCreator {
      override def createWebSocket(servletUpgradeRequest: ServletUpgradeRequest,
                                   servletUpgradeResponse: ServletUpgradeResponse): AnyRef =
        ServerSocket(servletUpgradeRequest.getHttpServletRequest, ControllerServer.this)
    })
  }

  def sendMessage(id: Int, message: Message): Unit = {
    val socket = socketList.get(id)
    if(socket == null) throw new BDPServerErrorException(11004, s"ServerSocket($id) does not exist!(ServerSocket($id)不存在!)")
    socket.sendMessage(message)
  }

  def sendMessageToAll(message: Message): Unit =
    socketList.values().foreach(_.sendMessage(message))

  def sendMessageToUser(user: String, message: Message): Unit =
    socketList.values().filter(s => s != null && s.user.contains(user)).foreach(_.sendMessage(message))

  override def onClose(socket: ServerSocket, code: Int, message: String): Unit = {
    val date = DateFormatUtils.format(socket.createTime, DEFAULT_DATE_PATTERN.getValue)
    if(!socketList.containsKey(socket.id))
      warn(s"$socket created at $date has expired, ignore the close function!")
    else {
      info(s"$socket closed at $date with code $code and message: " + message)
      socketList synchronized {
        if(socketList.containsKey(socket.id)) socketList.remove(socket.id)
      }
    }
  }

  override def onOpen(socket: ServerSocket): Unit = socketList synchronized {
    val index = idGenerator.getAndIncrement()
    socket.id = index
    socketList.put(index, socket)
    info(s"open a new $socket with id $index for user ${socket.user.orNull}!")
  }

  override def onMessage(socket: ServerSocket, message: String): Unit = {
    if(StringUtils.isBlank(message)) {
      socket.sendMessage(Message.error("Empty message!"))
      return
    }
    val socketServerEvent = Utils.tryCatch(new SocketServerEvent(socket, message)){ t =>
      warn("parse message failed!", t)
      socket.sendMessage(Message.error(ExceptionUtils.getRootCauseMessage(t), t))
      return
    }
    if(socket.user.isEmpty && socketServerEvent.serverEvent.getMethod != BDP_SERVER_SOCKET_LOGIN_URI.getValue) {
      socket.sendMessage(Message.noLogin("You are not logged in, please login first!(您尚未登录,请先登录!)").data("websocketTag", socketServerEvent.serverEvent.getWebsocketTag) << socketServerEvent.serverEvent.getMethod)
    } else Utils.tryCatch(serverListenerEventBus.post(socketServerEvent)){
      case t: BDPServerErrorException => Message.error(t.getMessage, t).data("websocketTag", socketServerEvent.serverEvent.getWebsocketTag) << socketServerEvent.serverEvent.getMethod
    }
  }
} 
Example 38
Source File: Blocking.scala    From keycloak-benchmark   with Apache License 2.0 5 votes vote down vote up
package io.gatling.keycloak

import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{ThreadFactory, Executors}

import io.gatling.core.validation.Success
import io.gatling.core.akka.GatlingActorSystem


object Blocking {
  GatlingActorSystem.instance.registerOnTermination(() => shutdown())

  private val threadPool = Executors.newCachedThreadPool(new ThreadFactory {
    val counter = new AtomicInteger();

    override def newThread(r: Runnable): Thread =
      new Thread(r, "blocking-thread-" + counter.incrementAndGet())
  })

  def apply(f: () => Unit) = {
    threadPool.execute(new Runnable() {
      override def run = {
        f()
      }
    })
    Success(())
  }

  def shutdown() = {
    threadPool.shutdownNow()
  }
} 
Example 39
Source File: Util.scala    From Backup-Repo   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hbase.util

import java.io._
import java.util.concurrent.atomic.AtomicInteger
import java.util.zip.{DeflaterOutputStream, InflaterInputStream}

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hbase.HBaseConfiguration

object Util {
  val iteration = new AtomicInteger(0)

  def getTempFilePath(conf: Configuration, prefix: String): String = {
    val fileSystem = FileSystem.get(conf)
    val path = new Path(s"$prefix-${System.currentTimeMillis()}-${iteration.getAndIncrement}")
    if (fileSystem.exists(path)) {
      fileSystem.delete(path, true)
    }
    path.getName
  }

  def serializeHBaseConfiguration(configuration: Configuration): Array[Byte] = {
    val bos = new ByteArrayOutputStream
    val deflaterOutputStream = new DeflaterOutputStream(bos)
    val dos = new DataOutputStream(deflaterOutputStream)
    configuration.write(dos)
    dos.close()
    bos.toByteArray
  }

  def deserializeHBaseConfiguration(arr: Array[Byte]) = {
    val conf = HBaseConfiguration.create
    conf.readFields(new DataInputStream(new InflaterInputStream(new ByteArrayInputStream(arr))))
    conf
  }
} 
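Here the AtomicInteger is only a tie-breaker: combined with the current timestamp it makes each generated temp path unique within the JVM. The same naming scheme without the Hadoop file-system calls (names are illustrative):

import java.util.concurrent.atomic.AtomicInteger

object UniqueNames {
  private val iteration = new AtomicInteger(0)
  def next(prefix: String): String =
    s"$prefix-${System.currentTimeMillis()}-${iteration.getAndIncrement()}"
}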
Example 40
Source File: Publisher.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.datastream

import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import java.util.concurrent.{ExecutorService, LinkedBlockingQueue, TimeUnit}

import com.sksamuel.exts.Logging
import com.sksamuel.exts.collection.BlockingQueueConcurrentIterator
import com.sksamuel.exts.concurrent.ExecutorImplicits._

import scala.collection.concurrent.TrieMap

trait Publisher[T] {
  def subscribe(subscriber: Subscriber[T])
}

object Publisher extends Logging {

  
  def merge[T](publishers: Seq[Publisher[T]], sentinel: T)(implicit executor: ExecutorService): Publisher[T] = {

    new Publisher[T] {
      override def subscribe(s: Subscriber[T]): Unit = {

        // subscribers to the returned publisher will be fed from an intermediate queue
        val queue = new LinkedBlockingQueue[Either[Throwable, T]](DataStream.DefaultBufferSize)

        // to keep track of how many subscribers are yet to finish; only once all upstream
        // publishers have finished will this subscriber be completed.
        val outstanding = new AtomicInteger(publishers.size)

        // we make a collection of all the subscriptions, so if there's an error at any point in the
        // merge, we can cancel all upstream producers
        val subscriptions = TrieMap.empty[Subscription, Int]

        // this cancellable can be used to cancel all the subscriptions
        val subscription = new Subscription {
          override def cancel(): Unit = subscriptions.keys.foreach(_.cancel)
        }

        // status flag, set when an error occurs, that the subscriptions should watch for
        val errorRef = new AtomicReference[Throwable](null)
        def terminate(t: Throwable): Unit = {
          logger.error(s"Error in merge", t)
          errorRef.set(t)
          subscription.cancel()
          queue.clear()
          queue.put(Right(sentinel))
        }

        // each subscriber will occupy its own thread, on the provided executor
        publishers.foreach { publisher =>
          executor.submit {
            try {
              publisher.subscribe(new Subscriber[T] {
                override def subscribed(sub: Subscription): Unit = if (sub != null) subscriptions.put(sub, 1)                
                override def next(t: T): Unit = {
                  var success = true
                  do {
                    success = queue.offer(Right(t), 100, TimeUnit.MILLISECONDS)
                  } while(!success && errorRef.get == null)
                }
                override def error(t: Throwable): Unit = terminate(t)
                override def completed(): Unit = {
                  if (outstanding.decrementAndGet() == 0) {
                    logger.debug("All subscribers have finished; marking queue with sentinel")
                    queue.put(Right(sentinel))
                  }
                }
              })
            } catch {
              case t: Throwable => terminate(t)
            }
          }
        }

        try {
          s.subscribed(subscription)
          BlockingQueueConcurrentIterator(queue, Right(sentinel)).takeWhile(_ => errorRef.get == null).foreach {
            case Left(t) => s.error(t)
            case Right(t) => s.next(t)
          }
          // once we've had an error that's it, we don't complete the subscriber
          if (errorRef.get == null)
            s.completed()
          else 
            s.error(errorRef.get)
        } catch {
          case t: Throwable =>
            logger.error("Error in merge subscriber", t)
            subscription.cancel()
            s.error(t)
        }

        logger.debug("Merge subscriber has completed")
      }
    }
  }
} 
Example 41
Source File: AvroWriter.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.avro

import java.io.OutputStream
import java.util.concurrent.atomic.AtomicInteger

import io.eels.Row
import io.eels.schema.StructType
import org.apache.avro.file.DataFileWriter
import org.apache.avro.generic
import org.apache.avro.generic.GenericRecord

class AvroWriter(structType: StructType, out: OutputStream) {
  
  private val schema = AvroSchemaFns.toAvroSchema(structType)
  private val datumWriter = new generic.GenericDatumWriter[GenericRecord](schema)
  private val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
  private val serializer = new RowSerializer(schema)
  private val _records = new AtomicInteger(0)

  dataFileWriter.create(schema, out)

  def write(row: Row): Unit = {
    val record = serializer.serialize(row)
    dataFileWriter.append(record)
    _records.incrementAndGet()
  }

  def records: Int = _records.get()

  def close(): Unit = {
    dataFileWriter.flush()
    dataFileWriter.close()
  }
} 
Example 42
Source File: HbaseSinkWriter.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.hbase

import java.util.concurrent.atomic.{AtomicInteger, AtomicLong}

import com.sksamuel.exts.Logging
import io.eels.schema.StructType
import io.eels.{Row, SinkWriter}
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.{BufferedMutator, _}

object HbaseSinkWriter extends Logging {

  def apply(namespace: String,
            table: String,
            numberOfWriters: AtomicInteger,
            schema: StructType,
            maxKeyValueSize: Option[Int],
            writeBufferSize: Option[Long],
            writeRowBatchSize: Int,
            serializer: HbaseSerializer,
            connection: Connection): Seq[HbaseSinkWriter] = {
    val tableName = TableName.valueOf(namespace, table)

    
  private val fieldsWithIndex = schema.fields.zipWithIndex

  override def write(row: Row): Unit = {
    if ((rowCounter.incrementAndGet() % writeRowBatchSize) == 0) mutator.flush()
    val rowKey = serializer.toBytes(row.values(rowKeyIndex), keyField.name, keyField.dataType)
    val put = new Put(rowKey)
    for ((field, index) <- fieldsWithIndex) {
      if (index != rowKeyIndex && row.values(index) != null) {
        val cf = field.columnFamily.getOrElse(sys.error(s"No Column Family defined for field '${field.name}'")).getBytes
        val col = field.name.getBytes()
        put.addColumn(cf, col, serializer.toBytes(row.values(index), field.name, field.dataType))
      }
    }
    mutator.mutate(put)
  }

  override def close(): Unit = {
    mutator.flush()
    if (numberOfWriters.decrementAndGet() == 0) mutator.close()
  }

} 
Example 43
Source File: ParquetHiveDialect.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.hive.dialect

import java.util.concurrent.atomic.AtomicInteger

import com.sksamuel.exts.Logging
import com.sksamuel.exts.OptionImplicits._
import com.sksamuel.exts.io.Using
import io.eels.component.hive.{HiveDialect, HiveOps, HiveOutputStream}
import io.eels.component.parquet._
import io.eels.component.parquet.util.{ParquetIterator, ParquetLogMute}
import io.eels.datastream.{DataStream, Publisher, Subscriber, Subscription}
import io.eels.schema.StructType
import io.eels.{Predicate, Row}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.permission.FsPermission
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient
import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe
import org.apache.hadoop.hive.ql.io.parquet.{MapredParquetInputFormat, MapredParquetOutputFormat}

import scala.math.BigDecimal.RoundingMode.RoundingMode

case class ParquetHiveDialect(options: ParquetWriteOptions = ParquetWriteOptions()) extends HiveDialect with Logging with Using {

  override val serde: String = classOf[ParquetHiveSerDe].getCanonicalName
  override val inputFormat: String = classOf[MapredParquetInputFormat].getCanonicalName
  override val outputFormat: String = classOf[MapredParquetOutputFormat].getCanonicalName

  override def input(path: Path,
                     ignore: StructType,
                     projectionSchema: StructType,
                     predicate: Option[Predicate])
                    (implicit fs: FileSystem, conf: Configuration): Publisher[Seq[Row]] = new Publisher[Seq[Row]] {

    val client = new HiveMetaStoreClient(new HiveConf)
    val ops = new HiveOps(client)

    override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {
      // convert the eel projection schema into a parquet schema which will be used by the native parquet reader
      try {
        val parquetProjectionSchema = ParquetSchemaFns.toParquetMessageType(projectionSchema)
        using(RowParquetReaderFn(path, predicate, parquetProjectionSchema.some, true)) { reader =>
          val subscription = new Subscription {
            override def cancel(): Unit = reader.close()
          }
          subscriber.subscribed(subscription)
          ParquetIterator(reader).grouped(DataStream.DefaultBatchSize).foreach(subscriber.next)
          subscriber.completed()
        }
      } catch {
        case t: Throwable => subscriber.error(t)
      }
    }
  }

  override def output(schema: StructType,
                      path: Path,
                      permission: Option[FsPermission],
                      roundingMode: RoundingMode,
                      metadata: Map[String, String])
                     (implicit fs: FileSystem, conf: Configuration): HiveOutputStream = {
    val path_x = path
    new HiveOutputStream {
      ParquetLogMute()

      private val _records = new AtomicInteger(0)
      logger.debug(s"Creating parquet writer at $path")
      private val writer = RowParquetWriterFn(path, schema, metadata, true, roundingMode, fs.getConf)

      override def write(row: Row) {
        require(row.values.nonEmpty, "Attempting to write an empty row")
        writer.write(row)
        _records.incrementAndGet()
      }

      override def close(): Unit = {
        logger.debug(s"Closing hive parquet writer $path")
        writer.close()
        // after the files are closed, we should set permissions if we've been asked to, this allows
        // all the files we create to stay consistent
        permission.foreach(fs.setPermission(path, _))
      }

      override def records: Int = _records.get()
      override def path: Path = path_x
    }
  }
} 
Example 44
Source File: OrcWriter.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.orc

import java.util.concurrent.atomic.AtomicInteger
import java.util.function.IntUnaryOperator

import com.sksamuel.exts.Logging
import com.typesafe.config.ConfigFactory
import io.eels.Row
import io.eels.schema.StructType
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector
import org.apache.orc.{OrcConf, OrcFile, TypeDescription}

import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer

// performs the actual write out of orc data, to be used by an orc sink
class OrcWriter(path: Path,
                structType: StructType,
                options: OrcWriteOptions)(implicit conf: Configuration) extends Logging {

  private val schema: TypeDescription = OrcSchemaFns.toOrcSchema(structType)
  logger.trace(s"Creating orc writer for schema $schema")

  private val batchSize = {
    val size = ConfigFactory.load().getInt("eel.orc.sink.batchSize")
    Math.max(Math.min(1024, size), 1)
  }
  logger.debug(s"Orc writer will use batchsize=$batchSize")

  private val buffer = new ArrayBuffer[Row](batchSize)
  private val serializers = schema.getChildren.asScala.map(OrcSerializer.forType).toArray
  private val batch = schema.createRowBatch(batchSize)

  OrcConf.COMPRESSION_STRATEGY.setString(conf, options.compressionStrategy.name)
  OrcConf.COMPRESS.setString(conf, options.compressionKind.name)
  options.encodingStrategy.map(_.name).foreach(OrcConf.ENCODING_STRATEGY.setString(conf, _))
  options.compressionBufferSize.foreach(OrcConf.BUFFER_SIZE.setLong(conf, _))
  private val woptions = OrcFile.writerOptions(conf).setSchema(schema)

  options.rowIndexStride.foreach { size =>
    woptions.rowIndexStride(size)
    logger.debug(s"Using stride size = $size")
  }

  if (options.bloomFilterColumns.nonEmpty) {
    woptions.bloomFilterColumns(options.bloomFilterColumns.mkString(","))
    logger.debug(s"Using bloomFilterColumns = ${options.bloomFilterColumns}")
  }
  private lazy val writer = OrcFile.createWriter(path, woptions)

  private val counter = new AtomicInteger(0)

  def write(row: Row): Unit = {
    buffer.append(row)
    if (buffer.size == batchSize)
      flush()
  }

  def records: Int = counter.get()

  def flush(): Unit = {

    def writecol[T <: ColumnVector](rowIndex: Int, colIndex: Int, row: Row): Unit = {
      val value = row.values(colIndex)
      val vector = batch.cols(colIndex).asInstanceOf[T]
      val serializer = serializers(colIndex).asInstanceOf[OrcSerializer[T]]
      serializer.writeToVector(rowIndex, vector, value)
    }

    // don't use foreach here, using old school for loops for perf
    for (rowIndex <- buffer.indices) {
      val row = buffer(rowIndex)
      for (colIndex <- batch.cols.indices) {
        writecol(rowIndex, colIndex, row)
      }
    }

    batch.size = buffer.size
    writer.addRowBatch(batch)
    counter.updateAndGet(new IntUnaryOperator {
      override def applyAsInt(operand: Int): Int = operand + batch.size
    })
    buffer.clear()
    batch.reset()
  }

  def close(): Long = {
    if (buffer.nonEmpty)
      flush()
    writer.close()
    val count = writer.getNumberOfRows
    logger.info(s"Orc writer wrote $count rows")
    count
  }
} 
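Note how records are counted with updateAndGet and an explicit IntUnaryOperator. On Scala 2.12+ the SAM conversion lets the same atomic update be written with a lambda; a tiny sketch:

import java.util.concurrent.atomic.AtomicInteger

val counter   = new AtomicInteger(0)
val batchSize = 1024
counter.updateAndGet(current => current + batchSize) // atomically adds the batch size
// counter.addAndGet(batchSize) is equivalent for a plain addition
println(counter.get()) // 1024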
Example 45
Source File: SKRSpec.scala    From spark-kafka-writer   with Apache License 2.0 5 votes vote down vote up
package com.github.benfradet.spark.kafka.writer

import java.util.concurrent.atomic.AtomicInteger

import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.scalatest.concurrent.Eventually
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}

import scala.collection.mutable.ArrayBuffer
import scala.util.Random
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

case class Foo(a: Int, b: String)

trait SKRSpec
  extends AnyWordSpec
  with Matchers
  with BeforeAndAfterEach
  with BeforeAndAfterAll
  with Eventually {

  val sparkConf = new SparkConf()
    .setMaster("local[1]")
    .setAppName(getClass.getSimpleName)

  var ktu: KafkaTestUtils = _
  override def beforeAll(): Unit = {
    ktu = new KafkaTestUtils
    ktu.setup()
  }
  override def afterAll(): Unit = {
    SKRSpec.callbackTriggerCount.set(0)
    if (ktu != null) {
      ktu.tearDown()
      ktu = null
    }
  }

  var topic: String = _
  var ssc: StreamingContext = _
  var spark: SparkSession = _
  override def afterEach(): Unit = {
    if (ssc != null) {
      ssc.stop()
      ssc = null
    }
    if (spark != null) {
      spark.stop()
      spark = null
    }
  }
  override def beforeEach(): Unit = {
    ssc = new StreamingContext(sparkConf, Seconds(1))
    spark = SparkSession.builder
      .config(sparkConf)
      .getOrCreate()
    topic = s"topic-${Random.nextInt()}"
    ktu.createTopics(topic)
  }

  def collect(ssc: StreamingContext, topic: String): ArrayBuffer[String] = {
    val kafkaParams = Map(
      "bootstrap.servers" -> ktu.brokerAddress,
      "auto.offset.reset" -> "earliest",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "test-collect"
    )
    val results = new ArrayBuffer[String]
    KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Set(topic), kafkaParams)
    ).map(_.value())
      .foreachRDD { rdd =>
        results ++= rdd.collect()
        ()
      }
    results
  }

  val producerConfig = Map(
    "bootstrap.servers" -> "127.0.0.1:9092",
    "key.serializer" -> classOf[StringSerializer].getName,
    "value.serializer" -> classOf[StringSerializer].getName
  )
}

object SKRSpec {
  val callbackTriggerCount = new AtomicInteger()
} 
Example 46
Source File: Persistence.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package kvstore

import akka.actor.{Props, Actor}
import scala.util.Random
import java.util.concurrent.atomic.AtomicInteger

object Persistence {
  case class Persist(key: String, valueOption: Option[String], id: Long)
  case class Persisted(key: String, id: Long)

  class PersistenceException extends Exception("Persistence failure")

  def props(flaky: Boolean): Props = Props(classOf[Persistence], flaky)
}

class Persistence(flaky: Boolean) extends Actor {
  import Persistence._

  def receive = {
    case Persist(key, _, id) =>
      if (!flaky || Random.nextBoolean()) sender ! Persisted(key, id)
      else throw new PersistenceException
  }

} 
Example 47
Source File: NonBlockingMutexSpec.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.libs.concurrent

import scala.language.reflectiveCalls

import org.specs2.mutable._
import java.util.concurrent.atomic.AtomicInteger
import scala.concurrent.{ ExecutionContext, Promise, Future, Await }
import scala.concurrent.duration.{ Duration, SECONDS }

object NonBlockingMutexSpec extends Specification {

  val waitTime = Duration(2, SECONDS)

  trait Tester {
    def run(body: => Unit): Unit
  }

  class MutexTester extends Tester {
    val mutex = new NonBlockingMutex()
    def run(body: => Unit) = mutex.exclusive(body)
  }

  class NaiveTester extends Tester {
    def run(body: => Unit) = body
  }

  def countOrderingErrors(runs: Int, tester: Tester)(implicit ec: ExecutionContext): Future[Int] = {
    val result = Promise[Int]()
    val runCount = new AtomicInteger(0)
    val orderingErrors = new AtomicInteger(0)

    for (i <- 0 until runs) {
      tester.run {
        val observedRunCount = runCount.getAndIncrement()

        // If we see observedRunCount != i then this task was run out of order
        if (observedRunCount != i) {
          orderingErrors.incrementAndGet() // Record the error
        }
        // If this is the last task, complete our result promise
        if ((observedRunCount + 1) >= runs) {
          result.success(orderingErrors.get)
        }
      }
    }
    result.future
  }

  "NonBlockingMutex" should {

    "run a single operation" in {
      val p = Promise[Int]()
      val mutex = new NonBlockingMutex()
      mutex.exclusive { p.success(1) }
      Await.result(p.future, waitTime) must_== (1)
    }

    "run two operations" in {
      val p1 = Promise[Unit]()
      val p2 = Promise[Unit]()
      val mutex = new NonBlockingMutex()
      mutex.exclusive { p1.success(()) }
      mutex.exclusive { p2.success(()) }
      Await.result(p1.future, waitTime) must_== (())
      Await.result(p2.future, waitTime) must_== (())
    }

    "run code in order" in {
      import ExecutionContext.Implicits.global

      def percentageOfRunsWithOrderingErrors(runSize: Int, tester: Tester): Int = {
        val results: Seq[Future[Int]] = for (i <- 0 until 9) yield {
          countOrderingErrors(runSize, tester)
        }
        Await.result(Future.sequence(results), waitTime).filter(_ > 0).size * 10
      }

      // Iteratively increase the run size until we get observable errors 90% of the time
      // We want a high error rate because we want to then use the MutexTester
      // on the same run size and know that it is fixing up some problems. If the run size
      // is too small then the MutexTester probably isn't doing anything. We use
      // dynamic run sizing because the actual size that produces errors will vary
      // depending on the environment in which this test is run.
      var runSize = 8 // This usually reaches 8192 on my dev machine with 10 simultaneous queues
      var errorPercentage = 0
      while (errorPercentage < 90 && runSize < 1000000) {
        runSize = runSize << 1
        errorPercentage = percentageOfRunsWithOrderingErrors(runSize, new NaiveTester())
      }
      //println(s"Got $errorPercentage% ordering errors on run size of $runSize")

      // Now show that this run length works fine with the MutexTester
      percentageOfRunsWithOrderingErrors(runSize, new MutexTester()) must_== 0
    }

  }

} 
Example 48
Source File: CachingEagerEvaluatingDependencyAnalyzer.scala    From exodus   with MIT License 5 votes vote down vote up
package com.wix.bazel.migrator.analyze

import java.nio.file.{Files, Path, Paths}
import java.util
import java.util.concurrent.atomic.AtomicInteger

import com.fasterxml.jackson.annotation.JsonTypeInfo
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.wix.bazel.migrator.model._
import com.wixpress.build.maven.MavenScope
import org.slf4j.LoggerFactory

import scala.collection.JavaConverters._
import scala.collection.parallel.ParMap

//this is needed since currently the transformer isn't thread safe but the dependency analyzer is
class CachingEagerEvaluatingDependencyAnalyzer(sourceModules: Set[SourceModule], dependencyAnalyzer: DependencyAnalyzer, performSourceAnalysis: Boolean) extends DependencyAnalyzer {
  private val log = LoggerFactory.getLogger(getClass)
  private val cachePath = Files.createDirectories(Paths.get("./cache"))
  private val objectMapper = new ObjectMapper()
    .registerModule(DefaultScalaModule)
    .registerModule(new RelativePathSupportingModule)
    .registerModule(new SourceModuleSupportingModule(sourceModules))
    .addMixIn(classOf[Target], classOf[TypeAddingMixin])
    .addMixIn(classOf[CodePurpose], classOf[TypeAddingMixin])
    .addMixIn(classOf[TestType], classOf[TypeAddingMixin])
    .addMixIn(classOf[MavenScope], classOf[TypeAddingMixin])

  private val collectionType = objectMapper.getTypeFactory.constructCollectionType(classOf[util.Collection[Code]], classOf[Code])
  private val clean = performSourceAnalysis

  private def cachePathForSourceModule(m: SourceModule) = {
    cachePath.resolve(m.relativePathFromMonoRepoRoot + ".cache")
  }

  private val size = sourceModules.size
  private val counter = new AtomicInteger()
  private val tenthSize = size / 10

  private def initCachePathForSourceModule(p: Path) = Files.createDirectories(p.getParent)

  private def maybeCodeFromCache(p: Path): Option[List[Code]] = {
    if (clean || !Files.exists(p)) return None
    try {
      val value: util.Collection[Code] = objectMapper.readValue(p.toFile, collectionType)
      val codeList = value.asScala.toList
      Some(codeList)
    } catch {
      case e: Exception =>
        log.warn(s"Error reading $p ,deleting cache file.")
        log.warn(e.getMessage)
        Files.deleteIfExists(p)
        None
    }
  }

  private def retrieveCodeAndCache(m: SourceModule, cachePath: Path): List[Code] = {
    val codeList = dependencyAnalyzer.allCodeForModule(m)
    Files.deleteIfExists(cachePath)
    initCachePathForSourceModule(cachePath)
    Files.createFile(cachePath)
    try {
      objectMapper.writeValue(cachePath.toFile, codeList)
    } catch {
      case e: InterruptedException =>
        log.warn(s"aborting write to file $cachePath")
        Files.deleteIfExists(cachePath)
        throw e
      case e: Exception =>
        log.warn(s"could not write to file $cachePath")
        log.warn(e.getMessage)
    }
    codeList
  }

  private def calculateMapEntryFor(sourceModule: SourceModule) = {
    printProgress()
    val cachePath = cachePathForSourceModule(sourceModule)
    (sourceModule, maybeCodeFromCache(cachePath).getOrElse(retrieveCodeAndCache(sourceModule, cachePath)))
  }

  private def printProgress(): Unit = {
    if (tenthSize > 0) {
      val currentCount = counter.incrementAndGet()
      if (currentCount % tenthSize == 0) {
        log.info(s"DependencyAnalyzer:allCodeForModule:\t ${currentCount / tenthSize * 10}% done")
      }
    }
  }

  private val allCode: ParMap[SourceModule, List[Code]] = sourceModules.par.map(calculateMapEntryFor).toMap

  override def allCodeForModule(module: SourceModule): List[Code] = allCode(module)
}

@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, property = "__class")
trait TypeAddingMixin 
Example 49
Source File: DarwinConcurrentHashMapSpec.scala    From darwin   with Apache License 2.0 5 votes vote down vote up
package it.agilelab.darwin.common

import java.util.concurrent.atomic.AtomicInteger

import org.scalatest.BeforeAndAfter
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

class DarwinConcurrentHashMapSpec extends AnyFlatSpec with Matchers with BeforeAndAfter {
  private val realJavaVersion = System.getProperty("java.version")

  after {
    System.setProperty("java.version", realJavaVersion)
  }

  def test(): Unit = {
    val threadNumber = 1000
    val map = DarwinConcurrentHashMap.empty[String, Int]
    var counter = 0
    val threadCounter = new AtomicInteger(0)
    val runnables = for (_ <- 1 to threadNumber) yield {
      new Runnable {
        override def run(): Unit = {
          threadCounter.incrementAndGet()
          val res = map.getOrElseUpdate("A", {
            counter += 1
            counter
          })
          res should be(1)
        }
      }
    }
    val threads = for (r <- runnables) yield {
      val t = new Thread(r)
      t
    }
    for (t <- threads) {
      t.start()
    }
    for (t <- threads) {
      t.join()
    }
    threadCounter.get() should be(threadNumber)
  }


  it should "not evaluate the value if the key is present JAVA 8" in {
    test()
  }

  it should "not evaluate the value if the key is present JAVA 7" in {
    if (JavaVersion.parseJavaVersion(realJavaVersion) >= 8) {
      System.setProperty("java.version", "1.7")
      test()
    } else {
      assert(true)
    }
  }

} 
Example 50
Source File: AESGCM.scala    From tsec   with MIT License 5 votes vote down vote up
package tsec.cipher.symmetric.jca

import java.util.concurrent.atomic.AtomicInteger

import cats.effect.Sync
import tsec.cipher.common.padding.NoPadding
import tsec.cipher.symmetric._
import tsec.cipher.symmetric.jca.primitive.JCAAEADPrimitive

sealed abstract class AESGCM[A] extends JCAAEAD[A, GCM, NoPadding] with AES[A] with JCAKeyGen[A] {
  implicit val ae: AESGCM[A] = this

  implicit def genEncryptor[F[_]: Sync](implicit c: AES[A]): AADEncryptor[F, A, SecretKey] =
    JCAAEADPrimitive.sync[F, A, GCM, NoPadding]

  
  def incrementalIvStrategy[F[_]](implicit F: Sync[F]): CounterIvGen[F, A] =
    new CounterIvGen[F, A] {
      private val delta                      = 1000000
      private val maxVal: Int                = Int.MaxValue - delta
      private val fixedCounter: Array[Byte]  = Array.fill[Byte](8)(0.toByte)
      private val atomicNonce: AtomicInteger = new AtomicInteger(Int.MinValue)

      def refresh: F[Unit] = F.delay(atomicNonce.set(Int.MinValue))

      def counterState: F[Long] = F.delay(unsafeCounterState)

      def unsafeCounterState: Long = atomicNonce.get().toLong

      def genIv: F[Iv[A]] =
        F.delay(genIvUnsafe)

      def genIvUnsafe: Iv[A] =
        if (atomicNonce.get() >= maxVal)
          throw IvError("Maximum safe nonce number reached")
        else {
          val nonce = atomicNonce.incrementAndGet()
          val iv    = new Array[Byte](12) //GCM optimal iv len
          iv(0) = (nonce >> 24).toByte
          iv(1) = (nonce >> 16).toByte
          iv(2) = (nonce >> 8).toByte
          iv(3) = nonce.toByte
          System.arraycopy(fixedCounter, 0, iv, 4, 8)
          Iv[A](iv)
        }
    }

  def ciphertextFromConcat(rawCT: Array[Byte]): Either[CipherTextError, CipherText[A]] =
    CTOPS.ciphertextFromArray[A, GCM, NoPadding](rawCT)
}

sealed trait AES128GCM

object AES128GCM extends AESGCM[AES128GCM] with AES128[AES128GCM]

sealed trait AES192GCM

object AES192GCM extends AESGCM[AES192GCM] with AES192[AES192GCM]

sealed trait AES256GCM

object AES256GCM extends AESGCM[AES256GCM] with AES256[AES256GCM] 
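The incremental IV strategy above turns each AtomicInteger value into the first four bytes of a 12-byte GCM IV, big-endian, with the remaining eight bytes fixed at zero. The byte packing in isolation (the function name is illustrative):

def packCounterIv(nonce: Int): Array[Byte] = {
  val iv = new Array[Byte](12) // 12 bytes is the GCM-optimal IV length
  iv(0) = (nonce >> 24).toByte // most significant byte first
  iv(1) = (nonce >> 16).toByte
  iv(2) = (nonce >> 8).toByte
  iv(3) = nonce.toByte
  iv                           // bytes 4-11 stay zero, matching the fixed counter above
}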
Example 51
Source File: NamedExecutors.scala    From mango   with Apache License 2.0 5 votes vote down vote up
package com.kakao.mango.concurrent

import java.util.concurrent.Executors._
import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{ForkJoinWorkerThread, ExecutorService, ScheduledExecutorService, ForkJoinPool}
import scala.language.implicitConversions


object NamedExecutors {

  implicit def toRich(e: ExecutorService): RichExecutorService = new RichExecutorService(e)

  implicit def toRich(e: ScheduledExecutorService): RichScheduledExecutorService = new RichScheduledExecutorService(e)

  def scheduled(name: String, daemon: Boolean = true): RichScheduledExecutorService = {
    newSingleThreadScheduledExecutor(NamedThreadFactory(name, daemon))
  }

  def scheduledPool(name: String, size: Int, daemon: Boolean = true): RichScheduledExecutorService = {
    newScheduledThreadPool(size, NamedThreadFactory(name, daemon))
  }

  def cached(name: String, daemon: Boolean = true): RichExecutorService = {
    newCachedThreadPool(NamedThreadFactory(name, daemon))
  }

  def fixed(name: String, size: Int, daemon: Boolean = true): RichExecutorService = {
    newFixedThreadPool(size, NamedThreadFactory(name, daemon))
  }

  def single(name: String, daemon: Boolean = true): RichExecutorService = {
    newSingleThreadExecutor(NamedThreadFactory(name, daemon))
  }

  def forkJoin(name: String, size: Int, daemon: Boolean = true, asyncMode: Boolean = false): RichExecutorService = {
    val counter = new AtomicInteger()
    new ForkJoinPool(size, new ForkJoinWorkerThreadFactory {
      override def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = {
        val thread = new ForkJoinWorkerThread(pool) {}
        thread.setName(s"$name-${counter.incrementAndGet()}")
        thread.setDaemon(daemon)
        thread
      }
    }, null, asyncMode)
  }

} 
Example 52
Source File: Connection.scala    From finagle-postgres   with Apache License 2.0 5 votes vote down vote up
package com.twitter.finagle.postgres.connection

import java.util.concurrent.atomic.AtomicInteger

import com.twitter.finagle.postgres.messages._
import com.twitter.logging.Logger
import scala.collection.mutable.ListBuffer


class Connection(startState: State = AuthenticationRequired) {
  val id = Connection.nextId()
  private[this] val logger = Logger(s"${getClass.getName}.connection-$id")
  private[this] val stateMachine = new ConnectionStateMachine(startState, id)


  def send(msg: FrontendMessage) = {
    logger.ifDebug("Sent frontend message of type: %s".format(msg.getClass.getName))

    msg match {
      case q: Query =>
        logger.ifDebug("Query: %s".format(q.str))
      case _ =>
    }

    stateMachine.onEvent(msg)
  }

  def receive(msg: BackendMessage): Option[PgResponse] = {
    logger.ifDebug("Received backend message of type: %s".format(msg.getClass.getName))

    val result = stateMachine.onEvent(msg)
    result foreach {
      r => logger.ifDebug(s"Emitting result ${r.getClass.getName}")
    }
    result
  }
}

object Connection {
  private[this] val currentId = new AtomicInteger(0)
  private def nextId() = currentId.getAndIncrement()
} 
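Connection ids come from a single shared AtomicInteger, so every connection gets a distinct, increasing id without locking. The pattern on its own:

import java.util.concurrent.atomic.AtomicInteger

object IdGenerator {
  private val currentId = new AtomicInteger(0)
  def nextId(): Int = currentId.getAndIncrement() // 0, 1, 2, ... even with concurrent callers
}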
Example 53
Source File: ActionsHandler.scala    From spark-http-stream   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package org.apache.spark.sql.execution.streaming.http

import java.util.Properties
import scala.collection.mutable.ArrayBuffer
import org.apache.kafka.clients.producer.KafkaProducer
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.spark.internal.Logging
import org.apache.spark.sql.Row
import java.sql.Timestamp
import org.apache.spark.sql.types.StructType
import java.util.concurrent.atomic.AtomicInteger


trait ActionsHandler {
	def listActionHandlerEntries(requestBody: Map[String, Any]): ActionHandlerEntries;
	def destroy();
}

trait ActionsHandlerFactory {
	def createInstance(params: Params): ActionsHandler;
}

abstract class AbstractActionsHandler extends ActionsHandler {
	def getRequiredParam(requestBody: Map[String, Any], key: String): Any = {
		val opt = requestBody.get(key);
		if (opt.isEmpty) {
			throw new MissingRequiredRequestParameterException(key);
		}

		opt.get;
	}

	override def destroy() = {
	}
}

class NullActionsHandler extends AbstractActionsHandler {
	override def listActionHandlerEntries(requestBody: Map[String, Any]): ActionHandlerEntries = new ActionHandlerEntries() {
		def apply(action: String) = Map[String, Any]();
		//yes, do nothing
		def isDefinedAt(action: String) = false;
	};
}

//rich row with extra info: id, time stamp, ...
case class RowEx(originalRow: Row, batchId: Long, offsetInBatch: Long, timestamp: Timestamp) {
	def withTimestamp(): Row = Row.fromSeq(originalRow.toSeq :+ timestamp);
	def withId(): Row = Row.fromSeq(originalRow.toSeq :+ s"$batchId-$offsetInBatch");
	def extra: (Long, Long, Timestamp) = { (batchId, offsetInBatch, timestamp) };
}

trait SendStreamActionSupport {
	def onReceiveStream(topic: String, rows: Array[RowEx]);
	def getRequiredParam(requestBody: Map[String, Any], key: String): Any;

	val listeners = ArrayBuffer[StreamListener]();

	def addListener(listener: StreamListener): this.type = {
		listeners += listener;
		this;
	}

	protected def notifyListeners(topic: String, data: Array[RowEx]) {
		listeners.foreach { _.onArrive(topic, data); }
	}

	def handleSendStream(requestBody: Map[String, Any]): Map[String, Any] = {
		val topic = getRequiredParam(requestBody, "topic").asInstanceOf[String];
		val batchId = getRequiredParam(requestBody, "batchId").asInstanceOf[Long];
		val rows = getRequiredParam(requestBody, "rows").asInstanceOf[Array[Row]];
		val ts = new Timestamp(System.currentTimeMillis());
		var index = -1;
		val rows2 = rows.map { row ⇒
			index += 1;
			RowEx(Row.fromSeq(row.toSeq), batchId, index, ts)
		}

		onReceiveStream(topic, rows2);
		notifyListeners(topic, rows2);
		Map("rowsCount" -> rows.size);
	}
} 
Example 54
Source File: MultipleReadersSingleWriterCacheTests.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.database.test

import java.util.concurrent.atomic.AtomicInteger

import scala.concurrent.Await
import scala.concurrent.Future
import scala.concurrent.duration.DurationInt

import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.Matchers
import org.scalatest.junit.JUnitRunner

import common.StreamLogging
import common.WskActorSystem
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.database.CacheChangeNotification
import org.apache.openwhisk.core.database.MultipleReadersSingleWriterCache
import org.apache.openwhisk.core.entity.CacheKey

@RunWith(classOf[JUnitRunner])
class MultipleReadersSingleWriterCacheTests
    extends FlatSpec
    with Matchers
    with MultipleReadersSingleWriterCache[String, String]
    with WskActorSystem
    with StreamLogging {

  behavior of "the cache"

  it should "execute the callback on invalidating and updating an entry" in {
    val ctr = new AtomicInteger(0)
    val key = CacheKey("key")

    implicit val transId = TransactionId.testing
    lazy implicit val cacheUpdateNotifier = Some {
      new CacheChangeNotification {
        override def apply(key: CacheKey) = {
          ctr.incrementAndGet()
          Future.successful(())
        }
      }
    }

    // Create an cache entry
    Await.ready(cacheUpdate("doc", key, Future.successful("db save successful")), 10.seconds)
    ctr.get shouldBe 1

    // Callback should be called if entry exists
    Await.ready(cacheInvalidate(key, Future.successful(())), 10.seconds)
    ctr.get shouldBe 2
    Await.ready(cacheUpdate("docdoc", key, Future.successful("update in db successful")), 10.seconds)
    ctr.get shouldBe 3

    // Callback should be called if entry does not exist
    Await.ready(cacheInvalidate(CacheKey("abc"), Future.successful(())), 10.seconds)
    ctr.get shouldBe 4
  }
} 
Example 55
Source File: ReferenceCounted.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.database.cosmosdb

import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}

private[cosmosdb] case class ReferenceCounted[T <: AutoCloseable](private val inner: T) {
  private val count = new AtomicInteger(0)

  private def inc(): Unit = count.incrementAndGet()

  private def dec(): Unit = {
    val newCount = count.decrementAndGet()
    if (newCount <= 0) {
      inner.close()
      //Turn count to negative to ensure future reference call fail
      count.decrementAndGet()
    }
  }

  def isClosed: Boolean = count.get() < 0

  def reference(): CountedReference = {
    require(count.get >= 0, "Reference is already closed")
    new CountedReference
  }

  class CountedReference extends AutoCloseable {
    private val closed = new AtomicBoolean()
    inc()
    override def close(): Unit = if (closed.compareAndSet(false, true)) dec()

    def get: T = inner
  }
} 
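A short, hypothetical usage sketch of ReferenceCounted (ignoring the private[cosmosdb] visibility): the wrapped resource is closed exactly once, when the last outstanding reference is closed, after which further reference() calls fail.

class FakeClient extends AutoCloseable {
  override def close(): Unit = println("inner closed")
}

val counted = ReferenceCounted(new FakeClient)
val ref1 = counted.reference()
val ref2 = counted.reference()
ref1.close()              // count 2 -> 1, the inner client stays open
ref2.close()              // count 1 -> 0, prints "inner closed"
println(counted.isClosed) // true; counted.reference() would now throw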
Example 56
Source File: RateThrottler.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.entitlement

import scala.collection.concurrent.TrieMap

import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.entity.Identity
import org.apache.openwhisk.core.entity.UUID
import java.util.concurrent.atomic.AtomicInteger


  def update(maxPerMinute: Int): Int = {
    roll()
    lastMinCount.incrementAndGet()
  }

  def roll(): Unit = {
    val curMin = getCurrentMinute
    if (curMin != lastMin) {
      lastMin = curMin
      lastMinCount.set(0)
    }
  }

  private def getCurrentMinute = System.currentTimeMillis / (60 * 1000)
} 
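The snippet above is cut off before its enclosing class, so here is a self-contained sketch of the rolling per-minute counter it relies on; the field names mirror those referenced in update and roll, everything else is assumed:

import java.util.concurrent.atomic.AtomicInteger

class MinuteCounter {
  @volatile private var lastMin: Long = getCurrentMinute
  private val lastMinCount = new AtomicInteger(0)

  // increments the counter for the current minute and returns the new count
  def update(): Int = {
    roll()
    lastMinCount.incrementAndGet()
  }

  // resets the counter whenever the wall-clock minute changes
  private def roll(): Unit = {
    val curMin = getCurrentMinute
    if (curMin != lastMin) {
      lastMin = curMin
      lastMinCount.set(0)
    }
  }

  private def getCurrentMinute: Long = System.currentTimeMillis / (60 * 1000)
}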
Example 57
Source File: DesignTimeLifeCycleHookTest.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.lifecycle

import java.util.concurrent.atomic.AtomicInteger

import wvlet.airframe.newSilentDesign
import wvlet.airspec.AirSpec


class DesignTimeLifeCycleHookTest extends AirSpec {
  def `support design time bindings`: Unit = {
    val order              = new AtomicInteger(1)
    val initializedTime    = new AtomicInteger(0)
    val injectTime         = new AtomicInteger(0)
    val startTime          = new AtomicInteger(0)
    val afterStartTime     = new AtomicInteger(0)
    val beforeShutdownTime = new AtomicInteger(0)
    val shutdownTime       = new AtomicInteger(0)

    val d = newSilentDesign
      .bind[String].toInstance("hello")
      .onInit(x => initializedTime.set(order.getAndIncrement()))
      .onInject(x => injectTime.set(order.getAndIncrement()))
      .onStart(x => startTime.set(order.getAndIncrement()))
      .afterStart(x => afterStartTime.set(order.getAndIncrement()))
      .beforeShutdown(x => beforeShutdownTime.set(order.getAndIncrement()))
      .onShutdown(x => shutdownTime.set(order.getAndIncrement()))

    d.build[String] { s =>
      //
    }

    initializedTime.get shouldBe 1
    injectTime.get shouldBe 2
    startTime.get shouldBe 3
    afterStartTime.get shouldBe 4
    beforeShutdownTime.get shouldBe 5
    shutdownTime.get shouldBe 6
  }

  def `add lifecycle only`: Unit = {
    val v = new AtomicInteger(0)
    val d = newSilentDesign
      .bind[AtomicInteger].toInstance(v)

    val d2 = d
      .bind[AtomicInteger]
      .onStart { x => x.addAndGet(1) }
      .afterStart { x => x.addAndGet(1 << 1) }
      .onShutdown { x => x.addAndGet(1 << 2) }
      .beforeShutdown { x => x.addAndGet(1 << 3) }
      .onInit { x => x.addAndGet(1 << 4) }
      .onInject { x => x.addAndGet(1 << 5) }

    d2.withSession { s => }

    v.get() shouldBe 0x3f
  }

} 
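In the second test each lifecycle hook sets a distinct bit on the shared AtomicInteger, so the final value is the sum of six powers of two:

val expected = (0 until 6).map(1 << _).sum // 1 + 2 + 4 + 8 + 16 + 32 = 63 = 0x3f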
Example 58
Source File: SingletonTest.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe

import java.util.concurrent.atomic.AtomicInteger

import wvlet.airframe.SingletonTest._
import wvlet.airspec.AirSpec
import wvlet.log.{LogLevel, LogSupport, Logger}

object SingletonTest {
  type TraitCounter = AtomicInteger

  // This doesn't tell about Singleton
  trait X extends LogSupport {
    debug("new X is instantiated")

    val counter = bind[TraitCounter].onInit { c =>
      val v = c.incrementAndGet()
      debug(s"Counter is initialized: ${v}")
    }
  }

  trait A {
    val t = bind[X]
  }

  trait B {
    val t = bind[X]
  }

  trait SingletonService {
    val service = bind[X]
  }

  trait U1 extends SingletonService
  trait U2 extends SingletonService

  trait NonAbstract extends LogSupport {
    def hello: String = "hello"
  }

  trait C extends NonAbstract {
    override def hello = "nice"
  }

  trait E extends LogSupport {
    val m = bind[NonAbstract]
  }
}


class SingletonTest extends AirSpec {
  scalaJsSupport

  val d =
    newDesign
      .bind[TraitCounter].toInstance(new AtomicInteger(0))

  def `bind singleton with bind[X]` : Unit = {
    val session = d.newSession

    val a = session.build[A]
    val b = session.build[B]

    a.t.counter shouldBeTheSameInstanceAs b.t.counter
    session.build[TraitCounter].get() shouldBe 1
  }

  def `bind singleton with bind[X] as a service`: Unit = {
    val session = d.newSession

    val u1 = session.build[U1]
    val u2 = session.build[U2]

    u1.service.counter shouldBeTheSameInstanceAs u2.service.counter
    u1.service.counter.get() shouldBe 1
  }

  def `support overriding non-abstract singleton trait`: Unit = {
    val d = newDesign
      .bind[E].toSingleton
      .bind[NonAbstract].toSingletonOf[C]

    val session = d.newSession
    val e       = session.build[E]
    e.m.hello shouldBe "nice"
  }
} 
Example 59
Source File: BindLocalTest.scala    From airframe   with Apache License 2.0
package wvlet.airframe

import java.util.concurrent.atomic.AtomicInteger

import wvlet.airspec.AirSpec


class BindLocalTest extends AirSpec {
  scalaJsSupport

  import BindLocalTest._

  class LocalX(counter: AtomicInteger) extends AutoCloseable {
    override def close(): Unit = {
      counter.incrementAndGet()
    }
  }
  trait App {
    val counter = bind[AtomicInteger]
    val x       = bindLocal { new LocalX(counter) }
  }

  def `create a new local instance with a provider`: Unit = {
    val counter = new AtomicInteger()
    val d = newSilentDesign
      .bind[AtomicInteger].toInstance(counter)

    d.build[App] { a => counter.get() shouldBe 0 }
    counter.get() shouldBe 1
  }

  class Y

  def `create different local instances`: Unit = {
    trait App2 {
      val y0     = bind[Y]
      val yLocal = bindLocal { new Y }
    }

    val d = newSilentDesign.bind[Y].toSingleton
    // test 2
    d.build[App2] { a => a.y0 shouldNotBeTheSameInstanceAs a.yLocal }
  }

  trait LocalProviderTest {
    val x1 = bindLocal { d1: D1 => X(d1 = d1) }
    val x2 = bindLocal { (d1: D1, d2: D2) => X(d1 = d1, d2 = d2) }
    val x3 = bindLocal { (d1: D1, d2: D2, d3: D3) => X(d1 = d1, d2 = d2, d3 = d3) }
    val x4 = bindLocal { (d1: D1, d2: D2, d3: D3, d4: D4) => X(d1 = d1, d2 = d2, d3 = d3, d4 = d4) }
    val x5 = bindLocal { (d1: D1, d2: D2, d3: D3, d4: D4, d5: D5) => X(d1 = d1, d2 = d2, d3 = d3, d4 = d4, d5 = d5) }
  }

  def `support bindLocal with dependencies`: Unit = {
    val d = newSilentDesign
      .bind[D1].toInstance(D1(1))
      .bind[D2].toInstance(D2(2))
      .bind[D3].toInstance(D3(3))
      .bind[D4].toInstance(D4(4))
      .bind[D5].toInstance(D5(5))

    d.build[LocalProviderTest] { t =>
      t.x1 shouldBe X(D1(1))
      t.x2 shouldBe X(D1(1), D2(2))
      t.x3 shouldBe X(D1(1), D2(2), D3(3))
      t.x4 shouldBe X(D1(1), D2(2), D3(3), D4(4))
      t.x5 shouldBe X(D1(1), D2(2), D3(3), D4(4), D5(5))
    }
  }
}

object BindLocalTest {
  case class D1(v: Int)
  case class D2(v: Int)
  case class D3(v: Int)
  case class D4(v: Int)
  case class D5(v: Int)

  case class X(d1: D1 = D1(0), d2: D2 = D2(0), d3: D3 = D3(0), d4: D4 = D4(0), d5: D5 = D5(0))
} 
Example 60
Source File: FactoryBindingLifecycleTest.scala    From airframe   with Apache License 2.0
package wvlet.airframe
import java.util.concurrent.atomic.AtomicInteger

import javax.annotation.{PostConstruct, PreDestroy}
import wvlet.airspec.AirSpec
import wvlet.log.LogSupport

object FactoryBindingLifecycleTest {
  val startCounter  = collection.mutable.Map[Int, AtomicInteger]()
  val endCounter    = collection.mutable.Map[Int, AtomicInteger]()
  val threadCounter = new AtomicInteger()

  trait MyThread extends LogSupport {
    debug("hello MyThread")
    threadCounter.incrementAndGet()
  }

  trait MyClient extends LogSupport {
    val port      = bind[Int]
    val singleton = bind[MyThread]

    @PostConstruct
    def start: Unit = {
      debug(s"start client for ${port}")
      startCounter.getOrElseUpdate(port, new AtomicInteger()).incrementAndGet()
    }

    @PreDestroy
    def end: Unit = {
      debug(s"end client for ${port}")
      endCounter.getOrElseUpdate(port, new AtomicInteger()).incrementAndGet()
    }
  }

  trait ClientFactory {
    val factory = bindFactory[Int => MyClient]
  }
}


class FactoryBindingLifecycleTest extends AirSpec {
  import FactoryBindingLifecycleTest._

  def `run shutdown hooks`: Unit = {
    threadCounter.get() shouldBe 0
    newSilentDesign.build[ClientFactory] { f =>
      startCounter shouldBe empty
      endCounter shouldBe empty

      val c1 = f.factory(8081)
      startCounter(8081).get() shouldBe 1
      endCounter.get(8081) shouldBe empty

      val c2 = f.factory(8082)
      startCounter(8082).get() shouldBe 1
      endCounter.get(8082) shouldBe empty
    }

    startCounter(8081).get() shouldBe 1
    startCounter(8082).get() shouldBe 1
    endCounter(8081).get() shouldBe 1
    endCounter(8082).get() shouldBe 1

    threadCounter.get() shouldBe 1 // Generate the singleton MyThread only once
  }
} 
Example 61
Source File: AirSpecContextImpl.scala    From airframe   with Apache License 2.0
package wvlet.airspec.runner

import java.util.concurrent.atomic.AtomicInteger

import wvlet.airframe.Session
import wvlet.airframe.surface.Surface
import wvlet.airspec.spi.AirSpecContext
import wvlet.airspec.{AirSpecDef, AirSpecSpi}
import wvlet.log.LogSupport

import scala.language.experimental.macros


private[airspec] class AirSpecContextImpl(
    taskExecutor: AirSpecTaskRunner,
    val parentContext: Option[AirSpecContext],
    val currentSpec: AirSpecSpi,
    val testName: String = "<init>",
    val currentSession: Session
) extends AirSpecContext
    with LogSupport {
  private val childTaskCount = new AtomicInteger(0)

  override def hasChildTask: Boolean = {
    childTaskCount.get > 0
  }

  override protected[airspec] def runInternal(spec: AirSpecSpi, testDefs: Seq[AirSpecDef]): AirSpecSpi = {
    childTaskCount.incrementAndGet()
    taskExecutor.run(Some(this), spec, testDefs)
    spec
  }
  override protected[airspec] def runSingle(testDef: AirSpecDef): Unit = {
    childTaskCount.incrementAndGet()
    taskExecutor.runSingle(Some(this), currentSession, currentSpec, testDef, isLocal = true, design = testDef.design)
  }

  override protected def newSpec(specSurface: Surface): AirSpecSpi = {
    val spec: AirSpecSpi = currentSession.get(specSurface)
    // When the spec instance is an anonymous class, we need to find the real class name from the specSurface
    spec.setSpecName(specSurface.fullName)
    spec
  }
} 
Example 62
Source File: TestExtension.scala    From airframe   with Apache License 2.0
package wvlet.airspec

import java.util.concurrent.atomic.AtomicInteger

import javax.annotation.{PostConstruct, PreDestroy}
import wvlet.airframe.{Design, _}
import wvlet.log.LogSupport

case class MyServerConfig(name: String)

trait MyServer extends LogSupport {
  val config  = bind[MyServerConfig]
  val counter = bind[AtomicInteger]

  @PostConstruct
  def start: Unit = {
    debug(f"Starting ${config.name}: ${this.hashCode()}%x")
    counter.incrementAndGet()
  }

  @PreDestroy
  def stop: Unit = {
    debug(f"Stopping ${config.name}: ${this.hashCode()}%x")
  }
}


trait CustomSpec extends AirSpec with LogSupport {
  protected val serverLaunchCounter = new AtomicInteger(0)

  protected override def design: Design = {
    Design.newDesign
      .bind[MyServer].toSingleton
      .bind[MyServerConfig].toInstance(MyServerConfig("A"))
      .bind[AtomicInteger].toInstance(serverLaunchCounter)
  }
}

class MyServerSpec extends CustomSpec {
  // MyServer will be shared by all the test cases
  def test1(server: MyServer): Unit = {
    debug(s"run test1")
    assert(server.config.name == "A")
  }

  def test2(server: MyServer): Unit = {
    debug(s"run test2")
  }

  def test3(session: Session): Unit = {
    debug(s"run test3")
    val server = session.build[MyServer]
  }

  override protected def afterAll: Unit = {
    assert(serverLaunchCounter.get() == 1)
  }
}

class MyServer2Spec extends CustomSpec {
  protected override def localDesign: Design = {
    Design.newDesign
      .bind[MyServerConfig].toInstance(MyServerConfig("B"))
      // By adding this local design, the server becomes local to each test case
      .bind[MyServer].toSingleton
  }

  def test4(server: MyServer): Unit = {
    debug("run test4")
    assert(server.config.name == "B")
  }

  def test5(server: MyServer): Unit = {
    debug("run test5")
    assert(server.config.name == "B")
  }

  override protected def afterAll: Unit = {
    assert(serverLaunchCounter.get() == testDefinitions.size)
  }
} 
Example 63
Source File: SQLAnonymizer.scala    From airframe   with Apache License 2.0
package wvlet.airframe.sql.analyzer
import java.util.concurrent.atomic.AtomicInteger

import wvlet.log.LogSupport
import wvlet.airframe.sql.model._
import wvlet.airframe.sql.parser.{SQLGenerator, SQLParser}

class SQLAnonymizer(dict: Map[Expression, Expression]) {
  def run(sql: String): Unit = {
    val plan = SQLParser.parse(sql)

    val anonymizationRule: PartialFunction[Expression, Expression] = {
      case x if dict.contains(x) =>
        dict(x)
    }

    plan.transformExpressions(anonymizationRule)
  }
}


object SQLAnonymizer extends LogSupport {
  def anonymize(sql: String): String = {
    val plan           = SQLParser.parse(sql)
    val dict           = new DictBuilder().add(plan).build
    val anonymizedPlan = anonymize(plan, dict)
    SQLGenerator.print(anonymizedPlan)
  }

  def anonymize(plan: LogicalPlan, dict: Map[Expression, Expression]): LogicalPlan = {
    val anonymizationRule: PartialFunction[Expression, Expression] = {
      case x if dict.contains(x) =>
        dict(x)
    }
    // Target: Identifier, Literal, UnresolvedAttribute, Table
    plan.transformExpressions(anonymizationRule)
  }

  def buildAnonymizationDictionary(sql: Seq[String]): Map[Expression, Expression] = {
    debug("Building a token dictionary")
    val b = new DictBuilder()
    sql.foreach { x =>
      try {
        val plan = SQLParser.parse(x)
        b.add(plan)
      } catch {
        case e: Exception =>
          warn(e)
      }
    }
    b.build
  }

  import Expression._

  private class DictBuilder {
    val m                  = Map.newBuilder[Expression, Expression]
    val identifierTable    = new SymbolTable("i")
    var stringLiteralTable = new SymbolTable("s")
    var longLiteralTable   = new SymbolTable("l")
    var qnameTable         = new SymbolTable("t")

    def build = m.result()

    def add(plan: LogicalPlan): this.type = {
      // Target: Identifier, Literal, UnresolvedAttribute, Table (QName)
      plan.traverseExpressions {
        case i: Identifier =>
          m += i -> UnquotedIdentifier(identifierTable.lookup(i.value))
        case s: StringLiteral =>
          m += s -> StringLiteral(stringLiteralTable.lookup(s.value))
        case q: QName =>
          m += q -> QName(q.parts.map(qnameTable.lookup))
        case u: UnresolvedAttribute =>
          val v = UnresolvedAttribute(u.name.split("\\.").toSeq.map(qnameTable.lookup).mkString("."))
          m += u -> v
      }
      this
    }
  }

  private class SymbolTable(prefix: String) {
    private val count       = new AtomicInteger(0)
    private val symbolTable = collection.mutable.Map.empty[String, String]

    def lookup(token: String): String = {
      symbolTable.getOrElseUpdate(
        token, {
          val c = count.incrementAndGet()
          s"${prefix}${c}"
        }
      )
    }
  }
} 
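
The private SymbolTable above combines an AtomicInteger with a mutable map to hand out stable anonymous names per token. A standalone sketch of that renaming technique follows; the TokenRenamer name and the sample tokens are illustrative, not part of the original.

import java.util.concurrent.atomic.AtomicInteger
import scala.collection.concurrent.TrieMap

// Maps each distinct token to a stable synthetic name such as "t1", "t2", ...
class TokenRenamer(prefix: String) {
  private val counter = new AtomicInteger(0)
  private val table   = TrieMap.empty[String, String]

  def rename(token: String): String =
    table.getOrElseUpdate(token, s"$prefix${counter.incrementAndGet()}")
}

object TokenRenamerExample extends App {
  val r = new TokenRenamer("t")
  println(r.rename("orders"))    // t1
  println(r.rename("customers")) // t2
  println(r.rename("orders"))    // t1 again: the mapping is stable
}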
Example 64
Source File: TwitterSpec.scala    From interop-twitter   with Apache License 2.0
package zio.interop

import java.util.concurrent.atomic.AtomicInteger

import com.twitter.util.{ Await, Future, Promise }
import zio.{ Task, ZIO }
import zio.interop.twitter._
import zio.test._
import zio.test.Assertion._

import scala.util.{ Failure, Success, Try }

object TwitterSpec extends DefaultRunnableSpec {
  val runtime = runner.runtime

  override def spec =
    suite("TwitterSpec")(
      suite("Task.fromTwitterFuture")(
        testM("return failing `Task` if future failed.") {
          val error  = new Exception
          val future = Task(Future.exception[Int](error))
          val task   = Task.fromTwitterFuture(future).unit

          assertM(task.either)(isLeft(equalTo(error)))
        },
        testM("return successful `Task` if future succeeded.") {
          val value  = 10
          val future = Task(Future.value(value))
          val task   = Task.fromTwitterFuture(future).option

          assertM(task)(isSome(equalTo(value)))
        },
        testM("ensure future is interrupted together with task.") {
          val value = new AtomicInteger(0)

          val promise = new Promise[Unit] with Promise.InterruptHandler {
            override protected def onInterrupt(t: Throwable): Unit = setException(t)
          }

          val future = Task(promise.flatMap(_ => Future(value.incrementAndGet())))

          val task =
            (for {
              fiber <- Task.fromTwitterFuture(future).fork
              _     <- fiber.interrupt
              _     <- Task.effect(promise.setDone())
              a     <- fiber.await
            } yield a).fold(_ => false, exit => exit.toEither.isLeft)

          task.map(b => assert(b)(isTrue) && assert(value.get())(equalTo(0)))
        }
      ),
      suite("Runtime.unsafeRunToTwitterFuture")(
        test("return successful `Future` if Task evaluation succeeded.") {
          assert(Await.result(runtime.unsafeRunToTwitterFuture(Task.succeed(2))))(equalTo(2))
        },
        test("return failed `Future` if Task evaluation failed.") {
          val e      = new Throwable
          val task   = Task.fail(e).unit
          val result =
            Try(Await.result(runtime.unsafeRunToTwitterFuture(task))) match {
              case Failure(exception) => Some(exception)
              case Success(_)         => None
            }

          assert(result)(isSome(equalTo(e)))
        },
        testM("ensure Task evaluation is interrupted together with Future.") {
          val value                                  = new AtomicInteger(0)
          val ex                                     = new Exception
          val task: ZIO[Any, Throwable, Future[Int]] = for {
            promise <- zio.Promise.make[Throwable, Int]
            t        = promise.await.flatMap(_ => Task.effectTotal(value.incrementAndGet()))
            future   = runtime.unsafeRunToTwitterFuture(t)
            _        = future.raise(ex)
            _       <- promise.succeed(1)
          } yield future

          assertM(task.map(Await.result(_)).run)(isInterrupted).map(_ && assert(value.get)(equalTo(0)))
        }
      )
    )
} 
Example 65
Source File: UserResource.scala    From naptime   with Apache License 2.0
package resources

import java.util.concurrent.atomic.AtomicInteger
import javax.inject.Inject
import javax.inject.Singleton

import org.coursera.naptime.model.KeyFormat
import org.coursera.naptime.model.Keyed
import org.coursera.naptime.NaptimeModule
import org.coursera.naptime.Ok
import org.coursera.example.User
import org.coursera.naptime.courier.CourierFormats
import org.coursera.naptime.resources.TopLevelCollectionResource
import org.coursera.naptime.resources.RestActionHelpers
import play.api.libs.json.OFormat


  def create() = Rest
      .jsonBody[User]
      .create { context =>
        val user = context.body
        val id = userStore.create(user)

        // Could return Ok(Keyed(id, None)) if we want to return 201 Created,
        // with an empty body. Prefer returning the updated body, however.
        Ok(Keyed(id, Some(user)))
      }
}

class ResourceModule extends NaptimeModule {
  override def configure(): Unit = {
    bindResource[UsersResource]
    bind[UserStore].to[UserStoreImpl]
  }
}


trait UserStore {
  def get(id: Int): Option[User]
  def create(user: User): Int
}

class UserStoreImpl extends UserStore {
  @volatile
  var userStore = Map.empty[Int, User]
  val nextId = new AtomicInteger(0)


  def get(id: Int) = userStore.get(id)

  def create(user: User): Int = {
    val id = nextId.incrementAndGet()
    userStore = userStore + (id -> user)
    id
  }


}

class UserBanManager {
  @volatile
  var bannedUsers = Set.empty[Int]
} 
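
This store (and the similar ones in the following examples) uses one pattern: an immutable Map kept in a @volatile field for lock-free reads, plus an AtomicInteger for id generation. Below is a condensed sketch of that pattern with illustrative names; unlike the originals it also synchronizes the map update so concurrent create calls cannot drop each other's writes.

import java.util.concurrent.atomic.AtomicInteger

class InMemoryStore[A] {
  @volatile private var items = Map.empty[Int, A]
  private val nextId = new AtomicInteger(0)

  // The AtomicInteger guarantees unique ids even when create() is called from several threads.
  def create(value: A): Int = {
    val id = nextId.incrementAndGet()
    synchronized { items = items + (id -> value) }
    id
  }

  def get(id: Int): Option[A] = items.get(id)
  def all: Map[Int, A]        = items
}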
Example 66
Source File: InstructorStore.scala    From naptime   with Apache License 2.0
package stores

import java.util.concurrent.atomic.AtomicInteger
import javax.inject.Singleton

import org.coursera.example.Instructor
import org.coursera.naptime.model.Keyed

@Singleton
class InstructorStore {
  @volatile
  var instructorStore = Map.empty[Int, Instructor]
  val nextId = new AtomicInteger(0)

  instructorStore = instructorStore + (
    1 -> Instructor(
      partnerId = "stanford",
      name = "Andrew Ng",
      photoUrl = ""),
    2 -> Instructor(
      partnerId = "ucsd",
      name = "Barb Oakley",
      photoUrl = ""))


  def get(id: Int) = instructorStore.get(id)

  def create(instructor: Keyed[Int, Instructor]): Unit = {
    instructorStore = instructorStore + (instructor.key -> instructor.value)
  }

  def all() = instructorStore
} 
Example 67
Source File: PartnerStore.scala    From naptime   with Apache License 2.0
package stores

import java.util.concurrent.atomic.AtomicInteger
import javax.inject.Singleton

import org.coursera.example.Instructor
import org.coursera.example.Partner
import org.coursera.naptime.model.Keyed

@Singleton
class PartnerStore {
  @volatile
  var partnerStore = Map.empty[String, Partner]
  val nextId = new AtomicInteger(0)

  partnerStore = partnerStore + (
    "stanford" -> Partner(
      courseIds = List("ml"),
      instructorIds = List(1),
      name = "Stanford University",
      homepage = ""),
    "ucsd" -> Partner(
      courseIds = List("lhtl"),
      instructorIds = List(2),
      name = "UCSD",
      homepage = ""))


  def get(id: String) = partnerStore.get(id)

  def create(partner: Keyed[String, Partner]): Unit = {
    partnerStore = partnerStore + (partner.key -> partner.value)
  }

  def all() = partnerStore
} 
Example 68
Source File: CourseStore.scala    From naptime   with Apache License 2.0
package stores

import java.util.concurrent.atomic.AtomicInteger
import javax.inject.Singleton

import com.linkedin.data.DataMap
import org.coursera.courier.templates.DataTemplates.DataConversion
import org.coursera.example.AnyData
import org.coursera.example.CertificateCourseMetadata
import org.coursera.example.Course
import org.coursera.example.DegreeCourseMetadata
import org.coursera.naptime.model.Keyed

import scala.collection.JavaConverters._

@Singleton
class CourseStore {
  @volatile
  var courseStore = Map.empty[String, Course]
  val nextId = new AtomicInteger(0)

  courseStore = courseStore + (
    "ml" -> Course(
      instructorIds = List(1),
      partnerId = "stanford",
      slug = "machine-learning",
      name = "Machine Learning",
      description = Some("Machine learning is the science of getting computers to act without being explicitly programmed."),
      extraData = AnyData.build(new DataMap(
        Map("firstModuleId" -> "wrh7vtpj").asJava),
        DataConversion.SetReadOnly),
      courseMetadata = CertificateCourseMetadata(
        certificateInstructorIds = List(1))),
    "lhtl" -> Course(
      instructorIds = List(2),
      partnerId = "ucsd",
      slug = "learning-how-to-learn",
      name = "Learning How to Learn",
      description = None,
      extraData = AnyData.build(new DataMap(
        Map("recentEnrollments" -> new Integer(1000)).asJava),
        DataConversion.SetReadOnly),
      courseMetadata = DegreeCourseMetadata(
        degreeCertificateName = "iMBA",
        degreeInstructorIds = List(1, 2))))

  def get(id: String) = courseStore.get(id)

  def create(course: Keyed[String, Course]): Unit = {
    courseStore = courseStore + (course.key -> course.value)
  }

  def all() = courseStore
} 
Example 69
Source File: UserResource.scala    From naptime   with Apache License 2.0
package resources

import java.util.concurrent.atomic.AtomicInteger
import javax.inject.Inject
import javax.inject.Singleton

import akka.stream.Materializer
import org.coursera.naptime.model.KeyFormat
import org.coursera.naptime.model.Keyed
import org.coursera.naptime.Ok
import org.coursera.example.User
import org.coursera.naptime.courier.CourierFormats
import org.coursera.naptime.resources.TopLevelCollectionResource
import play.api.libs.json.OFormat

import scala.concurrent.ExecutionContext


@Singleton
class UsersResource @Inject() (
    userStore: UserStore,
    banManager: UserBanManager)
    (implicit override val executionContext: ExecutionContext,
    override val materializer: Materializer)
  extends TopLevelCollectionResource[Int, User] {

  override def resourceName = "users"
  override def resourceVersion = 1  // optional; defaults to 1
  implicit val fields = Fields.withDefaultFields(  // default field projection
    "id", "name", "email")

  override def keyFormat: KeyFormat[KeyType] = KeyFormat.intKeyFormat
  override implicit def resourceFormat: OFormat[User] = CourierFormats.recordTemplateFormats[User]

  def get(id: Int) = Nap.get { context =>
    OkIfPresent(id, userStore.get(id))
  }

  def multiGet(ids: Set[Int]) = Nap.multiGet { context =>
    Ok(userStore.all()
      .filter(user => ids.contains(user._1))
      .map { case (id, user) => Keyed(id, user) }.toList)
  }

  def getAll() = Nap.getAll { context =>
    Ok(userStore.all().map { case (id, user) => Keyed(id, user) }.toList)
  }

  def create() = Nap
    .jsonBody[User]
    .create { context =>
      val user = context.body
      val id = userStore.create(user)

      // Could return Ok(Keyed(id, None)) if we want to return 201 Created,
      // with an empty body. Prefer returning the updated body, however.
      Ok(Keyed(id, Some(user)))
    }

  def email(email: String) = Nap.finder { context =>
    Ok(userStore.all()
      .filter(_._2.email == email)
      .map { case (id, user) => Keyed(id, user) }.toList)
  }

}


trait UserStore {
  def get(id: Int): Option[User]
  def create(user: User): Int
  def all(): Map[Int, User]
}

@Singleton
class UserStoreImpl extends UserStore {
  @volatile
  var userStore = Map.empty[Int, User]
  val nextId = new AtomicInteger(0)

  def get(id: Int) = userStore.get(id)

  def create(user: User): Int = {
    val id = nextId.incrementAndGet()
    userStore = userStore + (id -> user)
    id
  }

  def all() = userStore

}

class UserBanManager {
  @volatile
  var bannedUsers = Set.empty[Int]
} 
Example 70
Source File: TemperatureMachineThreadFactory.scala    From temperature-machine   with Apache License 2.0
package bad.robot.temperature.task

import java.util.concurrent.ThreadFactory
import java.util.concurrent.atomic.AtomicInteger

object TemperatureMachineThreadFactory {
  def apply(name: String, daemon: Boolean = true): ThreadFactory = {
    new ThreadFactory() {
      val count = new AtomicInteger
      def newThread(runnable: Runnable): Thread = {
        val thread = new Thread(runnable, s"temperature-$name-" + count.incrementAndGet())
        thread.setDaemon(daemon)
        thread
      }
    }
  }
} 
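
A possible usage sketch for the factory above; the pool size, task, and timings are assumptions. Every thread it creates is a daemon named temperature-display-1, temperature-display-2, and so on.

import java.util.concurrent.{Executors, TimeUnit}
import bad.robot.temperature.task.TemperatureMachineThreadFactory

object SchedulerExample extends App {
  private val scheduler =
    Executors.newScheduledThreadPool(1, TemperatureMachineThreadFactory("display"))

  // Print a placeholder reading every second, then stop after five seconds.
  scheduler.scheduleAtFixedRate(() => println("tick"), 0L, 1L, TimeUnit.SECONDS)
  Thread.sleep(5000)
  scheduler.shutdown()
}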
Example 71
Source File: SchedulerTest.scala    From temperature-machine   with Apache License 2.0
package bad.robot.temperature.task

import java.net.{Socket => _}
import java.util.concurrent._
import java.util.concurrent.atomic.AtomicInteger

import bad.robot.temperature.task.Scheduler.ScheduledExecutorServiceOps
import org.specs2.mutable.Specification

import scala.concurrent.duration._

class SchedulerTest extends Specification {

  val errorHandler: Throwable => Runnable => Unit = _ => _ => ()

  "Exceptions aren't propagated when wrapped" >> {
    val handler = Scheduler.wrapWithErrorHandler(() => throw new Exception(), errorHandler)
    handler.run must not(throwA[Exception])
  }

  "Executes at fixed rate" >> {
    val scheduler = new ScheduledExecutorServiceOps(Executors.newSingleThreadScheduledExecutor())
    val counter = new AtomicInteger(0)
    scheduler.schedule(1 milliseconds, errorHandler, () => {
      counter.getAndIncrement()
      throw new Exception()
    })

    counter.get() must be_>(2).eventually
  }

  "Executes at fixed rate without stopping when exceptions are thrown" >> {
    val scheduler = new ScheduledExecutorServiceOps(Executors.newSingleThreadScheduledExecutor())
    val counter = new AtomicInteger(0)
    scheduler.schedule(1 milliseconds, errorHandler, () => {
      counter.getAndIncrement()
      throw new Exception()
    })

    counter.get() must be_>(2).eventually
  }

} 
Example 72
Source File: SerializedSuspendableExecutionContext.scala    From perf_tester   with Apache License 2.0
package akka.util

import java.util.concurrent.atomic.AtomicInteger
import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal
import scala.annotation.{ tailrec, switch }
import akka.dispatch.AbstractNodeQueue

private[akka] object SerializedSuspendableExecutionContext {
  final val Off = 0
  final val On = 1
  final val Suspended = 2

  def apply(throughput: Int)(implicit context: ExecutionContext): SerializedSuspendableExecutionContext =
    new SerializedSuspendableExecutionContext(throughput)(context match {
      case s: SerializedSuspendableExecutionContext ⇒ s.context
      case other ⇒ other
    })
}


  final def size(): Int = count()

  override final def toString: String = (state.get: @switch) match {
    case 0 ⇒ "Off"
    case 1 ⇒ "On"
    case 2 ⇒ "Off & Suspended"
    case 3 ⇒ "On & Suspended"
  }
} 
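
Most of this class is elided in the excerpt; what remains shows its Off/On/Suspended flags packed into a single AtomicInteger (hence the four states printed by toString). A standalone sketch of that bit-flag technique with compareAndSet retries follows; the names are assumptions, not Akka internals.

import java.util.concurrent.atomic.AtomicInteger
import scala.annotation.tailrec

// Two independent flags packed into one AtomicInteger: bit 0 = running, bit 1 = suspended.
class FlagState {
  private final val On        = 1
  private final val Suspended = 2
  private val state = new AtomicInteger(0)

  @tailrec final def suspend(): Unit = {
    val s = state.get
    if (!state.compareAndSet(s, s | Suspended)) suspend()
  }

  @tailrec final def resume(): Unit = {
    val s = state.get
    if (!state.compareAndSet(s, s & ~Suspended)) resume()
  }

  def isOn: Boolean        = (state.get & On) != 0
  def isSuspended: Boolean = (state.get & Suspended) != 0
}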
Example 73
Source File: IAMClient.scala    From play-zhewbacca   with MIT License
package org.zalando.zhewbacca

import java.util.concurrent.atomic.AtomicInteger
import javax.inject.{Inject, Singleton}

import akka.actor.ActorSystem
import akka.pattern.CircuitBreaker
import org.zalando.zhewbacca.metrics.PluggableMetrics
import play.api.http.Status._
import play.api.libs.ws.WSClient
import play.api.{Configuration, Logger}

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

import atmos.dsl._
import atmos.dsl.Slf4jSupport._


@Singleton
class IAMClient @Inject() (
    config: Configuration,
    pluggableMetrics: PluggableMetrics,
    ws: WSClient,
    actorSystem: ActorSystem,
    implicit val ec: ExecutionContext) extends (OAuth2Token => Future[Option[TokenInfo]]) {

  val logger: Logger = Logger("security.IAMClient")

  val METRICS_BREAKER_CLOSED = 0
  val METRICS_BREAKER_OPEN = 1
  val circuitStatus = new AtomicInteger()

  pluggableMetrics.gauge {
    circuitStatus.get
  }

  val authEndpoint: String = config.getOptional[String]("authorisation.iam.endpoint").getOrElse(
    throw new IllegalArgumentException("Authorisation: IAM endpoint is not configured"))

  val breakerMaxFailures: Int = config.getOptional[Int]("authorisation.iam.cb.maxFailures").getOrElse(
    throw new IllegalArgumentException("Authorisation: Circuit Breaker max failures is not configured"))

  val breakerCallTimeout: FiniteDuration = config.getOptional[FiniteDuration]("authorisation.iam.cb.callTimeout").getOrElse(
    throw new IllegalArgumentException("Authorisation: Circuit Breaker call timeout is not configured"))

  val breakerResetTimeout: FiniteDuration = config.getOptional[FiniteDuration]("authorisation.iam.cb.resetTimeout").getOrElse(
    throw new IllegalArgumentException("Authorisation: Circuit Breaker reset timeout is not configured"))

  val breakerMaxRetries: TerminationPolicy = config.getOptional[Int]("authorisation.iam.maxRetries").getOrElse(
    throw new IllegalArgumentException("Authorisation: Circuit Breaker max retries is not configured")).attempts

  val breakerRetryBackoff: FiniteDuration = config.getOptional[FiniteDuration]("authorisation.iam.retry.backoff.duration").getOrElse(
    throw new IllegalArgumentException("Authorisation: Circuit Breaker the duration of exponential backoff is not configured"))

  lazy val breaker: CircuitBreaker = new CircuitBreaker(
    actorSystem.scheduler,
    breakerMaxFailures,
    breakerCallTimeout,
    breakerResetTimeout).onHalfOpen {
    circuitStatus.set(METRICS_BREAKER_OPEN)
  }.onOpen {
    circuitStatus.set(METRICS_BREAKER_OPEN)
  }.onClose {
    circuitStatus.set(METRICS_BREAKER_CLOSED)
  }

  implicit val retryRecover = retryFor { breakerMaxRetries } using {
    exponentialBackoff { breakerRetryBackoff }
  } monitorWith {
    logger.logger onRetrying logNothing onInterrupted logWarning onAborted logError
  }

  override def apply(token: OAuth2Token): Future[Option[TokenInfo]] = {
    breaker.withCircuitBreaker(
      pluggableMetrics.timing(
        retryAsync(s"Calling $authEndpoint") {
          ws.url(authEndpoint).withQueryStringParameters(("access_token", token.value)).get()
        })).map { response =>
        response.status match {
          case OK => Some(response.json.as[TokenInfo])
          case _ => None
        }
      } recover {
        case NonFatal(e) =>
          logger.error(s"Exception occurred during validation of token '${token.toSafeString}': $e")
          None // consider any exception as invalid token
      }
  }

} 
Example 74
Source File: DirectDataMultiThreadedInjector.scala    From SparkOnKudu   with Apache License 2.0
package org.kududb.spark.demo.gamer.cdc

import java.text.SimpleDateFormat
import java.util.Random
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{TimeUnit, Executors}

import org.kududb.client.{Operation, PartialRow, KuduClient}
import org.kududb.spark.demo.gamer.aggregates.GamerDataGenerator

object DirectDataMultiThreadedInjector {
  val simpleDateFormat = new SimpleDateFormat("MM,dd,yyyy")
  val random = new Random
  def main(args:Array[String]): Unit = {

    if (args.length == 0) {
      println("<kuduMaster> <tableName> <numberOfRecords> <numberOfThreads>")
      return
    }

    val kuduMaster = args(0)
    val tableName = args(1)
    val numberOfRecords = args(2).toInt
    val executor = Executors.newFixedThreadPool(args(3).toInt)
    val numberOfGamers = args(4).toInt
    val sleepTime = args(5).toInt

    val kuduClient = new KuduClient.KuduClientBuilder(kuduMaster).build()
    val leftToRun = new AtomicInteger()

    for (i <- 0 to numberOfRecords) {
      leftToRun.incrementAndGet()
      executor.execute(new ApplyNewRecordRunnable(GamerDataGenerator.makeNewGamerRecord(numberOfGamers),
      kuduClient, tableName, leftToRun))
      println("Summited:" + i)

      Thread.sleep(sleepTime)
    }


    val startTime = System.currentTimeMillis()
    executor.shutdown() // without shutdown(), awaitTermination can never report completion
    while (!executor.awaitTermination(10000, TimeUnit.SECONDS)) {
      val newTime = System.currentTimeMillis()
      println("> Still Waiting: {Time:" + (newTime - startTime) + ", LeftToRun:" + leftToRun + "}" )
    }


    kuduClient.close()


  }
} 
Example 75
Source File: ApplyNewRecordRunnable.scala    From SparkOnKudu   with Apache License 2.0
package org.kududb.spark.demo.gamer.cdc

import java.text.SimpleDateFormat
import java.util.concurrent.atomic.AtomicInteger

import org.kududb.client.{Operation, PartialRow, KuduClient}
import org.kududb.spark.demo.gamer.GamerEvent

class ApplyNewRecordRunnable(val gameEvent: GamerEvent,
                              val kuduClient: KuduClient,
                              val tableName: String,
                              val leftToRun:AtomicInteger) extends Runnable{
  override def run(): Unit = {
    val table = kuduClient.openTable(tableName)
    val session = kuduClient.newSession()
    val simpleDateFormat = new SimpleDateFormat("MM,dd,yyyy")

    val record = gameEvent

    val pr = new PartialRow(table.getSchema)
    pr.addString(0, record.gamerId)
    pr.addString(1, "")
    val scannerRows = kuduClient.newScannerBuilder(table).lowerBound(pr).limit(1).build().nextRows()
    val op:Operation = if (scannerRows.hasNext) {
      println(" >> had next")
      val oldRow = scannerRows.next()

      val oldRecordUpdateOp = table.newInsert()

      val row = oldRecordUpdateOp.getRow
      row.addString("gamer_id", oldRow.getString("gamer_id"))
      row.addString("eff_to", simpleDateFormat.format(record.lastTimePlayed))
      row.addString("eff_from", oldRow.getString("eff_from"))
      row.addLong("last_time_played", oldRow.getLong("last_time_played"))
      row.addInt("games_played", oldRow.getInt("games_played"))
      row.addInt("games_won", oldRow.getInt("games_won"))
      row.addInt("oks", oldRow.getInt("oks"))
      row.addInt("deaths", oldRow.getInt("deaths"))
      row.addInt("damage_given", oldRow.getInt("damage_given"))
      row.addInt("damage_taken", oldRow.getInt("damage_taken"))
      row.addInt("max_oks_in_one_game", oldRow.getInt("max_oks_in_one_game"))
      row.addInt("max_deaths_in_one_game", oldRow.getInt("max_deaths_in_one_game"))

      session.apply(oldRecordUpdateOp)
      table.newUpdate()
    } else {
      table.newInsert()
    }

    val row = op.getRow
    row.addString("gamer_id", record.gamerId)
    row.addString("eff_to", "")
    row.addString("eff_from", simpleDateFormat.format(record.lastTimePlayed))
    row.addLong("last_time_played", record.lastTimePlayed)
    row.addInt("games_played", record.gamesPlayed)
    row.addInt("games_won", record.gamesWon)
    row.addInt("oks", record.oks)
    row.addInt("deaths", record.deaths)
    row.addInt("damage_given", record.damageGiven)
    row.addInt("damage_taken", record.damageTaken)
    row.addInt("max_oks_in_one_game", record.maxOksInOneGame)
    row.addInt("max_deaths_in_one_game", record.maxDeathsInOneGame)

    session.apply(op)

    session.flush()
    leftToRun.decrementAndGet()
    println(" >> finished Submit")
  }
} 
Example 76
Source File: BaseSessionSpec.scala    From incubator-livy   with Apache License 2.0
package org.apache.livy.repl

import java.util.Properties
import java.util.concurrent.atomic.AtomicInteger

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps

import org.apache.spark.SparkConf
import org.json4s._
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.concurrent.Eventually._

import org.apache.livy.LivyBaseUnitTestSuite
import org.apache.livy.rsc.RSCConf
import org.apache.livy.rsc.driver.{Statement, StatementState}
import org.apache.livy.sessions._

abstract class BaseSessionSpec(kind: Kind)
    extends FlatSpec with Matchers with LivyBaseUnitTestSuite {

  implicit val formats = DefaultFormats

  private val rscConf = new RSCConf(new Properties()).set(RSCConf.Entry.SESSION_KIND, kind.toString)

  private val sparkConf = new SparkConf()

  protected def execute(session: Session)(code: String): Statement = {
    val id = session.execute(code)
    eventually(timeout(30 seconds), interval(100 millis)) {
      val s = session.statements(id)
      s.state.get() shouldBe StatementState.Available
      s
    }
  }

  protected def withSession(testCode: Session => Any): Unit = {
    val stateChangedCalled = new AtomicInteger()
    val session =
      new Session(rscConf, sparkConf, None, { _ => stateChangedCalled.incrementAndGet() })
    try {
      // Session's constructor should fire an initial state change event.
      stateChangedCalled.intValue() shouldBe 1
      Await.ready(session.start(), 30 seconds)
      assert(session.state === SessionState.Idle)
      // There should be at least 1 state change event fired when session transits to idle.
      stateChangedCalled.intValue() should (be > 1)
      testCode(session)
    } finally {
      session.close()
    }
  }

  it should "start in the starting or idle state" in {
    val session = new Session(rscConf, sparkConf)
    val future = session.start()
    try {
      Await.ready(future, 60 seconds)
      session.state should (equal (SessionState.Starting) or equal (SessionState.Idle))
    } finally {
      session.close()
    }
  }

  it should "eventually become the idle state" in withSession { session =>
    session.state should equal (SessionState.Idle)
  }

} 
Example 77
Source File: MockServices.scala    From lagom   with Apache License 2.0
package com.lightbend.lagom.scaladsl.server.mocks

import java.util.concurrent.atomic.AtomicInteger

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.lightbend.lagom.scaladsl.api.Service.pathCall
import com.lightbend.lagom.scaladsl.api.Service.named
import com.lightbend.lagom.scaladsl.api.Service.restCall
import com.lightbend.lagom.scaladsl.api.Descriptor
import com.lightbend.lagom.scaladsl.api.Service
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.api.deser.DefaultExceptionSerializer
import com.lightbend.lagom.scaladsl.api.transport.Method
import play.api.Environment
import play.api.Mode

object PathProvider {
  val PATH = "/some-path"
}


trait SimpleStreamedService extends Service {
  override def descriptor: Descriptor =
    named("simple-streamed")
      .withCalls(pathCall(PathProvider.PATH, streamed _))
      .withExceptionSerializer(new DefaultExceptionSerializer(Environment.simple(mode = Mode.Dev)))

  def streamed(): ServiceCall[Source[String, NotUsed], Source[String, NotUsed]]
} 
Example 78
Source File: MockFilters.scala    From lagom   with Apache License 2.0
package com.lightbend.lagom.scaladsl.server.mocks

import java.util.concurrent.atomic.AtomicInteger

import akka.stream.Materializer
import com.lightbend.lagom.scaladsl.api.transport.Forbidden
import com.lightbend.lagom.scaladsl.api.transport.HeaderFilter
import com.lightbend.lagom.scaladsl.api.transport.RequestHeader
import com.lightbend.lagom.scaladsl.api.transport.ResponseHeader
import play.api.mvc.Filter
import play.api.mvc.Result
import play.api.mvc.{ RequestHeader => PlayRequestHeader }
import play.api.mvc.{ ResponseHeader => PlayResponseHeader }

import scala.concurrent.ExecutionContext
import scala.concurrent.Future

// ------------------------------------------------------------------------------------------------------------
// This is a Play filter that adds a header on the request and then adds a header on the response. Headers may only
// be added once so invoking this Filter twice breaks the test.
class VerboseHeaderPlayFilter(atomicInt: AtomicInteger, mt: Materializer)(implicit ctx: ExecutionContext)
    extends Filter {
  import VerboseHeaderPlayFilter._

  implicit override def mat: Materializer = mt

  override def apply(f: (PlayRequestHeader) => Future[Result])(rh: PlayRequestHeader): Future[Result] = {
    ensureMissing(rh.headers.toSimpleMap, addedOnRequest)
    val richerHeaders = rh.headers.add(addedOnRequest -> atomicInt.incrementAndGet().toString)
    val richerRequest = rh.withHeaders(richerHeaders)
    f(richerRequest).map {
      case result =>
        ensureMissing(result.header.headers, addedOnResponse)
        result.withHeaders(addedOnResponse -> atomicInt.incrementAndGet().toString)
    }
  }

  private def ensureMissing(headers: Map[String, String], key: String) =
    if (headers.get(key).isDefined) throw Forbidden(s"Header $key already exists.")
}

object VerboseHeaderPlayFilter {
  val addedOnRequest  = "addedOnRequest-play"
  val addedOnResponse = "addedOnResponse-play"
}

// ------------------------------------------------------------------------------------------------------------
// This is a Lagom HeaderFilter that adds a header on the request and then adds a header on the response.
class VerboseHeaderLagomFilter(atomicInteger: AtomicInteger) extends HeaderFilter {
  override def transformServerRequest(request: RequestHeader): RequestHeader =
    request.addHeader(VerboseHeaderLagomFilter.addedOnRequest, atomicInteger.incrementAndGet().toString)

  override def transformServerResponse(response: ResponseHeader, request: RequestHeader): ResponseHeader =
    response.addHeader(VerboseHeaderLagomFilter.addedOnResponse, atomicInteger.incrementAndGet().toString)

  override def transformClientResponse(response: ResponseHeader, request: RequestHeader): ResponseHeader = ???
  override def transformClientRequest(request: RequestHeader): RequestHeader                             = ???
}

object VerboseHeaderLagomFilter {
  val addedOnRequest  = "addedOnRequest-Lagom"
  val addedOnResponse = "addedOnResponse-Lagom"
} 
Example 79
Source File: JobWaiter.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.scheduler

import java.util.concurrent.atomic.AtomicInteger

import scala.concurrent.{Future, Promise}

import org.apache.spark.internal.Logging


  def cancel() {
    dagScheduler.cancelJob(jobId)
  }

  override def taskSucceeded(index: Int, result: Any): Unit = {
    // resultHandler call must be synchronized in case resultHandler itself is not thread safe.
    synchronized {
      resultHandler(index, result.asInstanceOf[T])
    }
    if (finishedTasks.incrementAndGet() == totalTasks) {
      jobPromise.success(())
    }
  }

  override def jobFailed(exception: Exception): Unit = {
    if (!jobPromise.tryFailure(exception)) {
      logWarning("Ignore failure", exception)
    }
  }

} 
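
The excerpt omits the fields this class relies on (finishedTasks, totalTasks, jobPromise, resultHandler). Its core idea, counting completions with an AtomicInteger and completing a Promise when the last task reports in, can be sketched on its own; all names below are assumptions.

import java.util.concurrent.atomic.AtomicInteger
import scala.concurrent.{Future, Promise}

// Completes `done` exactly once, after `total` calls to taskFinished().
class CompletionLatch(total: Int) {
  private val finished = new AtomicInteger(0)
  private val done     = Promise[Unit]()

  def taskFinished(): Unit =
    if (finished.incrementAndGet() == total) done.success(())

  def future: Future[Unit] = done.future
}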
Example 80
Source File: LoadTest.scala    From ws_to_kafka   with MIT License
package com.pkinsky

import java.util.concurrent.atomic.AtomicInteger

import akka.http.scaladsl.model.ws.{InvalidUpgradeResponse, WebsocketUpgradeResponse, WebsocketRequest, TextMessage}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.Uri
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Keep, Sink, RunnableGraph, Source}
import play.api.libs.json.Json

import scala.concurrent.{Future, Await}
import scala.concurrent.duration._
import scala.language.postfixOps

object LoadTest extends App with AppContext {
  val clients = 256
  val eventsPerClient = 256

  val eventsSent = new AtomicInteger(0)

  def testData(clientId: String): Source[Event, Unit] =
    Source.unfoldInf(1) { n =>
      val event = Event(s"msg number $n", clientId, System.currentTimeMillis())
      (n + 1, event)
    }.take(eventsPerClient).throttle(1, 100 millis, 1, ThrottleMode.Shaping)

  def wsClient(clientId: String): RunnableGraph[Future[WebsocketUpgradeResponse]] =
    testData(clientId).map(e => TextMessage.Strict(Json.toJson(e).toString))
      .map { x => eventsSent.incrementAndGet(); x }
      .viaMat(Http().websocketClientFlow(WebsocketRequest(Uri(s"ws://localhost:$port/ws"))))(Keep.right).to(Sink.ignore)

  //set up websocket connections
  (1 to clients).foreach { id =>
    wsClient(s"client $id").run()
  }

  //watch kafka for messages sent via websocket
  val kafkaConsumerGraph: RunnableGraph[Future[Seq[Event]]] =
    kafka.consume[Event](eventTopic, "group_new")
      .take(clients * eventsPerClient).takeWithin(2 minutes)
      .toMat(Sink.seq)(Keep.right)

  val res = Await.result(kafkaConsumerGraph.run, 5 minutes)
  println(s"sent ${eventsSent.get()} events total")
  println(s"res size: ${res.length}")
} 
Example 81
Source File: KubernetesLease.scala    From akka-management   with Apache License 2.0
package akka.coordination.lease.kubernetes

import java.util.concurrent.atomic.{ AtomicBoolean, AtomicInteger }

import akka.actor.ExtendedActorSystem
import akka.coordination.lease.{ LeaseException, LeaseSettings, LeaseTimeoutException }
import akka.coordination.lease.scaladsl.Lease
import akka.coordination.lease.kubernetes.LeaseActor._
import akka.coordination.lease.kubernetes.internal.KubernetesApiImpl
import akka.dispatch.ExecutionContexts
import akka.pattern.AskTimeoutException
import akka.util.{ ConstantFun, Timeout }

import scala.concurrent.Future

object KubernetesLease {
  val configPath = "akka.coordination.lease.kubernetes"
  private val leaseCounter = new AtomicInteger(1)
}

class KubernetesLease private[akka] (system: ExtendedActorSystem, leaseTaken: AtomicBoolean, settings: LeaseSettings)
    extends Lease(settings) {

  private val k8sSettings = KubernetesSettings(settings.leaseConfig, settings.timeoutSettings)
  private val k8sApi = new KubernetesApiImpl(system, k8sSettings)
  private val leaseActor = system.systemActorOf(
    LeaseActor.props(k8sApi, settings, leaseTaken),
    s"kubernetesLease${KubernetesLease.leaseCounter.incrementAndGet}-${settings.leaseName}-${settings.ownerName}"
  )

  def this(leaseSettings: LeaseSettings, system: ExtendedActorSystem) =
    this(system, new AtomicBoolean(false), leaseSettings)

  import akka.pattern.ask
  import system.dispatcher

  private implicit val timeout: Timeout = Timeout(settings.timeoutSettings.operationTimeout)

  override def checkLease(): Boolean = leaseTaken.get()

  override def release(): Future[Boolean] = {
    // replace with transform once 2.11 dropped
    (leaseActor ? Release())
      .flatMap {
        case LeaseReleased       => Future.successful(true)
        case InvalidRequest(msg) => Future.failed(new LeaseException(msg))
      }(ExecutionContexts.sameThreadExecutionContext)
      .recoverWith {
        case _: AskTimeoutException =>
          Future.failed(new LeaseTimeoutException(
            s"Timed out trying to release lease [${settings.leaseName}, ${settings.ownerName}]. It may still be taken."))
      }
  }

  override def acquire(): Future[Boolean] = {
    acquire(ConstantFun.scalaAnyToUnit)

  }
  override def acquire(leaseLostCallback: Option[Throwable] => Unit): Future[Boolean] = {
    // replace with transform once 2.11 dropped
    (leaseActor ? Acquire(leaseLostCallback))
      .flatMap {
        case LeaseAcquired       => Future.successful(true)
        case LeaseTaken          => Future.successful(false)
        case InvalidRequest(msg) => Future.failed(new LeaseException(msg))
      }
      .recoverWith {
        case _: AskTimeoutException =>
          Future.failed[Boolean](new LeaseTimeoutException(
            s"Timed out trying to acquire lease [${settings.leaseName}, ${settings.ownerName}]. It may still be taken."))
      }(ExecutionContexts.sameThreadExecutionContext)
  }
} 
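
The leaseCounter above exists only so that each lease actor gets a unique name. That naming idiom on its own, as a sketch with assumed names:

import java.util.concurrent.atomic.AtomicInteger

object WorkerNames {
  private val counter = new AtomicInteger(1)

  // Produces "prefix-1", "prefix-2", ... uniquely across the whole JVM.
  def next(prefix: String): String = s"$prefix-${counter.getAndIncrement()}"
}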
Example 82
Source File: UniqueNameGenerator.scala    From scredis   with Apache License 2.0
package scredis.util

import scala.collection.mutable.{ Map => MMap }

import java.util.concurrent.atomic.AtomicInteger

object UniqueNameGenerator {
  private val ids = MMap[String, AtomicInteger]()
  
  def getUniqueName(name: String): String = {
    ids.synchronized {
      val counter = ids.getOrElseUpdate(name, new AtomicInteger(0))
      counter.incrementAndGet()
      if (counter.get == 1) {
        name
      } else {
        s"$name-${counter.get}"
      }
    }
  }
  
  def getNumberedName(name: String): String = {
    val counter = ids.synchronized {
      ids.getOrElseUpdate(name, new AtomicInteger(0))
    }
    s"$name-${counter.incrementAndGet()}"
  }
  
} 
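
A usage sketch for the generator above (expected output noted in comments): getUniqueName returns the bare name only the first time it is asked for, while getNumberedName always appends a counter.

import scredis.util.UniqueNameGenerator

object NamingExample extends App {
  println(UniqueNameGenerator.getUniqueName("listener"))  // listener
  println(UniqueNameGenerator.getUniqueName("listener"))  // listener-2
  println(UniqueNameGenerator.getNumberedName("worker"))  // worker-1
  println(UniqueNameGenerator.getNumberedName("worker"))  // worker-2
}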
Example 83
Source File: File.scala    From docspell   with GNU General Public License v3.0
package docspell.common

import java.io.IOException
import java.nio.file._
import java.nio.file.attribute.BasicFileAttributes
import java.util.concurrent.atomic.AtomicInteger

import scala.jdk.CollectionConverters._

import cats.effect._
import cats.implicits._
import fs2.Stream

object File {

  def mkDir[F[_]: Sync](dir: Path): F[Path] =
    Sync[F].delay(Files.createDirectories(dir))

  def mkTempDir[F[_]: Sync](parent: Path, prefix: String): F[Path] =
    mkDir(parent).map(p => Files.createTempDirectory(p, prefix))

  def mkTempFile[F[_]: Sync](
      parent: Path,
      prefix: String,
      suffix: Option[String] = None
  ): F[Path] =
    mkDir(parent).map(p => Files.createTempFile(p, prefix, suffix.orNull))

  def deleteDirectory[F[_]: Sync](dir: Path): F[Int] =
    Sync[F].delay {
      val count = new AtomicInteger(0)
      Files.walkFileTree(
        dir,
        new SimpleFileVisitor[Path]() {
          override def visitFile(
              file: Path,
              attrs: BasicFileAttributes
          ): FileVisitResult = {
            Files.deleteIfExists(file)
            count.incrementAndGet()
            FileVisitResult.CONTINUE
          }
          override def postVisitDirectory(dir: Path, e: IOException): FileVisitResult =
            Option(e) match {
              case Some(ex) => throw ex
              case None =>
                Files.deleteIfExists(dir)
                FileVisitResult.CONTINUE
            }
        }
      )
      count.get
    }

  def exists[F[_]: Sync](file: Path): F[Boolean] =
    Sync[F].delay(Files.exists(file))

  def existsNonEmpty[F[_]: Sync](file: Path, minSize: Long = 0): F[Boolean] =
    Sync[F].delay(Files.exists(file) && Files.size(file) > minSize)

  def deleteFile[F[_]: Sync](file: Path): F[Unit] =
    Sync[F].delay(Files.deleteIfExists(file)).map(_ => ())

  def delete[F[_]: Sync](path: Path): F[Int] =
    if (Files.isDirectory(path)) deleteDirectory(path)
    else deleteFile(path).map(_ => 1)

  def withTempDir[F[_]: Sync](parent: Path, prefix: String): Resource[F, Path] =
    Resource.make(mkTempDir(parent, prefix))(p => delete(p).map(_ => ()))

  def listFiles[F[_]: Sync](pred: Path => Boolean, dir: Path): F[List[Path]] =
    Sync[F].delay {
      val javaList =
        Files.list(dir).filter(p => pred(p)).collect(java.util.stream.Collectors.toList())
      javaList.asScala.toList.sortBy(_.getFileName.toString)
    }

  def readAll[F[_]: Sync: ContextShift](
      file: Path,
      blocker: Blocker,
      chunkSize: Int
  ): Stream[F, Byte] =
    fs2.io.file.readAll(file, blocker, chunkSize)

  def readText[F[_]: Sync: ContextShift](file: Path, blocker: Blocker): F[String] =
    readAll[F](file, blocker, 8192).through(fs2.text.utf8Decode).compile.foldMonoid
} 
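
deleteDirectory above uses an AtomicInteger to count the files removed by its FileVisitor. The same counting idea without the cats-effect wrapper, as a plain sketch (not docspell API):

import java.nio.file.{FileVisitResult, Files, Path, SimpleFileVisitor}
import java.nio.file.attribute.BasicFileAttributes
import java.util.concurrent.atomic.AtomicInteger

object FileCount {
  // Walks a directory tree and returns how many regular files it visited.
  def countFiles(root: Path): Int = {
    val count = new AtomicInteger(0)
    Files.walkFileTree(root, new SimpleFileVisitor[Path] {
      override def visitFile(file: Path, attrs: BasicFileAttributes): FileVisitResult = {
        count.incrementAndGet()
        FileVisitResult.CONTINUE
      }
    })
    count.get
  }
}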
Example 84
Source File: Actor.scala    From effpi   with MIT License
// Effpi - verified message-passing programs in Dotty
// Copyright 2019 Alceste Scalas and Elias Benussi
// Released under the MIT License: https://opensource.org/licenses/MIT
package effpi.actor

import java.util.concurrent.atomic.AtomicInteger

import scala.concurrent.{Future, Promise, Await}

import effpi.channel.{Channel, InChannel, OutChannel, QueueChannel}
import effpi.process.{ProcVar, Process, In}
import effpi.system._
import scala.concurrent.duration.Duration

abstract class Mailbox[+A] extends InChannel[A]

private class MailboxImpl[A](c: InChannel[A]) extends Mailbox[A] {
  override val synchronous: Boolean = c.synchronous

  override val name: Option[String] = c.name
  
  override def receive()(implicit timeout: Duration) = c.receive()(timeout)

  override def poll() = c.poll()

  override def enqueue(i: (Map[ProcVar[_], (_) => Process],
                           List[() => Process],
                           In[InChannel[Any], Any, Any => Process])) = c.enqueue(i)

  override def dequeue() = c.dequeue()

  override def waiting = c.waiting
}

abstract class ActorRef[-A] extends OutChannel[A] {
  def ! = send
}

private class ActorRefImpl[A](c: OutChannel[A])
                             (maybeDual: Option[Mailbox[Any]]) extends ActorRef[A] {
  override val synchronous: Boolean = c.synchronous

  override val name: Option[String] = c.name

  override def send(v: A) = c.send(v)

  override val dualIn: Mailbox[Any] = maybeDual match {
    case None => new MailboxImpl(c.dualIn)
    case Some(d) => d
  }

  override def create[B](synchronous: Boolean,
                         name: Option[String] = None): Channel[B] = {
    c.create[B](synchronous, name)
  }
}

protected[actor] abstract class ActorChannel[A] extends Channel[A] {
  val mbox: Mailbox[A]
  val ref: ActorRef[A]
}

private class ActorChannelImpl[A](override val mbox: Mailbox[A],
                                  override val ref: ActorRef[A])
  extends ActorChannel[A] {
    assert(mbox.synchronous == ref.synchronous)
    override val synchronous: Boolean = mbox.synchronous

    assert(mbox.name == ref.name)
    override val name: Option[String] = mbox.name
}

object ActorChannel {
  
  def ask[Req, Resp](srv: ActorRef[Req], query: ActorRef[Resp] => Req)
                    (implicit ps: ProcessSystem, timeout: Duration): Resp = {
    import effpi.process.{dsl => pdsl}

    val respPromise = Promise[Resp]()
    val respFuture = respPromise.future

    val pipe = ActorChannel[Resp]()
    val askProcess = {
      pdsl.send(srv, query(pipe.ref)) >>
      pdsl.receive(pipe.mbox) { msg: Resp =>
        respPromise.success(msg)
        pdsl.nil
      }
    }
    askProcess.spawn(ps)
    Await.result(respFuture, Duration.Inf)
  }
} 
Example 85
Source File: ThreadUtil.scala    From coursier   with Apache License 2.0
package coursier.cache.internal

import java.util.concurrent.{ExecutorService, LinkedBlockingQueue, ScheduledExecutorService, ScheduledThreadPoolExecutor, ThreadFactory, ThreadPoolExecutor, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger

object ThreadUtil {

  private val poolNumber = new AtomicInteger(1)

  def daemonThreadFactory(): ThreadFactory = {

    val poolNumber0 = poolNumber.getAndIncrement()

    val threadNumber = new AtomicInteger(1)

    new ThreadFactory {
      def newThread(r: Runnable) = {
        val threadNumber0 = threadNumber.getAndIncrement()
        val t = new Thread(r, s"coursier-pool-$poolNumber0-thread-$threadNumber0")
        t.setDaemon(true)
        t.setPriority(Thread.NORM_PRIORITY)
        t
      }
    }
  }

  def fixedThreadPool(size: Int): ExecutorService = {

    val factory = daemonThreadFactory()

    // 1 min keep alive, so that threads get stopped a bit after resolution / downloading is done
    val executor = new ThreadPoolExecutor(
      size, size,
      1L, TimeUnit.MINUTES,
      new LinkedBlockingQueue[Runnable],
      factory
    )
    executor.allowCoreThreadTimeOut(true)
    executor
  }

  def fixedScheduledThreadPool(size: Int): ScheduledExecutorService = {

    val factory = daemonThreadFactory()

    val executor = new ScheduledThreadPoolExecutor(size, factory)
    executor.setKeepAliveTime(1L, TimeUnit.MINUTES)
    executor.allowCoreThreadTimeOut(true)
    executor
  }

  def withFixedThreadPool[T](size: Int)(f: ExecutorService => T): T = {

    var pool: ExecutorService = null
    try {
      pool = fixedThreadPool(size)
      f(pool)
    } finally {
      if (pool != null)
        pool.shutdown()
    }
  }

} 
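
A usage sketch for withFixedThreadPool above (the tasks are illustrative): the helper creates the pool, passes it to the block, and shuts it down afterwards.

import java.util.concurrent.Callable
import coursier.cache.internal.ThreadUtil

object PoolExample extends App {
  val results = ThreadUtil.withFixedThreadPool(2) { pool =>
    val futures = (1 to 4).map(i => pool.submit(new Callable[Int] { def call(): Int = i * i }))
    futures.map(_.get())
  }
  println(results) // Vector(1, 4, 9, 16)
}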
Example 86
Source File: FunctionLogsErrorShovel.scala    From graphcool-framework   with Apache License 2.0
package cool.graph.worker.helpers

import java.util.concurrent.atomic.AtomicInteger

import cool.graph.bugsnag.BugSnaggerImpl
import cool.graph.messagebus.Conversions.ByteUnmarshaller
import cool.graph.messagebus.queue.rabbit.RabbitQueue
import cool.graph.worker.payloads.{JsonConversions, LogItem}
import cool.graph.worker.utils.Utils
import org.joda.time.DateTime
import play.api.libs.json.{JsObject, Json}

import scala.concurrent.{Await, Future}
import scala.util.{Failure, Success, Try}


object FunctionLogsErrorShovel extends App {
  import JsonConversions._

  import scala.concurrent.ExecutionContext.Implicits.global
  import scala.concurrent.duration._

  case class OldLogItem(
      id: String,
      projectId: String,
      functionId: String,
      requestId: String,
      status: String,
      duration: Long,
      timestamp: String,
      message: String
  ) {
    def toLogItem: LogItem = {
      status match {
        case "SUCCESS" => LogItem(id, projectId, functionId, requestId, status, duration, timestamp, Json.parse(message).as[JsObject])
        case "FAILURE" => LogItem(id, projectId, functionId, requestId, status, duration, timestamp, Json.obj("error" -> message))
      }
    }
  }

  implicit val bugsnagger       = BugSnaggerImpl("")
  implicit val oldLogItemFormat = Json.format[OldLogItem]

  val amqpUri = sys.env("RABBITMQ_URI")

  val faultTolerantUnmarshaller: ByteUnmarshaller[LogItem] = { bytes =>
    Try { logItemUnmarshaller(bytes) }.orElse(fromOldLogItemFormat(bytes)) match {
      case Success(logItem) => logItem.copy(timestamp = correctLogTimestamp(logItem.timestamp))
      case Failure(err)     => throw err
    }
  }

  val marshaller      = JsonConversions.logItemMarshaller
  val targetPublisher = RabbitQueue.publisher[LogItem](amqpUri, "function-logs")
  val counter         = new AtomicInteger(0)

  val consumeFn = { msg: LogItem =>
    println(s"[FunctionLogsErrorShovel][${counter.incrementAndGet()}]] Re-processing: $msg")
    targetPublisher.publish(msg)
    Future.successful(())
  }

  val plainErrConsumer =
    RabbitQueue.plainConsumer[LogItem](amqpUri, "function-logs-error", "function-logs", autoDelete = false)(bugsnagger, faultTolerantUnmarshaller)

  def fromOldLogItemFormat(bytes: Array[Byte]): Try[LogItem] = Try { Json.parse(bytes).as[OldLogItem].toLogItem }

  def correctLogTimestamp(timestamp: String): String = {
    val dt     = DateTime.parse(timestamp)
    val newTst = Utils.msqlDateFormatter.print(dt)

    println(s"[FunctionLogsErrorShovel]\t$timestamp\t->\t$newTst")
    newTst
  }

  plainErrConsumer.withConsumer(consumeFn)

  println("Press enter to terminate...")
  scala.io.StdIn.readLine()
  println("Terminating.")

  plainErrConsumer.shutdown
  targetPublisher.shutdown
} 
Example 87
Source File: GraphLens.scala    From Raphtory   with Apache License 2.0 5 votes vote down vote up
package com.raphtory.core.analysis.API.GraphLenses

import akka.actor.ActorContext
import com.raphtory.core.analysis.API.ManagerCount
import com.raphtory.core.analysis.API.entityVisitors.VertexVisitor
import com.raphtory.core.model.graphentities.Vertex
import com.raphtory.core.storage.EntityStorage
import java.util.concurrent.atomic.AtomicInteger

import com.raphtory.core.components.PartitionManager.Workers.ViewJob

import scala.collection.parallel.mutable.ParTrieMap

abstract class GraphLens(jobID: ViewJob, superstep: Int, storage: EntityStorage, managerCount: ManagerCount) {
  private val messages = new AtomicInteger(0)

  protected var voteCount                             = new AtomicInteger(0)
  def superStep()                                = superstep
  def getVerticesSet(): ParTrieMap[Long, Vertex] = storage.vertices

  def getVerticesWithMessages(): ParTrieMap[Long, Vertex] = storage.vertices.filter {
    case (id: Long, vertex: Vertex) => vertex.multiQueue.getMessageQueue(jobID, superstep).nonEmpty
  }

  def recordMessage(sourceID: Long, vertexID: Long, data: Any) =
    messages.incrementAndGet()

  def getMessages() = messages.get

  def getVertex(v: Vertex)(implicit context: ActorContext, managerCount: ManagerCount): VertexVisitor =
    new VertexVisitor(v, jobID, superstep, this)

  def getTotalVerticesSet() =
    storage.vertices.keySet

  def vertexVoted() =
    voteCount.incrementAndGet()

  def checkVotes(workerID: Int): Boolean =
    storage.vertices.size == voteCount.get
} 
Example 88
Source File: ExecutorProxy.scala    From ncdbg   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.programmaticallyspeaking.ncd.infra

import java.lang.reflect.{InvocationHandler, InvocationTargetException, Method}
import java.util.concurrent.Executor
import java.util.concurrent.atomic.AtomicInteger

import org.slf4s.Logging

import scala.concurrent.duration._
import scala.concurrent.{Await, Future, Promise, TimeoutException}
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}

class ExecutorProxy(executor: Executor) {
  import scala.collection.JavaConverters._

  def createFor[A <: AnyRef : ClassTag](instance: A): A = {
    val clazz = implicitly[ClassTag[A]].runtimeClass
    java.lang.reflect.Proxy.newProxyInstance(clazz.getClassLoader, Array(clazz), new Handler(instance)).asInstanceOf[A]
  }

  class Handler(instance: AnyRef) extends InvocationHandler with Logging {
    import scala.concurrent.ExecutionContext.Implicits._
    private val className = instance.getClass.getName

    private val idGen = new AtomicInteger(0)
    private var awaitingCalls = Map[Int, String]()

    override def invoke(proxy: scala.Any, method: Method, args: Array[AnyRef]): AnyRef = {
      val resultPromise = Promise[AnyRef]()

      val before = System.nanoTime()

      val id = idGen.getAndIncrement()
      val argss = Option(args).getOrElse(Array.empty)
      val desc = s"$method(${argss.mkString(", ")})[$id]"
      log.trace(s"Waiting to execute: $desc")

      // Snapshot of waiting calls prior to submitting to the executor
      val waitingCallsAtEntry = awaitingCalls

      executor.execute(() => {
        log.trace(s"Execute: $id")
        Try(method.invoke(instance, args: _*)) match {
          case Success(f: Future[_]) => resultPromise.completeWith(f.asInstanceOf[Future[AnyRef]])
          case Success(result) => resultPromise.success(result)
          case Failure(t: InvocationTargetException) => resultPromise.failure(t.getCause)
          case Failure(t) => resultPromise.failure(t)
        }
      })

      resultPromise.future.onComplete { _ =>
        val methodName = method.getName
        val millis = (System.nanoTime() - before).nanos.toMillis
        log.trace(s"Elapsed time for $className.$methodName = $millis ms")
      }

      if (classOf[Future[_]].isAssignableFrom(method.getReturnType)) resultPromise.future
      else {
        // Update with this call
        awaitingCalls += (id -> desc)
        //TODO: Configurable timeout
        try Await.result(resultPromise.future, 30.seconds) catch {
          case _: TimeoutException =>
            val other = waitingCallsAtEntry.values
            val sb = new StringBuilder(s"Timed out waiting for '$desc' to complete. Calls at entry: ${other.mkString("'", "', '", "'")}. Stack:\n")
            appendStackTraces(sb)
            log.debug(sb.toString())
            throw new TimeoutException(s"Timed out waiting for '$desc' to complete.")
        } finally {
          // Done with this call
          awaitingCalls -= id
          log.trace(s"Done: $id")
        }
      }
    }

    private def appendStackTraces(sb: StringBuilder): Unit = {
      Thread.getAllStackTraces.asScala.foreach { tup =>
        sb.append("\n> THREAD ").append(tup._1.getName).append("\n")
        tup._2.foreach(ste => sb.append("  ").append(ste).append("\n"))
      }
    }
  }
} 
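A rough usage sketch, assuming the classes above are on the classpath; the Calculator trait and the object name are invented for illustration and are not part of ncdbg. It shows the point of the proxy: every call on the returned instance is marshalled onto the wrapped executor, and the caller blocks (up to the 30-second timeout) until the result arrives.

import java.util.concurrent.Executors

import com.programmaticallyspeaking.ncd.infra.ExecutorProxy

trait Calculator { def add(a: Int, b: Int): Int }

object ExecutorProxySketch extends App {
  val executor = Executors.newSingleThreadExecutor()
  val proxy    = new ExecutorProxy(executor)

  // All calls through `calc` run on the single executor thread.
  val calc: Calculator = proxy.createFor[Calculator](new Calculator {
    def add(a: Int, b: Int): Int = a + b
  })

  println(calc.add(2, 3)) // 5
  executor.shutdown()
}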
Example 89
Source File: MultiThreadingTest.scala    From ncdbg   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.programmaticallyspeaking.ncd.nashorn
import java.io.{BufferedReader, InputStreamReader}
import java.util.concurrent.TimeoutException
import java.util.concurrent.atomic.AtomicInteger

import com.programmaticallyspeaking.ncd.host._
import com.programmaticallyspeaking.ncd.messaging.Observer
import com.programmaticallyspeaking.ncd.testing.{SharedInstanceActorTesting, UnitTest}
import jdk.nashorn.api.scripting.NashornScriptEngineFactory
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.exceptions.TestFailedException
import org.slf4s.Logging

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future, Promise}

trait MultiThreadingTestFixture extends UnitTest with Logging with SharedInstanceActorTesting with VirtualMachineLauncher with ScalaFutures with FairAmountOfPatience with Eventually {
  override val scriptExecutor: ScriptExecutorBase = MultiThreadedScriptExecutor
  override implicit val executionContext: ExecutionContext = ExecutionContext.global
}

class MultiThreadingTest extends MultiThreadingTestFixture {
  def location(ln: Int) = ScriptLocation(ln, None)

  "Breakpoint requests from other threads should be ignore in a paused state" in {
    val scriptAddedPromise = Promise[Script]()
    val hitBreakpointPromise = Promise[String]()
    val breakpointCounter = new AtomicInteger()
    val host = getHost
    observeScriptEvents(new Observer[ScriptEvent] {

      override def onNext(item: ScriptEvent): Unit = item match {
        case ScriptAdded(script) =>
          scriptAddedPromise.success(script)
        case hb: HitBreakpoint =>
          breakpointCounter.incrementAndGet()
          hitBreakpointPromise.trySuccess("")
        case _ =>
      }

      override def onError(error: Throwable): Unit = {}

      override def onComplete(): Unit = {}
    })

    whenReady(scriptAddedPromise.future) { script =>
      val scriptLocation = eventually {
        host.getBreakpointLocations(ScriptIdentity.fromId(script.id), location(1), None).headOption.getOrElse(fail(s"No line numbers for script ${script.id}"))
      }
      host.setBreakpoint(ScriptIdentity.fromURL(script.url), scriptLocation, BreakpointOptions.empty)

      try {
        whenReady(hitBreakpointPromise.future) { _ =>
          // Ugly, but wait for a while to see if the counter increases over 1 (which it shouldn't).
          Thread.sleep(200)
          breakpointCounter.get() should be(1)
        }
      } catch {
        case t: TestFailedException if t.getMessage().contains("timeout") =>
          val progress = summarizeProgress()
          throw new TimeoutException("Timed out: " + progress)
      }
    }
  }
}

object MultiThreadedScriptExecutor extends App with ScriptExecutorBase {
  println("MultiThreadedScriptExecutor starting. Java version: " + System.getProperty("java.version"))
  val scriptEngine = new NashornScriptEngineFactory().getScriptEngine("--no-syntax-extensions")
  val reader = new BufferedReader(new InputStreamReader(System.in))
  println(Signals.ready)
  waitForSignal(Signals.go)

  // Used a compiled script here before, stopped working with JDK 10
  var src =
    """(function () {
      |  return Math.floor(5.5);
      |})();
    """.stripMargin

  implicit val ec = ExecutionContext.global

  val futures = (1 to 5).map { _ =>
    Future {
      while (true) {
        scriptEngine.eval(src)
      }
    }
  }

  Await.result(Future.sequence(futures), 30.seconds)
} 
Example 90
Source File: RemovalListenerSpec.scala    From scaffeine   with Apache License 2.0 5 votes vote down vote up
package com.github.blemale.scaffeine

import java.util.concurrent.atomic.AtomicInteger

import com.github.benmanes.caffeine.cache.RemovalCause
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

class RemovalListenerSpec extends AnyWordSpec with Matchers {

  class StubListener extends ((String, String, RemovalCause) => Unit) {
    val callCounter = new AtomicInteger

    override def apply(
        key: String,
        value: String,
        cause: RemovalCause
    ): Unit = {
      val _ = callCounter.incrementAndGet()
    }
  }

  "Cache" should {
    "call removal listener on enties eviction" in {
      val listener = new StubListener
      val cache =
        Scaffeine()
          .executor(DirectExecutor)
          .removalListener(listener)
          .build[String, String]()

      cache.put("foo", "bar")
      cache.invalidate("foo")

      listener.callCounter.get should be(1)
    }
  }

} 
Example 91
Source File: PortManager.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}


case class PortManager(initialPort: Int, step: Int) {
  def getPort(index: Int): Int = {
    initialPort + step * (index - 1)
  }
}

case object CassandraPortManager {
  val jmxPortManager = PortManager(7199, 2)
  val dmx4jPortManager = PortManager(8989, 1)
}

case object ElasticsearchPortManager {
  val jmxPortManager = PortManager(7200, 2)
  val httpPortManager = PortManager(9201, 1)
  val transportPortManager = PortManager(9301, 1)
}

case object BgPortManager {
  val jmxPortManager = PortManager(7196, 1)
  val monitorPortManager = PortManager(8050, 1)
}

case object CtrlPortManager {
  val monitorPortManager = PortManager(8000, 1)
  val jmxPortManager = PortManager(7192, 1)
}

case object WebServicePortManager {
  val jmxPortManager = PortManager(7194, 1)
  val playHttpPortManager = PortManager(9000, 1)
  val monitorPortManager = PortManager(8010, 1)
}

case object CwPortManager {
  val monitorPortManager = PortManager(8030, 1)
}

case object DcPortManager {
  val monitorPortManager = PortManager(8040, 1)
  val jmxPortManager = PortManager(7193, 1)
}

case object KafkaPortManager {
  val jmxPortManager = PortManager(7191, 1)
}

object PortManagers {
  val cas = CassandraPortManager
  val es = ElasticsearchPortManager
  val bg = BgPortManager
  val ws = WebServicePortManager
  val ctrl = CtrlPortManager
  val cw = CwPortManager
  val dc = DcPortManager
  val kafka = KafkaPortManager
} 
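To make the arithmetic concrete, a small sketch (the object name is invented) of how per-instance ports fall out of getPort, which computes initialPort + step * (index - 1):

object PortManagerSketch extends App {
  // Cassandra JMX: initial port 7199, step 2 -> 7199, 7201, 7203 for instances 1..3.
  (1 to 3).foreach { i =>
    println(s"cassandra-$i jmx port: ${PortManagers.cas.jmxPortManager.getPort(i)}")
  }

  // Web service Play HTTP: initial port 9000, step 1 -> instance 2 gets 9001.
  println(PortManagers.ws.playHttpPortManager.getPort(2)) // 9001
}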
Example 92
Source File: JobWaiter.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.scheduler

import java.util.concurrent.atomic.AtomicInteger

import scala.concurrent.{Future, Promise}

import org.apache.spark.internal.Logging


private[spark] class JobWaiter[T](
    dagScheduler: DAGScheduler,
    val jobId: Int,
    totalTasks: Int,
    resultHandler: (Int, T) => Unit)
  extends JobListener with Logging {

  private val finishedTasks = new AtomicInteger(0)

  // If the job is finished, this will be its result. In the case of 0 task jobs (e.g. zero
  // partition RDDs), we set the jobResult directly to JobSucceeded.
  private val jobPromise: Promise[Unit] =
    if (totalTasks == 0) Promise.successful(()) else Promise()

  def jobFinished: Boolean = jobPromise.isCompleted

  def completionFuture: Future[Unit] = jobPromise.future

  def cancel() {
    dagScheduler.cancelJob(jobId)
  }

  override def taskSucceeded(index: Int, result: Any): Unit = {
    // resultHandler call must be synchronized in case resultHandler itself is not thread safe.
    synchronized {
      resultHandler(index, result.asInstanceOf[T])
    }
    if (finishedTasks.incrementAndGet() == totalTasks) {
      jobPromise.success(())
    }
  }

  override def jobFailed(exception: Exception): Unit = {
    if (!jobPromise.tryFailure(exception)) {
      logWarning("Ignore failure", exception)
    }
  }

} 
Example 93
Source File: H2Dialect.scala    From quill   with Apache License 2.0 5 votes vote down vote up
package io.getquill

import io.getquill.idiom.StatementInterpolator._
import java.util.concurrent.atomic.AtomicInteger

import io.getquill.ast.{ Ast, OnConflict }
import io.getquill.context.CanReturnField
import io.getquill.context.sql.idiom.PositionalBindVariables
import io.getquill.context.sql.idiom.SqlIdiom
import io.getquill.context.sql.idiom.ConcatSupport
import io.getquill.util.Messages.fail

trait H2Dialect
  extends SqlIdiom
  with PositionalBindVariables
  with ConcatSupport
  with CanReturnField {

  private[getquill] val preparedStatementId = new AtomicInteger

  override def prepareForProbing(string: String) =
    s"PREPARE p${preparedStatementId.incrementAndGet.toString.token} AS $string}"

  override def astTokenizer(implicit astTokenizer: Tokenizer[Ast], strategy: NamingStrategy): Tokenizer[Ast] =
    Tokenizer[Ast] {
      case c: OnConflict => c.token
      case ast           => super.astTokenizer.token(ast)
    }

  implicit def conflictTokenizer(implicit astTokenizer: Tokenizer[Ast], strategy: NamingStrategy): Tokenizer[OnConflict] = {
    import OnConflict._
    def tokenizer(implicit astTokenizer: Tokenizer[Ast]) =
      Tokenizer[OnConflict] {
        case OnConflict(i, NoTarget, Ignore) => stmt"${astTokenizer.token(i)} ON CONFLICT DO NOTHING"
        case _                               => fail("Only onConflictIgnore upsert is supported in H2 (v1.4.200+).")
      }

    tokenizer(super.astTokenizer)
  }
}

object H2Dialect extends H2Dialect 
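The AtomicInteger here only has to hand out distinct prepared-statement names for probing. A stripped-down sketch of that pattern outside of quill (object and method names invented):

import java.util.concurrent.atomic.AtomicInteger

object PreparedNameSketch extends App {
  private val preparedStatementId = new AtomicInteger

  def prepareForProbing(sql: String): String =
    s"PREPARE p${preparedStatementId.incrementAndGet()} AS $sql"

  println(prepareForProbing("SELECT 1")) // PREPARE p1 AS SELECT 1
  println(prepareForProbing("SELECT 2")) // PREPARE p2 AS SELECT 2
}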
Example 94
Source File: PostgresDialect.scala    From quill   with Apache License 2.0 5 votes vote down vote up
package io.getquill

import java.util.concurrent.atomic.AtomicInteger

import io.getquill.ast._
import io.getquill.context.CanReturnClause
import io.getquill.context.sql.idiom._
import io.getquill.idiom.StatementInterpolator._

trait PostgresDialect
  extends SqlIdiom
  with QuestionMarkBindVariables
  with ConcatSupport
  with OnConflictSupport
  with CanReturnClause {

  override def astTokenizer(implicit astTokenizer: Tokenizer[Ast], strategy: NamingStrategy): Tokenizer[Ast] =
    Tokenizer[Ast] {
      case ListContains(ast, body) => stmt"${body.token} = ANY(${ast.token})"
      case c: OnConflict           => conflictTokenizer.token(c)
      case ast                     => super.astTokenizer.token(ast)
    }

  override implicit def operationTokenizer(implicit astTokenizer: Tokenizer[Ast], strategy: NamingStrategy): Tokenizer[Operation] =
    Tokenizer[Operation] {
      case UnaryOperation(StringOperator.`toLong`, ast) => stmt"${scopedTokenizer(ast)}::bigint"
      case UnaryOperation(StringOperator.`toInt`, ast)  => stmt"${scopedTokenizer(ast)}::integer"
      case operation                                    => super.operationTokenizer.token(operation)
    }

  private[getquill] val preparedStatementId = new AtomicInteger

  override def prepareForProbing(string: String) = {
    var i = 0
    val query = string.flatMap(x => if (x != '?') s"$x" else {
      i += 1
      s"$$$i"
    })
    s"PREPARE p${preparedStatementId.incrementAndGet.toString.token} AS $query"
  }
}

object PostgresDialect extends PostgresDialect 
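Note that the AtomicInteger only supplies the unique statement name; the '?' placeholders are rewritten to PostgreSQL-style positional parameters with a plain local counter. A standalone sketch of just that rewrite (not quill API, names invented):

object PostgresProbingSketch extends App {
  // Mirrors the rewrite in prepareForProbing above: each '?' becomes $1, $2, ...
  def positional(sql: String): String = {
    var i = 0
    sql.flatMap { ch =>
      if (ch != '?') ch.toString
      else { i += 1; s"$$$i" }
    }
  }

  println(positional("SELECT * FROM person WHERE age > ? AND name = ?"))
  // SELECT * FROM person WHERE age > $1 AND name = $2
}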
Example 95
Source File: OkTestClient.scala    From quill   with Apache License 2.0 5 votes vote down vote up
package io.getquill.context.finagle.mysql

import com.twitter.concurrent.AsyncStream
import com.twitter.finagle.mysql
import com.twitter.util.{ Future, Time }
import java.util.concurrent.atomic.AtomicInteger
import com.twitter.finagle.mysql.Transactions
import com.twitter.finagle.mysql.Session
import com.twitter.finagle.mysql.Client

class OkTestClient extends mysql.Client with mysql.Transactions {
  val methodCount = new AtomicInteger

  val ok = mysql.OK(0, 0, 0, 0, "")

  override def query(sql: String): Future[mysql.Result] = {
    methodCount.incrementAndGet()
    Future(ok)
  }

  override def select[T](sql: String)(f: mysql.Row => T): Future[Seq[T]] = {
    methodCount.incrementAndGet()
    Future(Seq.empty)
  }
  override def prepare(sql: String): mysql.PreparedStatement = {
    methodCount.incrementAndGet()
    new mysql.PreparedStatement {
      override def apply(params: mysql.Parameter*): Future[mysql.Result] = Future(ok)
    }
  }
  override def cursor(sql: String): mysql.CursoredStatement = {
    methodCount.incrementAndGet()
    new mysql.CursoredStatement {
      override def apply[T](rowsPerFetch: Int, params: mysql.Parameter*)(f: mysql.Row => T): Future[mysql.CursorResult[T]] = Future {
        new mysql.CursorResult[T] {
          override def stream: AsyncStream[T] = AsyncStream.empty
          override def close(deadline: Time): Future[Unit] = Future.Unit
        }
      }
    }
  }

  override def ping(): Future[Unit] = {
    methodCount.incrementAndGet()
    Future.Unit
  }

  override def transaction[T](f: mysql.Client => Future[T]): Future[T] = {
    f(this)
  }
  override def transactionWithIsolation[T](isolationLevel: mysql.IsolationLevel)(f: mysql.Client => Future[T]): Future[T] = {
    f(this)
  }

  override def close(deadline: Time): Future[Unit] = Future.Unit

  def session[T](f: Client with Transactions with Session => Future[T]): Future[T] = {
    ???
  }
} 
Example 96
Source File: JVMObjectTracker.scala    From seahorse   with Apache License 2.0 5 votes vote down vote up
// scalastyle:off

import java.util.concurrent.atomic.AtomicInteger

import scala.collection.concurrent.TrieMap

import org.slf4j.{Logger, LoggerFactory}

private[r] object JVMObjectTracker {
  @transient
  protected lazy val logger: Logger = LoggerFactory.getLogger(getClass.getName)
  private[this] val objMap = new TrieMap[String, Object]
  private[this] val objCounter = new AtomicInteger(0)

  def getObject(id: String): Object = {
    logger.info(s"Get object at $id")
    objMap(id)
  }

  def get(id: String): Option[Object] = {
    logger.info(s"Get object at $id")
    objMap.get(id)
  }

  def put(obj: Object): String = {
    val objId = objCounter.getAndIncrement.toString
    val objName = obj.getClass.getName
    logger.info(s"Puts $objName at $objId ")
    objMap.put(objId, obj)
    objId
  }

  def remove(id: String): Option[Object] = {
    logger.info(s"Removed $id")
    objMap.remove(id)
  }

} 
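A short usage sketch (the object name is invented, and the private[r] modifier means real callers must live in the same package): ids are handed out by the AtomicInteger starting at "0".

object JVMObjectTrackerSketch {
  def demo(): Unit = {
    val id1 = JVMObjectTracker.put("some payload")  // "0"
    val id2 = JVMObjectTracker.put(Vector(1, 2, 3)) // "1"

    println(JVMObjectTracker.get(id2)) // Some(Vector(1, 2, 3))
    JVMObjectTracker.remove(id1)
    println(JVMObjectTracker.get(id1)) // None
  }
}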
Example 97
Source File: StandardThreadStartManagerIntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.api.lowlevel.threads

import java.util.concurrent.atomic.AtomicInteger

import com.sun.jdi.event.ThreadStartEvent
import org.scaladebugger.api.lowlevel.events.EventType._
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class StandardThreadStartManagerIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("StandardThreadStartManager") {
    it("should trigger when a thread starts") {
      val testClass = "org.scaladebugger.test.threads.ThreadStart"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val threadStartCount = new AtomicInteger(0)

      val s = DummyScalaVirtualMachine.newInstance()
      import s.lowlevel._

      // Mark that we want to receive thread start events
      threadStartManager.createThreadStartRequest()
      eventManager.addResumingEventHandler(ThreadStartEventType, e => {
        val threadEvent = e.asInstanceOf[ThreadStartEvent]
        val threadName = threadEvent.thread().name()

        logger.debug(s"Detected start of thread named $threadName")
        if (threadName.startsWith("test thread")) {
          logger.trace(s"Thread was desired test thread! Incrementing counter!")
          threadStartCount.incrementAndGet()
        }
      })

      // Start our Thread and listen for the start event
      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Eventually, we should receive a total of 10 thread starts
        logTimeTaken(eventually {
          threadStartCount.get() should be (10)
        })
      }
    }
  }
} 
Example 98
Source File: StandardThreadDeathManagerIntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.api.lowlevel.threads

import java.util.concurrent.atomic.AtomicInteger

import com.sun.jdi.event.ThreadDeathEvent
import org.scaladebugger.api.lowlevel.events.EventType._
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class StandardThreadDeathManagerIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("StandardThreadDeathManager") {
    it("should trigger when a thread dies") {
      val testClass = "org.scaladebugger.test.threads.ThreadDeath"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val threadDeathCount = new AtomicInteger(0)

      val s = DummyScalaVirtualMachine.newInstance()
      import s.lowlevel._

      // Mark that we want to receive thread death events
      threadDeathManager.createThreadDeathRequest()
      eventManager.addResumingEventHandler(ThreadDeathEventType, e => {
        val threadEvent = e.asInstanceOf[ThreadDeathEvent]
        val threadName = threadEvent.thread().name()

        logger.debug(s"Detected death of thread named $threadName")
        if (threadName.startsWith("test thread")) {
          logger.trace(s"Thread was desired test thread! Incrementing counter!")
          threadDeathCount.incrementAndGet()
        }
      })

      // Start our Thread and listen for the start event
      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Eventually, we should receive a total of 10 thread deaths
        logTimeTaken(eventually {
          threadDeathCount.get() should be (10)
        })
      }
    }
  }
} 
Example 99
Source File: JavaClassPrepareRequestIntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.api.profiles.java.requests.classes

import java.util.concurrent.atomic.AtomicInteger

import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaClassPrepareRequestIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("JavaClassPrepareRequest") {
    it("should trigger when a class is loaded") {
      val testClass = "org.scaladebugger.test.classes.ClassPrepare"

      val expectedClassName = "org.scaladebugger.test.classes.CustomClass"
      val classPrepareHit = new AtomicInteger(0)

      val s = DummyScalaVirtualMachine.newInstance()

      // Mark that we want to receive class prepare events and watch for one
      // NOTE: This is already set within the ScalaVirtualMachine class
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateClassPrepareRequest()
        .map(_.referenceType.name)
        .filter(_ == expectedClassName)
        .foreach(_ => classPrepareHit.incrementAndGet())

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Eventually, we should receive the class prepare event
        logTimeTaken(eventually {
          classPrepareHit.get() should be (1)
        })
      }
    }
  }
} 
Example 100
Source File: JavaThreadDeathRequestIntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.api.profiles.java.requests.threads

import java.util.concurrent.atomic.AtomicInteger

import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaThreadDeathRequestIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("JavaThreadDeathRequest") {
    it("should trigger when a thread dies") {
      val testClass = "org.scaladebugger.test.threads.ThreadDeath"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val threadDeathCount = new AtomicInteger(0)

      val s = DummyScalaVirtualMachine.newInstance()

      // Mark that we want to receive thread death events
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateThreadDeathRequest()
        .map(_.thread.name)
        .filter(_.startsWith("test thread"))
        .foreach(_ => threadDeathCount.incrementAndGet())

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Eventually, we should receive a total of 10 thread deaths
        logTimeTaken(eventually {
          threadDeathCount.get() should be (10)
        })
      }
    }
  }
} 
Example 101
Source File: JavaThreadStartRequestIntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.api.profiles.java.requests.threads

import java.util.concurrent.atomic.AtomicInteger

import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaThreadStartRequestIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("JavaThreadStartRequest") {
    it("should trigger when a thread starts") {
      val testClass = "org.scaladebugger.test.threads.ThreadStart"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val threadStartCount = new AtomicInteger(0)

      val s = DummyScalaVirtualMachine.newInstance()

      // Mark that we want to receive thread start events
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateThreadStartRequest()
        .map(_.thread.name)
        .filter(_.startsWith("test thread"))
        .foreach(_ => threadStartCount.incrementAndGet())

      // Start our Thread and listen for the start event
      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Eventually, we should receive a total of 10 thread starts
        logTimeTaken(eventually {
          threadStartCount.get() should be (10)
        })
      }
    }
  }
} 
Example 102
Source File: ListeningDebuggerIntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.api.debuggers

import java.net.ServerSocket
import java.util.concurrent.atomic.AtomicInteger

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

import scala.util.Try

class ListeningDebuggerIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("ListeningDebugger") {
    it("should be able to listen for multiple connecting JVM processes") {
      withProcessCreator((address, port, createProcess) => {
        val totalJvmProcesses = 3
        val currentConnectedCount = new AtomicInteger(0)

        // Start listening for JVM connections
        val listeningDebugger = ListeningDebugger(hostname = address, port = port)
        listeningDebugger.start(_ => currentConnectedCount.incrementAndGet())

        // Verify that our listening debugger can actually support multiple
        // connections (it should as a socket listener)
        if (!listeningDebugger.supportsMultipleConnections) {
          alert(
            "Listening debuggers do not support multiple connections on this JVM!"
          )
        }

        // Spawn our JVM processes
        (1 to totalJvmProcesses).foreach(_ => createProcess())

        // Keep checking back until we have successfully connected all JVMs
        eventually {
          currentConnectedCount.get() should be (totalJvmProcesses)
        }
      })
    }
  }

  
  private def withProcessCreator[T](testCode: (String, Int, () => Process) => T): T = {
    val (address, port) = {
      val socket = new ServerSocket(0)
      val _address = socket.getInetAddress.getHostName
      val _port = socket.getLocalPort
      socket.close()
      (_address, _port)
    }

    var jvmProcesses: Seq[Process] = Nil
    def createProcess(port: Int): Process = {
      val process = JDITools.spawn(
        className = "org.scaladebugger.test.misc.ListeningMain",
        server = false,
        suspend = true,
        port = port
      )
      jvmProcesses +:= process
      process
    }

    val result = Try(testCode(address, port, () => createProcess(port)))

    // Clean up any leftover processes
    jvmProcesses.foreach(p => Try(p.destroy()))

    result.get
  }
} 
Example 103
Source File: ControlledParallelSuite.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.test.helpers

import java.util.concurrent._
import java.util.concurrent.atomic.AtomicInteger

import ControlledParallelSuite._
import org.scalatest.{Args, Distributor, Status, Suite}

import scala.util.Try

object ControlledParallelSuite {
  lazy val EnvironmentPoolSize: Try[Int] =
    Try(System.getenv("SCALATEST_PARALLEL_TESTS").toInt)
  lazy val DefaultPoolSize: Int = Runtime.getRuntime.availableProcessors() * 2
  def calculatePoolSize(): Int = EnvironmentPoolSize.getOrElse(DefaultPoolSize)

  private val atomicThreadCounter: AtomicInteger = new AtomicInteger

  lazy val threadFactory: ThreadFactory = new ThreadFactory {
    val defaultThreadFactory = Executors.defaultThreadFactory

    def newThread(runnable: Runnable): Thread = {
      val thread = defaultThreadFactory.newThread(runnable)
      thread.setName("ScalaTest-" + atomicThreadCounter.incrementAndGet())
      thread
    }
  }

  import scala.collection.JavaConverters._
  val semaMap: collection.mutable.Map[String, Semaphore] =
    new ConcurrentHashMap[String, Semaphore]().asScala
}


  def semaSync[T](id: String)(thunk: => T): T = {
    val semaphore = semaMap.getOrElseUpdate(id, new Semaphore(poolSize))

    semaphore.acquire()
    val result = Try(thunk)
    semaphore.release()
    result.get
  }
} 
Example 104
Source File: Failover2Spec.scala    From affinity   with Apache License 2.0 5 votes vote down vote up
package io.amient.affinity.core.cluster

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger

import akka.http.scaladsl.model.HttpMethods._
import akka.http.scaladsl.model.StatusCode
import akka.http.scaladsl.model.StatusCodes.SeeOther
import com.typesafe.config.ConfigValueFactory
import io.amient.affinity.Conf
import io.amient.affinity.core.util.AffinityTestBase
import io.amient.affinity.kafka.EmbeddedKafka
import org.scalatest.{FlatSpec, Matchers}

import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.language.postfixOps
import scala.util.{Failure, Random, Success, Try}


class Failover2Spec extends FlatSpec with AffinityTestBase with EmbeddedKafka with Matchers {

  val specTimeout = 15 seconds

  override def numPartitions = 2

  def config = configure("failoverspecs", Some(zkConnect), Some(kafkaBootstrap))

  val node1 = new Node(config.withValue(Conf.Affi.Node.Gateway.Class.path, ConfigValueFactory.fromAnyRef(classOf[FailoverTestGateway].getName)))
  val node2 = new Node(config.withValue(Conf.Affi.Node.Containers("keyspace1").path, ConfigValueFactory.fromIterable(List(0,1).asJava)))
  val node3 = new Node(config.withValue(Conf.Affi.Node.Containers("keyspace1").path, ConfigValueFactory.fromIterable(List(0,1).asJava)))

  override def beforeAll(): Unit = try {
    node1.start()
    node2.start()
    node3.start()
    node1.awaitClusterReady()
  } finally {
    super.beforeAll()
  }

  override def afterAll(): Unit = try {
    node1.shutdown()
    node2.shutdown()
    node3.shutdown()
  } finally {
    super.afterAll()
  }

  "Master Transition" should "not lead to inconsistent state" in {
    val requestCount = new AtomicInteger(0)
    val expected = new ConcurrentHashMap[String, String]()
    import scala.concurrent.ExecutionContext.Implicits.global

    val random = new Random()
    val requests = scala.collection.mutable.ListBuffer[Future[Try[StatusCode]]]()
    for (i <- (1 to 250)) {
      val key = random.nextInt.toString
      val value = random.nextInt.toString
      requests += node1.http(POST, s"/$key/$value") map {
        case response =>
          expected.put(key, value)
          if (i == 25) {
            //after a few writes have succeeded kill one node
            node2.shutdown()
          }
          Success(response.status)
      } recover {
        case e: Throwable =>
          Failure(e)
      }
    }
    requestCount.set(requests.size)
    Await.result(Future.sequence(requests), specTimeout).foreach(_ should be(Success(SeeOther)))
    println(s"${requests.size} successful requests")

    expected.asScala.foreach { case (key, value) =>
      val (status, entity) = Await.result(node1.http(GET, s"/$key").map { response => (response.status, response.entity)}, specTimeout / 3)
      status.intValue should be (200)
      val expectedEntity = jsonStringEntity(value)
      entity should be(expectedEntity)
    }
  }

} 
Example 105
Source File: MemoryStore.scala    From shield   with MIT License 5 votes vote down vote up
package shield.kvstore

import java.util.concurrent.atomic.AtomicInteger

import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap
import com.typesafe.scalalogging.LazyLogging
import shield.metrics.Instrumented
import spray.http.{MediaType, HttpResponse}
import scala.concurrent.{ExecutionContext, Future}
import scala.collection.concurrent

class LazyWrapper[A](builder: => A) {
  lazy val value : A = builder
}

class MemoryStore(id: String, maxHashCapacity: Int, maxKeyCapacity: Int, maxLimitCapacity: Int)(implicit context: ExecutionContext) extends KVStore with LazyLogging with Instrumented {
  def getMillis():Long = System.currentTimeMillis
  private val setStore = new ConcurrentLinkedHashMap.Builder[String, LazyWrapper[TrieSet[String]]]
    .initialCapacity(1000)
    .maximumWeightedCapacity(Math.max(1000, maxHashCapacity))
    .build()
  // todo: tweak capacity - can we do by memory size? (weigher to weigh by memory footprint)
  private val keyStore = new ConcurrentLinkedHashMap.Builder[String, HttpResponse]
    .initialCapacity(1000)
    .maximumWeightedCapacity(Math.max(1000, maxKeyCapacity))
    .build()
  private val limitStore = new ConcurrentLinkedHashMap.Builder[String, AtomicInteger]
    .initialCapacity(1000)
    .maximumWeightedCapacity(Math.max(1000, maxLimitCapacity))
    .build()

  // todo: profiling optimization - triesets are expensive to build.  Is there a better data structure we can use?
  private def getOrSet[V](set: ConcurrentLinkedHashMap[String, V], key: String, default: V) = set.putIfAbsent(key, default) match {
    case null => default
    case existing => existing
  }

  val setGetTimer = timing("setGet", id)
  def setGet(key: String) : Future[Seq[String]] = setGetTimer {
    Future.successful(getOrSet(setStore, key, new LazyWrapper[TrieSet[String]](TrieSet[String]())).value.toSeq)
  }
  val setDeleteTimer = timing("setDelete", id)
  def setDelete(key: String) : Future[Long] = setDeleteTimer {
    setStore.remove(key)
    // todo: implement these according to the same semantics as RedisStore
    Future.successful(0L)
  }
  val setAddTimer = timing("setAdd", id)
  def setAdd(key: String, value: String) : Future[Long] = setAddTimer {
    getOrSet(setStore, key, new LazyWrapper[TrieSet[String]](TrieSet[String]())).value += value
    Future.successful(0L)
  }
  val setRemoveTimer = timing("setRemove", id)
  def setRemove(key: String, value: String) : Future[Long] = setRemoveTimer {
    getOrSet(setStore, key, new LazyWrapper[TrieSet[String]](TrieSet[String]())).value -= value
    Future.successful(0L)
  }
  val keyGetTimer = timing("keyGet", id)
  def keyGet(key: String) : Future[Option[HttpResponse]] = keyGetTimer {
    Future.successful(Option(keyStore.get(key)))
  }
  val keySetTimer = timing("keySet", id)
  def keySet(key: String, value: HttpResponse) : Future[Boolean] = keySetTimer {
    keyStore.put(key, value)
    Future.successful(true)
  }
  val keyDeleteTimer = timing("keyDelete", id)
  def keyDelete(key: String) : Future[Long] = keyDeleteTimer {
    keyStore.remove(key)
    Future.successful(0L)
  }

  val tokenTimer = timing("tokenRateLimit", id)
  def tokenRateLimit(key: String, rate: Int, perSeconds: Int) : Future[Boolean] = tokenTimer {
    // we could set up a concurrent system for actively pruning expired entries or....
    // we could just let them get evicted via lru policy
    val floored = Math.floor(getMillis() / (perSeconds * 1000)).toLong
    val fullKey = s"rl:$floored:$key"
    val counter = getOrSet(limitStore, fullKey, new AtomicInteger(0))
    // doesn't matter if we increment over the count (ie count rate limited requests), since it won't spill
    // over to the next bucket
    Future.successful(counter.incrementAndGet() <= rate)
  }
} 
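A brief sketch of the rate limiter above (object name and key are invented): the AtomicInteger counter lives under a per-time-bucket key, so requests beyond the allowed rate within the same bucket are rejected.

import scala.concurrent.{Await, ExecutionContext}
import scala.concurrent.duration._

object MemoryStoreRateLimitSketch extends App {
  implicit val ec: ExecutionContext = ExecutionContext.global
  val store = new MemoryStore("example", 1000, 1000, 1000)

  // Allow at most 2 requests per 10-second bucket for this client key.
  val results = (1 to 3).map { _ =>
    Await.result(store.tokenRateLimit("client-42", rate = 2, perSeconds = 10), 1.second)
  }

  println(results) // Vector(true, true, false), unless the 10 s bucket rolls over mid-run
}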
Example 106
Source File: CassandraTraceRecordWriter.scala    From haystack-traces   with Apache License 2.0 5 votes vote down vote up
package com.expedia.www.haystack.trace.storage.backends.cassandra.store

import java.util.concurrent.atomic.AtomicInteger

import com.expedia.open.tracing.backend.TraceRecord
import com.expedia.www.haystack.commons.metrics.MetricsSupport
import com.expedia.www.haystack.commons.retries.RetryOperation._
import com.expedia.www.haystack.trace.storage.backends.cassandra.client.CassandraSession
import com.expedia.www.haystack.trace.storage.backends.cassandra.config.entities.CassandraConfiguration
import com.expedia.www.haystack.trace.storage.backends.cassandra.metrics.AppMetricNames
import org.slf4j.LoggerFactory

import scala.concurrent.{ExecutionContextExecutor, Future, Promise}
import scala.util.{Failure, Success}

class CassandraTraceRecordWriter(cassandra: CassandraSession,
                                 config: CassandraConfiguration)(implicit val dispatcher: ExecutionContextExecutor)
  extends MetricsSupport {

  private val LOGGER = LoggerFactory.getLogger(classOf[CassandraTraceRecordWriter])
  private lazy val writeTimer = metricRegistry.timer(AppMetricNames.CASSANDRA_WRITE_TIME)
  private lazy val writeFailures = metricRegistry.meter(AppMetricNames.CASSANDRA_WRITE_FAILURE)

  cassandra.ensureKeyspace(config.clientConfig.tracesKeyspace)
  private val spanInsertPreparedStmt = cassandra.createSpanInsertPreparedStatement(config.clientConfig.tracesKeyspace)

  private def execute(record: TraceRecord): Future[Unit] = {

    val promise = Promise[Unit]
    // execute the request async with retry
    withRetryBackoff(retryCallback => {
      val timer = writeTimer.time()

      // prepare the statement
      val statement = cassandra.newTraceInsertBoundStatement(record.getTraceId,
        record.getSpans.toByteArray,
        config.writeConsistencyLevel(retryCallback.lastError()),
        spanInsertPreparedStmt)

      val asyncResult = cassandra.executeAsync(statement)
      asyncResult.addListener(new CassandraTraceRecordWriteResultListener(asyncResult, timer, retryCallback), dispatcher)
    },
      config.retryConfig,
      onSuccess = (_: Any) => promise.success(),
      onFailure = ex => {
        writeFailures.mark()
        LOGGER.error(s"Fail to write to cassandra after ${config.retryConfig.maxRetries} retry attempts for ${record.getTraceId}", ex)
        promise.failure(ex)
      })
    promise.future
  }

  
      execute(record).onComplete {
        case Success(_) => if (writableRecordsLatch.decrementAndGet() == 0) {
          promise.success()
        }
        case Failure(ex) =>
          //TODO: We fail the response only if the last cassandra write fails, ideally we should be failing if any of the cassandra writes fail
          if (writableRecordsLatch.decrementAndGet() == 0) {
            promise.failure(ex)
          }
      }
    })
    promise.future

  }
} 
Example 107
Source File: Util.scala    From Heracles   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hbase.util

import java.io._
import java.util.concurrent.atomic.AtomicInteger
import java.util.zip.{DeflaterOutputStream, InflaterInputStream}

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hbase.HBaseConfiguration

object Util {
  val iteration = new AtomicInteger(0)

  
  def dropTempFilePath(conf: Configuration, path: String): Boolean = {
    val fileSystem = FileSystem.get(conf)
    val filePath = new Path(path)
    if (fileSystem.exists(filePath)) {
      fileSystem.delete(filePath, true)
    } else {
      false
    }
  }

  def serializeHBaseConfiguration(configuration: Configuration): Array[Byte] = {
    val bos = new ByteArrayOutputStream
    val deflaterOutputStream = new DeflaterOutputStream(bos)
    val dos = new DataOutputStream(deflaterOutputStream)
    configuration.write(dos)
    dos.close()
    bos.toByteArray
  }

  def deserializeHBaseConfiguration(arr: Array[Byte]) = {
    val conf = HBaseConfiguration.create
    conf.readFields(new DataInputStream(new InflaterInputStream(new ByteArrayInputStream(arr))))
    conf
  }
} 
Example 108
Source File: Util.scala    From Spark-SQL-on-HBase   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hbase.util

import java.io._
import java.util.concurrent.atomic.AtomicInteger
import java.util.zip.{DeflaterOutputStream, InflaterInputStream}

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hbase.HBaseConfiguration

object Util {
  val iteration = new AtomicInteger(0)

  def getTempFilePath(conf: Configuration, prefix: String): String = {
    val fileSystem = FileSystem.get(conf)
    val path = new Path(s"$prefix-${System.currentTimeMillis()}-${iteration.getAndIncrement}")
    if (fileSystem.exists(path)) {
      fileSystem.delete(path, true)
    }
    path.getName
  }

  def serializeHBaseConfiguration(configuration: Configuration): Array[Byte] = {
    val bos = new ByteArrayOutputStream
    val deflaterOutputStream = new DeflaterOutputStream(bos)
    val dos = new DataOutputStream(deflaterOutputStream)
    configuration.write(dos)
    dos.close()
    bos.toByteArray
  }

  def deserializeHBaseConfiguration(arr: Array[Byte]) = {
    val conf = HBaseConfiguration.create
    conf.readFields(new DataInputStream(new InflaterInputStream(new ByteArrayInputStream(arr))))
    conf
  }
} 
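The counter's role here is just to keep temp file names unique even when two are generated in the same millisecond. A reduced sketch of the naming scheme without the HDFS calls (names invented):

import java.util.concurrent.atomic.AtomicInteger

object TempPathSketch extends App {
  val iteration = new AtomicInteger(0)

  def tempName(prefix: String): String =
    s"$prefix-${System.currentTimeMillis()}-${iteration.getAndIncrement}"

  // Same millisecond or not, the two names differ thanks to the counter.
  println(tempName("hbase-scratch"))
  println(tempName("hbase-scratch"))
}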
Example 109
Source File: CountingGreeterServiceImpl.scala    From akka-grpc   with Apache License 2.0 5 votes vote down vote up
package akka.grpc.scaladsl

import java.util.concurrent.atomic.AtomicInteger

import scala.concurrent.Future

import akka.NotUsed
import akka.stream.scaladsl.Source

import example.myapp.helloworld.grpc.helloworld._

class CountingGreeterServiceImpl extends GreeterService {
  var greetings = new AtomicInteger(0);

  def sayHello(in: HelloRequest): Future[HelloReply] = {
    greetings.incrementAndGet()
    Future.successful(HelloReply(s"Hi ${in.name}!"))
  }

  def itKeepsReplying(in: HelloRequest): Source[HelloReply, NotUsed] =
    Source(List(HelloReply("First"), HelloReply("Second"))).mapMaterializedValue { m => println("XXX MAT YYY"); m }
  def itKeepsTalking(
      in: akka.stream.scaladsl.Source[example.myapp.helloworld.grpc.helloworld.HelloRequest, akka.NotUsed])
      : scala.concurrent.Future[example.myapp.helloworld.grpc.helloworld.HelloReply] = ???
  def streamHellos(in: akka.stream.scaladsl.Source[example.myapp.helloworld.grpc.helloworld.HelloRequest, akka.NotUsed])
      : akka.stream.scaladsl.Source[example.myapp.helloworld.grpc.helloworld.HelloReply, akka.NotUsed] = ???

} 
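A small exercise of the service (object name invented; it assumes the generated HelloRequest/HelloReply messages from the example's proto package): each sayHello call bumps the counter.

import scala.concurrent.Await
import scala.concurrent.duration._

import example.myapp.helloworld.grpc.helloworld._

object CountingGreeterSketch extends App {
  val service = new CountingGreeterServiceImpl

  Await.result(service.sayHello(HelloRequest("Alice")), 1.second)
  Await.result(service.sayHello(HelloRequest("Bob")), 1.second)

  println(service.greetings.get()) // 2
}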
Example 110
Source File: ConcurrentlyStepSpec.scala    From cornichon   with Apache License 2.0 5 votes vote down vote up
package com.github.agourlay.cornichon.steps.wrapped

import java.util.concurrent.atomic.AtomicInteger

import com.github.agourlay.cornichon.core._
import com.github.agourlay.cornichon.steps.regular.assertStep.{ AssertStep, GenericEqualityAssertion }
import com.github.agourlay.cornichon.testHelpers.CommonTestSuite
import utest._

import scala.concurrent.duration._

object ConcurrentlyStepSpec extends TestSuite with CommonTestSuite {

  val tests = Tests {
    test("fails if 'Concurrently' block contains a failed step") {
      val nested = AssertStep(
        "always fails",
        _ => GenericEqualityAssertion(true, false)
      ) :: Nil
      val steps = ConcurrentlyStep(nested, 200.millis) :: Nil
      val s = Scenario("with Concurrently", steps)
      val res = awaitTask(ScenarioRunner.runScenario(Session.newEmpty)(s))
      scenarioFailsWithMessage(res) {
        """Scenario 'with Concurrently' failed:
          |
          |at step:
          |always fails
          |
          |with error(s):
          |expected result was:
          |'true'
          |but actual result is:
          |'false'
          |
          |seed for the run was '1'
          |""".stripMargin
      }
    }

    test("fails if 'Concurrently' block does not complete within 'maxDuration because of a single step duration") {
      val nested = AssertStep(
        "always succeed after 50 ms",
        _ => {
          Thread.sleep(50)
          GenericEqualityAssertion(true, true)
        }
      ) :: Nil
      val steps = ConcurrentlyStep(nested, 10.millis) :: Nil
      val s = Scenario("with Concurrently", steps)
      val res = awaitTask(ScenarioRunner.runScenario(Session.newEmpty)(s))
      scenarioFailsWithMessage(res) {
        """Scenario 'with Concurrently' failed:
          |
          |at step:
          |Concurrently block with maxTime '10 milliseconds'
          |
          |with error(s):
          |Concurrently block did not reach completion in time: 0/1 finished
          |
          |seed for the run was '1'
          |""".stripMargin
      }
    }

    test("runs nested block 'n' times") {
      val uglyCounter = new AtomicInteger(0)
      val loop = 5
      val nested = AssertStep(
        "increment captured counter",
        _ => {
          uglyCounter.incrementAndGet()
          GenericEqualityAssertion(true, true)
        }
      )
      val concurrentlyStep = ConcurrentlyStep(List.fill(loop)(nested), 300.millis)
      val s = Scenario("scenario with Concurrently", concurrentlyStep :: Nil)
      val res = awaitTask(ScenarioRunner.runScenario(Session.newEmpty)(s))
      assert(res.isSuccess)
      assert(uglyCounter.intValue() == loop)
    }
  }
} 
Example 111
Source File: AttachStepSpec.scala    From cornichon   with Apache License 2.0 5 votes vote down vote up
package com.github.agourlay.cornichon.steps.wrapped

import java.util.concurrent.atomic.AtomicInteger

import com.github.agourlay.cornichon.core.{ Scenario, ScenarioRunner, Session, Step }
import com.github.agourlay.cornichon.steps.cats.EffectStep
import com.github.agourlay.cornichon.testHelpers.CommonTestSuite
import utest._

object AttachStepSpec extends TestSuite with CommonTestSuite {

  val tests = Tests {
    test("merges nested steps in the parent flow when first") {
      val nested = List.fill(5)(alwaysValidAssertStep)
      val steps = AttachStep(_ => nested) :: Nil
      val s = Scenario("scenario with Attach", steps)
      val res = awaitTask(ScenarioRunner.runScenario(Session.newEmpty)(s))
      assert(res.isSuccess)
      matchLogsWithoutDuration(res.logs) {
        """
          |   Scenario : scenario with Attach
          |      main steps
          |      valid
          |      valid
          |      valid
          |      valid
          |      valid""".stripMargin
      }
    }

    test("merges nested steps in the parent flow when nested") {
      val nested = List.fill(5)(alwaysValidAssertStep)
      val steps = AttachStep(_ => nested) :: Nil
      val s = Scenario("scenario with Attach", RepeatStep(steps, 1, None) :: Nil)
      val res = awaitTask(ScenarioRunner.runScenario(Session.newEmpty)(s))
      assert(res.isSuccess)
      matchLogsWithoutDuration(res.logs) {
        """
          |   Scenario : scenario with Attach
          |      main steps
          |      Repeat block with occurrence '1'
          |         valid
          |         valid
          |         valid
          |         valid
          |         valid
          |      Repeat block with occurrence '1' succeeded""".stripMargin
      }
    }

    test("runs all nested valid effects") {
      val uglyCounter = new AtomicInteger(0)
      val effectNumber = 5
      val effect = EffectStep.fromSync(
        "increment captured counter",
        sc => {
          uglyCounter.incrementAndGet()
          sc.session
        }
      )

      val nestedSteps = List.fill(effectNumber)(effect)
      val attached = AttachStep(_ => nestedSteps)

      val s = Scenario("scenario with effects", attached :: effect :: Nil)
      val res = awaitTask(ScenarioRunner.runScenario(Session.newEmpty)(s))
      assert(res.isSuccess)
      assert(uglyCounter.get() == effectNumber + 1)
      matchLogsWithoutDuration(res.logs) {
        """
          |   Scenario : scenario with effects
          |      main steps
          |      increment captured counter
          |      increment captured counter
          |      increment captured counter
          |      increment captured counter
          |      increment captured counter
          |      increment captured counter""".stripMargin
      }
    }

    test("available bia Step.eval") {
      val steps = Step.eval(alwaysValidAssertStep) :: Nil
      val s = Scenario("scenario with Attach", steps)
      val res = awaitTask(ScenarioRunner.runScenario(Session.newEmpty)(s))
      assert(res.isSuccess)
      matchLogsWithoutDuration(res.logs) {
        """
          |   Scenario : scenario with Attach
          |      main steps
          |      valid""".stripMargin
      }
    }
  }
} 
Example 112
Source File: KafkaContinuousTest.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.kafka010

import java.util.concurrent.atomic.AtomicInteger

import org.apache.spark.SparkContext
import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd, SparkListenerTaskStart}
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
import org.apache.spark.sql.execution.streaming.StreamExecution
import org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution
import org.apache.spark.sql.streaming.Trigger
import org.apache.spark.sql.test.TestSparkSession

// Trait to configure StreamTest for kafka continuous execution tests.
trait KafkaContinuousTest extends KafkaSourceTest {
  override val defaultTrigger = Trigger.Continuous(1000)
  override val defaultUseV2Sink = true

  // We need more than the default local[2] to be able to schedule all partitions simultaneously.
  override protected def createSparkSession = new TestSparkSession(
    new SparkContext(
      "local[10]",
      "continuous-stream-test-sql-context",
      sparkConf.set("spark.sql.testkey", "true")))

  // In addition to setting the partitions in Kafka, we have to wait until the query has
  // reconfigured to the new count so the test framework can hook in properly.
  override protected def setTopicPartitions(
      topic: String, newCount: Int, query: StreamExecution) = {
    testUtils.addPartitions(topic, newCount)
    eventually(timeout(streamingTimeout)) {
      assert(
        query.lastExecution.logical.collectFirst {
          case DataSourceV2Relation(_, r: KafkaContinuousReader) => r
        }.exists(_.knownPartitions.size == newCount),
        s"query never reconfigured to $newCount partitions")
    }
  }

  // Continuous processing tasks end asynchronously, so test that they actually end.
  private val tasksEndedListener = new SparkListener() {
    val activeTaskIdCount = new AtomicInteger(0)

    override def onTaskStart(start: SparkListenerTaskStart): Unit = {
      activeTaskIdCount.incrementAndGet()
    }

    override def onTaskEnd(end: SparkListenerTaskEnd): Unit = {
      activeTaskIdCount.decrementAndGet()
    }
  }

  override def beforeEach(): Unit = {
    super.beforeEach()
    spark.sparkContext.addSparkListener(tasksEndedListener)
  }

  override def afterEach(): Unit = {
    eventually(timeout(streamingTimeout)) {
      assert(tasksEndedListener.activeTaskIdCount.get() == 0)
    }
    spark.sparkContext.removeSparkListener(tasksEndedListener)
    super.afterEach()
  }


  test("ensure continuous stream is being used") {
    val query = spark.readStream
      .format("rate")
      .option("numPartitions", "1")
      .option("rowsPerSecond", "1")
      .load()

    testStream(query)(
      Execute(q => assert(q.isInstanceOf[ContinuousExecution]))
    )
  }
} 
Example 113
Source File: KafkaContinuousSourceSuite.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.kafka010

import java.util.Properties
import java.util.concurrent.atomic.AtomicInteger

import org.scalatest.time.SpanSugar._
import scala.collection.mutable
import scala.util.Random

import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, Dataset, ForeachWriter, Row}
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
import org.apache.spark.sql.execution.streaming.StreamExecution
import org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution
import org.apache.spark.sql.streaming.{StreamTest, Trigger}
import org.apache.spark.sql.test.{SharedSQLContext, TestSparkSession}

// Run tests in KafkaSourceSuiteBase in continuous execution mode.
class KafkaContinuousSourceSuite extends KafkaSourceSuiteBase with KafkaContinuousTest

class KafkaContinuousSourceTopicDeletionSuite extends KafkaContinuousTest {
  import testImplicits._

  override val brokerProps = Map("auto.create.topics.enable" -> "false")

  test("subscribing topic by pattern with topic deletions") {
    val topicPrefix = newTopic()
    val topic = topicPrefix + "-seems"
    val topic2 = topicPrefix + "-bad"
    testUtils.createTopic(topic, partitions = 5)
    testUtils.sendMessages(topic, Array("-1"))
    require(testUtils.getLatestOffsets(Set(topic)).size === 5)

    val reader = spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", testUtils.brokerAddress)
      .option("kafka.metadata.max.age.ms", "1")
      .option("subscribePattern", s"$topicPrefix-.*")
      .option("failOnDataLoss", "false")

    val kafka = reader.load()
      .selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
      .as[(String, String)]
    val mapped = kafka.map(kv => kv._2.toInt + 1)

    testStream(mapped)(
      makeSureGetOffsetCalled,
      AddKafkaData(Set(topic), 1, 2, 3),
      CheckAnswer(2, 3, 4),
      Execute { query =>
        testUtils.deleteTopic(topic)
        testUtils.createTopic(topic2, partitions = 5)
        eventually(timeout(streamingTimeout)) {
          assert(
            query.lastExecution.logical.collectFirst {
              case DataSourceV2Relation(_, r: KafkaContinuousReader) => r
            }.exists { r =>
              // Ensure the new topic is present and the old topic is gone.
              r.knownPartitions.exists(_.topic == topic2)
            },
            s"query never reconfigured to new topic $topic2")
        }
      },
      AddKafkaData(Set(topic2), 4, 5, 6),
      CheckAnswer(2, 3, 4, 5, 6, 7)
    )
  }
}

class KafkaContinuousSourceStressForDontFailOnDataLossSuite
    extends KafkaSourceStressForDontFailOnDataLossSuite {
  override protected def startStream(ds: Dataset[Int]) = {
    ds.writeStream
      .format("memory")
      .queryName("memory")
      .trigger(Trigger.Continuous("1 second"))
      .start()
  }
} 
Example 114
Source File: JobWaiter.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.scheduler

import java.util.concurrent.atomic.AtomicInteger

import scala.concurrent.{Future, Promise}

import org.apache.spark.internal.Logging


  def cancel() {
    dagScheduler.cancelJob(jobId, None)
  }

  override def taskSucceeded(index: Int, result: Any): Unit = {
    // resultHandler call must be synchronized in case resultHandler itself is not thread safe.
    synchronized {
      resultHandler(index, result.asInstanceOf[T])
    }
    if (finishedTasks.incrementAndGet() == totalTasks) {
      jobPromise.success(())
    }
  }

  override def jobFailed(exception: Exception): Unit = {
    if (!jobPromise.tryFailure(exception)) {
      logWarning("Ignore failure", exception)
    }
  }

} 
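
The JobWaiter snippet above lost its enclosing class declaration (fields such as dagScheduler, finishedTasks, totalTasks and jobPromise come from the stripped constructor), so here is a minimal, self-contained sketch of the same pattern under assumed names: an AtomicInteger counts finished tasks and a Promise is completed exactly once when the count reaches the total. The names below are hypothetical and are not the Spark API.

import java.util.concurrent.atomic.AtomicInteger

import scala.concurrent.{Future, Promise}

// Hypothetical stand-in for the counting pattern shown above (not Spark's JobWaiter).
class SimpleJobWaiter[T](totalTasks: Int, resultHandler: (Int, T) => Unit) {
  private val finishedTasks = new AtomicInteger(0)
  private val jobPromise = Promise[Unit]()

  // A job with zero tasks has nothing to wait for.
  if (totalTasks == 0) jobPromise.trySuccess(())

  // Completes when every task has reported a result, or fails on the first error.
  def completionFuture: Future[Unit] = jobPromise.future

  def taskSucceeded(index: Int, result: T): Unit = {
    // The handler may not be thread safe, so serialize calls to it.
    synchronized { resultHandler(index, result) }
    if (finishedTasks.incrementAndGet() == totalTasks) jobPromise.success(())
  }

  def jobFailed(exception: Exception): Unit = jobPromise.tryFailure(exception)
}
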
Example 115
Source File: BlockManagerManagedBuffer.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.storage

import java.io.InputStream
import java.nio.ByteBuffer
import java.util.concurrent.atomic.AtomicInteger

import org.apache.spark.network.buffer.ManagedBuffer


private[storage] class BlockManagerManagedBuffer(
    blockInfoManager: BlockInfoManager,
    blockId: BlockId,
    data: BlockData,
    dispose: Boolean) extends ManagedBuffer {

  private val refCount = new AtomicInteger(1)

  override def size(): Long = data.size

  override def nioByteBuffer(): ByteBuffer = data.toByteBuffer()

  override def createInputStream(): InputStream = data.toInputStream()

  override def convertToNetty(): Object = data.toNetty()

  override def retain(): ManagedBuffer = {
    refCount.incrementAndGet()
    val locked = blockInfoManager.lockForReading(blockId, blocking = false)
    assert(locked.isDefined)
    this
  }

  override def release(): ManagedBuffer = {
    blockInfoManager.unlock(blockId)
    if (refCount.decrementAndGet() == 0 && dispose) {
      data.dispose()
    }
    this
  }
} 
Example 116
Source File: Utils.scala    From ingraph   with Eclipse Public License 1.0 5 votes vote down vote up
package ingraph.ire.util

import java.util.Collection
import java.util.concurrent.atomic.AtomicInteger

import akka.actor.ActorRef
import ingraph.ire.datatypes.Tuple
import ingraph.ire.messages.{ChangeSet, Primary, ReteMessage, Secondary}

import scala.collection.mutable

object Utils {

  object conversions {
    implicit def toSendingFunction(base: ActorRef): ReteMessage => Unit = base ! _

    implicit class ReteNode(base: ActorRef) extends Serializable {
      def primary(reteMessage: ReteMessage) = {
        base ! Primary(reteMessage)
      }

      def secondary(reteMessage: ReteMessage) = {
        base ! Secondary(reteMessage)
      }
    }

  }

  def time[R](block: => R): Long = {
    val t0 = System.nanoTime()
    val result = block    // call-by-name
    val t1 = System.nanoTime()
    val elapsed = t1 - t0
    println("Elapsed time: " + elapsed + "ns")
    elapsed
  }

}

class AtomicUniqueCounter {
  private val counter: AtomicInteger = new AtomicInteger(0)

  def getNext = counter.getAndIncrement()
}

trait IterableMultiMap[A, B] extends mutable.MultiMap[A, B] {
  def multiUnzip: (Iterable[A], Iterable[B]) = {
    val b1 = genericBuilder[A]
    val b2 = genericBuilder[B]
    this.foreach(keyValueSet => {
      keyValueSet._2.foreach(value => {
        b1 += keyValueSet._1
        b2 += value
      })
    })
    (b1.result(), b2.result())
  }
}

object SizeCounter {
  def countDeeper(containers: Iterable[Iterable[Tuple]]*): Long =
    containers.map(hashmap => hashmap.foldLeft(0)((sum, set) => sum + set.foldLeft(0)(_ + _.size))).sum

  def count(containers: Iterable[Iterable[Any]]*): Long = {
    containers.map(tuples => tuples.foldLeft(0)(_ + _.size)).sum
  }

  def count(containers: Collection[Tuple]): Long = {
    containers.size
  }

} 
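
A quick usage sketch for the AtomicUniqueCounter above: getNext hands out increasing ids that are safe to request from multiple threads, so concurrent callers never see a duplicate. The values shown are only what a single-threaded caller would observe.

// Usage sketch for AtomicUniqueCounter (illustrative only).
import ingraph.ire.util.AtomicUniqueCounter

val counter = new AtomicUniqueCounter
val ids = (1 to 3).map(_ => counter.getNext)
// ids == Vector(0, 1, 2) when requested from a single thread
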
Example 117
Source File: KinesisContinuousTest.scala    From kinesis-sql   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.kinesis

import java.util.concurrent.atomic.AtomicInteger

import org.scalatest.time.SpanSugar._

import org.apache.spark.SparkContext
import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd, SparkListenerTaskStart}
import org.apache.spark.sql.streaming.Trigger
import org.apache.spark.sql.test.TestSparkSession

trait KinesisContinuousTest extends KinesisSourceTest {
  override val defaultTrigger = Trigger.Continuous("1 hour")
  override val defaultUseV2Sink = true

  override val streamingTimeout = 120.seconds

  override protected def createSparkSession = new TestSparkSession(
    new SparkContext(
      "local[10]",
      "continuous-stream-test-sql-context",
      sparkConf.set("spark.sql.testkey", "true")))

  // Continuous processing tasks end asynchronously, so test that they actually end.
  private val tasksEndedListener = new SparkListener() {
    val activeTaskIdCount = new AtomicInteger(0)

    override def onTaskStart(start: SparkListenerTaskStart): Unit = {
      activeTaskIdCount.incrementAndGet()
    }

    override def onTaskEnd(end: SparkListenerTaskEnd): Unit = {
      activeTaskIdCount.decrementAndGet()
    }
  }

  override def beforeEach(): Unit = {
    super.beforeEach()
    spark.sparkContext.addSparkListener(tasksEndedListener)
  }

  override def afterEach(): Unit = {
    eventually(timeout(streamingTimeout)) {
      assert(tasksEndedListener.activeTaskIdCount.get() == 0)
    }
    spark.sparkContext.removeSparkListener(tasksEndedListener)
    super.afterEach()
  }

} 
Example 118
Source File: MultiFuture.scala    From sbt-dependency-updates   with Apache License 2.0 5 votes vote down vote up
package org.jmotor.sbt.concurrent

import java.util.concurrent.CopyOnWriteArrayList
import java.util.concurrent.atomic.AtomicInteger

import org.jmotor.sbt.exception.MultiException

import scala.concurrent.Promise


class MultiFuture[T](p: Promise[T], count: Int, default: T) {
  private[this] val counter = new AtomicInteger(0)
  private[this] val errors = new CopyOnWriteArrayList[Throwable]()

  def tryComplete(): Unit = {
    if (counter.incrementAndGet() == count) {
      if (errors.isEmpty) {
        p success default
      } else {
        import scala.collection.JavaConverters._
        p failure MultiException(errors.asScala: _*)
      }
    }
  }

  def tryComplete(throwable: Throwable): Unit = {
    errors.add(throwable)
    tryComplete()
  }

} 
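
A usage sketch for MultiFuture, assuming three parallel branches that each report in exactly once: the shared promise resolves only after the expected number of tryComplete calls, and fails with a MultiException if any branch reported an error. The branch bodies below are made up for illustration.

// Usage sketch (assumed call sites; not taken from the project itself).
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{Future, Promise}

val promise = Promise[Unit]()
val multi = new MultiFuture[Unit](promise, 3, ())

(1 to 3).foreach { i =>
  Future {
    if (i == 2) multi.tryComplete(new RuntimeException(s"branch $i failed"))
    else multi.tryComplete()
  }
}
// promise.future fails with a MultiException once all three branches have reported.
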
Example 119
Source File: ProgressBar.scala    From sbt-dependency-updates   with Apache License 2.0 5 votes vote down vote up
package org.jmotor.sbt.util

import java.util.concurrent.atomic.AtomicInteger


class ProgressBar(message: String, done: String) {
  private[this] var running: Boolean = true
  private[this] val chars = Seq("/", "-", "\\", "|")
  private[this] val worker = new Thread() {
    private[this] val index = new AtomicInteger(0)

    override def run(): Unit = {
      while (running) {
        var _index = index.getAndIncrement()
        if (_index >= chars.length) {
          _index = 0
          index.set(0)
        }
        print(s"$message ${chars(_index)} \r")
        Thread.sleep(200)
      }
    }
  }

  def start(): Unit = worker.start()

  def stop(): Unit = {
    running = false
    print(s"$done\n")
  }

} 
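
The ProgressBar above animates on a background thread while the caller does the real work, so typical usage just brackets a slow call with start and stop; the task being timed here is a stand-in.

// Usage sketch: show the spinner while a slow task runs (illustrative).
val bar = new ProgressBar("Checking for dependency updates", "Done checking dependencies.")
bar.start()
try Thread.sleep(3000) // stand-in for the real work
finally bar.stop()
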
Example 120
Source File: ChannelStatisticsHandler.scala    From Neutrino   with Apache License 2.0 5 votes vote down vote up
package com.ebay.neutrino.handler.ops

import java.util.concurrent.atomic.{AtomicInteger, AtomicLong}

import com.ebay.neutrino.metrics.Instrumented
import com.typesafe.scalalogging.slf4j.StrictLogging
import io.netty.buffer.{ByteBuf, ByteBufHolder}
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel._
import scala.concurrent.duration._



@Sharable
class ChannelStatisticsHandler(upstream: Boolean) extends ChannelDuplexHandler with StrictLogging with Instrumented
{
  import com.ebay.neutrino.util.AttributeSupport._
  import com.ebay.neutrino.util.Utilities.AtomicLongSupport
  import com.ebay.neutrino.metrics.Metrics._

  // Global statistics
  val readBytes    = if (upstream) UpstreamBytesRead else DownstreamBytesRead
  val writeBytes   = if (upstream) UpstreamBytesWrite else DownstreamBytesWrite
  val readPackets  = if (upstream) UpstreamPacketsRead else DownstreamPacketsRead
  val writePackets = if (upstream) UpstreamPacketsWrite else DownstreamPacketsWrite


  @inline def calculateSize(msg: AnyRef) = msg match {
    case data: ByteBuf => data.readableBytes()
    case data: ByteBufHolder => data.content.readableBytes
    case data => 0
  }


  // Log to global (and local) statistics
  override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef): Unit = {
    val bytes = calculateSize(msg)

    readPackets.mark
    readBytes += bytes
    ctx.statistics.readPackets += 1
    ctx.statistics.readBytes += bytes

    ctx.fireChannelRead(msg)
  }


  override def write(ctx: ChannelHandlerContext, msg: AnyRef, promise: ChannelPromise): Unit = {
    val bytes = calculateSize(msg)

    writePackets.mark
    writeBytes += bytes
    ctx.statistics.writePackets += 1
    ctx.statistics.writeBytes += bytes

    ctx.write(msg, promise)
  }
}

class ChannelStatistics {

  // Collected traffic statistics
  val readBytes     = new AtomicLong()
  val writeBytes    = new AtomicLong()
  val readPackets   = new AtomicLong()
  val writePackets  = new AtomicLong()

  // Channel usage statistics
  val startTime     = System.nanoTime()
  val allocations   = new AtomicInteger()

  // Request statistics
  val requestCount  = new AtomicInteger()
  val responseCount = new AtomicInteger()

  // Helper methods
  def elapsed       = (System.nanoTime()-startTime).nanos
} 
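
Because the handler is @Sharable and keeps only atomic counters, a single instance can be installed on every channel. A minimal pipeline sketch, assuming the handler class and its AttributeSupport-backed statistics attribute are available on the classpath:

// Pipeline sketch (illustrative): reuse one shared statistics handler per direction.
import com.ebay.neutrino.handler.ops.ChannelStatisticsHandler
import io.netty.channel.ChannelInitializer
import io.netty.channel.socket.SocketChannel

val upstreamStats = new ChannelStatisticsHandler(upstream = true)

class StatsInitializer extends ChannelInitializer[SocketChannel] {
  override def initChannel(ch: SocketChannel): Unit =
    ch.pipeline().addLast("channel-statistics", upstreamStats)
}
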
Example 121
Source File: StreamSpecUtil.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.pattern.stream

import java.io.File
import java.nio.file.Files
import java.util.concurrent.atomic.AtomicInteger

import akka.stream.ThrottleMode
import akka.stream.scaladsl._
import com.typesafe.config.ConfigFactory
import net.openhft.chronicle.wire.{WireIn, WireOut}

import scala.concurrent.duration._
import scala.language.postfixOps
import scala.collection.JavaConverters._
import scala.util.Random

object StreamSpecUtil {
  val elementCount = 100000
  val failTestAt = elementCount * 3 / 10
  val elementsAfterFail = 100
  val flowRate = 1000
  val flowUnit = 10 millisecond
  val burstSize = 500
}

class StreamSpecUtil[T, S](outputPort: Int = 1) {

  import StreamSpecUtil._
  val outputPorts = outputPort
  val tempPath: File = Files.createTempDirectory("persistent_queue").toFile
  val totalProcessed = elementCount + elementsAfterFail

  val config = ConfigFactory.parseMap {
    Map(
      "persist-dir" -> s"${tempPath.getAbsolutePath}",
      "output-ports" -> s"$outputPorts",
      "roll-cycle" -> "TEST_SECONDLY".toLowerCase()
    ).asJava
  }

  val in = Source(1 to elementCount)
  lazy val atomicCounter = Vector.tabulate(outputPorts)(_ => new AtomicInteger(0))
  lazy val flowCounter = Flow[Any].map(_ => 1L).reduce(_ + _).toMat(Sink.head)(Keep.right)
  lazy val merge = Merge[S](outputPorts)
  lazy val throttle = Flow[S].throttle(flowRate, flowUnit, burstSize, ThrottleMode.shaping)
  lazy val throttleMore = Flow[S].throttle(flowRate * 9 / 10, flowUnit, burstSize, ThrottleMode.shaping)
  lazy val head = Sink.head[S]
  lazy val last = Sink.last[S]
  val minRandom = 100
  lazy val random = Random.nextInt(elementCount - minRandom - 1) + minRandom
  lazy val filterCounter = new AtomicInteger(0)
  lazy val filterARandomElement = Flow[Event[T]].map(e => (e, filterCounter.incrementAndGet())).filter(_._2 != random).map(_._1)

  def commitCounter(outputPortId: Int) = atomicCounter(outputPortId).incrementAndGet()

  def clean() = delete(tempPath)

  private def delete(file: File): Unit = {
    if (file.isDirectory)
      Option(file.listFiles).map(_.toList).getOrElse(Nil).foreach(delete)
    file.delete
  }
}

case class Person(name: String, age: Int)

class PersonSerializer extends QueueSerializer[Person] {

  override def readElement(wire: WireIn): Option[Person] = {
    for {
      name <- Option(wire.read().`object`(classOf[String]))
      age <- Option(wire.read().int32)
    } yield { Person(name, age) }
  }

  override def writeElement(element: Person, wire: WireOut): Unit = {
    wire.write().`object`(classOf[String], element.name)
    wire.write().int32(element.age)
  }
} 
Example 122
Source File: ThrowExceptionStream.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.stream

import java.util.concurrent.atomic.AtomicInteger

import akka.{Done, NotUsed}
import akka.actor.ActorContext
import akka.stream.ClosedShape
import akka.stream.scaladsl.GraphDSL.Implicits._
import akka.stream.scaladsl._

import scala.concurrent.Future
import scala.language.postfixOps

case object NotifyWhenDone {
  def getInstance: NotifyWhenDone.type = this
}

object ThrowExceptionStream {

  val limit = 50000
  val exceptionAt = limit * 3 / 10
  val recordCount = new AtomicInteger(0)
}

class ThrowExceptionStream extends PerpetualStream[Future[Int]] {

  import ThrowExceptionStream._

  def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(counter) { implicit builder =>
    sink =>
      startSource ~> injectError ~> sink
      ClosedShape
  })

  val injectError = Flow[Int].map { n =>
    if (n == exceptionAt) throw new NumberFormatException("This is a fake exception")
    else n
  }

  def counter = Flow[Any].map{ _ => recordCount.incrementAndGet(); 1 }.reduce{ _ + _ }.toMat(Sink.head)(Keep.right)

  override def receive = {
    case NotifyWhenDone =>
      import context.dispatcher
      val target = sender()
      matValue foreach { v => target ! v }
  }

  private def startSource(implicit context: ActorContext): Source[Int, NotUsed] = Source(1 to limit)

  override def shutdown() = {
    println("Neo Stream Result " + recordCount.get + "\n\n")
    Future.successful(Done)
  }

} 
Example 123
Source File: PulsarContinuousTest.scala    From pulsar-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.pulsar

import java.util.concurrent.atomic.AtomicInteger

import scala.language.reflectiveCalls

import org.apache.spark.SparkContext
import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd, SparkListenerTaskStart}
import org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution
import org.apache.spark.sql.streaming.Trigger
import org.apache.spark.sql.test.TestSparkSession

trait PulsarContinuousTest extends PulsarSourceTest {

  override val defaultTrigger = Trigger.Continuous(1000)
  override val defaultUseV2Sink = true

  // We need more than the default local[2] to be able to schedule all partitions simultaneously.
  override protected def createSparkSession =
    new TestSparkSession(
      new SparkContext(
        "local[10]",
        "continuous-stream-test-sql-context",
        sparkConf.set("spark.sql.testkey", "true")))

  // Continuous processing tasks end asynchronously, so test that they actually end.
  private val tasksEndedListener = new SparkListener() {
    val activeTaskIdCount = new AtomicInteger(0)

    override def onTaskStart(start: SparkListenerTaskStart): Unit = {
      activeTaskIdCount.incrementAndGet()
    }

    override def onTaskEnd(end: SparkListenerTaskEnd): Unit = {
      activeTaskIdCount.decrementAndGet()
    }
  }

  override def beforeEach(): Unit = {
    super.beforeEach()
    spark.sparkContext.addSparkListener(tasksEndedListener)
  }

  override def afterEach(): Unit = {
    eventually(timeout(streamingTimeout)) {
      assert(tasksEndedListener.activeTaskIdCount.get() == 0)
    }
    spark.sparkContext.removeSparkListener(tasksEndedListener)
    super.afterEach()
  }

  test("ensure continuous stream is being used") {
    val query = spark.readStream
      .format("rate")
      .option("numPartitions", "1")
      .option("rowsPerSecond", "1")
      .load()

    testStream(query)(
      Execute(q => assert(q.isInstanceOf[ContinuousExecution]))
    )
  }
} 
Example 124
Source File: CqrsApp.scala    From akka-kubernetes-tests   with Apache License 2.0 5 votes vote down vote up
package akka.kubernetes.couchbase

import java.util.UUID
import java.util.concurrent.atomic.AtomicInteger

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.persistence.couchbase.scaladsl.CouchbaseReadJournal
import akka.persistence.query.{NoOffset, PersistenceQuery}
import akka.stream.ActorMaterializer

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.util.{Random, Try}

object CqrsApp {

  def main(args: Array[String]): Unit = {
    val system = ActorSystem("CouchbaseSystem")
    val materializer = ActorMaterializer()(system)
    val ec: ExecutionContext = system.dispatcher
    val log = system.log

    AkkaManagement(system).start()
    ClusterBootstrap(system).start()

    Cluster(system).registerOnMemberUp {

      val selfRoles = Cluster(system).selfRoles

      log.info("Running with roles {}", selfRoles)

      val shardedSwitchEntity = ShardedSwitchEntity(system)
      shardedSwitchEntity.start()
      EventProcessorWrapper(system).start()

      if (selfRoles.contains("load-generation")) {
        log.info("Starting load generation")
        testIt(system, shardedSwitchEntity)
      }

      if (selfRoles.contains("simple-query")) {
        log.info("Starting simple query")
        verySimpleRead(system, materializer, ec)
      }
    }

    def verySimpleRead(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext): Unit = {
      val query = PersistenceQuery(system).readJournalFor[CouchbaseReadJournal](CouchbaseReadJournal.Identifier)
      val startTime = System.currentTimeMillis()
      query
        .currentEventsByTag("tag1", NoOffset)
        .runFold(0)((count, _) => count + 1)
        .onComplete { t: Try[Int] =>
          system.log.info("Query finished for tag1 in {}. Read {} rows",
                          (System.currentTimeMillis() - startTime).millis.toSeconds,
                          t)
        }
    }

    // Every instance will add 100 persistent actors and second 2 messages to each per 2 seconds
    def testIt(system: ActorSystem, shardedSwitch: ShardedSwitchEntity): Unit = {
      val uuid = UUID.randomUUID()
      val nrSwitches = 100
      def switchName(nr: Int) = s"switch-$uuid-$nr"
      log.info("Creating {} switches with uuid {}", nrSwitches, uuid)
      (0 until nrSwitches) foreach { s =>
        shardedSwitch.tell(switchName(s), SwitchEntity.CreateSwitch(6))
      }
      import system.dispatcher
      system.scheduler.schedule(3.seconds, 2.second) {
        (0 until nrSwitches) foreach { s =>
          val switch = switchName(s)
          log.debug("Sending messages to switch {}", switch)
          shardedSwitch.tell(switch, SwitchEntity.SetPortStatus(Random.nextInt(6), portEnabled = true))
          shardedSwitch.tell(switch, SwitchEntity.SendPortStatus)
        }
      }
    }
  }

} 
Example 125
Source File: Frontend.scala    From diffy   with GNU Affero General Public License v3.0 5 votes vote down vote up
package ai.diffy

import java.util.concurrent.atomic.AtomicInteger

import ai.diffy.IsotopeSdkModule.IsotopeClient
import ai.diffy.proxy.Settings
import com.twitter.finagle.Http
import com.twitter.finagle.http.Request
import com.twitter.finatra.http.Controller
import javax.inject.Inject

class Frontend @Inject()(settings: Settings, isotopeClient: IsotopeClient) extends Controller {

  case class Dashboard(
    serviceName: String,
    serviceClass: String,
    apiRoot: String,
    excludeNoise: Boolean,
    relativeThreshold: Double,
    absoluteThreshold: Double,
    isotopeReason: String,
    hasIsotope: Boolean = false)

  val reasons = Seq(
    "Do you want to compare side effects like logs and downstream interactions?",
    "Do you want to save and share this comparison?",
    "Do you want to organize all your comparisons across all your services and teams in one place?",
    "Do you want to download sampled traffic?"
  )
  val reasonIndex = new AtomicInteger(0)
  get("/") { req: Request =>
    response.ok.view(
      "dashboard.mustache",
      Dashboard(
        settings.serviceName,
        settings.serviceClass,
        settings.apiRoot,
        req.params.getBooleanOrElse("exclude_noise", false),
        settings.relativeThreshold,
        settings.absoluteThreshold,
        reasons(reasonIndex.getAndIncrement() % reasons.length),
        isotopeClient.isConcrete
      )
    )
  }

  get("/css/:*") { request: Request =>
    response.ok.file(request.path)
  }

  get("/scripts/:*") { request: Request =>
    response.ok.file(request.path)
  }
} 
Example 126
Source File: StarWarsData.scala    From sangria-relay   with Apache License 2.0 5 votes vote down vote up
package sangria.relay.starWars

import java.util.concurrent.atomic.AtomicInteger

import sangria.relay.{Identifiable, Node}


object StarWarsData {
  case class Ship(id: String, name: String) extends Node
  case class Faction(id: String, name: String, ships: List[String])

  object Faction {
    implicit object FactionIdentifiable extends Identifiable[Faction] {
      def id(faction: Faction) = faction.id
    }
  }

  object Ships {
    val xwing = Ship("1", "X-Wing")
    val ywing = Ship("2", "Y-Wing")
    val awing = Ship("3", "A-Wing")
    val falcon = Ship("4", "Millenium Falcon")
    val homeOne = Ship("5", "Home One")
    val tieFighter = Ship("6", "TIE Fighter")
    val tieInterceptor = Ship("7", "TIE Interceptor")
    val executor = Ship("8", "Executor")
    
    val All = xwing :: ywing :: awing :: falcon :: homeOne :: tieFighter :: tieInterceptor :: executor :: Nil 
  }

  object Factions {
    val rebels = Faction("1", "Alliance to Restore the Republic", List("1", "2", "3", "4", "5"))
    val empire = Faction("2", "Galactic Empire", List("6", "7", "8"))

    val All = rebels :: empire :: Nil
  }

  class ShipRepo {
    val nextShipId = new AtomicInteger(9)

    var ships = Ships.All
    var factions = Factions.All

    def createShip(shipName: String, factionId: String) = {
      val newShip = Ship("" + nextShipId.getAndIncrement(), shipName)

      ships = ships :+ newShip
      factions = factions.map {
        case f if f.id == factionId => f.copy(ships = f.ships :+ newShip.id)
        case f => f
      }

      newShip
    }

    def getShip(id: String) = ships find (_.id == id)
    def getFaction(id: String) = factions find (_.id == id)
    def getRebels = factions find (_.id == "1")
    def getEmpire = factions find (_.id == "2")
  }
} 
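
A short usage sketch for ShipRepo: ids are handed out by the AtomicInteger starting at 9, so ships created at runtime never collide with the seed data. The ship added below is illustrative.

// Usage sketch for ShipRepo (illustrative values).
import sangria.relay.starWars.StarWarsData

val repo = new StarWarsData.ShipRepo
val bWing = repo.createShip("B-Wing", factionId = "1") // gets id "9" and joins the Rebels
assert(repo.getShip("9").contains(bWing))
assert(repo.getRebels.exists(_.ships.contains("9")))
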
Example 127
Source File: MeansBuilder.scala    From dl4scala   with MIT License 5 votes vote down vote up
package org.dl4scala.examples.nlp.paragraphvectors.tools

import java.util.concurrent.atomic.AtomicInteger

import lombok.NonNull
import org.deeplearning4j.models.embeddings.inmemory.InMemoryLookupTable
import org.deeplearning4j.models.word2vec.VocabWord
import org.deeplearning4j.text.documentiterator.LabelledDocument
import org.deeplearning4j.text.tokenization.tokenizerfactory.TokenizerFactory
import org.nd4j.linalg.api.ndarray.INDArray
import org.nd4j.linalg.factory.Nd4j
import scala.collection.JavaConverters._


  def documentAsVector(@NonNull document: LabelledDocument): INDArray = {
    val documentAsTokens = tokenizerFactory.create(document.getContent).getTokens.asScala
    val cnt = new AtomicInteger(0)

    for (word <- documentAsTokens) {
      if (vocabCache.containsWord(word)) cnt.incrementAndGet
    }

    val allWords = Nd4j.create(cnt.get, lookupTable.layerSize)
    cnt.set(0)

    for (word <- documentAsTokens) {
      if (vocabCache.containsWord(word)) allWords.putRow(cnt.getAndIncrement, lookupTable.vector(word))
    }

    val mean = allWords.mean(0)

    mean
  }
} 
Example 128
Source File: PoolUtils.scala    From cats-effect   with Apache License 2.0 5 votes vote down vote up
package cats.effect
package internals

import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal

import java.util.concurrent.{Executors, ThreadFactory}
import java.util.concurrent.atomic.AtomicInteger

private[internals] object PoolUtils {
  // we can initialize this eagerly because the enclosing object is lazy
  val ioAppGlobal: ExecutionContext = {
    // lower-bound of 2 to prevent pathological deadlocks on virtual machines
    val bound = math.max(2, Runtime.getRuntime().availableProcessors())

    val executor = Executors.newFixedThreadPool(
      bound,
      new ThreadFactory {
        val ctr = new AtomicInteger(0)
        def newThread(r: Runnable): Thread = {
          val back = new Thread(r, s"ioapp-compute-${ctr.getAndIncrement()}")
          back.setDaemon(true)
          back
        }
      }
    )

    exitOnFatal(ExecutionContext.fromExecutor(executor))
  }

  def exitOnFatal(ec: ExecutionContext): ExecutionContext = new ExecutionContext {
    def execute(r: Runnable): Unit =
      ec.execute(new Runnable {
        def run(): Unit =
          try {
            r.run()
          } catch {
            case NonFatal(t) =>
              reportFailure(t)

            case t: Throwable =>
              // under most circumstances, this will work even with fatal errors
              t.printStackTrace()
              System.exit(1)
          }
      })

    def reportFailure(t: Throwable): Unit =
      ec.reportFailure(t)
  }
} 
Example 129
Source File: IoC.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.common.akka

import java.util.concurrent.atomic.AtomicInteger

import _root_.akka.pattern.ask
import akka.actor._
import akka.util.Timeout
import com.typesafe.scalalogging.LazyLogging
import io.vamp.common.Namespace
import io.vamp.common.util.TextUtil

import scala.collection.mutable
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect._

object IoC extends LazyLogging {

  private val counter = new AtomicInteger(0)

  private val aliases: mutable.Map[String, mutable.Map[Class[_], Class[_]]] = mutable.Map()

  private val actorRefs: mutable.Map[String, mutable.Map[Class[_], ActorRef]] = mutable.Map()

  private val namespaceMap: mutable.Map[String, Namespace] = mutable.Map()

  private val namespaceActors: mutable.Map[String, ActorRef] = mutable.Map()

  def namespaces: List[Namespace] = namespaceMap.values.toList

  def alias[FROM: ClassTag](implicit namespace: Namespace): Class[_] = {
    alias(classTag[FROM].runtimeClass)
  }

  def alias(from: Class[_])(implicit namespace: Namespace): Class[_] = {
    aliases.get(namespace.name).flatMap(_.get(from)).getOrElse(from)
  }

  def alias[FROM: ClassTag, TO: ClassTag](implicit namespace: Namespace): Option[Class[_]] = {
    alias(classTag[FROM].runtimeClass, classTag[TO].runtimeClass)
  }

  def alias(from: Class[_], to: Class[_])(implicit namespace: Namespace): Option[Class[_]] = {
    aliases.getOrElseUpdate(namespace.name, mutable.Map()).put(from, to)
  }

  def createActor(clazz: Class[_])(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[ActorRef] = {
    createActor(Props(clazz))
  }

  def createActor[ACTOR: ClassTag](implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[ActorRef] = {
    createActor(classTag[ACTOR].runtimeClass)
  }

  def createActor[ACTOR: ClassTag](arg: Any, args: Any*)(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[ActorRef] = {
    createActor(Props(classTag[ACTOR].runtimeClass, arg :: args.toList: _*))
  }

  def createActor(props: Props)(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[ActorRef] = {
    logger.info(s"Create Actor ${props.clazz.getSimpleName} for namespace ${namespace.name}")
    implicit val ec: ExecutionContext = actorSystem.dispatcher
    (namespaceActor ? props) map {
      case actorRef: ActorRef ⇒
        actorRefs.getOrElseUpdate(namespace.name, mutable.Map()).put(props.clazz, actorRef)
        aliases.getOrElseUpdate(namespace.name, mutable.Map()).foreach {
          case (from, to) if to == props.clazz ⇒ actorRefs.getOrElseUpdate(namespace.name, mutable.Map()).put(from, actorRef)
          case _                               ⇒
        }
        actorRef
      case _ ⇒ throw new RuntimeException(s"Cannot create actor for: ${props.clazz.getSimpleName}")
    }
  }

  def actorFor[ACTOR: ClassTag](implicit actorSystem: ActorSystem, namespace: Namespace): ActorRef = {
    actorFor(classTag[ACTOR].runtimeClass)
  }

  def actorFor(clazz: Class[_])(implicit actorSystem: ActorSystem, namespace: Namespace): ActorRef = {
    actorRefs.get(namespace.name).flatMap(_.get(alias(clazz))) match {
      case Some(actorRef) ⇒ actorRef
      case _              ⇒ throw new RuntimeException(s"No actor reference for: $clazz")
    }
  }

  private def namespaceActor(implicit actorSystem: ActorSystem, namespace: Namespace): ActorRef = {
    namespaceMap.put(namespace.name, namespace)
    namespaceActors.getOrElseUpdate(namespace.name, actorSystem.actorOf(Props(new Actor {
      def receive = {
        case props: Props ⇒ sender() ! context.actorOf(props, s"${TextUtil.toSnakeCase(props.clazz.getSimpleName)}-${counter.getAndIncrement}")
        case _            ⇒
      }
    }), namespace.name))
  }
} 
Example 130
Source File: TransformationFrontend.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package sample.cluster.transformation

import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger

import akka.actor.{ Actor, ActorRef, ActorSystem, Props, Terminated }
import akka.pattern.ask
import akka.util.Timeout
import com.typesafe.config.ConfigFactory

import scala.concurrent.ExecutionContext.Implicits
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

//#frontend
class TransformationFrontend extends Actor {
  var backends = IndexedSeq.empty[ActorRef]
  var jobCounter = 0

  def receive = {
    case job: TransformationJob if backends.isEmpty =>
      sender() ! JobFailed("Service unavailable, try again later", job)

    case job: TransformationJob =>
      jobCounter += 1
      backends(jobCounter % backends.size) forward job

    case BackendRegistration if !backends.contains(sender()) =>
      context watch sender()
      backends = backends :+ sender()

    case Terminated(a) =>
      backends = backends.filterNot(_ == a)
  }
}
//#frontend

object TransformationFrontend {
  def main(args: Array[String]): Unit = {
    // Override the configuration of the port when specified as program argument
    val port = if (args.isEmpty) "0" else args(0)
    val config = ConfigFactory
      .parseString(s"""
        akka.remote.netty.tcp.port=$port
        akka.remote.artery.canonical.port=$port
        """)
      .withFallback(ConfigFactory.parseString("akka.cluster.roles = [frontend]"))
      .withFallback(ConfigFactory.load("simple-cluster"))

    val system = ActorSystem("ClusterSystem", config)
    val frontend =
      system.actorOf(Props[TransformationFrontend], name = "frontend")

    val counter = new AtomicInteger
    import system.dispatcher
    system.scheduler.schedule(2.seconds, 2.seconds) {
      implicit val timeout = Timeout(5 seconds)
      (frontend ? TransformationJob("hello-" + counter.incrementAndGet())) foreach {
        case result => println(result)
      }
    }
    Future {
      TimeUnit.SECONDS.sleep(80)
      system.terminate()
    }(Implicits.global)
  }
} 
Example 131
Source File: TestDeleteTopicsConcurrently.scala    From ohara   with Apache License 2.0 4 votes vote down vote up
package oharastream.ohara.configurator

import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
import java.util.concurrent.{ArrayBlockingQueue, Executors, LinkedBlockingDeque, TimeUnit}

import oharastream.ohara.client.configurator.{BrokerApi, TopicApi}
import oharastream.ohara.common.setting.TopicKey
import oharastream.ohara.common.util.Releasable
import oharastream.ohara.testing.WithBrokerWorker
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
import scala.jdk.CollectionConverters._

class TestDeleteTopicsConcurrently extends WithBrokerWorker {
  private[this] val configurator =
    Configurator.builder.fake(testUtil.brokersConnProps, testUtil().workersConnProps()).build()

  private[this] val topicApi = TopicApi.access.hostname(configurator.hostname).port(configurator.port)

  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(20, TimeUnit.SECONDS))

  private[this] val brokerClusterInfo = result(
    BrokerApi.access.hostname(configurator.hostname).port(configurator.port).list()
  ).head

  @Test
  def test(): Unit = {
    val loopMax       = 10
    val count         = 3
    val topicKeyQueue = new ArrayBlockingQueue[TopicKey](count)
    (0 until count).foreach(i => topicKeyQueue.put(TopicKey.of("test", i.toString)))
    val executors      = Executors.newFixedThreadPool(count)
    val exceptionQueue = new LinkedBlockingDeque[Throwable]()
    val closed         = new AtomicBoolean(false)
    val loopCount      = new AtomicInteger(0)
    (0 until count).foreach(
      _ =>
        executors.execute { () =>
          while (!closed.get() && loopCount.getAndIncrement() <= loopMax) try {
            val topicKey = topicKeyQueue.take()
            try result(
              topicApi.request
                .group(topicKey.group())
                .name(topicKey.name())
                .brokerClusterKey(brokerClusterInfo.key)
                .numberOfPartitions(1)
                .numberOfReplications(1)
                .create()
                .flatMap(_ => topicApi.start(topicKey))
                .flatMap { _ =>
                  TimeUnit.SECONDS.sleep(1)
                  topicApi.stop(topicKey)
                }
                .flatMap { _ =>
                  TimeUnit.SECONDS.sleep(1)
                  topicApi.delete(topicKey)
                }
            )
            finally topicKeyQueue.put(topicKey)
          } catch {
            case t: Throwable =>
              exceptionQueue.put(t)
              closed.set(true)
          }
        }
    )
    executors.shutdown()
    withClue(s"${exceptionQueue.asScala.map(_.getMessage).mkString(",")}") {
      executors.awaitTermination(60, TimeUnit.SECONDS) shouldBe true
    }
    exceptionQueue.size() shouldBe 0
  }
  @After
  def tearDown(): Unit = Releasable.close(configurator)
}