java.util.concurrent.atomic.AtomicBoolean Scala Examples

The following examples show how to use java.util.concurrent.atomic.AtomicBoolean in Scala. Each example is taken from an open-source project; the source file, project, and license are listed above the code.
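
Across these examples, AtomicBoolean plays two recurring roles: a cooperative shutdown flag that a worker loop polls, and a guard that makes close() idempotent via compareAndSet so resources are released exactly once. Below is a minimal sketch of both patterns; the Worker class and its members are illustrative only and are not taken from any of the projects listed here.

import java.util.concurrent.atomic.AtomicBoolean

// Illustrative sketch only: a worker that can be stopped cooperatively and closed exactly once.
class Worker extends Runnable with AutoCloseable {
  private[this] val stopped = new AtomicBoolean(false)
  private[this] val closed  = new AtomicBoolean(false)

  override def run(): Unit =
    while (!stopped.get) {
      // do one unit of work here, e.g. poll a queue or a Kafka consumer
      Thread.sleep(10)
    }

  def stop(): Unit = stopped.set(true)

  // compareAndSet(false, true) succeeds for exactly one caller, so the release
  // logic below runs at most once even if close() is invoked concurrently.
  override def close(): Unit =
    if (closed.compareAndSet(false, true)) {
      stop()
      // release any other resources here
    }
}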
Example 1
Source File: DataGroup.scala    From ohara   with Apache License 2.0
package oharastream.ohara.shabondi.sink

import java.time.{Duration => JDuration}
import java.util.concurrent.atomic.AtomicBoolean
import java.util.function.Consumer

import oharastream.ohara.common.util.Releasable
import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.setting.{ObjectKey, TopicKey}
import oharastream.ohara.metrics.basic.Counter

private[sink] class DataGroup(
  val name: String,
  objectKey: ObjectKey,
  brokerProps: String,
  topicKeys: Set[TopicKey],
  pollTimeout: JDuration
) extends Releasable {
  private val log = Logger(classOf[RowQueue])

  private val rowCounter: Counter =
    Counter.builder
      .key(objectKey)
      .item(s"rows-$name")
      .unit("row")
      .document(s"The number of received rows of group $name")
      .value(0)
      .register()

  val queue                = new RowQueue
  val queueProducer        = new QueueProducer(name, queue, brokerProps, topicKeys, pollTimeout, rowCounter)
  private[this] val closed = new AtomicBoolean(false)

  def resume(): Unit =
    if (!closed.get) {
      queueProducer.resume()
    }

  def pause(): Unit =
    if (!closed.get) {
      queueProducer.pause()
    }

  def isIdle(idleTime: JDuration): Boolean = queue.isIdle(idleTime)

  override def close(): Unit = {
    if (closed.compareAndSet(false, true)) {
      var exception: Throwable = null
      val addSuppressedException: Consumer[Throwable] = (ex: Throwable) => {
        if (exception == null) exception = ex else exception.addSuppressed(ex)
      }
      Releasable.close(queueProducer, addSuppressedException)
      Releasable.close(rowCounter, addSuppressedException)
      if (exception != null) throw exception
      log.info("Group {} closed.", name)
    }
  }
} 
Example 2
Source File: QueueProducer.scala    From ohara   with Apache License 2.0
package oharastream.ohara.shabondi.sink

import java.time.{Duration => JDuration}
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean
import java.util.{Queue => JQueue}

import oharastream.ohara.common.data.{Row, Serializer}
import oharastream.ohara.common.util.Releasable
import oharastream.ohara.kafka.Consumer
import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.setting.TopicKey
import oharastream.ohara.metrics.basic.Counter

import scala.jdk.CollectionConverters._

private[sink] class QueueProducer(
  val groupName: String,
  val queue: JQueue[Row],
  val brokerProps: String,
  val topicKeys: Set[TopicKey],
  val pollTimeout: JDuration,
  val rowCounter: Counter
) extends Runnable
    with Releasable {
  private[this] val log                    = Logger(classOf[QueueProducer])
  private[this] val paused: AtomicBoolean  = new AtomicBoolean(false)
  private[this] val stopped: AtomicBoolean = new AtomicBoolean(false)

  private[this] val consumer: Consumer[Row, Array[Byte]] = Consumer
    .builder()
    .keySerializer(Serializer.ROW)
    .valueSerializer(Serializer.BYTES)
    .offsetFromBegin()
    .topicKeys(topicKeys.asJava)
    .connectionProps(brokerProps)
    .build()

  override def run(): Unit = {
    log.info(
      "{} group `{}` start.(topics={}, brokerProps={})",
      this.getClass.getSimpleName,
      groupName,
      topicKeys.mkString(","),
      brokerProps
    )
    try {
      while (!stopped.get) {
        if (!paused.get && queue.isEmpty) {
          val rows = consumer.poll(pollTimeout).asScala.map(_.key.get)
          rows.foreach { r =>
            queue.add(r)
            rowCounter.incrementAndGet()
          }
          log.trace("    group[{}], queue: {}, rows: {}", groupName, queue.size, rows.size)
        } else {
          TimeUnit.MILLISECONDS.sleep(10)
        }
      } // while
    } finally {
      consumer.close()
      log.info("stopped.")
    }
  }

  override def close(): Unit = {
    stop()
  }

  def stop(): Unit = {
    stopped.set(true)
  }

  def pause(): Unit = {
    if (paused.compareAndSet(false, true)) {
      log.info("{} paused.", this.getClass.getSimpleName)
    }
  }

  def resume(): Unit = {
    if (paused.compareAndSet(true, false)) {
      log.info("{} resumed.", this.getClass.getSimpleName)
    }
  }
} 
Example 3
Source File: MetricsCache.scala    From ohara   with Apache License 2.0
package oharastream.ohara.configurator.store

import java.util.Objects
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean

import oharastream.ohara.client.configurator.BrokerApi.BrokerClusterInfo
import oharastream.ohara.client.configurator.ClusterInfo
import oharastream.ohara.client.configurator.MetricsApi.Metrics
import oharastream.ohara.client.configurator.ShabondiApi.ShabondiClusterInfo
import oharastream.ohara.client.configurator.StreamApi.StreamClusterInfo
import oharastream.ohara.client.configurator.WorkerApi.WorkerClusterInfo
import oharastream.ohara.client.configurator.ZookeeperApi.ZookeeperClusterInfo
import oharastream.ohara.common.annotations.{Optional, VisibleForTesting}
import oharastream.ohara.common.cache.RefreshableCache
import oharastream.ohara.common.setting.ObjectKey
import oharastream.ohara.common.util.Releasable

import scala.concurrent.duration.Duration

trait MetricsCache extends Releasable {

  // Elided in this excerpt: the abstract per-cluster method; its signature matches the override in Builder.build below.
  def meters(clusterInfo: ClusterInfo): Map[String, Map[ObjectKey, Metrics]]

  def meters(clusterInfo: ClusterInfo, key: ObjectKey): Map[String, Metrics] =
    meters(clusterInfo)
      .map {
        case (hostname, keyAndMeters) =>
          hostname -> keyAndMeters.getOrElse(key, Metrics.EMPTY)
      }
}

object MetricsCache {
  def builder: Builder = new Builder()

  // TODO: remove this workaround if google guava support the custom comparison ... by chia
  @VisibleForTesting
  private[store] case class RequestKey(key: ObjectKey, service: String) {
    override def equals(obj: Any): Boolean = obj match {
      case another: RequestKey => another.key == key && another.service == service
      case _                   => false
    }
    override def hashCode(): Int  = 31 * key.hashCode + service.hashCode
    override def toString: String = s"key:$key, service:$service"
  }

  class Builder private[MetricsCache] extends oharastream.ohara.common.pattern.Builder[MetricsCache] {
    private[this] var refresher: () => Map[ClusterInfo, Map[String, Map[ObjectKey, Metrics]]] = _
    private[this] var frequency: Duration                                                     = Duration(5, TimeUnit.SECONDS)

    def refresher(refresher: () => Map[ClusterInfo, Map[String, Map[ObjectKey, Metrics]]]): Builder = {
      this.refresher = Objects.requireNonNull(refresher)
      this
    }

    @Optional("default value is equal to timeout")
    def frequency(frequency: Duration): Builder = {
      this.frequency = Objects.requireNonNull(frequency)
      this
    }

    override def build: MetricsCache = new MetricsCache {
      import scala.jdk.CollectionConverters._
      private[this] val refresher = Objects.requireNonNull(Builder.this.refresher)
      private[this] val closed    = new AtomicBoolean(false)
      private[this] val cache = RefreshableCache
        .builder[RequestKey, Map[String, Map[ObjectKey, Metrics]]]()
        .supplier(
          () =>
            refresher().map {
              case (clusterInfo, meters) =>
                key(clusterInfo) -> meters
            }.asJava
        )
        .frequency(java.time.Duration.ofMillis(frequency.toMillis))
        .build()

      private[this] def key(clusterInfo: ClusterInfo): RequestKey = RequestKey(
        key = clusterInfo.key,
        service = clusterInfo match {
          case _: ZookeeperClusterInfo => "zk"
          case _: BrokerClusterInfo    => "bk"
          case _: WorkerClusterInfo    => "wk"
          case _: StreamClusterInfo    => "stream"
          case _: ShabondiClusterInfo  => "shabondi"
          case c: ClusterInfo          => c.getClass.getSimpleName // used by testing
        }
      )

      override def meters(clusterInfo: ClusterInfo): Map[String, Map[ObjectKey, Metrics]] =
        cache.get(key(clusterInfo)).orElse(Map.empty)

      override def close(): Unit = if (closed.compareAndSet(false, true)) Releasable.close(cache)
    }
  }
} 
Example 4
Source File: SimpleRowSourceTask.scala    From ohara   with Apache License 2.0
package oharastream.ohara.client.kafka

import java.util
import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.atomic.AtomicBoolean

import oharastream.ohara.common.data.{Row, Serializer}
import oharastream.ohara.common.setting.TopicKey
import oharastream.ohara.common.util.Releasable
import oharastream.ohara.kafka.Consumer
import oharastream.ohara.kafka.connector.{RowSourceRecord, RowSourceTask, TaskSetting}

import scala.jdk.CollectionConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
class SimpleRowSourceTask extends RowSourceTask {
  private[this] var settings: TaskSetting                = _
  private[this] val queue                                = new LinkedBlockingQueue[RowSourceRecord]
  private[this] val closed                               = new AtomicBoolean(false)
  private[this] var consumer: Consumer[Row, Array[Byte]] = _
  override protected def run(settings: TaskSetting): Unit = {
    this.settings = settings
    this.consumer = Consumer
      .builder()
      .connectionProps(settings.stringValue(SimpleRowSourceConnector.BROKER))
      .groupId(settings.name)
      .topicKeys(java.util.Set.copyOf(TopicKey.toTopicKeys(settings.stringValue(SimpleRowSourceConnector.INPUT))))
      .offsetFromBegin()
      .keySerializer(Serializer.ROW)
      .valueSerializer(Serializer.BYTES)
      .build()
    Future {
      try while (!closed.get) {
        consumer
          .poll(java.time.Duration.ofSeconds(2))
          .asScala
          .filter(_.key.isPresent)
          .map(_.key.get)
          .flatMap(
            row => settings.topicKeys().asScala.map(topic => RowSourceRecord.builder().row(row).topicKey(topic).build())
          )
          .foreach(r => queue.put(r))
      } finally Releasable.close(consumer)
    }
  }

  override protected def pollRecords(): util.List[RowSourceRecord] =
    Iterator.continually(queue.poll()).takeWhile(_ != null).toSeq.asJava

  override protected def terminate(): Unit = {
    closed.set(true)
    consumer.wakeup()
  }
} 
Example 5
Source File: SandboxResetService.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.services

import java.util.concurrent.atomic.AtomicBoolean

import com.daml.dec.{DirectExecutionContext => DE}
import com.daml.ledger.api.auth.Authorizer
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.v1.testing.reset_service.{ResetRequest, ResetServiceGrpc}
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import com.daml.platform.server.api.validation.ErrorFactories
import com.google.protobuf.empty.Empty
import io.grpc.ServerCall.Listener
import io.grpc._

import scala.concurrent.Future

class SandboxResetService(
    ledgerId: LedgerId,
    resetAndRestartServer: () => Future[Unit],
    authorizer: Authorizer,
)(implicit logCtx: LoggingContext)
    extends ResetServiceGrpc.ResetService
    with BindableService
    with ServerInterceptor {

  private val logger = ContextualizedLogger.get(this.getClass)

  private val resetInitialized = new AtomicBoolean(false)

  override def bindService(): ServerServiceDefinition =
    ResetServiceGrpc.bindService(this, DE)

  override def reset(request: ResetRequest): Future[Empty] =
    authorizer.requireAdminClaims(doReset)(request)

  override def interceptCall[ReqT, RespT](
      serverCall: ServerCall[ReqT, RespT],
      metadata: Metadata,
      serverCallHandler: ServerCallHandler[ReqT, RespT]): Listener[ReqT] = {
    if (resetInitialized.get) {
      throw new StatusRuntimeException(
        Status.UNAVAILABLE.withDescription("Sandbox server is currently being reset"))
    }

    serverCallHandler.startCall(serverCall, metadata)
  }

  // to reset:
  // * initiate a graceful shutdown -- note that this won't kill the in-flight requests, including
  //   the reset request we are currently serving;
  // * serve the response to the reset request;
  // * then, close all the services so hopefully the graceful shutdown will terminate quickly...
  // * ...but not before serving the response to the reset request itself, which we've already done.
  private def doReset(request: ResetRequest): Future[Empty] =
    Either
      .cond(
        ledgerId == LedgerId(request.ledgerId),
        request.ledgerId,
        ErrorFactories.ledgerIdMismatch(ledgerId, LedgerId(request.ledgerId)))
      .fold(Future.failed[Empty], _ => actuallyReset().map(_ => Empty())(DE))

  private def actuallyReset() = {
    logger.info("Initiating server reset.")

    if (!resetInitialized.compareAndSet(false, true))
      throw new StatusRuntimeException(
        Status.FAILED_PRECONDITION.withDescription("Sandbox server is currently being reset"))

    logger.info(s"Stopping and starting the server.")
    resetAndRestartServer()
  }
} 
Example 6
Source File: BatchingQueue.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.participant.state.kvutils.api

import java.io.Closeable
import java.util.concurrent.atomic.AtomicBoolean

import akka.stream.scaladsl.{Sink, Source, SourceQueueWithComplete}
import akka.stream.{Materializer, OverflowStrategy, QueueOfferResult}
import com.daml.ledger.participant.state.kvutils.DamlKvutils.DamlSubmissionBatch
import com.daml.ledger.participant.state.v1.SubmissionResult

import scala.concurrent.Future
import scala.concurrent.duration._

object BatchingQueue {
  type CommitBatchFunction =
    Seq[DamlSubmissionBatch.CorrelatedSubmission] => Future[Unit]
}
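
// Elided in this excerpt: the BatchingQueue and RunningBatchingQueueHandle traits.
// Their shapes are reconstructed here from the members implemented further below.
trait BatchingQueue {
  def run(commitBatch: BatchingQueue.CommitBatchFunction)(
      implicit materializer: Materializer): RunningBatchingQueueHandle
}

trait RunningBatchingQueueHandle extends Closeable {
  def alive: Boolean
  def offer(submission: DamlSubmissionBatch.CorrelatedSubmission): Future[SubmissionResult]
}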


case class DefaultBatchingQueue(
    maxQueueSize: Int,
    maxBatchSizeBytes: Long,
    maxWaitDuration: FiniteDuration,
    maxConcurrentCommits: Int
) extends BatchingQueue {
  private val queue: Source[
    Seq[DamlSubmissionBatch.CorrelatedSubmission],
    SourceQueueWithComplete[DamlSubmissionBatch.CorrelatedSubmission]] =
    Source
      .queue(maxQueueSize, OverflowStrategy.dropNew)
      .groupedWeightedWithin(maxBatchSizeBytes, maxWaitDuration)(
        (cs: DamlSubmissionBatch.CorrelatedSubmission) => cs.getSubmission.size.toLong)

  def run(commitBatch: Seq[DamlSubmissionBatch.CorrelatedSubmission] => Future[Unit])(
      implicit materializer: Materializer): RunningBatchingQueueHandle = {
    val materializedQueue = queue
      .mapAsync(maxConcurrentCommits)(commitBatch)
      .to(Sink.ignore)
      .run()

    val queueAlive = new AtomicBoolean(true)
    materializedQueue.watchCompletion.foreach { _ =>
      queueAlive.set(false)
    }(materializer.executionContext)

    new RunningBatchingQueueHandle {
      override def alive: Boolean = queueAlive.get()

      override def offer(
          submission: DamlSubmissionBatch.CorrelatedSubmission): Future[SubmissionResult] = {
        materializedQueue
          .offer(submission)
          .map {
            case QueueOfferResult.Enqueued => SubmissionResult.Acknowledged
            case QueueOfferResult.Dropped => SubmissionResult.Overloaded
            case f: QueueOfferResult.Failure => SubmissionResult.InternalError(f.toString)
            case QueueOfferResult.QueueClosed =>
              SubmissionResult.InternalError("DefaultBatchingQueue.queue is closed")
          }(materializer.executionContext)
      }

      override def close(): Unit = {
        materializedQueue.complete()
      }
    }
  }
} 
Example 7
Source File: TestResourceOwner.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.resources

import java.util.concurrent.atomic.AtomicBoolean

import com.daml.resources.TestResourceOwner._

import scala.concurrent.{ExecutionContext, Future}

final class TestResourceOwner[T](acquire: Future[T], release: T => Future[Unit])
    extends ResourceOwner[T] {
  private val acquired = new AtomicBoolean(false)

  def hasBeenAcquired: Boolean = acquired.get

  def acquire()(implicit executionContext: ExecutionContext): Resource[T] = {
    if (!acquired.compareAndSet(false, true)) {
      throw new TriedToAcquireTwice
    }
    Resource(acquire)(
      value =>
        if (acquired.compareAndSet(true, false))
          release(value)
        else
          Future.failed(new TriedToReleaseTwice)
    )
  }
}

object TestResourceOwner {
  def apply[T](value: T): TestResourceOwner[T] =
    new TestResourceOwner(Future.successful(value), _ => Future.successful(()))

  final class TriedToAcquireTwice extends Exception("Tried to acquire twice.")

  final class TriedToReleaseTwice extends Exception("Tried to release twice.")
} 
Example 8
Source File: TestCloseable.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.resources

import java.util.concurrent.atomic.AtomicBoolean

import com.daml.resources.TestCloseable._

final class TestCloseable[T](val value: T, acquired: AtomicBoolean) extends AutoCloseable {
  if (!acquired.compareAndSet(false, true)) {
    throw new TriedToAcquireTwice
  }

  override def close(): Unit = {
    if (!acquired.compareAndSet(true, false)) {
      throw new TriedToReleaseTwice
    }
  }
}

object TestCloseable {
  final class TriedToAcquireTwice extends Exception("Tried to acquire twice.")

  final class TriedToReleaseTwice extends Exception("Tried to release twice.")
} 
Example 9
Source File: SchedulerExtensionService.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.scheduler.cluster

import java.util.concurrent.atomic.AtomicBoolean

import org.apache.hadoop.yarn.api.records.{ApplicationAttemptId, ApplicationId}

import org.apache.spark.SparkContext
import org.apache.spark.deploy.yarn.config._
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils
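
// Excerpt only: the enclosing SchedulerExtensionServices class declaration and its
// `started: AtomicBoolean`, `services`, and `serviceOption` members are not shown here.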


  override def stop(): Unit = {
    if (started.getAndSet(false)) {
      logInfo(s"Stopping $this")
      services.foreach { s =>
        Utils.tryLogNonFatalError(s.stop())
      }
    }
  }

  override def toString(): String = s"""SchedulerExtensionServices
    |(serviceOption=$serviceOption,
    | services=$services,
    | started=$started)""".stripMargin
} 
Example 10
Source File: NTP.scala    From matcher   with MIT License
package com.wavesplatform.dex.time

import java.net.{InetAddress, SocketTimeoutException}
import java.util.concurrent.RejectedExecutionException
import java.util.concurrent.atomic.AtomicBoolean

import com.wavesplatform.dex.domain.utils.ScorexLogging
import monix.eval.Task
import monix.execution.schedulers.SchedulerService
import monix.execution.{ExecutionModel, Scheduler}
import org.apache.commons.net.ntp.NTPUDPClient

import scala.concurrent.duration.DurationInt

class NTP(ntpServer: String) extends Time with ScorexLogging with AutoCloseable {

  private val offsetPanicThreshold = 1000000L
  private val ExpirationTimeout    = 60.seconds
  private val RetryDelay           = 10.seconds
  private val ResponseTimeout      = 10.seconds

  private val duringShutdown = new AtomicBoolean(false)

  private implicit val scheduler: SchedulerService = Scheduler.singleThread(
    name = "time-impl",
    daemonic = false,
    executionModel = ExecutionModel.AlwaysAsyncExecution,
    reporter = {
      case _: RejectedExecutionException if duringShutdown.get() => // ignore
      case e: Throwable                                          => log.error("An uncaught error", e)
    }
  )

  private val client = new NTPUDPClient()
  client.setDefaultTimeout(ResponseTimeout.toMillis.toInt)

  @volatile private var offset = 0L
  private val updateTask: Task[Unit] = {
    def newOffsetTask: Task[Option[(InetAddress, java.lang.Long)]] = Task {
      try {
        val info = client.getTime(InetAddress.getByName(ntpServer))
        info.computeDetails()
        Option(info.getOffset).map { offset =>
          val r = if (Math.abs(offset) > offsetPanicThreshold) throw new Exception("Offset is suspiciously large") else offset
          (info.getAddress, r)
        }
      } catch {
        case _: SocketTimeoutException =>
          None
        case t: Throwable =>
          log.warn("Problems with NTP: ", t)
          None
      }
    }

    newOffsetTask.flatMap {
      case None if !scheduler.isShutdown => updateTask.delayExecution(RetryDelay)
      case Some((server, newOffset)) if !scheduler.isShutdown =>
        log.trace(s"Adjusting time with $newOffset milliseconds, source: ${server.getHostAddress}.")
        offset = newOffset
        updateTask.delayExecution(ExpirationTimeout)
      case _ => Task.unit
    }
  }

  def correctedTime(): Long = System.currentTimeMillis() + offset

  private var txTime: Long = 0

  def getTimestamp(): Long = {
    txTime = Math.max(correctedTime(), txTime + 1)
    txTime
  }

  private val taskHandle = updateTask.runAsync {
    case Left(e) => log.error(s"Error executing task", e)
    case _       =>
  }

  override def close(): Unit = if (duringShutdown.compareAndSet(false, true)) {
    log.info("Shutting down Time")
    taskHandle.cancel()
    if (client.isOpen) client.close()
    scheduler.shutdown()
  }
} 
Example 11
Source File: EmbeddedCassandra.scala    From phantom-activator-template   with Apache License 2.0
package controllers

import java.io.File
import java.util.concurrent.atomic.AtomicBoolean

import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.slf4j.Logger

import scala.concurrent.blocking
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}
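
// Excerpt only: the enclosing EmbeddedCassandra helper (with the `started: AtomicBoolean`
// field used by start() and cleanup() below) is not shown here.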


  def start(logger: Logger, config: Option[File] = None, timeout: Option[Int] = None): Unit = {
    this.synchronized {
      if (started.compareAndSet(false, true)) {
        blocking {
          val configFile = config.map(_.toURI.toString) getOrElse EmbeddedCassandraServerHelper.DEFAULT_CASSANDRA_YML_FILE
          System.setProperty("cassandra.config", configFile)
          Try {
            EmbeddedCassandraServerHelper.mkdirs()
          } match {
            case Success(_) => logger.info("Successfully created directories for embedded Cassandra.")
            case Failure(NonFatal(e)) =>
              logger.error(s"Error creating embedded Cassandra directories: ${e.getMessage}")
          }

          (config, timeout) match {
            case (Some(file), None) =>
              logger.info(s"Starting Cassandra in embedded mode with configuration from $file.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra(
                file,
                EmbeddedCassandraServerHelper.DEFAULT_TMP_DIR,
                EmbeddedCassandraServerHelper.DEFAULT_STARTUP_TIMEOUT
              )
            case (Some(file), Some(time)) =>
              logger.info(s"Starting Cassandra in embedded mode with configuration from $file and timeout set to $timeout ms.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra(
                file,
                EmbeddedCassandraServerHelper.DEFAULT_TMP_DIR,
                time
              )

            case (None, Some(time)) =>
              logger.info(s"Starting Cassandra in embedded mode with default configuration and timeout set to $timeout ms.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra(time)
            case (None, None) =>
              logger.info("Starting Cassandra in embedded mode with default configuration.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra()
              logger.info("Successfully started embedded Cassandra")
          }
        }
      }
      else {
        logger.info("Embedded Cassandra has already been started")
      }
    }
  }


  def cleanup(logger: Logger): Unit = {
    this.synchronized {
      if (started.compareAndSet(true, false)) {
        logger.info("Cleaning up embedded Cassandra")
        EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()
      } else {
        logger.info("Cassandra is not running, not cleaning up")
      }
    }
  }
} 
Example 12
Source File: Humid.scala    From aws-training-demo   with Apache License 2.0
package aws.daleks.util

import java.util.concurrent.atomic.AtomicBoolean

sealed trait Humid[+A] extends Traversable[A] {
  def flatMap[B](f: A => Humid[B]): Humid[B]
  def get:A
  def isEmpty:Boolean

  override def foreach[U](f: A => U): Unit =
      if (! isEmpty) f(get)
}

case class Wet[+A](a: A) extends Humid[A] {
  override def flatMap[B](f: A => Humid[B]): Humid[B] = f(a)
  override def get:A = a
  override def isEmpty:Boolean = false
}

case object Dry extends Humid[Nothing] {
  override def flatMap[B](f: Nothing => Humid[B]): Humid[B] = Dry
  override def isEmpty = true
  override def get = throw new NoSuchElementException("Dry.get")
}

object Humid {
  val globalDry = new AtomicBoolean(true)
  def isDry = globalDry.get
  def apply[A](x:A):Humid[A] = if (isDry) Dry else Wet(x)
}

object HumidTest extends App {
  Humid("Uala !!! 1").foreach(println(_));
  Humid.globalDry.set(false)
  Humid("Uala !!! 2").foreach(println(_));
  val t = Humid("Something").map {i => i.toUpperCase() + " uala" }
  t.foreach(println(_))


} 
Example 13
Source File: Subscription.scala    From eel-sdk   with Apache License 2.0
package io.eels.datastream

import java.util.concurrent.atomic.AtomicBoolean

import com.sksamuel.exts.Logging

trait Subscription {
  def cancel()
}

object Subscription {

  val empty = new Subscription {
    override def cancel(): Unit = ()
  }

  def fromRunning(running: AtomicBoolean) = new Subscription with Logging {
    assert(running.get)
    override def cancel(): Unit = {
      logger.debug(s"Subscription cancellation requested")
      running.set(false)
    }
  }
} 
Example 14
Source File: DataStreamPublisher.scala    From eel-sdk   with Apache License 2.0
package io.eels.datastream

import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.atomic.{AtomicBoolean, AtomicMarkableReference, AtomicReference}

import com.sksamuel.exts.collection.BlockingQueueConcurrentIterator
import io.eels.Row
import io.eels.schema.StructType


class DataStreamPublisher(override val schema: StructType) extends DataStream {

  private val queue = new LinkedBlockingQueue[Seq[Row]]
  private val running = new AtomicBoolean(true)
  private val failure = new AtomicReference[Throwable](null)

  def isCancelled: Boolean = !running.get

  override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {
    try {
      subscriber.subscribed(new Subscription {
        override def cancel(): Unit = {
          queue.clear()
          queue.put(Row.Sentinel)
          running.set(false)
        }
      })
      BlockingQueueConcurrentIterator(queue, Row.Sentinel).takeWhile(_ => running.get).foreach(subscriber.next)
      failure.get match {
        case t: Throwable => subscriber.error(t)
        case _ => subscriber.completed()
      }
    } catch {
      case t: Throwable => subscriber.error(t)
    }
  }

  def publish(row: Seq[Row]): Unit = queue.put(row)
  def error(t: Throwable): Unit = {
    failure.set(t)
    queue.clear()
    queue.add(Row.Sentinel)
  }
  def close(): Unit = queue.add(Row.Sentinel)
} 
Example 15
Source File: AvroSource.scala    From eel-sdk   with Apache License 2.0
package io.eels.component.avro

import java.io.File
import java.util.concurrent.atomic.AtomicBoolean

import com.sksamuel.exts.Logging
import com.sksamuel.exts.io.Using
import io.eels._
import io.eels.datastream.{DataStream, Publisher, Subscriber, Subscription}
import io.eels.schema.StructType
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

case class AvroSource(path: Path)
                     (implicit conf: Configuration, fs: FileSystem) extends Source with Using {

  override lazy val schema: StructType = {
    using(AvroReaderFns.createAvroReader(path)) { reader =>
      val record = reader.next()
      AvroSchemaFns.fromAvroSchema(record.getSchema)
    }
  }

  override def parts(): Seq[Publisher[Seq[Row]]] = Seq(AvroSourcePublisher(path))
}

case class AvroSourcePublisher(path: Path)
                              (implicit conf: Configuration, fs: FileSystem)
  extends Publisher[Seq[Row]] with Logging with Using {
  override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {
    val deserializer = new AvroDeserializer()
    try {
      using(AvroReaderFns.createAvroReader(path)) { reader =>
        val running = new AtomicBoolean(true)
        subscriber.subscribed(Subscription.fromRunning(running))
        AvroRecordIterator(reader)
          .takeWhile(_ => running.get)
          .map(deserializer.toRow)
          .grouped(DataStream.DefaultBatchSize)
          .foreach(subscriber.next)
        subscriber.completed()
      }
    } catch {
      case t: Throwable => subscriber.error(t)
    }
  }
}

object AvroSource {
  def apply(file: File)(implicit conf: Configuration, fs: FileSystem): AvroSource = AvroSource(new Path(file.getAbsoluteFile.toString))
  def apply(path: java.nio.file.Path)(implicit conf: Configuration, fs: FileSystem): AvroSource = apply(path.toFile)
} 
Example 16
Source File: JdbcPublisher.scala    From eel-sdk   with Apache License 2.0
package io.eels.component.jdbc

import java.sql.{Connection, PreparedStatement}
import java.util.concurrent.atomic.AtomicBoolean

import com.sksamuel.exts.io.Using
import com.sksamuel.exts.metrics.Timed
import io.eels.Row
import io.eels.component.jdbc.dialect.JdbcDialect
import io.eels.datastream.{Publisher, Subscriber, Subscription}

import scala.collection.mutable.ArrayBuffer

class JdbcPublisher(connFn: () => Connection,
                    query: String,
                    bindFn: (PreparedStatement) => Unit,
                    fetchSize: Int,
                    dialect: JdbcDialect
              ) extends Publisher[Seq[Row]] with Timed with JdbcPrimitives with Using {

  override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {
    try {
      using(connFn()) { conn =>

        logger.debug(s"Preparing query $query")
        using(conn.prepareStatement(query)) { stmt =>

          stmt.setFetchSize(fetchSize)
          bindFn(stmt)

          logger.debug(s"Executing query $query")
          using(stmt.executeQuery()) { rs =>

            val schema = schemaFor(dialect, rs)

            val running = new AtomicBoolean(true)
            subscriber.subscribed(Subscription.fromRunning(running))

            val buffer = new ArrayBuffer[Row](fetchSize)
            while (rs.next && running.get) {
              val values = schema.fieldNames().map { name =>
                val raw = rs.getObject(name)
                dialect.sanitize(raw)
              }
              buffer append Row(schema, values)
              if (buffer.size == fetchSize) {
                subscriber.next(buffer.toVector)
                buffer.clear()
              }
            }

            if (buffer.nonEmpty)
              subscriber.next(buffer.toVector)

            subscriber.completed()
          }
        }
      }
    } catch {
      case t: Throwable => subscriber.error(t)
    }
  }
} 
Example 17
Source File: SequenceSource.scala    From eel-sdk   with Apache License 2.0
package io.eels.component.sequence

import java.util.concurrent.atomic.AtomicBoolean

import com.sksamuel.exts.Logging
import com.sksamuel.exts.io.Using
import io.eels._
import io.eels.datastream.{DataStream, Publisher, Subscriber, Subscription}
import io.eels.schema.StructType
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{BytesWritable, IntWritable, SequenceFile}

case class SequenceSource(path: Path)(implicit conf: Configuration) extends Source with Logging {
  logger.debug(s"Creating sequence source from $path")

  override def schema: StructType = SequenceSupport.schema(path)
  override def parts(): Seq[Publisher[Seq[Row]]] = List(new SequencePublisher(path))
}

object SequenceReaderIterator {
  def apply(schema: StructType, reader: SequenceFile.Reader): Iterator[Row] = new Iterator[Row] {
    private val k = new IntWritable()
    private val v = new BytesWritable()
    // throw away the header
    reader.next(k, v)
    override def next(): Row = Row(schema, SequenceSupport.toValues(v).toVector)
    override def hasNext(): Boolean = reader.next(k, v)
  }
}

class SequencePublisher(val path: Path)(implicit conf: Configuration) extends Publisher[Seq[Row]] with Logging with Using {

  override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {
    try {
      using(SequenceSupport.createReader(path)) { reader =>
        val schema = SequenceSupport.schema(path)
        val running = new AtomicBoolean(true)
        subscriber.subscribed(Subscription.fromRunning(running))
        SequenceReaderIterator(schema, reader)
          .takeWhile(_ => running.get)
          .grouped(DataStream.DefaultBatchSize)
          .foreach(subscriber.next)

        subscriber.completed()
      }
    } catch {
      case t: Throwable => subscriber.error(t)
    }
  }
} 
Example 18
Source File: ParquetPublisher.scala    From eel-sdk   with Apache License 2.0
package io.eels.component.parquet

import java.util.concurrent.atomic.AtomicBoolean

import com.sksamuel.exts.Logging
import com.sksamuel.exts.OptionImplicits._
import com.sksamuel.exts.io.Using
import io.eels.component.parquet.util.ParquetIterator
import io.eels.datastream.{DataStream, Publisher, Subscriber, Subscription}
import io.eels.schema.StructType
import io.eels.{Predicate, Row}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.parquet.format.converter.ParquetMetadataConverter
import org.apache.parquet.hadoop.ParquetFileReader
import org.apache.parquet.schema.MessageType

class ParquetPublisher(path: Path,
                       predicate: Option[Predicate],
                       projection: Seq[String],
                       caseSensitive: Boolean,
                       dictionaryFiltering: Boolean)
                      (implicit conf: Configuration) extends Publisher[Seq[Row]] with Logging with Using {

  def readSchema: Option[MessageType] = {
    if (projection.isEmpty) None
    else {

      val fileSchema = ParquetFileReader.readFooter(conf, path, ParquetMetadataConverter.NO_FILTER).getFileMetaData.getSchema
      val structType = ParquetSchemaFns.fromParquetMessageType(fileSchema)

      if (caseSensitive) {
        assert(
          structType.fieldNames.toSet.size == structType.fieldNames.map(_.toLowerCase).toSet.size,
          "Cannot use case sensitive = true when this would result in a clash of field names"
        )
      }

      val projectionSchema = StructType(projection.map { field =>
        structType.field(field, caseSensitive).getOrError(s"Requested field $field does not exist in the parquet schema")
      })

      ParquetSchemaFns.toParquetMessageType(projectionSchema).some
    }
  }

  override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {
    try {
      using(RowParquetReaderFn(path, predicate, readSchema, dictionaryFiltering)) { reader =>
        val running = new AtomicBoolean(true)
        subscriber.subscribed(Subscription.fromRunning(running))
        ParquetIterator(reader)
          .grouped(DataStream.DefaultBatchSize)
          .takeWhile(_ => running.get)
          .foreach(subscriber.next)
        subscriber.completed()
      }
    } catch {
      case t: Throwable => subscriber.error(t)
    }
  }
} 
Example 19
Source File: AvroParquetPublisher.scala    From eel-sdk   with Apache License 2.0
package io.eels.component.parquet.avro

import java.util.concurrent.atomic.AtomicBoolean

import com.sksamuel.exts.Logging
import com.sksamuel.exts.io.Using
import io.eels.component.avro.AvroDeserializer
import io.eels.component.parquet.util.ParquetIterator
import io.eels.datastream.{DataStream, Publisher, Subscriber, Subscription}
import io.eels.{Predicate, Row}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path

class AvroParquetPublisher(path: Path,
                           predicate: Option[Predicate])(implicit conf: Configuration)
  extends Publisher[Seq[Row]] with Logging with Using {

  override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {
    try {
      val deser = new AvroDeserializer()
      val running = new AtomicBoolean(true)
      subscriber.subscribed(Subscription.fromRunning(running))
      using(AvroParquetReaderFn(path, predicate, None)) { reader =>
        ParquetIterator(reader)
          .map(deser.toRow)
          .grouped(DataStream.DefaultBatchSize)
          .takeWhile(_ => running.get)
          .foreach(subscriber.next)
      }
      subscriber.completed()
    } catch {
      case t: Throwable => subscriber.error(t)
    }
  }
} 
Example 20
Source File: HdfsWatcher.scala    From eel-sdk   with Apache License 2.0
package io.eels.component.hdfs

import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicBoolean

import com.sksamuel.exts.Logging
import io.eels.util.HdfsIterator
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hdfs.client.HdfsAdmin
import org.apache.hadoop.hdfs.inotify.Event

import scala.concurrent.duration._
import scala.util.control.NonFatal

class HdfsWatcher(path: Path, callback: FileCallback)
                 (implicit fs: FileSystem, conf: Configuration) extends Logging {

  private val files = HdfsIterator.remote(fs.listFiles(path, false)).map(_.getPath).toBuffer
  files.foreach(callback.onStart)

  private val executor = Executors.newSingleThreadExecutor()
  private val running = new AtomicBoolean(true)
  private val interval = 5.seconds

  private val admin = new HdfsAdmin(path.toUri, conf)
  private val eventStream = admin.getInotifyEventStream

  executor.submit(new Runnable {
    override def run(): Unit = {
      while (running.get) {
        try {
          Thread.sleep(interval.toMillis)
          val events = eventStream.take
          for (event <- events.getEvents) {
            event match {
              case create: Event.CreateEvent => callback.onCreate(create)
              case append: Event.AppendEvent => callback.onAppend(append)
              case rename: Event.RenameEvent => callback.onRename(rename)
              case close: Event.CloseEvent => callback.onClose(close)
              case _ =>
            }
          }
        } catch {
          case NonFatal(e) => logger.error("Error while polling fs", e)
        }
      }
    }
  })

  def stop(): Unit = {
    running.set(false)
    executor.shutdownNow()
  }
}

trait FileCallback {
  def onStart(path: Path): Unit
  def onClose(close: Event.CloseEvent): Unit
  def onRename(rename: Event.RenameEvent): Unit
  def onAppend(append: Event.AppendEvent): Unit
  def onCreate(path: Event.CreateEvent): Unit
} 
Example 21
Source File: CsvPublisher.scala    From eel-sdk   with Apache License 2.0
package io.eels.component.csv

import java.io.InputStream
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}

import com.sksamuel.exts.Logging
import com.sksamuel.exts.io.Using
import com.univocity.parsers.csv.CsvParser
import io.eels.Row
import io.eels.datastream.{DataStream, Publisher, Subscriber, Subscription}
import io.eels.schema.StructType

class CsvPublisher(createParser: () => CsvParser,
                   inputFn: () => InputStream,
                   header: Header,
                   schema: StructType) extends Publisher[Seq[Row]] with Logging with Using {

  val rowsToSkip: Int = header match {
    case Header.FirstRow => 1
    case _ => 0
  }

  override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {

    val input = inputFn()
    val parser = createParser()

    try {
      parser.beginParsing(input)

      val running = new AtomicBoolean(true)
      subscriber.subscribed(Subscription.fromRunning(running))

      logger.debug(s"CSV Source will skip $rowsToSkip rows")

      val count = new AtomicLong(0)

      Iterator.continually(parser.parseNext)
        .takeWhile(_ != null)
        .takeWhile(_ => running.get)
        .drop(rowsToSkip)
        .map { record => Row(schema, record.toVector) }
        .grouped(DataStream.DefaultBatchSize)
        .foreach { ts =>
          count.addAndGet(ts.size)
          subscriber.next(ts)
        }

      logger.debug(s"All ${count.get} rows read, notifying subscriber")
      subscriber.completed()

    } catch {
      case t: Throwable =>
        logger.error(s"Error in CSV Source, subscriber will be notified", t)
        subscriber.error(t)

    } finally {
      logger.debug("Closing CSV source resources")
      parser.stopParsing()
    }
  }
} 
Example 22
Source File: KuduSource.scala    From eel-sdk   with Apache License 2.0
package io.eels.component.kudu

import java.util.concurrent.atomic.AtomicBoolean

import com.sksamuel.exts.Logging
import com.sksamuel.exts.io.Using
import io.eels.datastream.{DataStream, Publisher, Subscriber, Subscription}
import io.eels.schema._
import io.eels.{Row, Source}
import org.apache.kudu.client.{KuduClient, KuduScanner, RowResultIterator}

import scala.collection.JavaConverters._

case class KuduSource(tableName: String)(implicit client: KuduClient) extends Source with Logging {

  override lazy val schema: StructType = {
    val schema = client.openTable(tableName).getSchema
    KuduSchemaFns.fromKuduSchema(schema)
  }

  override def parts(): Seq[Publisher[Seq[Row]]] = Seq(new KuduPublisher(tableName))

  class KuduPublisher(tableName: String) extends Publisher[Seq[Row]] with Using {

    override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {

      val projectColumns = schema.fieldNames()
      val table = client.openTable(tableName)

      val scanner = client.newScannerBuilder(table)
        .setProjectedColumnNames(projectColumns.asJava)
        .build

      try {
        val iterator = new ScannerIterator(scanner, schema)
        val running = new AtomicBoolean(true)
        subscriber.subscribed(Subscription.fromRunning(running))
        iterator.takeWhile(_ => running.get).grouped(DataStream.DefaultBatchSize).foreach(subscriber.next)
        subscriber.completed()
      } catch {
        case t: Throwable => subscriber.error(t)
      } finally {
        scanner.close()
      }
    }
  }
}

object ResultsIterator {
  def apply(schema: StructType, iter: RowResultIterator) = new Iterator[Row] {

    private val zipped = schema.fields.zipWithIndex

    override def hasNext: Boolean = iter.hasNext
    override def next(): Row = {
      val next = iter.next()
      val values = zipped.map { case (field, index) =>
        field.dataType match {
          case BinaryType => BinaryValueReader.read(next, index)
          case BooleanType => BooleanValueReader.read(next, index)
          case _: ByteType => ByteValueReader.read(next, index)
          case DoubleType => DoubleValueReader.read(next, index)
          case FloatType => FloatValueReader.read(next, index)
          case _: IntType => IntValueReader.read(next, index)
          case _: LongType => LongValueReader.read(next, index)
          case _: ShortType => ShortValueReader.read(next, index)
          case StringType => StringValueReader.read(next, index)
          case TimeMicrosType => LongValueReader.read(next, index)
          case TimeMillisType => LongValueReader.read(next, index)
          case TimestampMillisType => LongValueReader.read(next, index)
        }
      }
      Row(schema, values)
    }
  }
}

class ScannerIterator(scanner: KuduScanner, schema: StructType) extends Iterator[Row] {
  var iter: Iterator[Row] = Iterator.empty
  override def hasNext: Boolean = iter.hasNext || {
    if (scanner.hasMoreRows) {
      iter = ResultsIterator(schema, scanner.nextRows)
      iter.hasNext
    } else {
      false
    }
  }
  override def next(): Row = iter.next()
}

object KuduSource {
  def apply(master: String, table: String): KuduSource = {
    implicit val client = new KuduClient.KuduClientBuilder(master).build()
    KuduSource(table)
  }
} 
Example 23
Source File: HbasePublisher.scala    From eel-sdk   with Apache License 2.0
package io.eels.component.hbase

import java.util
import java.util.concurrent.atomic.AtomicBoolean

import com.sksamuel.exts.io.Using
import com.sksamuel.exts.metrics.Timed
import io.eels.Row
import io.eels.datastream.{Publisher, Subscriber, Subscription}
import io.eels.schema.StructType
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.{Connection, Result, Scan}

import scala.collection.mutable.ArrayBuffer

class HbasePublisher(connection: Connection,
                     schema: StructType,
                     namespace: String,
                     tableName: String,
                     bufferSize: Int,
                     maxRows: Long,
                     scanner: Scan,
                     implicit val serializer: HbaseSerializer) extends Publisher[Seq[Row]] with Timed with Using {

  private val table = connection.getTable(TableName.valueOf(namespace, tableName))

  override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {
    try {
      using(new CloseableIterator) { rowIter =>
        val running = new AtomicBoolean(true)
        subscriber.subscribed(Subscription.fromRunning(running))
        val buffer = new ArrayBuffer[Row](bufferSize)
        while (rowIter.hasNext && running.get()) {
          buffer append rowIter.next()
          if (buffer.size == bufferSize) {
            subscriber.next(buffer.toVector)
            buffer.clear()
          }
        }
        if (buffer.nonEmpty) subscriber.next(buffer.toVector)
        subscriber.completed()
      }
    } catch {
      case t: Throwable => subscriber.error(t)
    }
  }

  class CloseableIterator extends Iterator[Row] with AutoCloseable {
    private val resultScanner = table.getScanner(scanner)
    private val resultScannerIter = resultScanner.iterator()
    private var rowCount = 0
    private var iter: Iterator[Row] = Iterator.empty

    override def hasNext: Boolean = rowCount < maxRows && iter.hasNext || {
      if (rowCount < maxRows && resultScannerIter.hasNext) {
        iter = HBaseResultsIterator(schema, resultScannerIter)
        iter.hasNext
      } else false
    }

    override def next(): Row = {
      rowCount += 1
      iter.next()
    }

    override def close(): Unit = {
      resultScanner.close()
    }
  }

  case class HBaseResultsIterator(schema: StructType, resultIter: util.Iterator[Result])(implicit serializer: HbaseSerializer) extends Iterator[Row] {
    override def hasNext: Boolean = resultIter.hasNext

    override def next(): Row = {
      val resultRow = resultIter.next()
      val values = schema.fields.map { field =>
        if (!field.key) {
          val value = resultRow.getValue(field.columnFamily.getOrElse(sys.error(s"No Column Family defined for field '${field.name}'")).getBytes, field.name.getBytes)
          if (value != null) serializer.fromBytes(value, field.name, field.dataType) else null
        } else serializer.fromBytes(resultRow.getRow, field.name, field.dataType)
      }
      Row(schema, values)
    }
  }


} 
Example 24
Source File: OrcSource.scala    From eel-sdk   with Apache License 2.0
package io.eels.component.orc

import java.util.concurrent.atomic.AtomicBoolean

import com.sksamuel.exts.OptionImplicits._
import com.sksamuel.exts.io.Using
import io.eels._
import io.eels.datastream.{DataStream, Publisher, Subscriber, Subscription}
import io.eels.schema.StructType
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.orc.OrcFile.ReaderOptions
import org.apache.orc._

import scala.collection.JavaConverters._

object OrcSource {
  def apply(path: Path)(implicit fs: FileSystem, conf: Configuration): OrcSource = apply(FilePattern(path))
  def apply(str: String)(implicit fs: FileSystem, conf: Configuration): OrcSource = apply(FilePattern(str))
}

case class OrcSource(pattern: FilePattern,
                     projection: Seq[String] = Nil,
                     predicate: Option[Predicate] = None)
                    (implicit fs: FileSystem, conf: Configuration) extends Source with Using {

  override def parts(): Seq[Publisher[Seq[Row]]] = pattern.toPaths().map(new OrcPublisher(_, projection, predicate))

  def withPredicate(predicate: Predicate): OrcSource = copy(predicate = predicate.some)
  def withProjection(first: String, rest: String*): OrcSource = withProjection(first +: rest)
  def withProjection(fields: Seq[String]): OrcSource = {
    require(fields.nonEmpty)
    copy(projection = fields.toList)
  }

  override def schema: StructType = {
    val reader = OrcFile.createReader(pattern.toPaths().head, new ReaderOptions(conf))
    val schema = reader.getSchema
    OrcSchemaFns.fromOrcType(schema).asInstanceOf[StructType]
  }

  private def reader() = {
    val options = new ReaderOptions(conf)
    OrcFile.createReader(pattern.toPaths().head, options)
  }

  def count(): Long = reader().getNumberOfRows
  def statistics(): Seq[ColumnStatistics] = reader().getStatistics.toVector
  def stripes(): Seq[StripeInformation] = reader().getStripes.asScala
  def stripeStatistics(): Seq[StripeStatistics] = reader().getStripeStatistics.asScala
}

class OrcPublisher(path: Path,
                   projection: Seq[String],
                   predicate: Option[Predicate])(implicit conf: Configuration) extends Publisher[Seq[Row]] {

  override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {
    try {
      val reader = OrcFile.createReader(path, new ReaderOptions(conf))
      val fileSchema = OrcSchemaFns.fromOrcType(reader.getSchema).asInstanceOf[StructType]
      val iterator: Iterator[Row] = OrcBatchIterator(reader, fileSchema, projection, predicate).flatten

      val running = new AtomicBoolean(true)
      subscriber.subscribed(Subscription.fromRunning(running))
      iterator.grouped(DataStream.DefaultBatchSize).takeWhile(_ => running.get).foreach(subscriber.next)
      subscriber.completed()
    } catch {
      case t: Throwable => subscriber.error(t)
    }
  }
} 
Example 25
Source File: ElasticsearchSource.scala    From eel-sdk   with Apache License 2.0
package io.eels.component.elasticsearch

import java.util.concurrent.atomic.AtomicBoolean

import com.sksamuel.elastic4s.http.HttpClient
import com.sksamuel.elastic4s.http.search.SearchIterator
import io.eels.datastream.{Publisher, Subscriber, Subscription}
import io.eels.schema.{Field, StructType}
import io.eels.{Row, Source}

import scala.concurrent.duration._

class ElasticsearchSource(index: String)(implicit client: HttpClient) extends Source {

  import com.sksamuel.elastic4s.http.ElasticDsl._

  implicit val duration = 5.minutes

  override def schema: StructType = {
    val resp = client.execute {
      getMapping(index)
    }.await.head.mappings
    val fields = resp.head._2.keys.map { key => Field(key) }.toSeq
    StructType(fields)
  }

  override def parts(): Seq[Publisher[Seq[Row]]] = Seq(new ElasticsearchPublisher(index))
}

class ElasticsearchPublisher(index: String)(implicit client: HttpClient) extends Publisher[Seq[Row]] {

  import com.sksamuel.elastic4s.http.ElasticDsl._

  implicit val duration = 5.minutes

  override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {
    try {
      val running = new AtomicBoolean(true)
      subscriber.subscribed(Subscription.fromRunning(running))
      SearchIterator.hits(client, search(index).matchAllQuery.keepAlive("1m").size(50))
        .takeWhile(_ => running.get)
        .grouped(50)
        .foreach { batch =>
          val chunk = batch.map { hit =>
            val schema = StructType(hit.fields.keys.map { key => Field(key) }.toSeq)
            Row(schema, hit.fields.values.toVector)
          }
          subscriber.next(chunk)
        }
      subscriber.completed()
    } catch {
      case t: Throwable => subscriber.error(t)
    }
  }
} 
Example 26
Source File: InfiniteSource.scala    From flink-jpmml   with GNU Affero General Public License v3.0
package io.radicalbit.examples.sources

import java.util.concurrent.atomic.AtomicBoolean

import io.radicalbit.examples.model.Utils
import io.radicalbit.flink.pmml.scala.models.control.{AddMessage, ServingMessage}
import org.apache.flink.streaming.api.functions.source.SourceFunction

import scala.util.Random
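
// Excerpt only: the enclosing SourceFunction implementation is not shown here; it defines the
// `isRunning` flag (presumably the AtomicBoolean imported above) plus `policy`, `mappingIdPath`,
// `rand`, and `maxInterval` used below.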


  override def run(ctx: SourceFunction.SourceContext[ServingMessage]): Unit =
    if (policy == ControlSource.Loop) loopedGeneration(ctx) else randomGeneration(ctx)

  private def loopedGeneration(context: SourceFunction.SourceContext[ServingMessage]) =
    while (isRunning.get()) {
      mappingIdPath.foreach { tuple =>
        val (id, path) = tuple
        context.getCheckpointLock.synchronized {
          context.collect(AddMessage(id, 1, path, Utils.now))
        }

        Thread.sleep(rand.nextDouble() * maxInterval toLong)
      }
    }

  private def randomGeneration(context: SourceFunction.SourceContext[ServingMessage]) =
    while (isRunning.get()) {
      val (currentId, currentPath) = rand.shuffle(mappingIdPath).head
      context.getCheckpointLock.synchronized {
        context.collect(AddMessage(currentId, 1, currentPath, Utils.now))
      }

      Thread.sleep(rand.nextDouble() * maxInterval toLong)
    }

} 
Example 27
Source File: PythonUDFRunner.scala    From XSQL   with Apache License 2.0
package org.apache.spark.sql.execution.python

import java.io._
import java.net._
import java.util.concurrent.atomic.AtomicBoolean

import org.apache.spark._
import org.apache.spark.api.python._


class PythonUDFRunner(
    funcs: Seq[ChainedPythonFunctions],
    evalType: Int,
    argOffsets: Array[Array[Int]])
  extends BasePythonRunner[Array[Byte], Array[Byte]](
    funcs, evalType, argOffsets) {

  protected override def newWriterThread(
      env: SparkEnv,
      worker: Socket,
      inputIterator: Iterator[Array[Byte]],
      partitionIndex: Int,
      context: TaskContext): WriterThread = {
    new WriterThread(env, worker, inputIterator, partitionIndex, context) {

      protected override def writeCommand(dataOut: DataOutputStream): Unit = {
        PythonUDFRunner.writeUDFs(dataOut, funcs, argOffsets)
      }

      protected override def writeIteratorToStream(dataOut: DataOutputStream): Unit = {
        PythonRDD.writeIteratorToStream(inputIterator, dataOut)
        dataOut.writeInt(SpecialLengths.END_OF_DATA_SECTION)
      }
    }
  }

  protected override def newReaderIterator(
      stream: DataInputStream,
      writerThread: WriterThread,
      startTime: Long,
      env: SparkEnv,
      worker: Socket,
      releasedOrClosed: AtomicBoolean,
      context: TaskContext): Iterator[Array[Byte]] = {
    new ReaderIterator(stream, writerThread, startTime, env, worker, releasedOrClosed, context) {

      protected override def read(): Array[Byte] = {
        if (writerThread.exception.isDefined) {
          throw writerThread.exception.get
        }
        try {
          stream.readInt() match {
            case length if length > 0 =>
              val obj = new Array[Byte](length)
              stream.readFully(obj)
              obj
            case 0 => Array.empty[Byte]
            case SpecialLengths.TIMING_DATA =>
              handleTimingData()
              read()
            case SpecialLengths.PYTHON_EXCEPTION_THROWN =>
              throw handlePythonException()
            case SpecialLengths.END_OF_DATA_SECTION =>
              handleEndOfDataSection()
              null
          }
        } catch handleException
      }
    }
  }
}

object PythonUDFRunner {

  def writeUDFs(
      dataOut: DataOutputStream,
      funcs: Seq[ChainedPythonFunctions],
      argOffsets: Array[Array[Int]]): Unit = {
    dataOut.writeInt(funcs.length)
    funcs.zip(argOffsets).foreach { case (chained, offsets) =>
      dataOut.writeInt(offsets.length)
      offsets.foreach { offset =>
        dataOut.writeInt(offset)
      }
      dataOut.writeInt(chained.funcs.length)
      chained.funcs.foreach { f =>
        dataOut.writeInt(f.command.length)
        dataOut.write(f.command)
      }
    }
  }
} 
Example 28
Source File: RPCContinuousShuffleReader.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.streaming.continuous.shuffle

import java.util.concurrent._
import java.util.concurrent.atomic.AtomicBoolean

import org.apache.spark.internal.Logging
import org.apache.spark.rpc.{RpcCallContext, RpcEnv, ThreadSafeRpcEndpoint}
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.util.NextIterator


      override def getNext(): UnsafeRow = {
        var nextRow: UnsafeRow = null
        while (!finished && nextRow == null) {
          completion.poll(epochIntervalMs, TimeUnit.MILLISECONDS) match {
            case null =>
              // Try again if the poll didn't wait long enough to get a real result.
              // But we should be getting at least an epoch marker every checkpoint interval.
              val writerIdsUncommitted = writerEpochMarkersReceived.zipWithIndex.collect {
                case (flag, idx) if !flag => idx
              }
              logWarning(
                s"Completion service failed to make progress after $epochIntervalMs ms. Waiting " +
                  s"for writers ${writerIdsUncommitted.mkString(",")} to send epoch markers.")

            // The completion service guarantees this future will be available immediately.
            case future => future.get() match {
              case ReceiverRow(writerId, r) =>
                // Start reading the next element in the queue we just took from.
                completion.submit(completionTask(writerId))
                nextRow = r
              case ReceiverEpochMarker(writerId) =>
                // Don't read any more from this queue. If all the writers have sent epoch markers,
                // the epoch is over; otherwise we need to loop again to poll from the remaining
                // writers.
                writerEpochMarkersReceived(writerId) = true
                if (writerEpochMarkersReceived.forall(_ == true)) {
                  finished = true
                }
            }
          }
        }

        nextRow
      }

      override def close(): Unit = {
        executor.shutdownNow()
      }
    }
  }
} 
Example 29
Source File: ExecutorNumListener.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.monitor

import java.text.SimpleDateFormat
import java.util
import java.util.Date
import java.util.concurrent.atomic.AtomicBoolean

import com.fasterxml.jackson.annotation.JsonIgnore

import org.apache.spark.SparkContext
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.{
  SparkListener,
  SparkListenerExecutorAdded,
  SparkListenerExecutorRemoved
}
import org.apache.spark.util.kvstore.KVIndex

class ExecutorNumListener extends SparkListener with Logging {

  lazy val kvstore = SparkContext.getActive.get.statusStore.store
  var initialized: AtomicBoolean = new AtomicBoolean(false)
  var lastPointTime: Long = new Date().getTime
  var recentEventTime: Long = new Date().getTime
  private val liveExecutors = new util.HashSet[String]()

  def initialize(): Unit = {
    SparkContext.getActive.map(_.ui).flatten.foreach {
      case ui =>
        ui.attachTab(new ExecutorNumTab(ui))
        ui.addStaticHandler("static", "/static/special")
    }
  }

  def maybeAddPoint(time: Long): Unit = {
    if (!initialized.get) {
      initialize()
      initialized.compareAndSet(false, true)
    }
    if (time - lastPointTime > 20 * 1000) {
      addPoint(recentEventTime)
      addPoint(time)
      lastPointTime = time
    }
    recentEventTime = time
  }
  def addPoint(time: Long): Unit = {
    val executorNum = liveExecutors.size
    kvstore.write(new ExecutorNumWrapper(new ExecutorNum(
      s"own ${executorNum} executors at ${new SimpleDateFormat("HH:mm:ss").format(new Date(time))}",
      IndexedSeq(time, executorNum))))
  }

  override def onExecutorAdded(event: SparkListenerExecutorAdded): Unit = {
    liveExecutors.add(event.executorId)
    maybeAddPoint(event.time)
  }

  override def onExecutorRemoved(event: SparkListenerExecutorRemoved): Unit = {
    liveExecutors.remove(event.executorId)
    maybeAddPoint(event.time)
  }

}

private[spark] class ExecutorNumWrapper(val point: ExecutorNum) {
  @JsonIgnore @KVIndex
  def id: Long = point.value(0)
}

private[spark] class ExecutorNum(val name: String, val value: IndexedSeq[Long]) 
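
A brief usage sketch (not part of the original file): the listener only takes effect once it is registered with a SparkContext, after which executor add/remove events drive the points it writes to the status store. The session setup below is illustrative and assumes the surrounding XSQL classes (ExecutorNumTab, the static resources) are on the classpath.

import org.apache.spark.sql.SparkSession
import org.apache.spark.monitor.ExecutorNumListener

val spark = SparkSession.builder().appName("executor-num-demo").master("local[*]").getOrCreate()
// Register the listener so onExecutorAdded/onExecutorRemoved are invoked on executor changes
spark.sparkContext.addSparkListener(new ExecutorNumListener)
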
Example 30
Source File: Loop.scala    From Hive-JDBC-Proxy   with Apache License 2.0 5 votes vote down vote up
package com.enjoyyin.hive.proxy.jdbc.util

import java.util.concurrent.atomic.AtomicBoolean
import scala.util.control.NonFatal
import com.enjoyyin.hive.proxy.jdbc.exception.IngoreEventException



abstract class DaemonThread(override val threadName: String) extends LoopThread

trait LoopThread extends Logging {
  
  val threadName: String
  
  private val stopped = new AtomicBoolean(false)
  
  private var startTime = 0L
  
  private val eventThread: Thread = new Thread(threadName) {
    setDaemon(true)
    override def run(): Unit = {
      startTime = System.currentTimeMillis
      while (!stopped.get) {
        Utils.tryCatch(doLoop){
          case _: InterruptedException => return // exit even if eventQueue is not empty
          case NonFatal(e) => logError("Unexpected error in " + threadName, e)
        }
      }
    }
  }
  
  protected def doLoop: Unit
  
  def start: Unit = {
    if (stopped.get) {
      throw new IllegalStateException(threadName + " has already been stopped!")
    }
    // Call onStart before starting the event thread to make sure it happens before onReceive
    onStart
    eventThread.start()
  }
  
  def getStartTime: Long = startTime
  
  def stop: Unit = {
    if (stopped.compareAndSet(false, true)) {
      eventThread.interrupt()
      var onStopCalled = false
      Utils.tryCatch {
        eventThread.join()
        onStopCalled = true
        onStop
      } {
        case _: InterruptedException =>
          Thread.currentThread.interrupt()
          if (!onStopCalled) {
            onStop
          }
        case t => throw t
      }
    } else {
      // Keep quiet to allow calling `stop` multiple times.
    }
  }
  
  def isActive: Boolean = eventThread.isAlive
  
  protected def onStart: Unit = {}

  protected def onStop: Unit = {}

}

trait LoopCollection[E] extends Logging {
  
  def post(event: E): Unit
  
  val threadName: String
  
  protected def onReceiveSafety(event: E): Unit = Utils.tryCatch(onReceive(event)){
    case _: IngoreEventException => //ignore it
    case NonFatal(e) => Utils.tryCatch(onError(e)){
      case NonFatal(_) => logError("Unexpected error in " + threadName, e)
    }
  }
  
  protected def onReceive(event: E): Unit
  
  def remove(obj: E): Unit
  
  def remove(filter: E => Boolean): Unit
  
  protected def onError(e: Throwable): Unit
} 
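
A minimal sketch (not from the original project, and assuming its Logging and Utils helpers are on the classpath) of how a concrete loop thread built on LoopThread/DaemonThread might look:

// Hypothetical subclass, shown only to illustrate the LoopThread lifecycle
class HeartbeatThread extends DaemonThread("heartbeat-loop") {
  // one iteration of work; looping, interruption and error logging are handled by LoopThread
  override protected def doLoop: Unit = {
    println("heartbeat")
    Thread.sleep(1000)
  }
}

// start spawns the daemon event thread; stop interrupts it and waits for it to exit
val heartbeat = new HeartbeatThread
heartbeat.start
Thread.sleep(3000)
heartbeat.stop
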
Example 31
Source File: TaskInstances.scala    From shims   with Apache License 2.0 5 votes vote down vote up
package shims.effect.instances

import cats.{Applicative, Monad, Parallel, StackSafeMonad, ~>}
import cats.effect.{Effect, ExitCase, IO, SyncIO}

import scalaz.{Tag, -\/, \/, \/-}
import scalaz.concurrent.Task.ParallelTask
import scalaz.concurrent.{Future, Task}

import shims.conversions.MonadErrorConversions

import java.util.concurrent.atomic.AtomicBoolean

import scala.util.control.NonFatal

trait TaskInstances extends MonadErrorConversions {

  // cribbed from quasar, where it was mostly cribbed from scalaz-task-effect
  implicit object taskEffect extends Effect[Task] with StackSafeMonad[Task] {

    def pure[A](x: A): Task[A] = Task.now(x)

    def handleErrorWith[A](fa: Task[A])(f: Throwable => Task[A]): Task[A] =
      fa.handleWith(functionToPartial(f))

    def raiseError[A](e: Throwable): Task[A] = Task.fail(e)

    // In order to comply with `repeatedCallbackIgnored` law
    // on async, a custom AtomicBoolean is required to ignore
    // second callbacks.
    def async[A](k: (Either[Throwable, A] => Unit) => Unit): Task[A] = Task.async { registered =>
      val a = new AtomicBoolean(true)
      try k(e => if (a.getAndSet(false)) registered(\/.fromEither(e)) else ())
      catch { case NonFatal(t) => registered(-\/(t)) }
    }

    def asyncF[A](k: (Either[Throwable, A] => Unit) => Task[Unit]): Task[A] =
      async(k.andThen(_.unsafePerformAsync(forget)))

    // emulates using attempt
    def bracketCase[A, B](acquire: Task[A])(use: A => Task[B])(release: (A, ExitCase[Throwable]) => Task[Unit]): Task[B] = {
      for {
        a <- acquire
        bOr <- use(a).attempt
        ec = bOr.fold(ExitCase.Error(_), _ => ExitCase.Completed)
        _ <- release(a, ec)
        b <- bOr.fold(Task.fail, Task.now)
      } yield b
    }

    
    def runAsync[A](fa: Task[A])(cb: Either[Throwable, A] => IO[Unit]): SyncIO[Unit] =
      SyncIO {
        fa unsafePerformAsync { disjunction =>
          cb(disjunction.toEither).unsafeRunAsync(forget)
        }
      }

    def runSyncStep[A](fa: Task[A]): IO[Either[Task[A], A]] =
      IO {
        fa.get match {
          case Future.Now(-\/(_)) => Left(fa)

          case other => other.step match {
            case Future.Now(\/-(a)) => Right(a)
            case other => Left(new Task(other))
          }
        }
      }

    override def map[A, B](fa: Task[A])(f: A => B): Task[B] =
      fa.map(f)

    def flatMap[A, B](fa: Task[A])(f: A => Task[B]): Task[B] = fa.flatMap(f)

    override def delay[A](thunk: => A): Task[A] = Task.delay(thunk)

    def suspend[A](thunk: => Task[A]): Task[A] = Task.suspend(thunk)
  }

  implicit val taskParallel: Parallel.Aux[Task, ParallelTask] = new Parallel[Task] {
    import Task.taskParallelApplicativeInstance

    type F[A] = ParallelTask[A]

    val monad: Monad[Task] = taskEffect
    val applicative: Applicative[ParallelTask] = Applicative[ParallelTask]
    val sequential: ParallelTask ~> Task = λ[ParallelTask ~> Task](Tag.unwrap(_))
    val parallel: Task ~> ParallelTask = λ[Task ~> ParallelTask](Tag(_))
  }

  private def functionToPartial[A, B](f: A => B): PartialFunction[A, B] = {
    case a => f(a)
  }

  private def forget[A](x: A): Unit = ()
} 
Example 32
Source File: ReferenceCounted.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.database.cosmosdb

import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}

private[cosmosdb] case class ReferenceCounted[T <: AutoCloseable](private val inner: T) {
  private val count = new AtomicInteger(0)

  private def inc(): Unit = count.incrementAndGet()

  private def dec(): Unit = {
    val newCount = count.decrementAndGet()
    if (newCount <= 0) {
      inner.close()
      //Turn count to negative to ensure future reference call fail
      count.decrementAndGet()
    }
  }

  def isClosed: Boolean = count.get() < 0

  def reference(): CountedReference = {
    require(count.get >= 0, "Reference is already closed")
    new CountedReference
  }

  class CountedReference extends AutoCloseable {
    private val closed = new AtomicBoolean()
    inc()
    override def close(): Unit = if (closed.compareAndSet(false, true)) dec()

    def get: T = inner
  }
} 
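
A short usage sketch (not part of the original file, ignoring the private[cosmosdb] visibility for illustration) showing the reference-counting contract of the class above; the inner resource is closed only when the last outstanding CountedReference is closed, after which further reference() calls fail:

val resource = new AutoCloseable {
  override def close(): Unit = println("inner resource closed")
}
val counted = ReferenceCounted(resource)

val ref1 = counted.reference()
val ref2 = counted.reference()

ref1.close()              // count drops to 1, the inner resource stays open
ref2.close()              // count drops to 0, inner.close() runs and the count goes negative
assert(counted.isClosed)  // true
// counted.reference()    // would now throw: "Reference is already closed"
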
Example 33
Source File: BlockingSpec.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio.blocking

import java.util.concurrent.atomic.AtomicBoolean

import zio.duration._
import zio.test.Assertion._
import zio.test.TestAspect.nonFlaky
import zio.test._
import zio.{ UIO, ZIOBaseSpec }

object BlockingSpec extends ZIOBaseSpec {

  def spec = suite("BlockingSpec")(
    suite("Make a Blocking Service and verify that")(
      testM("effectBlocking completes successfully") {
        assertM(effectBlocking(()))(isUnit)
      },
      testM("effectBlocking runs on the blocking thread pool") {
        for {
          name <- effectBlocking(Thread.currentThread.getName)
        } yield assert(name)(containsString("zio-default-blocking"))
      },
      testM("effectBlockingCancelable completes successfully") {
        assertM(effectBlockingCancelable(())(UIO.unit))(isUnit)
      },
      testM("effectBlockingCancelable runs on the blocking thread pool") {
        for {
          name <- effectBlockingCancelable(Thread.currentThread.getName)(UIO.unit)
        } yield assert(name)(containsString("zio-default-blocking"))
      },
      testM("effectBlockingCancelable can be interrupted") {
        val release = new AtomicBoolean(false)
        val cancel  = UIO.effectTotal(release.set(true))
        assertM(effectBlockingCancelable(blockingAtomic(release))(cancel).timeout(Duration.Zero))(isNone)
      },
      testM("effectBlockingInterrupt completes successfully") {
        assertM(effectBlockingInterrupt(()))(isUnit)
      },
      testM("effectBlockingInterrupt runs on the blocking thread pool") {
        for {
          name <- effectBlockingInterrupt(Thread.currentThread.getName)
        } yield assert(name)(containsString("zio-default-blocking"))
      },
      testM("effectBlockingInterrupt can be interrupted") {
        assertM(effectBlockingInterrupt(Thread.sleep(50000)).timeout(Duration.Zero))(isNone)
      } @@ nonFlaky
    )
  )

  def blockingAtomic(released: AtomicBoolean): Unit =
    while (!released.get()) {
      try {
        Thread.sleep(10L)
      } catch {
        case _: InterruptedException => ()
      }
    }
} 
Example 34
Source File: OneElementConcurrentQueue.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio.internal.impls

import java.io.Serializable
import java.util.concurrent.atomic.{ AtomicBoolean, AtomicLong, AtomicReference }

import zio.internal.MutableConcurrentQueue

final class OneElementConcurrentQueue[A] extends MutableConcurrentQueue[A] with Serializable {
  private[this] val ref = new AtomicReference[AnyRef]()

  private[this] val headCounter   = new AtomicLong(0L)
  private[this] val deqInProgress = new AtomicBoolean(false)

  private[this] val tailCounter   = new AtomicLong(0L)
  private[this] val enqInProgress = new AtomicBoolean(false)

  override final val capacity = 1

  override def dequeuedCount(): Long = headCounter.get()
  override def enqueuedCount(): Long = tailCounter.get()

  override def isEmpty(): Boolean = ref.get() == null
  override def isFull(): Boolean  = !isEmpty()

  override def offer(a: A): Boolean = {
    assert(a != null)

    var res     = false
    var looping = true

    while (looping) {
      if (isFull()) {
        looping = false
      } else {
        if (enqInProgress.compareAndSet(false, true)) { // get an exclusive right to offer
          if (ref.get() == null) {
            tailCounter.lazySet(tailCounter.get() + 1)
            ref.lazySet(a.asInstanceOf[AnyRef])
            res = true
          }

          enqInProgress.lazySet(false)
          looping = false
        }
      }
    }

    res
  }

  override def poll(default: A): A = {
    var res     = default
    var looping = true

    while (looping) {
      if (isEmpty()) {
        looping = false
      } else {
        if (deqInProgress.compareAndSet(false, true)) { // get an exclusive right to poll
          val el = ref.get().asInstanceOf[A]

          if (el != null) {
            res = el
            headCounter.lazySet(headCounter.get() + 1)
            ref.lazySet(null.asInstanceOf[AnyRef])
          }

          deqInProgress.lazySet(false)
          looping = false
        }
      }
    }

    res
  }

  override def size(): Int = if (isEmpty()) 0 else 1
} 
Example 36
Source File: CloseableShutdownHookTest.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.lifecycle

import java.util.concurrent.atomic.AtomicBoolean

import wvlet.airframe.{bind, newSilentDesign}
import wvlet.airspec.AirSpec


class CloseableShutdownHookTest extends AirSpec {
  scalaJsSupport

  class A extends AutoCloseable {
    val closeIsCalled = new AtomicBoolean(false)

    override def close(): Unit = {
      closeIsCalled.set(true)
    }
  }

  def `support closeable`: Unit = {
    val a = new A
    val d = newSilentDesign.bind[A].toInstance(a)
    d.build[A] { a => }

    a.closeIsCalled.get() shouldBe true
  }

  trait A1 {
    val onShutdownIsCalled = new AtomicBoolean(false)
    val a                  = bind[A].onShutdown { x => onShutdownIsCalled.set(true) }
  }

  def `favor onShutdownHook`: Unit = {
    val x = newSilentDesign
      .build[A1] { a1 => a1 }.asInstanceOf[A1]

    x.onShutdownIsCalled.get shouldBe true
    x.a.closeIsCalled.get shouldBe false
  }
} 
Example 37
Source File: LazyStartTest.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.lifecycle

import java.util.concurrent.atomic.AtomicBoolean

import wvlet.airframe.{bind, newSilentDesign}
import wvlet.airspec.AirSpec
import wvlet.log.LogSupport
import wvlet.airframe._

object LazyStartTest {
  type F1 = AtomicBoolean
  type F2 = AtomicBoolean

  trait MyApp extends LogSupport {
    val a = bind[F1]
      .onStart { x => x.set(true) }
      .onShutdown { x => x.set(false) }
  }

  trait MyApp2 extends LogSupport {
    val a = bind[F2]
      .onStart { x => x.set(true) }
      .onShutdown { x => x.set(false) }
  }
}


class LazyStartTest extends AirSpec {
  scalaJsSupport

  import LazyStartTest._

  val f1 = new AtomicBoolean(false)
  val f2 = new AtomicBoolean(false)

  val d = newSilentDesign
    .bind[MyApp].toSingleton
    .bind[MyApp2].toSingleton
    .bind[F1].toInstance(f1)
    .bind[F2].toInstance(f2)

  def `support lazy start`: Unit = {
    (f1.get, f2.get) shouldBe (false, false)
    d.build[MyApp] { app => (f1.get, f2.get) shouldBe (true, false) }
    (f1.get, f2.get) shouldBe (false, false)

    d.withLazyMode.build[MyApp] { app => (f1.get, f2.get) shouldBe (true, false) }
    (f1.get, f2.get) shouldBe (false, false)

    // Override config
    d.withProductionMode.withLazyMode.build[MyApp] { app => (f1.get, f2.get) shouldBe (true, false) }
    (f1.get, f2.get) shouldBe (false, false)

    d.build[MyApp2] { app => (f1.get, f2.get) shouldBe (false, true) }
    (f1.get, f2.get) shouldBe (false, false)
  }

  def `support eager start`: Unit = {
    (f1.get, f2.get) shouldBe (false, false)
    d.withProductionMode.build[MyApp] { app => (f1.get, f2.get) shouldBe (true, true) }
    (f1.get, f2.get) shouldBe (false, false)

    // Override config
    (f1.get, f2.get) shouldBe (false, false)
    d.withLazyMode.withProductionMode.build[MyApp] { app => (f1.get, f2.get) shouldBe (true, true) }
    (f1.get, f2.get) shouldBe (false, false)

    d.withProductionMode.build[MyApp2] { app => (f1.get, f2.get) shouldBe (true, true) }
    (f1.get, f2.get) shouldBe (false, false)
  }
} 
Example 38
Source File: CloseableHookPrecedenceTest.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe

import java.util.concurrent.atomic.AtomicBoolean

import javax.annotation.PreDestroy
import wvlet.airspec.AirSpec


class CloseableHookPrecedenceTest extends AirSpec {
  class A extends AutoCloseable {
    val closeIsCalled    = new AtomicBoolean(false)
    val shutdownIsCalled = new AtomicBoolean(false)

    override def close(): Unit = {
      closeIsCalled.set(true)
    }

    @PreDestroy
    def shutdown: Unit = {
      shutdownIsCalled.set(true)
    }
  }

  def `support closeable`: Unit = {
    val a = new A
    val d = newSilentDesign.bind[A].toInstance(a)
    d.build[A] { a => }

    a.closeIsCalled.get shouldBe false
    a.shutdownIsCalled.get() shouldBe true
  }
} 
Example 39
Source File: FluencyTest.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.fluentd
import java.io.BufferedInputStream
import java.net.ServerSocket
import java.util.concurrent.atomic.AtomicBoolean

import javax.annotation.{PostConstruct, PreDestroy}
import wvlet.airframe.codec.PrimitiveCodec.ValueCodec
import wvlet.airframe._
import wvlet.airspec.AirSpec
import wvlet.log.LogSupport
import wvlet.log.io.IOUtil

case class MockFluentdConfig(port: Int)

trait MockFluentd extends LogSupport {
  lazy val socket = bind { config: MockFluentdConfig => new ServerSocket(config.port) }

  val shutdown = new AtomicBoolean(false)

  val t = new Thread(new Runnable {
    override def run(): Unit = {
      val clientSocket = socket.accept()
      val out          = clientSocket.getOutputStream
      val in           = new BufferedInputStream(clientSocket.getInputStream)

      while (!shutdown.get()) {
        var b            = new Array[Byte](8192)
        var totalReadLen = 0
        var readLen      = in.read(b)
        while (readLen != -1) {
          val nextReadLen = in.read(b, totalReadLen, readLen)
          totalReadLen += readLen
          readLen = nextReadLen
        }
        if (totalReadLen > 0) {
          val v = ValueCodec.unpackMsgPack(b, 0, totalReadLen)
          logger.debug(s"Received event: ${v}")
        }
      }
    }
  })

  @PostConstruct
  def start: Unit = {
    debug(s"starting MockFluentd")
    t.start()
  }

  @PreDestroy
  def stop: Unit = {
    debug(s"stopping MockFluentd")
    shutdown.set(true)
    socket.close()
    t.interrupt()
  }
}

case class FluencyMetric(id: Int, name: String) extends TaggedMetric {
  def metricTag = "fluency_metric"
}


class FluencyTest extends AirSpec {
  private val fluentdPort = IOUtil.randomPort

  protected override val design: Design = {
    newDesign
      .bind[MockFluentdConfig].toInstance(new MockFluentdConfig(fluentdPort))
      .bind[MockFluentd].toEagerSingleton
      .add(
        fluentd
          .withFluentdLogger(
            port = fluentdPort,
            // Do not send ack for simplicity
            ackResponseMode = false
          )
      )
  }

  def `should send metrics to fluentd through Fluency`(f: MetricLoggerFactory): Unit = {
    // Use a regular emit method
    f.getLogger.emit("mytag", Map("data" -> "hello"))

    // Use object metric logger
    val l = f.getTypedLogger[FluencyMetric]
    l.emit(FluencyMetric(1, "leo"))
    f.getLoggerWithTagPrefix("system").emit("mytag", Map("data" -> "metric value"))
  }

  test(
    "test extended time",
    design = fluentd.withFluentdLogger(port = fluentdPort, ackResponseMode = false, useExtendedEventTime = true)
  ) { f: MetricLoggerFactory =>
    val l = f.getLogger
    l.emit("mytag", Map("data" -> "hello"))
    l.emitMsgPack("tag", Array(0xc6.toByte))
  }
} 
Example 40
Source File: AsyncHandler.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.log

import java.io.Flushable
import java.util
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.{Executors, ThreadFactory}
import java.util.{logging => jl}


class AsyncHandler(parent: jl.Handler) extends jl.Handler with Guard with AutoCloseable with Flushable {
  private val executor = {
    Executors.newCachedThreadPool(
      new ThreadFactory {
        override def newThread(r: Runnable): Thread = {
          val t = new Thread(r, "WvletLogAsyncHandler")
          t.setDaemon(true)
          t
        }
      }
    )
  }

  private val queue      = new util.ArrayDeque[jl.LogRecord]
  private val isNotEmpty = newCondition
  private val closed     = new AtomicBoolean(false)

  // Start a poller thread
  executor.submit(new Runnable {
    override def run(): Unit = {
      while (!closed.get()) {
        val record: jl.LogRecord = guard {
          if (queue.isEmpty) {
            isNotEmpty.await()
          }
          queue.pollFirst()
        }
        if (record != null) {
          parent.publish(record)
        }
      }
    }
  })

  override def flush(): Unit = {
    val records = Seq.newBuilder[jl.LogRecord]
    guard {
      while (!queue.isEmpty) {
        records += queue.pollFirst()
      }
    }

    records.result.map(parent.publish _)
    parent.flush()
  }

  override def publish(record: jl.LogRecord): Unit = {
    guard {
      queue.addLast(record)
      isNotEmpty.signal()
    }
  }

  override def close(): Unit = {
    if (closed.compareAndSet(false, true)) {
      flush()
      // Wake up the poller thread
      guard {
        isNotEmpty.signalAll()
      }
      executor.shutdown()
    }
  }
} 
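
A minimal sketch (assuming airframe-log is on the classpath, since AsyncHandler mixes in its Guard trait) of wrapping a standard java.util.logging handler:

import java.util.{logging => jl}
import wvlet.log.AsyncHandler

val console = new jl.ConsoleHandler()
val async   = new AsyncHandler(console)   // publishes to `console` from a background thread

val logger = jl.Logger.getLogger("example")
logger.addHandler(async)
logger.info("hello from the async handler")

async.flush()   // drain anything still queued to the parent handler
async.close()   // wake the poller, flush, and shut the executor down
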
Example 41
Source File: FlushMode.scala    From scribe   with MIT License 5 votes vote down vote up
package scribe.writer.file

import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}

import scribe.util.Time

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

trait FlushMode {
  def dataWritten(logFile: LogFile, writer: LogFileWriter): Unit
}

object FlushMode {
  object NeverFlush extends FlushMode {
    override def dataWritten(logFile: LogFile, writer: LogFileWriter): Unit = {}
  }

  object AlwaysFlush extends FlushMode {
    override def dataWritten(logFile: LogFile, writer: LogFileWriter): Unit = writer.flush()
  }

  case class AsynchronousFlush(delay: FiniteDuration = 1.second) extends FlushMode {
    private lazy val delayMillis = delay.toMillis
    private lazy val flushing = new AtomicBoolean(false)
    private lazy val dirty = new AtomicBoolean(false)
    private lazy val lastFlush = new AtomicLong(0L)
    private var logFile: LogFile = _

    override def dataWritten(logFile: LogFile, writer: LogFileWriter): Unit = {
      this.logFile = logFile
      if (flushing.compareAndSet(false, true)) {
        flush()
      } else {
        dirty.set(true)
      }
    }

    private def flush(): Unit = Future {
      try {
        val delay = this.delayMillis - (Time() - lastFlush.get())
        if (delay > 0L) {
          Thread.sleep(delay)
        }
        logFile.flush()
      } finally {
        lastFlush.set(Time())
        if (dirty.compareAndSet(true, false)) {
          flush()
        } else {
          flushing.set(false)
        }
      }
    }
  }
} 
Example 42
Source File: GrpcAkkaStreamsClientCalls.scala    From grpcakkastream   with MIT License 5 votes vote down vote up
package grpc.akkastreams

import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}

import akka.NotUsed
import akka.stream.scaladsl.{Flow, Source}
import com.trueaccord.scalapb.grpc.Grpc
import io.grpc.{ClientCall, Metadata, Status}
import io.grpc.stub._

object GrpcAkkaStreamsClientCalls {

  def unaryFlow[I, O](call: ClientCall[I, O]): Flow[I, O, NotUsed] =
    Flow[I].flatMapConcat(request =>
      Source.fromFuture(
        Grpc.guavaFuture2ScalaFuture(
          ClientCalls.futureUnaryCall(call, request)
        )
      )
    )

  def serverStreamingFlow[I, O](call: ClientCall[I, O]): Flow[I, O, NotUsed] =
    Flow.fromGraph(
      new GrpcGraphStage[I, O](outputObserver => {
        val out = outputObserver.asInstanceOf[ClientResponseObserver[I, O]]
        val in = new ClientCallStreamObserver[I] {
          val halfClosed = new AtomicBoolean(false)
          val onReadyHandler = new AtomicReference[Option[Runnable]](None)
          val listener = new ClientCall.Listener[O] {
            override def onClose(status: Status, trailers: Metadata): Unit =
              status.getCode match {
                case Status.Code.OK => out.onCompleted()
                case _ => out.onError(status.asException(trailers))
              }
            override def onMessage(message: O): Unit =
              out.onNext(message)
            override def onReady(): Unit =
              onReadyHandler.get().foreach(_.run())
          }
          call.start(listener, new Metadata())

          override def cancel(message: String, cause: Throwable): Unit =
            call.cancel(message, cause)
          override def setOnReadyHandler(onReadyHandler: Runnable): Unit =
            this.onReadyHandler.set(Some(onReadyHandler))
          override def request(count: Int): Unit = call.request(count)
          override def disableAutoInboundFlowControl(): Unit = ()
          override def isReady: Boolean = !halfClosed.get() || call.isReady
          override def setMessageCompression(enable: Boolean): Unit =
            call.setMessageCompression(enable)
          override def onError(t: Throwable): Unit =
            call.cancel("Cancelled by client with StreamObserver.onError()", t)
          override def onCompleted(): Unit = ()
          override def onNext(request: I): Unit = {
            call.sendMessage(request)
            halfClosed.set(true)
            call.halfClose()
          }
        }
        out.beforeStart(in)
        in
      })
    )

  def clientStreamingFlow[I, O](call: ClientCall[I, O]): Flow[I, O, NotUsed] =
    Flow.fromGraph(new GrpcGraphStage[I, O](ClientCalls.asyncClientStreamingCall(call, _)))

  def bidiStreamingFlow[I, O](call: ClientCall[I, O]): Flow[I, O, NotUsed] =
    Flow.fromGraph(new GrpcGraphStage[I, O](ClientCalls.asyncBidiStreamingCall(call, _)))
} 
Example 43
Source File: GrpcGraphStage.scala    From grpcakkastream   with MIT License 5 votes vote down vote up
package grpc.akkastreams

import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}

import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import io.grpc.stub.{ClientCallStreamObserver, ClientResponseObserver}

class GrpcGraphStage[I, O](operator: GrpcOperator[I, O]) extends GraphStage[FlowShape[I, O]] {
  val in = Inlet[I]("grpc.in")
  val out = Outlet[O]("grpc.out")

  override val shape: FlowShape[I, O] = FlowShape.of(in, out)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) with InHandler with OutHandler {

      var requestStream = new AtomicReference[Option[ClientCallStreamObserver[I]]](None)
      val element = new AtomicReference[Option[I]](None)
      val requested = new AtomicBoolean(false)

      val outObs = new ClientResponseObserver[I, O] with Runnable {
        override def beforeStart(reqStream: ClientCallStreamObserver[I]): Unit = {
          requestStream.set(Some(reqStream))
          reqStream.disableAutoInboundFlowControl()
          reqStream.setOnReadyHandler(this)
        }

        override def onError(t: Throwable) =
          getAsyncCallback((t: Throwable) => fail(out, t)).invoke(t)

        override def onCompleted() =
          getAsyncCallback((_: Unit) => complete(out)).invoke(())

        override def onNext(value: O) =
          getAsyncCallback((value: O) => push(out, value)).invoke(value)

        override def run(): Unit = requestStream.get().foreach { reqStream =>
          if (requested.compareAndSet(true, false)) reqStream.request(1)
          if (reqStream.isReady) {
            element.getAndSet(None).foreach { value =>
              reqStream.onNext(value)
              tryPull(in)
            }
          }
        }
      }

      val inObs = operator(outObs)

      override def onPush(): Unit = {
        val value = grab(in)
        requestStream.get() match {
          case Some(reqStream) if reqStream.isReady() =>
            reqStream.onNext(value)
            pull(in)
          case _ => element.compareAndSet(None, Some(value))
        }
      }

      override def onUpstreamFinish(): Unit = inObs.onCompleted()

      override def onUpstreamFailure(t: Throwable): Unit = inObs.onError(t)

      override def onPull(): Unit =
        requestStream.get() match {
          case Some(reqStream) => reqStream.request(1)
          case _ => requested.compareAndSet(false, true)
        }

      override def preStart(): Unit = pull(in)

      setHandler(in, this)
      setHandler(out, this)
    }
} 
Example 44
Source File: KubernetesLease.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.coordination.lease.kubernetes

import java.util.concurrent.atomic.{ AtomicBoolean, AtomicInteger }

import akka.actor.ExtendedActorSystem
import akka.coordination.lease.{ LeaseException, LeaseSettings, LeaseTimeoutException }
import akka.coordination.lease.scaladsl.Lease
import akka.coordination.lease.kubernetes.LeaseActor._
import akka.coordination.lease.kubernetes.internal.KubernetesApiImpl
import akka.dispatch.ExecutionContexts
import akka.pattern.AskTimeoutException
import akka.util.{ ConstantFun, Timeout }

import scala.concurrent.Future

object KubernetesLease {
  val configPath = "akka.coordination.lease.kubernetes"
  private val leaseCounter = new AtomicInteger(1)
}

class KubernetesLease private[akka] (system: ExtendedActorSystem, leaseTaken: AtomicBoolean, settings: LeaseSettings)
    extends Lease(settings) {

  private val k8sSettings = KubernetesSettings(settings.leaseConfig, settings.timeoutSettings)
  private val k8sApi = new KubernetesApiImpl(system, k8sSettings)
  private val leaseActor = system.systemActorOf(
    LeaseActor.props(k8sApi, settings, leaseTaken),
    s"kubernetesLease${KubernetesLease.leaseCounter.incrementAndGet}-${settings.leaseName}-${settings.ownerName}"
  )

  def this(leaseSettings: LeaseSettings, system: ExtendedActorSystem) =
    this(system, new AtomicBoolean(false), leaseSettings)

  import akka.pattern.ask
  import system.dispatcher

  private implicit val timeout: Timeout = Timeout(settings.timeoutSettings.operationTimeout)

  override def checkLease(): Boolean = leaseTaken.get()

  override def release(): Future[Boolean] = {
    // replace with transform once 2.11 dropped
    (leaseActor ? Release())
      .flatMap {
        case LeaseReleased       => Future.successful(true)
        case InvalidRequest(msg) => Future.failed(new LeaseException(msg))
      }(ExecutionContexts.sameThreadExecutionContext)
      .recoverWith {
        case _: AskTimeoutException =>
          Future.failed(new LeaseTimeoutException(
            s"Timed out trying to release lease [${settings.leaseName}, ${settings.ownerName}]. It may still be taken."))
      }
  }

  override def acquire(): Future[Boolean] =
    acquire(ConstantFun.scalaAnyToUnit)

  override def acquire(leaseLostCallback: Option[Throwable] => Unit): Future[Boolean] = {
    // replace with transform once 2.11 dropped
    (leaseActor ? Acquire(leaseLostCallback))
      .flatMap {
        case LeaseAcquired       => Future.successful(true)
        case LeaseTaken          => Future.successful(false)
        case InvalidRequest(msg) => Future.failed(new LeaseException(msg))
      }
      .recoverWith {
        case _: AskTimeoutException =>
          Future.failed[Boolean](new LeaseTimeoutException(
            s"Timed out trying to acquire lease [${settings.leaseName}, ${settings.ownerName}]. It may still be taken."))
      }(ExecutionContexts.sameThreadExecutionContext)
  }
} 
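
In practice the lease is usually obtained through Akka's LeaseProvider extension rather than constructed directly. A minimal sketch, assuming the akka.coordination.lease.kubernetes configuration block is available:

import akka.actor.ActorSystem
import akka.coordination.lease.scaladsl.LeaseProvider

implicit val system: ActorSystem = ActorSystem("lease-demo")
import system.dispatcher

// arguments: lease name, config path, owner name
val lease = LeaseProvider(system).getLease("my-singleton", KubernetesLease.configPath, "node-1")

lease.acquire().foreach { granted =>
  if (granted) {
    // ... do work while holding the lease ...
    lease.release()
  }
}
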
Example 45
Source File: BufferPool.scala    From scredis   with Apache License 2.0 5 votes vote down vote up
package scredis.util

import scala.annotation.tailrec

import java.nio.ByteBuffer
import java.util.concurrent.atomic.AtomicBoolean

class BufferPool(maxCapacity: Int, maxBufferSize: Int, isDirect: Boolean = false) {
  private[this] val locked = new AtomicBoolean(false)
  private[this] val pool = new Array[ByteBuffer](maxCapacity)
  private[this] var size: Int = 0
  
  @inline
  private def allocate(length: Int): ByteBuffer = if (isDirect) {
    ByteBuffer.allocateDirect(length)
  } else {
    ByteBuffer.allocate(length)
  }
  
  @tailrec
  final def acquire(length: Int): ByteBuffer = {
    if (locked.compareAndSet(false, true)) {
      val buffer = try {
        if (size > 0) {
          size -= 1
          pool(size)
        } else {
          null
        }
      } finally {
        locked.set(false)
      }

      if (buffer == null || buffer.capacity < length) {
        allocate(length)
      } else {
        buffer.clear()
        buffer
      }
    } else {
      acquire(length)
    }
  }

  @tailrec
  final def release(buffer: ByteBuffer): Unit = {
    if (buffer.capacity > maxBufferSize) {
      return
    }
    
    if (locked.compareAndSet(false, true)) {
      try {
        if (size < maxCapacity) {
          pool(size) = buffer
          size += 1
        }
        // else let the buffer get garbage collected
      }
      finally {
        locked.set(false)
      }
    } else {
      release(buffer)
    }
  }
  
} 
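
A short usage sketch (not part of the original file) for the pool above:

// Pool at most 32 buffers and never retain buffers larger than 64 KB
val pool = new BufferPool(maxCapacity = 32, maxBufferSize = 64 * 1024)

val buffer = pool.acquire(1024)       // reuses a pooled buffer if one is large enough
buffer.put("PING\r\n".getBytes("UTF-8"))
buffer.flip()
// ... write the buffer to a channel ...
pool.release(buffer)                  // returned to the pool, or dropped if above maxBufferSize
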
Example 46
Source File: HelloWorldSpec.scala    From play-soap   with Apache License 2.0 5 votes vote down vote up
package play.soap.sbtplugin.tester

import java.net.ServerSocket
import java.util.concurrent.atomic.AtomicBoolean
import javax.xml.ws.Endpoint
import javax.xml.ws.handler.soap._
import javax.xml.ws.handler.MessageContext

import org.apache.cxf.jaxws.EndpointImpl
import play.soap.testservice.client._
import scala.collection.JavaConverters._
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.reflect.ClassTag

import play.api.test._
import play.api.Application
import play.api.inject.guice.GuiceApplicationBuilder
import play.soap.testservice.HelloWorldImpl

class HelloWorldSpec extends ServiceSpec {

  sequential

  "HelloWorld" should {
    "say hello" in withClient { client =>
      await(client.sayHello("world")) must_== "Hello world"
    }

    "say hello to many people" in withClient { client =>
      await(client.sayHelloToMany(java.util.Arrays.asList("foo", "bar"))).asScala must_== List("Hello foo", "Hello bar")
    }

    "say hello to one user" in withClient { client =>
      val user = new User
      user.setName("world")
      await(client.sayHelloToUser(user)).getUser.getName must_== "world"
    }

    "say hello with an exception" in withClient { client =>
      await(client.sayHelloException("world")) must throwA[HelloException_Exception].like {
        case e => e.getMessage must_== "Hello world"
      }
    }

    "dont say hello" in withClient { client =>
      await(client.dontSayHello()) must_== ((): Unit)
    }

    "allow adding custom handlers" in {
      val invoked = new AtomicBoolean()
      withApp { app =>
        val client = app.injector.instanceOf[HelloWorldService].helloWorld(new SOAPHandler[SOAPMessageContext] {
          def getHeaders = null
          def handleMessage(context: SOAPMessageContext) = {
            invoked.set(true)
            true
          }
          def close(context: MessageContext) = ()
          def handleFault(context: SOAPMessageContext) = true
        })

        await(client.sayHello("world")) must_== "Hello world"
        invoked.get() must_== true
      }
    }
  }

  override type ServiceClient = HelloWorldService

  override type Service = HelloWorld

  override implicit val serviceClientClass: ClassTag[HelloWorldService] = ClassTag(classOf[HelloWorldService])

  override def getServiceFromClient(c: ServiceClient): Service = c.helloWorld

  override def createServiceImpl(): Any = new HelloWorldImpl

  val servicePath: String = "helloWorld"

} 
Example 47
Source File: ServiceSpec.scala    From play-soap   with Apache License 2.0 5 votes vote down vote up
package play.soap.sbtplugin.tester

import java.net.ServerSocket
import java.util.concurrent.atomic.AtomicBoolean
import javax.xml.ws.Endpoint
import javax.xml.ws.handler.soap._
import javax.xml.ws.handler.MessageContext

import org.apache.cxf.jaxws.EndpointImpl
import play.soap.testservice.client._
import scala.collection.JavaConverters._
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.reflect.ClassTag

import play.api.test._
import play.api.Application
import play.api.inject.guice.GuiceApplicationBuilder

abstract class ServiceSpec extends PlaySpecification {

  
  // The abstract members below were elided in this listing; they are reconstructed here
  // from the overrides shown in the HelloWorldSpec example above.
  type ServiceClient
  type Service

  implicit val serviceClientClass: ClassTag[ServiceClient]

  def getServiceFromClient(c: ServiceClient): Service

  def createServiceImpl(): Any

  val servicePath: String

  def await[T](future: Future[T]): T = Await.result(future, 10.seconds)

  def withClient[T](block: Service => T): T = withApp { app =>
    val client = app.injector.instanceOf[ServiceClient]
    val service = getServiceFromClient(client)
    block(service)
  }

  def withApp[T](block: Application => T): T = withService { port =>
    implicit val app = new GuiceApplicationBuilder()
      .configure("play.soap.address" -> s"http://localhost:$port/$servicePath")
      .build
    Helpers.running(app) {
      block(app)
    }
  }


  def withService[T](block: Int => T): T = {
    val port = findAvailablePort()
    val impl = createServiceImpl()
    val endpoint = Endpoint.publish(s"http://localhost:$port/$servicePath", impl)
    try {
      block(port)
    } finally {
      endpoint.stop()
      // Need to shutdown whole engine.  Note, Jetty's shutdown doesn't seem to happen synchronously, have to wait
      // a few seconds for the port to be released. This is why we use a different port each time.
      endpoint.asInstanceOf[EndpointImpl].getBus.shutdown(true)
    }
  }

  def findAvailablePort() = {
    val socket = new ServerSocket(0)
    try {
      socket.getLocalPort
    } finally {
      socket.close()
    }
  }
} 
Example 48
Source File: package.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package concurrent

import java.util.concurrent.atomic.AtomicBoolean

object `package` {
  
  // The Callback alias was elided in this listing; it is reconstructed here from how
  // Callback values are consumed below (a Callback[T] receives the Try result).
  type Callback[T] = Try[T] => Unit

  type Async[T] = Callback[T] => Unit
}

object Async {
  private def guarded[T](async: Async[T]): Async[T] = callback => {
    val called = new AtomicBoolean
    val guardedCallback: Callback[T] = result =>
      if (!called.getAndSet(true)) {
        callback(result) // may possibly throw but better let it fly rather than catch and ignore
      }
    try async(guardedCallback) catch {
      case NonFatal(t) =>
        // if callback was already called then we can't do much with the failure, rethrow it
        if (!called.getAndSet(true)) callback(Failure(t)) else throw t
    }
  }

  def safe[T](async: => Async[T]): Async[T] =
    try guarded(async) catch {
      case NonFatal(t) => failed(t)
    }

  def ready[T](result: Try[T]): Async[T] =
    callback => callback(result)

  def successful[T](value: T): Async[T] =
    ready(Success(value))

  def failed[T](cause: Throwable): Async[T] =
    ready(Failure(cause))

  def transform[A, B](async: Async[A])(f: Try[A] => Try[B]): Async[B] =
    cb => async(Callback.contraTransform(cb)(f))

  def map[A, B](async: Async[A])(f: A => B): Async[B] =
    transform(async)(_.map(f))
}

object Callback {
  def contraTransform[A, B](callback: Callback[B])(f: Try[A] => Try[B]): Callback[A] =
    ta => callback(try f(ta) catch {
      case NonFatal(cause) => Failure(cause)
    })

  def contramap[A, B](callback: Callback[B])(f: A => B): Callback[A] =
    contraTransform(callback)(_.map(f))
} 
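
A small sketch of driving an Async value, assuming the Callback alias reconstructed above; an Async[T] is just a function that hands its Try[T] result to a callback:

import scala.util.{Failure, Success}

val answer: Async[Int]  = Async.successful(21)
val doubled: Async[Int] = Async.map(answer)(_ * 2)

// Running an Async means supplying it with a callback
doubled {
  case Success(v) => println(s"result: $v")   // prints "result: 42"
  case Failure(t) => t.printStackTrace()
}
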
Example 49
Source File: DefaultThreadPool.scala    From scrapy4s   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.scrapy4s.thread

import java.util.concurrent.{BlockingQueue, CountDownLatch, Executor, TimeUnit}
import java.util.concurrent.atomic.AtomicBoolean

import com.scrapy4s.exception.QueueTimeOutException
import org.slf4j.LoggerFactory


class DefaultThreadPool(
                name: String,
                threadCount: Int,
                queue: BlockingQueue[Runnable]
                ) extends ThreadPool {

  val logger = LoggerFactory.getLogger(classOf[DefaultThreadPool])
  val startFlag = new AtomicBoolean(false)
  val countDownLatch = new CountDownLatch(threadCount)
  init()

  private def init(): Unit = {
    if (startFlag.compareAndSet(false, true)) {
      (1 to threadCount).foreach(i => {
        val thread = new Thread(() => {task()})
        thread.setName(s"pool-$name-$i")
        thread.start()
      })
    } else {
      throw new Exception("Thread pool has already been started")
    }
  }



  def task() = {
    try {
      while (startFlag.get()) {
        try {
          val runnable = queue.poll(1, TimeUnit.SECONDS)
          if (runnable == null) {
            throw new QueueTimeOutException()
          }
          runnable.run()
        } catch {
          case _: QueueTimeOutException =>
          case e: Exception =>
            logger.error("thread pool exception", e)
        }
      }
    } finally {
      countDownLatch.countDown()
    }
  }

  override def shutdown() = {
    startFlag.compareAndSet(true, false)
  }

  override def waitForStop() = {
    countDownLatch.await()
  }

  override def waitForStop(timeout: Long, unit: TimeUnit): Boolean = {
    countDownLatch.await(timeout, unit)
  }

  override def execute(command: Runnable) = {
    if (command == null) throw new NullPointerException()
    queue.put(command)
  }
} 
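
A short usage sketch (not part of the original file) for the pool above:

import java.util.concurrent.LinkedBlockingQueue

// arguments: pool name, worker thread count, backing task queue
val pool = new DefaultThreadPool("workers", 4, new LinkedBlockingQueue[Runnable]())

(1 to 10).foreach { i =>
  pool.execute(() => println(s"task $i on ${Thread.currentThread.getName}"))
}

pool.shutdown()      // workers exit after their current task (or the next 1s poll timeout)
pool.waitForStop()   // block until every worker thread has finished
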
Example 50
Source File: StreamingListenerBus.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.scheduler

import java.util.concurrent.atomic.AtomicBoolean

import org.apache.spark.Logging
import org.apache.spark.util.AsynchronousListenerBus


private[spark] class StreamingListenerBus
  extends AsynchronousListenerBus[StreamingListener, StreamingListenerEvent]("StreamingListenerBus")
  with Logging {

  private val logDroppedEvent = new AtomicBoolean(false)

  override def onPostEvent(listener: StreamingListener, event: StreamingListenerEvent): Unit = {
    event match {
      case receiverStarted: StreamingListenerReceiverStarted =>
        listener.onReceiverStarted(receiverStarted)
      case receiverError: StreamingListenerReceiverError =>
        listener.onReceiverError(receiverError)
      case receiverStopped: StreamingListenerReceiverStopped =>
        listener.onReceiverStopped(receiverStopped)
      case batchSubmitted: StreamingListenerBatchSubmitted =>
        listener.onBatchSubmitted(batchSubmitted)
      case batchStarted: StreamingListenerBatchStarted =>
        listener.onBatchStarted(batchStarted)
      case batchCompleted: StreamingListenerBatchCompleted =>
        listener.onBatchCompleted(batchCompleted)
      case _ =>
    }
  }

  override def onDropEvent(event: StreamingListenerEvent): Unit = {
    if (logDroppedEvent.compareAndSet(false, true)) {
      // Only log the following message once to avoid duplicated annoying logs.
      logError("Dropping StreamingListenerEvent because no remaining room in event queue. " +
        "This likely means one of the StreamingListeners is too slow and cannot keep up with the " +
        "rate at which events are being started by the scheduler.")
    }
  }
} 
Example 51
Source File: StandardVMDeathManagerIntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.api.lowlevel.vm
import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.lowlevel.events.EventType._
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiConstants, ApiTestUtilities, VirtualMachineFixtures}

class StandardVMDeathManagerIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("StandardVMDeathManager") {
    it("should trigger when a virtual machine dies", ApiConstants.NoWindows) {
      val testClass = "org.scaladebugger.test.misc.MainUsingApp"

      val detectedDeath = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()
      import s.lowlevel._

      // Mark that we want to receive vm death events and watch for one
      vmDeathManager.createVMDeathRequest()
      eventManager.addResumingEventHandler(VMDeathEventType, _ => {
        detectedDeath.set(true)
      })

      // Start our VM and listen for the start event
      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Kill the JVM process so we get a disconnect event
        // NOTE: This does not seem to trigger the VMDeathEvent on Windows
        s.underlyingVirtualMachine.process().destroy()

        // Eventually, we should receive the death event
        logTimeTaken(eventually {
          detectedDeath.get() should be (true)
        })
      }
    }
  }
} 
Example 52
Source File: StandardModificationWatchpointManagerIntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.api.lowlevel.watchpoints

import java.util.concurrent.atomic.AtomicBoolean

import com.sun.jdi.event.ModificationWatchpointEvent
import org.scaladebugger.api.lowlevel.events.EventType._
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class StandardModificationWatchpointManagerIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("StandardModificationWatchpointManager") {
    it("should be able to detect modification to a field") {
      val testClass = "org.scaladebugger.test.watchpoints.ModificationWatchpoint"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val className = "org.scaladebugger.test.watchpoints.SomeModificationClass"
      val fieldName = "field"

      val detectedModificationWatchpoint = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()
      import s.lowlevel._

      modificationWatchpointManager.createModificationWatchpointRequest(
        className,
        fieldName
      )

      // Listen for modification watchpoint events for specific variable
      eventManager.addResumingEventHandler(ModificationWatchpointEventType, e => {
        val modificationWatchpointEvent = e.asInstanceOf[ModificationWatchpointEvent]
        val name = modificationWatchpointEvent.field().name()

        // If we detected modification for our variable, mark our flag
        if (name == fieldName) detectedModificationWatchpoint.set(true)
      })

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          assert(detectedModificationWatchpoint.get(), s"$fieldName was never modified!")
        })
      }
    }
  }
} 
Example 53
Source File: StandardAccessWatchpointManagerIntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.api.lowlevel.watchpoints

import java.util.concurrent.atomic.AtomicBoolean

import com.sun.jdi.event.AccessWatchpointEvent
import org.scaladebugger.api.lowlevel.events.EventType._
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class StandardAccessWatchpointManagerIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("StandardAccessWatchpointManager") {
    it("should be able to detect access to a field") {
      val testClass = "org.scaladebugger.test.watchpoints.AccessWatchpoint"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val className = "org.scaladebugger.test.watchpoints.SomeAccessClass"
      val fieldName = "field"

      val detectedAccessWatchpoint = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()
      import s.lowlevel._

      accessWatchpointManager.createAccessWatchpointRequest(
        className,
        fieldName
      )

      // Listen for access watchpoint events for specific variable
      eventManager.addResumingEventHandler(AccessWatchpointEventType, e => {
        val accessWatchpointEvent = e.asInstanceOf[AccessWatchpointEvent]
        val name = accessWatchpointEvent.field().name()

        // If we detected access for our variable, mark our flag
        if (name == fieldName) detectedAccessWatchpoint.set(true)
      })

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          assert(detectedAccessWatchpoint.get(), s"$fieldName never accessed!")
        })
      }
    }
  }
} 
Example 54
Source File: StandardMethodEntryManagerIntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.api.lowlevel.methods

import java.util.concurrent.atomic.AtomicBoolean

import com.sun.jdi.event.{BreakpointEvent, MethodEntryEvent}
import org.scaladebugger.api.lowlevel.events.EventType._
import org.scaladebugger.api.lowlevel.events.filters.MethodNameFilter
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class StandardMethodEntryManagerIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("StandardMethodEntryManager") {
    it("should be able to detect entering a specific method in a class") {
      val testClass = "org.scaladebugger.test.methods.MethodEntry"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val expectedClassName =
        "org.scaladebugger.test.methods.MethodEntryTestClass"
      val expectedMethodName = "testMethod"
      val methodNameFilter = MethodNameFilter(name = expectedMethodName)

      val reachedUnexpectedMethod = new AtomicBoolean(false)
      val reachedExpectedMethod = new AtomicBoolean(false)
      val reachedMethodBeforeFirstLine = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()
      import s.lowlevel._

      // Set up the method entry event
      methodEntryManager.createMethodEntryRequest(
        expectedClassName,
        expectedMethodName
      )

      // First line in test method
      breakpointManager.createBreakpointRequest(testFile, 26)

      // Listen for breakpoint on first line of method, checking if this
      // breakpoint is hit before or after the method entry event
      eventManager.addResumingEventHandler(BreakpointEventType, e => {
        val breakpointEvent = e.asInstanceOf[BreakpointEvent]
        val location = breakpointEvent.location()
        val fileName = location.sourcePath()
        val lineNumber = location.lineNumber()

        logger.debug(s"Reached breakpoint: $fileName:$lineNumber")

        val methodEntryHit = reachedExpectedMethod.get()
        reachedMethodBeforeFirstLine.set(methodEntryHit)
      })

      // Listen for method entry events for the specific method
      eventManager.addResumingEventHandler(MethodEntryEventType, e => {
        val methodEntryEvent = e.asInstanceOf[MethodEntryEvent]
        val method = methodEntryEvent.method()
        val className = method.declaringType().name()
        val methodName = method.name()

        logger.debug(s"Reached method: $className/$methodName")

        if (className == expectedClassName && methodName == expectedMethodName) {
          reachedExpectedMethod.set(true)
        } else {
          reachedUnexpectedMethod.set(true)
        }
      }, methodNameFilter)

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          reachedUnexpectedMethod.get() should be (false)
          reachedExpectedMethod.get() should be (true)
          reachedMethodBeforeFirstLine.get() should be (true)
        })
      }
    }
  }
} 
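Note how the three flags cooperate: the breakpoint handler snapshots whatever reachedExpectedMethod holds at that instant, so the later assertions can reason about which event fired first. A tiny sketch of that ordering-capture idea on its own (hypothetical names, no JDI involved):

import java.util.concurrent.atomic.AtomicBoolean

object OrderingFlagsSketch {
  private val firstHappened     = new AtomicBoolean(false)
  private val firstBeforeSecond = new AtomicBoolean(false)

  def onFirstEvent(): Unit = firstHappened.set(true)

  // When the second event fires, record whether the first one had
  // already been observed; the caller asserts on this snapshot later.
  def onSecondEvent(): Unit = firstBeforeSecond.set(firstHappened.get())

  def main(args: Array[String]): Unit = {
    onFirstEvent()
    onSecondEvent()
    assert(firstBeforeSecond.get(), "expected the first event before the second")
  }
}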
Example 55
Source File: StandardMethodExitManagerIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.lowlevel.methods

import java.util.concurrent.atomic.AtomicBoolean

import com.sun.jdi.event.{BreakpointEvent, MethodExitEvent}
import org.scaladebugger.api.lowlevel.events.EventType._
import org.scaladebugger.api.lowlevel.events.filters.MethodNameFilter
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class StandardMethodExitManagerIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("StandardMethodExitManager") {
    it("should be able to detect exiting a specific method in a class") {
      val testClass = "org.scaladebugger.test.methods.MethodExit"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val expectedClassName =
        "org.scaladebugger.test.methods.MethodExitTestClass"
      val expectedMethodName = "testMethod"
      val methodNameFilter = MethodNameFilter(name = expectedMethodName)

      val leftUnexpectedMethod = new AtomicBoolean(false)
      val leftExpectedMethod = new AtomicBoolean(false)
      val leftMethodAfterLastLine = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()
      import s.lowlevel._

      // Set up the method exit event
      methodExitManager.createMethodExitRequest(
        expectedClassName,
        expectedMethodName
      )

      // Last line in test method
      breakpointManager.createBreakpointRequest(testFile, 28)

      // Listen for breakpoint on last line of method, checking if this
      // breakpoint is hit before or after the method exit event
      eventManager.addResumingEventHandler(BreakpointEventType, e => {
        val breakpointEvent = e.asInstanceOf[BreakpointEvent]
        val location = breakpointEvent.location()
        val fileName = location.sourcePath()
        val lineNumber = location.lineNumber()

        logger.debug(s"Reached breakpoint: $fileName:$lineNumber")

        val methodExitHit = leftExpectedMethod.get()
        leftMethodAfterLastLine.set(!methodExitHit)
      })

      // Listen for method exit events for the specific method
      eventManager.addResumingEventHandler(MethodExitEventType, e => {
        val methodExitEvent = e.asInstanceOf[MethodExitEvent]
        val method = methodExitEvent.method()
        val className = method.declaringType().name()
        val methodName = method.name()

        logger.debug(s"Left method: $className/$methodName")

        if (className == expectedClassName && methodName == expectedMethodName) {
          leftExpectedMethod.set(true)
        } else {
          leftUnexpectedMethod.set(true)
        }
      }, methodNameFilter)


      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          leftUnexpectedMethod.get() should be (false)
          leftExpectedMethod.get() should be (true)
          leftMethodAfterLastLine.get() should be (true)
        })
      }
    }
  }
} 
Example 56
Source File: StandardMonitorContendedEnterManagerIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.lowlevel.monitors

import java.util.concurrent.atomic.AtomicBoolean

import com.sun.jdi.event.MonitorContendedEnterEvent
import org.scaladebugger.api.lowlevel.events.EventType._
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class StandardMonitorContendedEnterManagerIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("StandardMonitorContendedEnterManager") {
    it("should trigger when a thread attempts to enter a monitor already acquired by another thread") {
      val testClass = "org.scaladebugger.test.monitors.MonitorContendedEnter"

      val detectedEnter = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()
      import s.lowlevel._

      // Mark that we want to receive monitor contended enter events and
      // watch for one
      monitorContendedEnterManager.createMonitorContendedEnterRequest()
      eventManager.addResumingEventHandler(MonitorContendedEnterEventType, e => {
        val monitorContendedEnterEvent =
          e.asInstanceOf[MonitorContendedEnterEvent]

        val threadName = monitorContendedEnterEvent.thread().name()
        val monitorTypeName =
          monitorContendedEnterEvent.monitor().referenceType().name()

        logger.debug(s"Detected attempted monitor enter in thread $threadName for monitor of type $monitorTypeName")
        detectedEnter.set(true)
      })

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Eventually, we should receive the monitor contended enter event
        logTimeTaken(eventually {
          // NOTE: Using asserts to provide more helpful failure messages
          assert(detectedEnter.get(), s"No monitor enter attempt was detected!")
        })
      }
    }
  }
} 
Example 57
Source File: StandardMonitorContendedEnteredManagerIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.lowlevel.monitors

import java.util.concurrent.atomic.AtomicBoolean

import com.sun.jdi.event.MonitorContendedEnteredEvent
import org.scaladebugger.api.lowlevel.events.EventType._
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class StandardMonitorContendedEnteredManagerIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("StandardMonitorContendedEnteredManager") {
    it("should trigger when a thread enters a monitor after waiting for it to be released by another thread") {
      val testClass = "org.scaladebugger.test.monitors.MonitorContendedEntered"

      val detectedEntered = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()
      import s.lowlevel._

      // Mark that we want to receive monitor contended entered events and
      // watch for one
      monitorContendedEnteredManager.createMonitorContendedEnteredRequest()
      eventManager.addResumingEventHandler(MonitorContendedEnteredEventType, e => {
        val monitorContendedEnteredEvent =
          e.asInstanceOf[MonitorContendedEnteredEvent]

        val threadName = monitorContendedEnteredEvent.thread().name()
        val monitorTypeName =
          monitorContendedEnteredEvent.monitor().referenceType().name()

        logger.debug(s"Detected monitor entered in thread $threadName for monitor of type $monitorTypeName")
        detectedEntered.set(true)
      })

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Eventually, we should receive the monitor contended entered event
        logTimeTaken(eventually {
          // NOTE: Using asserts to provide more helpful failure messages
          assert(detectedEntered.get(), s"No monitor entered was detected!")
        })
      }
    }
  }
} 
Example 58
Source File: StandardMonitorWaitedManagerIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.lowlevel.monitors

import java.util.concurrent.atomic.AtomicBoolean

import com.sun.jdi.event.MonitorWaitedEvent
import org.scaladebugger.api.lowlevel.events.EventType._
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class StandardMonitorWaitedManagerIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("StandardMonitorWaitedManager") {
    it("should trigger when a thread has finished waiting on a monitor object") {
      val testClass = "org.scaladebugger.test.monitors.MonitorWaited"

      val detectedWaited = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()
      import s.lowlevel._

      // Mark that we want to receive monitor waited events and
      // watch for one
      monitorWaitedManager.createMonitorWaitedRequest()
      eventManager.addResumingEventHandler(MonitorWaitedEventType, e => {
        val monitorWaitedEvent =
          e.asInstanceOf[MonitorWaitedEvent]

        val threadName = monitorWaitedEvent.thread().name()
        val monitorTypeName =
          monitorWaitedEvent.monitor().referenceType().name()

        logger.debug(s"Detected finished waiting in thread $threadName for monitor of type $monitorTypeName")
        detectedWaited.set(true)
      })

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Eventually, we should receive the monitor waited event
        logTimeTaken(eventually {
          // NOTE: Using asserts to provide more helpful failure messages
          assert(detectedWaited.get(), s"No monitor finished waiting was detected!")
        })
      }
    }
  }
} 
Example 59
Source File: StandardMonitorWaitManagerIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.lowlevel.monitors

import java.util.concurrent.atomic.AtomicBoolean

import com.sun.jdi.event.MonitorWaitEvent
import org.scaladebugger.api.lowlevel.events.EventType._
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class StandardMonitorWaitManagerIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("StandardMonitorWaitManager") {
    it("should trigger when a thread is about to wait on a monitor object") {
      val testClass = "org.scaladebugger.test.monitors.MonitorWait"

      val detectedWait = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()
      import s.lowlevel._

      // Mark that we want to receive monitor wait events and
      // watch for one
      monitorWaitManager.createMonitorWaitRequest()
      eventManager.addResumingEventHandler(MonitorWaitEventType, e => {
        val monitorWaitEvent =
          e.asInstanceOf[MonitorWaitEvent]

        val threadName = monitorWaitEvent.thread().name()
        val monitorTypeName =
          monitorWaitEvent.monitor().referenceType().name()

        logger.debug(s"Detected wait in thread $threadName for monitor of type $monitorTypeName")
        detectedWait.set(true)
      })

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Eventually, we should receive the monitor wait event
        logTimeTaken(eventually {
          // NOTE: Using asserts to provide more helpful failure messages
          assert(detectedWait.get(), s"No monitor wait was detected!")
        })
      }
    }
  }
} 
Example 60
Source File: StandardClassPrepareManagerIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.lowlevel.classes

import java.util.concurrent.atomic.AtomicBoolean

import com.sun.jdi.event.ClassPrepareEvent
import org.scaladebugger.api.lowlevel.events.EventType._
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class StandardClassPrepareManagerIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("StandardClassPrepareManager") {
    it("should trigger when a class is loaded") {
      val testClass = "org.scaladebugger.test.classes.ClassPrepare"

      val expectedClassName = "org.scaladebugger.test.classes.CustomClass"
      val detectedPrepare = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()

      import s.lowlevel._

      // Mark that we want to receive class prepare events and watch for one
      classPrepareManager.createClassPrepareRequest()
      eventManager.addResumingEventHandler(ClassPrepareEventType, e => {
        val classPrepareEvent = e.asInstanceOf[ClassPrepareEvent]
        val className = classPrepareEvent.referenceType().name()

        logger.debug("New class loaded: " + className)
        if (className == expectedClassName) detectedPrepare.set(true)
      })

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Eventually, we should receive the class prepare event
        logTimeTaken(eventually {
          // NOTE: Using asserts to provide more helpful failure messages
          assert(detectedPrepare.get(), s"$expectedClassName was not loaded!")
        })
      }
    }
  }
} 
Example 61
Source File: StandardClassUnloadManagerIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.lowlevel.classes

import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.lowlevel.events.EventType._
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class StandardClassUnloadManagerIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("StandardClassUnloadManager") {
    // NOTE: It is not possible to trigger a class unload accurately due to the
    //       JVM's non-deterministic nature with garbage collection.
    ignore("should trigger when a class is unloaded") {
      val testClass = "org.scaladebugger.test.classes.ClassUnload"

      val detectedUnload = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()

      import s.lowlevel._

      // Mark that we want to receive class unload events and watch for one
      classUnloadManager.createClassUnloadRequest()
      eventManager.addResumingEventHandler(ClassUnloadEventType, _ => {
        detectedUnload.set(true)
      })

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Eventually, we should receive the class unload event
        logTimeTaken(eventually {
          detectedUnload.get() should be (true)
        })
      }
    }
  }
} 
Example 62
Source File: JavaVMDisconnectRequestIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.profiles.java.requests.vm

import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaVMDisconnectRequestIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("JavaVMDisconnectRequest") {
    it("should trigger when a virtual machine disconnects") {
      val testClass = "org.scaladebugger.test.misc.MainUsingApp"

      val detectedDisconnect = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()

      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateVMDisconnectRequest()
        .foreach(_ => detectedDisconnect.set(true))

      // Start our VM and listen for the disconnect event
      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Kill the JVM process so we get a disconnect event
        s.underlyingVirtualMachine.process().destroy()

        // Eventually, we should receive the disconnect event
        logTimeTaken(eventually {
          detectedDisconnect.get() should be (true)
        })
      }
    }
  }
} 
Example 63
Source File: JavaVMDeathRequestIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.profiles.java.requests.vm
import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiConstants, ApiTestUtilities, VirtualMachineFixtures}

class JavaVMDeathRequestIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("JavaVMDeathRequest") {
    it("should trigger when a virtual machine dies", ApiConstants.NoWindows) {
      val testClass = "org.scaladebugger.test.misc.MainUsingApp"

      val detectedDeath = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()

      // Mark that we want to receive vm death events and watch for one
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateVMDeathRequest()
        .foreach(_ => detectedDeath.set(true))

      // Start our VM and listen for the start event
      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Kill the JVM process so we get a disconnect event
        // NOTE: This does not seem to trigger the VMDeathEvent on Windows
        s.underlyingVirtualMachine.process().destroy()

        // Eventually, we should receive the start event
        logTimeTaken(eventually {
          detectedDeath.get() should be (true)
        })
      }
    }
  }
} 
Example 64
Source File: JavaVMStartRequestIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.profiles.java.requests.vm

import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaVMStartRequestIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("JavaVMStartRequest") {
    it("should trigger when a virtual machine starts") {
      val testClass = "org.scaladebugger.test.misc.MainUsingApp"

      val detectedStart = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()

      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateVMStartRequest()
        .foreach(_ => detectedStart.set(true))

      // Start our VM and listen for the start event
      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Eventually, we should receive the start event
        logTimeTaken(eventually {
          detectedStart.get() should be (true)
        })
      }
    }
  }
} 
Example 65
Source File: JavaAccessWatchpointRequestIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.profiles.java.requests.watchpoints

import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaAccessWatchpointRequestIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("JavaAccessWatchpointRequest") {
    it("should be able to detect access to a field") {
      val testClass = "org.scaladebugger.test.watchpoints.AccessWatchpoint"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val className = "org.scaladebugger.test.watchpoints.SomeAccessClass"
      val fieldName = "field"

      val detectedAccessWatchpoint = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()

      // Listen for access watchpoint events for specific variable
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateAccessWatchpointRequest(className, fieldName)
        .filter(_.field.declaringType.name == className)
        .filter(_.field.name == fieldName)
        .foreach(_ => detectedAccessWatchpoint.set(true))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          assert(detectedAccessWatchpoint.get(), s"$fieldName never accessed!")
        })
      }
    }
  }
} 
Example 66
Source File: JavaModificationWatchpointRequestIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.profiles.java.requests.watchpoints

import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaModificationWatchpointRequestIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("JavaModificationWatchpointRequest") {
    it("should be able to detect modification to a field") {
      val testClass = "org.scaladebugger.test.watchpoints.ModificationWatchpoint"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val className = "org.scaladebugger.test.watchpoints.SomeModificationClass"
      val fieldName = "field"

      val detectedModificationWatchpoint = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()

      // Listen for modification watchpoint events for specific variable
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateModificationWatchpointRequest(className, fieldName)
        .filter(_.field.declaringType.name == className)
        .filter(_.field.name == fieldName)
        .foreach(_ => detectedModificationWatchpoint.set(true))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          assert(detectedModificationWatchpoint.get(), s"$fieldName never modified!")
        })
      }
    }
  }
} 
Example 67
Source File: JavaMethodExitRequestIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.profiles.java.requests.methods

import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaMethodExitRequestIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("JavaMethodExitRequest") {
    it("should be able to detect exiting a specific method in a class") {
      val testClass = "org.scaladebugger.test.methods.MethodExit"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val expectedClassName =
        "org.scaladebugger.test.methods.MethodExitTestClass"
      val expectedMethodName = "testMethod"

      val leftUnexpectedMethod = new AtomicBoolean(false)
      val leftExpectedMethod = new AtomicBoolean(false)
      val leftMethodAfterLastLine = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()

      val methodPipeline = s.withProfile(JavaDebugProfile.Name)
        .getOrCreateMethodExitRequest(expectedClassName, expectedMethodName)
        .map(_.method)
        .map(m => (m.declaringType.name, m.name))

      methodPipeline
        .filter(_._1 == expectedClassName)
        .filter(_._2 == expectedMethodName)
        .foreach(_ => leftExpectedMethod.set(true))

      methodPipeline
        .filterNot(_._1 == expectedClassName)
        .foreach(_ => leftUnexpectedMethod.set(true))

      methodPipeline
        .filterNot(_._2 == expectedMethodName)
        .foreach(_ => leftUnexpectedMethod.set(true))

      // Last line in test method
      s.getOrCreateBreakpointRequest(testFile, 28)
        .map(_.location)
        .map(l => (l.sourcePath, l.lineNumber))
        .foreach(t => {
          val methodExitHit = leftExpectedMethod.get()
          leftMethodAfterLastLine.set(!methodExitHit)
        })

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          leftUnexpectedMethod.get() should be (false)
          leftExpectedMethod.get() should be (true)
          leftMethodAfterLastLine.get() should be (true)
        })
      }
    }
  }
} 
Example 68
Source File: JavaMethodEntryRequestIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.profiles.java.requests.methods

import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaMethodEntryRequestIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("JavaMethodEntryRequest") {
    it("should be able to detect entering a specific method in a class") {
      val testClass = "org.scaladebugger.test.methods.MethodEntry"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val expectedClassName =
        "org.scaladebugger.test.methods.MethodEntryTestClass"
      val expectedMethodName = "testMethod"

      val reachedUnexpectedMethod = new AtomicBoolean(false)
      val reachedExpectedMethod = new AtomicBoolean(false)
      val reachedMethodBeforeFirstLine = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()

      val methodPipeline = s.withProfile(JavaDebugProfile.Name)
        .getOrCreateMethodEntryRequest(expectedClassName, expectedMethodName)
        .map(_.method)
        .map(m => (m.declaringType.name, m.name))

      methodPipeline
        .filter(_._1 == expectedClassName)
        .filter(_._2 == expectedMethodName)
        .foreach(_ => reachedExpectedMethod.set(true))

      methodPipeline
        .filterNot(_._1 == expectedClassName)
        .foreach(_ => reachedUnexpectedMethod.set(true))

      methodPipeline
        .filterNot(_._2 == expectedMethodName)
        .foreach(_ => reachedUnexpectedMethod.set(true))

      // First line in test method
      s.getOrCreateBreakpointRequest(testFile, 26)
        .map(_.location)
        .map(l => (l.sourcePath, l.lineNumber))
        .foreach(t => {
          val methodEntryHit = reachedExpectedMethod.get()
          reachedMethodBeforeFirstLine.set(methodEntryHit)
        })

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          reachedUnexpectedMethod.get() should be (false)
          reachedExpectedMethod.get() should be (true)
          reachedMethodBeforeFirstLine.get() should be (true)
        })
      }
    }
  }
} 
Example 69
Source File: JavaExceptionRequestIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.profiles.java.requests.exceptions

import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaExceptionRequestIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("JavaExceptionRequest") {
    it("should be able to detect exceptions in try blocks") {
      val testClass =
        "org.scaladebugger.test.exceptions.InsideTryBlockException"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val detectedException = new AtomicBoolean(false)
      val expectedExceptionName =
        "org.scaladebugger.test.exceptions.CustomException"

      val s = DummyScalaVirtualMachine.newInstance()

      // Mark the exception we want to watch (now that the class
      // is available)
      s.withProfile(JavaDebugProfile.Name).getOrCreateExceptionRequest(
        exceptionName = expectedExceptionName,
        notifyCaught = true,
        notifyUncaught = false
      ).map(_.exception.referenceType.name)
        .filter(_ == expectedExceptionName)
        .foreach(_ => detectedException.set(true))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          detectedException.get() should be (true)
        })
      }
    }

    it("should be able to detect exceptions in functional try calls") {
      val testClass =
        "org.scaladebugger.test.exceptions.InsideFunctionalTryException"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val detectedException = new AtomicBoolean(false)
      val expectedExceptionName =
        "org.scaladebugger.test.exceptions.CustomException"

      val s = DummyScalaVirtualMachine.newInstance()

      // Mark the exception we want to watch (now that the class
      // is available)
      s.withProfile(JavaDebugProfile.Name).getOrCreateExceptionRequest(
        exceptionName = expectedExceptionName,
        notifyCaught = true,
        notifyUncaught = false
      ).map(_.exception.referenceType.name)
        .filter(_ == expectedExceptionName)
        .foreach(_ => detectedException.set(true))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          detectedException.get() should be (true)
        })
      }
    }

    it("should be able to detect exceptions outside of try blocks") {
      val testClass =
        "org.scaladebugger.test.exceptions.OutsideTryException"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val detectedException = new AtomicBoolean(false)
      val expectedExceptionName =
        "org.scaladebugger.test.exceptions.CustomException"

      val s = DummyScalaVirtualMachine.newInstance()

      // Mark the exception we want to watch (now that the class
      // is available)
      s.withProfile(JavaDebugProfile.Name).getOrCreateExceptionRequest(
        exceptionName = expectedExceptionName,
        notifyCaught = false,
        notifyUncaught = true
      ).map(_.exception.referenceType.name)
        .filter(_ == expectedExceptionName)
        .foreach(_ => detectedException.set(true))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          detectedException.get() should be (true)
        })
      }
    }
  }
} 
Example 70
Source File: JavaMonitorWaitRequestIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.profiles.java.requests.monitors

import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaMonitorWaitRequestIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("JavaMonitorWaitRequest") {
    it("should trigger when a thread is about to wait on a monitor object") {
      val testClass = "org.scaladebugger.test.monitors.MonitorWait"

      val detectedWait = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()

      // Mark that we want to receive monitor wait events and
      // watch for one
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateMonitorWaitRequest()
        .foreach(_ => detectedWait.set(true))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Eventually, we should receive the monitor wait event
        logTimeTaken(eventually {
          // NOTE: Using asserts to provide more helpful failure messages
          assert(detectedWait.get(), s"No monitor wait was detected!")
        })
      }
    }
  }
} 
Example 71
Source File: JavaMonitorContendedEnteredRequestIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.profiles.java.requests.monitors

import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaMonitorContendedEnteredRequestIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("JavaMonitorContendedEnteredRequest") {
    it("should trigger when a thread enters a monitor after waiting for it to be released by another thread") {
      val testClass = "org.scaladebugger.test.monitors.MonitorContendedEntered"

      val detectedEntered = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()

      // Mark that we want to receive monitor contended entered events and
      // watch for one
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateMonitorContendedEnteredRequest()
        .foreach(_ => detectedEntered.set(true))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Eventually, we should receive the monitor contended entered event
        logTimeTaken(eventually {
          // NOTE: Using asserts to provide more helpful failure messages
          assert(detectedEntered.get(), s"No monitor entered was detected!")
        })
      }
    }
  }
} 
Example 72
Source File: JavaMonitorWaitedRequestIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.profiles.java.requests.monitors

import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaMonitorWaitedRequestIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("JavaMonitorWaitedRequest") {
    it("should trigger when a thread has finished waiting on a monitor object") {
      val testClass = "org.scaladebugger.test.monitors.MonitorWaited"

      val detectedWaited = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()

      // Mark that we want to receive monitor waited events and
      // watch for one
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateMonitorWaitedRequest()
        .foreach(_ => detectedWaited.set(true))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Eventually, we should receive the monitor waited event
        logTimeTaken(eventually {
          // NOTE: Using asserts to provide more helpful failure messages
          assert(detectedWaited.get(), s"No monitor finished waiting was detected!")
        })
      }
    }
  }
} 
Example 73
Source File: JavaMonitorContendedEnterRequestIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.profiles.java.requests.monitors

import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaMonitorContendedEnterRequestIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("JavaMonitorContendedEnterRequest") {
    it("should trigger when a thread attempts to enter a monitor already acquired by another thread") {
      val testClass = "org.scaladebugger.test.monitors.MonitorContendedEnter"

      val detectedEnter = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()

      // Mark that we want to receive monitor contended enter events and
      // watch for one
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateMonitorContendedEnterRequest()
        .foreach(_ => detectedEnter.set(true))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Eventually, we should receive the monitor contended enter event
        logTimeTaken(eventually {
          // NOTE: Using asserts to provide more helpful failure messages
          assert(detectedEnter.get(), s"No monitor enter attempt was detected!")
        })
      }
    }
  }
} 
Example 74
Source File: JavaClassUnloadRequestIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.profiles.java.requests.classes

import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaClassUnloadRequestIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("JavaClassUnloadRequest") {
    // NOTE: It is not possible to trigger a class unload accurately due to the
    //       JVM's non-deterministic nature with garbage collection.
    ignore("should trigger when a class is unloaded") {
      val testClass = "org.scaladebugger.test.classes.ClassUnload"

      val detectedUnload = new AtomicBoolean(false)

      val s = DummyScalaVirtualMachine.newInstance()

      // Mark that we want to receive class unload events and watch for one
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateClassUnloadRequest()
        .foreach(_ => detectedUnload.set(true))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        // Eventually, we should receive the class unload event
        logTimeTaken(eventually {
          detectedUnload.get() should be (true)
        })
      }
    }
  }
} 
Example 75
Source File: ProcessDebuggerIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.debuggers
import java.io.IOException
import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

import scala.util.Try

class ProcessDebuggerIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("ProcessDebugger") {
    it("should be able to attach to a running JVM process") {
      withProcess((pid, process) => {
        val processDebugger = ProcessDebugger(pid)

        val attachedToVirtualMachine = new AtomicBoolean(false)

        // Need to keep retrying until process is ready to be attached to
        // NOTE: If unable to connect, ensure that hostname is "localhost"
        eventually {
          processDebugger.start(_ => attachedToVirtualMachine.set(true))
        }

        // Keep checking back until we have successfully attached
        eventually {
          attachedToVirtualMachine.get() should be (true)
        }
      })
    }
  }

  
  // Spawns the helper JVM process, runs the test body, and always destroys
  // the process before rethrowing any failure from the body.
  private def withProcess[T](testCode: (Int, Process) => T): T = {
    val jvmProcess = createProcess()

    val result = Try(testCode(jvmProcess._1, jvmProcess._2))

    destroyProcess(jvmProcess._2)

    result.get
  }

  private def createProcess(): (Int, Process) = {
    val (pid, process) = JDITools.spawnAndGetPid(
      className = "org.scaladebugger.test.misc.AttachingMain",
      server = true,
      port = 0 // Assign ephemeral port
    )

    // If unable to retrieve the process PID, exit now
    if (pid <= 0) {
      process.destroy()
      throw new IOException("Unable to retrieve process PID!")
    }

    (pid, process)
  }

  private def destroyProcess(process: Process): Unit = process.destroy()
} 
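withProcess above is a small loan pattern: the helper always destroys the spawned process, and only afterwards rethrows any failure from the test body via Try#get. A generic sketch of the same shape under hypothetical names (not part of the test helpers):

import scala.util.Try

object LoanPatternSketch {
  // Run `use` with a freshly acquired resource, always release it,
  // then surface any failure from `use` afterwards via Try#get.
  def withResource[R, T](acquire: => R)(release: R => Unit)(use: R => T): T = {
    val resource = acquire
    val result   = Try(use(resource))
    release(resource)
    result.get
  }

  def main(args: Array[String]): Unit = {
    val length = withResource(new java.io.StringReader("hello"))(_.close()) { reader =>
      Iterator.continually(reader.read()).takeWhile(_ != -1).length
    }
    println(s"read $length characters") // prints "read 5 characters"
  }
}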
Example 76
Source File: LaunchingDebuggerIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.debuggers

import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.utils.{JDITools, Logging}
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.ApiTestUtilities

class LaunchingDebuggerIntegrationSpec extends ParallelMockFunSpec
   with ApiTestUtilities with Logging
{
  describe("LaunchingDebugger") {
    it("should be able to start a JVM and connect to it") {
      val launchedJvmConnected = new AtomicBoolean(false)

      val className = "org.scaladebugger.test.misc.LaunchingMain"
      val classpath = JDITools.jvmClassPath
      val jvmOptions = Seq("-classpath", classpath)
      val launchingDebugger = LaunchingDebugger(
        className = className,
        jvmOptions = jvmOptions,
        suspend = false
      )
      launchingDebugger.start { _ => launchedJvmConnected.set(true) }

      // Keep checking back until the launched JVM has been connected
      eventually {
        launchedJvmConnected.get() should be (true)
      }
    }
  }
} 
Example 77
Source File: AttachingDebuggerIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.debuggers

import java.net.ServerSocket
import java.util.concurrent.atomic.AtomicBoolean

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}

import scala.util.Try

class AttachingDebuggerIntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
{
  describe("AttachingDebugger") {
    it("should be able to attach to a running JVM process") {
      withProcess((port, process) => {
        val attachingDebugger = AttachingDebugger(port)

        val attachedToVirtualMachine = new AtomicBoolean(false)

        // Need to keep retrying until process is ready to be attached to
        eventually {
          attachingDebugger.start(_ => attachedToVirtualMachine.set(true))
        }

        // Keep checking back until we have successfully attached
        eventually {
          attachedToVirtualMachine.get() should be (true)
        }
      })
    }
  }

  
  // Spawns the target JVM process, runs the test body, and always destroys
  // the process before rethrowing any failure from the body.
  private def withProcess[T](testCode: (Int, Process) => T): T = {
    val jvmProcess = createProcess()

    val result = Try(testCode(jvmProcess._1, jvmProcess._2))

    destroyProcess(jvmProcess._2)

    result.get
  }

  private def createProcess(): (Int, Process) = {
    val port = {
      val socket = new ServerSocket(0)
      val _port = socket.getLocalPort
      socket.close()
      _port
    }

    (port, JDITools.spawn(
      className = "org.scaladebugger.test.misc.AttachingMain",
      server = true,
      suspend = true,
      port = port
    ))
  }

  private def destroyProcess(process: Process): Unit = process.destroy()
} 
Example 78
Source File: KyuubiServer.scala    From kyuubi   with Apache License 2.0
package yaooqinn.kyuubi.server

import java.util.concurrent.atomic.AtomicBoolean

import org.apache.spark.{KyuubiSparkUtil, SparkConf}
import org.apache.spark.KyuubiConf._
import yaooqinn.kyuubi.ha.{FailoverService, HighAvailableService, LoadBalanceService}
import yaooqinn.kyuubi.metrics.MetricsSystem
import yaooqinn.kyuubi.service.{CompositeService, ServiceException}

import org.apache.kyuubi.Logging


private[kyuubi] class KyuubiServer private(name: String)
  extends CompositeService(name) with Logging {

  private var _beService: BackendService = _
  def beService: BackendService = _beService
  private var _feService: FrontendService = _
  def feService: FrontendService = _feService
  private var _haService: HighAvailableService = _

  private val started = new AtomicBoolean(false)

  private var _isDeregisterWithZK = false
  def deregisterWithZK(): Unit = _isDeregisterWithZK = true
  def isDeregisterWithZk: Boolean = _isDeregisterWithZK

  def this() = this(classOf[KyuubiServer].getSimpleName)

  override def init(conf: SparkConf): Unit = synchronized {
    this.conf = conf

    MetricsSystem.init(conf)

    val OOMHook = new Runnable {
      override def run(): Unit = KyuubiServer.this.stop()
    }

    _beService = new BackendService(this)
    _feService = new FrontendService(_beService, OOMHook)

    addService(_beService)
    addService(_feService)
    if (conf.getBoolean(HA_ENABLED.key, defaultValue = false)) {
      _haService = if (conf.getOption(HA_MODE.key).exists(_.equalsIgnoreCase("failover"))) {
        new FailoverService(this)
      } else {
        new LoadBalanceService(this)
      }
      addService(_haService)
    }
    super.init(conf)
  }

  override def start(): Unit = {
    super.start()
    started.set(true)
  }

  override def stop(): Unit = {
    info("Shutting down " + name)
    if (started.getAndSet(false)) {
      super.stop()
    }
    MetricsSystem.close()
  }
}

object KyuubiServer extends Logging {

  def startKyuubiServer(): KyuubiServer = {
    try {
      KyuubiSparkUtil.initDaemon(logger)
      validate()
      val conf = new SparkConf(loadDefaults = true)
      KyuubiSparkUtil.setupCommonConfig(conf)
      val server = new KyuubiServer()
      KyuubiSparkUtil.addShutdownHook(server.stop())
      server.init(conf)
      server.start()
      info(server.getName + " started!")
      server
    } catch {
      case e: Exception => throw e
    }
  }

  private[kyuubi] def validate(): Unit = {
    if (KyuubiSparkUtil.majorVersion(KyuubiSparkUtil.SPARK_VERSION) < 2) {
      throw new ServiceException(s"${KyuubiSparkUtil.SPARK_VERSION} is too old for Kyuubi" +
        s" Server.")
    }
  }
} 
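The `started` flag in KyuubiServer is the usual idempotent-lifecycle idiom: `getAndSet(false)` returns true for exactly one caller, so the shutdown body runs at most once even if stop() is invoked both by the shutdown hook and by an explicit call. A stripped-down sketch of just that idiom (hypothetical class; start() uses compareAndSet so it is one-shot as well):

import java.util.concurrent.atomic.AtomicBoolean

class IdempotentService {
  private val started = new AtomicBoolean(false)

  // Only the first successful compare-and-set performs the startup work.
  def start(): Unit =
    if (started.compareAndSet(false, true)) println("starting once")

  // getAndSet(false) is true only for the caller that actually stops the service.
  def stop(): Unit =
    if (started.getAndSet(false)) println("stopping once")
}

object IdempotentServiceDemo extends App {
  val svc = new IdempotentService
  svc.start(); svc.start() // second call is a no-op
  svc.stop(); svc.stop()   // second call is a no-op
}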
Example 79
Source File: StreamingListenerBus.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.streaming.scheduler

import java.util.concurrent.atomic.AtomicBoolean

import org.apache.spark.Logging
import org.apache.spark.util.AsynchronousListenerBus


private[spark] class StreamingListenerBus
  extends AsynchronousListenerBus[StreamingListener, StreamingListenerEvent]("StreamingListenerBus")
  with Logging {

  private val logDroppedEvent = new AtomicBoolean(false)

  override def onPostEvent(listener: StreamingListener, event: StreamingListenerEvent): Unit = {
    event match {
      case receiverStarted: StreamingListenerReceiverStarted =>
        listener.onReceiverStarted(receiverStarted)
      case receiverError: StreamingListenerReceiverError =>
        listener.onReceiverError(receiverError)
      case receiverStopped: StreamingListenerReceiverStopped =>
        listener.onReceiverStopped(receiverStopped)
      case batchSubmitted: StreamingListenerBatchSubmitted =>
        listener.onBatchSubmitted(batchSubmitted)
      case batchStarted: StreamingListenerBatchStarted =>
        listener.onBatchStarted(batchStarted)
      case batchCompleted: StreamingListenerBatchCompleted =>
        listener.onBatchCompleted(batchCompleted)
      case _ =>
    }
  }

  override def onDropEvent(event: StreamingListenerEvent): Unit = {
    if (logDroppedEvent.compareAndSet(false, true)) {
      // Only log the following message once to avoid duplicated annoying logs.
      logError("Dropping StreamingListenerEvent because no remaining room in event queue. " +
        "This likely means one of the StreamingListeners is too slow and cannot keep up with the " +
        "rate at which events are being started by the scheduler.")
    }
  }
} 
Example 80
Source File: LiveListenerBus.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.scheduler

import java.util.concurrent.atomic.AtomicBoolean

import org.apache.spark.util.AsynchronousListenerBus


private[spark] class LiveListenerBus
  // Event blocking queue, first-in first-out
  extends AsynchronousListenerBus[SparkListener, SparkListenerEvent]("SparkListenerBus")
  with SparkListenerBus {

  private val logDroppedEvent = new AtomicBoolean(false)

  override def onDropEvent(event: SparkListenerEvent): Unit = {
    if (logDroppedEvent.compareAndSet(false, true)) {
      // Only log the following message once to avoid duplicated annoying logs.
      logError("Dropping SparkListenerEvent because no remaining room in event queue. " +
        "This likely means one of the SparkListeners is too slow and cannot keep up with " +
        "the rate at which tasks are being started by the scheduler.")
    }
  }

} 
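Both listener buses above use the same log-once guard: compareAndSet(false, true) succeeds for exactly one caller, so the warning is emitted a single time no matter how many events are dropped. A minimal sketch of that guard in isolation (the logging call is a stand-in):

import java.util.concurrent.atomic.AtomicBoolean

object LogOnceSketch {
  private val warned = new AtomicBoolean(false)

  def onDroppedEvent(): Unit = {
    // Only the first caller to flip the flag logs; every later caller skips it.
    if (warned.compareAndSet(false, true)) {
      System.err.println("Dropping events: a consumer cannot keep up")
    }
  }

  def main(args: Array[String]): Unit =
    (1 to 5).foreach(_ => onDroppedEvent()) // prints the warning exactly once
}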
Example 81
Source File: KafkaClient.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist.master.interfaces.async.kafka

import java.util.UUID
import java.util.concurrent.atomic.AtomicBoolean

import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}

import scala.collection.JavaConverters._
import scala.concurrent.{Future, Promise}

class TopicProducer[K, V](
  producer: KafkaProducer[K, V],
  topic: String
) {

  def send(key:K, value: V): Unit = {
    val record = new ProducerRecord(topic, key, value)
    producer.send(record)
  }
  def close(): Unit = {
    producer.close()
  }

}

object TopicProducer {

  def apply(
    host: String,
    port: Int,
    topic: String): TopicProducer[String, String] = {

    val props = new java.util.Properties()
    props.put("bootstrap.servers", s"$host:$port")

    val producer = new KafkaProducer(props, new StringSerializer, new StringSerializer)
    new TopicProducer(producer, topic)
  }
}

class TopicConsumer[K, V](
  consumer: KafkaConsumer[K, V],
  topic: String,
  timeout: Long = 100
) {

  private val promise = Promise[Unit]
  private val stopped = new AtomicBoolean(false)

  def subscribe(f: (K, V) => Unit): Future[Unit] = {
    run(f)
    promise.future
  }

  private def run(f: (K, V) => Unit): Unit = {
    consumer.subscribe(Seq(topic).asJava)
    val thread = new Thread(new Runnable {
      override def run(): Unit = {
        while (!stopped.get()) {
          val records = consumer.poll(timeout).asScala
          records.foreach(r => f(r.key(), r.value()))
        }
        promise.success(())
      }
    })
    thread.setName(s"kafka-topic-consumer-$topic")
    thread.start()
  }

  def close(): Future[Unit] = {
    stopped.set(true)
    promise.future
  }
}

object TopicConsumer {

  def apply(
    host: String,
    port: Int,
    topic: String): TopicConsumer[String, String] = {

    val props = new java.util.Properties()
    props.put("bootstrap.servers", s"$host:$port")
    props.put("group.id", "mist-" + UUID.randomUUID().toString)
    props.put("enable.auto.commit", "true")
    props.put("auto.commit.interval.ms", "1000")
    props.put("session.timeout.ms", "30000")

    val consumer = new KafkaConsumer(props, new StringDeserializer, new StringDeserializer)
    new TopicConsumer(consumer, topic)
  }

} 
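TopicConsumer's shutdown combines two pieces: an AtomicBoolean `stopped` flag that the polling thread checks on every iteration, and a Promise completed when the loop actually exits, so close() returns a Future that resolves only after the final poll. A minimal sketch of that handshake without Kafka (the loop body is a placeholder; assumes Scala 2.12+ for the lambda-to-Runnable conversion):

import java.util.concurrent.atomic.AtomicBoolean

import scala.concurrent.duration._
import scala.concurrent.{Await, Future, Promise}

class PollingLoop {
  private val stopped  = new AtomicBoolean(false)
  private val finished = Promise[Unit]()

  def start(): Unit = {
    val thread = new Thread(() => {
      while (!stopped.get()) {
        Thread.sleep(50) // placeholder for consumer.poll(...) plus record handling
      }
      finished.success(()) // signal that the loop has fully exited
    })
    thread.setName("polling-loop")
    thread.start()
  }

  // Ask the loop to stop; the returned Future completes after the last iteration.
  def close(): Future[Unit] = {
    stopped.set(true)
    finished.future
  }
}

object PollingLoopDemo extends App {
  val loop = new PollingLoop
  loop.start()
  Thread.sleep(200)
  Await.result(loop.close(), 5.seconds)
  println("loop stopped cleanly")
}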
Example 82
Source File: WorkerRunnerSpec.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist.master.execution.workers

import java.util.concurrent.atomic.AtomicBoolean

import io.hydrosphere.mist.core.CommonData.WorkerInitInfo
import io.hydrosphere.mist.core.MockitoSugar
import io.hydrosphere.mist.master.execution.workers.starter.{WorkerProcess, WorkerStarter}
import io.hydrosphere.mist.master.execution.{SpawnSettings, workers}
import io.hydrosphere.mist.master.{ActorSpec, FilteredException, TestData}
import io.hydrosphere.mist.utils.akka.ActorRegHub
import org.mockito.Mockito._
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}

import scala.concurrent.duration._
import scala.concurrent.{Await, Future, Promise}

class WorkerRunnerSpec extends ActorSpec("worker-runner") with TestData with MockitoSugar with Eventually {

  describe("default runner") {

    def mkSpawnSettings(starter: WorkerStarter): SpawnSettings = SpawnSettings(
      runnerCmd = starter,
      timeout = 10 seconds,
      readyTimeout = 10 seconds,
      akkaAddress = "akkaAddr",
      logAddress = "logAddr",
      httpAddress = "httpAddr",
      maxArtifactSize = 100L
    )

    it("should run worker") {
      val starter = new WorkerStarter {
        override def onStart(name: String, initInfo: WorkerInitInfo): WorkerProcess = WorkerProcess.NonLocal
        override def stopAction: StopAction = StopAction.Remote
      }
      val regHub = mock[ActorRegHub]
      when(regHub.waitRef(any[String], any[Duration])).thenSuccess(null)

      val termination = Promise[Unit]
      val runner = new workers.WorkerRunner.DefaultRunner(
        spawn = mkSpawnSettings(starter),
        regHub = regHub,
        connect = (_, _, _, _, _) => Future.successful(WorkerConnection("id", null, workerLinkData, termination.future))
      )

      Await.result(runner("id", FooContext), Duration.Inf)
    }

    it("should call onStop if connect was failed") {
      val check = new AtomicBoolean(false)

      val runnerCmd = new WorkerStarter {
        override def onStart(name: String, initInfo: WorkerInitInfo): WorkerProcess = WorkerProcess.NonLocal
        override def stopAction: StopAction = StopAction.CustomFn(_ => check.set(true))
      }

      val regHub = mock[ActorRegHub]
      when(regHub.waitRef(any[String], any[Duration])).thenFailure(FilteredException())

      val runner = new workers.WorkerRunner.DefaultRunner(
        spawn = mkSpawnSettings(runnerCmd),
        regHub = regHub,
        connect = (_, _, _, _, _) => Future.failed(FilteredException())
      )

      intercept[Throwable] {
        Await.result(runner("id", FooContext), Duration.Inf)
      }

      eventually(timeout(Span(3, Seconds))) {
        check.get shouldBe true
      }
    }

  }
} 
Example 83
Source File: CornichonSbtRunner.scala    From cornichon   with Apache License 2.0
package com.github.agourlay.cornichon.framework

import java.util.concurrent.atomic.AtomicBoolean

import com.github.agourlay.cornichon.core.CornichonError
import com.github.agourlay.cornichon.framework.CornichonSbtRunner._
import com.github.agourlay.cornichon.dsl.BaseFeature
import sbt.testing._

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.util.control.NonFatal

class CornichonSbtRunner(val args: Array[String], val remoteArgs: Array[String]) extends Runner {

  private val gotTasks = new AtomicBoolean(false)

  // A task is a Feature class
  override def tasks(taskDefs: Array[TaskDef]): Array[Task] = {
    gotTasks.set(true)
    val (seeds, scenarioNameFilter) = args.toSet.partition(_.startsWith(seedArg))
    val explicitSeed = try {
      seeds.headOption.map(_.split(seedArg)(1).toLong) //YOLO
    } catch {
      case NonFatal(e) =>
        println(s"Could not parse seed from args ${args.toList}")
        println(CornichonError.genStacktrace(e))
        None
    }
    taskDefs.map(new CornichonFeatureSbtTask(_, scenarioNameFilter, explicitSeed))
  }

  override def done(): String = {
    // Function called twice - main and forked process
    // https://github.com/sbt/sbt/issues/3510
    if (gotTasks.get) Await.result(BaseFeature.shutDownGlobalResources(), Duration.Inf)
    // an empty string means letting SBT generate the result summary
    ""
  }

}

object CornichonSbtRunner {
  val seedArg = "--seed="
} 
Example 84
Source File: PythonUDFRunner.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.python

import java.io._
import java.net._
import java.util.concurrent.atomic.AtomicBoolean

import org.apache.spark._
import org.apache.spark.api.python._


class PythonUDFRunner(
    funcs: Seq[ChainedPythonFunctions],
    bufferSize: Int,
    reuseWorker: Boolean,
    evalType: Int,
    argOffsets: Array[Array[Int]])
  extends BasePythonRunner[Array[Byte], Array[Byte]](
    funcs, bufferSize, reuseWorker, evalType, argOffsets) {

  protected override def newWriterThread(
      env: SparkEnv,
      worker: Socket,
      inputIterator: Iterator[Array[Byte]],
      partitionIndex: Int,
      context: TaskContext): WriterThread = {
    new WriterThread(env, worker, inputIterator, partitionIndex, context) {

      protected override def writeCommand(dataOut: DataOutputStream): Unit = {
        PythonUDFRunner.writeUDFs(dataOut, funcs, argOffsets)
      }

      protected override def writeIteratorToStream(dataOut: DataOutputStream): Unit = {
        PythonRDD.writeIteratorToStream(inputIterator, dataOut)
        dataOut.writeInt(SpecialLengths.END_OF_DATA_SECTION)
      }
    }
  }

  protected override def newReaderIterator(
      stream: DataInputStream,
      writerThread: WriterThread,
      startTime: Long,
      env: SparkEnv,
      worker: Socket,
      released: AtomicBoolean,
      context: TaskContext): Iterator[Array[Byte]] = {
    new ReaderIterator(stream, writerThread, startTime, env, worker, released, context) {

      protected override def read(): Array[Byte] = {
        if (writerThread.exception.isDefined) {
          throw writerThread.exception.get
        }
        try {
          stream.readInt() match {
            case length if length > 0 =>
              val obj = new Array[Byte](length)
              stream.readFully(obj)
              obj
            case 0 => Array.empty[Byte]
            case SpecialLengths.TIMING_DATA =>
              handleTimingData()
              read()
            case SpecialLengths.PYTHON_EXCEPTION_THROWN =>
              throw handlePythonException()
            case SpecialLengths.END_OF_DATA_SECTION =>
              handleEndOfDataSection()
              null
          }
        } catch handleException
      }
    }
  }
}

object PythonUDFRunner {

  def writeUDFs(
      dataOut: DataOutputStream,
      funcs: Seq[ChainedPythonFunctions],
      argOffsets: Array[Array[Int]]): Unit = {
    dataOut.writeInt(funcs.length)
    funcs.zip(argOffsets).foreach { case (chained, offsets) =>
      dataOut.writeInt(offsets.length)
      offsets.foreach { offset =>
        dataOut.writeInt(offset)
      }
      dataOut.writeInt(chained.funcs.length)
      chained.funcs.foreach { f =>
        dataOut.writeInt(f.command.length)
        dataOut.write(f.command)
      }
    }
  }
} 
Example 85
Source File: HealthEndpoint.scala    From akka-http-health   with MIT License 5 votes vote down vote up
package io.github.lhotari.akka.http.health

import java.util.concurrent.atomic.AtomicBoolean

import akka.http.scaladsl.model.{ContentTypes, HttpEntity, HttpResponse, StatusCodes}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route

import scala.concurrent.{ExecutionContext, Future}

trait HealthEndpoint {

  protected lazy val checkers = createCheckers

  protected def createCheckers = {
    Seq(new LowDiskSpaceDetector(), new LowMemoryDetector())
  }

  private lazy val successResponse: HttpResponse = createSuccessResponse

  protected def decorateResponse(response: HttpResponse) = response

  protected def createSuccessResponse = {
    decorateResponse(HttpResponse(entity = HttpEntity(ContentTypes.`text/plain(UTF-8)`, "OK")))
  }

  private lazy val errorResponse: HttpResponse = createErrorResponse

  protected def createErrorResponse = {
    decorateResponse(HttpResponse(status = StatusCodes.ServiceUnavailable))
  }

  private val started = new AtomicBoolean(false)

  def createHealthRoute(endpoint: String = HealthEndpoint.DefaultEndpoint)(implicit executor: ExecutionContext): Route =
    get {
      path(endpoint) {
        completeHealthCheck
      }
    }

  def completeHealthCheck(implicit executor: ExecutionContext) = {
    complete {
      Future {
        if (isHealthy()) successResponse else errorResponse
      }
    }
  }

  def start(): Unit = {
    if (started.compareAndSet(false, true)) {
      checkers.foreach(_.start())
    }
  }

  def stop(): Unit = {
    if (started.compareAndSet(true, false)) {
      checkers.foreach(_.stop())
    }
  }

  def isHealthy() = {
    start()
    checkers.forall(_.isHealthy())
  }
}

object HealthEndpoint extends HealthEndpoint {
  lazy val defaultHealthChecker = new HealthEndpoint {}

  val DefaultEndpoint: String = "health"

  def createDefaultHealthRoute(endpoint: String = DefaultEndpoint)(implicit executor: ExecutionContext): Route = {
    defaultHealthChecker.createHealthRoute(endpoint)
  }
} 
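
A minimal usage sketch (not part of the project above; the server wiring, object name and port are assumptions) showing how the default health route can be bound with Akka HTTP:

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import io.github.lhotari.akka.http.health.HealthEndpoint

object HealthServerSketch extends App {
  implicit val system: ActorSystem = ActorSystem("health-demo")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  import system.dispatcher // ExecutionContext used by completeHealthCheck

  // GET /health answers 200 OK while all checkers report healthy, 503 Service Unavailable otherwise.
  val route = HealthEndpoint.createDefaultHealthRoute()

  Http().bindAndHandle(route, "0.0.0.0", 8080)
}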
Example 86
Source File: KafkaSecurityManager.scala    From kafka-security-manager   with MIT License 5 votes vote down vote up
package com.github.simplesteph.ksm

import java.util.concurrent.atomic.AtomicBoolean

import com.github.simplesteph.ksm.grpc.KsmGrpcServer
import com.github.simplesteph.ksm.parser.CsvAclParser
import com.typesafe.config.ConfigFactory
import org.slf4j.LoggerFactory

import scala.util.{Failure, Success, Try}
import java.util.concurrent.{
  ExecutionException,
  Executors,
  ScheduledExecutorService,
  TimeUnit
}

object KafkaSecurityManager extends App {

  val log = LoggerFactory.getLogger(KafkaSecurityManager.getClass)

  val config = ConfigFactory.load()
  val appConfig: AppConfig = new AppConfig(config)

  var isCancelled: AtomicBoolean = new AtomicBoolean(false)
  var grpcServer: KsmGrpcServer = _
  var aclSynchronizer: AclSynchronizer = _
  val aclParser = new CsvAclParser(appConfig.Parser.csvDelimiter)
  val scheduler: ScheduledExecutorService = Executors.newScheduledThreadPool(1)

  if (appConfig.KSM.extract) {
    new ExtractAcl(appConfig.Authorizer.authorizer, aclParser).extract()
  } else {
    aclSynchronizer = new AclSynchronizer(
      appConfig.Authorizer.authorizer,
      appConfig.Source.sourceAcl,
      appConfig.Notification.notification,
      aclParser,
      appConfig.KSM.readOnly
    )

    Try {
      grpcServer = new KsmGrpcServer(
        aclSynchronizer,
        appConfig.GRPC.port,
        appConfig.GRPC.gatewayPort,
        appConfig.Feature.grpc
      )
      grpcServer.start()
    } match {
      case Success(_) =>
      case Failure(e) =>
        log.error("gRPC Server failed to start", e)
        shutdown()
    }

    Runtime.getRuntime.addShutdownHook(new Thread() {
      override def run(): Unit = {
        log.info("Received stop signal")
        shutdown()
      }
    })

    try {
      // If appConfig.KSM.refreshFrequencyMs is less than or equal to 0, the aclSynchronizer runs just once.
      if (appConfig.KSM.refreshFrequencyMs <= 0) {
        log.info("Single run mode: ACL will be synchornized once.")
        aclSynchronizer.run()
      } else {
        log.info(
          "Continuous mode: ACL will be synchronized every " + appConfig.KSM.refreshFrequencyMs + " ms."
        )
        val handle = scheduler.scheduleAtFixedRate(
          aclSynchronizer,
          0,
          appConfig.KSM.refreshFrequencyMs,
          TimeUnit.MILLISECONDS
        )
        handle.get
      }
    } catch {
      case e: ExecutionException =>
        log.error("unexpected exception", e)
    } finally {
      shutdown()
    }

  }

  def shutdown(): Unit = {
    log.info("Kafka Security Manager is shutting down...")
    isCancelled = new AtomicBoolean(true)
    aclSynchronizer.close()
    grpcServer.stop()
    scheduler.shutdownNow()
  }
} 
Example 87
Source File: StreamingListenerBus.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.scheduler

import java.util.concurrent.atomic.AtomicBoolean

import org.apache.spark.Logging
import org.apache.spark.util.AsynchronousListenerBus


private[spark] class StreamingListenerBus
  extends AsynchronousListenerBus[StreamingListener, StreamingListenerEvent]("StreamingListenerBus")
  with Logging {

  private val logDroppedEvent = new AtomicBoolean(false)

  override def onPostEvent(listener: StreamingListener, event: StreamingListenerEvent): Unit = {
    event match {
      case receiverStarted: StreamingListenerReceiverStarted =>
        listener.onReceiverStarted(receiverStarted)
      case receiverError: StreamingListenerReceiverError =>
        listener.onReceiverError(receiverError)
      case receiverStopped: StreamingListenerReceiverStopped =>
        listener.onReceiverStopped(receiverStopped)
      case batchSubmitted: StreamingListenerBatchSubmitted =>
        listener.onBatchSubmitted(batchSubmitted)
      case batchStarted: StreamingListenerBatchStarted =>
        listener.onBatchStarted(batchStarted)
      case batchCompleted: StreamingListenerBatchCompleted =>
        listener.onBatchCompleted(batchCompleted)
      case outputOperationStarted: StreamingListenerOutputOperationStarted =>
        listener.onOutputOperationStarted(outputOperationStarted)
      case outputOperationCompleted: StreamingListenerOutputOperationCompleted =>
        listener.onOutputOperationCompleted(outputOperationCompleted)
      case _ =>
    }
  }

  override def onDropEvent(event: StreamingListenerEvent): Unit = {
    if (logDroppedEvent.compareAndSet(false, true)) {
      // Only log the following message once to avoid duplicated annoying logs.
      logError("Dropping StreamingListenerEvent because no remaining room in event queue. " +
        "This likely means one of the StreamingListeners is too slow and cannot keep up with the " +
        "rate at which events are being started by the scheduler.")
    }
  }
} 
Example 88
Source File: QueryGuardListener.scala    From gimel   with Apache License 2.0 5 votes vote down vote up
package com.paypal.gimel.common.query.guard

import java.util.concurrent.atomic.AtomicBoolean

import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd, SparkListenerJobStart, SparkListenerStageCompleted}
import org.apache.spark.sql.SparkSession
import org.joda.time.{DateTime, Instant}

import com.paypal.gimel.common.conf.{QueryGuardConfigs, QueryGuardConstants}
import com.paypal.gimel.common.utilities.GenericUtils
import com.paypal.gimel.logger.Logger

class QueryGuardListener[E >: QueryGuardDelayedEvent](spark: SparkSession,
                                                      discoveryType: String =
                                                        "job")
    extends SparkListener
    with Producer[E] {
  private val logger = new Logger(this.getClass.getName)
  private val stopped = new AtomicBoolean(true)
  private val HEADER: String = "[DISCOVERY] "
  private var _consumers: Seq[Consumer[E]] = Seq.empty

  override def onJobStart(jobStart: SparkListenerJobStart) {
    logger.info(
      s"${HEADER}Job[${jobStart.jobId}] started with ${jobStart.stageInfos.size} stages @ ${Instant.now()}"
    )
    if (!stopped.get) {
      val job = JobSubmitted(
        jobStart.jobId,
        discoveryType,
        System.currentTimeMillis(),
        jobTtl = GenericUtils.getValue(
          spark.conf,
          QueryGuardConfigs.JOB_TTL,
          QueryGuardConstants.DEFAULT_JOB_TTL
        ),
        delayTtl = GenericUtils.getValue(
          spark.conf,
          QueryGuardConfigs.DELAY_TTL,
          QueryGuardConstants.DEFAULT_DELAY_TTL
        )
      )
      logger.info(
        s"${HEADER}Proceeding to queue in Job[${jobStart.jobId}] onto QueryGuard"
      )
      publish(job)
    } else {
      logger.info(
        s"${HEADER}As QueryGuardListener is ${stopped.get()}," +
          s" unable to queue in Job[${jobStart.jobId}]"
      )
    }
  }

  override def publish(queryGuardEvent: E): Unit = {
    for (consumer <- _consumers) {
      consumer.consume(queryGuardEvent)
    }
  }

  override def onStageCompleted(
    stageCompleted: SparkListenerStageCompleted
  ): Unit = {
    logger.info(
      s"Stage ${stageCompleted.stageInfo.stageId} completed with ${stageCompleted.stageInfo.numTasks} tasks."
    )
  }

  override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
    logger.info(
      s"Job[${jobEnd.jobId}] completed at ${new DateTime(jobEnd.time)}" +
        s" with result -> ${jobEnd.jobResult}"
    )
    super.onJobEnd(jobEnd)
  }

  override def registerConsumers(consumers: Seq[Consumer[E]]): Unit = {
    _consumers = consumers
  }

  def start(): Unit = {
    // toggle stopped to false
    stopped.set(false)
    logger.info(s"${HEADER}Started QueryGuardListener: $stopped")
  }

  def stop(): Unit = {
    // toggle stopped to true
    stopped.compareAndSet(false, true)
    logger.info(s"${HEADER}Stopped QueryGuardListener: $stopped")
  }
} 
Example 89
Source File: QueryGuard.scala    From gimel   with Apache License 2.0 5 votes vote down vote up
package com.paypal.gimel.common.query.guard

import java.util.concurrent.atomic.AtomicBoolean

import org.apache.spark.sql.SparkSession

class QueryGuard(spark: SparkSession) {
  private val stopped = new AtomicBoolean(true)
  private val appTimer =
    new AppTimer[QueryGuardEvent](spark)
  private val appKill = new AppKill[QueryGuardEvent](spark)
  private val queryGuardListener =
    new QueryGuardListener[QueryGuardDelayedEvent](spark)

  def start(): Unit = {
    if (stopped.compareAndSet(true, false)) {
      spark.sparkContext.addSparkListener(queryGuardListener)
      queryGuardListener.registerConsumers(Seq(appTimer))
      appTimer.registerConsumers(Seq(appKill))
      appTimer.start()
      queryGuardListener.start()
      appKill.start()
    }
  }

  def stop(): Unit = {
    if (stopped.compareAndSet(false, true)) {
      appTimer.stop()
      queryGuardListener.stop()
      appKill.stop()
    }
  }
} 
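
A brief usage sketch (the SparkSession setup and names are assumptions) showing how the guard above can be attached to a Spark application:

import org.apache.spark.sql.SparkSession
import com.paypal.gimel.common.query.guard.QueryGuard

object QueryGuardSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("query-guard-demo").getOrCreate()
    val queryGuard = new QueryGuard(spark)
    queryGuard.start() // registers the listener and wires the timer/kill consumers
    try {
      spark.range(0, 1000000).count() // guarded work
    } finally {
      queryGuard.stop()
      spark.stop()
    }
  }
}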
Example 90
Source File: DelegatingWebSocketListener.scala    From sttp   with Apache License 2.0 5 votes vote down vote up
package sttp.client.httpclient

import java.net.http.WebSocket
import java.net.http.WebSocket.Listener
import java.nio.ByteBuffer
import java.util.concurrent.CompletionStage
import java.util.concurrent.atomic.AtomicBoolean

private[httpclient] class DelegatingWebSocketListener[WS_RESULT](
    delegate: Listener,
    onInitialOpen: WebSocket => Unit,
    onInitialError: Throwable => Unit
) extends Listener {
  private val initialised = new AtomicBoolean(false)

  override def onOpen(webSocket: WebSocket): Unit = {
    if (!initialised.getAndSet(true)) {
      onInitialOpen(webSocket)
    }
    delegate.onOpen(webSocket)
  }

  override def onText(webSocket: WebSocket, data: CharSequence, last: Boolean): CompletionStage[_] = {
    delegate.onText(webSocket, data, last)
  }

  override def onBinary(webSocket: WebSocket, data: ByteBuffer, last: Boolean): CompletionStage[_] = {
    delegate.onBinary(webSocket, data, last)
  }

  override def onPing(webSocket: WebSocket, message: ByteBuffer): CompletionStage[_] = {
    delegate.onPing(webSocket, message)
  }

  override def onPong(webSocket: WebSocket, message: ByteBuffer): CompletionStage[_] = {
    delegate.onPong(webSocket, message)
  }

  override def onClose(webSocket: WebSocket, statusCode: Int, reason: String): CompletionStage[_] = {
    delegate.onClose(webSocket, statusCode, reason)
  }
  override def onError(webSocket: WebSocket, error: Throwable): Unit = {
    if (!initialised.getAndSet(true)) {
      onInitialError(error)
    }
    delegate.onError(webSocket, error)
  }
} 
Example 91
Source File: StreamActor.scala    From swave   with Mozilla Public License 2.0 5 votes vote down vote up
package swave.core.impl

import java.util.concurrent.atomic.AtomicBoolean
import scala.util.control.NonFatal
import scala.annotation.tailrec
import com.typesafe.scalalogging.Logger
import org.jctools.queues.MpscLinkedQueue8
import swave.core.Dispatcher

// NOTE: the enclosing class declaration was not part of this extract. The header below is a
// minimal assumed reconstruction (a sketch, not the verbatim swave source) so that the members
// that follow (mailbox, throughput, log, dispatcher, receive) have their dependencies declared.
private[impl] abstract class StreamActor(throughput: Int, dispatcher: Dispatcher, log: Logger) {

  protected type MessageType <: AnyRef

  private[this] val mailbox = new MpscLinkedQueue8[MessageType]()
  private[this] val scheduled: AtomicBoolean with Runnable =
    new AtomicBoolean(true) with Runnable {
      final def run(): Unit =
        try {
          @tailrec def processMailbox(maxRemainingMessages: Int): Unit =
            if (maxRemainingMessages > 0) {
              val nextMsg = mailbox.poll()
              if (nextMsg ne null) {
                receive(nextMsg)
                processMailbox(maxRemainingMessages - 1)
              }
            }
          processMailbox(throughput)
        } catch {
          // Non-fatal exceptions thrown in user code should never bubble up to here!
          // Non-fatal exceptions thrown in (async) swave code should always bubble up to here!
          case NonFatal(e) ⇒ log.error("Uncaught exception in StreamActor::receive", e)
        } finally {
          startMessageProcessing()
        }
    }

  protected def receive(msg: MessageType): Unit

  private[impl] final def enqueue(msg: MessageType): Unit = {
    mailbox.offer(msg)
    trySchedule()
  }

  private def trySchedule(): Unit =
    if (scheduled.compareAndSet(false, true)) {
      try dispatcher.execute(scheduled)
      catch {
        case NonFatal(e) ⇒
          scheduled.set(false)
          throw e
      }
    }

  // must be called at the end of the outermost constructor
  protected final def startMessageProcessing(): Unit = {
    scheduled.set(false)
    if (!mailbox.isEmpty)
      trySchedule()
  }
} 
Example 92
Source File: ResultOutputStream.scala    From polynote   with Apache License 2.0 5 votes vote down vote up
package polynote.kernel.util

import java.io.OutputStream
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.concurrent.atomic.AtomicBoolean

import polynote.kernel.{Output, Result}

class ResultOutputStream(publishSync: Result => Unit, bufSize: Int = 1024) extends OutputStream {
  private val buf: ByteBuffer = ByteBuffer.allocate(bufSize)
  private val closed = new AtomicBoolean(false)

  def write(b: Int): Unit = buf.synchronized {
    if (!buf.hasRemaining) {
      flush()
    }

    buf.put(b.toByte)
  }

  override def flush(): Unit = {
    super.flush()
    buf.synchronized {
      val len = buf.position()
      if (len > 0) {
        val b = ByteBuffer.allocate(buf.position())
        val arr = new Array[Byte](buf.position())
        buf.rewind()
        buf.get(arr)
        publishSync(Output("text/plain; rel=stdout", new String(arr, StandardCharsets.UTF_8)))
        buf.rewind()
      }
    }
  }

  override def close(): Unit = buf.synchronized {
    if (!closed.getAndSet(true)) {
      flush()
      super.close()
    }
  }

} 
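
A short usage sketch (object name and buffer size are illustrative) that collects everything written to the stream as published Results:

import polynote.kernel.Result
import polynote.kernel.util.ResultOutputStream

import scala.collection.mutable.ListBuffer

object ResultOutputStreamSketch extends App {
  val published = ListBuffer.empty[Result]

  // Small buffer so the flush-on-full behaviour is easy to observe.
  val out = new ResultOutputStream(published += _, bufSize = 16)
  out.write("hello, notebook".getBytes("UTF-8")) // OutputStream#write(Array[Byte]) delegates to write(Int)
  out.close()                                    // close() flushes the remainder exactly once

  published.foreach(println)
}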
Example 93
Source File: InProcessDeploy.scala    From polynote   with Apache License 2.0 5 votes vote down vote up
package polynote.testing.kernel.remote

import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean

import polynote.kernel.{BaseEnv, GlobalEnv, Kernel}
import polynote.kernel.environment.CurrentNotebook
import polynote.kernel.logging.Logging
import polynote.kernel.remote.{RemoteKernelClient, SocketTransport}
import zio.{Fiber, RIO, Ref, ZIO}
import zio.duration.Duration

class InProcessDeploy(kernelFactory: Kernel.Factory.LocalService, clientRef: Ref[RemoteKernelClient]) extends SocketTransport.Deploy {
  def deployKernel(transport: SocketTransport, serverAddress: InetSocketAddress): RIO[BaseEnv with GlobalEnv with CurrentNotebook, SocketTransport.DeployedProcess] = {
    val connectClient = RemoteKernelClient.tapRunThrowable(
      RemoteKernelClient.Args(
        Some(serverAddress.getHostString),
        Some(serverAddress.getPort),
        Some(kernelFactory)),
      Some(clientRef))

    connectClient.forkDaemon.map(new InProcessDeploy.Process(_))
  }

}

object InProcessDeploy {
  class Process(fiber: Fiber[Throwable, Int]) extends SocketTransport.DeployedProcess {
    def exitStatus: RIO[BaseEnv, Option[Int]] = fiber.poll.flatMap {
      case Some(exit) => ZIO.fromEither(exit.toEither).map(Some(_))
      case None => ZIO.succeed(None)
    }

    def awaitExit(timeout: Long, timeUnit: TimeUnit): RIO[BaseEnv, Option[Int]] = {
      fiber.join.disconnect.timeout(Duration(timeout, timeUnit))
    }

    def kill(): RIO[BaseEnv, Unit] = fiber.interrupt.unit
  }
} 
Example 94
Source File: InterruptManager.scala    From inox   with Apache License 2.0 5 votes vote down vote up
package inox
package utils

import scala.collection.JavaConverters._

import java.util.concurrent.atomic.{AtomicLong, AtomicBoolean}
import sun.misc.{Signal, SignalHandler}
import java.util.WeakHashMap

class InterruptManager(reporter: Reporter) extends Interruptible {
  private[this] val interruptibles = new WeakHashMap[Interruptible, Boolean]()
  private[this] val sigINT = new Signal("INT")

  private[this] val lastTimestamp = new AtomicLong(0L)
  private val exitWindow = 1000L

  private[this] val handler = new SignalHandler {
    def handle(sig: Signal) {
      def now(): Long = System.currentTimeMillis()
      reporter.info("")
      if (now() - lastTimestamp.get < exitWindow) {
        reporter.warning("Aborting...")
        System.exit(1)
      } else {
        reporter.warning("Interrupted...")
        lastTimestamp.set(now())
        interrupt()
      }
    }
  }

  val interrupted: AtomicBoolean = new AtomicBoolean(false)

  @inline
  def isInterrupted = interrupted.get

  def interrupt() = synchronized {
    if (!isInterrupted) {
      interrupted.set(true)

      val it = interruptibles.keySet.iterator
      for (i <- it.asScala.toList) i.interrupt()
    } else {
      reporter.warning("Already interrupted!")
    }
  }

  def reset() = synchronized {
    if (isInterrupted) {
      interrupted.set(false)
    } else {
      reporter.warning("Not interrupted!")
    }
  }

  def registerForInterrupts(i: Interruptible) = synchronized {
    if (isInterrupted) i.interrupt()
    interruptibles.put(i, true)
  }

  // We should not need this because keys should automatically be removed
  // from the WeakHashMap when gc'ed.
  // But let's have it anyway!
  def unregisterForInterrupts(i: Interruptible) = synchronized {
    interruptibles.remove(i)
  }

  def registerSignalHandler() = Signal.handle(sigINT, handler)
} 
Example 95
Source File: TCPListener.scala    From scala-loci   with Apache License 2.0 5 votes vote down vote up
package loci
package communicator
package tcp

import java.io.IOException
import java.net.{InetAddress, ServerSocket, SocketException}
import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicBoolean

import scala.util.{Failure, Success, Try}
import scala.util.control.NonFatal

private class TCPListener(
  port: Int, interface: String, properties: TCP.Properties)
    extends Listener[TCP] {

  protected def startListening(connectionEstablished: Connected[TCP]): Try[Listening] =
    try {
      val running = new AtomicBoolean(true)
      val socket = new ServerSocket(port, 0, InetAddress.getByName(interface))
      val executor = Executors.newCachedThreadPool()

      def terminate() = {
        try socket.close()
        catch { case _: IOException => }
        executor.shutdown()
      }

      new Thread() {
        override def run() =
          try
            while (true) {
              val connection = socket.accept()
              if (connection != null)
                executor.execute(new Runnable {
                  def run() = TCPHandler.handleConnection(
                    connection, properties, TCPListener.this, { connection =>
                      connectionEstablished.fire(Success(connection))
                    })
                })
            }
          catch {
            case exception: SocketException =>
              if (running.getAndSet(false)) {
                terminate()
                connectionEstablished.fire(Failure(exception))
              }
          }
      }.start()

      Success(new Listening {
        def stopListening(): Unit =
          if (running.getAndSet(false))
            terminate()
      })
    }
    catch {
      case NonFatal(exception) =>
        Failure(exception)
    }
} 
Example 96
Source File: Remote.scala    From scala-loci   with Apache License 2.0 5 votes vote down vote up
package loci
package runtime

import communicator.ProtocolCommon

import java.util.concurrent.atomic.AtomicBoolean

import scala.annotation.compileTimeOnly

object Remote {
  case class Reference(
    id: Long,
    signature: Peer.Signature)(
    val protocol: ProtocolCommon,
    private[runtime] val remoteConnections: RemoteConnections)
      extends loci.Remote.Reference[Nothing] {

    private[runtime] val isConnected = new AtomicBoolean(true)
    private[runtime] val isAuthenticated = new AtomicBoolean(protocol.authenticated)
    private[runtime] val doDisconnected = Notice.Steady[Unit]

    def asReference = this
    def authenticated = isAuthenticated.get
    def authenticate() = isAuthenticated.set(true)
    def connected = isConnected.get
    def disconnect() = remoteConnections.disconnect(this)
    val disconnected = doDisconnected.notice

    override def toString: String = s"remote#$id($signature[$protocol])"
  }

  @compileTimeOnly("Call only allowed in multitier code. Use `remote.asRemote[P]` instead.")
  def cast[P](reference: loci.Remote.Reference[_]): Option[Remote[P]] = ???
} 
Example 97
Source File: Callback.scala    From cats-effect   with Apache License 2.0 5 votes vote down vote up
package cats.effect.internals

import java.util.concurrent.atomic.AtomicBoolean
import cats.effect.internals.TrampolineEC.immediate
import scala.concurrent.Promise
import scala.util.{Failure, Left, Success, Try}


    def completeWithTryAsync(result: Try[A]): Unit =
      result match {
        case Success(a) => self(Right(a))
        case Failure(e) => self(Left(e))
      }
  }

  final private class AsyncIdempotentCallback[-A](conn: IOConnection, cb: Either[Throwable, A] => Unit)
      extends (Either[Throwable, A] => Unit)
      with Runnable {
    private[this] val canCall = new AtomicBoolean(true)
    private[this] var value: Either[Throwable, A] = _
    def run(): Unit = cb(value)

    def apply(value: Either[Throwable, A]): Unit =
      if (canCall.getAndSet(false)) {
        if (conn ne null) conn.pop()
        this.value = value
        immediate.execute(this)
      } else
        value match {
          case Right(_) => ()
          case Left(e) =>
            Logger.reportFailure(e)
        }
  }
} 
Example 98
Source File: AsyncFunctionLoop.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.datamountaineer.streamreactor.connect.hbase.kerberos.utils

import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.{Executors, TimeUnit}

import com.typesafe.scalalogging.StrictLogging

import scala.concurrent.duration.Duration

class AsyncFunctionLoop(interval: Duration, description: String)(thunk: => Unit)
  extends AutoCloseable
    with StrictLogging {

  private val running = new AtomicBoolean(false)
  private val executorService = Executors.newSingleThreadExecutor

  def start(): Unit = {
    if (!running.compareAndSet(false, true)) {
      throw new IllegalStateException(s"$description already running.")
    }
    logger.info(s"Starting $description loop with an interval of ${interval.toMillis}ms.")
    executorService.submit(new Runnable {
      override def run(): Unit = {
        while (running.get()) {
          try {
            Thread.sleep(interval.toMillis)
            thunk
          }
          catch {
            case _: InterruptedException =>
            case t: Throwable =>
              logger.warn("Failed to renew the Kerberos ticket", t)
          }
        }
      }
    })
  }

  override def close(): Unit = {
    if (running.compareAndSet(true, false)) {
      executorService.shutdownNow()
      executorService.awaitTermination(10000, TimeUnit.MILLISECONDS)
    }
  }
} 
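
A usage sketch (the renewal action and names are hypothetical) for the loop above, which the connector uses to renew Kerberos tickets in the background:

import scala.concurrent.duration._
import com.datamountaineer.streamreactor.connect.hbase.kerberos.utils.AsyncFunctionLoop

object TicketRenewalSketch extends App {
  // Hypothetical renewal action; the real connector passes a Kerberos re-login call here.
  def renewTicket(): Unit = println("renewing Kerberos ticket...")

  val loop = new AsyncFunctionLoop(10.minutes, "Kerberos ticket renewal")(renewTicket())
  loop.start() // throws IllegalStateException if already running

  // ... application work happens here ...

  loop.close() // stops the background thread and waits up to 10s for termination
}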
Example 99
Source File: AsyncFunctionLoop.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.landoop.streamreactor.connect.hive

import java.util.concurrent.{Executors, TimeUnit}
import java.util.concurrent.atomic.AtomicBoolean

import com.typesafe.scalalogging.StrictLogging

import scala.concurrent.duration.Duration

class AsyncFunctionLoop(interval: Duration, description: String)(thunk: => Unit)
  extends AutoCloseable
    with StrictLogging {

  private val running = new AtomicBoolean(false)
  private val executorService = Executors.newFixedThreadPool(1)

  def start(): Unit = {
    if (!running.compareAndSet(false, true)) {
      throw new IllegalStateException(s"$description already running.")
    }
    logger.info(s"Starting $description loop with an interval of ${interval.toMillis}ms.")
    executorService.submit(new Runnable {
      override def run(): Unit = {
        while (running.get()) {
          try {
            Thread.sleep(interval.toMillis)
            thunk
          }
          catch {
            case _: InterruptedException =>
            case t: Throwable =>
              logger.warn("Failed to renew the Kerberos ticket", t)
          }
        }
      }
    })
  }

  override def close(): Unit = {
    if (running.compareAndSet(true, false)) {
      executorService.shutdownNow()
      executorService.awaitTermination(10000, TimeUnit.MILLISECONDS)
    }
  }
} 
Example 100
Source File: ReThinkSourceReadersFactory.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.datamountaineer.streamreactor.connect.rethink.source

import java.util
import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.atomic.AtomicBoolean

import com.datamountaineer.streamreactor.connect.rethink.ReThinkConnection
import com.datamountaineer.streamreactor.connect.rethink.config.{ReThinkSourceConfig, ReThinkSourceSetting, ReThinkSourceSettings}
import com.rethinkdb.RethinkDB
import com.rethinkdb.net.{Connection, Cursor}
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.connect.data.SchemaBuilder
import org.apache.kafka.connect.source.SourceRecord

import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object ReThinkSourceReadersFactory {

  def apply(config: ReThinkSourceConfig, r: RethinkDB): Set[ReThinkSourceReader] = {
    val conn = Some(ReThinkConnection(r, config))
    val settings = ReThinkSourceSettings(config)
    settings.map(s => new ReThinkSourceReader(r, conn.get, s))
  }
}

class ReThinkSourceReader(rethink: RethinkDB, conn: Connection, setting: ReThinkSourceSetting)
  extends StrictLogging {

  logger.info(s"Initialising ReThink Reader for ${setting.source}")
  private val keySchema = SchemaBuilder.string().optional().build()
  private val valueSchema = ChangeFeedStructBuilder.schema
  private val sourcePartition = Map.empty[String, String]
  private val offset = Map.empty[String, String]
  private val stopFeed = new AtomicBoolean(false)
  private val handlingFeed = new AtomicBoolean(false)
  private var feed : Cursor[util.HashMap[String, String]] = _
  val queue = new LinkedBlockingQueue[SourceRecord]()
  val batchSize = setting.batchSize

  def start() = {
    feed = getChangeFeed()
    startFeed(feed)
  }

  def stop() = {
    logger.info(s"Closing change feed for ${setting.source}")
    stopFeed.set(true)
    while (handlingFeed.get()) {
      logger.debug("Waiting for feed to shutdown...")
      Thread.sleep(1000)
    }
    feed.close()
    logger.info(s"Change feed closed for ${setting.source}")
  }

  
  private def handleFeed(feed: Cursor[util.HashMap[String, String]]) = {
    handlingFeed.set(true)

    //feed.next is blocking
    while(!stopFeed.get()) {
      logger.debug(s"Waiting for next change feed event for ${setting.source}")
      val cdc = convert(feed.next().asScala.toMap)
      queue.put(cdc)
    }
    handlingFeed.set(false)
  }

  private def getChangeFeed(): Cursor[util.HashMap[String, String]] = {
    logger.info(s"Initialising change feed for ${setting.source}")
    rethink
      .db(setting.db)
      .table(setting.source)
      .changes()
      .optArg("include_states", true)
      .optArg("include_initial", setting.initialise)
      .optArg("include_types", true)
      .run(conn)
  }

  private def convert(feed: Map[String, String]) = {
    new SourceRecord(sourcePartition.asJava, offset.asJava, setting.target, keySchema, setting.source, valueSchema,
      ChangeFeedStructBuilder(feed))
  }
} 
Example 101
Source File: AsyncFunctionLoop.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.landoop.streamreactor.connect.hive

import java.util.concurrent.{Executors, TimeUnit}
import java.util.concurrent.atomic.AtomicBoolean

import com.typesafe.scalalogging.StrictLogging

import scala.concurrent.duration.Duration

class AsyncFunctionLoop(interval: Duration, description: String)(thunk: => Unit)
  extends AutoCloseable
    with StrictLogging {

  private val running = new AtomicBoolean(false)
  private val executorService = Executors.newFixedThreadPool(1)

  def start(): Unit = {
    if (!running.compareAndSet(false, true)) {
      throw new IllegalStateException(s"$description already running.")
    }
    logger.info(s"Starting $description loop with an interval of ${interval.toMillis}ms.")
    executorService.submit(new Runnable {
      override def run(): Unit = {
        while (running.get()) {
          try {
            Thread.sleep(interval.toMillis)
            thunk
          }
          catch {
            case _: InterruptedException =>
            case t: Throwable =>
              logger.warn("Failed to renew the Kerberos ticket", t)
          }
        }
      }
    })
  }

  override def close(): Unit = {
    if (running.compareAndSet(true, false)) {
      executorService.shutdownNow()
      executorService.awaitTermination(10000, TimeUnit.MILLISECONDS)
    }
  }
} 
Example 102
Source File: AlohaEventBus.scala    From aloha   with Apache License 2.0 5 votes vote down vote up
package me.jrwang.aloha.scheduler.bus

import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}

import me.jrwang.aloha.scheduler.bus.AsyncEventQueue.POISON_PILL

class AlohaEventBus extends EventBus[AlohaEventListener, AlohaEvent] {
  override protected def doPostEvent(listener: AlohaEventListener, event: AlohaEvent): Unit = {
    event match {
      case e: AppStateChangedEvent =>
        listener.onApplicationStateChange(e)
      case e: AppRelaunchedEvent =>
        listener.onApplicationRelaunched(e)
        //TODO other specific event
      case _ =>
        listener.onOtherEvent(event)
    }
  }
}



  private[aloha] def stop(): Unit = {
    if (!started.get()) {
      throw new IllegalStateException(s"Attempted to stop $name that has not yet started!")
    }
    if (stopped.compareAndSet(false, true)) {
      eventCount.incrementAndGet()
      eventQueue.put(POISON_PILL)
    }
    // this thread might be trying to stop itself as part of error handling -- we can't join
    // in that case.
    if (Thread.currentThread() != dispatchThread) {
      dispatchThread.join()
    }
  }

  def post(event: AlohaEvent): Unit = {
    if (stopped.get()) {
      return
    }

    eventCount.incrementAndGet()
    if (eventQueue.offer(event)) {
      return
    }

    eventCount.decrementAndGet()
    if (logDroppedEvent.compareAndSet(false, true)) {
      // Only log the following message once to avoid duplicated annoying logs.
      logError(s"Dropping event from queue $name. " +
        "This likely means one of the listeners is too slow and cannot keep up with " +
        "the rate at which tasks are being started by the scheduler.")
    }
    logTrace(s"Dropping event $event")
  }

  override def removeListenerOnError(listener: AlohaEventListener): Unit = {
    // the listener failed in an unrecoverably way, we want to remove it from the entire
    // LiveListenerBus (potentially stopping a queue if it is empty)
    bus.removeListener(listener)
  }

}

object AsyncEventQueue {
  case object POISON_PILL extends AlohaEvent
} 
Example 103
Source File: Switch.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.util

import java.util.concurrent.atomic.AtomicBoolean

import scala.collection.immutable

import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import akka.stream.{Attributes, FanInShape2, Inlet, Outlet}

//TODO: Maybe use a third inlet to determine where to listen to
class Switch[A](ref: AtomicBoolean, emitChangeTrue: immutable.Seq[A], emitChangeFalse: immutable.Seq[A])
    extends GraphStage[FanInShape2[A, A, A]] {
  override val shape: FanInShape2[A, A, A] = new FanInShape2[A, A, A]("Switch")

  val in1: Inlet[A]  = shape.in0
  val in2: Inlet[A]  = shape.in1
  val out: Outlet[A] = shape.out

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) with OutHandler {

      private var lastState: Boolean = ref.get()
      private var waitingOther: A    = _

      private def activeIn(): Inlet[A] = {
        val newState = ref.get()
        val newIn    = if (newState) in1 else in2

        if (lastState != newState) {
          lastState = newState

          emitMultiple(out, if (newState) emitChangeTrue else emitChangeFalse)

          if (waitingOther != null) {
            emit(out, waitingOther)
            waitingOther = null.asInstanceOf[A]
          }

          tryPull(newIn)
        }

        newIn
      }

      private def setInHandler(in: Inlet[A]): Unit = {
        setHandler(
          in,
          new InHandler {
            override def onPush(): Unit = {

              if (activeIn() == in) {
                emit(out, grab(in))
              } else {
                require(waitingOther == null, "Pushed other when a waiting other was already defined")
                waitingOther = grab(in)
              }
            }
          }
        )
      }

      setInHandler(in1)
      setInHandler(in2)

      setHandler(out, this)

      override def onPull(): Unit = pull(activeIn())
    }
} 
Example 104
Source File: SubSeqIterator.scala    From aloha   with MIT License 5 votes vote down vote up
package com.eharmony.aloha.util

import java.util.concurrent.atomic.{AtomicLong, AtomicBoolean}


object SubSeqIterator {
    private val Masks = 0 to 62 map { i => 1L << i }

    def apply[A](values: IndexedSeq[A], k: Int): Iterator[Seq[A]] =
        if (0 == k) OneIterator[A]()
        else if (k > values.size) Iterator.empty
        else new SubSeqIterator(values, k)

    case class OneIterator[A]() extends Iterator[Seq[A]] {
        val more = new AtomicBoolean(true)
        def hasNext: Boolean = more.get
        def next(): Seq[A] =
            if (more.getAndSet(false)) Seq[A]()
            else throw new NoSuchElementException
    }

    case class SubSeqIterator[A](values: IndexedSeq[A], k: Int)
        extends Iterator[Seq[A]] {
        require(values.size <= 63)
        require(k <= values.size)

        private[this] val current = new AtomicLong(smallest(k))
        private[this] val last = largest(values.size, k)
        private[this] val numValues = values.size

        def hasNext: Boolean = current.get <= last

        def next(): IndexedSeq[A] = {
            var c = current.get
            var n = 0L
            if (c <= last) {
                n = next(c)
                while (c <= last && !current.compareAndSet(c, n)) {
                    c = current.get
                    n = next(c)
                }
            }
            if (c <= last) subset(c)
            else throw new NoSuchElementException
        }

        protected[this] def subset(n: Long) = {
            var i = 0
            var els = values.companion.empty[A]
            while(i < numValues) {
                if (0 != (Masks(i) & n))
                    els = els :+ values(i)
                i += 1
            }
            els
        }

        protected[this] def largest(n: Int, k: Int): Long = smallest(k) << (n - k)
        protected[this] def smallest(n: Int): Long = (1L << n) - 1
        protected[this] def next(x: Long): Long = {
            if (x == 0) 0
            else {
                val smallest = x & -x
                val ripple = x + smallest
                val newSmallest = ripple & -ripple
                val ones = ((newSmallest / smallest) >> 1) - 1
                ripple | ones
            }
        }
    }
} 
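
A usage sketch (object name is illustrative) for the combinatorial iterator above:

import com.eharmony.aloha.util.SubSeqIterator

object SubSeqSketch extends App {
  // All 2-element subsequences of a 4-element sequence, C(4, 2) = 6 in total.
  val pairs = SubSeqIterator(IndexedSeq("a", "b", "c", "d"), 2).toList
  pairs.foreach(println)
  assert(pairs.size == 6)
}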
Example 105
Source File: CloserTesterModel.scala    From aloha   with MIT License 5 votes vote down vote up
package com.eharmony.aloha.models

import java.util.concurrent.atomic.AtomicBoolean

import com.eharmony.aloha.audit.Auditor
import com.eharmony.aloha.ex.SchrodingerException
import com.eharmony.aloha.factory.{ModelSubmodelParsingPlugin, ModelParser, ParserProviderCompanion, SubmodelFactory}
import com.eharmony.aloha.id.ModelIdentity
import com.eharmony.aloha.reflect.RefInfo
import com.eharmony.aloha.semantics.Semantics
import spray.json.{DeserializationException, JsValue, JsonFormat, JsonReader}
import spray.json.DefaultJsonProtocol.{BooleanJsonFormat, optionFormat}
import com.eharmony.aloha.factory.jsext.JsValueExtensions


case class CloserTesterModel[U, N, +B <: U](modelId: ModelIdentity, auditor: Auditor[U, N, B], shouldThrowOnClose: Boolean = false)
   extends Model[Any, B]
      with Submodel[N, Any, B] {

  private[this] val closed = new AtomicBoolean(false)
  def isClosed: Boolean = closed.get()
  override def apply(a: Any): B = throw new UnsupportedOperationException
  override def subvalue(a: Any): Subvalue[B, N] = throw new UnsupportedOperationException
  override def close(): Unit = {
    closed.set(true)
    if (shouldThrowOnClose)
      throw new SchrodingerException
  }
}

object CloserTesterModel extends ParserProviderCompanion {
  object Parser extends ModelSubmodelParsingPlugin {
    val modelType = "CloserTester"
    private val shouldThrowField = "shouldThrow"

    override def commonJsonReader[U, N, A, B <: U](
        factory: SubmodelFactory[U, A],
        semantics: Semantics[A],
        auditor: Auditor[U, N, B])
       (implicit r: RefInfo[N], jf: JsonFormat[N]): Option[JsonReader[CloserTesterModel[U, N, B]]] = {

      Some(new JsonReader[CloserTesterModel[U, N, B]] {
        override def read(json: JsValue): CloserTesterModel[U, N, B] = {
          val model = getModelId(json) map { id =>
            val shouldThrow = json(shouldThrowField).flatMap(st => st.convertTo[Option[Boolean]]) getOrElse false
            new CloserTesterModel(id, auditor, shouldThrow)
          }
          model getOrElse { throw new DeserializationException("Couldn't get model ID for CloserTester model.") }
        }
      })
    }
  }

  def parser: ModelParser = Parser
} 
Example 106
Source File: TestDeleteTopicsConcurrently.scala    From ohara   with Apache License 2.0 4 votes vote down vote up
package oharastream.ohara.configurator

import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
import java.util.concurrent.{ArrayBlockingQueue, Executors, LinkedBlockingDeque, TimeUnit}

import oharastream.ohara.client.configurator.{BrokerApi, TopicApi}
import oharastream.ohara.common.setting.TopicKey
import oharastream.ohara.common.util.Releasable
import oharastream.ohara.testing.WithBrokerWorker
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
import scala.jdk.CollectionConverters._

class TestDeleteTopicsConcurrently extends WithBrokerWorker {
  private[this] val configurator =
    Configurator.builder.fake(testUtil.brokersConnProps, testUtil().workersConnProps()).build()

  private[this] val topicApi = TopicApi.access.hostname(configurator.hostname).port(configurator.port)

  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(20, TimeUnit.SECONDS))

  private[this] val brokerClusterInfo = result(
    BrokerApi.access.hostname(configurator.hostname).port(configurator.port).list()
  ).head

  @Test
  def test(): Unit = {
    val loopMax       = 10
    val count         = 3
    val topicKeyQueue = new ArrayBlockingQueue[TopicKey](count)
    (0 until count).foreach(i => topicKeyQueue.put(TopicKey.of("test", i.toString)))
    val executors      = Executors.newFixedThreadPool(count)
    val exceptionQueue = new LinkedBlockingDeque[Throwable]()
    val closed         = new AtomicBoolean(false)
    val loopCount      = new AtomicInteger(0)
    (0 until count).foreach(
      _ =>
        executors.execute { () =>
          while (!closed.get() && loopCount.getAndIncrement() <= loopMax) try {
            val topicKey = topicKeyQueue.take()
            try result(
              topicApi.request
                .group(topicKey.group())
                .name(topicKey.name())
                .brokerClusterKey(brokerClusterInfo.key)
                .numberOfPartitions(1)
                .numberOfReplications(1)
                .create()
                .flatMap(_ => topicApi.start(topicKey))
                .flatMap { _ =>
                  TimeUnit.SECONDS.sleep(1)
                  topicApi.stop(topicKey)
                }
                .flatMap { _ =>
                  TimeUnit.SECONDS.sleep(1)
                  topicApi.delete(topicKey)
                }
            )
            finally topicKeyQueue.put(topicKey)
          } catch {
            case t: Throwable =>
              exceptionQueue.put(t)
              closed.set(true)
          }
        }
    )
    executors.shutdown()
    withClue(s"${exceptionQueue.asScala.map(_.getMessage).mkString(",")}") {
      executors.awaitTermination(60, TimeUnit.SECONDS) shouldBe true
    }
    exceptionQueue.size() shouldBe 0
  }
  @After
  def tearDown(): Unit = Releasable.close(configurator)
}