java.util.concurrent.Semaphore Scala Examples

The following examples show how to use java.util.concurrent.Semaphore in Scala. Each example comes from an open-source project; the source file, project, and license are noted above each listing.
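Before the project listings, here is a minimal self-contained sketch of the core API (illustrative only, not from any of the projects below; SemaphoreSketch and withPermit are hypothetical names): the constructor sets the initial permit count, acquire() blocks until a permit is available, and release() returns one.

import java.util.concurrent.Semaphore

object SemaphoreSketch {
  // Three permits: at most three threads run the guarded block at once.
  private val permits = new Semaphore(3)

  def withPermit[T](body: => T): T = {
    permits.acquire()
    try body
    finally permits.release() // return the permit even if body throws
  }

  def main(args: Array[String]): Unit = {
    val threads = (1 to 10).map { i =>
      new Thread(() => withPermit {
        println(s"thread $i holds a permit")
        Thread.sleep(100)
      })
    }
    threads.foreach(_.start())
    threads.foreach(_.join())
  }
}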
Example 1
Source File: GuardedProcess.scala    From shadowsocksr-android    with GNU General Public License v3.0
package com.github.shadowsocks

import java.io.{IOException, InputStream, OutputStream}
import java.lang.System.currentTimeMillis
import java.util.concurrent.Semaphore

import android.util.Log

import scala.collection.JavaConversions._


class GuardedProcess(cmd: Seq[String]) extends Process {
  private val TAG = classOf[GuardedProcess].getSimpleName

  @volatile private var guardThread: Thread = _
  @volatile private var isDestroyed: Boolean = _
  @volatile private var process: Process = _
  @volatile private var isRestart = false

  def start(onRestartCallback: () => Unit = null): GuardedProcess = {
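    // Handshake: take the single permit now; the guard thread releases it once
    // the child process has started (or an IOException has been recorded), so
    // the second acquire at the end of start() blocks until startup finishes.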
    val semaphore = new Semaphore(1)
    semaphore.acquire
    @volatile var ioException: IOException = null

    guardThread = new Thread(() => {
      try {
        var callback: () => Unit = null
        while (!isDestroyed) {
          Log.i(TAG, "start process: " + cmd)
          val startTime = currentTimeMillis

          process = new ProcessBuilder(cmd).redirectErrorStream(true).start

          if (callback == null) callback = onRestartCallback else callback()

          semaphore.release
          process.waitFor

          if (isRestart) {
            isRestart = false
          } else {
            if (currentTimeMillis - startTime < 1000) {
              Log.w(TAG, "process exit too fast, stop guard: " + cmd)
              isDestroyed = true
            }
          }
        }
      } catch {
        case ignored: InterruptedException =>
          Log.i(TAG, "thread interrupt, destroy process: " + cmd)
          process.destroy()
        case e: IOException => ioException = e
      } finally semaphore.release
    }, "GuardThread-" + cmd)

    guardThread.start()
    semaphore.acquire

    if (ioException != null) {
      throw ioException
    }

    this
  }

  def destroy() {
    isDestroyed = true
    guardThread.interrupt()
    process.destroy()
    try guardThread.join() catch {
      case ignored: InterruptedException =>
    }
  }

  def restart() {
    isRestart = true
    process.destroy()
  }

  def exitValue: Int = throw new UnsupportedOperationException
  def getErrorStream: InputStream = throw new UnsupportedOperationException
  def getInputStream: InputStream = throw new UnsupportedOperationException
  def getOutputStream: OutputStream = throw new UnsupportedOperationException

  @throws(classOf[InterruptedException])
  def waitFor = {
    guardThread.join()
    0
  }
} 
Example 2
Source File: RRunner.scala    From iolap    with Apache License 2.0
package org.apache.spark.deploy

import java.io._
import java.util.concurrent.{Semaphore, TimeUnit}

import scala.collection.JavaConversions._

import org.apache.hadoop.fs.Path

import org.apache.spark.api.r.RBackend
import org.apache.spark.util.RedirectThread


object RRunner {
  def main(args: Array[String]): Unit = {
    val rFile = PythonRunner.formatPath(args(0))

    val otherArgs = args.slice(1, args.length)

    // Time to wait for SparkR backend to initialize in seconds
    val backendTimeout = sys.env.getOrElse("SPARKR_BACKEND_TIMEOUT", "120").toInt
    val rCommand = "Rscript"

    // Check if the file path exists.
    // If not, change directory to current working directory for YARN cluster mode
    val rF = new File(rFile)
    val rFileNormalized = if (!rF.exists()) {
      new Path(rFile).getName
    } else {
      rFile
    }

    // Launch a SparkR backend server for the R process to connect to; this will let it see our
    // Java system properties etc.
    val sparkRBackend = new RBackend()
    @volatile var sparkRBackendPort = 0
    // Zero permits: the tryAcquire below blocks until the backend thread
    // calls release(), making the semaphore a one-shot init signal.
    val initialized = new Semaphore(0)
    val sparkRBackendThread = new Thread("SparkR backend") {
      override def run() {
        sparkRBackendPort = sparkRBackend.init()
        initialized.release()
        sparkRBackend.run()
      }
    }

    sparkRBackendThread.start()
    // Wait for RBackend initialization to finish
    if (initialized.tryAcquire(backendTimeout, TimeUnit.SECONDS)) {
      // Launch R
      val returnCode = try {
        val builder = new ProcessBuilder(Seq(rCommand, rFileNormalized) ++ otherArgs)
        val env = builder.environment()
        env.put("EXISTING_SPARKR_BACKEND_PORT", sparkRBackendPort.toString)
        val sparkHome = System.getenv("SPARK_HOME")
        env.put("R_PROFILE_USER",
          Seq(sparkHome, "R", "lib", "SparkR", "profile", "general.R").mkString(File.separator))
        builder.redirectErrorStream(true) // Ugly but needed for stdout and stderr to synchronize
        val process = builder.start()

        new RedirectThread(process.getInputStream, System.out, "redirect R output").start()

        process.waitFor()
      } finally {
        sparkRBackend.close()
      }
      System.exit(returnCode)
    } else {
      System.err.println("SparkR backend did not initialize in " + backendTimeout + " seconds")
      System.exit(-1)
    }
  }
} 
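The Semaphore(0) handshake above is a lightweight one-shot latch. A stripped-down sketch of the same pattern (InitSignalSketch is a hypothetical name, and the port assignment stands in for sparkRBackend.init()):

import java.util.concurrent.{Semaphore, TimeUnit}

object InitSignalSketch {
  def main(args: Array[String]): Unit = {
    val ready = new Semaphore(0) // no permits yet: tryAcquire blocks
    @volatile var port = 0

    new Thread(() => {
      port = 8080      // stand-in for real initialisation work
      ready.release()  // hand the waiting thread its single permit
    }).start()

    if (ready.tryAcquire(10, TimeUnit.SECONDS)) println(s"backend on port $port")
    else println("backend did not initialize in time")
  }
}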
Example 3
Source File: UIWorkloadGenerator.scala    From iolap    with Apache License 2.0
package org.apache.spark.ui

import java.util.concurrent.Semaphore

import scala.util.Random

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.scheduler.SchedulingMode

private[spark] object UIWorkloadGenerator {

  val NUM_PARTITIONS = 100
  val INTER_JOB_WAIT_MS = 5000

  def main(args: Array[String]) {
    if (args.length < 3) {
      println(
        "usage: ./bin/spark-class org.apache.spark.ui.UIWorkloadGenerator " +
          "[master] [FIFO|FAIR] [#job set (4 jobs per set)]")
      System.exit(1)
    }

    val conf = new SparkConf().setMaster(args(0)).setAppName("Spark UI tester")

    val schedulingMode = SchedulingMode.withName(args(1))
    if (schedulingMode == SchedulingMode.FAIR) {
      conf.set("spark.scheduler.mode", "FAIR")
    }
    val nJobSet = args(2).toInt
    val sc = new SparkContext(conf)

    def setProperties(s: String): Unit = {
      if (schedulingMode == SchedulingMode.FAIR) {
        sc.setLocalProperty("spark.scheduler.pool", s)
      }
      sc.setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, s)
    }

    val baseData = sc.makeRDD(1 to NUM_PARTITIONS * 10, NUM_PARTITIONS)
    def nextFloat(): Float = new Random().nextFloat()

    val jobs = Seq[(String, () => Long)](
      ("Count", baseData.count),
      ("Cache and Count", baseData.map(x => x).cache().count),
      ("Single Shuffle", baseData.map(x => (x % 10, x)).reduceByKey(_ + _).count),
      ("Entirely failed phase", baseData.map(x => throw new Exception).count),
      ("Partially failed phase", {
        baseData.map{x =>
          val probFailure = (4.0 / NUM_PARTITIONS)
          if (nextFloat() < probFailure) {
            throw new Exception("This is a task failure")
          }
          1
        }.count
      }),
      ("Partially failed phase (longer tasks)", {
        baseData.map{x =>
          val probFailure = (4.0 / NUM_PARTITIONS)
          if (nextFloat() < probFailure) {
            Thread.sleep(100)
            throw new Exception("This is a task failure")
          }
          1
        }.count
      }),
      ("Job with delays", baseData.map(x => Thread.sleep(100)).count)
    )

    // Negative initial permits turn the semaphore into a countdown barrier:
    // the acquire() at the end blocks until all nJobSet * jobs.size job
    // threads have called release().
    val barrier = new Semaphore(-nJobSet * jobs.size + 1)

    (1 to nJobSet).foreach { _ =>
      for ((desc, job) <- jobs) {
        new Thread {
          override def run() {
            try {
              setProperties(desc)
              job()
              println("Job finished: " + desc)
            } catch {
              case e: Exception =>
                println("Job Failed: " + desc)
            } finally {
              barrier.release()
            }
          }
        }.start
        Thread.sleep(INTER_JOB_WAIT_MS)
      }
    }

    // Waiting for threads.
    barrier.acquire()
    sc.stop()
  }
} 
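The negative initial permit count above is a compact way to wait for N workers. A sketch of the same idiom in isolation (BarrierSketch is a hypothetical name):

import java.util.concurrent.Semaphore

object BarrierSketch {
  def main(args: Array[String]): Unit = {
    val workers = 5
    val done = new Semaphore(-workers + 1) // acquire() needs 'workers' releases

    (1 to workers).foreach { i =>
      new Thread(() => {
        println(s"worker $i done")
        done.release()
      }).start()
    }

    done.acquire() // returns only after all five threads have released
    println("all workers finished")
  }
}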
Example 4
Source File: TestProcessHelper.scala    From seed    with Apache License 2.0
package seed.generation.util

import java.nio.file.Path
import java.util.concurrent.{Executors, Semaphore}

import scala.concurrent.{ExecutionContext, Future}
import seed.Log
import seed.cli.util.RTS
import seed.process.ProcessHelper

object TestProcessHelper {
  // Single-threaded execution context to avoid CI problems
  private val executor = Executors.newFixedThreadPool(1)
  implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(executor)

  // Use binary semaphore to synchronise test suite execution. Prevent Bloop
  // processes from running concurrently.
  val semaphore = new Semaphore(1)

  def runBloop(cwd: Path)(args: String*): Future[String] = {
    val sb = new StringBuilder
    val process =
      ProcessHelper.runBloop(cwd, Log.urgent, out => sb.append(out + "\n"))(
        args: _*
      )
    RTS.unsafeRunToFuture(process).map(_ => sb.toString)
  }

  def runCommand(cwd: Path, cmd: List[String]): Future[String] = {
    val sb = new StringBuilder
    val process = ProcessHelper.runCommand(
      cwd,
      cmd,
      None,
      List(),
      None,
      Log.urgent,
      out => sb.append(out + "\n")
    )
    RTS.unsafeRunToFuture(process).map(_ => sb.toString)
  }
} 
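A sketch of how a caller might hold the binary semaphore around a Bloop run so that test suites cannot overlap. BloopRunSketch, the project path, the arguments, and the timeout are hypothetical, not part of the object above:

import java.nio.file.Paths
import scala.concurrent.Await
import scala.concurrent.duration._
import seed.generation.util.TestProcessHelper

object BloopRunSketch {
  def main(args: Array[String]): Unit = {
    val projectPath = Paths.get("/tmp/example-project") // hypothetical module
    TestProcessHelper.semaphore.acquire()
    try {
      // Block until the Bloop process finishes and its output is collected.
      val output = Await.result(
        TestProcessHelper.runBloop(projectPath)("compile", "example"),
        5.minutes
      )
      println(output)
    } finally TestProcessHelper.semaphore.release()
  }
}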
Example 5
Source File: WriteSpansResponseObserver.scala    From haystack-traces    with Apache License 2.0
package com.expedia.www.haystack.trace.indexer.writers.grpc

import java.util.concurrent.Semaphore

import com.codahale.metrics.Timer
import com.expedia.open.tracing.backend.WriteSpansResponse
import com.expedia.www.haystack.commons.metrics.MetricsSupport
import com.expedia.www.haystack.trace.indexer.metrics.AppMetricNames
import io.grpc.stub.StreamObserver
import org.slf4j.LoggerFactory


class WriteSpansResponseObserver(timer: Timer.Context, inflightRequest: Semaphore) extends StreamObserver[WriteSpansResponse] with MetricsSupport {

  private val LOGGER = LoggerFactory.getLogger(classOf[WriteSpansResponseObserver])
  private val writeFailures = metricRegistry.meter(AppMetricNames.BACKEND_WRITE_FAILURE)

  override def onNext(writeSpanResponse: WriteSpansResponse): Unit = {
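    // One in-flight write completed: stop its timer and return the permit.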
    timer.close()
    inflightRequest.release()
  }

  override def onError(error: Throwable): Unit = {
    timer.close()
    inflightRequest.release()
    writeFailures.mark()
    LOGGER.error("Failed to write to trace-backend", error)
  }

  override def onCompleted(): Unit = {
    LOGGER.debug("Closing WriteSpans Trace Observer")
  }
} 
Example 6
Source File: LockManager.scala    From metronome    with Apache License 2.0
package dcos.metronome.utils.state

import com.google.common.cache.{LoadingCache, CacheLoader, CacheBuilder}
import java.util.concurrent.Semaphore

import scala.concurrent.{ExecutionContext, Future}


// Runs deferred executions that share the same key sequentially.
trait LockManager {
  def executeSequentially[T](key: String)(future: => Future[T])(implicit ec: ExecutionContext): Future[T]
}

object LockManager {

  def create(): LockManager =
    new LockManager {
      val locks = loadingCache[String]()
      override def executeSequentially[T](
          key: String
      )(future: => Future[T])(implicit ec: ExecutionContext): Future[T] = {
        val lock = locks.get(key)
        scala.concurrent.blocking {
          lock.acquire()
        }
        val result = future
        result.onComplete { _ => lock.release() }
        result
      }
    }

  private[this] def loadingCache[A <: AnyRef](): LoadingCache[A, Semaphore] = {
    CacheBuilder
      .newBuilder()
      .weakValues()
      .build[A, Semaphore](new CacheLoader[A, Semaphore] {
        override def load(key: A): Semaphore = new Semaphore(1)
      })
  }
} 
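A usage sketch for the trait above (LockManagerSketch and the per-user update are hypothetical):

import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import dcos.metronome.utils.state.LockManager

object LockManagerSketch {
  val lockManager: LockManager = LockManager.create()

  // Updates that share a key run one at a time; distinct keys proceed
  // independently because each key maps to its own Semaphore(1).
  def recordVisit(userId: String): Future[Unit] =
    lockManager.executeSequentially(userId) {
      Future { /* hypothetical read-modify-write of per-user state */ }
    }
}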
Example 7
Source File: ParallelLimitedFn.scala    From scio    with Apache License 2.0
package com.spotify.scio.util

import java.util.concurrent.Semaphore

import com.spotify.scio.transforms.DoFnWithResource
import com.spotify.scio.transforms.DoFnWithResource.ResourceType
import org.apache.beam.sdk.transforms.DoFn
import org.apache.beam.sdk.transforms.DoFn.ProcessElement


abstract private[scio] class ParallelLimitedFn[T, U](maxDoFns: Int)
    extends DoFnWithResource[T, U, Semaphore]
    with NamedFn {
  def getResourceType: ResourceType = ResourceType.PER_CLASS

  // Fair semaphore: waiting threads obtain permits in FIFO order.
  def createResource: Semaphore = new Semaphore(maxDoFns, true)

  def parallelProcessElement(x: DoFn[T, U]#ProcessContext): Unit

  @ProcessElement def processElement(x: DoFn[T, U]#ProcessContext): Unit = {
    val semaphore = getResource
    try {
      semaphore.acquire()
      parallelProcessElement(x)
    } finally {
      semaphore.release()
    }
  }
}