java.util.concurrent.Future Scala Examples
The following examples show how to use java.util.concurrent.Future.
Each example is taken from an open-source project; the project and source file are noted above each example.
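Most of the examples below follow the same basic pattern: a task is submitted to an ExecutorService, which returns a java.util.concurrent.Future, and the caller later blocks on get. Here is a minimal, self-contained sketch of that pattern; the object name and task body are made up for illustration.

import java.util.concurrent.{Callable, Executors, Future, TimeUnit}

object FutureQuickStart {
  def main(args: Array[String]): Unit = {
    val pool = Executors.newFixedThreadPool(2)

    // Submitting a Callable returns a java.util.concurrent.Future for the pending result.
    val future: Future[Int] = pool.submit(new Callable[Int] {
      override def call(): Int = 21 + 21 // illustrative work
    })

    // get() blocks; the timed variant throws TimeoutException if the result is not ready in time.
    println(future.get(5, TimeUnit.SECONDS))
    pool.shutdown()
  }
}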
Example 1
Source File: GenericContainer.scala From testcontainers-scala with MIT License
package com.dimafeng.testcontainers

import java.util.concurrent.Future

import com.dimafeng.testcontainers.GenericContainer.DockerImage
import org.testcontainers.containers.wait.strategy.WaitStrategy
import org.testcontainers.containers.{BindMode, GenericContainer => JavaGenericContainer}
import org.testcontainers.images.ImagePullPolicy

import scala.collection.JavaConverters._

class GenericContainer(
  override val underlyingUnsafeContainer: JavaGenericContainer[_]
) extends SingleContainer[JavaGenericContainer[_]] {

  override implicit val container: JavaGenericContainer[_] = underlyingUnsafeContainer

  def this(
    dockerImage: DockerImage,
    exposedPorts: Seq[Int] = Seq(),
    env: Map[String, String] = Map(),
    command: Seq[String] = Seq(),
    classpathResourceMapping: Seq[(String, String, BindMode)] = Seq(),
    waitStrategy: Option[WaitStrategy] = None,
    labels: Map[String, String] = Map.empty,
    tmpFsMapping: Map[String, String] = Map.empty,
    imagePullPolicy: Option[ImagePullPolicy] = None
  ) = this({
    val underlying: JavaGenericContainer[_] = dockerImage match {
      case DockerImage(Left(imageFromDockerfile)) => new JavaGenericContainer(imageFromDockerfile)
      case DockerImage(Right(imageName)) => new JavaGenericContainer(imageName)
    }

    if (exposedPorts.nonEmpty) {
      underlying.withExposedPorts(exposedPorts.map(int2Integer): _*)
    }
    env.foreach(Function.tupled(underlying.withEnv))
    if (command.nonEmpty) {
      underlying.withCommand(command: _*)
    }
    classpathResourceMapping.foreach(Function.tupled(underlying.withClasspathResourceMapping))
    waitStrategy.foreach(underlying.waitingFor)

    if (labels.nonEmpty) {
      underlying.withLabels(labels.asJava)
    }
    if (tmpFsMapping.nonEmpty) {
      underlying.withTmpFs(tmpFsMapping.asJava)
    }
    imagePullPolicy.foreach(underlying.withImagePullPolicy)

    underlying
  })

  def this(genericContainer: GenericContainer) = this(genericContainer.underlyingUnsafeContainer)
}

object GenericContainer {

  case class DockerImage(image: Either[String, Future[String]])

  implicit def javaFutureToDockerImage(javaFuture: Future[String]): DockerImage = {
    DockerImage(Right(javaFuture))
  }

  implicit def stringToDockerImage(imageName: String): DockerImage = {
    DockerImage(Left(imageName))
  }

  def apply(dockerImage: DockerImage,
            exposedPorts: Seq[Int] = Seq(),
            env: Map[String, String] = Map(),
            command: Seq[String] = Seq(),
            classpathResourceMapping: Seq[(String, String, BindMode)] = Seq(),
            waitStrategy: WaitStrategy = null,
            labels: Map[String, String] = Map.empty,
            tmpFsMapping: Map[String, String] = Map.empty,
            imagePullPolicy: ImagePullPolicy = null): GenericContainer =
    new GenericContainer(dockerImage, exposedPorts, env, command, classpathResourceMapping,
      Option(waitStrategy), labels, tmpFsMapping,
      Option(imagePullPolicy)) // assumed: the pull policy is forwarded to the constructor; this argument was dropped in the original listing

  abstract class Def[C <: GenericContainer](init: => C) extends ContainerDef {
    override type Container = C
    protected def createContainer(): C = init
  }
}
Example 2
Source File: KafkaCollector.scala From Swallow with Apache License 2.0
package com.intel.hibench.common.streaming.metrics

import java.io.{FileWriter, File}
import java.util.Date
import java.util.concurrent.{TimeUnit, Future, Executors}

import com.codahale.metrics.{UniformReservoir, Histogram}
import kafka.utils.{ZKStringSerializer, ZkUtils}
import org.I0Itec.zkclient.ZkClient

import scala.collection.mutable.ArrayBuffer

class KafkaCollector(zkConnect: String, metricsTopic: String,
                     outputDir: String, sampleNumber: Int, desiredThreadNum: Int) extends LatencyCollector {

  private val histogram = new Histogram(new UniformReservoir(sampleNumber))
  private val threadPool = Executors.newFixedThreadPool(desiredThreadNum)
  private val fetchResults = ArrayBuffer.empty[Future[FetchJobResult]]

  def start(): Unit = {
    val partitions = getPartitions(metricsTopic, zkConnect)

    println("Starting MetricsReader for kafka topic: " + metricsTopic)

    partitions.foreach(partition => {
      val job = new FetchJob(zkConnect, metricsTopic, partition, histogram)
      val fetchFeature = threadPool.submit(job)
      fetchResults += fetchFeature
    })

    threadPool.shutdown()
    threadPool.awaitTermination(30, TimeUnit.MINUTES)

    val finalResults = fetchResults.map(_.get()).reduce((a, b) => {
      val minTime = Math.min(a.minTime, b.minTime)
      val maxTime = Math.max(a.maxTime, b.maxTime)
      val count = a.count + b.count
      new FetchJobResult(minTime, maxTime, count)
    })

    report(finalResults.minTime, finalResults.maxTime, finalResults.count)
  }

  private def getPartitions(topic: String, zkConnect: String): Seq[Int] = {
    val zkClient = new ZkClient(zkConnect, 6000, 6000, ZKStringSerializer)
    try {
      ZkUtils.getPartitionsForTopics(zkClient, Seq(topic)).flatMap(_._2).toSeq
    } finally {
      zkClient.close()
    }
  }

  private def report(minTime: Long, maxTime: Long, count: Long): Unit = {
    val outputFile = new File(outputDir, metricsTopic + ".csv")
    println(s"written out metrics to ${outputFile.getCanonicalPath}")
    val header = "time,count,throughput(msgs/s),max_latency(ms),mean_latency(ms),min_latency(ms)," +
      "stddev_latency(ms),p50_latency(ms),p75_latency(ms),p95_latency(ms),p98_latency(ms)," +
      "p99_latency(ms),p999_latency(ms)\n"
    val fileExists = outputFile.exists()
    if (!fileExists) {
      val parent = outputFile.getParentFile
      if (!parent.exists()) {
        parent.mkdirs()
      }
      outputFile.createNewFile()
    }
    val outputFileWriter = new FileWriter(outputFile, true)
    if (!fileExists) {
      outputFileWriter.append(header)
    }
    val time = new Date(System.currentTimeMillis()).toString
    val count = histogram.getCount
    val snapshot = histogram.getSnapshot
    val throughput = count * 1000 / (maxTime - minTime)
    outputFileWriter.append(s"$time,$count,$throughput," +
      s"${formatDouble(snapshot.getMax)}," +
      s"${formatDouble(snapshot.getMean)}," +
      s"${formatDouble(snapshot.getMin)}," +
      s"${formatDouble(snapshot.getStdDev)}," +
      s"${formatDouble(snapshot.getMedian)}," +
      s"${formatDouble(snapshot.get75thPercentile())}," +
      s"${formatDouble(snapshot.get95thPercentile())}," +
      s"${formatDouble(snapshot.get98thPercentile())}," +
      s"${formatDouble(snapshot.get99thPercentile())}," +
      s"${formatDouble(snapshot.get999thPercentile())}\n")
    outputFileWriter.close()
  }

  private def formatDouble(d: Double): String = {
    "%.3f".format(d)
  }
}
Example 3
Source File: PSVector.scala From sona with Apache License 2.0
package com.tencent.angel.sona.models

import java.util.concurrent.Future

import scala.collection.Map

import org.apache.spark.SparkException
import com.tencent.angel.ml.math2.vector.Vector
import com.tencent.angel.ml.math2.utils.RowType
import com.tencent.angel.ml.matrix.psf.get.base.{GetFunc, GetResult}
import com.tencent.angel.ml.matrix.psf.update.base.{UpdateFunc, VoidResult}
import com.tencent.angel.sona.context.PSContext

// Excerpt: the enclosing object/class declaration is elided in this listing; only these factory methods are shown.
  def longKeySparse(dim: Long,
                    maxRange: Long,
                    capacity: Int = 20,
                    rowType: RowType = RowType.T_DOUBLE_SPARSE_LONGKEY,
                    additionalConfiguration: Map[String, String] = Map()): PSVector = {
    sparse(dim, capacity, maxRange, rowType, additionalConfiguration)
  }

  def sparse(dimension: Long, capacity: Int, range: Long, rowType: RowType,
             additionalConfiguration: Map[String, String]): PSVector = {
    PSContext.instance().createVector(dimension, rowType, capacity, range, additionalConfiguration)
  }

  def sparse(dimension: Long,
             capacity: Int = 20,
             rowType: RowType = RowType.T_DOUBLE_SPARSE_LONGKEY,
             additionalConfiguration: Map[String, String] = Map()): PSVector = {
    sparse(dimension, capacity, dimension, rowType, additionalConfiguration)
  }
} // closes the elided enclosing definition
Example 4
Source File: ConcurrentUtil.scala From sona with Apache License 2.0
package com.tencent.angel.sona.tree.util

import java.util.concurrent.{Callable, ExecutorService, Executors, Future}

object ConcurrentUtil {
  private[tree] var numThread: Int = 1
  private[tree] var threadPool: ExecutorService = _
  private[tree] val DEFAULT_BATCH_SIZE = 1000000

  private[tree] def reset(parallelism: Int): Unit = {
    ConcurrentUtil.getClass.synchronized {
      this.numThread = parallelism
      this.threadPool = Executors.newFixedThreadPool(parallelism)
    }
  }

  private[tree] def rangeParallel[A](f: (Int, Int) => A, start: Int, end: Int,
                                     batchSize: Int = DEFAULT_BATCH_SIZE): Array[Future[A]] = {
    val futures = Array.ofDim[Future[A]](MathUtil.idivCeil(end - start, batchSize))
    var cur = start
    var threadId = 0
    while (cur < end) {
      val i = cur
      val j = (cur + batchSize) min end
      futures(threadId) = threadPool.submit(new Callable[A] {
        override def call(): A = f(i, j)
      })
      cur = j
      threadId += 1
    }
    futures
  }

  private[tree] def shutdown(): Unit = ConcurrentUtil.getClass.synchronized {
    if (threadPool != null)
      threadPool.shutdown()
  }
}
Example 5
Source File: Threads.scala From shapenet-viewer with MIT License
package edu.stanford.graphics.shapenet.util

import java.util.concurrent.{Future, Executors}

object Threads extends Loggable {
  lazy val threadPool = Executors.newCachedThreadPool()

  def execute(runnable: Runnable, logger: org.slf4j.Logger = this.logger, desc: String = ""): Future[_] = {
    val wrappedRunnable = new RunnableWithLogging(runnable, logger, desc)
    threadPool.submit(wrappedRunnable)
  }
}

class RunnableWithLogging(val runnable: Runnable, val logger: org.slf4j.Logger, val desc: String) extends Runnable {
  override def run(): Unit = {
    try {
      runnable.run()
    } catch {
      case ex: Throwable => {
        logger.error("Error running " + desc, ex)
      }
    }
  }
}
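A hypothetical way to call Threads.execute is shown below; the runnable body and description are illustrative only and are not part of the original project.

// Hypothetical usage sketch for Threads.execute; task body and description are illustrative only.
val f: java.util.concurrent.Future[_] = Threads.execute(new Runnable {
  override def run(): Unit = println("background work")
}, desc = "example task")

f.get() // block until the wrapped runnable finishes; exceptions are logged by RunnableWithLogging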
Example 6
Source File: HikariDataSourceTransactor.scala From mist with Apache License 2.0
package io.hydrosphere.mist.master.store

import java.util.concurrent.{ExecutorService, Executors, Future, TimeUnit}

import cats.arrow.FunctionK
import cats.effect._
import com.zaxxer.hikari.{HikariConfig, HikariDataSource}
import doobie.util.transactor.Transactor
import doobie.util.transactor.Transactor.Aux
import io.hydrosphere.mist.utils.Logger

import scala.concurrent.ExecutionContext

// Excerpt: the enclosing class declaration is elided in this listing; only the shutdown method is shown.
  def shutdown(): Unit = {
    if (!ds.isClosed) {
      logger.info("Closing Hikari data source")
      ds.close()
    } else {
      logger.warn("Hikari datasource had not been properly initialized before closing")
    }

    shutdownExecutorService(awaitShutdown, ce, "connections EC")
    shutdownExecutorService(awaitShutdown, te, "tx EC")
  }
} // closes the elided enclosing class
Example 7
Source File: ReadSpansResponseListener.scala From haystack-traces with Apache License 2.0
package com.expedia.www.haystack.trace.reader.stores.readers.grpc

import java.util.concurrent.Future

import com.codahale.metrics.{Meter, Timer}
import com.expedia.open.tracing.api.Trace
import com.expedia.open.tracing.backend.{ReadSpansResponse, TraceRecord}
import com.expedia.www.haystack.trace.commons.packer.Unpacker
import com.expedia.www.haystack.trace.reader.exceptions.TraceNotFoundException
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.concurrent.Promise
import scala.util.{Failure, Success, Try}

object ReadSpansResponseListener {
  protected val LOGGER: Logger = LoggerFactory.getLogger(classOf[ReadSpansResponseListener])
}

class ReadSpansResponseListener(readSpansResponse: Future[ReadSpansResponse],
                                promise: Promise[Seq[Trace]],
                                timer: Timer.Context,
                                failure: Meter,
                                tracesFailure: Meter,
                                traceIdCount: Int) extends Runnable {

  import ReadSpansResponseListener._

  override def run(): Unit = {
    timer.close()

    Try(readSpansResponse.get)
      .flatMap(tryGetTraceRows)
      .flatMap(tryDeserialize) match {
      case Success(traces) =>
        tracesFailure.mark(traceIdCount - traces.length)
        promise.success(traces)
      case Failure(ex) =>
        LOGGER.error("Failed in reading the record from trace-backend", ex)
        failure.mark()
        tracesFailure.mark(traceIdCount)
        promise.failure(ex)
    }
  }

  private def tryGetTraceRows(response: ReadSpansResponse): Try[Seq[TraceRecord]] = {
    val records = response.getRecordsList
    if (records.isEmpty) Failure(new TraceNotFoundException) else Success(records.asScala)
  }

  private def tryDeserialize(records: Seq[TraceRecord]): Try[Seq[Trace]] = {
    val traceBuilderMap = new mutable.HashMap[String, Trace.Builder]()
    var deserFailed: Failure[Seq[Trace]] = null

    records.foreach(record => {
      Try(Unpacker.readSpanBuffer(record.getSpans.toByteArray)) match {
        case Success(sBuffer) =>
          traceBuilderMap
            .getOrElseUpdate(sBuffer.getTraceId, Trace.newBuilder().setTraceId(sBuffer.getTraceId))
            .addAllChildSpans(sBuffer.getChildSpansList)
        case Failure(cause) =>
          deserFailed = Failure(cause)
      }
    })

    if (deserFailed == null) Success(traceBuilderMap.values.map(_.build).toSeq) else deserFailed
  }
}
Example 8
Source File: ExecutorPoolCaptureOom.scala From kyuubi with Apache License 2.0
package org.apache.kyuubi.util

import java.util.concurrent.{Future, SynchronousQueue, ThreadPoolExecutor, TimeUnit}

case class ExecutorPoolCaptureOom(
    poolName: String,
    corePoolSize: Int,
    maximumPoolSize: Int,
    keepAliveSeconds: Long,
    hook: Runnable)
  extends ThreadPoolExecutor(
    corePoolSize,
    maximumPoolSize,
    keepAliveSeconds,
    TimeUnit.SECONDS,
    new SynchronousQueue[Runnable](),
    NamedThreadFactory(poolName)) {

  override def afterExecute(r: Runnable, t: Throwable): Unit = {
    super.afterExecute(r, t)
    t match {
      case _: OutOfMemoryError => hook.run()
      case null => r match {
        case f: Future[_] =>
          try {
            if (f.isDone) f.get()
          } catch {
            case _: InterruptedException => Thread.currentThread().interrupt()
            case _: OutOfMemoryError => hook.run()
          }
        case _ =>
      }
      case _ =>
    }
  }
}
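A hedged usage sketch follows; the pool name, sizes, and hook body are illustrative only, and NamedThreadFactory is assumed to be available from the same project.

// Hypothetical usage; names and values are illustrative only.
val pool = ExecutorPoolCaptureOom(
  "example-pool",
  corePoolSize = 2,
  maximumPoolSize = 4,
  keepAliveSeconds = 60,
  hook = new Runnable { override def run(): Unit = System.err.println("OOM observed in pool") })

val result: java.util.concurrent.Future[Int] = pool.submit(new java.util.concurrent.Callable[Int] {
  override def call(): Int = 42
})
println(result.get())
pool.shutdown()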
Example 9
Source File: FailingKafkaStorage.scala From affinity with Apache License 2.0
package io.amient.affinity.kafka

import java.util.concurrent.Future

import io.amient.affinity.core.storage.{LogStorageConf, Record}
import io.amient.affinity.core.util.MappedJavaFuture
import org.apache.kafka.clients.producer.{ProducerRecord, RecordMetadata}

class FailingKafkaStorage(conf: LogStorageConf) extends KafkaLogStorage(conf) {

  override def append(record: Record[Array[Byte], Array[Byte]]): Future[java.lang.Long] = {
    val producerRecord = new ProducerRecord(topic, null, record.timestamp, record.key, record.value)
    new MappedJavaFuture[RecordMetadata, java.lang.Long](producer.send(producerRecord)) {
      override def map(result: RecordMetadata): java.lang.Long = {
        if (System.currentTimeMillis() % 3 == 0) throw new RuntimeException("simulated kafka producer error")
        result.offset()
      }
    }
  }
}
Example 10
Source File: ParallelExecutor.scala From nyaya with GNU Lesser General Public License v2.1
package nyaya.test

import java.util.concurrent.{Callable, ExecutorService, Executors, Future, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger
import nyaya.gen.ThreadNumber
import nyaya.prop.Prop
import ParallelExecutor._
import PTest._
import Executor.{DataCtx, Data}

// TODO data SampleSize = TotalSamples(n) | Fn(qty|%, gensize|%) | PerWorker(sampleSize)

object ParallelExecutor {
  val defaultThreadCount = 1.max(Runtime.getRuntime.availableProcessors - 1)

  def merge[A](a: RunState[A], b: RunState[A]): RunState[A] = {
    val runs = a.runs max b.runs
    (a.success, b.success) match {
      case (false, true) => RunState(runs, a.result)
      case _             => RunState(runs, b.result)
    }
  }
}

case class ParallelExecutor(workers: Int = defaultThreadCount) extends Executor {

  val debugPrefixes = (0 until workers).toVector.map(i => s"Worker #$i: ")

  override def run[A](p: Prop[A], g: Data[A], S: Settings): RunState[A] = {
    val sss = {
      var rem = S.sampleSize.value
      var i = workers
      var v = Vector.empty[SampleSize]
      while (i > 0) {
        val p = rem / i
        v :+= SampleSize(p)
        rem -= p
        i -= 1
      }
      v
    }

    if (S.debug) {
      val szs = sss.map(_.value)
      println(s"Samples/Worker: ${szs.mkString("{", ",", "}")} = Σ${szs.sum}")
    }

    val ai = new AtomicInteger(0)
    def task(worker: Int) = mkTask {
      val dp = debugPrefixes(worker)
      val data = g(DataCtx(sss(worker), ThreadNumber(worker), S.seed, dp))
      testN(p, data, () => ai.incrementAndGet(), S)
    }
    runAsync2(workers, task)
  }

  override def prove[A](p: Prop[A], d: Domain[A], S: Settings): RunState[A] = {
    val threads = workers min d.size

    val ai = new AtomicInteger(0)
    def task(worker: Int) = mkTask {
      proveN(p, d, worker, threads, _ => ai.incrementAndGet, S)
    }
    runAsync2(threads, task)
  }

  private[this] def mkTask[A](f: => RunState[A]) = new Callable[RunState[A]] {
    override def call(): RunState[A] = f
  }

  private[this] def runAsync2[A](threads: Int, f: Int => Callable[RunState[A]]): RunState[A] =
    runAsync(es => (0 until threads).toList.map(es submit f(_)))

  private[this] def runAsync[A](start: ExecutorService => List[Future[RunState[A]]]): RunState[A] = {
    val es: ExecutorService = Executors.newFixedThreadPool(workers)
    val fs = start(es)
    es.shutdown()
    val rss = fs.map(_.get())
    es.awaitTermination(1, TimeUnit.MINUTES)
    rss.foldLeft(RunState.empty[A])(merge)
  }
}
Example 11
Source File: PowerBIAuthenticationWithUsernamePassword.scala From spark-powerbi-connector with Apache License 2.0
package com.microsoft.azure.powerbi.authentication

import java.net.URI
import java.util.concurrent.{Executors, ExecutorService, Future}

import javax.naming.ServiceUnavailableException

import com.microsoft.aad.adal4j.{AuthenticationContext, AuthenticationResult}

case class PowerBIAuthenticationWithUsernamePassword(powerBIAuthorityURL: String,
                                                     powerBIResourceURL: String,
                                                     powerBIClientID: String,
                                                     activeDirectoryUsername: String,
                                                     activeDirectoryPassword: String)
  extends PowerBIAuthentication {

  def getAccessToken: String =
    if (this.accessToken != null && this.accessToken.nonEmpty) this.accessToken
    else refreshAccessToken

  def refreshAccessToken: String = retrieveToken.getAccessToken

  private def retrieveToken: AuthenticationResult = {
    var authenticationResult: AuthenticationResult = null
    var executorService: ExecutorService = null

    try {
      executorService = Executors.newFixedThreadPool(1)

      val authenticationContext: AuthenticationContext =
        new AuthenticationContext(powerBIAuthorityURL, true, executorService)

      val authenticationResultFuture: Future[AuthenticationResult] =
        authenticationContext.acquireToken(powerBIResourceURL, powerBIClientID,
          activeDirectoryUsername, activeDirectoryPassword, null)

      authenticationResult = authenticationResultFuture.get()
    } finally {
      executorService.shutdown()
    }

    if (authenticationResult == null) {
      throw new ServiceUnavailableException("Authentication result empty")
    }

    this.accessToken = authenticationResult.getAccessToken
    authenticationResult
  }

  private var accessToken: String = _
}
Example 12
Source File: PowerBIAuthenticationWithAuthorizationCode.scala From spark-powerbi-connector with Apache License 2.0
package com.microsoft.azure.powerbi.authentication

import java.net.URI
import java.util.concurrent.{Executors, ExecutorService, Future}

import javax.naming.ServiceUnavailableException

import com.microsoft.aad.adal4j.{AuthenticationContext, AuthenticationResult}

case class PowerBIAuthenticationWithAuthorizationCode(powerBIAuthorityURL: String,
                                                      powerBIResourceURL: String,
                                                      powerBIClientID: String,
                                                      activeDirectoryAuthorizationCode: String,
                                                      activeDirectoryRedirectUri: URI)
  extends PowerBIAuthentication {

  def getAccessToken: String =
    if (this.accessToken != null && this.accessToken.nonEmpty) this.accessToken
    else refreshAccessToken

  def refreshAccessToken: String = retrieveToken.getAccessToken

  private def retrieveToken: AuthenticationResult = {
    var authenticationResult: AuthenticationResult = null
    var executorService: ExecutorService = null

    try {
      executorService = Executors.newFixedThreadPool(1)

      val authenticationContext: AuthenticationContext =
        new AuthenticationContext(powerBIAuthorityURL, true, executorService)

      val authenticationResultFuture: Future[AuthenticationResult] =
        authenticationContext.acquireTokenByAuthorizationCode(activeDirectoryAuthorizationCode,
          powerBIResourceURL, powerBIClientID, activeDirectoryRedirectUri, null)

      authenticationResult = authenticationResultFuture.get()
    } finally {
      executorService.shutdown()
    }

    if (authenticationResult == null) {
      throw new ServiceUnavailableException("Authentication result empty")
    }

    this.accessToken = authenticationResult.getAccessToken
    authenticationResult
  }

  private var accessToken: String = _
}
Example 13
Source File: KfProducer.scala From Adenium with Apache License 2.0
package com.adenium.externals.kafka

import java.util.Properties
import java.util.concurrent.Future

import com.adenium.utils.May._
import org.apache.kafka.clients.producer._

// Excerpt: the KfProducer class definition itself is elided in this listing; only the companion object is shown.
object KfProducer {

  def apply(broker: String, props: Option[Properties] = None): KfProducer = {
    val prop = props.getOrElse {
      val p = new Properties()
      p.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, broker)
      p.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
      p.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
      p.put(ProducerConfig.ACKS_CONFIG, "0")
      p.put(ProducerConfig.RETRIES_CONFIG, "3")
      p.put(ProducerConfig.LINGER_MS_CONFIG, "0")
      //props.put(ProducerConfig.BATCH_SIZE_CONFIG, "1")
      p
    }
    val prod = new KafkaProducer[String, String](prop)
    new KfProducer(broker, prod)
  }
}
Example 14
Source File: TestCreateTableIfNotExists.scala From carbondata with Apache License 2.0
package org.apache.carbondata.spark.testsuite.createTable

import java.util.concurrent.{Callable, Executors, ExecutorService, Future, TimeUnit}

import org.apache.spark.sql.test.util.QueryTest
import org.apache.spark.sql.AnalysisException
import org.scalatest.BeforeAndAfterAll

class TestCreateTableIfNotExists extends QueryTest with BeforeAndAfterAll {

  override def beforeAll {
    sql("use default")
    sql("drop table if exists test")
    sql("drop table if exists sourceTable")
    sql("drop table if exists targetTable")
  }

  test("test create table if not exists") {
    sql("create table test(a int, b string) STORED AS carbondata")
    try {
      // table creation should be successful
      sql("create table if not exists test(a int, b string) STORED AS carbondata")
      assert(true)
    } catch {
      case ex: Exception =>
        assert(false)
    }
  }

  test("test create table if not exist concurrently") {
    val executorService: ExecutorService = Executors.newFixedThreadPool(10)
    var futures: List[Future[_]] = List()
    for (i <- 0 until (3)) {
      futures = futures :+ runAsync()
    }

    executorService.shutdown()
    executorService.awaitTermination(30L, TimeUnit.SECONDS)

    futures.foreach { future =>
      assertResult("PASS")(future.get.toString)
    }

    def runAsync(): Future[String] = {
      executorService.submit(new Callable[String] {
        override def call() = {
          // Create table
          var result = "PASS"
          try {
            sql("create table IF NOT EXISTS TestIfExists(name string) STORED AS carbondata")
          } catch {
            case exception: Exception =>
              result = exception.getMessage
              exception.printStackTrace()
          }
          result
        }
      })
    }
  }

  test("test create table without column specified") {
    val exception = intercept[AnalysisException] {
      sql("create table TableWithoutColumn STORED AS carbondata tblproperties('sort_columns'='')")
    }
    assert(exception.getMessage.contains("Unable to infer the schema"))
  }

  override def afterAll {
    sql("use default")
    sql("drop table if exists test")
    sql("drop table if exists sourceTable")
    sql("drop table if exists targetTable")
    sql("drop table if exists TestIfExists")
  }
}
Example 15
Source File: KeyVaultADALAuthenticator.scala From azure-kusto-spark with Apache License 2.0
package com.microsoft.kusto.spark.utils

import java.net.MalformedURLException
import java.util.concurrent.{ExecutionException, ExecutorService, Executors, Future}

import com.microsoft.aad.adal4j.{AuthenticationContext, AuthenticationResult, ClientCredential}
import com.microsoft.azure.keyvault.KeyVaultClient
import com.microsoft.azure.keyvault.authentication.KeyVaultCredentials

class KeyVaultADALAuthenticator(clientId: String, clientKey: String) {

  def getAuthenticatedClient: KeyVaultClient = {
    // Creates the KeyVaultClient using the created credentials.
    new KeyVaultClient(createCredentials)
  }

  private def createCredentials: KeyVaultCredentials = {
    new KeyVaultCredentials() {
      // Callback that supplies the token type and access token on request.
      override def doAuthenticate(authorization: String, resource: String, scope: String): String = {
        try {
          val authResult = getAccessToken(authorization, resource)
          authResult.getAccessToken
        } catch {
          case e: Exception =>
            KustoDataSourceUtils.logError("KeyVaultADALAuthenticator",
              "Exception trying to access Key Vault:" + e.getMessage)
            ""
        }
      }
    }
  }

  @throws[InterruptedException]
  @throws[ExecutionException]
  @throws[MalformedURLException]
  private def getAccessToken(authorization: String, resource: String): AuthenticationResult = {
    var result: AuthenticationResult = null
    var service: ExecutorService = null

    // Starts a service to fetch access token.
    try {
      service = Executors.newFixedThreadPool(1)
      val context = new AuthenticationContext(authorization, false, service)

      // Acquires token based on client ID and client secret.
      var future: Future[AuthenticationResult] = null
      if (clientId != null && clientKey != null) {
        val credentials = new ClientCredential(clientId, clientKey)
        future = context.acquireToken(resource, credentials, null)
      }

      result = future.get
    } finally service.shutdown()

    if (result == null) throw new RuntimeException("Authentication results were null.")
    result
  }
}
Example 16
Source File: BDPFutureTask.scala From Linkis with Apache License 2.0
package com.webank.wedatasphere.linkis.scheduler.future

import java.util.concurrent.{Future, FutureTask}

import com.webank.wedatasphere.linkis.common.utils.{Logging, Utils}

class BDPFutureTask(future: Future[_]) extends BDPFuture with Logging {

  override def cancel(): Unit = Utils.tryAndErrorMsg {
    future match {
      case futureTask: FutureTask[_] =>
        info("Start to interrupt BDPFutureTask")
        val futureType = futureTask.getClass
        val field = futureType.getDeclaredField("runner")
        field.setAccessible(true)
        val runner = field.get(futureTask).asInstanceOf[Thread]
        runner.interrupt()
        info(s"Finished to interrupt BDPFutureTask of ${runner.getName}")
    }
  }("Failed to interrupt BDPFutureTask")
}
Example 17
Source File: RMEventConsumer.scala From Linkis with Apache License 2.0
package com.webank.wedatasphere.linkis.resourcemanager.schedule

import java.util.concurrent.{ExecutorService, Future}

import com.webank.wedatasphere.linkis.common.utils.Utils
import com.webank.wedatasphere.linkis.resourcemanager.event.RMEvent
import com.webank.wedatasphere.linkis.resourcemanager.event.metric.{MetricRMEvent, MetricRMEventExecutor}
import com.webank.wedatasphere.linkis.resourcemanager.event.notify.{NotifyRMEvent, NotifyRMEventExecutor}
import com.webank.wedatasphere.linkis.scheduler.SchedulerContext
import com.webank.wedatasphere.linkis.scheduler.queue._

import scala.collection.mutable.ArrayBuffer

class RMEventConsumer(schedulerContext: SchedulerContext,
                      executeService: ExecutorService) extends Consumer(schedulerContext, executeService) {

  private var queue: ConsumeQueue = _
  private var group: Group = _
  private var maxRunningJobsNum = 1000
  // not put into use yet
  private val runningJobs = new Array[SchedulerEvent](maxRunningJobsNum)
  private val executorManager = schedulerContext.getOrCreateExecutorManager
  private var rmConsumerListener: RMConsumerListener = _
  var future: Future[_] = _

  def this(schedulerContext: SchedulerContext, executeService: ExecutorService, group: Group) = {
    this(schedulerContext, executeService)
    this.group = group
    maxRunningJobsNum = group.getMaximumCapacity
  }

  def start(): Unit = future = executeService.submit(this)

  def setRmConsumerListener(rmConsumerListener: RMConsumerListener): Unit = {
    this.rmConsumerListener = rmConsumerListener
  }

  override def setConsumeQueue(consumeQueue: ConsumeQueue) = {
    queue = consumeQueue
  }

  override def getConsumeQueue = queue

  override def getGroup = group

  override def setGroup(group: Group) = {
    this.group = group
  }

  override def getRunningEvents = getEvents(_.isRunning)

  private def getEvents(op: SchedulerEvent => Boolean): Array[SchedulerEvent] = {
    val result = ArrayBuffer[SchedulerEvent]()
    runningJobs.filter(_ != null).filter(x => op(x)).foreach(result += _)
    result.toArray
  }

  override def run() = {
    Thread.currentThread().setName(s"${toString}Thread")
    info(s"$toString thread started!")
    while (!terminate) {
      Utils.tryAndError(loop())
      Utils.tryQuietly(Thread.sleep(10))
    }
    info(s"$toString thread stopped!")
  }

  def loop(): Unit = {
    var event = queue.take()
    while (event.turnToScheduled() != true) {
      event = queue.take()
    }
    if (rmConsumerListener != null) { rmConsumerListener.beforeEventExecute(this, event.asInstanceOf[RMEvent]) }
    Utils.tryAndError({
      val executor = executorManager.askExecutor(event)
      if (executor.isDefined) {
        event match {
          case x: MetricRMEvent =>
            Utils.tryQuietly(executor.get.asInstanceOf[MetricRMEventExecutor].execute(new EventJob(x)))
          case y: NotifyRMEvent =>
            Utils.tryQuietly(executor.get.asInstanceOf[NotifyRMEventExecutor].execute(new EventJob(y)))
        }
      }
    })
    if (rmConsumerListener != null) { rmConsumerListener.afterEventExecute(this, event.asInstanceOf[RMEvent]) }
  }

  override def shutdown() = {
    future.cancel(true)
    super.shutdown()
  }
}